//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
#include "CGCleanup.h"
#include "CGCUDARuntime.h"
#include "CGHLSLRuntime.h" // HLSL Change
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "dxc/DXIL/DxilMetadataHelper.h" // HLSL Change
using namespace clang;
using namespace CodeGen;
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
: CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
Builder(cgm.getModule().getContext(), llvm::ConstantFolder(),
CGBuilderInserterTy(this)),
CurFn(nullptr), CapturedStmtInfo(nullptr),
SanOpts(CGM.getLangOpts().Sanitize), IsSanitizerScope(false),
CurFuncIsThunk(false), AutoreleaseResult(false), SawAsmBlock(false),
IsOutlinedSEHHelper(false), BlockInfo(nullptr), BlockPointer(nullptr),
LambdaThisCaptureField(nullptr), NormalCleanupDest(nullptr),
NextCleanupDestIndex(1), FirstBlockInfo(nullptr), EHResumeBlock(nullptr),
ExceptionSlot(nullptr), EHSelectorSlot(nullptr),
DebugInfo(CGM.getModuleDebugInfo()),
DisableDebugInfo(false), DidCallStackSave(false), IndirectBranch(nullptr),
PGO(cgm), SwitchInsn(nullptr), SwitchWeights(nullptr),
CaseRangeBlock(nullptr), UnreachableBlock(nullptr), NumReturnExprs(0),
NumSimpleReturnExprs(0), CXXABIThisDecl(nullptr),
CXXABIThisValue(nullptr), CXXThisValue(nullptr),
CXXDefaultInitExprThis(nullptr), CXXStructorImplicitParamDecl(nullptr),
CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
TerminateHandler(nullptr), TrapBB(nullptr) {
if (!suppressNewContext)
CGM.getCXXABI().getMangleContext().startNewFunction();
llvm::FastMathFlags FMF;
if (CGM.getLangOpts().FastMath)
FMF.setUnsafeAlgebra();
if (CGM.getLangOpts().FiniteMathOnly) {
FMF.setNoNaNs();
FMF.setNoInfs();
}
if (CGM.getCodeGenOpts().NoNaNsFPMath) {
FMF.setNoNaNs();
}
if (CGM.getCodeGenOpts().NoSignedZeros) {
FMF.setNoSignedZeros();
}
if (CGM.getCodeGenOpts().ReciprocalMath) {
FMF.setAllowReciprocal();
}
Builder.SetFastMathFlags(FMF);
}
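// Illustrative note (a sketch, not verbatim compiler output): with
// -ffast-math, the flags set above make the builder tag floating-point
// instructions, so a plain float addition is emitted roughly as
//   %add = fadd fast float %a, %b
// rather than the default
//   %add = fadd float %a, %b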
CodeGenFunction::~CodeGenFunction() {
assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
// If there are any unclaimed block infos, go ahead and destroy them
// now. This can happen if IR-gen gets clever and skips evaluating
// something.
if (FirstBlockInfo)
destroyBlockInfos(FirstBlockInfo);
#if 0 // HLSL Change - no OpenMP support
if (getLangOpts().OpenMP) {
CGM.getOpenMPRuntime().functionFinished(*this);
}
#endif // HLSL Change - no OpenMP support
}
LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
CharUnits Alignment;
if (CGM.getCXXABI().isTypeInfoCalculable(T)) {
Alignment = getContext().getTypeAlignInChars(T);
unsigned MaxAlign = getContext().getLangOpts().MaxTypeAlign;
if (MaxAlign && Alignment.getQuantity() > MaxAlign &&
!getContext().isAlignmentRequired(T))
Alignment = CharUnits::fromQuantity(MaxAlign);
}
return LValue::MakeAddr(V, T, Alignment, getContext(), CGM.getTBAAInfo(T));
}
llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
return CGM.getTypes().ConvertTypeForMem(T);
}
llvm::Type *CodeGenFunction::ConvertType(QualType T) {
return CGM.getTypes().ConvertType(T);
}
TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
type = type.getCanonicalType();
while (true) {
switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
llvm_unreachable("non-canonical or dependent type in IR-generation");
case Type::Auto:
llvm_unreachable("undeduced auto type in IR-generation");
// Various scalar types.
case Type::Builtin:
case Type::Pointer:
case Type::BlockPointer:
case Type::LValueReference:
case Type::RValueReference:
case Type::MemberPointer:
case Type::Vector:
case Type::ExtVector:
case Type::FunctionProto:
case Type::FunctionNoProto:
case Type::Enum:
case Type::ObjCObjectPointer:
return TEK_Scalar;
// Complexes.
case Type::Complex:
return TEK_Complex;
// Arrays, records, and Objective-C objects.
case Type::ConstantArray:
case Type::IncompleteArray:
case Type::VariableArray:
case Type::Record:
case Type::ObjCObject:
case Type::ObjCInterface:
// HLSL Change Starts
if (hlsl::IsHLSLVecType(type)) {
// Treat hlsl vector as ext vector.
return TEK_Scalar;
}
if (hlsl::IsHLSLMatType(type)) {
// Treat hlsl matrix as scalar type too.
return TEK_Scalar;
}
// HLSL Change Ends
return TEK_Aggregate;
// We operate on atomic values according to their underlying type.
case Type::Atomic:
type = cast<AtomicType>(type)->getValueType();
continue;
}
llvm_unreachable("unknown type kind!");
}
}
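// Illustrative mapping (a sketch): `int` and `float *` evaluate as
// TEK_Scalar, `_Complex double` as TEK_Complex, and `struct S { int a, b; }`
// as TEK_Aggregate. With the HLSL changes above, the HLSL vector `float4`
// and matrix `float4x4` are also treated as TEK_Scalar.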
llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
// For cleanliness, we try to avoid emitting the return block for
// simple cases.
llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
if (CurBB) {
assert(!CurBB->getTerminator() && "Unexpected terminated block.");
// We have a valid insert point, reuse it if it is empty or there are no
// explicit jumps to the return block.
if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
delete ReturnBlock.getBlock();
} else
EmitBlock(ReturnBlock.getBlock());
return llvm::DebugLoc();
}
// Otherwise, if the return block is the target of a single direct
// branch then we can just put the code in that block instead. This
// cleans up functions which started with a unified return block.
if (ReturnBlock.getBlock()->hasOneUse()) {
llvm::BranchInst *BI =
dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
if (BI && BI->isUnconditional() &&
BI->getSuccessor(0) == ReturnBlock.getBlock()) {
// Record/return the DebugLoc of the simple 'return' expression to be used
// later by the actual 'ret' instruction.
llvm::DebugLoc Loc = BI->getDebugLoc();
Builder.SetInsertPoint(BI->getParent());
BI->eraseFromParent();
delete ReturnBlock.getBlock();
return Loc;
}
}
// FIXME: We are at an unreachable point, there is no reason to emit the block
// unless it has uses. However, we still need a place to put the debug
// region.end for now.
EmitBlock(ReturnBlock.getBlock());
return llvm::DebugLoc();
}
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
if (!BB) return;
if (!BB->use_empty())
return CGF.CurFn->getBasicBlockList().push_back(BB);
delete BB;
}
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
assert(BreakContinueStack.empty() &&
"mismatched push/pop in break/continue stack!");
bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
&& NumSimpleReturnExprs == NumReturnExprs
&& ReturnBlock.getBlock()->use_empty();
// Usually the return expression is evaluated before the cleanup
// code. If the function contains only a simple return statement,
// such as a constant, the location before the cleanup code becomes
// the last useful breakpoint in the function, because the simple
// return expression will be evaluated after the cleanup code. To be
// safe, set the debug location for cleanup code to the location of
// the return statement. Otherwise the cleanup code should be at the
// end of the function's lexical scope.
//
// If there are multiple branches to the return block, the branch
// instructions will get the location of the return statements and
// all will be fine.
if (CGDebugInfo *DI = getDebugInfo()) {
if (OnlySimpleReturnStmts)
DI->EmitLocation(Builder, LastStopPoint);
else
DI->EmitLocation(Builder, EndLoc);
}
// Pop any cleanups that might have been associated with the
// parameters. Do this in whatever block we're currently in; it's
// important to do this before we enter the return block or return
// edges will be *really* confused.
bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
bool HasOnlyLifetimeMarkers =
HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
if (HasCleanups) {
// Make sure the line table doesn't jump back into the body for
// the ret after it's been at EndLoc.
if (CGDebugInfo *DI = getDebugInfo())
if (OnlySimpleReturnStmts)
DI->EmitLocation(Builder, EndLoc);
PopCleanupBlocks(PrologueCleanupDepth);
}
// Emit function epilog (to return).
llvm::DebugLoc Loc = EmitReturnBlock();
if (ShouldInstrumentFunction())
EmitFunctionInstrumentation("__cyg_profile_func_exit");
// Emit debug descriptor for function end.
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitFunctionEnd(Builder);
// Reset the debug location to that of the simple 'return' expression, if
// any, rather than that of the end of the function's scope '}'.
ApplyDebugLocation AL(*this, Loc);
EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
EmitEndEHSpec(CurCodeDecl);
assert(EHStack.empty() &&
"did not remove all scopes from cleanup stack!");
// If someone did an indirect goto, emit the indirect goto block at the end of
// the function.
if (IndirectBranch) {
EmitBlock(IndirectBranch->getParent());
Builder.ClearInsertionPoint();
}
// If some of our locals escaped, insert a call to llvm.localescape in the
// entry block.
if (!EscapedLocals.empty()) {
// Invert the map from local to index into a simple vector. There should be
// no holes.
SmallVector<llvm::Value *, 4> EscapeArgs;
EscapeArgs.resize(EscapedLocals.size());
for (auto &Pair : EscapedLocals)
EscapeArgs[Pair.second] = Pair.first;
llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
&CGM.getModule(), llvm::Intrinsic::localescape);
CGBuilderTy(AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
}
// Remove the AllocaInsertPt instruction, which is just a convenience for us.
llvm::Instruction *Ptr = AllocaInsertPt;
AllocaInsertPt = nullptr;
Ptr->eraseFromParent();
// If someone took the address of a label but never did an indirect goto, we
// made a zero entry PHI node, which is illegal, zap it now.
if (IndirectBranch) {
llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
if (PN->getNumIncomingValues() == 0) {
PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
PN->eraseFromParent();
}
}
EmitIfUsed(*this, EHResumeBlock);
EmitIfUsed(*this, TerminateLandingPad);
EmitIfUsed(*this, TerminateHandler);
EmitIfUsed(*this, UnreachableBlock);
if (CGM.getCodeGenOpts().EmitDeclMetadata)
EmitDeclMetadata();
for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
I = DeferredReplacements.begin(),
E = DeferredReplacements.end();
I != E; ++I) {
I->first->replaceAllUsesWith(I->second);
I->first->eraseFromParent();
}
}
/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
if (!CGM.getCodeGenOpts().InstrumentFunctions)
return false;
if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
return false;
return true;
}
/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
// void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
llvm::PointerType *PointerTy = Int8PtrTy;
llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
llvm::FunctionType *FunctionTy =
llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);
llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
llvm::CallInst *CallSite = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
llvm::ConstantInt::get(Int32Ty, 0),
"callsite");
llvm::Value *args[] = {
llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
CallSite
};
EmitNounwindRuntimeCall(F, args);
}
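// Illustrative result (a sketch, not verbatim IR): under
// -finstrument-functions, a function body is bracketed roughly as
//   %callsite = call i8* @llvm.returnaddress(i32 0)
//   call void @__cyg_profile_func_enter(i8* <this_fn>, i8* %callsite)
//   ...function body...
//   call void @__cyg_profile_func_exit(i8* <this_fn>, i8* %callsite)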
void CodeGenFunction::EmitMCountInstrumentation() {
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
llvm::Constant *MCountFn =
CGM.CreateRuntimeFunction(FTy, getTarget().getMCountName());
EmitNounwindRuntimeCall(MCountFn);
}
// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The argument information stored
// includes the argument name, its type, the address and access qualifiers used.
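// Illustrative example (a sketch, not verbatim metadata): for a kernel like
//   kernel void foo(global unsigned int *in, read_only image2d_t img);
// the lists built below come out roughly as
//   !{!"kernel_arg_addr_space", i32 1, i32 1}
//   !{!"kernel_arg_access_qual", !"none", !"read_only"}
//   !{!"kernel_arg_type", !"uint*", !"image2d_t"}
//   !{!"kernel_arg_type_qual", !"", !""}
// (address-space numbers are target-dependent).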
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
CodeGenModule &CGM, llvm::LLVMContext &Context,
SmallVector<llvm::Metadata *, 5> &kernelMDArgs,
CGBuilderTy &Builder, ASTContext &ASTCtx) {
// Create MDNodes that represent the kernel arg metadata.
// Each MDNode is a list of the form: "key", followed by N values, where N
// is the number of kernel arguments.
const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();
// MDNode for the kernel argument address space qualifiers.
SmallVector<llvm::Metadata *, 8> addressQuals;
addressQuals.push_back(llvm::MDString::get(Context, "kernel_arg_addr_space"));
// MDNode for the kernel argument access qualifiers (images only).
SmallVector<llvm::Metadata *, 8> accessQuals;
accessQuals.push_back(llvm::MDString::get(Context, "kernel_arg_access_qual"));
// MDNode for the kernel argument type names.
SmallVector<llvm::Metadata *, 8> argTypeNames;
argTypeNames.push_back(llvm::MDString::get(Context, "kernel_arg_type"));
// MDNode for the kernel argument base type names.
SmallVector<llvm::Metadata *, 8> argBaseTypeNames;
argBaseTypeNames.push_back(
llvm::MDString::get(Context, "kernel_arg_base_type"));
// MDNode for the kernel argument type qualifiers.
SmallVector<llvm::Metadata *, 8> argTypeQuals;
argTypeQuals.push_back(llvm::MDString::get(Context, "kernel_arg_type_qual"));
// MDNode for the kernel argument names.
SmallVector<llvm::Metadata *, 8> argNames;
argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name"));
for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
const ParmVarDecl *parm = FD->getParamDecl(i);
QualType ty = parm->getType();
std::string typeQuals;
if (ty->isPointerType()) {
QualType pointeeTy = ty->getPointeeType();
// Get address qualifier.
addressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(
ASTCtx.getTargetAddressSpace(pointeeTy.getAddressSpace()))));
// Get argument type name.
std::string typeName =
pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";
// Turn "unsigned type" to "utype"
std::string::size_type pos = typeName.find("unsigned");
if (pointeeTy.isCanonical() && pos != std::string::npos)
typeName.erase(pos+1, 8);
argTypeNames.push_back(llvm::MDString::get(Context, typeName));
std::string baseTypeName =
pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
Policy) +
"*";
// Turn "unsigned type" to "utype"
pos = baseTypeName.find("unsigned");
if (pos != std::string::npos)
baseTypeName.erase(pos+1, 8);
argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));
// Get argument type qualifiers:
if (ty.isRestrictQualified())
typeQuals = "restrict";
if (pointeeTy.isConstQualified() ||
(pointeeTy.getAddressSpace() == LangAS::opencl_constant))
typeQuals += typeQuals.empty() ? "const" : " const";
if (pointeeTy.isVolatileQualified())
typeQuals += typeQuals.empty() ? "volatile" : " volatile";
} else {
uint32_t AddrSpc = 0;
if (ty->isImageType())
AddrSpc =
CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);
addressQuals.push_back(
llvm::ConstantAsMetadata::get(Builder.getInt32(AddrSpc)));
// Get argument type name.
std::string typeName = ty.getUnqualifiedType().getAsString(Policy);
// Turn "unsigned type" to "utype"
std::string::size_type pos = typeName.find("unsigned");
if (ty.isCanonical() && pos != std::string::npos)
typeName.erase(pos+1, 8);
argTypeNames.push_back(llvm::MDString::get(Context, typeName));
std::string baseTypeName =
ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);
// Turn "unsigned type" to "utype"
pos = baseTypeName.find("unsigned");
if (pos != std::string::npos)
baseTypeName.erase(pos+1, 8);
argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));
// Get argument type qualifiers:
if (ty.isConstQualified())
typeQuals = "const";
if (ty.isVolatileQualified())
typeQuals += typeQuals.empty() ? "volatile" : " volatile";
}
argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));
// Get image access qualifier:
if (ty->isImageType()) {
const OpenCLImageAccessAttr *A = parm->getAttr<OpenCLImageAccessAttr>();
if (A && A->isWriteOnly())
accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
else
accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
// FIXME: what about read_write?
} else
accessQuals.push_back(llvm::MDString::get(Context, "none"));
// Get argument name.
argNames.push_back(llvm::MDString::get(Context, parm->getName()));
}
kernelMDArgs.push_back(llvm::MDNode::get(Context, addressQuals));
kernelMDArgs.push_back(llvm::MDNode::get(Context, accessQuals));
kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeNames));
kernelMDArgs.push_back(llvm::MDNode::get(Context, argBaseTypeNames));
kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeQuals));
if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
}
void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
llvm::Function *Fn)
{
if (!FD->hasAttr<OpenCLKernelAttr>())
return;
llvm::LLVMContext &Context = getLLVMContext();
SmallVector<llvm::Metadata *, 5> kernelMDArgs;
kernelMDArgs.push_back(llvm::ConstantAsMetadata::get(Fn));
GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs, Builder,
getContext());
if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
QualType hintQTy = A->getTypeHint();
const ExtVectorType *hintEltQTy = hintQTy->getAs<ExtVectorType>();
bool isSignedInteger =
hintQTy->isSignedIntegerType() ||
(hintEltQTy && hintEltQTy->getElementType()->isSignedIntegerType());
llvm::Metadata *attrMDArgs[] = {
llvm::MDString::get(Context, "vec_type_hint"),
llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
CGM.getTypes().ConvertType(A->getTypeHint()))),
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
llvm::IntegerType::get(Context, 32),
llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0))))};
kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
}
if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
llvm::Metadata *attrMDArgs[] = {
llvm::MDString::get(Context, "work_group_size_hint"),
llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
}
if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
llvm::Metadata *attrMDArgs[] = {
llvm::MDString::get(Context, "reqd_work_group_size"),
llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
}
llvm::MDNode *kernelMDNode = llvm::MDNode::get(Context, kernelMDArgs);
llvm::NamedMDNode *OpenCLKernelMetadata =
CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
OpenCLKernelMetadata->addOperand(kernelMDNode);
}
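// Illustrative example (a sketch): for
//   __attribute__((reqd_work_group_size(8, 8, 1))) kernel void k();
// the node added to !opencl.kernels carries an operand roughly like
//   !{!"reqd_work_group_size", i32 8, i32 8, i32 1}
// alongside the kernel function itself and the argument metadata above.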
/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
const Stmt *Body = nullptr;
if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
Body = FD->getBody();
else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
Body = OMD->getBody();
if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
auto LastStmt = CS->body_rbegin();
if (LastStmt != CS->body_rend())
return isa<ReturnStmt>(*LastStmt);
}
return false;
}
void CodeGenFunction::StartFunction(GlobalDecl GD,
QualType RetTy,
llvm::Function *Fn,
const CGFunctionInfo &FnInfo,
const FunctionArgList &Args,
SourceLocation Loc,
SourceLocation StartLoc) {
assert(!CurFn &&
"Do not use a CodeGenFunction object for more than one function");
const Decl *D = GD.getDecl();
DidCallStackSave = false;
CurCodeDecl = D;
CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
FnRetTy = RetTy;
CurFn = Fn;
CurFnInfo = &FnInfo;
assert(CurFn->isDeclaration() && "Function already has body?");
if (CGM.isInSanitizerBlacklist(Fn, Loc))
SanOpts.clear();
if (D) {
// Apply the no_sanitize* attributes to SanOpts.
for (auto Attr : D->specific_attrs<NoSanitizeAttr>())
SanOpts.Mask &= ~Attr->getMask();
}
// Apply sanitizer attributes to the function.
if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
if (SanOpts.has(SanitizerKind::Thread))
Fn->addFnAttr(llvm::Attribute::SanitizeThread);
if (SanOpts.has(SanitizerKind::Memory))
Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
if (SanOpts.has(SanitizerKind::SafeStack))
Fn->addFnAttr(llvm::Attribute::SafeStack);
// Pass the inline keyword to the optimizer if it appears explicitly on any
// declaration. Also, in the case of -fno-inline, attach the NoInline
// attribute to all functions that are not marked AlwaysInline.
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
if (!CGM.getCodeGenOpts().NoInline) {
for (auto RI : FD->redecls())
if (RI->isInlineSpecified()) {
Fn->addFnAttr(llvm::Attribute::InlineHint);
break;
}
} else if (!FD->hasAttr<AlwaysInlineAttr>())
Fn->addFnAttr(llvm::Attribute::NoInline);
}
if (getLangOpts().OpenCL) {
// Add metadata for a kernel function.
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
EmitOpenCLKernelMetadata(FD, Fn);
}
// If we are checking function types, emit a function type signature as
// prologue data.
if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
if (llvm::Constant *PrologueSig =
CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
llvm::Constant *FTRTTIConst =
CGM.GetAddrOfRTTIDescriptor(FD->getType(), /*ForEH=*/true);
llvm::Constant *PrologueStructElems[] = { PrologueSig, FTRTTIConst };
llvm::Constant *PrologueStructConst =
llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
Fn->setPrologueData(PrologueStructConst);
}
}
}
llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
// Create a marker to make it easy to insert allocas into the entry block
// later. Don't create this with the builder, because we don't want it
// folded.
llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
if (Builder.isNamePreserving())
AllocaInsertPt->setName("allocapt");
ReturnBlock = getJumpDestInCurrentScope("return");
Builder.SetInsertPoint(EntryBB);
// Emit subprogram debug descriptor.
if (CGDebugInfo *DI = getDebugInfo()) {
SmallVector<QualType, 16> ArgTypes;
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i) {
ArgTypes.push_back((*i)->getType());
}
QualType FnType = getContext().getFunctionType(
RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(), None); // HLSL Change - add param mods - TODO: review for inout
DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, Builder);
}
if (ShouldInstrumentFunction())
EmitFunctionInstrumentation("__cyg_profile_func_enter");
if (CGM.getCodeGenOpts().InstrumentForProfiling)
EmitMCountInstrumentation();
if (RetTy->isVoidType()) {
// Void type; nothing to return.
ReturnValue = nullptr;
// Count the implicit return.
if (!endsWithReturn(D))
++NumReturnExprs;
} else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
!hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
// Indirect aggregate return; emit returned value directly into sret slot.
// This reduces code size, and affects correctness in C++.
auto AI = CurFn->arg_begin();
if (CurFnInfo->getReturnInfo().isSRetAfterThis())
++AI;
ReturnValue = AI;
} else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
!hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
// Load the sret pointer from the argument struct and return into that.
unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
llvm::Function::arg_iterator EI = CurFn->arg_end();
--EI;
llvm::Value *Addr = Builder.CreateStructGEP(nullptr, EI, Idx);
ReturnValue = Builder.CreateLoad(Addr, "agg.result");
} else {
ReturnValue = CreateIRTemp(RetTy, "retval");
// HLSL Change begin
cast<llvm::Instruction>(ReturnValue)
->setMetadata(hlsl::DxilMDHelper::kDxilTempAllocaMDName, llvm::MDTuple::get(ReturnValue->getContext(), {}));
// HLSL Change end
// Tell the epilog emitter to autorelease the result. We do this
// now so that various specialized functions can suppress it
// during their IR-generation.
if (getLangOpts().ObjCAutoRefCount &&
!CurFnInfo->isReturnsRetained() &&
RetTy->isObjCRetainableType())
AutoreleaseResult = true;
}
EmitStartEHSpec(CurCodeDecl);
PrologueCleanupDepth = EHStack.stable_begin();
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
if (MD->getParent()->isLambda() &&
MD->getOverloadedOperator() == OO_Call) {
// We're in a lambda; figure out the captures.
MD->getParent()->getCaptureFields(LambdaCaptureFields,
LambdaThisCaptureField);
if (LambdaThisCaptureField) {
// If this lambda captures this, load it.
LValue ThisLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
CXXThisValue = EmitLoadOfLValue(ThisLValue,
SourceLocation()).getScalarVal();
}
for (auto *FD : MD->getParent()->fields()) {
if (FD->hasCapturedVLAType()) {
auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
SourceLocation()).getScalarVal();
auto VAT = FD->getCapturedVLAType();
VLASizeMap[VAT->getSizeExpr()] = ExprArg;
}
}
} else {
// Not in a lambda; just use 'this' from the method.
// FIXME: Should we generate a new load for each use of 'this'? The
// fast register allocator would be happier...
CXXThisValue = CXXABIThisValue;
}
}
// If any of the arguments have a variably modified type, make sure to
// emit the type size.
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i) {
const VarDecl *VD = *i;
// Dig out the type as written from ParmVarDecls; it's unclear whether
// the standard (C99 6.9.1p10) requires this, but we're following the
// precedent set by gcc.
QualType Ty;
if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
Ty = PVD->getOriginalType();
else
Ty = VD->getType();
if (Ty->isVariablyModifiedType())
EmitVariablyModifiedType(Ty);
}
// Emit a location at the end of the prologue.
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitLocation(Builder, StartLoc);
}
void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
const Stmt *Body) {
incrementProfileCounter(Body);
if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
EmitCompoundStmtWithoutScope(*S);
else
EmitStmt(Body);
}
/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases must not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
const Stmt *S) {
llvm::BasicBlock *SkipCountBB = nullptr;
if (HaveInsertPoint() && CGM.getCodeGenOpts().ProfileInstrGenerate) {
// When instrumenting for profiling, the fallthrough to certain
// statements needs to skip over the instrumentation code so that we
// get an accurate count.
SkipCountBB = createBasicBlock("skipcount");
EmitBranch(SkipCountBB);
}
EmitBlock(BB);
uint64_t CurrentCount = getCurrentProfileCount();
incrementProfileCounter(S);
setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
if (SkipCountBB)
EmitBlock(SkipCountBB);
}
/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
// LLVM treats 'nounwind' on a function as part of the type, so we
// can't do this on functions that can be overridden.
if (F->mayBeOverridden()) return;
for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
for (llvm::BasicBlock::iterator
BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) {
if (!Call->doesNotThrow())
return;
} else if (isa<llvm::ResumeInst>(&*BI)) {
return;
}
F->setDoesNotThrow();
}
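// Illustrative note (a sketch; `g` is a hypothetical non-throwing callee):
//   int f() { return g(); }
// contains only a call already marked non-throwing and no resume
// instruction, so f is marked nounwind here; a single potentially-throwing
// call makes the walk bail out without changing the function.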
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const CGFunctionInfo &FnInfo) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
// Check if we should generate debug info for this function.
if (FD->hasAttr<NoDebugAttr>())
DebugInfo = nullptr; // disable debug info indefinitely for this function
FunctionArgList Args;
QualType ResTy = FD->getReturnType();
CurGD = GD;
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
if (MD && MD->isInstance()) {
if (CGM.getCXXABI().HasThisReturn(GD))
ResTy = MD->getThisType(getContext());
else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
ResTy = CGM.getContext().VoidPtrTy;
CGM.getCXXABI().buildThisParam(*this, Args);
}
Args.append(FD->param_begin(), FD->param_end());
if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
SourceRange BodyRange;
if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
CurEHLocation = BodyRange.getEnd();
// Use the location of the start of the function to determine where
// the function definition is located. By default use the location
// of the declaration as the location for the subprogram. A function
// may lack a declaration in the source code if it is created by code
// gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
SourceLocation Loc = FD->getLocation();
// If this is a function specialization then use the pattern body
// as the location for the function.
if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
if (SpecDecl->hasBody(SpecDecl))
Loc = SpecDecl->getLocation();
// Emit the standard function prologue.
StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
// Generate the body of the function.
PGO.checkGlobalDecl(GD);
PGO.assignRegionCounters(GD.getDecl(), CurFn);
if (isa<CXXDestructorDecl>(FD))
EmitDestructorBody(Args);
else if (isa<CXXConstructorDecl>(FD))
EmitConstructorBody(Args);
else if (getLangOpts().CUDA &&
!getLangOpts().CUDAIsDevice &&
FD->hasAttr<CUDAGlobalAttr>())
CGM.getCUDARuntime().emitDeviceStub(*this, Args);
else if (isa<CXXConversionDecl>(FD) &&
cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
// The lambda conversion to block pointer is special; the semantics can't be
// expressed in the AST, so IRGen needs to special-case it.
EmitLambdaToBlockPointerBody(Args);
} else if (isa<CXXMethodDecl>(FD) &&
cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
// The lambda static invoker function is special, because it forwards or
// clones the body of the function call operator (but is actually static).
EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
} else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
(cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
// Implicit copy-assignment gets the same special treatment as implicit
// copy-constructors.
emitImplicitAssignmentOperatorBody(Args);
} else if (Stmt *Body = FD->getBody()) {
EmitFunctionBody(Args, Body);
} else
llvm_unreachable("no definition for emitted function");
// C++11 [stmt.return]p2:
// Flowing off the end of a function [...] results in undefined behavior in
// a value-returning function.
// C11 6.9.1p12:
// If the '}' that terminates a function is reached, and the value of the
// function call is used by the caller, the behavior is undefined.
if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
!FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
if (SanOpts.has(SanitizerKind::Return)) {
SanitizerScope SanScope(this);
llvm::Value *IsFalse = Builder.getFalse();
EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
"missing_return", EmitCheckSourceLocation(FD->getLocation()),
None);
} else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
EmitTrapCall(llvm::Intrinsic::trap);
}
Builder.CreateUnreachable();
Builder.ClearInsertionPoint();
}
// Emit the standard function epilogue.
FinishFunction(BodyRange.getEnd());
// If we haven't marked the function nothrow through other means, do
// a quick pass now to see if we can.
if (!CurFn->doesNotThrow())
TryMarkNoThrow(CurFn);
}
/// ContainsLabel - Return true if the statement contains a label in it. If
/// this statement is not executed normally, and it contains no label, we
/// can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
// Null statement, not a label!
if (!S) return false;
// If this is a label, we have to emit the code, consider something like:
// if (0) { ... foo: bar(); } goto foo;
//
// TODO: If anyone cared, we could track __label__'s, since we know that you
// can't jump to one from outside their declared region.
if (isa<LabelStmt>(S))
return true;
// If this is a case/default statement, and we haven't seen a switch, we have
// to emit the code.
if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
return true;
// If this is a switch statement, we want to ignore cases below it.
if (isa<SwitchStmt>(S))
IgnoreCaseStmts = true;
// Scan subexpressions for verboten labels.
for (const Stmt *SubStmt : S->children())
if (ContainsLabel(SubStmt, IgnoreCaseStmts))
return true;
return false;
}
/// containsBreak - Return true if the statement contains a break out of it.
/// A break inside a nested switch or loop belongs to that construct and
/// does not count.
bool CodeGenFunction::containsBreak(const Stmt *S) {
// Null statement, not a break!
if (!S) return false;
// If this is a switch or loop that defines its own break scope, then we can
// include it and anything inside of it.
if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
isa<ForStmt>(S))
return false;
if (isa<BreakStmt>(S))
return true;
// Scan subexpressions for verboten breaks.
for (const Stmt *SubStmt : S->children())
if (containsBreak(SubStmt))
return true;
return false;
}
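// Illustrative note: containsBreak(`if (c) break;`) is true because the
// break escapes the statement, while containsBreak(`while (c) { break; }`)
// is false because the loop defines its own break scope.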
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds, return true and set the boolean result in ResultBool.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
bool &ResultBool) {
llvm::APSInt ResultInt;
if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
return false;
ResultBool = ResultInt.getBoolValue();
return true;
}
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds, return true and set the folded value in ResultInt.
bool CodeGenFunction::
ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt) {
// FIXME: Rename and handle conversion of other evaluatable things
// to bool.
llvm::APSInt Int;
if (!Cond->EvaluateAsInt(Int, getContext()))
return false; // Not foldable, not integer or not fully evaluatable.
if (CodeGenFunction::ContainsLabel(Cond))
return false; // Contains a label.
ResultInt = Int;
return true;
}
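// Illustrative note: for `1 && f()`, the left operand folds to a constant
// true with no labels, so callers such as EmitBranchOnBoolExpr below can
// drop it and branch on `f()` alone.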
/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks. Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
llvm::BasicBlock *TrueBlock,
llvm::BasicBlock *FalseBlock,
uint64_t TrueCount) {
Cond = Cond->IgnoreParens();
if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
// Handle X && Y in a condition.
if (CondBOp->getOpcode() == BO_LAnd) {
// If we have "1 && X", simplify the code. "0 && X" would have constant
// folded if the case was simple enough.
bool ConstantBool = false;
if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
ConstantBool) {
// br(1 && X) -> br(X).
incrementProfileCounter(CondBOp);
return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
TrueCount);
}
// If we have "X && 1", simplify the code to use an uncond branch.
// "X && 0" would have been constant folded to 0.
if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
ConstantBool) {
// br(X && 1) -> br(X).
return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
TrueCount);
}
// HLSL Change Begins.
if (getLangOpts().HLSL &&
getLangOpts().HLSLVersion < hlsl::LangStd::v2021) {
// HLSL does not short-circuit by default.
// Emit the code with the fully general case.
llvm::Value *CondV;
{
ApplyDebugLocation DL(*this, Cond);
CondV = EvaluateExprAsBool(Cond);
}
Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
return;
}
// HLSL Change Ends.
// Emit the LHS as a conditional. If the LHS conditional is false, we
// want to jump to the FalseBlock.
llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
// The counter tells us how often we evaluate RHS, and all of TrueCount
// can be propagated to that branch.
uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
ConditionalEvaluation eval(*this);
{
ApplyDebugLocation DL(*this, Cond);
EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
EmitBlock(LHSTrue);
}
incrementProfileCounter(CondBOp);
setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
// Any temporaries created here are conditional.
eval.begin(*this);
EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
eval.end(*this);
return;
}
if (CondBOp->getOpcode() == BO_LOr) {
// If we have "0 || X", simplify the code. "1 || X" would have constant
// folded if the case was simple enough.
bool ConstantBool = false;
if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
!ConstantBool) {
// br(0 || X) -> br(X).
incrementProfileCounter(CondBOp);
return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
TrueCount);
}
// If we have "X || 0", simplify the code to use an uncond branch.
// "X || 1" would have been constant folded to 1.
if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
!ConstantBool) {
// br(X || 0) -> br(X).
return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
TrueCount);
}
// HLSL Change Begins.
if (getLangOpts().HLSL &&
getLangOpts().HLSLVersion < hlsl::LangStd::v2021) {
// HLSL does not short-circuit by default.
// Emit the code with the fully general case.
llvm::Value *CondV;
{
ApplyDebugLocation DL(*this, Cond);
CondV = EvaluateExprAsBool(Cond);
}
Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
return;
}
// HLSL Change Ends.
// Emit the LHS as a conditional. If the LHS conditional is true, we
// want to jump to the TrueBlock.
llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
// We have the count for entry to the RHS and for the whole expression
// being true, so we can divvy up the true count between the short circuit
// and the RHS.
uint64_t LHSCount =
getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
uint64_t RHSCount = TrueCount - LHSCount;
ConditionalEvaluation eval(*this);
{
ApplyDebugLocation DL(*this, Cond);
EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
EmitBlock(LHSFalse);
}
incrementProfileCounter(CondBOp);
setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
// Any temporaries created here are conditional.
eval.begin(*this);
EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
eval.end(*this);
return;
}
}
if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
// br(!x, t, f) -> br(x, f, t)
if (CondUOp->getOpcode() == UO_LNot) {
// Negate the count.
uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
// Negate the condition and swap the destination blocks.
return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
FalseCount);
}
}
if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
// br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
ConditionalEvaluation cond(*this);
EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
getProfileCount(CondOp));
// When computing PGO branch weights, we only know the overall count for
// the true block. This code is essentially doing tail duplication of the
// naive code-gen, introducing new edges for which counts are not
// available. Divide the counts proportionally between the LHS and RHS of
// the conditional operator.
uint64_t LHSScaledTrueCount = 0;
if (TrueCount) {
double LHSRatio =
getProfileCount(CondOp) / (double)getCurrentProfileCount();
LHSScaledTrueCount = TrueCount * LHSRatio;
}
cond.begin(*this);
EmitBlock(LHSBlock);
incrementProfileCounter(CondOp);
{
ApplyDebugLocation DL(*this, Cond);
EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
LHSScaledTrueCount);
}
cond.end(*this);
cond.begin(*this);
EmitBlock(RHSBlock);
EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
TrueCount - LHSScaledTrueCount);
cond.end(*this);
return;
}
if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
// Conditional operator handling can give us a throw expression as a
// condition for a case like:
// br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
// Fold this to:
// br(c, throw x, br(y, t, f))
EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
return;
}
// Create branch weights based on the number of times we get here and the
// number of times the condition should be true.
uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
llvm::MDNode *Weights =
createProfileWeights(TrueCount, CurrentCount - TrueCount);
// Emit the code with the fully general case.
llvm::Value *CondV;
{
ApplyDebugLocation DL(*this, Cond);
CondV = EvaluateExprAsBool(Cond);
}
Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights);
}
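// Illustrative lowering (a sketch, not verbatim IR): in C++, `if (a && b)`
// short-circuits and is emitted roughly as
//   br i1 %a, label %land.lhs.true, label %if.end
// land.lhs.true:
//   br i1 %b, label %if.then, label %if.end
// whereas pre-2021 HLSL (see the HLSL changes above) evaluates both
// operands and emits a single conditional branch on the combined value.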
/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
CGM.ErrorUnsupported(S, Type);
}
/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param dest a pointer to the VLA being initialized
/// \param src a char* pointing to the bit-pattern for a single
///   base element of the array
/// \param sizeInChars the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
llvm::Value *dest, llvm::Value *src,
llvm::Value *sizeInChars) {
std::pair<CharUnits,CharUnits> baseSizeAndAlign
= CGF.getContext().getTypeInfoInChars(baseType);
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *baseSizeInChars
= llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());
llvm::Type *i8p = Builder.getInt8PtrTy();
llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");
llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
// Make a loop over the VLA. C99 guarantees that the VLA element
// count must be nonzero.
CGF.EmitBlock(loopBB);
llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
cur->addIncoming(begin, originBB);
// memcpy the individual element bit-pattern.
Builder.CreateMemCpy(cur, src, baseSizeInChars,
baseSizeAndAlign.second.getQuantity(),
/*volatile*/ false);
// Go to the next element.
llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(),
cur, 1, "vla.next");
// Leave if that's the end of the VLA.
llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
Builder.CreateCondBr(done, contBB, loopBB);
cur->addIncoming(next, loopBB);
CGF.EmitBlock(contBB);
}
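// Illustrative use (a sketch): for a VLA of pointers to data members, e.g.
//   int S::*a[n];
// the element's null value is not all-zero bits, so EmitNullInitialization
// below emits one null element into a private global and uses this loop to
// replicate that pattern across all n elements.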
void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
// Ignore empty classes in C++.
if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
return;
}
}
// Cast the dest ptr to the appropriate i8 pointer type.
unsigned DestAS =
cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
if (DestPtr->getType() != BP)
DestPtr = Builder.CreateBitCast(DestPtr, BP);
// Get size and alignment info for this aggregate.
std::pair<CharUnits, CharUnits> TypeInfo =
getContext().getTypeInfoInChars(Ty);
CharUnits Size = TypeInfo.first;
CharUnits Align = TypeInfo.second;
llvm::Value *SizeVal;
const VariableArrayType *vla;
// Don't bother emitting a zero-byte memset.
if (Size.isZero()) {
// But note that getTypeInfo returns 0 for a VLA.
if (const VariableArrayType *vlaType =
dyn_cast_or_null<VariableArrayType>(
getContext().getAsArrayType(Ty))) {
QualType eltType;
llvm::Value *numElts;
std::tie(numElts, eltType) = getVLASize(vlaType);
SizeVal = numElts;
CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
if (!eltSize.isOne())
SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
vla = vlaType;
} else {
return;
}
} else {
SizeVal = CGM.getSize(Size);
vla = nullptr;
}
// If the type contains a pointer to data member we can't memset it to zero.
// Instead, create a null constant and copy it to the destination.
// TODO: there are other patterns besides zero that we can usefully memset,
// like -1, which happens to be the pattern used by member-pointers.
if (!CGM.getTypes().isZeroInitializable(Ty)) {
// For a VLA, emit a single element, then splat that over the VLA.
if (vla) Ty = getContext().getBaseElementType(vla);
llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
llvm::GlobalVariable *NullVariable =
new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
/*isConstant=*/true,
llvm::GlobalVariable::PrivateLinkage,
NullConstant, Twine());
llvm::Value *SrcPtr =
Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());
if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
// Get and call the appropriate llvm.memcpy overload.
Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
return;
}
// Otherwise, just memset the whole thing to zero. This is legal
// because in LLVM, all default initializers (other than the ones we just
// handled above) are guaranteed to have a bit pattern of all zeros.
Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
Align.getQuantity(), false);
}
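// Illustrative note (a sketch): for `struct S { int S::*p; };`, the null
// member pointer is not the all-zero bit pattern (it is -1 under the
// Itanium C++ ABI), so the path above copies from a private global holding
// the null constant instead of emitting a zero memset.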
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
// Make sure that there is a block for the indirect goto.
if (!IndirectBranch)
GetIndirectGotoBlock();
llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
// Make sure the indirect branch includes all of the address-taken blocks.
IndirectBranch->addDestination(BB);
return llvm::BlockAddress::get(CurFn, BB);
}
llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
// If we already made the indirect branch for indirect goto, return its block.
if (IndirectBranch) return IndirectBranch->getParent();
CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
// Create the PHI node that indirect gotos will add entries to.
llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
"indirect.goto.dest");
// Create the indirect branch instruction.
IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
return IndirectBranch->getParent();
}
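// Illustrative use (a sketch): for
//   void *p = &&L; goto *p;
// GetAddrOfLabel registers L's block as a destination of the indirectbr
// created here, and each indirect goto routes through this block, feeding
// its computed target into the "indirect.goto.dest" PHI above.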
/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
QualType &baseType,
llvm::Value *&addr) {
const ArrayType *arrayType = origArrayType;
// If it's a VLA, we have to load the stored size. Note that
// this is the number of elements in the VLA, not its size in bytes.
llvm::Value *numVLAElements = nullptr;
if (isa<VariableArrayType>(arrayType)) {
numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;
// Walk into all VLAs. This doesn't require changes to addr,
// which has type T* where T is the first non-VLA element type.
do {
QualType elementType = arrayType->getElementType();
arrayType = getContext().getAsArrayType(elementType);
// If we only have VLA components, 'addr' requires no adjustment.
if (!arrayType) {
baseType = elementType;
return numVLAElements;
}
} while (isa<VariableArrayType>(arrayType));
// We get out here only if we find a constant array type
// inside the VLA.
}
// We have some number of constant-length arrays, so addr should
// have LLVM type [M x [N x [...]]]*. Build a GEP that walks
// down to the first element of addr.
SmallVector<llvm::Value*, 8> gepIndices;
// GEP down to the array type.
llvm::ConstantInt *zero = Builder.getInt32(0);
gepIndices.push_back(zero);
uint64_t countFromCLAs = 1;
QualType eltType;
llvm::ArrayType *llvmArrayType =
dyn_cast<llvm::ArrayType>(
cast<llvm::PointerType>(addr->getType())->getElementType());
while (llvmArrayType) {
assert(isa<ConstantArrayType>(arrayType));
assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
== llvmArrayType->getNumElements());
gepIndices.push_back(zero);
countFromCLAs *= llvmArrayType->getNumElements();
eltType = arrayType->getElementType();
llvmArrayType =
dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
arrayType = getContext().getAsArrayType(arrayType->getElementType());
assert((!llvmArrayType || arrayType) &&
"LLVM and Clang types are out-of-synch");
}
if (arrayType) {
// From this point onwards, the Clang array type has been emitted
// as some other type (probably a packed struct). Compute the array
// size, and just emit the 'begin' expression as a bitcast.
while (arrayType) {
countFromCLAs *=
cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
eltType = arrayType->getElementType();
arrayType = getContext().getAsArrayType(eltType);
}
unsigned AddressSpace = addr->getType()->getPointerAddressSpace();
llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace);
addr = Builder.CreateBitCast(addr, BaseType, "array.begin");
} else {
// Create the actual GEP.
addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
}
baseType = eltType;
llvm::Value *numElements
= llvm::ConstantInt::get(SizeTy, countFromCLAs);
// If we had any VLA dimensions, factor them in.
if (numVLAElements)
numElements = Builder.CreateNUWMul(numVLAElements, numElements);
return numElements;
}
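// Illustrative example (a sketch): for `int a[2][3]`, addr has LLVM type
// [2 x [3 x i32]]*, the GEP walks down to the first i32, baseType becomes
// `int`, and the returned count is the constant 6. For `int v[n][4]`, the
// VLA dimension is loaded and folded in, yielding `n * 4` elements.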
std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(QualType type) {
const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
assert(vla && "type was not a variable array type!");
return getVLASize(vla);
}
std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(const VariableArrayType *type) {
// The number of elements so far; always size_t.
llvm::Value *numElements = nullptr;
QualType elementType;
do {
elementType = type->getElementType();
llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
assert(vlaSize && "no size for VLA!");
assert(vlaSize->getType() == SizeTy);
if (!numElements) {
numElements = vlaSize;
} else {
// It's undefined behavior if this wraps around, so mark it that way.
// FIXME: Teach -fsanitize=undefined to trap this.
numElements = Builder.CreateNUWMul(numElements, vlaSize);
}
} while ((type = getContext().getAsVariableArrayType(elementType)));
return std::pair<llvm::Value*,QualType>(numElements, elementType);
}
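// Illustrative example (a sketch): for `float m[n][k]`, the cached size_t
// values for n and k are combined as
//   %count = mul nuw i64 %n, %k
// and the returned pair is (%count, float). The i64 width is illustrative;
// SizeTy matches the target's size_t.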
void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
assert(type->isVariablyModifiedType() &&
"Must pass variably modified type to EmitVLASizes!");
EnsureInsertPoint();
// We're going to walk down into the type and look for VLA
// expressions.
do {
assert(type->isVariablyModifiedType());
const Type *ty = type.getTypePtr();
switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
llvm_unreachable("unexpected dependent type!");
// These types are never variably-modified.
case Type::Builtin:
case Type::Complex:
case Type::Vector:
case Type::ExtVector:
case Type::Record:
case Type::Enum:
case Type::Elaborated:
case Type::TemplateSpecialization:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
llvm_unreachable("type class is never variably-modified!");
case Type::Adjusted:
type = cast<AdjustedType>(ty)->getAdjustedType();
break;
case Type::Decayed:
type = cast<DecayedType>(ty)->getPointeeType();
break;
case Type::Pointer:
type = cast<PointerType>(ty)->getPointeeType();
break;
case Type::BlockPointer:
type = cast<BlockPointerType>(ty)->getPointeeType();
break;
case Type::LValueReference:
case Type::RValueReference:
type = cast<ReferenceType>(ty)->getPointeeType();
break;
case Type::MemberPointer:
type = cast<MemberPointerType>(ty)->getPointeeType();
break;
case Type::ConstantArray:
case Type::IncompleteArray:
// Losing element qualification here is fine.
type = cast<ArrayType>(ty)->getElementType();
break;
case Type::VariableArray: {
// Losing element qualification here is fine.
const VariableArrayType *vat = cast<VariableArrayType>(ty);
// Unknown size indication requires no size computation.
// Otherwise, evaluate and record it.
if (const Expr *size = vat->getSizeExpr()) {
// It's possible that we might have emitted this already,
// e.g. with a typedef and a pointer to it.
llvm::Value *&entry = VLASizeMap[size];
if (!entry) {
llvm::Value *Size = EmitScalarExpr(size);
// C11 6.7.6.2p5:
// If the size is an expression that is not an integer constant
// expression [...] each time it is evaluated it shall have a value
// greater than zero.
if (SanOpts.has(SanitizerKind::VLABound) &&
size->getType()->isSignedIntegerType()) {
SanitizerScope SanScope(this);
llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
llvm::Constant *StaticArgs[] = {
EmitCheckSourceLocation(size->getLocStart()),
EmitCheckTypeDescriptor(size->getType())
};
EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
SanitizerKind::VLABound),
"vla_bound_not_positive", StaticArgs, Size);
}
// Always zexting here would be wrong if it weren't
// undefined behavior to have a negative bound.
entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
}
}
type = vat->getElementType();
break;
}
case Type::FunctionProto:
case Type::FunctionNoProto:
type = cast<FunctionType>(ty)->getReturnType();
break;
case Type::Paren:
case Type::TypeOf:
case Type::UnaryTransform:
case Type::Attributed:
case Type::SubstTemplateTypeParm:
case Type::PackExpansion:
// Keep walking after single level desugaring.
type = type.getSingleStepDesugaredType(getContext());
break;
case Type::Typedef:
case Type::Decltype:
case Type::Auto:
// Stop walking: nothing to do.
return;
case Type::TypeOfExpr:
// Stop walking: emit typeof expression.
EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
return;
case Type::Atomic:
type = cast<AtomicType>(ty)->getValueType();
break;
}
} while (type->isVariablyModifiedType());
}
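// Illustrative note (a sketch): for `typedef int T[n];`, the size
// expression `n` is evaluated once where the walk reaches the VLA and is
// cached in VLASizeMap keyed by that expression; a later walk that reaches
// the typedef (e.g. from `T *p;`) stops at the Typedef case above and
// reuses the cached value.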
llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
if (getContext().getBuiltinVaListType()->isArrayType())
return EmitScalarExpr(E);
return EmitLValue(E).getAddress();
}
void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
llvm::Constant *Init) {
assert(Init && "Invalid DeclRefExpr initializer!");
if (CGDebugInfo *Dbg = getDebugInfo())
if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
Dbg->EmitGlobalVariable(E->getDecl(), Init);
}
CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
// At the moment, the only aggressive peephole we do in IR gen
// is trunc(zext) folding, but if we add more, we can easily
// extend this protection.
if (!rvalue.isScalar()) return PeepholeProtection();
llvm::Value *value = rvalue.getScalarVal();
if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
// Just make an extra bitcast.
assert(HaveInsertPoint());
llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
Builder.GetInsertBlock());
PeepholeProtection protection;
protection.Inst = inst;
return protection;
}
void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
if (!protection.Inst) return;
// In theory, we could try to duplicate the peepholes now, but whatever.
protection.Inst->eraseFromParent();
}
llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
llvm::Value *AnnotatedVal,
StringRef AnnotationStr,
SourceLocation Location) {
llvm::Value *Args[4] = {
AnnotatedVal,
Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
CGM.EmitAnnotationLineNo(Location)
};
return Builder.CreateCall(AnnotationFn, Args);
}
void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
// FIXME We create a new bitcast for every annotation because that's what
// llvm-gcc was doing.
for (const auto *I : D->specific_attrs<AnnotateAttr>())
EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
I->getAnnotation(), D->getLocation());
}
llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
llvm::Value *V) {
assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
llvm::Type *VTy = V->getType();
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
CGM.Int8PtrTy);
for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
// FIXME Always emit the cast inst so we can differentiate between
// annotation on the first field of a struct and annotation on the struct
// itself.
if (VTy != CGM.Int8PtrTy)
V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
V = Builder.CreateBitCast(V, VTy);
}
return V;
}
CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
: CGF(CGF) {
assert(!CGF->IsSanitizerScope);
CGF->IsSanitizerScope = true;
}
CodeGenFunction::SanitizerScope::~SanitizerScope() {
CGF->IsSanitizerScope = false;
}
void CodeGenFunction::InsertHelper(llvm::Instruction *I,
const llvm::Twine &Name,
llvm::BasicBlock *BB,
llvm::BasicBlock::iterator InsertPt) const {
LoopStack.InsertHelper(I);
if (IsSanitizerScope)
CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
}
template <bool PreserveNames>
void CGBuilderInserter<PreserveNames>::InsertHelper(
llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
llvm::BasicBlock::iterator InsertPt) const {
llvm::IRBuilderDefaultInserter<PreserveNames>::InsertHelper(I, Name, BB,
InsertPt);
if (CGF)
CGF->InsertHelper(I, Name, BB, InsertPt);
}
#ifdef NDEBUG
#define PreserveNames false
#else
#define PreserveNames true
#endif
template void CGBuilderInserter<PreserveNames>::InsertHelper(
llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
llvm::BasicBlock::iterator InsertPt) const;
#undef PreserveNames
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/TargetInfo.cpp | //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//
#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort
using namespace clang;
using namespace CodeGen;
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
llvm::Value *Array,
llvm::Value *Value,
unsigned FirstIndex,
unsigned LastIndex) {
// Alternatively, we could emit this as a loop in the source.
for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
llvm::Value *Cell =
Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
Builder.CreateStore(Value, Cell);
}
}
static bool isAggregateTypeForABI(QualType T) {
return !CodeGenFunction::hasScalarEvaluationKind(T) ||
T->isMemberFunctionPointerType();
}
ABIInfo::~ABIInfo() {}
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
CGCXXABI &CXXABI) {
const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
if (!RD)
return CGCXXABI::RAA_Default;
return CXXABI.getRecordArgABI(RD);
}
static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
CGCXXABI &CXXABI) {
const RecordType *RT = T->getAs<RecordType>();
if (!RT)
return CGCXXABI::RAA_Default;
return getRecordArgABI(RT, CXXABI);
}
/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
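/// For example (illustrative), given
/// typedef union { int *ip; long *lp; } TU __attribute__((transparent_union));
/// an argument of type TU is classified as if it were declared 'int *'.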
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
if (const RecordType *UT = Ty->getAsUnionType()) {
const RecordDecl *UD = UT->getDecl();
if (UD->hasAttr<TransparentUnionAttr>()) {
assert(!UD->field_empty() && "sema created an empty transparent union");
return UD->field_begin()->getType();
}
}
return Ty;
}
CGCXXABI &ABIInfo::getCXXABI() const {
return CGT.getCXXABI();
}
ASTContext &ABIInfo::getContext() const {
return CGT.getContext();
}
llvm::LLVMContext &ABIInfo::getVMContext() const {
return CGT.getLLVMContext();
}
const llvm::DataLayout &ABIInfo::getDataLayout() const {
return CGT.getDataLayout();
}
const TargetInfo &ABIInfo::getTarget() const {
return CGT.getTarget();
}
bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
return false;
}
bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
uint64_t Members) const {
return false;
}
bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
return false;
}
void ABIArgInfo::dump() const {
raw_ostream &OS = llvm::errs();
OS << "(ABIArgInfo Kind=";
switch (TheKind) {
case Direct:
OS << "Direct Type=";
if (llvm::Type *Ty = getCoerceToType())
Ty->print(OS);
else
OS << "null";
break;
case Extend:
OS << "Extend";
break;
case Ignore:
OS << "Ignore";
break;
case InAlloca:
OS << "InAlloca Offset=" << getInAllocaFieldIndex();
break;
case Indirect:
OS << "Indirect Align=" << getIndirectAlign()
<< " ByVal=" << getIndirectByVal()
<< " Realign=" << getIndirectRealign();
break;
case Expand:
OS << "Expand";
break;
}
OS << ")\n";
}
TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
// Verified for:
// x86-64 FreeBSD, Linux, Darwin
// x86-32 FreeBSD, Linux, Darwin
// PowerPC Linux, Darwin
// ARM Darwin (*not* EABI)
// AArch64 Linux
return 32;
}
bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
const FunctionNoProtoType *fnType) const {
// The following conventions are known to require this to be false:
// x86_stdcall
// MIPS
// For everything else, we just prefer false unless we opt out.
return false;
}
void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
llvm::SmallString<24> &Opt) const {
// This assumes the user is passing a library name like "rt" instead of a
// filename like "librt.a/so", and that they don't care whether it's static or
// dynamic.
Opt = "-l";
Opt += Lib;
}
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
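/// For example, an unnamed bit-field such as 'int : 0' is empty, and so is
/// a zero-length array such as 'int pad[0]'.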
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
bool AllowArrays) {
if (FD->isUnnamedBitfield())
return true;
QualType FT = FD->getType();
// Constant arrays of empty records count as empty, strip them off.
// Constant arrays of zero length always count as empty.
if (AllowArrays)
while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
if (AT->getSize() == 0)
return true;
FT = AT->getElementType();
}
const RecordType *RT = FT->getAs<RecordType>();
if (!RT)
return false;
// C++ record fields are never empty, at least in the Itanium ABI.
//
// FIXME: We should use a predicate for whether this behavior is true in the
// current ABI.
if (isa<CXXRecordDecl>(RT->getDecl()))
return false;
return isEmptyRecord(Context, FT, AllowArrays);
}
/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
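/// For example, in C, 'struct { int unused[0]; }' is empty, while any
/// record with a flexible array member is not.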
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
const RecordType *RT = T->getAs<RecordType>();
if (!RT)
return false;
const RecordDecl *RD = RT->getDecl();
if (RD->hasFlexibleArrayMember())
return false;
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
for (const auto &I : CXXRD->bases())
if (!isEmptyRecord(Context, I.getType(), true))
return false;
for (const auto *I : RD->fields())
if (!isEmptyField(Context, I, AllowArrays))
return false;
return true;
}
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
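/// For example, 'struct A { double d; }' is a single element struct, and so
/// is 'struct B { struct A a; }'; in both cases the element type is 'double'.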
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
const RecordType *RT = T->getAs<RecordType>();
if (!RT)
return nullptr;
const RecordDecl *RD = RT->getDecl();
if (RD->hasFlexibleArrayMember())
return nullptr;
const Type *Found = nullptr;
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const auto &I : CXXRD->bases()) {
// Ignore empty records.
if (isEmptyRecord(Context, I.getType(), true))
continue;
// If we already found an element then this isn't a single-element struct.
if (Found)
return nullptr;
// If this is non-empty and not a single element struct, the composite
// cannot be a single element struct.
Found = isSingleElementStruct(I.getType(), Context);
if (!Found)
return nullptr;
}
}
// Check for single element.
for (const auto *FD : RD->fields()) {
QualType FT = FD->getType();
// Ignore empty fields.
if (isEmptyField(Context, FD, true))
continue;
// If we already found an element then this isn't a single-element
// struct.
if (Found)
return nullptr;
// Treat single element arrays as the element.
while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
if (AT->getSize().getZExtValue() != 1)
break;
FT = AT->getElementType();
}
if (!isAggregateTypeForABI(FT)) {
Found = FT.getTypePtr();
} else {
Found = isSingleElementStruct(FT, Context);
if (!Found)
return nullptr;
}
}
// We don't consider a struct a single-element struct if it has
// padding beyond the element type.
if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
return nullptr;
return Found;
}
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
// Treat complex types as the element type.
if (const ComplexType *CTy = Ty->getAs<ComplexType>())
Ty = CTy->getElementType();
// Check for a type which we know has a simple scalar argument-passing
// convention without any padding. (We're specifically looking for 32
// and 64-bit integer and integer-equivalents, float, and double.)
if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
!Ty->isEnumeralType() && !Ty->isBlockPointerType())
return false;
uint64_t Size = Context.getTypeSize(Ty);
return Size == 32 || Size == 64;
}
/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it were
/// expanded into separate arguments. If so, we prefer to do the latter to avoid
/// inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
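//
// For example, 'struct { int a; float b; }' (two 32-bit basic fields, no
// holes) can be expanded, while any struct containing a bit-field cannot.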
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
// We can only expand structure types.
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
return false;
// We can only expand (C) structures.
//
// FIXME: This needs to be generalized to handle classes as well.
const RecordDecl *RD = RT->getDecl();
if (!RD->isStruct())
return false;
// We try to expand CLike CXXRecordDecl.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
if (!CXXRD->isCLike())
return false;
}
uint64_t Size = 0;
for (const auto *FD : RD->fields()) {
if (!is32Or64BitBasicType(FD->getType(), Context))
return false;
// FIXME: Reject bit-fields wholesale; there are two problems, we don't know
// how to expand them yet, and the predicate for telling if a bitfield still
// counts as "basic" is more complicated than what we were doing previously.
if (FD->isBitField())
return false;
Size += Context.getTypeSize(FD->getType());
}
// Make sure there are not any holes in the struct.
if (Size != Context.getTypeSize(Ty))
return false;
return true;
}
namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType RetTy) const;
void computeInfo(CGFunctionInfo &FI) const override {
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (auto &I : FI.arguments())
I.info = classifyArgumentType(I.type);
}
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
};
class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
: TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};
llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
return nullptr;
}
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
Ty = useFirstFieldIfTransparentUnion(Ty);
if (isAggregateTypeForABI(Ty)) {
// Records with non-trivial destructors/copy-constructors should not be
// passed by value.
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
return ABIArgInfo::getIndirect(0);
}
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
return (Ty->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
if (isAggregateTypeForABI(RetTy))
return ABIArgInfo::getIndirect(0);
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
return (RetTy->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//
class PNaClABIInfo : public ABIInfo {
public:
PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType RetTy) const;
void computeInfo(CGFunctionInfo &FI) const override;
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
};
class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
: TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};
void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (auto &I : FI.arguments())
I.info = classifyArgumentType(I.type);
}
llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
return nullptr;
}
/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
if (isAggregateTypeForABI(Ty)) {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
return ABIArgInfo::getIndirect(0);
} else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
// Treat an enum type as its underlying type.
Ty = EnumTy->getDecl()->getIntegerType();
} else if (Ty->isFloatingType()) {
// Floating-point types don't go inreg.
return ABIArgInfo::getDirect();
}
return (Ty->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
// In the PNaCl ABI we always return records/structures on the stack.
if (isAggregateTypeForABI(RetTy))
return ABIArgInfo::getIndirect(0);
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
return (RetTy->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
// Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
IRType->getScalarSizeInBits() != 64;
}
static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
StringRef Constraint,
llvm::Type* Ty) {
if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
// Invalid MMX constraint
return nullptr;
}
return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
}
// No operation needed
return Ty;
}
/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
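/// For example, 'float', 'double', and 128/256/512-bit vector types qualify,
/// while 'half' and 64-bit MMX vectors do not.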
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
return true;
} else if (const VectorType *VT = Ty->getAs<VectorType>()) {
// vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
// registers specially.
unsigned VecSize = Context.getTypeSize(VT);
if (VecSize == 128 || VecSize == 256 || VecSize == 512)
return true;
}
return false;
}
/// Returns true if this aggregate is small enough to be passed in SSE registers
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
return NumMembers <= 4;
}
//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//
/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}
unsigned CC;
unsigned FreeRegs;
unsigned FreeSSERegs;
};
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
enum Class {
Integer,
Float
};
static const unsigned MinABIStackAlignInBytes = 4;
bool IsDarwinVectorABI;
bool IsSmallStructInRegABI;
bool IsWin32StructABI;
unsigned DefaultNumRegisterParameters;
static bool isRegisterSize(unsigned Size) {
return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
}
bool isHomogeneousAggregateBaseType(QualType Ty) const override {
// FIXME: Assumes vectorcall is in use.
return isX86VectorTypeForVectorCall(getContext(), Ty);
}
bool isHomogeneousAggregateSmallEnough(const Type *Ty,
uint64_t NumMembers) const override {
// FIXME: Assumes vectorcall is in use.
return isX86VectorCallAggregateSmallEnough(NumMembers);
}
bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
/// getIndirectResult - Given a source type \arg Ty, return a suitable result
/// such that the argument will be passed in memory.
ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
ABIArgInfo getIndirectReturnResult(CCState &State) const;
/// \brief Return the alignment to use for the given type on the stack.
unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
Class classify(QualType Ty) const;
ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;
/// \brief Rewrite the function info so that all memory arguments use
/// inalloca.
void rewriteWithInAlloca(CGFunctionInfo &FI) const;
void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
unsigned &StackOffset, ABIArgInfo &Info,
QualType Type) const;
public:
void computeInfo(CGFunctionInfo &FI) const override;
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
unsigned r)
: ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
bool d, bool p, bool w, unsigned r)
:TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}
static bool isStructReturnInRegABI(
const llvm::Triple &Triple, const CodeGenOptions &Opts);
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override;
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
// Darwin uses different dwarf register numbers for EH.
if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
return 4;
}
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const override;
llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
StringRef Constraint,
llvm::Type* Ty) const override {
return X86AdjustInlineAsmType(CGF, Constraint, Ty);
}
void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
std::string &Constraints,
std::vector<llvm::Type *> &ResultRegTypes,
std::vector<llvm::Type *> &ResultTruncRegTypes,
std::vector<LValue> &ResultRegDests,
std::string &AsmString,
unsigned NumOutputs) const override;
llvm::Constant *
getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
unsigned Sig = (0xeb << 0) | // jmp rel8
(0x06 << 8) | // .+0x08
('F' << 16) |
('T' << 24);
return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
}
};
}
/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
/// mov $0, $1
/// mov eax, $1
/// The result will be:
/// mov $0, $2
/// mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
unsigned NumNewOuts,
std::string &AsmString) {
std::string Buf;
llvm::raw_string_ostream OS(Buf);
size_t Pos = 0;
while (Pos < AsmString.size()) {
size_t DollarStart = AsmString.find('$', Pos);
if (DollarStart == std::string::npos)
DollarStart = AsmString.size();
size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
if (DollarEnd == std::string::npos)
DollarEnd = AsmString.size();
OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
Pos = DollarEnd;
size_t NumDollars = DollarEnd - DollarStart;
if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
// We have an operand reference.
size_t DigitStart = Pos;
size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
if (DigitEnd == std::string::npos)
DigitEnd = AsmString.size();
StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
unsigned OperandIndex;
if (!OperandStr.getAsInteger(10, OperandIndex)) {
if (OperandIndex >= FirstIn)
OperandIndex += NumNewOuts;
OS << OperandIndex;
} else {
OS << OperandStr;
}
Pos = DigitEnd;
}
}
AsmString = std::move(OS.str());
}
/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
std::vector<llvm::Type *> &ResultRegTypes,
std::vector<llvm::Type *> &ResultTruncRegTypes,
std::vector<LValue> &ResultRegDests, std::string &AsmString,
unsigned NumOutputs) const {
uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());
// Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
// larger.
if (!Constraints.empty())
Constraints += ',';
if (RetWidth <= 32) {
Constraints += "={eax}";
ResultRegTypes.push_back(CGF.Int32Ty);
} else {
// Use the 'A' constraint for EAX:EDX.
Constraints += "=A";
ResultRegTypes.push_back(CGF.Int64Ty);
}
// Truncate EAX or EAX:EDX to an integer of the appropriate size.
llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
ResultTruncRegTypes.push_back(CoerceTy);
// Coerce the integer by bitcasting the return slot pointer.
ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
CoerceTy->getPointerTo()));
ResultRegDests.push_back(ReturnSlot);
rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}
/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
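/// For example, a 32-bit 'struct { short a, b; }' is returned in a register,
/// while a struct wrapping a 64-bit vector is not.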
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
ASTContext &Context) const {
uint64_t Size = Context.getTypeSize(Ty);
// Type must be register sized.
if (!isRegisterSize(Size))
return false;
if (Ty->isVectorType()) {
// 64- and 128-bit vectors inside structures are not returned in
// registers.
if (Size == 64 || Size == 128)
return false;
return true;
}
// If this is a builtin, pointer, enum, complex type, member pointer, or
// member function pointer it is ok.
if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
Ty->isAnyComplexType() || Ty->isEnumeralType() ||
Ty->isBlockPointerType() || Ty->isMemberPointerType())
return true;
// Arrays are treated like records.
if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
return shouldReturnTypeInRegister(AT->getElementType(), Context);
// Otherwise, it must be a record type.
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT) return false;
// FIXME: Traverse bases here too.
// Structure types are passed in register if all fields would be
// passed in a register.
for (const auto *FD : RT->getDecl()->fields()) {
// Empty fields are ignored.
if (isEmptyField(Context, FD, true))
continue;
// Check fields recursively.
if (!shouldReturnTypeInRegister(FD->getType(), Context))
return false;
}
return true;
}
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
// If the return value is indirect, then the hidden argument is consuming one
// integer register.
if (State.FreeRegs) {
--State.FreeRegs;
return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
}
return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
}
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
CCState &State) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
const Type *Base = nullptr;
uint64_t NumElts = 0;
if (State.CC == llvm::CallingConv::X86_VectorCall &&
isHomogeneousAggregate(RetTy, Base, NumElts)) {
// The LLVM struct type for such an aggregate should lower properly.
return ABIArgInfo::getDirect();
}
if (const VectorType *VT = RetTy->getAs<VectorType>()) {
// On Darwin, some vectors are returned in registers.
if (IsDarwinVectorABI) {
uint64_t Size = getContext().getTypeSize(RetTy);
// 128-bit vectors are a special case; they are returned in
// registers and we need to make sure to pick a type the LLVM
// backend will like.
if (Size == 128)
return ABIArgInfo::getDirect(llvm::VectorType::get(
llvm::Type::getInt64Ty(getVMContext()), 2));
// Always return in register if it fits in a general purpose
// register, or if it is 64 bits and has a single element.
if ((Size == 8 || Size == 16 || Size == 32) ||
(Size == 64 && VT->getNumElements() == 1))
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
Size));
return getIndirectReturnResult(State);
}
return ABIArgInfo::getDirect();
}
if (isAggregateTypeForABI(RetTy)) {
if (const RecordType *RT = RetTy->getAs<RecordType>()) {
// Structures with flexible arrays are always indirect.
if (RT->getDecl()->hasFlexibleArrayMember())
return getIndirectReturnResult(State);
}
// If specified, structs and unions are always indirect.
if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
return getIndirectReturnResult(State);
// Small structures which are register sized are generally returned
// in a register.
if (shouldReturnTypeInRegister(RetTy, getContext())) {
uint64_t Size = getContext().getTypeSize(RetTy);
// As a special-case, if the struct is a "single-element" struct, and
// the field is of type "float" or "double", return it in a
// floating-point register. (MSVC does not apply this special case.)
// We apply a similar transformation for pointer types to improve the
// quality of the generated IR.
if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
|| SeltTy->hasPointerRepresentation())
return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
// FIXME: We should be able to narrow this integer in cases with dead
// padding.
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
}
return getIndirectReturnResult(State);
}
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
return (RetTy->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}
static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
return false;
const RecordDecl *RD = RT->getDecl();
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
for (const auto &I : CXXRD->bases())
if (!isRecordWithSSEVectorType(Context, I.getType()))
return false;
for (const auto *i : RD->fields()) {
QualType FT = i->getType();
if (isSSEVectorType(Context, FT))
return true;
if (isRecordWithSSEVectorType(Context, FT))
return true;
}
return false;
}
unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
unsigned Align) const {
// Otherwise, if the alignment is less than or equal to the minimum ABI
// alignment, just use the default; the backend will handle this.
if (Align <= MinABIStackAlignInBytes)
return 0; // Use default alignment.
// On non-Darwin, the stack type alignment is always 4.
if (!IsDarwinVectorABI) {
// Set explicit alignment, since we may need to realign the top.
return MinABIStackAlignInBytes;
}
// Otherwise, if the type contains an SSE vector type, the alignment is 16.
if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
isRecordWithSSEVectorType(getContext(), Ty)))
return 16;
return MinABIStackAlignInBytes;
}
ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
CCState &State) const {
if (!ByVal) {
if (State.FreeRegs) {
--State.FreeRegs; // Non-byval indirects just use one pointer.
return ABIArgInfo::getIndirectInReg(0, false);
}
return ABIArgInfo::getIndirect(0, false);
}
// Compute the byval alignment.
unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
if (StackAlign == 0)
return ABIArgInfo::getIndirect(4, /*ByVal=*/true);
// If the stack alignment is less than the type alignment, realign the
// argument.
bool Realign = TypeAlign > StackAlign;
return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
}
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
const Type *T = isSingleElementStruct(Ty, getContext());
if (!T)
T = Ty.getTypePtr();
if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
BuiltinType::Kind K = BT->getKind();
if (K == BuiltinType::Float || K == BuiltinType::Double)
return Float;
}
return Integer;
}
bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
bool &NeedsPadding) const {
NeedsPadding = false;
Class C = classify(Ty);
if (C == Float)
return false;
unsigned Size = getContext().getTypeSize(Ty);
unsigned SizeInRegs = (Size + 31) / 32;
if (SizeInRegs == 0)
return false;
if (SizeInRegs > State.FreeRegs) {
State.FreeRegs = 0;
return false;
}
State.FreeRegs -= SizeInRegs;
if (State.CC == llvm::CallingConv::X86_FastCall ||
State.CC == llvm::CallingConv::X86_VectorCall) {
if (Size > 32)
return false;
if (Ty->isIntegralOrEnumerationType())
return true;
if (Ty->isPointerType())
return true;
if (Ty->isReferenceType())
return true;
if (State.FreeRegs)
NeedsPadding = true;
return false;
}
return true;
}
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
CCState &State) const {
// FIXME: Set alignment on indirect arguments.
Ty = useFirstFieldIfTransparentUnion(Ty);
// Check with the C++ ABI first.
const RecordType *RT = Ty->getAs<RecordType>();
if (RT) {
CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
if (RAA == CGCXXABI::RAA_Indirect) {
return getIndirectResult(Ty, false, State);
} else if (RAA == CGCXXABI::RAA_DirectInMemory) {
// The field index doesn't matter, we'll fix it up later.
return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
}
}
// vectorcall adds the concept of a homogeneous vector aggregate, similar
// to other targets.
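// For example (a sketch), 'struct { __m128 x, y; }' is a homogeneous
// aggregate of two 128-bit vectors; when enough SSE registers are free it
// is expanded rather than passed indirectly.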
const Type *Base = nullptr;
uint64_t NumElts = 0;
if (State.CC == llvm::CallingConv::X86_VectorCall &&
isHomogeneousAggregate(Ty, Base, NumElts)) {
if (State.FreeSSERegs >= NumElts) {
State.FreeSSERegs -= NumElts;
if (Ty->isBuiltinType() || Ty->isVectorType())
return ABIArgInfo::getDirect();
return ABIArgInfo::getExpand();
}
return getIndirectResult(Ty, /*ByVal=*/false, State);
}
if (isAggregateTypeForABI(Ty)) {
if (RT) {
// Structs are always byval on win32, regardless of what they contain.
if (IsWin32StructABI)
return getIndirectResult(Ty, true, State);
// Structures with flexible arrays are always indirect.
if (RT->getDecl()->hasFlexibleArrayMember())
return getIndirectResult(Ty, true, State);
}
// Ignore empty structs/unions.
if (isEmptyRecord(getContext(), Ty, true))
return ABIArgInfo::getIgnore();
llvm::LLVMContext &LLVMContext = getVMContext();
llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
bool NeedsPadding;
if (shouldUseInReg(Ty, State, NeedsPadding)) {
unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
return ABIArgInfo::getDirectInReg(Result);
}
llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
// Expand small (<= 128-bit) record types when we know that the stack layout
// of those arguments will match the struct. This is important because the
// LLVM backend isn't smart enough to remove byval, which inhibits many
// optimizations.
if (getContext().getTypeSize(Ty) <= 4*32 &&
canExpandIndirectArgument(Ty, getContext()))
return ABIArgInfo::getExpandWithPadding(
State.CC == llvm::CallingConv::X86_FastCall ||
State.CC == llvm::CallingConv::X86_VectorCall,
PaddingType);
return getIndirectResult(Ty, true, State);
}
if (const VectorType *VT = Ty->getAs<VectorType>()) {
// On Darwin, some vectors are passed in memory, we handle this by passing
// it as an i8/i16/i32/i64.
if (IsDarwinVectorABI) {
uint64_t Size = getContext().getTypeSize(Ty);
if ((Size == 8 || Size == 16 || Size == 32) ||
(Size == 64 && VT->getNumElements() == 1))
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
Size));
}
if (IsX86_MMXType(CGT.ConvertType(Ty)))
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
return ABIArgInfo::getDirect();
}
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
bool NeedsPadding;
bool InReg = shouldUseInReg(Ty, State, NeedsPadding);
if (Ty->isPromotableIntegerType()) {
if (InReg)
return ABIArgInfo::getExtendInReg();
return ABIArgInfo::getExtend();
}
if (InReg)
return ABIArgInfo::getDirectInReg();
return ABIArgInfo::getDirect();
}
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
CCState State(FI.getCallingConvention());
if (State.CC == llvm::CallingConv::X86_FastCall)
State.FreeRegs = 2;
else if (State.CC == llvm::CallingConv::X86_VectorCall) {
State.FreeRegs = 2;
State.FreeSSERegs = 6;
} else if (FI.getHasRegParm())
State.FreeRegs = FI.getRegParm();
else
State.FreeRegs = DefaultNumRegisterParameters;
if (!getCXXABI().classifyReturnType(FI)) {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
} else if (FI.getReturnInfo().isIndirect()) {
// The C++ ABI is not aware of register usage, so we have to check if the
// return value was sret and put it in a register ourselves if appropriate.
if (State.FreeRegs) {
--State.FreeRegs; // The sret parameter consumes a register.
FI.getReturnInfo().setInReg(true);
}
}
// The chain argument effectively gives us another free register.
if (FI.isChainCall())
++State.FreeRegs;
bool UsedInAlloca = false;
for (auto &I : FI.arguments()) {
I.info = classifyArgumentType(I.type, State);
UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
}
// If we needed to use inalloca for any argument, do a second pass and rewrite
// all the memory arguments to use inalloca.
if (UsedInAlloca)
rewriteWithInAlloca(FI);
}
void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
unsigned &StackOffset,
ABIArgInfo &Info, QualType Type) const {
assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
Info = ABIArgInfo::getInAlloca(FrameFields.size());
FrameFields.push_back(CGT.ConvertTypeForMem(Type));
StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();
// Insert padding bytes to respect alignment. For x86_32, each argument is 4
// byte aligned.
if (StackOffset % 4U) {
unsigned OldOffset = StackOffset;
StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
unsigned NumBytes = StackOffset - OldOffset;
assert(NumBytes);
llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
Ty = llvm::ArrayType::get(Ty, NumBytes);
FrameFields.push_back(Ty);
}
}
static bool isArgInAlloca(const ABIArgInfo &Info) {
// Leave ignored and inreg arguments alone.
switch (Info.getKind()) {
case ABIArgInfo::InAlloca:
return true;
case ABIArgInfo::Indirect:
assert(Info.getIndirectByVal());
return true;
case ABIArgInfo::Ignore:
return false;
case ABIArgInfo::Direct:
case ABIArgInfo::Extend:
case ABIArgInfo::Expand:
if (Info.getInReg())
return false;
return true;
}
llvm_unreachable("invalid enum");
}
void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
assert(IsWin32StructABI && "inalloca only supported on win32");
// Build a packed struct type for all of the arguments in memory.
SmallVector<llvm::Type *, 6> FrameFields;
unsigned StackOffset = 0;
CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
// Put 'this' into the struct before 'sret', if necessary.
bool IsThisCall =
FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
ABIArgInfo &Ret = FI.getReturnInfo();
if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
isArgInAlloca(I->info)) {
addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
++I;
}
// Put the sret parameter into the inalloca struct if it's in memory.
if (Ret.isIndirect() && !Ret.getInReg()) {
CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
// On Windows, the hidden sret parameter is always returned in eax.
Ret.setInAllocaSRet(IsWin32StructABI);
}
// Skip the 'this' parameter in ecx.
if (IsThisCall)
++I;
// Put arguments passed in memory into the struct.
for (; I != E; ++I) {
if (isArgInAlloca(I->info))
addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
}
FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
/*isPacked=*/true));
}
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
llvm::Type *BPP = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
"ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
// Compute if the address needs to be aligned
unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
Align = getTypeStackAlignInBytes(Ty, Align);
Align = std::max(Align, 4U);
if (Align > 4) {
// addr = (addr + align - 1) & -align;
llvm::Value *Offset =
llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
Addr = CGF.Builder.CreateGEP(Addr, Offset);
llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
CGF.Int32Ty);
llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
Addr->getType(),
"ap.cur.aligned");
}
llvm::Type *PTy =
llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
uint64_t Offset =
llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
llvm::Value *NextAddr =
Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
return AddrTyped;
}
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
const llvm::Triple &Triple, const CodeGenOptions &Opts) {
assert(Triple.getArch() == llvm::Triple::x86);
switch (Opts.getStructReturnConvention()) {
case CodeGenOptions::SRCK_Default:
break;
case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
return false;
case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
return true;
}
if (Triple.isOSDarwin())
return true;
switch (Triple.getOS()) {
case llvm::Triple::DragonFly:
case llvm::Triple::FreeBSD:
case llvm::Triple::OpenBSD:
case llvm::Triple::Bitrig:
case llvm::Triple::Win32:
return true;
default:
return false;
}
}
void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
// Get the LLVM function.
llvm::Function *Fn = cast<llvm::Function>(GV);
// Now add the 'alignstack' attribute with a value of 16.
llvm::AttrBuilder B;
B.addStackAlignmentAttr(16);
Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
llvm::AttributeSet::get(CGM.getLLVMContext(),
llvm::AttributeSet::FunctionIndex,
B));
}
}
}
bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
CodeGen::CGBuilderTy &Builder = CGF.Builder;
llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
// 0-7 are the eight integer registers; the order is different
// on Darwin (for EH), but the range is the same.
// 8 is %eip.
AssignToArrayRange(Builder, Address, Four8, 0, 8);
if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
// 12-16 are st(0..4). Not sure why we stop at 4.
// These have size 16, which is sizeof(long double) on
// platforms with 8-byte alignment for that type.
llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
} else {
// 9 is %eflags, which doesn't get a size on Darwin for some
// reason.
Builder.CreateStore(
Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9));
// 11-16 are st(0..5). Not sure why we stop at 5.
// These have size 12, which is sizeof(long double) on
// platforms with 4-byte alignment for that type.
llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
}
return false;
}
//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
/// The AVX ABI level for X86 targets.
enum class X86AVXABILevel {
None,
AVX,
AVX512
};
/// \returns the size in bits of the largest (native) vector for \p AVXLevel.
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
switch (AVXLevel) {
case X86AVXABILevel::AVX512:
return 512;
case X86AVXABILevel::AVX:
return 256;
case X86AVXABILevel::None:
return 128;
}
llvm_unreachable("Unknown AVXLevel");
}
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
enum Class {
Integer = 0,
SSE,
SSEUp,
X87,
X87Up,
ComplexX87,
NoClass,
Memory
};
/// merge - Implement the X86_64 ABI merging algorithm.
///
/// Merge an accumulating classification \arg Accum with a field
/// classification \arg Field.
///
/// \param Accum - The accumulating classification. This should
/// always be either NoClass or the result of a previous merge
/// call. In addition, this should never be Memory (the caller
/// should just return Memory for the aggregate).
static Class merge(Class Accum, Class Field);
/// postMerge - Implement the X86_64 ABI post merging algorithm.
///
/// Post merger cleanup, reduces a malformed Hi and Lo pair to
/// final MEMORY or SSE classes when necessary.
///
/// \param AggregateSize - The size of the current aggregate in
/// the classification process.
///
/// \param Lo - The classification for the parts of the type
/// residing in the low word of the containing object.
///
/// \param Hi - The classification for the parts of the type
/// residing in the higher words of the containing object.
///
void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
/// classify - Determine the x86_64 register classes in which the
/// given type T should be passed.
///
/// \param Lo - The classification for the parts of the type
/// residing in the low word of the containing object.
///
/// \param Hi - The classification for the parts of the type
/// residing in the high word of the containing object.
///
/// \param OffsetBase - The bit offset of this type in the
/// containing object. Some parameters are classified different
/// depending on whether they straddle an eightbyte boundary.
///
/// \param isNamedArg - Whether the argument in question is a "named"
/// argument, as used in AMD64-ABI 3.5.7.
///
/// If a word is unused its result will be NoClass; if a type should
/// be passed in Memory then at least the classification of \arg Lo
/// will be Memory.
///
/// The \arg Lo class will be NoClass iff the argument is ignored.
///
/// If the \arg Lo class is ComplexX87, then the \arg Hi class will
/// also be ComplexX87.
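///
/// For example, '__int128' classifies as Lo = Integer, Hi = Integer, while
/// 'void' yields NoClass.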
void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
bool isNamedArg) const;
llvm::Type *GetByteVectorType(QualType Ty) const;
llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
unsigned IROffset, QualType SourceTy,
unsigned SourceOffset) const;
llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
unsigned IROffset, QualType SourceTy,
unsigned SourceOffset) const;
/// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
/// result such that the argument will be returned in memory.
ABIArgInfo getIndirectReturnResult(QualType Ty) const;
/// getIndirectResult - Given a source type \arg Ty, return a suitable result
/// such that the argument will be passed in memory.
///
/// \param freeIntRegs - The number of free integer registers remaining
/// available.
ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty,
unsigned freeIntRegs,
unsigned &neededInt,
unsigned &neededSSE,
bool isNamedArg) const;
bool IsIllegalVectorType(QualType Ty) const;
/// The 0.98 ABI revision clarified a lot of ambiguities,
/// unfortunately in ways that were not always consistent with
/// certain previous compilers. In particular, platforms which
/// required strict binary compatibility with older versions of GCC
/// may need to exempt themselves.
bool honorsRevision0_98() const {
return !getTarget().getTriple().isOSDarwin();
}
X86AVXABILevel AVXLevel;
// Some ABIs (e.g. X32 ABI and Native Client OS) use 32-bit pointers on
// 64-bit hardware.
bool Has64BitPointers;
public:
X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
ABIInfo(CGT), AVXLevel(AVXLevel),
Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
}
bool isPassedUsingAVXType(QualType type) const {
unsigned neededInt, neededSSE;
// The freeIntRegs argument doesn't matter here.
ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
/*isNamedArg*/true);
if (info.isDirect()) {
llvm::Type *ty = info.getCoerceToType();
if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
return (vectorTy->getBitWidth() > 128);
}
return false;
}
void computeInfo(CGFunctionInfo &FI) const override;
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
bool has64BitPointers() const {
return Has64BitPointers;
}
};
/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {
ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
bool IsReturnType) const;
public:
WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
void computeInfo(CGFunctionInfo &FI) const override;
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
bool isHomogeneousAggregateBaseType(QualType Ty) const override {
// FIXME: Assumes vectorcall is in use.
return isX86VectorTypeForVectorCall(getContext(), Ty);
}
bool isHomogeneousAggregateSmallEnough(const Type *Ty,
uint64_t NumMembers) const override {
// FIXME: Assumes vectorcall is in use.
return isX86VectorCallAggregateSmallEnough(NumMembers);
}
};
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
: TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
const X86_64ABIInfo &getABIInfo() const {
return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
return 7;
}
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const override {
llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
// 0-15 are the 16 integer registers.
// 16 is %rip.
AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
return false;
}
llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
StringRef Constraint,
llvm::Type* Ty) const override {
return X86AdjustInlineAsmType(CGF, Constraint, Ty);
}
bool isNoProtoCallVariadic(const CallArgList &args,
const FunctionNoProtoType *fnType) const override {
// The default CC on x86-64 sets %al to the number of SSE
// registers used, and GCC sets this when calling an unprototyped
// function, so we override the default behavior. However, don't do
// that when AVX types are involved: the ABI explicitly states it is
// undefined, and it doesn't work in practice because of how the ABI
// defines varargs anyway.
if (fnType->getCallConv() == CC_C) {
bool HasAVXType = false;
for (CallArgList::const_iterator
it = args.begin(), ie = args.end(); it != ie; ++it) {
if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
HasAVXType = true;
break;
}
}
if (!HasAVXType)
return true;
}
return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
}
llvm::Constant *
getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
unsigned Sig;
if (getABIInfo().has64BitPointers())
Sig = (0xeb << 0) | // jmp rel8
(0x0a << 8) | // .+0x0c
('F' << 16) |
('T' << 24);
else
Sig = (0xeb << 0) | // jmp rel8
(0x06 << 8) | // .+0x08
('F' << 16) |
('T' << 24);
return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
}
};
class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
public:
PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
: X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
void getDependentLibraryOption(llvm::StringRef Lib,
llvm::SmallString<24> &Opt) const override {
Opt = "\01";
Opt += Lib;
}
};
static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
// If the argument does not end in .lib, automatically add the suffix.
// If the argument contains a space, enclose it in quotes.
// This matches the behavior of MSVC.
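// For example, "msvcrt" becomes "msvcrt.lib", and "my lib" becomes
// "\"my lib.lib\"".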
bool Quote = (Lib.find(" ") != StringRef::npos);
std::string ArgStr = Quote ? "\"" : "";
ArgStr += Lib;
if (!Lib.endswith_lower(".lib"))
ArgStr += ".lib";
ArgStr += Quote ? "\"" : "";
return ArgStr;
}
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
bool d, bool p, bool w, unsigned RegParms)
: X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override;
void getDependentLibraryOption(llvm::StringRef Lib,
llvm::SmallString<24> &Opt) const override {
Opt = "/DEFAULTLIB:";
Opt += qualifyWindowsLibrary(Lib);
}
void getDetectMismatchOption(llvm::StringRef Name,
llvm::StringRef Value,
llvm::SmallString<32> &Opt) const override {
Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
}
};
static void addStackProbeSizeTargetAttribute(const Decl *D,
llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) {
if (isa<FunctionDecl>(D)) {
if (CGM.getCodeGenOpts().StackProbeSize != 4096) {
llvm::Function *Fn = cast<llvm::Function>(GV);
Fn->addFnAttr("stack-probe-size",
llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
}
}
}
void WinX86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const {
X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
addStackProbeSizeTargetAttribute(D, GV, CGM);
}
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
X86AVXABILevel AVXLevel)
: TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override;
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
return 7;
}
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const override {
llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
// 0-15 are the 16 integer registers.
// 16 is %rip.
AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
return false;
}
void getDependentLibraryOption(llvm::StringRef Lib,
llvm::SmallString<24> &Opt) const override {
Opt = "/DEFAULTLIB:";
Opt += qualifyWindowsLibrary(Lib);
}
void getDetectMismatchOption(llvm::StringRef Name,
llvm::StringRef Value,
llvm::SmallString<32> &Opt) const override {
Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
}
};
void WinX86_64TargetCodeGenInfo::setTargetAttributes(const Decl *D,
llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const {
TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
addStackProbeSizeTargetAttribute(D, GV, CGM);
}
}
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
Class &Hi) const {
// AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
//
// (a) If one of the classes is Memory, the whole argument is passed in
// memory.
//
// (b) If X87UP is not preceded by X87, the whole argument is passed in
// memory.
//
// (c) If the size of the aggregate exceeds two eightbytes and the first
// eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
// argument is passed in memory. NOTE: This is necessary to keep the
// ABI working for processors that don't support the __m256 type.
//
// (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
//
// Some of these are enforced by the merging logic. Others can arise
// only with unions; for example:
// union { _Complex double; unsigned; }
//
// Note that clauses (b) and (c) were added in 0.98.
//
if (Hi == Memory)
Lo = Memory;
if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
Lo = Memory;
if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
Lo = Memory;
if (Hi == SSEUp && Lo != SSE)
Hi = SSE;
}
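// A worked example of the merge rules below: struct { int a; float b; }
// occupies one eightbyte; merging Integer (from 'a') with SSE (from 'b')
// yields Integer per rule (d), so the struct is passed in a single GPR.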
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
// AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
// classified recursively so that always two fields are
// considered. The resulting class is calculated according to
// the classes of the fields in the eightbyte:
//
// (a) If both classes are equal, this is the resulting class.
//
// (b) If one of the classes is NO_CLASS, the resulting class is
// the other class.
//
// (c) If one of the classes is MEMORY, the result is the MEMORY
// class.
//
// (d) If one of the classes is INTEGER, the result is the
// INTEGER.
//
// (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
// MEMORY is used as class.
//
// (f) Otherwise class SSE is used.
// Accum should never be memory (we should have returned) or
// ComplexX87 (because this cannot be passed in a structure).
assert((Accum != Memory && Accum != ComplexX87) &&
"Invalid accumulated classification during merge.");
if (Accum == Field || Field == NoClass)
return Accum;
if (Field == Memory)
return Memory;
if (Accum == NoClass)
return Field;
if (Accum == Integer || Field == Integer)
return Integer;
if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
Accum == X87 || Accum == X87Up)
return Memory;
return SSE;
}
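// A worked example of classify(): struct { double d; int i; } spans two
// eightbytes; the double makes Lo = SSE and the int makes Hi = Integer, so
// the struct is passed in one SSE register plus one GPR.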
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Class &Lo, Class &Hi, bool isNamedArg) const {
// FIXME: This code can be simplified by introducing a simple value class for
// Class pairs with appropriate constructor methods for the various
// situations.
// FIXME: Some of the split computations are wrong; unaligned vectors
// shouldn't be passed in registers for example, so there is no chance they
// can straddle an eightbyte. Verify & simplify.
Lo = Hi = NoClass;
Class &Current = OffsetBase < 64 ? Lo : Hi;
Current = Memory;
if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
BuiltinType::Kind k = BT->getKind();
if (k == BuiltinType::Void) {
Current = NoClass;
} else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
Lo = Integer;
Hi = Integer;
} else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
Current = Integer;
} else if (k == BuiltinType::Float || k == BuiltinType::Double) {
Current = SSE;
} else if (k == BuiltinType::LongDouble) {
const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
if (LDF == &llvm::APFloat::IEEEquad) {
Lo = SSE;
Hi = SSEUp;
} else if (LDF == &llvm::APFloat::x87DoubleExtended) {
Lo = X87;
Hi = X87Up;
} else if (LDF == &llvm::APFloat::IEEEdouble) {
Current = SSE;
} else
llvm_unreachable("unexpected long double representation!");
}
// FIXME: _Decimal32 and _Decimal64 are SSE.
// FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
return;
}
if (const EnumType *ET = Ty->getAs<EnumType>()) {
// Classify the underlying integer type.
classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
return;
}
if (Ty->hasPointerRepresentation()) {
Current = Integer;
return;
}
if (Ty->isMemberPointerType()) {
if (Ty->isMemberFunctionPointerType()) {
if (Has64BitPointers) {
// If Has64BitPointers, this is an {i64, i64}, so classify both
// Lo and Hi now.
Lo = Hi = Integer;
} else {
// Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
// straddles an eightbyte boundary, Hi should be classified as well.
uint64_t EB_FuncPtr = (OffsetBase) / 64;
uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
if (EB_FuncPtr != EB_ThisAdj) {
Lo = Hi = Integer;
} else {
Current = Integer;
}
}
} else {
Current = Integer;
}
return;
}
if (const VectorType *VT = Ty->getAs<VectorType>()) {
uint64_t Size = getContext().getTypeSize(VT);
if (Size == 32) {
// gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
// float> as integer.
Current = Integer;
// If this type crosses an eightbyte boundary, it should be
// split.
uint64_t EB_Real = (OffsetBase) / 64;
uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
if (EB_Real != EB_Imag)
Hi = Lo;
} else if (Size == 64) {
// gcc passes <1 x double> in memory. :(
if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
return;
// gcc passes <1 x long long> as INTEGER.
if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
Current = Integer;
else
Current = SSE;
// If this type crosses an eightbyte boundary, it should be
// split.
if (OffsetBase && OffsetBase != 64)
Hi = Lo;
} else if (Size == 128 ||
(isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
// 256-bit arguments are split into four eightbyte chunks. The
// least significant one belongs to class SSE and all the others to class
// SSEUP. The original Lo and Hi design considers that types can't be
// greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
// This design isn't correct for 256 bits, but since there are no cases
// where the upper parts would need to be inspected, avoid adding
// complexity and just consider Hi to match the 64-256 part.
//
// Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
// registers if they are "named", i.e. not part of the "..." of a
// variadic function.
//
// Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
// split into eight eightbyte chunks, one SSE and seven SSEUP.
Lo = SSE;
Hi = SSEUp;
}
return;
}
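// e.g. with AVX enabled, a named 256-bit <8 x float> argument takes the
// (Lo, Hi) = (SSE, SSEUp) path above and is passed in a single YMM
// register.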
if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
QualType ET = getContext().getCanonicalType(CT->getElementType());
uint64_t Size = getContext().getTypeSize(Ty);
if (ET->isIntegralOrEnumerationType()) {
if (Size <= 64)
Current = Integer;
else if (Size <= 128)
Lo = Hi = Integer;
} else if (ET == getContext().FloatTy) {
Current = SSE;
} else if (ET == getContext().DoubleTy) {
Lo = Hi = SSE;
} else if (ET == getContext().LongDoubleTy) {
const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
if (LDF == &llvm::APFloat::IEEEquad)
Current = Memory;
else if (LDF == &llvm::APFloat::x87DoubleExtended)
Current = ComplexX87;
else if (LDF == &llvm::APFloat::IEEEdouble)
Lo = Hi = SSE;
else
llvm_unreachable("unexpected long double representation!");
}
// If this complex type crosses an eightbyte boundary then it
// should be split.
uint64_t EB_Real = (OffsetBase) / 64;
uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
if (Hi == NoClass && EB_Real != EB_Imag)
Hi = Lo;
return;
}
if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
// Arrays are treated like structures.
uint64_t Size = getContext().getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
// than four eightbytes, ..., it has class MEMORY.
if (Size > 256)
return;
// AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
// fields, it has class MEMORY.
//
// Only need to check alignment of array base.
if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
return;
// Otherwise implement simplified merge. We could be smarter about
// this, but it isn't worth it and would be harder to verify.
Current = NoClass;
uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
uint64_t ArraySize = AT->getSize().getZExtValue();
// The only case a 256-bit wide vector could be used is when the array
// contains a single 256-bit element. Since Lo and Hi logic isn't extended
// to work for sizes wider than 128, early check and fallback to memory.
if (Size > 128 && EltSize != 256)
return;
for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
Class FieldLo, FieldHi;
classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
if (Lo == Memory || Hi == Memory)
break;
}
postMerge(Size, Lo, Hi);
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
return;
}
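// For example, double[2] classifies as (Lo, Hi) = (SSE, SSE) via the loop
// above, while char[5] merges five Integer elements into Lo = Integer;
// both fit in registers.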
if (const RecordType *RT = Ty->getAs<RecordType>()) {
uint64_t Size = getContext().getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
// than four eightbytes, ..., it has class MEMORY.
if (Size > 256)
return;
// AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
// copy constructor or a non-trivial destructor, it is passed by invisible
// reference.
if (getRecordArgABI(RT, getCXXABI()))
return;
const RecordDecl *RD = RT->getDecl();
// Assume variable sized types are passed in memory.
if (RD->hasFlexibleArrayMember())
return;
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
// Reset Lo class, this will be recomputed.
Current = NoClass;
// If this is a C++ record, classify the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const auto &I : CXXRD->bases()) {
assert(!I.isVirtual() && !I.getType()->isDependentType() &&
"Unexpected base class!");
const CXXRecordDecl *Base =
cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
// Classify this field.
//
// AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
// single eightbyte, each is classified separately. Each eightbyte gets
// initialized to class NO_CLASS.
Class FieldLo, FieldHi;
uint64_t Offset =
OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
if (Lo == Memory || Hi == Memory) {
postMerge(Size, Lo, Hi);
return;
}
}
}
// Classify the fields one at a time, merging the results.
unsigned idx = 0;
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i, ++idx) {
uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
bool BitField = i->isBitField();
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
// four eightbytes, or it contains unaligned fields, it has class MEMORY.
//
// The only case a 256-bit wide vector could be used is when the struct
// contains a single 256-bit element. Since Lo and Hi logic isn't extended
// to work for sizes wider than 128, early check and fallback to memory.
//
if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
Lo = Memory;
postMerge(Size, Lo, Hi);
return;
}
// Note, skip this test for bit-fields, see below.
if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
Lo = Memory;
postMerge(Size, Lo, Hi);
return;
}
// Classify this field.
//
// AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
// exceeds a single eightbyte, each is classified
// separately. Each eightbyte gets initialized to class
// NO_CLASS.
Class FieldLo, FieldHi;
// Bit-fields require special handling, they do not force the
// structure to be passed in memory even if unaligned, and
// therefore they can straddle an eightbyte.
if (BitField) {
// Ignore padding bit-fields.
if (i->isUnnamedBitfield())
continue;
uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
uint64_t Size = i->getBitWidthValue(getContext());
uint64_t EB_Lo = Offset / 64;
uint64_t EB_Hi = (Offset + Size - 1) / 64;
if (EB_Lo) {
assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
FieldLo = NoClass;
FieldHi = Integer;
} else {
FieldLo = Integer;
FieldHi = EB_Hi ? Integer : NoClass;
}
} else
classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
Lo = merge(Lo, FieldLo);
Hi = merge(Hi, FieldHi);
if (Lo == Memory || Hi == Memory)
break;
}
postMerge(Size, Lo, Hi);
}
}
ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
// If this is a scalar LLVM value then assume LLVM will pass it in the right
// place naturally.
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
return (Ty->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
return ABIArgInfo::getIndirect(0);
}
bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
uint64_t Size = getContext().getTypeSize(VecTy);
unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
if (Size <= 64 || Size > LargestVector)
return true;
}
return false;
}
ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
unsigned freeIntRegs) const {
// If this is a scalar LLVM value then assume LLVM will pass it in the right
// place naturally.
//
// This assumption is optimistic, as there could be free registers available
// when we need to pass this argument in memory, and LLVM could try to pass
// the argument in the free register. This does not seem to happen currently,
// but this code would be much safer if we could mark the argument with
// 'onstack'. See PR12193.
if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
return (Ty->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
// Compute the byval alignment. We specify the alignment of the byval in all
// cases so that the mid-level optimizer knows the alignment of the byval.
unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
// Attempt to avoid passing indirect results using byval when possible. This
// is important for good codegen.
//
// We do this by coercing the value into a scalar type which the backend can
// handle naturally (i.e., without using byval).
//
// For simplicity, we currently only do this when we have exhausted all of the
// free integer registers. Doing this when there are free integer registers
// would require more care, as we would have to ensure that the coerced value
// did not claim the unused register. That would require either reordering the
// arguments to the function (so that any subsequent inreg values come first),
// or only doing this optimization when there were no following arguments that
// might be inreg.
//
// We currently expect it to be rare (particularly in well written code) for
// arguments to be passed on the stack when there are still free integer
// registers available (this would typically imply large structs being passed
// by value), so this seems like a fair tradeoff for now.
//
// We can revisit this if the backend grows support for 'onstack' parameter
// attributes. See PR12193.
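// For example: with freeIntRegs == 0, struct { int a, b; } has Align == 8
// and Size == 64 below, so it is coerced to i64 and lands on the stack
// without a byval attribute.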
if (freeIntRegs == 0) {
uint64_t Size = getContext().getTypeSize(Ty);
// If this type fits in an eightbyte, coerce it into the matching integral
// type, which will end up on the stack (with alignment 8).
if (Align == 8 && Size <= 64)
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
Size));
}
return ABIArgInfo::getIndirect(Align);
}
/// The ABI specifies that a value should be passed in a full vector XMM/YMM
/// register. Pick an LLVM IR type that will be passed as a vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
// Wrapper structs/arrays that only contain vectors are passed just like
// vectors; strip them off if present.
if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
Ty = QualType(InnerTy, 0);
llvm::Type *IRType = CGT.ConvertType(Ty);
if (isa<llvm::VectorType>(IRType) ||
IRType->getTypeID() == llvm::Type::FP128TyID)
return IRType;
// We couldn't find the preferred IR vector type for 'Ty'.
uint64_t Size = getContext().getTypeSize(Ty);
assert((Size == 128 || Size == 256) && "Invalid type found!");
// Return a LLVM IR vector type based on the size of 'Ty'.
return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
Size / 64);
}
/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or being in
/// alignment padding. The user type specified is known to be at most 128 bits
/// in size, and have passed through X86_64ABIInfo::classify with a successful
/// classification that put one of the two halves in the INTEGER class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
unsigned EndBit, ASTContext &Context) {
// If the bytes being queried are off the end of the type, there is no user
// data hiding here. This handles analysis of builtins, vectors and other
// types that don't contain interesting padding.
unsigned TySize = (unsigned)Context.getTypeSize(Ty);
if (TySize <= StartBit)
return true;
if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
// Check each element to see if the element overlaps with the queried range.
for (unsigned i = 0; i != NumElts; ++i) {
// If the element is after the span we care about, then we're done.
unsigned EltOffset = i*EltSize;
if (EltOffset >= EndBit) break;
unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
if (!BitsContainNoUserData(AT->getElementType(), EltStart,
EndBit-EltOffset, Context))
return false;
}
// If it overlaps no elements, then it is safe to process as padding.
return true;
}
if (const RecordType *RT = Ty->getAs<RecordType>()) {
const RecordDecl *RD = RT->getDecl();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const auto &I : CXXRD->bases()) {
assert(!I.isVirtual() && !I.getType()->isDependentType() &&
"Unexpected base class!");
const CXXRecordDecl *Base =
cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
// If the base is after the span we care about, ignore it.
unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
if (BaseOffset >= EndBit) continue;
unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
if (!BitsContainNoUserData(I.getType(), BaseStart,
EndBit-BaseOffset, Context))
return false;
}
}
// Verify that no field has data that overlaps the region of interest. Yes
// this could be sped up a lot by being smarter about queried fields,
// however we're only looking at structs up to 16 bytes, so we don't care
// much.
unsigned idx = 0;
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i, ++idx) {
unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
// If we found a field after the region we care about, then we're done.
if (FieldOffset >= EndBit) break;
unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
Context))
return false;
}
// If nothing in this record overlapped the area of interest, then we're
// clean.
return true;
}
return false;
}
/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
/// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
const llvm::DataLayout &TD) {
// Base case if we find a float.
if (IROffset == 0 && IRType->isFloatTy())
return true;
// If this is a struct, recurse into the field at the specified offset.
if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
const llvm::StructLayout *SL = TD.getStructLayout(STy);
unsigned Elt = SL->getElementContainingOffset(IROffset);
IROffset -= SL->getElementOffset(Elt);
return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
}
// If this is an array, recurse into the field at the specified offset.
if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
llvm::Type *EltTy = ATy->getElementType();
unsigned EltSize = TD.getTypeAllocSize(EltTy);
IROffset -= IROffset/EltSize*EltSize;
return ContainsFloatAtOffset(EltTy, IROffset, TD);
}
return false;
}
/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
/// low 8 bytes of an XMM register, corresponding to the SSE class.
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
QualType SourceTy, unsigned SourceOffset) const {
// The only three choices we have are double, <2 x float>, or float. We
// pass as float if the last 4 bytes are just padding. This happens for
// structs that contain 3 floats.
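// For example, struct { float a, b, c; } is 96 bits wide: the query for
// bits [96, 128) falls entirely off the end of the type, so
// BitsContainNoUserData returns true and the high part is passed as a
// lone float instead of a double.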
if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
SourceOffset*8+64, getContext()))
return llvm::Type::getFloatTy(getVMContext());
// We want to pass as <2 x float> if the LLVM IR type contains a float at
// offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
// case.
if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
return llvm::Type::getDoubleTy(getVMContext());
}
/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
/// an 8-byte GPR. This means that we either have a scalar or we are talking
/// about the high or low part of an up-to-16-byte struct. This routine picks
/// the best LLVM IR type to represent this, which may be i64 or may be anything
/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
/// etc).
///
/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
/// the source type. IROffset is an offset in bytes into the LLVM IR type that
/// the 8-byte value references. PrefType may be null.
///
/// SourceTy is the source-level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
///
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
QualType SourceTy, unsigned SourceOffset) const {
// If we're dealing with an un-offset LLVM IR type, then it means that we're
// returning an 8-byte unit starting with it. See if we can safely use it.
if (IROffset == 0) {
// Pointers and int64's always fill the 8-byte unit.
if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
IRType->isIntegerTy(64))
return IRType;
// If we have a 1/2/4-byte integer, we can use it only if the rest of the
// goodness in the source type is just tail padding. This is allowed to
// kick in for struct {double,int} on the int, but not on
// struct{double,int,int} because we wouldn't return the second int. We
// have to do this analysis on the source type because we can't depend on
// unions being lowered a specific way etc.
if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
IRType->isIntegerTy(32) ||
(isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
cast<llvm::IntegerType>(IRType)->getBitWidth();
if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
SourceOffset*8+64, getContext()))
return IRType;
}
}
if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
// If this is a struct, recurse into the field at the specified offset.
const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
if (IROffset < SL->getSizeInBytes()) {
unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
IROffset -= SL->getElementOffset(FieldIdx);
return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
SourceTy, SourceOffset);
}
}
if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
llvm::Type *EltTy = ATy->getElementType();
unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
unsigned EltOffset = IROffset/EltSize*EltSize;
return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
SourceOffset);
}
// Okay, we don't have any better idea of what to pass, so we pass this in
// an integer register, using an integer type no larger than the remaining
// part of the struct.
unsigned TySizeInBytes =
(unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
assert(TySizeInBytes != SourceOffset && "Empty field?");
// It is always safe to classify this as an integer type up to i64 that
// isn't larger than the structure.
return llvm::IntegerType::get(getVMContext(),
std::min(TySizeInBytes-SourceOffset, 8U)*8);
}
/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two register pair to pass or return, return a
/// first class aggregate to represent them. For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
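/// When the low type is shorter than 8 bytes (e.g. float or i32), it is
/// widened below so that the high type still starts at offset 8; the pair
/// (float, i32) therefore becomes {double, i32}.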
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
const llvm::DataLayout &TD) {
// In order to correctly satisfy the ABI, we need the high part to start
// at offset 8. If the high and low parts we inferred are both 4-byte types
// (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
// the second element at offset 8. Check for this:
unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
unsigned HiAlign = TD.getABITypeAlignment(Hi);
unsigned HiStart = llvm::RoundUpToAlignment(LoSize, HiAlign);
assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
// To handle this, we have to increase the size of the low part so that the
// second element will start at an 8 byte offset. We can't increase the size
// of the second element because it might make us access off the end of the
// struct.
if (HiStart != 8) {
// There are usually two sorts of types the ABI generation code can produce
// for the low part of a pair that aren't 8 bytes in size: float or
// i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
// NaCl).
// Promote these to a larger type.
if (Lo->isFloatTy())
Lo = llvm::Type::getDoubleTy(Lo->getContext());
else {
assert((Lo->isIntegerTy() || Lo->isPointerTy())
&& "Invalid/unknown lo type");
Lo = llvm::Type::getInt64Ty(Lo->getContext());
}
}
llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);
// Verify that the second element is at an 8-byte offset.
assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
"Invalid x86-64 argument pair!");
return Result;
}
ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
// AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
// classification algorithm.
X86_64ABIInfo::Class Lo, Hi;
classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
// Check some invariants.
assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
llvm::Type *ResType = nullptr;
switch (Lo) {
case NoClass:
if (Hi == NoClass)
return ABIArgInfo::getIgnore();
// If the low part is just padding, it takes no register, leave ResType
// null.
assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
"Unknown missing lo part");
break;
case SSEUp:
case X87Up:
llvm_unreachable("Invalid classification for lo word.");
// AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
// hidden argument.
case Memory:
return getIndirectReturnResult(RetTy);
// AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
// available register of the sequence %rax, %rdx is used.
case Integer:
ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
// If we have a sign or zero extended integer, make sure to return Extend
// so that the parameter gets the right LLVM IR attributes.
if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
if (RetTy->isIntegralOrEnumerationType() &&
RetTy->isPromotableIntegerType())
return ABIArgInfo::getExtend();
}
break;
// AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
// available SSE register of the sequence %xmm0, %xmm1 is used.
case SSE:
ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
break;
// AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
// returned on the X87 stack in %st0 as 80-bit x87 number.
case X87:
ResType = llvm::Type::getX86_FP80Ty(getVMContext());
break;
// AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
// part of the value is returned in %st0 and the imaginary part in
// %st1.
case ComplexX87:
assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
llvm::Type::getX86_FP80Ty(getVMContext()),
nullptr);
break;
}
llvm::Type *HighPart = nullptr;
switch (Hi) {
// Memory was handled previously and X87 should
// never occur as a hi class.
case Memory:
case X87:
llvm_unreachable("Invalid classification for hi word.");
case ComplexX87: // Previously handled.
case NoClass:
break;
case Integer:
HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
if (Lo == NoClass) // Return HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
break;
case SSE:
HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
if (Lo == NoClass) // Return HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
break;
// AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
// is passed in the next available eightbyte chunk of the last used
// vector register.
//
// SSEUP should always be preceded by SSE, just widen.
case SSEUp:
assert(Lo == SSE && "Unexpected SSEUp classification.");
ResType = GetByteVectorType(RetTy);
break;
// AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
// returned together with the previous X87 value in %st0.
case X87Up:
// If X87Up is preceded by X87, we don't need to do
// anything. However, in some cases with unions it may not be
// preceded by X87. In such situations we follow gcc and pass the
// extra bits in an SSE reg.
if (Lo != X87) {
HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
if (Lo == NoClass) // Return HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
}
break;
}
// If a high part was specified, merge it together with the low part. It is
// known to be passed in the high eightbyte of the result. We do this by forming a
// first class struct aggregate with the high and low part: {low, high}
if (HighPart)
ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
return ABIArgInfo::getDirect(ResType);
}
ABIArgInfo X86_64ABIInfo::classifyArgumentType(
QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
bool isNamedArg)
const
{
Ty = useFirstFieldIfTransparentUnion(Ty);
X86_64ABIInfo::Class Lo, Hi;
classify(Ty, 0, Lo, Hi, isNamedArg);
// Check some invariants.
// FIXME: Enforce these by construction.
assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
neededInt = 0;
neededSSE = 0;
llvm::Type *ResType = nullptr;
switch (Lo) {
case NoClass:
if (Hi == NoClass)
return ABIArgInfo::getIgnore();
// If the low part is just padding, it takes no register, leave ResType
// null.
assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
"Unknown missing lo part");
break;
// AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
// on the stack.
case Memory:
// AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
// COMPLEX_X87, it is passed in memory.
case X87:
case ComplexX87:
if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
++neededInt;
return getIndirectResult(Ty, freeIntRegs);
case SSEUp:
case X87Up:
llvm_unreachable("Invalid classification for lo word.");
// AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
// available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
// and %r9 is used.
case Integer:
++neededInt;
// Pick an 8-byte type based on the preferred type.
ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
// If we have a sign or zero extended integer, make sure to return Extend
// so that the parameter gets the right LLVM IR attributes.
if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
if (Ty->isIntegralOrEnumerationType() &&
Ty->isPromotableIntegerType())
return ABIArgInfo::getExtend();
}
break;
// AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
// available SSE register is used, the registers are taken in the
// order from %xmm0 to %xmm7.
case SSE: {
llvm::Type *IRType = CGT.ConvertType(Ty);
ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
++neededSSE;
break;
}
}
llvm::Type *HighPart = nullptr;
switch (Hi) {
// Memory was handled previously, ComplexX87 and X87 should
// never occur as hi classes, and X87Up must be preceded by X87,
// which is passed in memory.
case Memory:
case X87:
case ComplexX87:
llvm_unreachable("Invalid classification for hi word.");
case NoClass: break;
case Integer:
++neededInt;
// Pick an 8-byte type based on the preferred type.
HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
break;
// X87Up generally doesn't occur here (long double is passed in
// memory), except in situations involving unions.
case X87Up:
case SSE:
HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
return ABIArgInfo::getDirect(HighPart, 8);
++neededSSE;
break;
// AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
// eightbyte is passed in the upper half of the last used SSE
// register. This only happens when 128-bit vectors are passed.
case SSEUp:
assert(Lo == SSE && "Unexpected SSEUp classification");
ResType = GetByteVectorType(Ty);
break;
}
// If a high part was specified, merge it together with the low part. It is
// known to be passed in the high eightbyte of the result. We do this by forming a
// first class struct aggregate with the high and low part: {low, high}
if (HighPart)
ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
return ABIArgInfo::getDirect(ResType);
}
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
// Keep track of the number of assigned registers.
unsigned freeIntRegs = 6, freeSSERegs = 8;
// If the return value is indirect, then the hidden argument is consuming one
// integer register.
if (FI.getReturnInfo().isIndirect())
--freeIntRegs;
// The chain argument effectively gives us another free register.
if (FI.isChainCall())
++freeIntRegs;
unsigned NumRequiredArgs = FI.getNumRequiredArgs();
// AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
// get assigned (in left-to-right order) for passing as follows...
unsigned ArgNo = 0;
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it, ++ArgNo) {
bool IsNamedArg = ArgNo < NumRequiredArgs;
unsigned neededInt, neededSSE;
it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
neededSSE, IsNamedArg);
// AMD64-ABI 3.2.3p3: If there are no registers available for any
// eightbyte of an argument, the whole argument is passed on the
// stack. If registers have already been assigned for some
// eightbytes of such an argument, the assignments get reverted.
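// For example, if one GPR is free and an argument needs two, the whole
// argument is demoted to memory and the free GPR remains available for
// later arguments.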
if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
freeIntRegs -= neededInt;
freeSSERegs -= neededSSE;
} else {
it->info = getIndirectResult(it->type, freeIntRegs);
}
}
}
static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
QualType Ty,
CodeGenFunction &CGF) {
llvm::Value *overflow_arg_area_p = CGF.Builder.CreateStructGEP(
nullptr, VAListAddr, 2, "overflow_arg_area_p");
llvm::Value *overflow_arg_area =
CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
// AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
// byte boundary if alignment needed by type exceeds 8 byte boundary.
// It isn't stated explicitly in the standard, but in practice we use
// alignment greater than 16 where necessary.
uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
if (Align > 8) {
// overflow_arg_area = (overflow_arg_area + align - 1) & -align;
llvm::Value *Offset =
llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
CGF.Int64Ty);
llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
overflow_arg_area =
CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
overflow_arg_area->getType(),
"overflow_arg_area.align");
}
// AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
llvm::Value *Res =
CGF.Builder.CreateBitCast(overflow_arg_area,
llvm::PointerType::getUnqual(LTy));
// AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
// l->overflow_arg_area + sizeof(type).
// AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
// an 8 byte boundary.
uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
llvm::Value *Offset =
llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
"overflow_arg_area.next");
CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
// AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
return Res;
}
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
// Assume that va_list type is correct; should be pointer to LLVM type:
// struct {
// i32 gp_offset;
// i32 fp_offset;
// i8* overflow_arg_area;
// i8* reg_save_area;
// };
unsigned neededInt, neededSSE;
Ty = CGF.getContext().getCanonicalType(Ty);
ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
/*isNamedArg*/false);
// AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
// in the registers. If not go to step 7.
if (!neededInt && !neededSSE)
return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
// AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
// general purpose registers needed to pass type and num_fp to hold
// the number of floating point registers needed.
// AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
// registers. In the case: l->gp_offset > 48 - num_gp * 8 or
// l->fp_offset > 304 - num_fp * 16 go to step 7.
//
// NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
// register save space.
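// In the register save area, the six GPRs occupy bytes [0, 48) and the
// eight XMM slots bytes [48, 176) at 16 bytes each; gp_offset and
// fp_offset index into that area, which is what the limits below encode.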
llvm::Value *InRegs = nullptr;
llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr;
llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr;
if (neededInt) {
gp_offset_p =
CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "gp_offset_p");
gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
}
if (neededSSE) {
fp_offset_p =
CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 1, "fp_offset_p");
fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
llvm::Value *FitsInFP =
llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
}
llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
// Emit code to load the value if it was passed in registers.
CGF.EmitBlock(InRegBlock);
// AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
// an offset of l->gp_offset and/or l->fp_offset. This may require
// copying to a temporary location in case the parameter is passed
// in different register classes or requires an alignment greater
// than 8 for general purpose registers and 16 for XMM registers.
//
// FIXME: This really results in shameful code when we end up needing to
// collect arguments from different places; often what should result in a
// simple assembling of a structure from scattered addresses has many more
// loads than necessary. Can we clean this up?
llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
llvm::Value *RegAddr = CGF.Builder.CreateLoad(
CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3), "reg_save_area");
if (neededInt && neededSSE) {
// FIXME: Cleanup.
assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
llvm::Type *TyLo = ST->getElementType(0);
llvm::Type *TyHi = ST->getElementType(1);
assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
"Unexpected ABI info for mixed regs");
llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
llvm::Value *V =
CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0));
V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1));
RegAddr = CGF.Builder.CreateBitCast(Tmp,
llvm::PointerType::getUnqual(LTy));
} else if (neededInt) {
RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
RegAddr = CGF.Builder.CreateBitCast(RegAddr,
llvm::PointerType::getUnqual(LTy));
// Copy to a temporary if necessary to ensure the appropriate alignment.
std::pair<CharUnits, CharUnits> SizeAlign =
CGF.getContext().getTypeInfoInChars(Ty);
uint64_t TySize = SizeAlign.first.getQuantity();
unsigned TyAlign = SizeAlign.second.getQuantity();
if (TyAlign > 8) {
llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false);
RegAddr = Tmp;
}
} else if (neededSSE == 1) {
RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
RegAddr = CGF.Builder.CreateBitCast(RegAddr,
llvm::PointerType::getUnqual(LTy));
} else {
assert(neededSSE == 2 && "Invalid number of needed registers!");
// SSE registers are spaced 16 bytes apart in the register save
// area, so we need to collect the two eightbytes together.
llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
llvm::Type *DoubleTy = CGF.DoubleTy;
llvm::Type *DblPtrTy =
llvm::PointerType::getUnqual(DoubleTy);
llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty);
Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
DblPtrTy));
CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0));
V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
DblPtrTy));
CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1));
RegAddr = CGF.Builder.CreateBitCast(Tmp,
llvm::PointerType::getUnqual(LTy));
}
// AMD64-ABI 3.5.7p5: Step 5. Set:
// l->gp_offset = l->gp_offset + num_gp * 8
// l->fp_offset = l->fp_offset + num_fp * 16.
if (neededInt) {
llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
gp_offset_p);
}
if (neededSSE) {
llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
fp_offset_p);
}
CGF.EmitBranch(ContBlock);
// Emit code to load the value if it was passed in memory.
CGF.EmitBlock(InMemBlock);
llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
// Return the appropriate result.
CGF.EmitBlock(ContBlock);
llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
"vaarg.addr");
ResAddr->addIncoming(RegAddr, InRegBlock);
ResAddr->addIncoming(MemAddr, InMemBlock);
return ResAddr;
}
ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
bool IsReturnType) const {
if (Ty->isVoidType())
return ABIArgInfo::getIgnore();
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
TypeInfo Info = getContext().getTypeInfo(Ty);
uint64_t Width = Info.Width;
unsigned Align = getContext().toCharUnitsFromBits(Info.Align).getQuantity();
const RecordType *RT = Ty->getAs<RecordType>();
if (RT) {
if (!IsReturnType) {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
}
if (RT->getDecl()->hasFlexibleArrayMember())
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
// FIXME: mingw-w64-gcc emits 128-bit struct as i128
if (Width == 128 && getTarget().getTriple().isWindowsGNUEnvironment())
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
Width));
}
// vectorcall adds the concept of a homogeneous vector aggregate, similar to
// other targets.
const Type *Base = nullptr;
uint64_t NumElts = 0;
if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
if (FreeSSERegs >= NumElts) {
FreeSSERegs -= NumElts;
if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
return ABIArgInfo::getDirect();
return ABIArgInfo::getExpand();
}
return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
}
if (Ty->isMemberPointerType()) {
// If the member pointer is represented by an LLVM int or ptr, pass it
// directly.
llvm::Type *LLTy = CGT.ConvertType(Ty);
if (LLTy->isPointerTy() || LLTy->isIntegerTy())
return ABIArgInfo::getDirect();
}
if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
// MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
// not 1, 2, 4, or 8 bytes, must be passed by reference."
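// e.g. a 12-byte struct goes indirect below, while an 8-byte struct is
// coerced to i64 and passed directly.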
if (Width > 64 || !llvm::isPowerOf2_64(Width))
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
// Otherwise, coerce it to a small integer.
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
}
// The bool type is always extended per the ABI; other builtin types are
// not extended.
const BuiltinType *BT = Ty->getAs<BuiltinType>();
if (BT && BT->getKind() == BuiltinType::Bool)
return ABIArgInfo::getExtend();
return ABIArgInfo::getDirect();
}
void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
bool IsVectorCall =
FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
// We can use up to 4 SSE return registers with vectorcall.
unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true);
// We can use up to 6 SSE register parameters with vectorcall.
FreeSSERegs = IsVectorCall ? 6 : 0;
for (auto &I : FI.arguments())
I.info = classify(I.type, FreeSSERegs, false);
}
llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
llvm::Type *BPP = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
"ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
llvm::Type *PTy =
llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
uint64_t Offset =
llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
llvm::Value *NextAddr =
Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
return AddrTyped;
}
// PowerPC-32
namespace {
/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
public:
PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
};
class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
PPC32TargetCodeGenInfo(CodeGenTypes &CGT)
: TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
// This is recovered from gcc output.
return 1; // r1 is the dedicated stack pointer
}
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const override;
};
}
llvm::Value *PPC32_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
QualType Ty,
CodeGenFunction &CGF) const {
if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
// TODO: Implement this. For now ignore.
(void)CTy;
return nullptr;
}
bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
bool isInt =
Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
llvm::Type *CharPtr = CGF.Int8PtrTy;
llvm::Type *CharPtrPtr = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
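// The SVR4 va_list layout is { i8 gpr; i8 fpr; i16 reserved;
// i8* overflow_area; i8* regsave_area }; the +1, +3 and +4 offsets below
// step from one field to the next.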
llvm::Value *GPRPtr = Builder.CreateBitCast(VAListAddr, CharPtr, "gprptr");
llvm::Value *GPRPtrAsInt = Builder.CreatePtrToInt(GPRPtr, CGF.Int32Ty);
llvm::Value *FPRPtrAsInt =
Builder.CreateAdd(GPRPtrAsInt, Builder.getInt32(1));
llvm::Value *FPRPtr = Builder.CreateIntToPtr(FPRPtrAsInt, CharPtr);
llvm::Value *OverflowAreaPtrAsInt =
Builder.CreateAdd(FPRPtrAsInt, Builder.getInt32(3));
llvm::Value *OverflowAreaPtr =
Builder.CreateIntToPtr(OverflowAreaPtrAsInt, CharPtrPtr);
llvm::Value *RegsaveAreaPtrAsInt =
Builder.CreateAdd(OverflowAreaPtrAsInt, Builder.getInt32(4));
llvm::Value *RegsaveAreaPtr =
Builder.CreateIntToPtr(RegsaveAreaPtrAsInt, CharPtrPtr);
llvm::Value *GPR = Builder.CreateLoad(GPRPtr, false, "gpr");
// Align GPR when Ty is i64.
if (isI64) {
llvm::Value *GPRAnd = Builder.CreateAnd(GPR, Builder.getInt8(1));
llvm::Value *CC64 = Builder.CreateICmpEQ(GPRAnd, Builder.getInt8(1));
llvm::Value *GPRPlusOne = Builder.CreateAdd(GPR, Builder.getInt8(1));
GPR = Builder.CreateSelect(CC64, GPRPlusOne, GPR);
}
llvm::Value *FPR = Builder.CreateLoad(FPRPtr, false, "fpr");
llvm::Value *OverflowArea =
Builder.CreateLoad(OverflowAreaPtr, false, "overflow_area");
llvm::Value *OverflowAreaAsInt =
Builder.CreatePtrToInt(OverflowArea, CGF.Int32Ty);
llvm::Value *RegsaveArea =
Builder.CreateLoad(RegsaveAreaPtr, false, "regsave_area");
llvm::Value *RegsaveAreaAsInt =
Builder.CreatePtrToInt(RegsaveArea, CGF.Int32Ty);
llvm::Value *CC =
Builder.CreateICmpULT(isInt ? GPR : FPR, Builder.getInt8(8), "cond");
llvm::Value *RegConstant =
Builder.CreateMul(isInt ? GPR : FPR, Builder.getInt8(isInt ? 4 : 8));
llvm::Value *OurReg = Builder.CreateAdd(
RegsaveAreaAsInt, Builder.CreateSExt(RegConstant, CGF.Int32Ty));
if (Ty->isFloatingType())
OurReg = Builder.CreateAdd(OurReg, Builder.getInt32(32));
llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
CGF.EmitBlock(UsingRegs);
llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *Result1 = Builder.CreateIntToPtr(OurReg, PTy);
// Increase the GPR/FPR indexes.
if (isInt) {
GPR = Builder.CreateAdd(GPR, Builder.getInt8(isI64 ? 2 : 1));
Builder.CreateStore(GPR, GPRPtr);
} else {
FPR = Builder.CreateAdd(FPR, Builder.getInt8(1));
Builder.CreateStore(FPR, FPRPtr);
}
CGF.EmitBranch(Cont);
CGF.EmitBlock(UsingOverflow);
// Increase the overflow area.
llvm::Value *Result2 = Builder.CreateIntToPtr(OverflowAreaAsInt, PTy);
OverflowAreaAsInt =
Builder.CreateAdd(OverflowAreaAsInt, Builder.getInt32(isInt ? 4 : 8));
Builder.CreateStore(Builder.CreateIntToPtr(OverflowAreaAsInt, CharPtr),
OverflowAreaPtr);
CGF.EmitBranch(Cont);
CGF.EmitBlock(Cont);
llvm::PHINode *Result = CGF.Builder.CreatePHI(PTy, 2, "vaarg.addr");
Result->addIncoming(Result1, UsingRegs);
Result->addIncoming(Result2, UsingOverflow);
if (Ty->isAggregateType()) {
llvm::Value *AGGPtr = Builder.CreateBitCast(Result, CharPtrPtr, "aggrptr");
return Builder.CreateLoad(AGGPtr, false, "aggr");
}
return Result;
}
bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
// This is calculated from the LLVM and GCC tables and verified
// against gcc output. AFAIK all ABIs use the same encoding.
CodeGen::CGBuilderTy &Builder = CGF.Builder;
llvm::IntegerType *i8 = CGF.Int8Ty;
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
// 0-31: r0-31, the 4-byte general-purpose registers
AssignToArrayRange(Builder, Address, Four8, 0, 31);
// 32-63: fp0-31, the 8-byte floating-point registers
AssignToArrayRange(Builder, Address, Eight8, 32, 63);
// 64-76 are various 4-byte special-purpose registers:
// 64: mq
// 65: lr
// 66: ctr
// 67: ap
// 68-75: cr0-7
// 76: xer
AssignToArrayRange(Builder, Address, Four8, 64, 76);
// 77-108: v0-31, the 16-byte vector registers
AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
// 109: vrsave
// 110: vscr
// 111: spe_acc
// 112: spefscr
// 113: sfp
AssignToArrayRange(Builder, Address, Four8, 109, 113);
return false;
}
// PowerPC-64
namespace {
/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
public:
enum ABIKind {
ELFv1 = 0,
ELFv2
};
private:
static const unsigned GPRBits = 64;
ABIKind Kind;
bool HasQPX;
// A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
// will be passed in a QPX register.
bool IsQPXVectorTy(const Type *Ty) const {
if (!HasQPX)
return false;
if (const VectorType *VT = Ty->getAs<VectorType>()) {
unsigned NumElements = VT->getNumElements();
if (NumElements == 1)
return false;
if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
if (getContext().getTypeSize(Ty) <= 256)
return true;
} else if (VT->getElementType()->
isSpecificBuiltinType(BuiltinType::Float)) {
if (getContext().getTypeSize(Ty) <= 128)
return true;
}
}
return false;
}
bool IsQPXVectorTy(QualType Ty) const {
return IsQPXVectorTy(Ty.getTypePtr());
}
public:
PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX)
: DefaultABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {}
bool isPromotableTypeForABI(QualType Ty) const;
bool isAlignedParamType(QualType Ty, bool &Align32) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty) const;
bool isHomogeneousAggregateBaseType(QualType Ty) const override;
bool isHomogeneousAggregateSmallEnough(const Type *Ty,
uint64_t Members) const override;
// TODO: We can add more logic to computeInfo to improve performance.
// Example: For aggregate arguments that fit in a register, we could
// use getDirectInReg (as is done below for structs containing a single
// floating-point value) to avoid pushing them to memory on function
// entry. This would require changing the logic in PPCISelLowering
// when lowering the parameters in the caller and args in the callee.
void computeInfo(CGFunctionInfo &FI) const override {
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (auto &I : FI.arguments()) {
// We rely on the default argument classification for the most part.
// One exception: An aggregate containing a single floating-point
// or vector item must be passed in a register if one is available.
const Type *T = isSingleElementStruct(I.type, getContext());
if (T) {
const BuiltinType *BT = T->getAs<BuiltinType>();
if (IsQPXVectorTy(T) ||
(T->isVectorType() && getContext().getTypeSize(T) == 128) ||
(BT && BT->isFloatingPoint())) {
QualType QT(T, 0);
I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
continue;
}
}
I.info = classifyArgumentType(I.type);
}
}
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
};
class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
public:
PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX)
: TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
// This is recovered from gcc output.
return 1; // r1 is the dedicated stack pointer
}
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const override;
};
class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
// This is recovered from gcc output.
return 1; // r1 is the dedicated stack pointer
}
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const override;
};
}
// Return true if the ABI requires Ty to be passed sign- or zero-
// extended to 64 bits.
bool
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
// Promotable integer types are required to be promoted by the ABI.
if (Ty->isPromotableIntegerType())
return true;
// In addition to the usual promotable integer types, we also need to
// extend all 32-bit types, since the ABI requires promotion to 64 bits.
if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
switch (BT->getKind()) {
case BuiltinType::Int:
case BuiltinType::UInt:
return true;
default:
break;
}
return false;
}
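// Illustrative examples of the rule above: 'short' (a promotable integer
// type) and 'int' (a 32-bit type) are both sign- or zero-extended to
// 64 bits, while 'long' is already doubleword-sized and is passed direct.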
/// isAlignedParamType - Determine whether a type requires 16-byte
/// alignment in the parameter area.
bool
PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty, bool &Align32) const {
Align32 = false;
// Complex types are passed just like their elements.
if (const ComplexType *CTy = Ty->getAs<ComplexType>())
Ty = CTy->getElementType();
// Only vector types of size 16 bytes need alignment (larger types are
// passed via reference, smaller types are not aligned).
if (IsQPXVectorTy(Ty)) {
if (getContext().getTypeSize(Ty) > 128)
Align32 = true;
return true;
} else if (Ty->isVectorType()) {
return getContext().getTypeSize(Ty) == 128;
}
// For single-element float/vector structs, we consider the whole type
// to have the same alignment requirements as its single element.
const Type *AlignAsType = nullptr;
const Type *EltType = isSingleElementStruct(Ty, getContext());
if (EltType) {
const BuiltinType *BT = EltType->getAs<BuiltinType>();
if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
getContext().getTypeSize(EltType) == 128) ||
(BT && BT->isFloatingPoint()))
AlignAsType = EltType;
}
// Likewise for ELFv2 homogeneous aggregates.
const Type *Base = nullptr;
uint64_t Members = 0;
if (!AlignAsType && Kind == ELFv2 &&
isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
AlignAsType = Base;
// With special case aggregates, only vector base types need alignment.
if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
if (getContext().getTypeSize(AlignAsType) > 128)
Align32 = true;
return true;
} else if (AlignAsType) {
return AlignAsType->isVectorType();
}
// Otherwise, we only need alignment for any aggregate type that
// has an alignment requirement of >= 16 bytes.
if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
Align32 = true;
return true;
}
return false;
}
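// For example (illustrative): a 16-byte Altivec vector, or an ELFv2
// homogeneous aggregate of such vectors, gets 16-byte alignment in the
// parameter area; with QPX, a 32-byte vector additionally sets Align32
// and is aligned to 32 bytes.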
/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
/// aggregate. Base is set to the base element type, and Members is set
/// to the number of base elements.
bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
uint64_t &Members) const {
if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
uint64_t NElements = AT->getSize().getZExtValue();
if (NElements == 0)
return false;
if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
return false;
Members *= NElements;
} else if (const RecordType *RT = Ty->getAs<RecordType>()) {
const RecordDecl *RD = RT->getDecl();
if (RD->hasFlexibleArrayMember())
return false;
Members = 0;
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const auto &I : CXXRD->bases()) {
// Ignore empty records.
if (isEmptyRecord(getContext(), I.getType(), true))
continue;
uint64_t FldMembers;
if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
return false;
Members += FldMembers;
}
}
for (const auto *FD : RD->fields()) {
// Ignore (non-zero arrays of) empty records.
QualType FT = FD->getType();
while (const ConstantArrayType *AT =
getContext().getAsConstantArrayType(FT)) {
if (AT->getSize().getZExtValue() == 0)
return false;
FT = AT->getElementType();
}
if (isEmptyRecord(getContext(), FT, true))
continue;
// For compatibility with GCC, ignore empty bitfields in C++ mode.
if (getContext().getLangOpts().CPlusPlus &&
FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
continue;
uint64_t FldMembers;
if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
return false;
Members = (RD->isUnion() ?
std::max(Members, FldMembers) : Members + FldMembers);
}
if (!Base)
return false;
// Ensure there is no padding.
if (getContext().getTypeSize(Base) * Members !=
getContext().getTypeSize(Ty))
return false;
} else {
Members = 1;
if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
Members = 2;
Ty = CT->getElementType();
}
// Most ABIs only support float, double, and some vector type widths.
if (!isHomogeneousAggregateBaseType(Ty))
return false;
// The base type must be the same for all members. Types that
// agree in both total size and mode (float vs. vector) are
// treated as being equivalent here.
const Type *TyPtr = Ty.getTypePtr();
if (!Base)
Base = TyPtr;
if (Base->isVectorType() != TyPtr->isVectorType() ||
getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
return false;
}
return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
}
bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
// Homogeneous aggregates for ELFv2 must have base types of float,
// double, long double, or 128-bit vectors.
if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
if (BT->getKind() == BuiltinType::Float ||
BT->getKind() == BuiltinType::Double ||
BT->getKind() == BuiltinType::LongDouble)
return true;
}
if (const VectorType *VT = Ty->getAs<VectorType>()) {
if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
return true;
}
return false;
}
bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
const Type *Base, uint64_t Members) const {
// Vector types require one register, floating point types require one
// or two registers depending on their size.
uint32_t NumRegs =
Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
// Homogeneous Aggregates may occupy at most 8 registers.
return Members * NumRegs <= 8;
}
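// For example (illustrative): a homogeneous aggregate of eight doubles
// needs 8 registers and fits; so does one of four 128-bit 'long double's
// (two registers each); nine doubles would need 9 registers and fail.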
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
Ty = useFirstFieldIfTransparentUnion(Ty);
if (Ty->isAnyComplexType())
return ABIArgInfo::getDirect();
// Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
// or via reference (larger than 16 bytes).
if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
uint64_t Size = getContext().getTypeSize(Ty);
if (Size > 128)
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
else if (Size < 128) {
llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
return ABIArgInfo::getDirect(CoerceTy);
}
}
if (isAggregateTypeForABI(Ty)) {
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
bool Align32;
uint64_t ABIAlign = isAlignedParamType(Ty, Align32) ?
(Align32 ? 32 : 16) : 8;
uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
// ELFv2 homogeneous aggregates are passed as array types.
const Type *Base = nullptr;
uint64_t Members = 0;
if (Kind == ELFv2 &&
isHomogeneousAggregate(Ty, Base, Members)) {
llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
return ABIArgInfo::getDirect(CoerceTy);
}
// If an aggregate may end up fully in registers, we do not
// use the ByVal method, but pass the aggregate as array.
// This is usually beneficial since we avoid forcing the
// back-end to store the argument to memory.
uint64_t Bits = getContext().getTypeSize(Ty);
if (Bits > 0 && Bits <= 8 * GPRBits) {
llvm::Type *CoerceTy;
// Types up to 8 bytes are passed as integer type (which will be
// properly aligned in the argument save area doubleword).
if (Bits <= GPRBits)
CoerceTy = llvm::IntegerType::get(getVMContext(),
llvm::RoundUpToAlignment(Bits, 8));
// Larger types are passed as arrays, with the base type selected
// according to the required alignment in the save area.
else {
uint64_t RegBits = ABIAlign * 8;
uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits;
llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
}
return ABIArgInfo::getDirect(CoerceTy);
}
// All other aggregates are passed ByVal.
return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true,
/*Realign=*/TyAlign > ABIAlign);
}
return (isPromotableTypeForABI(Ty) ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
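// Illustrative examples for classifyArgumentType above: a 4-byte struct
// is coerced to i32; a plain 24-byte struct of ints (8-byte ABI
// alignment) becomes [3 x i64]; anything over 64 bytes (8 GPRs) is
// passed ByVal.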
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
if (RetTy->isAnyComplexType())
return ABIArgInfo::getDirect();
// Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
// or via reference (larger than 16 bytes).
if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
uint64_t Size = getContext().getTypeSize(RetTy);
if (Size > 128)
return ABIArgInfo::getIndirect(0);
else if (Size < 128) {
llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
return ABIArgInfo::getDirect(CoerceTy);
}
}
if (isAggregateTypeForABI(RetTy)) {
// ELFv2 homogeneous aggregates are returned as array types.
const Type *Base = nullptr;
uint64_t Members = 0;
if (Kind == ELFv2 &&
isHomogeneousAggregate(RetTy, Base, Members)) {
llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
return ABIArgInfo::getDirect(CoerceTy);
}
// ELFv2 small aggregates are returned in up to two registers.
uint64_t Bits = getContext().getTypeSize(RetTy);
if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
if (Bits == 0)
return ABIArgInfo::getIgnore();
llvm::Type *CoerceTy;
if (Bits > GPRBits) {
CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr);
} else
CoerceTy = llvm::IntegerType::get(getVMContext(),
llvm::RoundUpToAlignment(Bits, 8));
return ABIArgInfo::getDirect(CoerceTy);
}
// All other aggregates are returned indirectly.
return ABIArgInfo::getIndirect(0);
}
return (isPromotableTypeForABI(RetTy) ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
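// Illustrative examples for classifyReturnType above: under ELFv2,
// 'struct { long a, b; }' (128 bits) is returned as { i64, i64 } in two
// GPRs, a 4-byte struct as i32, and anything wider than two GPRs
// indirectly via an sret pointer.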
// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
QualType Ty,
CodeGenFunction &CGF) const {
llvm::Type *BP = CGF.Int8PtrTy;
llvm::Type *BPP = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
// Handle types that require 16-byte alignment in the parameter save area.
bool Align32;
if (isAlignedParamType(Ty, Align32)) {
llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
AddrAsInt = Builder.CreateAdd(AddrAsInt,
Builder.getInt64(Align32 ? 31 : 15));
AddrAsInt = Builder.CreateAnd(AddrAsInt,
Builder.getInt64(Align32 ? -32 : -16));
Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
}
// Update the va_list pointer. The pointer should be bumped by the
// size of the object. We can trust getTypeSize() except for a complex
// type whose base type is smaller than a doubleword. For these, the
// size of the object is 16 bytes; see below for further explanation.
unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
QualType BaseTy;
unsigned CplxBaseSize = 0;
if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
BaseTy = CTy->getElementType();
CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8;
if (CplxBaseSize < 8)
SizeInBytes = 16;
}
unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
llvm::Value *NextAddr =
Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
// If we have a complex type and the base type is smaller than 8 bytes,
// the ABI calls for the real and imaginary parts to be right-adjusted
// in separate doublewords. However, Clang expects us to produce a
// pointer to a structure with the two parts packed tightly. So generate
// loads of the real and imaginary parts relative to the va_list pointer,
// and store them to a temporary structure.
if (CplxBaseSize && CplxBaseSize < 8) {
llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
llvm::Value *ImagAddr = RealAddr;
if (CGF.CGM.getDataLayout().isBigEndian()) {
RealAddr =
Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
ImagAddr =
Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
} else {
ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(8));
}
llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
llvm::AllocaInst *Ptr =
CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty), "vacplx");
llvm::Value *RealPtr =
Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 0, ".real");
llvm::Value *ImagPtr =
Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 1, ".imag");
Builder.CreateStore(Real, RealPtr, false);
Builder.CreateStore(Imag, ImagPtr, false);
return Ptr;
}
// If the argument is smaller than 8 bytes, it is right-adjusted in
// its doubleword slot. Adjust the pointer to pick it up from the
// correct offset.
if (SizeInBytes < 8 && CGF.CGM.getDataLayout().isBigEndian()) {
llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
}
llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
return Builder.CreateBitCast(Addr, PTy);
}
static bool
PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) {
// This is calculated from the LLVM and GCC tables and verified
// against gcc output. AFAIK all ABIs use the same encoding.
CodeGen::CGBuilderTy &Builder = CGF.Builder;
llvm::IntegerType *i8 = CGF.Int8Ty;
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
// 0-31: r0-31, the 8-byte general-purpose registers
AssignToArrayRange(Builder, Address, Eight8, 0, 31);
// 32-63: fp0-31, the 8-byte floating-point registers
AssignToArrayRange(Builder, Address, Eight8, 32, 63);
// 64-76 are various 4-byte special-purpose registers:
// 64: mq
// 65: lr
// 66: ctr
// 67: ap
// 68-75: cr0-7
// 76: xer
AssignToArrayRange(Builder, Address, Four8, 64, 76);
// 77-108: v0-31, the 16-byte vector registers
AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
// 109: vrsave
// 110: vscr
// 111: spe_acc
// 112: spefscr
// 113: sfp
AssignToArrayRange(Builder, Address, Four8, 109, 113);
return false;
}
bool
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}
bool
PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}
//===----------------------------------------------------------------------===//
// AArch64 ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
class AArch64ABIInfo : public ABIInfo {
public:
enum ABIKind {
AAPCS = 0,
DarwinPCS
};
private:
ABIKind Kind;
public:
AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
private:
ABIKind getABIKind() const { return Kind; }
bool isDarwinPCS() const { return Kind == DarwinPCS; }
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType RetTy) const;
bool isHomogeneousAggregateBaseType(QualType Ty) const override;
bool isHomogeneousAggregateSmallEnough(const Type *Ty,
uint64_t Members) const override;
bool isIllegalVectorType(QualType Ty) const;
void computeInfo(CGFunctionInfo &FI) const override {
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (auto &it : FI.arguments())
it.info = classifyArgumentType(it.type);
}
llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const;
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override {
return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
: EmitAAPCSVAArg(VAListAddr, Ty, CGF);
}
};
class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
: TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
return 31;
}
bool doesReturnSlotInterfereWithArgs() const override { return false; }
};
}
ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
Ty = useFirstFieldIfTransparentUnion(Ty);
// Handle illegal vector types here.
if (isIllegalVectorType(Ty)) {
uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 32) {
llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
return ABIArgInfo::getDirect(ResType);
}
if (Size == 64) {
llvm::Type *ResType =
llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
return ABIArgInfo::getDirect(ResType);
}
if (Size == 128) {
llvm::Type *ResType =
llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
return ABIArgInfo::getDirect(ResType);
}
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
return (Ty->isPromotableIntegerType() && isDarwinPCS()
? ABIArgInfo::getExtend()
: ABIArgInfo::getDirect());
}
// Structures with either a non-trivial destructor or a non-trivial
// copy constructor are always indirect.
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
return ABIArgInfo::getIndirect(0, /*ByVal=*/RAA ==
CGCXXABI::RAA_DirectInMemory);
}
// Empty records are always ignored on Darwin, but actually passed in C++ mode
// elsewhere for GNU compatibility.
if (isEmptyRecord(getContext(), Ty, true)) {
if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
return ABIArgInfo::getIgnore();
return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}
// Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
const Type *Base = nullptr;
uint64_t Members = 0;
if (isHomogeneousAggregate(Ty, Base, Members)) {
return ABIArgInfo::getDirect(
llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
}
// Aggregates <= 16 bytes are passed directly in registers or on the stack.
uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 128) {
unsigned Alignment = getContext().getTypeAlign(Ty);
Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
// We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
// For aggregates with 16-byte alignment, we use i128.
if (Alignment < 128 && Size == 128) {
llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
}
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
}
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
// Large vector types should be returned via memory.
if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
return ABIArgInfo::getIndirect(0);
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
return (RetTy->isPromotableIntegerType() && isDarwinPCS()
? ABIArgInfo::getExtend()
: ABIArgInfo::getDirect());
}
if (isEmptyRecord(getContext(), RetTy, true))
return ABIArgInfo::getIgnore();
const Type *Base = nullptr;
uint64_t Members = 0;
if (isHomogeneousAggregate(RetTy, Base, Members))
// Homogeneous Floating-point Aggregates (HFAs) are returned directly.
return ABIArgInfo::getDirect();
// Aggregates <= 16 bytes are returned directly in registers or on the stack.
uint64_t Size = getContext().getTypeSize(RetTy);
if (Size <= 128) {
unsigned Alignment = getContext().getTypeAlign(RetTy);
Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
// We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
// For aggregates with 16-byte alignment, we use i128.
if (Alignment < 128 && Size == 128) {
llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
}
return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
}
return ABIArgInfo::getIndirect(0);
}
/// isIllegalVectorType - Determine whether Ty is an illegal vector type
/// for AArch64.
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
if (const VectorType *VT = Ty->getAs<VectorType>()) {
// Check whether VT is legal.
unsigned NumElements = VT->getNumElements();
uint64_t Size = getContext().getTypeSize(VT);
// NumElements should be a power of 2 between 1 and 16.
if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16)
return true;
return Size != 64 && (Size != 128 || NumElements == 1);
}
return false;
}
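// For example (illustrative): a vector of 3 floats (element count not a
// power of two) is illegal and is coerced by the size rules in
// classifyArgumentType, while 8 x i8 (64 bits) and 4 x float (128 bits)
// are legal.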
bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
// Homogeneous aggregates for AAPCS64 must have base types of a floating
// point type or a short-vector type. This is the same as the 32-bit ABI,
// but with the difference that any floating-point type is allowed,
// including __fp16.
if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
if (BT->isFloatingPoint())
return true;
} else if (const VectorType *VT = Ty->getAs<VectorType>()) {
unsigned VecSize = getContext().getTypeSize(VT);
if (VecSize == 64 || VecSize == 128)
return true;
}
return false;
}
bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
uint64_t Members) const {
return Members <= 4;
}
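// For example (illustrative): 'struct { float x, y, z, w; }' is a
// homogeneous aggregate of four floats and is passed as [4 x float] in
// consecutive FP registers when available; five floats exceed the
// four-member limit and, at 20 bytes, are passed indirectly.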
llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
QualType Ty,
CodeGenFunction &CGF) const {
ABIArgInfo AI = classifyArgumentType(Ty);
bool IsIndirect = AI.isIndirect();
llvm::Type *BaseTy = CGF.ConvertType(Ty);
if (IsIndirect)
BaseTy = llvm::PointerType::getUnqual(BaseTy);
else if (AI.getCoerceToType())
BaseTy = AI.getCoerceToType();
unsigned NumRegs = 1;
if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
BaseTy = ArrTy->getElementType();
NumRegs = ArrTy->getNumElements();
}
bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
// The AArch64 va_list type and handling is specified in the Procedure Call
// Standard, section B.4:
//
// struct {
// void *__stack;
// void *__gr_top;
// void *__vr_top;
// int __gr_offs;
// int __vr_offs;
// };
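// Illustrative note: per the PCS, __gr_offs and __vr_offs are negative
// offsets from __gr_top/__vr_top while register save-area slots remain,
// counting up toward zero; a non-negative offset means the argument is on
// the stack, which is exactly what the branches below test.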
llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
auto &Ctx = CGF.getContext();
llvm::Value *reg_offs_p = nullptr, *reg_offs = nullptr;
int reg_top_index;
int RegSize = IsIndirect ? 8 : getContext().getTypeSize(Ty) / 8;
if (!IsFPR) {
// 3 is the field number of __gr_offs
reg_offs_p =
CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3, "gr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
reg_top_index = 1; // field number for __gr_top
RegSize = llvm::RoundUpToAlignment(RegSize, 8);
} else {
// 4 is the field number of __vr_offs.
reg_offs_p =
CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 4, "vr_offs_p");
reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
reg_top_index = 2; // field number for __vr_top
RegSize = 16 * NumRegs;
}
//=======================================
// Find out where argument was passed
//=======================================
// If reg_offs >= 0 we're already using the stack for this type of
// argument. We don't want to keep updating reg_offs (in case it overflows,
// though anyone passing 2GB of arguments, each at most 16 bytes, deserves
// whatever they get).
llvm::Value *UsingStack = nullptr;
UsingStack = CGF.Builder.CreateICmpSGE(
reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
// Otherwise, at least some kind of argument could go in these registers, the
// question is whether this particular type is too big.
CGF.EmitBlock(MaybeRegBlock);
// Integer arguments may need correct register alignment (for example a
// "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
// align __gr_offs to calculate the potential address.
if (!IsFPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
int Align = Ctx.getTypeAlign(Ty) / 8;
reg_offs = CGF.Builder.CreateAdd(
reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
"align_regoffs");
reg_offs = CGF.Builder.CreateAnd(
reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
"aligned_regoffs");
}
// Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
llvm::Value *NewOffset = nullptr;
NewOffset = CGF.Builder.CreateAdd(
reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
CGF.Builder.CreateStore(NewOffset, reg_offs_p);
// Now we're in a position to decide whether this argument really was in
// registers or not.
llvm::Value *InRegs = nullptr;
InRegs = CGF.Builder.CreateICmpSLE(
NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
//=======================================
// Argument was in registers
//=======================================
// Now we emit the code for if the argument was originally passed in
// registers. First start the appropriate block:
CGF.EmitBlock(InRegBlock);
llvm::Value *reg_top_p = nullptr, *reg_top = nullptr;
reg_top_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, reg_top_index,
"reg_top_p");
reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
llvm::Value *RegAddr = nullptr;
llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
if (IsIndirect) {
// If it's been passed indirectly (actually a struct), whatever we find from
// stored registers or on the stack will actually be a struct **.
MemTy = llvm::PointerType::getUnqual(MemTy);
}
const Type *Base = nullptr;
uint64_t NumMembers = 0;
bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
if (IsHFA && NumMembers > 1) {
// Homogeneous aggregates passed in registers will have their elements split
// and stored 16 bytes apart regardless of size (they're notionally in qN,
// qN+1, ...). We reload and store into a temporary local variable
// contiguously.
assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(HFATy);
int Offset = 0;
if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
Offset = 16 - Ctx.getTypeSize(Base) / 8;
for (unsigned i = 0; i < NumMembers; ++i) {
llvm::Value *BaseOffset =
llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
LoadAddr = CGF.Builder.CreateBitCast(
LoadAddr, llvm::PointerType::getUnqual(BaseTy));
llvm::Value *StoreAddr =
CGF.Builder.CreateStructGEP(Tmp->getAllocatedType(), Tmp, i);
llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
CGF.Builder.CreateStore(Elem, StoreAddr);
}
RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
} else {
// Otherwise the object is contiguous in memory
unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
if (CGF.CGM.getDataLayout().isBigEndian() &&
(IsHFA || !isAggregateTypeForABI(Ty)) &&
Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);
BaseAddr = CGF.Builder.CreateAdd(
BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
}
RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
}
CGF.EmitBranch(ContBlock);
//=======================================
// Argument was on the stack
//=======================================
CGF.EmitBlock(OnStackBlock);
llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr;
stack_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "stack_p");
OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
// Again, stack arguments may need realignment. In this case both integer and
// floating-point ones might be affected.
if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
int Align = Ctx.getTypeAlign(Ty) / 8;
OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
OnStackAddr = CGF.Builder.CreateAdd(
OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
"align_stack");
OnStackAddr = CGF.Builder.CreateAnd(
OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
"align_stack");
OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
}
uint64_t StackSize;
if (IsIndirect)
StackSize = 8;
else
StackSize = Ctx.getTypeSize(Ty) / 8;
// All stack slots are 8 bytes
StackSize = llvm::RoundUpToAlignment(StackSize, 8);
llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
llvm::Value *NewStack =
CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");
// Write the new value of __stack for the next call to va_arg
CGF.Builder.CreateStore(NewStack, stack_p);
if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
Ctx.getTypeSize(Ty) < 64) {
int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
OnStackAddr = CGF.Builder.CreateAdd(
OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
}
OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
CGF.EmitBranch(ContBlock);
//=======================================
// Tidy up
//=======================================
CGF.EmitBlock(ContBlock);
llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
ResAddr->addIncoming(RegAddr, InRegBlock);
ResAddr->addIncoming(OnStackAddr, OnStackBlock);
if (IsIndirect)
return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
return ResAddr;
}
llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr,
QualType Ty,
CodeGenFunction &CGF) const {
// We do not support va_arg for aggregates or illegal vector types.
// Lower VAArg here for these cases and use the LLVM va_arg instruction for
// other cases.
if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
return nullptr;
uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
const Type *Base = nullptr;
uint64_t Members = 0;
bool isHA = isHomogeneousAggregate(Ty, Base, Members);
bool isIndirect = false;
// Arguments bigger than 16 bytes which aren't homogeneous aggregates should
// be passed indirectly.
if (Size > 16 && !isHA) {
isIndirect = true;
Size = 8;
Align = 8;
}
llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
if (isEmptyRecord(getContext(), Ty, true)) {
// These are ignored for parameter passing purposes.
llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
return Builder.CreateBitCast(Addr, PTy);
}
const uint64_t MinABIAlign = 8;
if (Align > MinABIAlign) {
llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
Addr = Builder.CreateGEP(Addr, Offset);
llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1));
llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask);
Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align");
}
uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign);
llvm::Value *NextAddr = Builder.CreateGEP(
Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
if (isIndirect)
Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
return AddrTyped;
}
//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
class ARMABIInfo : public ABIInfo {
public:
enum ABIKind {
APCS = 0,
AAPCS = 1,
AAPCS_VFP
};
private:
ABIKind Kind;
public:
ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {
setCCs();
}
bool isEABI() const {
switch (getTarget().getTriple().getEnvironment()) {
case llvm::Triple::Android:
case llvm::Triple::EABI:
case llvm::Triple::EABIHF:
case llvm::Triple::GNUEABI:
case llvm::Triple::GNUEABIHF:
return true;
default:
return false;
}
}
bool isEABIHF() const {
switch (getTarget().getTriple().getEnvironment()) {
case llvm::Triple::EABIHF:
case llvm::Triple::GNUEABIHF:
return true;
default:
return false;
}
}
ABIKind getABIKind() const { return Kind; }
private:
ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const;
bool isIllegalVectorType(QualType Ty) const;
bool isHomogeneousAggregateBaseType(QualType Ty) const override;
bool isHomogeneousAggregateSmallEnough(const Type *Ty,
uint64_t Members) const override;
void computeInfo(CGFunctionInfo &FI) const override;
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
llvm::CallingConv::ID getLLVMDefaultCC() const;
llvm::CallingConv::ID getABIDefaultCC() const;
void setCCs();
};
class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
:TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
const ARMABIInfo &getABIInfo() const {
return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
return 13;
}
StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
}
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const override {
llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
// 0-15 are the 16 integer registers.
AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
return false;
}
unsigned getSizeOfUnwindException() const override {
if (getABIInfo().isEABI()) return 88;
return TargetCodeGenInfo::getSizeOfUnwindException();
}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override {
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
if (!FD)
return;
const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
if (!Attr)
return;
const char *Kind;
switch (Attr->getInterrupt()) {
case ARMInterruptAttr::Generic: Kind = ""; break;
case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
case ARMInterruptAttr::SWI: Kind = "SWI"; break;
case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
}
llvm::Function *Fn = cast<llvm::Function>(GV);
Fn->addFnAttr("interrupt", Kind);
if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS)
return;
// AAPCS guarantees that sp will be 8-byte aligned on any public interface,
// however this is not necessarily true on taking any interrupt. Instruct
// the backend to perform a realignment as part of the function prologue.
llvm::AttrBuilder B;
B.addStackAlignmentAttr(8);
Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
llvm::AttributeSet::get(CGM.getLLVMContext(),
llvm::AttributeSet::FunctionIndex,
B));
}
};
class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
void addStackProbeSizeTargetAttribute(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const;
public:
WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
: ARMTargetCodeGenInfo(CGT, K) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override;
};
void WindowsARMTargetCodeGenInfo::addStackProbeSizeTargetAttribute(
const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
if (!isa<FunctionDecl>(D))
return;
if (CGM.getCodeGenOpts().StackProbeSize == 4096)
return;
llvm::Function *F = cast<llvm::Function>(GV);
F->addFnAttr("stack-probe-size",
llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
}
void WindowsARMTargetCodeGenInfo::setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
addStackProbeSizeTargetAttribute(D, GV, CGM);
}
}
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() =
classifyReturnType(FI.getReturnType(), FI.isVariadic());
for (auto &I : FI.arguments())
I.info = classifyArgumentType(I.type, FI.isVariadic());
// Always honor user-specified calling convention.
if (FI.getCallingConvention() != llvm::CallingConv::C)
return;
llvm::CallingConv::ID cc = getRuntimeCC();
if (cc != llvm::CallingConv::C)
FI.setEffectiveCallingConvention(cc);
}
/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
// The default calling convention that LLVM will infer.
if (isEABIHF())
return llvm::CallingConv::ARM_AAPCS_VFP;
else if (isEABI())
return llvm::CallingConv::ARM_AAPCS;
else
return llvm::CallingConv::ARM_APCS;
}
/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
switch (getABIKind()) {
case APCS: return llvm::CallingConv::ARM_APCS;
case AAPCS: return llvm::CallingConv::ARM_AAPCS;
case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
}
llvm_unreachable("bad ABI kind");
}
void ARMABIInfo::setCCs() {
assert(getRuntimeCC() == llvm::CallingConv::C);
// Don't muddy up the IR with a ton of explicit annotations if
// they'd just match what LLVM will infer from the triple.
llvm::CallingConv::ID abiCC = getABIDefaultCC();
if (abiCC != getLLVMDefaultCC())
RuntimeCC = abiCC;
BuiltinCC = (getABIKind() == APCS ?
llvm::CallingConv::ARM_APCS : llvm::CallingConv::ARM_AAPCS);
}
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
bool isVariadic) const {
// 6.1.2.1 The following argument types are VFP CPRCs:
// A single-precision floating-point type (including promoted
// half-precision types); A double-precision floating-point type;
// A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
// with a Base Type of a single- or double-precision floating-point type,
// 64-bit containerized vectors or 128-bit containerized vectors with one
// to four Elements.
bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
Ty = useFirstFieldIfTransparentUnion(Ty);
// Handle illegal vector types here.
if (isIllegalVectorType(Ty)) {
uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 32) {
llvm::Type *ResType =
llvm::Type::getInt32Ty(getVMContext());
return ABIArgInfo::getDirect(ResType);
}
if (Size == 64) {
llvm::Type *ResType = llvm::VectorType::get(
llvm::Type::getInt32Ty(getVMContext()), 2);
return ABIArgInfo::getDirect(ResType);
}
if (Size == 128) {
llvm::Type *ResType = llvm::VectorType::get(
llvm::Type::getInt32Ty(getVMContext()), 4);
return ABIArgInfo::getDirect(ResType);
}
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
Ty = EnumTy->getDecl()->getIntegerType();
}
return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
: ABIArgInfo::getDirect());
}
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
}
// Ignore empty records.
if (isEmptyRecord(getContext(), Ty, true))
return ABIArgInfo::getIgnore();
if (IsEffectivelyAAPCS_VFP) {
// Homogeneous Aggregates need to be expanded when we can fit the aggregate
// into VFP registers.
const Type *Base = nullptr;
uint64_t Members = 0;
if (isHomogeneousAggregate(Ty, Base, Members)) {
assert(Base && "Base class should be set for homogeneous aggregate");
// Base can be a floating-point or a vector.
return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}
}
// Support byval for ARM.
// The ABI alignment for APCS is 4 bytes; for AAPCS it is at least 4 bytes
// and at most 8 bytes. We realign the indirect argument if the type
// alignment is bigger than the ABI alignment.
uint64_t ABIAlign = 4;
uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
getABIKind() == ARMABIInfo::AAPCS)
ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true,
/*Realign=*/TyAlign > ABIAlign);
}
// Otherwise, pass by coercing to a structure of the appropriate size.
llvm::Type* ElemTy;
unsigned SizeRegs;
// FIXME: Try to match the types of the arguments more accurately where
// we can.
if (getContext().getTypeAlign(Ty) <= 32) {
ElemTy = llvm::Type::getInt32Ty(getVMContext());
SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
} else {
ElemTy = llvm::Type::getInt64Ty(getVMContext());
SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
}
return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}
static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
llvm::LLVMContext &VMContext) {
// APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
// is called integer-like if its size is less than or equal to one word, and
// the offset of each of its addressable sub-fields is zero.
uint64_t Size = Context.getTypeSize(Ty);
// Check that the type fits in a word.
if (Size > 32)
return false;
// FIXME: Handle vector types!
if (Ty->isVectorType())
return false;
// Float types are never treated as "integer like".
if (Ty->isRealFloatingType())
return false;
// If this is a builtin or pointer type then it is ok.
if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
return true;
// Small complex integer types are "integer like".
if (const ComplexType *CT = Ty->getAs<ComplexType>())
return isIntegerLikeType(CT->getElementType(), Context, VMContext);
// Single-element and zero-sized arrays should be allowed by the definition
// above, but they are not.
// Otherwise, it must be a record type.
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT) return false;
// Ignore records with flexible arrays.
const RecordDecl *RD = RT->getDecl();
if (RD->hasFlexibleArrayMember())
return false;
// Check that all sub-fields are at offset 0, and are themselves "integer
// like".
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
bool HadField = false;
unsigned idx = 0;
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i, ++idx) {
const FieldDecl *FD = *i;
// Bit-fields are not addressable, we only need to verify they are "integer
// like". We still have to disallow a subsequent non-bitfield, for example:
// struct { int : 0; int x; }
// is non-integer like according to gcc.
if (FD->isBitField()) {
if (!RD->isUnion())
HadField = true;
if (!isIntegerLikeType(FD->getType(), Context, VMContext))
return false;
continue;
}
// Check if this field is at offset 0.
if (Layout.getFieldOffset(idx) != 0)
return false;
if (!isIntegerLikeType(FD->getType(), Context, VMContext))
return false;
// Only allow at most one field in a structure. This doesn't match the
// wording above, but follows gcc in situations with a field following an
// empty structure.
if (!RD->isUnion()) {
if (HadField)
return false;
HadField = true;
}
}
return true;
}
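// Illustrative examples: 'struct { short s; }' is integer-like (a single
// sub-field at offset 0 that fits in a word) and is returned in r0, while
// 'struct { float f; }' (floating point) and 'struct { int a, b; }'
// (wider than a word) are not.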
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
bool isVariadic) const {
bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
// Large vector types should be returned via memory.
if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
return ABIArgInfo::getIndirect(0);
}
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
: ABIArgInfo::getDirect();
}
// Are we following APCS?
if (getABIKind() == APCS) {
if (isEmptyRecord(getContext(), RetTy, false))
return ABIArgInfo::getIgnore();
// Complex types are all returned as packed integers.
//
// FIXME: Consider using 2 x vector types if the back end handles them
// correctly.
if (RetTy->isAnyComplexType())
return ABIArgInfo::getDirect(llvm::IntegerType::get(
getVMContext(), getContext().getTypeSize(RetTy)));
// Integer like structures are returned in r0.
if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
// Return in the smallest viable integer type.
uint64_t Size = getContext().getTypeSize(RetTy);
if (Size <= 8)
return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
if (Size <= 16)
return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
}
// Otherwise return in memory.
return ABIArgInfo::getIndirect(0);
}
// Otherwise this is an AAPCS variant.
if (isEmptyRecord(getContext(), RetTy, true))
return ABIArgInfo::getIgnore();
// Check for homogeneous aggregates with AAPCS-VFP.
if (IsEffectivelyAAPCS_VFP) {
const Type *Base = nullptr;
uint64_t Members;
if (isHomogeneousAggregate(RetTy, Base, Members)) {
assert(Base && "Base class should be set for homogeneous aggregate");
// Homogeneous Aggregates are returned directly.
return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}
}
// Aggregates <= 4 bytes are returned in r0; other aggregates
// are returned indirectly.
uint64_t Size = getContext().getTypeSize(RetTy);
if (Size <= 32) {
if (getDataLayout().isBigEndian())
// Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4)
return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
// Return in the smallest viable integer type.
if (Size <= 8)
return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
if (Size <= 16)
return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
}
return ABIArgInfo::getIndirect(0);
}
/// isIllegalVectorType - Determine whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
if (const VectorType *VT = Ty->getAs<VectorType>()) {
// Check whether VT is legal.
unsigned NumElements = VT->getNumElements();
uint64_t Size = getContext().getTypeSize(VT);
// NumElements should be a power of 2.
if ((NumElements & (NumElements - 1)) != 0)
return true;
// Vectors of 32 bits or less are illegal; legal vectors are wider.
return Size <= 32;
}
return false;
}
bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
// Homogeneous aggregates for AAPCS-VFP must have base types of float,
// double, or 64-bit or 128-bit vectors.
if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
if (BT->getKind() == BuiltinType::Float ||
BT->getKind() == BuiltinType::Double ||
BT->getKind() == BuiltinType::LongDouble)
return true;
} else if (const VectorType *VT = Ty->getAs<VectorType>()) {
unsigned VecSize = getContext().getTypeSize(VT);
if (VecSize == 64 || VecSize == 128)
return true;
}
return false;
}
bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
uint64_t Members) const {
return Members <= 4;
}
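// For example (illustrative): for a non-variadic AAPCS-VFP call,
// 'struct { double a, b; }' is a homogeneous aggregate of two doubles and
// is eligible for VFP registers, while a struct of five floats exceeds
// the four-member limit and takes the coerced/byval path instead.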
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
llvm::Type *BP = CGF.Int8PtrTy;
llvm::Type *BPP = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
if (isEmptyRecord(getContext(), Ty, true)) {
// These are ignored for parameter passing purposes.
llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
return Builder.CreateBitCast(Addr, PTy);
}
uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
bool IsIndirect = false;
// The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
// APCS. For AAPCS, the ABI alignment is at least 4 bytes and at most 8 bytes.
if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
getABIKind() == ARMABIInfo::AAPCS)
TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
else
TyAlign = 4;
// Use indirect if size of the illegal vector is bigger than 16 bytes.
if (isIllegalVectorType(Ty) && Size > 16) {
IsIndirect = true;
Size = 4;
TyAlign = 4;
}
// Handle address alignment for ABI alignment > 4 bytes.
if (TyAlign > 4) {
assert((TyAlign & (TyAlign - 1)) == 0 &&
"Alignment is not power of 2!");
llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
}
uint64_t Offset =
llvm::RoundUpToAlignment(Size, 4);
llvm::Value *NextAddr =
Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
if (IsIndirect)
Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
// We can't directly cast ap.cur to pointer to a vector type, since ap.cur
// may not be correctly aligned for the vector type. We create an aligned
// temporary space and copy the content over from ap.cur to the temporary
// space. This is necessary if the natural alignment of the type is greater
// than the ABI alignment.
llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
"var.align");
llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
Builder.CreateMemCpy(Dst, Src,
llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
TyAlign, false);
Addr = AlignedTemp; // The content is now in the aligned temporary.
}
llvm::Type *PTy =
llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
return AddrTyped;
}
//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
class NVPTXABIInfo : public ABIInfo {
public:
NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType Ty) const;
void computeInfo(CGFunctionInfo &FI) const override;
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CFG) const override;
};
class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
: TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const override;
private:
// Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
// resulting MDNode to the nvvm.annotations MDNode.
static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};
ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
// Note: This is different from the default ABI.
if (!RetTy->isScalarType())
return ABIArgInfo::getDirect();
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
return (RetTy->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
// Classify aggregate types as indirect, passed by value.
if (isAggregateTypeForABI(Ty))
return ABIArgInfo::getIndirect(0, /* byval */ true);
return (Ty->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (auto &I : FI.arguments())
I.info = classifyArgumentType(I.type);
// Always honor user-specified calling convention.
if (FI.getCallingConvention() != llvm::CallingConv::C)
return;
FI.setEffectiveCallingConvention(getRuntimeCC());
}
llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CFG) const {
llvm_unreachable("NVPTX does not support varargs");
}
void NVPTXTargetCodeGenInfo::
setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const{
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
if (!FD) return;
llvm::Function *F = cast<llvm::Function>(GV);
// Perform special handling in OpenCL mode
if (M.getLangOpts().OpenCL) {
// Use OpenCL function attributes to check for kernel functions
// By default, all functions are device functions
if (FD->hasAttr<OpenCLKernelAttr>()) {
// OpenCL __kernel functions get kernel metadata
// Create !{<func-ref>, metadata !"kernel", i32 1} node
addNVVMMetadata(F, "kernel", 1);
// And kernel functions are not subject to inlining
F->addFnAttr(llvm::Attribute::NoInline);
}
}
// Perform special handling in CUDA mode.
if (M.getLangOpts().CUDA) {
// CUDA __global__ functions get a kernel metadata entry. Since
// __global__ functions cannot be called from the device, we do not
// need to set the noinline attribute.
if (FD->hasAttr<CUDAGlobalAttr>()) {
// Create !{<func-ref>, metadata !"kernel", i32 1} node
addNVVMMetadata(F, "kernel", 1);
}
if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
// Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
llvm::APSInt MaxThreads(32);
MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
if (MaxThreads > 0)
addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
// min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
// not specified in __launch_bounds__ or if the user specified a 0 value,
// we don't have to add a PTX directive.
if (Attr->getMinBlocks()) {
llvm::APSInt MinBlocks(32);
MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
if (MinBlocks > 0)
// Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
}
}
}
}
void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
int Operand) {
llvm::Module *M = F->getParent();
llvm::LLVMContext &Ctx = M->getContext();
// Get "nvvm.annotations" metadata node
llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
llvm::Metadata *MDVals[] = {
llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
llvm::ConstantAsMetadata::get(
llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
// Append metadata to nvvm.annotations
MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
}
//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
class SystemZABIInfo : public ABIInfo {
bool HasVector;
public:
SystemZABIInfo(CodeGenTypes &CGT, bool HV)
: ABIInfo(CGT), HasVector(HV) {}
bool isPromotableIntegerType(QualType Ty) const;
bool isCompoundType(QualType Ty) const;
bool isVectorArgumentType(QualType Ty) const;
bool isFPArgumentType(QualType Ty) const;
QualType GetSingleElementType(QualType Ty) const;
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType ArgTy) const;
void computeInfo(CGFunctionInfo &FI) const override {
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (auto &I : FI.arguments())
I.info = classifyArgumentType(I.type);
}
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
};
class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
: TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
};
}
bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
// Promotable integer types are required to be promoted by the ABI.
if (Ty->isPromotableIntegerType())
return true;
// 32-bit values must also be promoted.
if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
switch (BT->getKind()) {
case BuiltinType::Int:
case BuiltinType::UInt:
return true;
default:
return false;
}
return false;
}
bool SystemZABIInfo::isCompoundType(QualType Ty) const {
return (Ty->isAnyComplexType() ||
Ty->isVectorType() ||
isAggregateTypeForABI(Ty));
}
bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
return (HasVector &&
Ty->isVectorType() &&
getContext().getTypeSize(Ty) <= 128);
}
bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
switch (BT->getKind()) {
case BuiltinType::Float:
case BuiltinType::Double:
return true;
default:
return false;
}
return false;
}
QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
if (const RecordType *RT = Ty->getAsStructureType()) {
const RecordDecl *RD = RT->getDecl();
QualType Found;
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
for (const auto &I : CXXRD->bases()) {
QualType Base = I.getType();
// Empty bases don't affect things either way.
if (isEmptyRecord(getContext(), Base, true))
continue;
if (!Found.isNull())
return Ty;
Found = GetSingleElementType(Base);
}
// Check the fields.
for (const auto *FD : RD->fields()) {
// For compatibility with GCC, ignore empty bitfields in C++ mode.
// Unlike isSingleElementStruct(), empty structure and array fields
// do count. So do anonymous bitfields that aren't zero-sized.
if (getContext().getLangOpts().CPlusPlus &&
FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
continue;
// Unlike isSingleElementStruct(), arrays do not count.
// Nested structures still do though.
if (!Found.isNull())
return Ty;
Found = GetSingleElementType(FD->getType());
}
// Unlike isSingleElementStruct(), trailing padding is allowed.
// An 8-byte aligned struct s { float f; } is passed as a double.
if (!Found.isNull())
return Found;
}
return Ty;
}
llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
// Assume that va_list type is correct; should be pointer to LLVM type:
// struct {
// i64 __gpr;
// i64 __fpr;
// i8 *__overflow_arg_area;
// i8 *__reg_save_area;
// };
// Every non-vector argument occupies 8 bytes and is passed by preference
// in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
// always passed on the stack.
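  // For illustration, derived from the logic below: va_arg(ap, double)
  // checks __gpr/__fpr against the 4-FPR limit; if it fits, the value is
  // loaded from __reg_save_area at byte offset 16*8 + __fpr*8, otherwise
  // from the next 8-byte slot at __overflow_arg_area.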
Ty = CGF.getContext().getCanonicalType(Ty);
llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
llvm::Type *APTy = llvm::PointerType::getUnqual(ArgTy);
ABIArgInfo AI = classifyArgumentType(Ty);
bool IsIndirect = AI.isIndirect();
bool InFPRs = false;
bool IsVector = false;
unsigned UnpaddedBitSize;
if (IsIndirect) {
APTy = llvm::PointerType::getUnqual(APTy);
UnpaddedBitSize = 64;
} else {
if (AI.getCoerceToType())
ArgTy = AI.getCoerceToType();
InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
IsVector = ArgTy->isVectorTy();
UnpaddedBitSize = getContext().getTypeSize(Ty);
}
unsigned PaddedBitSize = (IsVector && UnpaddedBitSize > 64) ? 128 : 64;
assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");
unsigned PaddedSize = PaddedBitSize / 8;
unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;
llvm::Type *IndexTy = CGF.Int64Ty;
llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
if (IsVector) {
// Work out the address of a vector argument on the stack.
// Vector arguments are always passed in the high bits of a
// single (8 byte) or double (16 byte) stack slot.
llvm::Value *OverflowArgAreaPtr =
CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 2,
"overflow_arg_area_ptr");
llvm::Value *OverflowArgArea =
CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
llvm::Value *MemAddr =
CGF.Builder.CreateBitCast(OverflowArgArea, APTy, "mem_addr");
// Update overflow_arg_area_ptr pointer
llvm::Value *NewOverflowArgArea =
CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
return MemAddr;
}
unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
if (InFPRs) {
MaxRegs = 4; // Maximum of 4 FPR arguments
RegCountField = 1; // __fpr
RegSaveIndex = 16; // save offset for f0
RegPadding = 0; // floats are passed in the high bits of an FPR
} else {
MaxRegs = 5; // Maximum of 5 GPR arguments
RegCountField = 0; // __gpr
RegSaveIndex = 2; // save offset for r2
RegPadding = Padding; // values are passed in the low bits of a GPR
}
llvm::Value *RegCountPtr = CGF.Builder.CreateStructGEP(
nullptr, VAListAddr, RegCountField, "reg_count_ptr");
llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
"fits_in_regs");
llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
// Emit code to load the value if it was passed in registers.
CGF.EmitBlock(InRegBlock);
// Work out the address of an argument register.
llvm::Value *ScaledRegCount =
CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
llvm::Value *RegBase =
llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
llvm::Value *RegOffset =
CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
llvm::Value *RegSaveAreaPtr =
CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3, "reg_save_area_ptr");
llvm::Value *RegSaveArea =
CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
llvm::Value *RawRegAddr =
CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
llvm::Value *RegAddr =
CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");
// Update the register count
llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
llvm::Value *NewRegCount =
CGF.Builder.CreateAdd(RegCount, One, "reg_count");
CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
CGF.EmitBranch(ContBlock);
// Emit code to load the value if it was passed in memory.
CGF.EmitBlock(InMemBlock);
// Work out the address of a stack argument.
llvm::Value *OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
nullptr, VAListAddr, 2, "overflow_arg_area_ptr");
llvm::Value *OverflowArgArea =
CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
llvm::Value *RawMemAddr =
CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
llvm::Value *MemAddr =
CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");
// Update overflow_arg_area_ptr pointer
llvm::Value *NewOverflowArgArea =
CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
CGF.EmitBranch(ContBlock);
// Return the appropriate result.
CGF.EmitBlock(ContBlock);
llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
ResAddr->addIncoming(RegAddr, InRegBlock);
ResAddr->addIncoming(MemAddr, InMemBlock);
if (IsIndirect)
return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");
return ResAddr;
}
ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
if (isVectorArgumentType(RetTy))
return ABIArgInfo::getDirect();
if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
return ABIArgInfo::getIndirect(0);
return (isPromotableIntegerType(RetTy) ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
// Handle the generic C++ ABI.
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
// Integers and enums are extended to full register width.
if (isPromotableIntegerType(Ty))
return ABIArgInfo::getExtend();
// Handle vector types and vector-like structure types. Note that
// as opposed to float-like structure types, we do not allow any
// padding for vector-like structures, so verify the sizes match.
uint64_t Size = getContext().getTypeSize(Ty);
QualType SingleElementTy = GetSingleElementType(Ty);
if (isVectorArgumentType(SingleElementTy) &&
getContext().getTypeSize(SingleElementTy) == Size)
return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
// Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
// Handle small structures.
if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // Structures with flexible arrays have variable length, so they really
    // fail the size test above.
const RecordDecl *RD = RT->getDecl();
if (RD->hasFlexibleArrayMember())
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
// The structure is passed as an unextended integer, a float, or a double.
llvm::Type *PassTy;
if (isFPArgumentType(SingleElementTy)) {
assert(Size == 32 || Size == 64);
if (Size == 32)
PassTy = llvm::Type::getFloatTy(getVMContext());
else
PassTy = llvm::Type::getDoubleTy(getVMContext());
} else
PassTy = llvm::IntegerType::get(getVMContext(), Size);
return ABIArgInfo::getDirect(PassTy);
}
// Non-structure compounds are passed indirectly.
if (isCompoundType(Ty))
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
return ABIArgInfo::getDirect(nullptr);
}
//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
: TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const override;
};
}
void MSP430TargetCodeGenInfo::setTargetAttributes(const Decl *D,
llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
// Handle 'interrupt' attribute:
llvm::Function *F = cast<llvm::Function>(GV);
// Step 1: Set ISR calling convention.
F->setCallingConv(llvm::CallingConv::MSP430_INTR);
// Step 2: Add attributes goodness.
F->addFnAttr(llvm::Attribute::NoInline);
// Step 3: Emit ISR vector alias.
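      // For example (illustrative): interrupt(4) halves the vector number
      // below, producing the alias "__isr_2" for this handler.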
unsigned Num = attr->getNumber() / 2;
llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
"__isr_" + Twine(Num), F);
}
}
}
//===----------------------------------------------------------------------===//
// MIPS ABI Implementation. This works for both little-endian and
// big-endian variants.
//===----------------------------------------------------------------------===//
namespace {
class MipsABIInfo : public ABIInfo {
bool IsO32;
unsigned MinABIStackAlignInBytes, StackAlignInBytes;
void CoerceToIntArgs(uint64_t TySize,
SmallVectorImpl<llvm::Type *> &ArgList) const;
llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  llvm::Type* getPaddingType(uint64_t OrigOffset, uint64_t Offset) const;
public:
MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
StackAlignInBytes(IsO32 ? 8 : 16) {}
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
void computeInfo(CGFunctionInfo &FI) const override;
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
bool shouldSignExtUnsignedType(QualType Ty) const override;
};
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
unsigned SizeOfUnwindException;
public:
MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
: TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
SizeOfUnwindException(IsO32 ? 24 : 32) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
return 29;
}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override {
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
if (!FD) return;
llvm::Function *Fn = cast<llvm::Function>(GV);
if (FD->hasAttr<Mips16Attr>()) {
Fn->addFnAttr("mips16");
}
else if (FD->hasAttr<NoMips16Attr>()) {
Fn->addFnAttr("nomips16");
}
}
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const override;
unsigned getSizeOfUnwindException() const override {
return SizeOfUnwindException;
}
};
}
void MipsABIInfo::CoerceToIntArgs(
uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
llvm::IntegerType *IntTy =
llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
// Add (TySize / MinABIStackAlignInBytes) args of IntTy.
for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
ArgList.push_back(IntTy);
// If necessary, add one more integer type to ArgList.
unsigned R = TySize % (MinABIStackAlignInBytes * 8);
if (R)
ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
}
// In N32/64, an aligned double precision floating point field is passed in
// a register.
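// For illustration, worked from the code below: on N64 a
//   struct { double d; int i; }
// (128 bits, with d 64-bit aligned at offset 0) coerces to { double, i64 }.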
llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
SmallVector<llvm::Type*, 8> ArgList, IntArgList;
if (IsO32) {
CoerceToIntArgs(TySize, ArgList);
return llvm::StructType::get(getVMContext(), ArgList);
}
if (Ty->isComplexType())
return CGT.ConvertType(Ty);
const RecordType *RT = Ty->getAs<RecordType>();
// Unions/vectors are passed in integer registers.
if (!RT || !RT->isStructureOrClassType()) {
CoerceToIntArgs(TySize, ArgList);
return llvm::StructType::get(getVMContext(), ArgList);
}
const RecordDecl *RD = RT->getDecl();
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
uint64_t LastOffset = 0;
unsigned idx = 0;
llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
// Iterate over fields in the struct/class and check if there are any aligned
// double fields.
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i, ++idx) {
const QualType Ty = i->getType();
const BuiltinType *BT = Ty->getAs<BuiltinType>();
if (!BT || BT->getKind() != BuiltinType::Double)
continue;
uint64_t Offset = Layout.getFieldOffset(idx);
if (Offset % 64) // Ignore doubles that are not aligned.
continue;
// Add ((Offset - LastOffset) / 64) args of type i64.
for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
ArgList.push_back(I64);
// Add double type.
ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
LastOffset = Offset + 64;
}
CoerceToIntArgs(TySize - LastOffset, IntArgList);
ArgList.append(IntArgList.begin(), IntArgList.end());
return llvm::StructType::get(getVMContext(), ArgList);
}
llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
uint64_t Offset) const {
if (OrigOffset + MinABIStackAlignInBytes > Offset)
return nullptr;
return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
}
ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
Ty = useFirstFieldIfTransparentUnion(Ty);
uint64_t OrigOffset = Offset;
uint64_t TySize = getContext().getTypeSize(Ty);
uint64_t Align = getContext().getTypeAlign(Ty) / 8;
Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
(uint64_t)StackAlignInBytes);
unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align);
Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
// Ignore empty aggregates.
if (TySize == 0)
return ABIArgInfo::getIgnore();
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
Offset = OrigOffset + MinABIStackAlignInBytes;
return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
}
// If we have reached here, aggregates are passed directly by coercing to
// another structure type. Padding is inserted if the offset of the
// aggregate is unaligned.
ABIArgInfo ArgInfo =
ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
getPaddingType(OrigOffset, CurrOffset));
ArgInfo.setInReg(true);
return ArgInfo;
}
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
// All integral types are promoted to the GPR width.
if (Ty->isIntegralOrEnumerationType())
return ABIArgInfo::getExtend();
return ABIArgInfo::getDirect(
nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
}
llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
const RecordType *RT = RetTy->getAs<RecordType>();
SmallVector<llvm::Type*, 8> RTList;
if (RT && RT->isStructureOrClassType()) {
const RecordDecl *RD = RT->getDecl();
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
unsigned FieldCnt = Layout.getFieldCount();
// N32/64 returns struct/classes in floating point registers if the
// following conditions are met:
// 1. The size of the struct/class is no larger than 128-bit.
// 2. The struct/class has one or two fields all of which are floating
// point types.
// 3. The offset of the first field is zero (this follows what gcc does).
//
// Any other composite results are returned in integer registers.
//
if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
for (; b != e; ++b) {
const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
if (!BT || !BT->isFloatingPoint())
break;
RTList.push_back(CGT.ConvertType(b->getType()));
}
if (b == e)
return llvm::StructType::get(getVMContext(), RTList,
RD->hasAttr<PackedAttr>());
RTList.clear();
}
}
CoerceToIntArgs(Size, RTList);
return llvm::StructType::get(getVMContext(), RTList);
}
ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
uint64_t Size = getContext().getTypeSize(RetTy);
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
// O32 doesn't treat zero-sized structs differently from other structs.
// However, N32/N64 ignores zero sized return values.
if (!IsO32 && Size == 0)
return ABIArgInfo::getIgnore();
if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
if (Size <= 128) {
if (RetTy->isAnyComplexType())
return ABIArgInfo::getDirect();
// O32 returns integer vectors in registers and N32/N64 returns all small
// aggregates in registers.
if (!IsO32 ||
(RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
ABIArgInfo ArgInfo =
ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
ArgInfo.setInReg(true);
return ArgInfo;
}
}
return ABIArgInfo::getIndirect(0);
}
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
return (RetTy->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
ABIArgInfo &RetInfo = FI.getReturnInfo();
if (!getCXXABI().classifyReturnType(FI))
RetInfo = classifyReturnType(FI.getReturnType());
// Check if a pointer to an aggregate is passed as a hidden argument.
uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
for (auto &I : FI.arguments())
I.info = classifyArgumentType(I.type, Offset);
}
llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
llvm::Type *BP = CGF.Int8PtrTy;
llvm::Type *BPP = CGF.Int8PtrPtrTy;
// Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
// Pointers are also promoted in the same way but this only matters for N32.
unsigned SlotSizeInBits = IsO32 ? 32 : 64;
unsigned PtrWidth = getTarget().getPointerWidth(0);
if ((Ty->isIntegerType() &&
CGF.getContext().getIntWidth(Ty) < SlotSizeInBits) ||
(Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
Ty = CGF.getContext().getIntTypeForBitwidth(SlotSizeInBits,
Ty->isSignedIntegerType());
}
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
int64_t TypeAlign =
std::min(getContext().getTypeAlign(Ty) / 8, StackAlignInBytes);
llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped;
llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
if (TypeAlign > MinABIStackAlignInBytes) {
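    // Round the cursor up to TypeAlign by computing
    // (addr + align - 1) & -align on the pointer reinterpreted as an integer.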
llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
}
else
AddrTyped = Builder.CreateBitCast(Addr, PTy);
llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
unsigned ArgSizeInBits = CGF.getContext().getTypeSize(Ty);
uint64_t Offset = llvm::RoundUpToAlignment(ArgSizeInBits / 8, TypeAlign);
llvm::Value *NextAddr =
Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
return AddrTyped;
}
bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
int TySize = getContext().getTypeSize(Ty);
// MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
return true;
return false;
}
bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be
  // as canonical as it gets.
// Everything on MIPS is 4 bytes. Double-precision FP registers
// are aliased to pairs of single-precision FP registers.
llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
// 0-31 are the general purpose registers, $0 - $31.
// 32-63 are the floating-point registers, $f0 - $f31.
// 64 and 65 are the multiply/divide registers, $hi and $lo.
// 66 is the (notional, I think) register for signal-handler return.
AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
// 67-74 are the floating-point status registers, $fcc0 - $fcc7.
// They are one bit wide and ignored here.
// 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
// (coprocessor 1 is the FP unit)
// 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
// 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
// 176-181 are the DSP accumulator registers.
AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
return false;
}
//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
//===----------------------------------------------------------------------===//
namespace {
class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
TCETargetCodeGenInfo(CodeGenTypes &CGT)
: DefaultTargetCodeGenInfo(CGT) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const override;
};
void TCETargetCodeGenInfo::setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
if (!FD) return;
llvm::Function *F = cast<llvm::Function>(GV);
if (M.getLangOpts().OpenCL) {
if (FD->hasAttr<OpenCLKernelAttr>()) {
// OpenCL C Kernel functions are not subject to inlining
F->addFnAttr(llvm::Attribute::NoInline);
const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
if (Attr) {
// Convert the reqd_work_group_size() attributes to metadata.
llvm::LLVMContext &Context = F->getContext();
llvm::NamedMDNode *OpenCLMetadata =
M.getModule().getOrInsertNamedMetadata(
"opencl.kernel_wg_size_info");
SmallVector<llvm::Metadata *, 5> Operands;
Operands.push_back(llvm::ConstantAsMetadata::get(F));
Operands.push_back(
llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
Operands.push_back(
llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
Operands.push_back(
llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
// Add a boolean constant operand for "required" (true) or "hint"
// (false) for implementing the work_group_size_hint attr later.
// Currently always true as the hint is not yet implemented.
Operands.push_back(
llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
}
}
}
}
}
//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
class HexagonABIInfo : public ABIInfo {
public:
HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
private:
ABIArgInfo classifyReturnType(QualType RetTy) const;
ABIArgInfo classifyArgumentType(QualType RetTy) const;
void computeInfo(CGFunctionInfo &FI) const override;
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
};
class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
:TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
return 29;
}
};
}
void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
for (auto &I : FI.arguments())
I.info = classifyArgumentType(I.type);
}
ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
return (Ty->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
// Ignore empty records.
if (isEmptyRecord(getContext(), Ty, true))
return ABIArgInfo::getIgnore();
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
uint64_t Size = getContext().getTypeSize(Ty);
if (Size > 64)
return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
// Pass in the smallest viable integer type.
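  // For example (illustrative): a 6-byte struct widens to a direct i64
  // argument, while a 3-byte struct widens to i32.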
else if (Size > 32)
return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
else if (Size > 16)
return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
else if (Size > 8)
return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
else
return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}
ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
// Large vector types should be returned via memory.
if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
return ABIArgInfo::getIndirect(0);
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
return (RetTy->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
if (isEmptyRecord(getContext(), RetTy, true))
return ABIArgInfo::getIgnore();
// Aggregates <= 8 bytes are returned in r0; other aggregates
// are returned indirectly.
uint64_t Size = getContext().getTypeSize(RetTy);
if (Size <= 64) {
// Return in the smallest viable integer type.
if (Size <= 8)
return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
if (Size <= 16)
return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
if (Size <= 32)
return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
}
return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
}
llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
// FIXME: Need to handle alignment
llvm::Type *BPP = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
"ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
llvm::Type *PTy =
llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
uint64_t Offset =
llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
llvm::Value *NextAddr =
Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
return AddrTyped;
}
//===----------------------------------------------------------------------===//
// AMDGPU ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
: TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const override;
};
}
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
const Decl *D,
llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const {
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
if (!FD)
return;
if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
llvm::Function *F = cast<llvm::Function>(GV);
uint32_t NumVGPR = Attr->getNumVGPR();
if (NumVGPR != 0)
F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
}
if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
llvm::Function *F = cast<llvm::Function>(GV);
unsigned NumSGPR = Attr->getNumSGPR();
if (NumSGPR != 0)
F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
}
}
// HLSL Change Begins
//===----------------------------------------------------------------------===//
// MSDXIL ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
class MSDXILABIInfo : public ABIInfo {
public:
MSDXILABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
ABIArgInfo classifyReturnType(QualType RetTy) const {
if (RetTy->isVoidType())
return ABIArgInfo::getIgnore();
if (isAggregateTypeForABI(RetTy))
return ABIArgInfo::getIndirect(0);
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
// do not use extend for hlsl.
return ABIArgInfo::getDirect(CGT.ConvertType(RetTy));
}
ABIArgInfo classifyArgumentType(QualType Ty) const;
void computeInfo(CGFunctionInfo &FI) const override;
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
class MSDXILTargetCodeGenInfo : public TargetCodeGenInfo {
public:
MSDXILTargetCodeGenInfo(CodeGenTypes &CGT)
: TargetCodeGenInfo(new MSDXILABIInfo(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const override;
private:
};
ABIArgInfo MSDXILABIInfo::classifyArgumentType(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
  // Return aggregate types as indirect by reference;
  // byval does not work for out parameters.
if (isAggregateTypeForABI(Ty))
return ABIArgInfo::getIndirect(0, /* byval */ false);
return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
: ABIArgInfo::getDirect());
}
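// For illustration (a sketch of the rules above, not verbatim DXC output):
// an HLSL struct parameter lowers to a pointer argument (indirect,
// byval=false) so out/inout copy-back still works, while an int parameter
// stays a direct scalar.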
void MSDXILABIInfo::computeInfo(CGFunctionInfo &FI) const {
QualType RetTy = FI.getReturnType();
if (RetTy->isVoidType()) {
FI.getReturnInfo() = ABIArgInfo::getIgnore();
} else if (isAggregateTypeForABI(RetTy)) {
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classifyReturnType(RetTy);
} else {
    // Return vector and matrix types directly.
FI.getReturnInfo() = classifyReturnType(RetTy);
}
for (auto &I : FI.arguments()) {
I.info = classifyArgumentType(I.type);
    // Do not flatten matrix types.
if (hlsl::IsHLSLMatType(I.type))
I.info.setCanBeFlattened(false);
}
// TODO: set calling convention
// Every function call will be inlined for now.
FI.setEffectiveCallingConvention(getRuntimeCC());
}
llvm::Value *MSDXILABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // TODO: support va_arg
  llvm_unreachable("printf not supported yet");
}
void MSDXILTargetCodeGenInfo::setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
if (!FD)
return;
// llvm::Function *F = cast<llvm::Function>(GV);
// TODO: add dxil attributes
}
}
// HLSL Change Ends
//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array, structs larger than 16 bytes are passed indirectly.
//
// One case requires special care:
//
// struct mixed {
// int i;
// float f;
// };
//
// When a struct mixed is passed by value, it only occupies 8 bytes in the
// parameter array, but the int is passed in an integer register, and the float
// is passed in a floating point register. This is represented as two arguments
// with the LLVM IR inreg attribute:
//
// declare void f(i32 inreg %i, float inreg %f)
//
// The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
//
namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
private:
ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
void computeInfo(CGFunctionInfo &FI) const override;
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
// Coercion type builder for structs passed in registers. The coercion type
// serves two purposes:
//
// 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
// in registers.
// 2. Expose aligned floating point elements as first-level elements, so the
// code generator knows to pass them in floating point registers.
//
// We also compute the InReg flag which indicates that the struct contains
// aligned 32-bit floats.
//
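  // For illustration, worked from the rules below:
  //   struct { float f; double d; }
  // coerces to { float, i32, double } with InReg set; the i32 pads f's
  // 64-bit word before the aligned double.
  //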
struct CoerceBuilder {
llvm::LLVMContext &Context;
const llvm::DataLayout &DL;
SmallVector<llvm::Type*, 8> Elems;
uint64_t Size;
bool InReg;
CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
: Context(c), DL(dl), Size(0), InReg(false) {}
// Pad Elems with integers until Size is ToSize.
void pad(uint64_t ToSize) {
assert(ToSize >= Size && "Cannot remove elements");
if (ToSize == Size)
return;
// Finish the current 64-bit word.
uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
if (Aligned > Size && Aligned <= ToSize) {
Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
Size = Aligned;
}
// Add whole 64-bit words.
while (Size + 64 <= ToSize) {
Elems.push_back(llvm::Type::getInt64Ty(Context));
Size += 64;
}
// Final in-word padding.
if (Size < ToSize) {
Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
Size = ToSize;
}
}
// Add a floating point element at Offset.
void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
// Unaligned floats are treated as integers.
if (Offset % Bits)
return;
// The InReg flag is only required if there are any floats < 64 bits.
if (Bits < 64)
InReg = true;
pad(Offset);
Elems.push_back(Ty);
Size = Offset + Bits;
}
// Add a struct type to the coercion type, starting at Offset (in bits).
void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
llvm::Type *ElemTy = StrTy->getElementType(i);
uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
switch (ElemTy->getTypeID()) {
case llvm::Type::StructTyID:
addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
break;
case llvm::Type::FloatTyID:
addFloat(ElemOffset, ElemTy, 32);
break;
case llvm::Type::DoubleTyID:
addFloat(ElemOffset, ElemTy, 64);
break;
case llvm::Type::FP128TyID:
addFloat(ElemOffset, ElemTy, 128);
break;
case llvm::Type::PointerTyID:
if (ElemOffset % 64 == 0) {
pad(ElemOffset);
Elems.push_back(ElemTy);
Size += 64;
}
break;
default:
break;
}
}
}
// Check if Ty is a usable substitute for the coercion type.
bool isUsableType(llvm::StructType *Ty) const {
return llvm::makeArrayRef(Elems) == Ty->elements();
}
// Get the coercion type as a literal struct type.
llvm::Type *getType() const {
if (Elems.size() == 1)
return Elems.front();
else
return llvm::StructType::get(Context, Elems);
}
};
};
} // end anonymous namespace
ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
if (Ty->isVoidType())
return ABIArgInfo::getIgnore();
uint64_t Size = getContext().getTypeSize(Ty);
// Anything too big to fit in registers is passed with an explicit indirect
// pointer / sret pointer.
if (Size > SizeLimit)
return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
// Integer types smaller than a register are extended.
if (Size < 64 && Ty->isIntegerType())
return ABIArgInfo::getExtend();
// Other non-aggregates go in registers.
if (!isAggregateTypeForABI(Ty))
return ABIArgInfo::getDirect();
// If a C++ object has either a non-trivial copy constructor or a non-trivial
// destructor, it is passed with an explicit indirect pointer / sret pointer.
if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
// This is a small aggregate type that should be passed in registers.
// Build a coercion type from the LLVM struct type.
llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
if (!StrTy)
return ABIArgInfo::getDirect();
CoerceBuilder CB(getVMContext(), getDataLayout());
CB.addStruct(0, StrTy);
CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));
// Try to use the original type for coercion.
llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
if (CB.InReg)
return ABIArgInfo::getDirectInReg(CoerceTy);
else
return ABIArgInfo::getDirect(CoerceTy);
}
llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
ABIArgInfo AI = classifyType(Ty, 16 * 8);
llvm::Type *ArgTy = CGT.ConvertType(Ty);
if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
AI.setCoerceToType(ArgTy);
llvm::Type *BPP = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
llvm::Value *ArgAddr;
unsigned Stride;
switch (AI.getKind()) {
case ABIArgInfo::Expand:
case ABIArgInfo::InAlloca:
llvm_unreachable("Unsupported ABI kind for va_arg");
case ABIArgInfo::Extend:
Stride = 8;
ArgAddr = Builder
.CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
"extend");
break;
case ABIArgInfo::Direct:
Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
ArgAddr = Addr;
break;
case ABIArgInfo::Indirect:
Stride = 8;
ArgAddr = Builder.CreateBitCast(Addr,
llvm::PointerType::getUnqual(ArgPtrTy),
"indirect");
ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
break;
case ABIArgInfo::Ignore:
return llvm::UndefValue::get(ArgPtrTy);
}
// Update VAList.
Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
Builder.CreateStore(Addr, VAListAddrAsBPP);
return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
}
void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
for (auto &I : FI.arguments())
I.info = classifyType(I.type, 16 * 8);
}
namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
: TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
return 14;
}
bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const override;
};
} // end anonymous namespace
bool
SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
// This is calculated from the LLVM and GCC tables and verified
// against gcc output. AFAIK all ABIs use the same encoding.
CodeGen::CGBuilderTy &Builder = CGF.Builder;
llvm::IntegerType *i8 = CGF.Int8Ty;
llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
// 0-31: the 8-byte general-purpose registers
AssignToArrayRange(Builder, Address, Eight8, 0, 31);
// 32-63: f0-31, the 4-byte floating-point registers
AssignToArrayRange(Builder, Address, Four8, 32, 63);
// Y = 64
// PSR = 65
// WIM = 66
// TBR = 67
// PC = 68
// NPC = 69
// FSR = 70
// CSR = 71
AssignToArrayRange(Builder, Address, Eight8, 64, 71);
// 72-87: d0-15, the 8-byte floating-point registers
AssignToArrayRange(Builder, Address, Eight8, 72, 87);
return false;
}
//===----------------------------------------------------------------------===//
// XCore ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
/// A SmallStringEnc instance is used to build up the TypeString by passing
/// it by reference between functions that append to it.
typedef llvm::SmallString<128> SmallStringEnc;
/// TypeStringCache caches the meta encodings of Types.
///
/// The reason for caching TypeStrings is two fold:
/// 1. To cache a type's encoding for later uses;
/// 2. As a means to break recursive member type inclusion.
///
/// A cache Entry can have a Status of:
/// NonRecursive: The type encoding is not recursive;
/// Recursive: The type encoding is recursive;
/// Incomplete: An incomplete TypeString;
/// IncompleteUsed: An incomplete TypeString that has been used in a
/// Recursive type encoding.
///
/// A NonRecursive entry will have all of its sub-members expanded as fully
/// as possible. Whilst it may contain types which are recursive, the type
/// itself is not recursive and thus its encoding may be safely used whenever
/// the type is encountered.
///
/// A Recursive entry will have all of its sub-members expanded as fully as
/// possible. The type itself is recursive and it may contain other types which
/// are recursive. The Recursive encoding must not be used during the expansion
/// of a recursive type's recursive branch. For simplicity the code uses
/// IncompleteCount to reject all usage of Recursive encodings for member types.
///
/// An Incomplete entry is always a RecordType and only encodes its
/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
/// are placed into the cache during type expansion as a means to identify and
/// handle recursive inclusion of types as sub-members. If there is recursion
/// the entry becomes IncompleteUsed.
///
/// During the expansion of a RecordType's members:
///
/// If the cache contains a NonRecursive encoding for the member type, the
/// cached encoding is used;
///
/// If the cache contains a Recursive encoding for the member type, the
/// cached encoding is 'Swapped' out, as it may be incorrect, and...
///
/// If the member is a RecordType, an Incomplete encoding is placed into the
/// cache to break potential recursive inclusion of itself as a sub-member;
///
/// Once a member RecordType has been expanded, its temporary incomplete
/// entry is removed from the cache. If a Recursive encoding was swapped out
/// it is swapped back in;
///
/// If an incomplete entry is used to expand a sub-member, the incomplete
/// entry is marked as IncompleteUsed. The cache keeps count of how many
/// IncompleteUsed entries it currently contains in IncompleteUsedCount;
///
/// If a member's encoding is found to be a NonRecursive or Recursive viz:
/// IncompleteUsedCount==0, the member's encoding is added to the cache.
/// Else the member is part of a recursive type and thus the recursion has
/// been exited too soon for the encoding to be correct for the member.
///
class TypeStringCache {
enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
struct Entry {
std::string Str; // The encoded TypeString for the type.
enum Status State; // Information about the encoding in 'Str'.
std::string Swapped; // A temporary place holder for a Recursive encoding
// during the expansion of RecordType's members.
};
std::map<const IdentifierInfo *, struct Entry> Map;
unsigned IncompleteCount; // Number of Incomplete entries in the Map.
unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {};
void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
bool removeIncomplete(const IdentifierInfo *ID);
void addIfComplete(const IdentifierInfo *ID, StringRef Str,
bool IsRecursive);
StringRef lookupStr(const IdentifierInfo *ID);
};
/// TypeString encodings for enum & union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
bool HasName;
std::string Enc;
public:
FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {};
StringRef str() {return Enc.c_str();};
bool operator<(const FieldEncoding &rhs) const {
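    // Named fields order before unnamed ones; ties fall back to comparing
    // the encoded strings.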
if (HasName != rhs.HasName) return HasName;
return Enc < rhs.Enc;
}
};
class XCoreABIInfo : public DefaultABIInfo {
public:
XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const override;
};
class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
mutable TypeStringCache TSC;
public:
XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
:TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const override;
};
} // End anonymous namespace.
llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
CGBuilderTy &Builder = CGF.Builder;
// Get the VAList.
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr,
CGF.Int8PtrPtrTy);
llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP);
// Handle the argument.
ABIArgInfo AI = classifyArgumentType(Ty);
llvm::Type *ArgTy = CGT.ConvertType(Ty);
if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
AI.setCoerceToType(ArgTy);
llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
llvm::Value *Val;
uint64_t ArgSize = 0;
switch (AI.getKind()) {
case ABIArgInfo::Expand:
case ABIArgInfo::InAlloca:
default: // HLSL Change - fix warning for uninitialized memory later on for unreachable cases
llvm_unreachable("Unsupported ABI kind for va_arg");
case ABIArgInfo::Ignore:
Val = llvm::UndefValue::get(ArgPtrTy);
ArgSize = 0;
break;
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
Val = Builder.CreatePointerCast(AP, ArgPtrTy);
ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
if (ArgSize < 4)
ArgSize = 4;
break;
case ABIArgInfo::Indirect:
llvm::Value *ArgAddr;
ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy));
ArgAddr = Builder.CreateLoad(ArgAddr);
Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy);
ArgSize = 4;
break;
}
// Increment the VAList.
if (ArgSize) {
llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize);
Builder.CreateStore(APN, VAListAddrAsBPP);
}
return Val;
}
/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion.
/// If there is a Recursive encoding in the cache, it is swapped out and will
/// be reinserted by removeIncomplete().
/// All other types of encoding should have been used rather than arriving here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
std::string StubEnc) {
if (!ID)
return;
Entry &E = Map[ID];
assert( (E.Str.empty() || E.State == Recursive) &&
"Incorrectly use of addIncomplete");
assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
E.Swapped.swap(E.Str); // swap out the Recursive
E.Str.swap(StubEnc);
E.State = Incomplete;
++IncompleteCount;
}
/// Once the RecordType has been expanded, the temporary incomplete TypeString
/// must be removed from the cache.
/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
/// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
if (!ID)
return false;
auto I = Map.find(ID);
assert(I != Map.end() && "Entry not present");
Entry &E = I->second;
assert( (E.State == Incomplete ||
E.State == IncompleteUsed) &&
"Entry must be an incomplete type");
bool IsRecursive = false;
if (E.State == IncompleteUsed) {
// We made use of our Incomplete encoding, thus we are recursive.
IsRecursive = true;
--IncompleteUsedCount;
}
if (E.Swapped.empty())
Map.erase(I);
else {
// Swap the Recursive back.
E.Swapped.swap(E.Str);
E.Swapped.clear();
E.State = Recursive;
}
--IncompleteCount;
return IsRecursive;
}
/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
bool IsRecursive) {
if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type, so don't add.
Entry &E = Map[ID];
if (IsRecursive && !E.Str.empty()) {
assert(E.State==Recursive && E.Str.size() == Str.size() &&
"This is not the same Recursive entry");
// The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry after all, but we assumed the worst when
    // we started, viz: IncompleteCount != 0.
return;
}
assert(E.Str.empty() && "Entry already present");
E.Str = Str.str();
E.State = IsRecursive? Recursive : NonRecursive;
}
/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are recursively expanding a type (IncompleteCount != 0) and the cached
/// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
if (!ID)
return StringRef(); // We have no key.
auto I = Map.find(ID);
if (I == Map.end())
return StringRef(); // We have no encoding.
Entry &E = I->second;
if (E.State == Recursive && IncompleteCount)
return StringRef(); // We don't use Recursive encodings for member types.
if (E.State == Incomplete) {
// The incomplete type is being used to break out of recursion.
E.State = IncompleteUsed;
++IncompleteUsedCount;
}
return E.Str.c_str();
}
/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers et al.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into meta data for all global
/// symbols: definitions, declarations, functions & variables.
///
/// The TypeString carries type, qualifier, name, size & value details.
/// Please see 'Tools Development Guide' section 2.16.2 for format details:
/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
/// The output is tested by test/CodeGen/xcore-stringtype.c.
///
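/// For illustration (format per the guide above; authoritative examples live
/// in the test file): 'struct S { int m; };' encodes roughly as
/// "s(S){m(m){si}}", and 'enum E { A, B };' as "e(E){m(A){0},m(B){1}}".
///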
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const {
SmallStringEnc Enc;
if (getTypeString(Enc, D, CGM, TSC)) {
llvm::LLVMContext &Ctx = CGM.getModule().getContext();
llvm::SmallVector<llvm::Metadata *, 2> MDVals;
MDVals.push_back(llvm::ConstantAsMetadata::get(GV));
MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
llvm::NamedMDNode *MD =
CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
}
static bool appendType(SmallStringEnc &Enc, QualType QType,
const CodeGen::CodeGenModule &CGM,
TypeStringCache &TSC);
/// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
const RecordDecl *RD,
const CodeGen::CodeGenModule &CGM,
TypeStringCache &TSC) {
for (const auto *Field : RD->fields()) {
SmallStringEnc Enc;
Enc += "m(";
Enc += Field->getName();
Enc += "){";
if (Field->isBitField()) {
Enc += "b(";
llvm::raw_svector_ostream OS(Enc);
OS.resync();
OS << Field->getBitWidthValue(CGM.getContext());
OS.flush();
Enc += ':';
}
if (!appendType(Enc, Field->getType(), CGM, TSC))
return false;
if (Field->isBitField())
Enc += ')';
Enc += '}';
FE.emplace_back(!Field->getName().empty(), Enc);
}
return true;
}
/// Appends structure and union types to Enc and adds encoding to cache.
/// Recursively calls appendType (via extractFieldType) for each field.
/// Union types have their fields ordered according to the ABI.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
const CodeGen::CodeGenModule &CGM,
TypeStringCache &TSC, const IdentifierInfo *ID) {
// Append the cached TypeString if we have one.
StringRef TypeString = TSC.lookupStr(ID);
if (!TypeString.empty()) {
Enc += TypeString;
return true;
}
// Start to emit an incomplete TypeString.
size_t Start = Enc.size();
Enc += (RT->isUnionType()? 'u' : 's');
Enc += '(';
if (ID)
Enc += ID->getName();
Enc += "){";
// We collect all encoded fields and order as necessary.
bool IsRecursive = false;
const RecordDecl *RD = RT->getDecl()->getDefinition();
if (RD && !RD->field_empty()) {
// An incomplete TypeString stub is placed in the cache for this RecordType
// so that recursive calls to this RecordType will use it whilst building a
// complete TypeString for this RecordType.
SmallVector<FieldEncoding, 16> FE;
std::string StubEnc(Enc.substr(Start).str());
StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
TSC.addIncomplete(ID, std::move(StubEnc));
if (!extractFieldType(FE, RD, CGM, TSC)) {
(void) TSC.removeIncomplete(ID);
return false;
}
IsRecursive = TSC.removeIncomplete(ID);
// The ABI requires unions to be sorted but not structures.
// See FieldEncoding::operator< for sort algorithm.
if (RT->isUnionType())
std::sort(FE.begin(), FE.end());
// We can now complete the TypeString.
unsigned E = FE.size();
for (unsigned I = 0; I != E; ++I) {
if (I)
Enc += ',';
Enc += FE[I].str();
}
}
Enc += '}';
TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
return true;
}
/// Appends enum types to Enc and adds the encoding to the cache.
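/// For example, 'enum E { A = 1 };' encodes as "e(E){m(A){1}}".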
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
TypeStringCache &TSC,
const IdentifierInfo *ID) {
// Append the cached TypeString if we have one.
StringRef TypeString = TSC.lookupStr(ID);
if (!TypeString.empty()) {
Enc += TypeString;
return true;
}
size_t Start = Enc.size();
Enc += "e(";
if (ID)
Enc += ID->getName();
Enc += "){";
// We collect all encoded enumerations and order them alphanumerically.
if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
SmallVector<FieldEncoding, 16> FE;
for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
++I) {
SmallStringEnc EnumEnc;
EnumEnc += "m(";
EnumEnc += I->getName();
EnumEnc += "){";
I->getInitVal().toString(EnumEnc);
EnumEnc += '}';
FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
}
std::sort(FE.begin(), FE.end());
unsigned E = FE.size();
for (unsigned I = 0; I != E; ++I) {
if (I)
Enc += ',';
Enc += FE[I].str();
}
}
Enc += '}';
TSC.addIfComplete(ID, Enc.substr(Start), false);
return true;
}
/// Appends type's qualifier to Enc.
/// This is done prior to appending the type's encoding.
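/// For example, a 'const volatile' qualified type gains the prefix "cv:",
/// while an unqualified type gains no prefix at all.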
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
// Qualifiers are emitted in alphabetical order.
static const char *Table[] = {"","c:","r:","cr:","v:","cv:","rv:","crv:"};
int Lookup = 0;
if (QT.isConstQualified())
Lookup += 1<<0;
if (QT.isRestrictQualified())
Lookup += 1<<1;
if (QT.isVolatileQualified())
Lookup += 1<<2;
Enc += Table[Lookup];
}
/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
const char *EncType;
switch (BT->getKind()) {
case BuiltinType::Void:
EncType = "0";
break;
case BuiltinType::Bool:
EncType = "b";
break;
case BuiltinType::Char_U:
EncType = "uc";
break;
case BuiltinType::UChar:
EncType = "uc";
break;
case BuiltinType::SChar:
EncType = "sc";
break;
case BuiltinType::UShort:
EncType = "us";
break;
case BuiltinType::Short:
EncType = "ss";
break;
case BuiltinType::UInt:
EncType = "ui";
break;
case BuiltinType::Int:
EncType = "si";
break;
case BuiltinType::ULong:
EncType = "ul";
break;
case BuiltinType::Long:
EncType = "sl";
break;
case BuiltinType::ULongLong:
EncType = "ull";
break;
case BuiltinType::LongLong:
EncType = "sll";
break;
case BuiltinType::Float:
EncType = "ft";
break;
case BuiltinType::Double:
EncType = "d";
break;
case BuiltinType::LongDouble:
EncType = "ld";
break;
default:
return false;
}
Enc += EncType;
return true;
}
/// Appends a pointer encoding to Enc before calling appendType for the pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
const CodeGen::CodeGenModule &CGM,
TypeStringCache &TSC) {
Enc += "p(";
if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
return false;
Enc += ')';
return true;
}
/// Appends array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
const ArrayType *AT,
const CodeGen::CodeGenModule &CGM,
TypeStringCache &TSC, StringRef NoSizeEnc) {
if (AT->getSizeModifier() != ArrayType::Normal)
return false;
Enc += "a(";
if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
CAT->getSize().toStringUnsigned(Enc);
else
Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
Enc += ':';
// The Qualifiers should be attached to the type rather than the array.
appendQualifier(Enc, QT);
if (!appendType(Enc, AT->getElementType(), CGM, TSC))
return false;
Enc += ')';
return true;
}
/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
const CodeGen::CodeGenModule &CGM,
TypeStringCache &TSC) {
Enc += "f{";
if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
return false;
Enc += "}(";
if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
// N.B. we are only interested in the adjusted param types.
auto I = FPT->param_type_begin();
auto E = FPT->param_type_end();
if (I != E) {
do {
if (!appendType(Enc, *I, CGM, TSC))
return false;
++I;
if (I != E)
Enc += ',';
} while (I != E);
if (FPT->isVariadic())
Enc += ",va";
} else {
if (FPT->isVariadic())
Enc += "va";
else
Enc += '0';
}
}
Enc += ')';
return true;
}
/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
const CodeGen::CodeGenModule &CGM,
TypeStringCache &TSC) {
QualType QT = QType.getCanonicalType();
if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
// The Qualifiers should be attached to the type rather than the array.
// Thus we don't call appendQualifier() here.
return appendArrayType(Enc, QT, AT, CGM, TSC, "");
appendQualifier(Enc, QT);
if (const BuiltinType *BT = QT->getAs<BuiltinType>())
return appendBuiltinType(Enc, BT);
if (const PointerType *PT = QT->getAs<PointerType>())
return appendPointerType(Enc, PT, CGM, TSC);
if (const EnumType *ET = QT->getAs<EnumType>())
return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
if (const RecordType *RT = QT->getAsStructureType())
return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
if (const RecordType *RT = QT->getAsUnionType())
return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
if (const FunctionType *FT = QT->getAs<FunctionType>())
return appendFunctionType(Enc, FT, CGM, TSC);
return false;
}
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
if (!D)
return false;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->getLanguageLinkage() != CLanguageLinkage)
return false;
return appendType(Enc, FD->getType(), CGM, TSC);
}
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
if (VD->getLanguageLinkage() != CLanguageLinkage)
return false;
QualType QT = VD->getType().getCanonicalType();
if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
// Global ArrayTypes are given a size of '*' if the size is unknown.
// The Qualifiers should be attached to the type rather than the array.
// Thus we don't call appendQualifier() here.
return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
}
return appendType(Enc, QT, CGM, TSC);
}
return false;
}
//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//
const llvm::Triple &CodeGenModule::getTriple() const {
return getTarget().getTriple();
}
bool CodeGenModule::supportsCOMDAT() const {
return !getTriple().isOSBinFormatMachO();
}
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
if (TheTargetCodeGenInfo)
return *TheTargetCodeGenInfo;
const llvm::Triple &Triple = getTarget().getTriple();
switch (Triple.getArch()) {
default:
TheTargetCodeGenInfo.reset(new DefaultTargetCodeGenInfo(Types)); break; // HLSL Change - reset
#if 0 // HLSL Change Starts
case llvm::Triple::le32:
return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
case llvm::Triple::mips:
case llvm::Triple::mipsel:
if (Triple.getOS() == llvm::Triple::NaCl)
return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be: {
AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
if (getTarget().getABI() == "darwinpcs")
Kind = AArch64ABIInfo::DarwinPCS;
return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind));
}
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
{
if (Triple.getOS() == llvm::Triple::Win32) {
TheTargetCodeGenInfo =
new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP);
return *TheTargetCodeGenInfo;
}
ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
if (getTarget().getABI() == "apcs-gnu")
Kind = ARMABIInfo::APCS;
else if (CodeGenOpts.FloatABI == "hard" ||
(CodeGenOpts.FloatABI != "soft" &&
Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
Kind = ARMABIInfo::AAPCS_VFP;
return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
}
case llvm::Triple::ppc:
return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
case llvm::Triple::ppc64:
if (Triple.isOSBinFormatELF()) {
PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
if (getTarget().getABI() == "elfv2")
Kind = PPC64_SVR4_ABIInfo::ELFv2;
bool HasQPX = getTarget().getABI() == "elfv1-qpx";
return *(TheTargetCodeGenInfo =
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
} else
return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
case llvm::Triple::ppc64le: {
assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
Kind = PPC64_SVR4_ABIInfo::ELFv1;
bool HasQPX = getTarget().getABI() == "elfv1-qpx";
return *(TheTargetCodeGenInfo =
new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
}
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));
case llvm::Triple::msp430:
return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
case llvm::Triple::systemz: {
bool HasVector = getTarget().getABI() == "vector";
return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types,
HasVector));
}
case llvm::Triple::tce:
return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
case llvm::Triple::x86: {
bool IsDarwinVectorABI = Triple.isOSDarwin();
bool IsSmallStructInRegABI =
X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
if (Triple.getOS() == llvm::Triple::Win32) {
return *(TheTargetCodeGenInfo = new WinX86_32TargetCodeGenInfo(
Types, IsDarwinVectorABI, IsSmallStructInRegABI,
IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
} else {
return *(TheTargetCodeGenInfo = new X86_32TargetCodeGenInfo(
Types, IsDarwinVectorABI, IsSmallStructInRegABI,
IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
}
}
case llvm::Triple::x86_64: {
StringRef ABI = getTarget().getABI();
X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512 :
ABI == "avx" ? X86AVXABILevel::AVX :
X86AVXABILevel::None);
switch (Triple.getOS()) {
case llvm::Triple::Win32:
return *(TheTargetCodeGenInfo =
new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
case llvm::Triple::PS4:
return *(TheTargetCodeGenInfo =
new PS4TargetCodeGenInfo(Types, AVXLevel));
default:
return *(TheTargetCodeGenInfo =
new X86_64TargetCodeGenInfo(Types, AVXLevel));
}
}
case llvm::Triple::hexagon:
return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
case llvm::Triple::r600:
return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
case llvm::Triple::amdgcn:
return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
case llvm::Triple::sparcv9:
return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
case llvm::Triple::xcore:
return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types));
#endif // HLSL Change Ends
// HLSL Change Begins
case llvm::Triple::dxil:
case llvm::Triple::dxil64:
TheTargetCodeGenInfo.reset(new MSDXILTargetCodeGenInfo(Types));
break;
// HLSL Change Ends
}
return *(TheTargetCodeGenInfo.get());
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGDebugInfo.h | //===--- CGDebugInfo.h - DebugInfo for LLVM CodeGen -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the source-level debug info generator for llvm translation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGDEBUGINFO_H
#define LLVM_CLANG_LIB_CODEGEN_CGDEBUGINFO_H
#include "CGBuilder.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Allocator.h"
namespace llvm {
class MDNode;
}
namespace clang {
class CXXMethodDecl;
class VarDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
class ClassTemplateSpecializationDecl;
class GlobalDecl;
class UsingDecl;
namespace CodeGen {
class CodeGenModule;
class CodeGenFunction;
class CGBlockInfo;
/// This class gathers all debug information during compilation and is
/// responsible for emitting to llvm globals or pass directly to the
/// backend.
class CGDebugInfo {
friend class ApplyDebugLocation;
friend class SaveAndRestoreLocation;
CodeGenModule &CGM;
const CodeGenOptions::DebugInfoKind DebugKind;
llvm::DIBuilder DBuilder;
llvm::DICompileUnit *TheCU = nullptr;
SourceLocation CurLoc;
llvm::DIType *VTablePtrType = nullptr;
llvm::DIType *ClassTy = nullptr;
llvm::DICompositeType *ObjTy = nullptr;
llvm::DIType *SelTy = nullptr;
llvm::DIType *OCLImage1dDITy = nullptr;
llvm::DIType *OCLImage1dArrayDITy = nullptr;
llvm::DIType *OCLImage1dBufferDITy = nullptr;
llvm::DIType *OCLImage2dDITy = nullptr;
llvm::DIType *OCLImage2dArrayDITy = nullptr;
llvm::DIType *OCLImage3dDITy = nullptr;
llvm::DIType *OCLEventDITy = nullptr;
/// Cache of previously constructed Types.
llvm::DenseMap<const void *, llvm::TrackingMDRef> TypeCache;
struct ObjCInterfaceCacheEntry {
const ObjCInterfaceType *Type;
llvm::DIType *Decl;
llvm::DIFile *Unit;
ObjCInterfaceCacheEntry(const ObjCInterfaceType *Type, llvm::DIType *Decl,
llvm::DIFile *Unit)
: Type(Type), Decl(Decl), Unit(Unit) {}
};
/// Cache of previously constructed interfaces which may change.
llvm::SmallVector<ObjCInterfaceCacheEntry, 32> ObjCInterfaceCache;
/// Cache of references to AST files such as PCHs or modules.
llvm::DenseMap<uint64_t, llvm::DIModule *> ModuleRefCache;
/// List of interfaces we want to keep even if orphaned.
std::vector<void *> RetainedTypes;
/// Cache of forward declared types to RAUW at the end of
/// compilation.
std::vector<std::pair<const TagType *, llvm::TrackingMDRef>> ReplaceMap;
/// Cache of replaceable forward declarations (functions and
/// variables) to RAUW at the end of compilation.
std::vector<std::pair<const DeclaratorDecl *, llvm::TrackingMDRef>>
FwdDeclReplaceMap;
/// Keep track of our current nested lexical block.
std::vector<llvm::TypedTrackingMDRef<llvm::DIScope>> LexicalBlockStack;
llvm::DenseMap<const Decl *, llvm::TrackingMDRef> RegionMap;
/// Keep track of LexicalBlockStack counter at the beginning of a
/// function. This is used to pop unbalanced regions at the end of a
/// function.
std::vector<unsigned> FnBeginRegionCount;
/// This is a storage for names that are constructed on demand. For
/// example, C++ destructors, C++ operators etc..
llvm::BumpPtrAllocator DebugInfoNames;
StringRef CWDName;
llvm::DenseMap<const char *, llvm::TrackingMDRef> DIFileCache;
llvm::DenseMap<const FunctionDecl *, llvm::TrackingMDRef> SPCache;
/// Cache declarations relevant to DW_TAG_imported_declarations (C++
/// using declarations) that aren't covered by other more specific caches.
llvm::DenseMap<const Decl *, llvm::TrackingMDRef> DeclCache;
llvm::DenseMap<const NamespaceDecl *, llvm::TrackingMDRef> NameSpaceCache;
llvm::DenseMap<const NamespaceAliasDecl *, llvm::TrackingMDRef>
NamespaceAliasCache;
llvm::DenseMap<const Decl *, llvm::TrackingMDRef> StaticDataMemberCache;
/// Helper functions for getOrCreateType.
/// @{
/// Currently the checksum of an interface includes the number of
/// ivars and property accessors.
unsigned Checksum(const ObjCInterfaceDecl *InterfaceDecl);
llvm::DIType *CreateType(const BuiltinType *Ty);
llvm::DIType *CreateType(const ComplexType *Ty);
llvm::DIType *CreateQualifiedType(QualType Ty, llvm::DIFile *Fg);
llvm::DIType *CreateType(const TypedefType *Ty, llvm::DIFile *Fg);
llvm::DIType *CreateType(const TemplateSpecializationType *Ty,
llvm::DIFile *Fg);
llvm::DIType *CreateType(const ObjCObjectPointerType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const PointerType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const BlockPointerType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const FunctionType *Ty, llvm::DIFile *F);
/// Get structure or union type.
llvm::DIType *CreateType(const RecordType *Tyg);
llvm::DIType *CreateTypeDefinition(const RecordType *Ty);
llvm::DICompositeType *CreateLimitedType(const RecordType *Ty);
void CollectContainingType(const CXXRecordDecl *RD,
llvm::DICompositeType *CT);
/// Get Objective-C interface type.
llvm::DIType *CreateType(const ObjCInterfaceType *Ty, llvm::DIFile *F);
llvm::DIType *CreateTypeDefinition(const ObjCInterfaceType *Ty,
llvm::DIFile *F);
/// Get Objective-C object type.
llvm::DIType *CreateType(const ObjCObjectType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const VectorType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const ArrayType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const LValueReferenceType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const RValueReferenceType *Ty, llvm::DIFile *Unit);
llvm::DIType *CreateType(const MemberPointerType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const AtomicType *Ty, llvm::DIFile *F);
/// Get enumeration type.
llvm::DIType *CreateEnumType(const EnumType *Ty);
llvm::DIType *CreateTypeDefinition(const EnumType *Ty);
/// Look up the completed type for a self pointer in the TypeCache and
/// create a copy of it with the ObjectPointer and Artificial flags
/// set. If the type is not cached, a new one is created. This should
/// never happen though, since creating a type for the implicit self
/// argument implies that we already parsed the interface definition
/// and the ivar declarations in the implementation.
llvm::DIType *CreateSelfType(const QualType &QualTy, llvm::DIType *Ty);
/// @}
/// Get the type from the cache or return null type if it doesn't
/// exist.
llvm::DIType *getTypeOrNull(const QualType);
/// Return the debug type for a C++ method.
/// The method's FunctionType does not include the implicit \c this
/// pointer. Use this routine to get a method type which does include
/// \c this.
llvm::DISubroutineType *getOrCreateMethodType(const CXXMethodDecl *Method,
llvm::DIFile *F);
llvm::DISubroutineType *
getOrCreateInstanceMethodType(QualType ThisPtr, const FunctionProtoType *Func,
llvm::DIFile *Unit);
llvm::DISubroutineType *
getOrCreateFunctionType(const Decl *D, QualType FnType, llvm::DIFile *F);
/// \return debug info descriptor for vtable.
llvm::DIType *getOrCreateVTablePtrType(llvm::DIFile *F);
/// \return namespace descriptor for the given namespace decl.
llvm::DINamespace *getOrCreateNameSpace(const NamespaceDecl *N);
llvm::DIType *getOrCreateTypeDeclaration(QualType PointeeTy, llvm::DIFile *F);
llvm::DIType *CreatePointerLikeType(llvm::dwarf::Tag Tag, const Type *Ty,
QualType PointeeTy, llvm::DIFile *F);
llvm::Value *getCachedInterfaceTypeOrNull(const QualType Ty);
llvm::DIType *getOrCreateStructPtrType(StringRef Name, llvm::DIType *&Cache);
/// A helper function to create a subprogram for a single member
/// function GlobalDecl.
llvm::DISubprogram *CreateCXXMemberFunction(const CXXMethodDecl *Method,
llvm::DIFile *F,
llvm::DIType *RecordTy);
/// A helper function to collect debug info for C++ member
/// functions. This is used while creating debug info entry for a
/// Record.
void CollectCXXMemberFunctions(const CXXRecordDecl *Decl, llvm::DIFile *F,
SmallVectorImpl<llvm::Metadata *> &E,
llvm::DIType *T);
/// A helper function to collect debug info for C++ base
/// classes. This is used while creating debug info entry for a
/// Record.
void CollectCXXBases(const CXXRecordDecl *Decl, llvm::DIFile *F,
SmallVectorImpl<llvm::Metadata *> &EltTys,
llvm::DIType *RecordTy);
/// A helper function to collect template parameters.
llvm::DINodeArray CollectTemplateParams(const TemplateParameterList *TPList,
ArrayRef<TemplateArgument> TAList,
llvm::DIFile *Unit);
/// A helper function to collect debug info for function template
/// parameters.
llvm::DINodeArray CollectFunctionTemplateParams(const FunctionDecl *FD,
llvm::DIFile *Unit);
/// A helper function to collect debug info for template
/// parameters.
llvm::DINodeArray
CollectCXXTemplateParams(const ClassTemplateSpecializationDecl *TS,
llvm::DIFile *F);
llvm::DIType *createFieldType(StringRef name, QualType type,
uint64_t sizeInBitsOverride, SourceLocation loc,
AccessSpecifier AS, uint64_t offsetInBits,
llvm::DIFile *tunit, llvm::DIScope *scope,
const RecordDecl *RD = nullptr);
/// Helpers for collecting fields of a record.
/// @{
void CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl,
SmallVectorImpl<llvm::Metadata *> &E,
llvm::DIType *RecordTy);
llvm::DIDerivedType *CreateRecordStaticField(const VarDecl *Var,
llvm::DIType *RecordTy,
const RecordDecl *RD);
void CollectRecordNormalField(const FieldDecl *Field, uint64_t OffsetInBits,
llvm::DIFile *F,
SmallVectorImpl<llvm::Metadata *> &E,
llvm::DIType *RecordTy, const RecordDecl *RD);
void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile *F,
SmallVectorImpl<llvm::Metadata *> &E,
llvm::DICompositeType *RecordTy);
/// If the C++ class has vtable info then insert appropriate debug
/// info entry in EltTys vector.
void CollectVTableInfo(const CXXRecordDecl *Decl, llvm::DIFile *F,
SmallVectorImpl<llvm::Metadata *> &EltTys);
/// @}
/// Create a new lexical block node and push it on the stack.
void CreateLexicalBlock(SourceLocation Loc);
// HLSL Change Begins
private:
bool
TryCollectHLSLRecordElements(const RecordType *Ty,
llvm::DICompositeType *DITy,
SmallVectorImpl<llvm::Metadata *> &Elements);
// HLSL Change Ends
public:
CGDebugInfo(CodeGenModule &CGM);
~CGDebugInfo();
void finalize();
/// Update the current source location. If \arg Loc is invalid it is
/// ignored.
void setLocation(SourceLocation Loc);
/// Emit metadata to indicate a change in line/column information in
/// the source file. If the location is invalid, the previous
/// location will be reused.
void EmitLocation(CGBuilderTy &Builder, SourceLocation Loc);
/// Emit a call to llvm.dbg.function.start to indicate
/// start of a new function.
/// \param Loc The location of the function header.
/// \param ScopeLoc The location of the function body.
void EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
SourceLocation ScopeLoc, QualType FnType,
llvm::Function *Fn, CGBuilderTy &Builder);
/// Constructs the debug code for exiting a function.
void EmitFunctionEnd(CGBuilderTy &Builder);
/// Emit metadata to indicate the beginning of a new lexical block
/// and push the block onto the stack.
void EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc);
/// Emit metadata to indicate the end of a new lexical block and pop
/// the current block.
void EmitLexicalBlockEnd(CGBuilderTy &Builder, SourceLocation Loc);
/// Emit call to \c llvm.dbg.declare for an automatic variable
/// declaration.
void EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI,
CGBuilderTy &Builder);
/// Emit call to \c llvm.dbg.declare for an imported variable
/// declaration in a block.
void EmitDeclareOfBlockDeclRefVariable(const VarDecl *variable,
llvm::Value *storage,
CGBuilderTy &Builder,
const CGBlockInfo &blockInfo,
llvm::Instruction *InsertPoint = nullptr);
/// Emit call to \c llvm.dbg.declare for an argument variable
/// declaration.
void EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
unsigned ArgNo, CGBuilderTy &Builder);
/// Emit call to \c llvm.dbg.declare for the block-literal argument
/// to a block invocation function.
void EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::Value *Arg, unsigned ArgNo,
llvm::Value *LocalAddr,
CGBuilderTy &Builder);
/// Emit information about a global variable.
void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
/// Emit global variable's debug info.
void EmitGlobalVariable(const ValueDecl *VD, llvm::Constant *Init);
/// Emit C++ using directive.
void EmitUsingDirective(const UsingDirectiveDecl &UD);
/// Emit the type explicitly casted to.
void EmitExplicitCastType(QualType Ty);
/// Emit C++ using declaration.
void EmitUsingDecl(const UsingDecl &UD);
/// Emit an @import declaration.
void EmitImportDecl(const ImportDecl &ID);
/// Emit C++ namespace alias.
llvm::DIImportedEntity *EmitNamespaceAlias(const NamespaceAliasDecl &NA);
/// Emit record type's standalone debug info.
llvm::DIType *getOrCreateRecordType(QualType Ty, SourceLocation L);
/// Emit an Objective-C interface type standalone debug info.
llvm::DIType *getOrCreateInterfaceType(QualType Ty, SourceLocation Loc);
void completeType(const EnumDecl *ED);
void completeType(const RecordDecl *RD);
void completeRequiredType(const RecordDecl *RD);
void completeClassData(const RecordDecl *RD);
void completeTemplateDefinition(const ClassTemplateSpecializationDecl &SD);
private:
/// Emit call to llvm.dbg.declare for a variable declaration.
/// \p Tag is either one of the custom tags DW_TAG_arg_variable and
/// DW_TAG_auto_variable, or a standard llvm::dwarf::Tag value.
void EmitDeclare(const VarDecl *decl, llvm::dwarf::Tag Tag, llvm::Value *AI,
unsigned ArgNo, CGBuilderTy &Builder);
/// Build up structure info for the byref. See \a BuildByRefType.
llvm::DIType *EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
uint64_t *OffSet);
/// Get context info for the decl.
llvm::DIScope *getContextDescriptor(const Decl *Decl);
llvm::DIScope *getCurrentContextDescriptor(const Decl *Decl);
/// Create a forward decl for a RecordType in a given context.
llvm::DICompositeType *getOrCreateRecordFwdDecl(const RecordType *,
llvm::DIScope *);
/// Return current directory name.
StringRef getCurrentDirname();
/// Create new compile unit.
void CreateCompileUnit();
/// Get the file debug info descriptor for the input location.
llvm::DIFile *getOrCreateFile(SourceLocation Loc);
/// Get the file info for main compile unit.
llvm::DIFile *getOrCreateMainFile();
/// Get the type from the cache or create a new type if necessary.
llvm::DIType *getOrCreateType(QualType Ty, llvm::DIFile *Fg);
/// Get a reference to a clang module.
llvm::DIModule *
getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod);
/// Get the type from the cache or create a new partial type if
/// necessary.
llvm::DIType *getOrCreateLimitedType(const RecordType *Ty, llvm::DIFile *F);
/// Create type metadata for a source language type.
llvm::DIType *CreateTypeNode(QualType Ty, llvm::DIFile *Fg);
/// Return the underlying ObjCInterfaceDecl if \arg Ty is an
/// ObjCInterface or a pointer to one.
ObjCInterfaceDecl *getObjCInterfaceDecl(QualType Ty);
/// Create new member and increase Offset by FType's size.
llvm::DIType *CreateMemberType(llvm::DIFile *Unit, QualType FType,
StringRef Name, uint64_t *Offset);
/// Retrieve the DIDescriptor, if any, for the canonical form of this
/// declaration.
llvm::DINode *getDeclarationOrDefinition(const Decl *D);
/// \return debug info descriptor to describe method
/// declaration for the given method definition.
llvm::DISubprogram *getFunctionDeclaration(const Decl *D);
/// \return debug info descriptor to describe in-class static data
/// member declaration for the given out-of-class definition. If D
/// is an out-of-class definition of a static data member of a
/// class, find its corresponding in-class declaration.
llvm::DIDerivedType *
getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D);
/// Create a subprogram describing the forward declaration
/// represented in the given FunctionDecl.
llvm::DISubprogram *getFunctionForwardDeclaration(const FunctionDecl *FD);
/// Create a global variable describing the forward declaration
/// represented in the given VarDecl.
llvm::DIGlobalVariable *
getGlobalVariableForwardDeclaration(const VarDecl *VD);
/// \brief Return a global variable that represents one of the
/// collection of global variables created for an anonymous union.
///
/// Recursively collect all of the member fields of a global
/// anonymous decl and create static variables for them. The first
/// time this is called it needs to be on a union and then from
/// there we can have additional unnamed fields.
llvm::DIGlobalVariable *
CollectAnonRecordDecls(const RecordDecl *RD, llvm::DIFile *Unit,
unsigned LineNo, StringRef LinkageName,
llvm::GlobalVariable *Var, llvm::DIScope *DContext);
/// Get function name for the given FunctionDecl. If the name is
/// constructed on demand (e.g., C++ destructor) then the name is
/// stored on the side.
StringRef getFunctionName(const FunctionDecl *FD);
/// Returns the unmangled name of an Objective-C method.
/// This is the display name for the debugging info.
StringRef getObjCMethodName(const ObjCMethodDecl *FD);
/// Return selector name. This is used for debugging
/// info.
StringRef getSelectorName(Selector S);
/// Get class name including template argument list.
StringRef getClassName(const RecordDecl *RD);
/// Get the vtable name for the given class.
StringRef getVTableName(const CXXRecordDecl *Decl);
/// Get line number for the location. If location is invalid
/// then use current location.
unsigned getLineNumber(SourceLocation Loc);
/// Get column number for the location. If location is
/// invalid then use current location.
/// \param Force Assume DebugColumnInfo option is true.
unsigned getColumnNumber(SourceLocation Loc, bool Force = false);
/// Collect various properties of a FunctionDecl.
/// \param GD A GlobalDecl whose getDecl() must return a FunctionDecl.
void collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
StringRef &Name, StringRef &LinkageName,
llvm::DIScope *&FDContext,
llvm::DINodeArray &TParamsArray,
unsigned &Flags);
/// Collect various properties of a VarDecl.
void collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
unsigned &LineNo, QualType &T, StringRef &Name,
StringRef &LinkageName, llvm::DIScope *&VDContext);
/// Allocate a copy of \p A using the DebugInfoNames allocator
/// and return a reference to it. If multiple arguments are given the strings
/// are concatenated.
StringRef internString(StringRef A, StringRef B = StringRef()) {
char *Data = DebugInfoNames.Allocate<char>(A.size() + B.size());
if (!A.empty())
std::memcpy(Data, A.data(), A.size());
if (!B.empty())
std::memcpy(Data + A.size(), B.data(), B.size());
return StringRef(Data, A.size() + B.size());
}
};
/// A scoped helper to set the current debug location to the specified
/// location or preferred location of the specified Expr.
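/// This is an RAII helper: the constructor applies the new location and the
/// destructor restores OriginalLocation, so a stack instance scopes the
/// override to the enclosing block.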
class ApplyDebugLocation {
private:
void init(SourceLocation TemporaryLocation, bool DefaultToEmpty = false);
ApplyDebugLocation(CodeGenFunction &CGF, bool DefaultToEmpty,
SourceLocation TemporaryLocation);
llvm::DebugLoc OriginalLocation;
CodeGenFunction &CGF;
public:
/// Set the location to the (valid) TemporaryLocation.
ApplyDebugLocation(CodeGenFunction &CGF, SourceLocation TemporaryLocation);
ApplyDebugLocation(CodeGenFunction &CGF, const Expr *E);
ApplyDebugLocation(CodeGenFunction &CGF, llvm::DebugLoc Loc);
~ApplyDebugLocation();
/// \brief Apply TemporaryLocation if it is valid. Otherwise switch
/// to an artificial debug location that has a valid scope, but no
/// line information.
///
/// Artificial locations are useful when emitting compiler-generated
/// helper functions that have no source location associated with
/// them. The DWARF specification allows the compiler to use the
/// special line number 0 to indicate code that cannot be
/// attributed to any source location. Note that passing an empty
/// SourceLocation to CGDebugInfo::setLocation() will result in the
/// last valid location being reused.
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF) {
return ApplyDebugLocation(CGF, false, SourceLocation());
}
/// \brief Apply TemporaryLocation if it is valid. Otherwise switch
/// to an artificial debug location that has a valid scope, but no
/// line information.
static ApplyDebugLocation
CreateDefaultArtificial(CodeGenFunction &CGF,
SourceLocation TemporaryLocation) {
return ApplyDebugLocation(CGF, false, TemporaryLocation);
}
/// Set the IRBuilder to not attach debug locations. Note that
/// passing an empty SourceLocation to \a CGDebugInfo::setLocation()
/// will result in the last valid location being reused. Note that
/// all instructions that do not have a location at the beginning of
/// a function are counted toward the function prologue.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF) {
return ApplyDebugLocation(CGF, true, SourceLocation());
}
/// \brief Apply TemporaryLocation if it is valid. Otherwise set the IRBuilder
/// to not attach debug locations.
static ApplyDebugLocation
CreateDefaultEmpty(CodeGenFunction &CGF, SourceLocation TemporaryLocation) {
return ApplyDebugLocation(CGF, true, TemporaryLocation);
}
};
} // namespace CodeGen
} // namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CodeGenPGO.cpp | //===--- CodeGenPGO.cpp - PGO Instrumentation for LLVM CodeGen --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Instrumentation-based profile-guided optimization
//
//===----------------------------------------------------------------------===//
#include "CodeGenPGO.h"
#include "CodeGenFunction.h"
#include "CoverageMappingGen.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MD5.h"
using namespace clang;
using namespace CodeGen;
void CodeGenPGO::setFuncName(StringRef Name,
llvm::GlobalValue::LinkageTypes Linkage) {
StringRef RawFuncName = Name;
// Function names may be prefixed with a binary '1' to indicate
// that the backend should not modify the symbols due to any platform
// naming convention. Do not include that '1' in the PGO profile name.
if (RawFuncName[0] == '\1')
RawFuncName = RawFuncName.substr(1);
FuncName = RawFuncName;
if (llvm::GlobalValue::isLocalLinkage(Linkage)) {
// For local symbols, prepend the main file name to distinguish them.
// Do not include the full path in the file name since there's no guarantee
// that it will stay the same, e.g., if the files are checked out from
// version control in different locations.
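// For example, a file-static function 'foo' compiled from main.c is
// recorded as "main.c:foo" rather than plain "foo".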
if (CGM.getCodeGenOpts().MainFileName.empty())
FuncName = FuncName.insert(0, "<unknown>:");
else
FuncName = FuncName.insert(0, CGM.getCodeGenOpts().MainFileName + ":");
}
// If we're generating a profile, create a variable for the name.
if (CGM.getCodeGenOpts().ProfileInstrGenerate)
createFuncNameVar(Linkage);
}
void CodeGenPGO::setFuncName(llvm::Function *Fn) {
setFuncName(Fn->getName(), Fn->getLinkage());
}
void CodeGenPGO::createFuncNameVar(llvm::GlobalValue::LinkageTypes Linkage) {
// We generally want to match the function's linkage, but available_externally
// and extern_weak both have the wrong semantics, and anything that doesn't
// need to link across compilation units doesn't need to be visible at all.
if (Linkage == llvm::GlobalValue::ExternalWeakLinkage)
Linkage = llvm::GlobalValue::LinkOnceAnyLinkage;
else if (Linkage == llvm::GlobalValue::AvailableExternallyLinkage)
Linkage = llvm::GlobalValue::LinkOnceODRLinkage;
else if (Linkage == llvm::GlobalValue::InternalLinkage ||
Linkage == llvm::GlobalValue::ExternalLinkage)
Linkage = llvm::GlobalValue::PrivateLinkage;
auto *Value =
llvm::ConstantDataArray::getString(CGM.getLLVMContext(), FuncName, false);
FuncNameVar =
new llvm::GlobalVariable(CGM.getModule(), Value->getType(), true, Linkage,
Value, "__llvm_profile_name_" + FuncName);
// Hide the symbol so that we correctly get a copy for each executable.
if (!llvm::GlobalValue::isLocalLinkage(FuncNameVar->getLinkage()))
FuncNameVar->setVisibility(llvm::GlobalValue::HiddenVisibility);
}
namespace {
/// \brief Stable hasher for PGO region counters.
///
/// PGOHash produces a stable hash of a given function's control flow.
///
/// Changing the output of this hash will invalidate all previously generated
/// profiles -- i.e., don't do it.
///
/// \note When this hash does eventually change (years?), we still need to
/// support old hashes. We'll need to pull in the version number from the
/// profile data format and use the matching hash function.
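/// Each combined AST node contributes a 6-bit value; ten such values pack
/// into a 64-bit word, and completed words are folded into an MD5
/// accumulator, so a function with at most ten counted nodes hashes to the
/// packed word itself (see combine() and finalize()).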
class PGOHash {
uint64_t Working;
unsigned Count;
llvm::MD5 MD5;
static const int NumBitsPerType = 6;
static const unsigned NumTypesPerWord = sizeof(uint64_t) * 8 / NumBitsPerType;
static const unsigned TooBig = 1u << NumBitsPerType;
public:
/// \brief Hash values for AST nodes.
///
/// Distinct values for AST nodes that have region counters attached.
///
/// These values must be stable. All new members must be added at the end,
/// and no members should be removed. Changing the enumeration value for an
/// AST node will affect the hash of every function that contains that node.
enum HashType : unsigned char {
None = 0,
LabelStmt = 1,
WhileStmt,
DoStmt,
ForStmt,
CXXForRangeStmt,
ObjCForCollectionStmt,
SwitchStmt,
CaseStmt,
DefaultStmt,
IfStmt,
CXXTryStmt,
CXXCatchStmt,
ConditionalOperator,
BinaryOperatorLAnd,
BinaryOperatorLOr,
BinaryConditionalOperator,
// Keep this last. It's for the static assert that follows.
LastHashType
};
static_assert(LastHashType <= TooBig, "Too many types in HashType");
// TODO: When this format changes, take in a version number here, and use the
// old hash calculation for file formats that used the old hash.
PGOHash() : Working(0), Count(0) {}
void combine(HashType Type);
uint64_t finalize();
};
const int PGOHash::NumBitsPerType;
const unsigned PGOHash::NumTypesPerWord;
const unsigned PGOHash::TooBig;
/// A RecursiveASTVisitor that fills a map of statements to PGO counters.
struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
/// The next counter value to assign.
unsigned NextCounter;
/// The function hash.
PGOHash Hash;
/// The map of statements to counters.
llvm::DenseMap<const Stmt *, unsigned> &CounterMap;
MapRegionCounters(llvm::DenseMap<const Stmt *, unsigned> &CounterMap)
: NextCounter(0), CounterMap(CounterMap) {}
// Blocks and lambdas are handled as separate functions, so we need not
// traverse them in the parent context.
bool TraverseBlockExpr(BlockExpr *BE) { return true; }
bool TraverseLambdaBody(LambdaExpr *LE) { return true; }
bool TraverseCapturedStmt(CapturedStmt *CS) { return true; }
bool VisitDecl(const Decl *D) {
switch (D->getKind()) {
default:
break;
case Decl::Function:
case Decl::CXXMethod:
case Decl::CXXConstructor:
case Decl::CXXDestructor:
case Decl::CXXConversion:
case Decl::ObjCMethod:
case Decl::Block:
case Decl::Captured:
CounterMap[D->getBody()] = NextCounter++;
break;
}
return true;
}
bool VisitStmt(const Stmt *S) {
auto Type = getHashType(S);
if (Type == PGOHash::None)
return true;
CounterMap[S] = NextCounter++;
Hash.combine(Type);
return true;
}
PGOHash::HashType getHashType(const Stmt *S) {
switch (S->getStmtClass()) {
default:
break;
case Stmt::LabelStmtClass:
return PGOHash::LabelStmt;
case Stmt::WhileStmtClass:
return PGOHash::WhileStmt;
case Stmt::DoStmtClass:
return PGOHash::DoStmt;
case Stmt::ForStmtClass:
return PGOHash::ForStmt;
case Stmt::CXXForRangeStmtClass:
return PGOHash::CXXForRangeStmt;
case Stmt::ObjCForCollectionStmtClass:
return PGOHash::ObjCForCollectionStmt;
case Stmt::SwitchStmtClass:
return PGOHash::SwitchStmt;
case Stmt::CaseStmtClass:
return PGOHash::CaseStmt;
case Stmt::DefaultStmtClass:
return PGOHash::DefaultStmt;
case Stmt::IfStmtClass:
return PGOHash::IfStmt;
case Stmt::CXXTryStmtClass:
return PGOHash::CXXTryStmt;
case Stmt::CXXCatchStmtClass:
return PGOHash::CXXCatchStmt;
case Stmt::ConditionalOperatorClass:
return PGOHash::ConditionalOperator;
case Stmt::BinaryConditionalOperatorClass:
return PGOHash::BinaryConditionalOperator;
case Stmt::BinaryOperatorClass: {
const BinaryOperator *BO = cast<BinaryOperator>(S);
if (BO->getOpcode() == BO_LAnd)
return PGOHash::BinaryOperatorLAnd;
if (BO->getOpcode() == BO_LOr)
return PGOHash::BinaryOperatorLOr;
break;
}
}
return PGOHash::None;
}
};
/// A StmtVisitor that propagates the raw counts through the AST and
/// records the count at statements where the value may change.
struct ComputeRegionCounts : public ConstStmtVisitor<ComputeRegionCounts> {
/// PGO state.
CodeGenPGO &PGO;
/// A flag that is set when the current count should be recorded on the
/// next statement, such as at the exit of a loop.
bool RecordNextStmtCount;
/// The count at the current location in the traversal.
uint64_t CurrentCount;
/// The map of statements to count values.
llvm::DenseMap<const Stmt *, uint64_t> &CountMap;
/// BreakContinueStack - Keep counts of breaks and continues inside loops.
struct BreakContinue {
uint64_t BreakCount;
uint64_t ContinueCount;
BreakContinue() : BreakCount(0), ContinueCount(0) {}
};
SmallVector<BreakContinue, 8> BreakContinueStack;
ComputeRegionCounts(llvm::DenseMap<const Stmt *, uint64_t> &CountMap,
CodeGenPGO &PGO)
: PGO(PGO), RecordNextStmtCount(false), CountMap(CountMap) {}
void RecordStmtCount(const Stmt *S) {
if (RecordNextStmtCount) {
CountMap[S] = CurrentCount;
RecordNextStmtCount = false;
}
}
/// Set and return the current count.
uint64_t setCount(uint64_t Count) {
CurrentCount = Count;
return Count;
}
void VisitStmt(const Stmt *S) {
RecordStmtCount(S);
for (const Stmt *Child : S->children())
if (Child)
this->Visit(Child);
}
void VisitFunctionDecl(const FunctionDecl *D) {
// Counter tracks entry to the function body.
uint64_t BodyCount = setCount(PGO.getRegionCount(D->getBody()));
CountMap[D->getBody()] = BodyCount;
Visit(D->getBody());
}
// Skip lambda expressions. We visit these as FunctionDecls when we're
// generating them and aren't interested in the body when generating a
// parent context.
void VisitLambdaExpr(const LambdaExpr *LE) {}
void VisitCapturedDecl(const CapturedDecl *D) {
// Counter tracks entry to the capture body.
uint64_t BodyCount = setCount(PGO.getRegionCount(D->getBody()));
CountMap[D->getBody()] = BodyCount;
Visit(D->getBody());
}
void VisitObjCMethodDecl(const ObjCMethodDecl *D) {
// Counter tracks entry to the method body.
uint64_t BodyCount = setCount(PGO.getRegionCount(D->getBody()));
CountMap[D->getBody()] = BodyCount;
Visit(D->getBody());
}
void VisitBlockDecl(const BlockDecl *D) {
// Counter tracks entry to the block body.
uint64_t BodyCount = setCount(PGO.getRegionCount(D->getBody()));
CountMap[D->getBody()] = BodyCount;
Visit(D->getBody());
}
void VisitReturnStmt(const ReturnStmt *S) {
RecordStmtCount(S);
if (S->getRetValue())
Visit(S->getRetValue());
CurrentCount = 0;
RecordNextStmtCount = true;
}
void VisitCXXThrowExpr(const CXXThrowExpr *E) {
RecordStmtCount(E);
if (E->getSubExpr())
Visit(E->getSubExpr());
CurrentCount = 0;
RecordNextStmtCount = true;
}
void VisitGotoStmt(const GotoStmt *S) {
RecordStmtCount(S);
CurrentCount = 0;
RecordNextStmtCount = true;
}
void VisitLabelStmt(const LabelStmt *S) {
RecordNextStmtCount = false;
// Counter tracks the block following the label.
uint64_t BlockCount = setCount(PGO.getRegionCount(S));
CountMap[S] = BlockCount;
Visit(S->getSubStmt());
}
void VisitBreakStmt(const BreakStmt *S) {
RecordStmtCount(S);
assert(!BreakContinueStack.empty() && "break not in a loop or switch!");
BreakContinueStack.back().BreakCount += CurrentCount;
CurrentCount = 0;
RecordNextStmtCount = true;
}
void VisitContinueStmt(const ContinueStmt *S) {
RecordStmtCount(S);
assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
BreakContinueStack.back().ContinueCount += CurrentCount;
CurrentCount = 0;
RecordNextStmtCount = true;
}
void VisitWhileStmt(const WhileStmt *S) {
RecordStmtCount(S);
uint64_t ParentCount = CurrentCount;
BreakContinueStack.push_back(BreakContinue());
// Visit the body region first so the break/continue adjustments can be
// included when visiting the condition.
uint64_t BodyCount = setCount(PGO.getRegionCount(S));
CountMap[S->getBody()] = CurrentCount;
Visit(S->getBody());
uint64_t BackedgeCount = CurrentCount;
// ...then go back and propagate counts through the condition. The count
// at the start of the condition is the sum of the incoming edges,
// the backedge from the end of the loop body, and the edges from
// continue statements.
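// As a worked example: a loop entered twice whose body runs ten times in
// total (no breaks or continues) gives CondCount = 2 + 10 = 12 and an exit
// count of 12 - 10 = 2, matching the two entries.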
BreakContinue BC = BreakContinueStack.pop_back_val();
uint64_t CondCount =
setCount(ParentCount + BackedgeCount + BC.ContinueCount);
CountMap[S->getCond()] = CondCount;
Visit(S->getCond());
setCount(BC.BreakCount + CondCount - BodyCount);
RecordNextStmtCount = true;
}
void VisitDoStmt(const DoStmt *S) {
RecordStmtCount(S);
uint64_t LoopCount = PGO.getRegionCount(S);
BreakContinueStack.push_back(BreakContinue());
// The count doesn't include the fallthrough from the parent scope. Add it.
uint64_t BodyCount = setCount(LoopCount + CurrentCount);
CountMap[S->getBody()] = BodyCount;
Visit(S->getBody());
uint64_t BackedgeCount = CurrentCount;
BreakContinue BC = BreakContinueStack.pop_back_val();
// The count at the start of the condition is equal to the count at the
// end of the body, plus any continues.
uint64_t CondCount = setCount(BackedgeCount + BC.ContinueCount);
CountMap[S->getCond()] = CondCount;
Visit(S->getCond());
setCount(BC.BreakCount + CondCount - LoopCount);
RecordNextStmtCount = true;
}
void VisitForStmt(const ForStmt *S) {
RecordStmtCount(S);
if (S->getInit())
Visit(S->getInit());
uint64_t ParentCount = CurrentCount;
BreakContinueStack.push_back(BreakContinue());
// Visit the body region first. (This is basically the same as a while
// loop; see further comments in VisitWhileStmt.)
uint64_t BodyCount = setCount(PGO.getRegionCount(S));
CountMap[S->getBody()] = BodyCount;
Visit(S->getBody());
uint64_t BackedgeCount = CurrentCount;
BreakContinue BC = BreakContinueStack.pop_back_val();
// The increment is essentially part of the body but it needs to include
// the count for all the continue statements.
if (S->getInc()) {
uint64_t IncCount = setCount(BackedgeCount + BC.ContinueCount);
CountMap[S->getInc()] = IncCount;
Visit(S->getInc());
}
// ...then go back and propagate counts through the condition.
uint64_t CondCount =
setCount(ParentCount + BackedgeCount + BC.ContinueCount);
if (S->getCond()) {
CountMap[S->getCond()] = CondCount;
Visit(S->getCond());
}
setCount(BC.BreakCount + CondCount - BodyCount);
RecordNextStmtCount = true;
}
void VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
RecordStmtCount(S);
Visit(S->getLoopVarStmt());
Visit(S->getRangeStmt());
Visit(S->getBeginEndStmt());
uint64_t ParentCount = CurrentCount;
BreakContinueStack.push_back(BreakContinue());
// Visit the body region first. (This is basically the same as a while
// loop; see further comments in VisitWhileStmt.)
uint64_t BodyCount = setCount(PGO.getRegionCount(S));
CountMap[S->getBody()] = BodyCount;
Visit(S->getBody());
uint64_t BackedgeCount = CurrentCount;
BreakContinue BC = BreakContinueStack.pop_back_val();
// The increment is essentially part of the body but it needs to include
// the count for all the continue statements.
uint64_t IncCount = setCount(BackedgeCount + BC.ContinueCount);
CountMap[S->getInc()] = IncCount;
Visit(S->getInc());
// ...then go back and propagate counts through the condition.
uint64_t CondCount =
setCount(ParentCount + BackedgeCount + BC.ContinueCount);
CountMap[S->getCond()] = CondCount;
Visit(S->getCond());
setCount(BC.BreakCount + CondCount - BodyCount);
RecordNextStmtCount = true;
}
void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) {
RecordStmtCount(S);
Visit(S->getElement());
uint64_t ParentCount = CurrentCount;
BreakContinueStack.push_back(BreakContinue());
// Counter tracks the body of the loop.
uint64_t BodyCount = setCount(PGO.getRegionCount(S));
CountMap[S->getBody()] = BodyCount;
Visit(S->getBody());
uint64_t BackedgeCount = CurrentCount;
BreakContinue BC = BreakContinueStack.pop_back_val();
setCount(BC.BreakCount + ParentCount + BackedgeCount + BC.ContinueCount -
BodyCount);
RecordNextStmtCount = true;
}
void VisitSwitchStmt(const SwitchStmt *S) {
RecordStmtCount(S);
Visit(S->getCond());
CurrentCount = 0;
BreakContinueStack.push_back(BreakContinue());
Visit(S->getBody());
// If the switch is inside a loop, add the continue counts.
BreakContinue BC = BreakContinueStack.pop_back_val();
if (!BreakContinueStack.empty())
BreakContinueStack.back().ContinueCount += BC.ContinueCount;
// Counter tracks the exit block of the switch.
setCount(PGO.getRegionCount(S));
RecordNextStmtCount = true;
}
void VisitSwitchCase(const SwitchCase *S) {
RecordNextStmtCount = false;
// Counter for this particular case. This counts only jumps from the
// switch header and does not include fallthrough from the case before
// this one.
uint64_t CaseCount = PGO.getRegionCount(S);
setCount(CurrentCount + CaseCount);
// We need the count without fallthrough in the mapping, so it's more useful
// for branch probabilities.
CountMap[S] = CaseCount;
RecordNextStmtCount = true;
Visit(S->getSubStmt());
}
void VisitIfStmt(const IfStmt *S) {
RecordStmtCount(S);
uint64_t ParentCount = CurrentCount;
Visit(S->getCond());
// Counter tracks the "then" part of an if statement. The count for
// the "else" part, if it exists, will be calculated from this counter.
uint64_t ThenCount = setCount(PGO.getRegionCount(S));
CountMap[S->getThen()] = ThenCount;
Visit(S->getThen());
uint64_t OutCount = CurrentCount;
uint64_t ElseCount = ParentCount - ThenCount;
if (S->getElse()) {
setCount(ElseCount);
CountMap[S->getElse()] = ElseCount;
Visit(S->getElse());
OutCount += CurrentCount;
} else
OutCount += ElseCount;
setCount(OutCount);
RecordNextStmtCount = true;
}
void VisitCXXTryStmt(const CXXTryStmt *S) {
RecordStmtCount(S);
Visit(S->getTryBlock());
for (unsigned I = 0, E = S->getNumHandlers(); I < E; ++I)
Visit(S->getHandler(I));
// Counter tracks the continuation block of the try statement.
setCount(PGO.getRegionCount(S));
RecordNextStmtCount = true;
}
void VisitCXXCatchStmt(const CXXCatchStmt *S) {
RecordNextStmtCount = false;
// Counter tracks the catch statement's handler block.
uint64_t CatchCount = setCount(PGO.getRegionCount(S));
CountMap[S] = CatchCount;
Visit(S->getHandlerBlock());
}
void VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
RecordStmtCount(E);
uint64_t ParentCount = CurrentCount;
Visit(E->getCond());
// Counter tracks the "true" part of a conditional operator. The
// count in the "false" part will be calculated from this counter.
uint64_t TrueCount = setCount(PGO.getRegionCount(E));
CountMap[E->getTrueExpr()] = TrueCount;
Visit(E->getTrueExpr());
uint64_t OutCount = CurrentCount;
uint64_t FalseCount = setCount(ParentCount - TrueCount);
CountMap[E->getFalseExpr()] = FalseCount;
Visit(E->getFalseExpr());
OutCount += CurrentCount;
setCount(OutCount);
RecordNextStmtCount = true;
}
void VisitBinLAnd(const BinaryOperator *E) {
RecordStmtCount(E);
uint64_t ParentCount = CurrentCount;
Visit(E->getLHS());
// Counter tracks the right hand side of a logical and operator.
uint64_t RHSCount = setCount(PGO.getRegionCount(E));
CountMap[E->getRHS()] = RHSCount;
Visit(E->getRHS());
setCount(ParentCount + RHSCount - CurrentCount);
RecordNextStmtCount = true;
}
void VisitBinLOr(const BinaryOperator *E) {
RecordStmtCount(E);
uint64_t ParentCount = CurrentCount;
Visit(E->getLHS());
// Counter tracks the right hand side of a logical or operator.
uint64_t RHSCount = setCount(PGO.getRegionCount(E));
CountMap[E->getRHS()] = RHSCount;
Visit(E->getRHS());
setCount(ParentCount + RHSCount - CurrentCount);
RecordNextStmtCount = true;
}
};
}
void PGOHash::combine(HashType Type) {
// Check that we never combine 0 and only have six bits.
assert(Type && "Hash is invalid: unexpected type 0");
assert(unsigned(Type) < TooBig && "Hash is invalid: too many types");
// Pass through MD5 if enough work has built up.
if (Count && Count % NumTypesPerWord == 0) {
using namespace llvm::support;
uint64_t Swapped = endian::byte_swap<uint64_t, little>(Working);
MD5.update(llvm::makeArrayRef((uint8_t *)&Swapped, sizeof(Swapped)));
Working = 0;
}
// Accumulate the current type.
++Count;
Working = Working << NumBitsPerType | Type;
}
uint64_t PGOHash::finalize() {
// Use Working as the hash directly if we never used MD5.
if (Count <= NumTypesPerWord)
// No need to byte swap here, since none of the math was endian-dependent.
// This number will be byte-swapped as required on endianness transitions,
// so we will see the same value on the other side.
return Working;
// Check for remaining work in Working.
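// Note: Working is a uint64_t, so this call binds through ArrayRef's
// single-element constructor and effectively hashes only the low byte;
// the quirk is left as-is because changing it would alter existing hashes.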
if (Working)
MD5.update(Working);
// Finalize the MD5 and return the hash.
llvm::MD5::MD5Result Result;
MD5.final(Result);
using namespace llvm::support;
return endian::read<uint64_t, little, unaligned>(Result);
}
void CodeGenPGO::checkGlobalDecl(GlobalDecl GD) {
// Make sure we only emit coverage mapping for one constructor/destructor.
// Clang emits several functions for the constructor and the destructor of
// a class. Every function is instrumented, but we only want to provide
// coverage for one of them. Because of that we only emit the coverage mapping
// for the base constructor/destructor.
if ((isa<CXXConstructorDecl>(GD.getDecl()) &&
GD.getCtorType() != Ctor_Base) ||
(isa<CXXDestructorDecl>(GD.getDecl()) &&
GD.getDtorType() != Dtor_Base)) {
SkipCoverageMapping = true;
}
}
void CodeGenPGO::assignRegionCounters(const Decl *D, llvm::Function *Fn) {
bool InstrumentRegions = CGM.getCodeGenOpts().ProfileInstrGenerate;
llvm::IndexedInstrProfReader *PGOReader = CGM.getPGOReader();
if (!InstrumentRegions && !PGOReader)
return;
if (D->isImplicit())
return;
CGM.ClearUnusedCoverageMapping(D);
setFuncName(Fn);
mapRegionCounters(D);
if (CGM.getCodeGenOpts().CoverageMapping)
emitCounterRegionMapping(D);
if (PGOReader) {
SourceManager &SM = CGM.getContext().getSourceManager();
loadRegionCounts(PGOReader, SM.isInMainFile(D->getLocation()));
computeRegionCounts(D);
applyFunctionAttributes(PGOReader, Fn);
}
}
void CodeGenPGO::mapRegionCounters(const Decl *D) {
RegionCounterMap.reset(new llvm::DenseMap<const Stmt *, unsigned>);
MapRegionCounters Walker(*RegionCounterMap);
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
Walker.TraverseDecl(const_cast<FunctionDecl *>(FD));
else if (const ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(D))
Walker.TraverseDecl(const_cast<ObjCMethodDecl *>(MD));
else if (const BlockDecl *BD = dyn_cast_or_null<BlockDecl>(D))
Walker.TraverseDecl(const_cast<BlockDecl *>(BD));
else if (const CapturedDecl *CD = dyn_cast_or_null<CapturedDecl>(D))
Walker.TraverseDecl(const_cast<CapturedDecl *>(CD));
assert(Walker.NextCounter > 0 && "no entry counter mapped for decl");
NumRegionCounters = Walker.NextCounter;
FunctionHash = Walker.Hash.finalize();
}
void CodeGenPGO::emitCounterRegionMapping(const Decl *D) {
if (SkipCoverageMapping)
return;
// Don't map the functions inside the system headers
auto Loc = D->getBody()->getLocStart();
if (CGM.getContext().getSourceManager().isInSystemHeader(Loc))
return;
std::string CoverageMapping;
llvm::raw_string_ostream OS(CoverageMapping);
CoverageMappingGen MappingGen(*CGM.getCoverageMapping(),
CGM.getContext().getSourceManager(),
CGM.getLangOpts(), RegionCounterMap.get());
MappingGen.emitCounterMapping(D, OS);
OS.flush();
if (CoverageMapping.empty())
return;
CGM.getCoverageMapping()->addFunctionMappingRecord(
FuncNameVar, FuncName, FunctionHash, CoverageMapping);
}
void
CodeGenPGO::emitEmptyCounterMapping(const Decl *D, StringRef Name,
llvm::GlobalValue::LinkageTypes Linkage) {
if (SkipCoverageMapping)
return;
// Don't map functions inside system headers.
auto Loc = D->getBody()->getLocStart();
if (CGM.getContext().getSourceManager().isInSystemHeader(Loc))
return;
std::string CoverageMapping;
llvm::raw_string_ostream OS(CoverageMapping);
CoverageMappingGen MappingGen(*CGM.getCoverageMapping(),
CGM.getContext().getSourceManager(),
CGM.getLangOpts());
MappingGen.emitEmptyMapping(D, OS);
OS.flush();
if (CoverageMapping.empty())
return;
setFuncName(Name, Linkage);
CGM.getCoverageMapping()->addFunctionMappingRecord(
FuncNameVar, FuncName, FunctionHash, CoverageMapping);
}
void CodeGenPGO::computeRegionCounts(const Decl *D) {
StmtCountMap.reset(new llvm::DenseMap<const Stmt *, uint64_t>);
ComputeRegionCounts Walker(*StmtCountMap, *this);
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
Walker.VisitFunctionDecl(FD);
else if (const ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(D))
Walker.VisitObjCMethodDecl(MD);
else if (const BlockDecl *BD = dyn_cast_or_null<BlockDecl>(D))
Walker.VisitBlockDecl(BD);
else if (const CapturedDecl *CD = dyn_cast_or_null<CapturedDecl>(D))
Walker.VisitCapturedDecl(const_cast<CapturedDecl *>(CD));
}
void
CodeGenPGO::applyFunctionAttributes(llvm::IndexedInstrProfReader *PGOReader,
llvm::Function *Fn) {
if (!haveRegionCounts())
return;
uint64_t MaxFunctionCount = PGOReader->getMaximumFunctionCount();
uint64_t FunctionCount = getRegionCount(0);
if (FunctionCount >= (uint64_t)(0.3 * (double)MaxFunctionCount))
// Turn on InlineHint attribute for hot functions.
// FIXME: 30% is from preliminary tuning on SPEC, it may not be optimal.
Fn->addFnAttr(llvm::Attribute::InlineHint);
else if (FunctionCount <= (uint64_t)(0.01 * (double)MaxFunctionCount))
// Turn on Cold attribute for cold functions.
// FIXME: 1% is from preliminary tuning on SPEC, it may not be optimal.
Fn->addFnAttr(llvm::Attribute::Cold);
Fn->setEntryCount(FunctionCount);
}
void CodeGenPGO::emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S) {
if (!CGM.getCodeGenOpts().ProfileInstrGenerate || !RegionCounterMap)
return;
if (!Builder.GetInsertPoint())
return;
unsigned Counter = (*RegionCounterMap)[S];
auto *I8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::instrprof_increment),
{llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
Builder.getInt64(FunctionHash),
Builder.getInt32(NumRegionCounters),
Builder.getInt32(Counter)});
}
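// Sketch of the IR this emits (shape only; the name operand is illustrative):
//   call void @llvm.instrprof.increment(i8* bitcast (... <func name var> ...),
//                                       i64 <FunctionHash>,
//                                       i32 <NumRegionCounters>,
//                                       i32 <Counter>)
// A later instrumentation lowering pass turns each such call into a load,
// add, and store on the matching slot of the function's counter array.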
void CodeGenPGO::loadRegionCounts(llvm::IndexedInstrProfReader *PGOReader,
bool IsInMainFile) {
CGM.getPGOStats().addVisited(IsInMainFile);
RegionCounts.clear();
if (std::error_code EC =
PGOReader->getFunctionCounts(FuncName, FunctionHash, RegionCounts)) {
if (EC == llvm::instrprof_error::unknown_function)
CGM.getPGOStats().addMissing(IsInMainFile);
else if (EC == llvm::instrprof_error::hash_mismatch)
CGM.getPGOStats().addMismatched(IsInMainFile);
else if (EC == llvm::instrprof_error::malformed)
// TODO: Consider a more specific warning for this case.
CGM.getPGOStats().addMismatched(IsInMainFile);
RegionCounts.clear();
}
}
/// \brief Calculate what to divide by to scale weights.
///
/// Given the maximum weight, calculate a divisor that will scale all the
/// weights to strictly less than UINT32_MAX.
static uint64_t calculateWeightScale(uint64_t MaxWeight) {
return MaxWeight < UINT32_MAX ? 1 : MaxWeight / UINT32_MAX + 1;
}
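// Worked example (editorial): for MaxWeight = 2^40, the divisor is
// 2^40 / UINT32_MAX + 1 = 257 under integer division, so the largest scaled
// weight, 2^40 / 257 + 1, stays strictly below UINT32_MAX as required.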
/// \brief Scale an individual branch weight (and add 1).
///
/// Scale a 64-bit weight down to 32-bits using \c Scale.
///
/// According to Laplace's Rule of Succession, it is better to compute the
/// weight based on the count plus 1, so universally add 1 to the value.
///
/// \pre \c Scale was calculated by \a calculateWeightScale() with a weight no
/// greater than \c Weight.
static uint32_t scaleBranchWeight(uint64_t Weight, uint64_t Scale) {
assert(Scale && "scale by 0?");
uint64_t Scaled = Weight / Scale + 1;
assert(Scaled <= UINT32_MAX && "overflow 32-bits");
return Scaled;
}
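// Note (editorial): the unconditional "+ 1" also means a branch that was
// never taken during profiling still gets weight 1 rather than 0, so later
// passes treat it as unlikely but possible instead of effectively dead.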
llvm::MDNode *CodeGenFunction::createProfileWeights(uint64_t TrueCount,
uint64_t FalseCount) {
// Check for empty weights.
if (!TrueCount && !FalseCount)
return nullptr;
// Calculate how to scale down to 32-bits.
uint64_t Scale = calculateWeightScale(std::max(TrueCount, FalseCount));
llvm::MDBuilder MDHelper(CGM.getLLVMContext());
return MDHelper.createBranchWeights(scaleBranchWeight(TrueCount, Scale),
scaleBranchWeight(FalseCount, Scale));
}
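// Worked example (editorial): TrueCount = 1000, FalseCount = 0 gives
// Scale = 1 and scaled weights (1001, 1), i.e. metadata of the shape
//   !{!"branch_weights", i32 1001, i32 1}
// which branch probability analysis reads as roughly 1001:1 in favor of
// the true edge.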
llvm::MDNode *
CodeGenFunction::createProfileWeights(ArrayRef<uint64_t> Weights) {
// We need at least two elements to create meaningful weights.
if (Weights.size() < 2)
return nullptr;
// Check for empty weights.
uint64_t MaxWeight = *std::max_element(Weights.begin(), Weights.end());
if (MaxWeight == 0)
return nullptr;
// Calculate how to scale down to 32-bits.
uint64_t Scale = calculateWeightScale(MaxWeight);
SmallVector<uint32_t, 16> ScaledWeights;
ScaledWeights.reserve(Weights.size());
for (uint64_t W : Weights)
ScaledWeights.push_back(scaleBranchWeight(W, Scale));
llvm::MDBuilder MDHelper(CGM.getLLVMContext());
return MDHelper.createBranchWeights(ScaledWeights);
}
llvm::MDNode *CodeGenFunction::createProfileWeightsForLoop(const Stmt *Cond,
uint64_t LoopCount) {
if (!PGO.haveRegionCounts())
return nullptr;
Optional<uint64_t> CondCount = PGO.getStmtCount(Cond);
assert(CondCount.hasValue() && "missing expected loop condition count");
if (*CondCount == 0)
return nullptr;
return createProfileWeights(LoopCount,
std::max(*CondCount, LoopCount) - LoopCount);
}
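// Worked example (editorial): a loop whose body ran 100 times has
// LoopCount = 100 and CondCount = 101 (the final, failing evaluation), so
// the weights are built from (100, 101 - 100) = (100, 1): the back edge is
// taken ~100x more often than the exit edge.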
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CodeGenTypes.h | //===--- CodeGenTypes.h - Type translation for LLVM CodeGen -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H
#define LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H
#include "CGCall.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/Module.h"
#include <vector>
namespace llvm {
class FunctionType;
class Module;
class DataLayout;
class Type;
class LLVMContext;
class StructType;
}
namespace clang {
class ABIInfo;
class ASTContext;
template <typename> class CanQual;
class CXXConstructorDecl;
class CXXDestructorDecl;
class CXXMethodDecl;
class CodeGenOptions;
class FieldDecl;
class FunctionProtoType;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
class PointerType;
class QualType;
class RecordDecl;
class TagDecl;
class TargetInfo;
class Type;
typedef CanQual<Type> CanQualType;
namespace CodeGen {
class CGCXXABI;
class CGRecordLayout;
class CodeGenModule;
class RequiredArgs;
enum class StructorType {
Complete, // constructor or destructor
Base, // constructor or destructor
Deleting // destructor only
};
inline CXXCtorType toCXXCtorType(StructorType T) {
switch (T) {
case StructorType::Complete:
return Ctor_Complete;
case StructorType::Base:
return Ctor_Base;
case StructorType::Deleting:
llvm_unreachable("cannot have a deleting ctor");
}
llvm_unreachable("not a StructorType");
}
inline StructorType getFromCtorType(CXXCtorType T) {
switch (T) {
case Ctor_Complete:
return StructorType::Complete;
case Ctor_Base:
return StructorType::Base;
case Ctor_Comdat:
llvm_unreachable("not expecting a COMDAT");
case Ctor_CopyingClosure:
case Ctor_DefaultClosure:
llvm_unreachable("not expecting a closure");
}
llvm_unreachable("not a CXXCtorType");
}
inline CXXDtorType toCXXDtorType(StructorType T) {
switch (T) {
case StructorType::Complete:
return Dtor_Complete;
case StructorType::Base:
return Dtor_Base;
case StructorType::Deleting:
return Dtor_Deleting;
}
llvm_unreachable("not a StructorType");
}
inline StructorType getFromDtorType(CXXDtorType T) {
switch (T) {
case Dtor_Deleting:
return StructorType::Deleting;
case Dtor_Complete:
return StructorType::Complete;
case Dtor_Base:
return StructorType::Base;
case Dtor_Comdat:
llvm_unreachable("not expecting a COMDAT");
}
llvm_unreachable("not a CXXDtorType");
}
/// This class organizes the cross-module state that is used while lowering
/// AST types to LLVM types.
class CodeGenTypes {
CodeGenModule &CGM;
// Some of this stuff should probably be left on the CGM.
ASTContext &Context;
llvm::Module &TheModule;
const llvm::DataLayout &TheDataLayout;
const TargetInfo &Target;
CGCXXABI &TheCXXABI;
// This should not be moved earlier, since its initialization depends on some
// of the previous reference members being already initialized.
const ABIInfo &TheABIInfo;
/// The opaque type map for Objective-C interfaces. All direct
/// manipulation is done by the runtime interfaces, which are
/// responsible for coercing to the appropriate type; these opaque
/// types are never refined.
llvm::DenseMap<const ObjCInterfaceType*, llvm::Type *> InterfaceTypes;
/// Maps a clang struct type to its corresponding record layout info.
llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts;
/// Contains the LLVM IR type for any converted RecordDecl.
llvm::DenseMap<const Type*, llvm::StructType *> RecordDeclTypes;
/// Hold memoized CGFunctionInfo results.
llvm::FoldingSet<CGFunctionInfo> FunctionInfos;
/// This set keeps track of records that we're currently converting
/// to an IR type. For example, when converting:
/// struct A { struct B { int x; } } when processing 'x', the 'A' and 'B'
/// types will be in this set.
llvm::SmallPtrSet<const Type*, 4> RecordsBeingLaidOut;
llvm::SmallPtrSet<const CGFunctionInfo*, 4> FunctionsBeingProcessed;
/// True if we skipped laying out a function because it occurred inside a
/// recursive struct conversion.
bool SkippedLayout;
SmallVector<const RecordDecl *, 8> DeferredRecords;
private:
/// This map caches llvm::Types, mapping each clang::Type to its
/// corresponding llvm::Type.
llvm::DenseMap<const Type *, llvm::Type *> TypeCache;
public:
CodeGenTypes(CodeGenModule &cgm);
~CodeGenTypes();
const llvm::DataLayout &getDataLayout() const { return TheDataLayout; }
ASTContext &getContext() const { return Context; }
const ABIInfo &getABIInfo() const { return TheABIInfo; }
const TargetInfo &getTarget() const { return Target; }
CGCXXABI &getCXXABI() const { return TheCXXABI; }
llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
/// ConvertType - Convert type T into a llvm::Type.
llvm::Type *ConvertType(QualType T);
/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
llvm::Type *ConvertTypeForMem(QualType T);
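// Editorial note: on typical targets a C++ 'bool' converts to i1 as a
// scalar value but to i8 in memory, so loads and stores of a bool go
// through the memory type with a trunc/zext at the boundary.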
/// GetFunctionType - Get the LLVM function type for \arg Info.
llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info);
llvm::FunctionType *GetFunctionType(GlobalDecl GD);
/// isFuncTypeConvertible - Utility to check whether a function type can
/// be converted to an LLVM type (i.e. doesn't depend on an incomplete tag
/// type).
bool isFuncTypeConvertible(const FunctionType *FT);
bool isFuncParamTypeConvertible(QualType Ty);
/// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
/// given a CXXMethodDecl. If the method has an incomplete return type,
/// and/or incomplete argument types, this will return the opaque type.
llvm::Type *GetFunctionTypeForVTable(GlobalDecl GD);
const CGRecordLayout &getCGRecordLayout(const RecordDecl*);
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void UpdateCompletedType(const TagDecl *TD);
/// arrangeNullaryFunction - Get the function info for a void()
/// function with standard CC.
const CGFunctionInfo &arrangeNullaryFunction();
// The arrangement methods are split into three families:
// - those meant to drive the signature and prologue/epilogue
// of a function declaration or definition,
// - those meant for the computation of the LLVM type for an abstract
// appearance of a function, and
// - those meant for performing the IR-generation of a call.
// They differ mainly in how they deal with optional (i.e. variadic)
// arguments, as well as unprototyped functions.
//
// Key points:
// - The CGFunctionInfo for emitting a specific call site must include
// entries for the optional arguments.
// - The function type used at the call site must reflect the formal
// signature of the declaration being called, or else the call will
// go awry.
// - For the most part, unprototyped functions are called by casting to
// a formal signature inferred from the specific argument types used
// at the call-site. However, some targets (e.g. x86-64) screw with
// this for compatibility reasons.
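//
// Illustrative example (editorial, not from this file): for an unprototyped
// C callee
//   void f();          called as          f(1, 2.0);
// the call site is arranged from the actual argument types (int, double),
// and the callee is cast to that inferred function type before the call,
// modulo target-specific compatibility rules like the x86-64 ones above.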
const CGFunctionInfo &arrangeGlobalDeclaration(GlobalDecl GD);
const CGFunctionInfo &arrangeFunctionDeclaration(const FunctionDecl *FD);
const CGFunctionInfo &
arrangeFreeFunctionDeclaration(QualType ResTy, const FunctionArgList &Args,
const FunctionType::ExtInfo &Info,
bool isVariadic);
const CGFunctionInfo &arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD);
const CGFunctionInfo &arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
QualType receiverType);
const CGFunctionInfo &arrangeCXXMethodDeclaration(const CXXMethodDecl *MD);
const CGFunctionInfo &arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
StructorType Type);
const CGFunctionInfo &arrangeCXXConstructorCall(const CallArgList &Args,
const CXXConstructorDecl *D,
CXXCtorType CtorKind,
unsigned ExtraArgs);
const CGFunctionInfo &arrangeFreeFunctionCall(const CallArgList &Args,
const FunctionType *Ty,
bool ChainCall);
const CGFunctionInfo &arrangeFreeFunctionCall(QualType ResTy,
const CallArgList &args,
FunctionType::ExtInfo info,
RequiredArgs required);
const CGFunctionInfo &arrangeBlockFunctionCall(const CallArgList &args,
const FunctionType *type);
const CGFunctionInfo &arrangeCXXMethodCall(const CallArgList &args,
const FunctionProtoType *type,
RequiredArgs required);
const CGFunctionInfo &arrangeMSMemberPointerThunk(const CXXMethodDecl *MD);
const CGFunctionInfo &arrangeMSCtorClosure(const CXXConstructorDecl *CD,
CXXCtorType CT);
const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionProtoType> Ty);
const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionNoProtoType> Ty);
const CGFunctionInfo &arrangeCXXMethodType(const CXXRecordDecl *RD,
const FunctionProtoType *FTP);
/// "Arrange" the LLVM information for a call or type with the given
/// signature. This is largely an internal method; other clients
/// should use one of the above routines, which ultimately defer to
/// this.
///
/// \param argTypes - must all actually be canonical as params
const CGFunctionInfo &arrangeLLVMFunctionInfo(CanQualType returnType,
bool instanceMethod,
bool chainCall,
ArrayRef<CanQualType> argTypes,
FunctionType::ExtInfo info,
RequiredArgs args);
/// \brief Compute a new LLVM record layout object for the given record.
CGRecordLayout *ComputeRecordLayout(const RecordDecl *D,
llvm::StructType *Ty);
/// addRecordTypeName - Compute a name from the given record decl with an
/// optional suffix and name the given LLVM type using it.
void addRecordTypeName(const RecordDecl *RD, llvm::StructType *Ty,
StringRef suffix);
public: // These are internal details of CGT that shouldn't be used externally.
/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *ConvertRecordDeclType(const RecordDecl *TD);
/// getExpandedTypes - Expand the type \arg Ty into the LLVM
/// argument types it would be passed as. See ABIArgInfo::Expand.
void getExpandedTypes(QualType Ty,
SmallVectorImpl<llvm::Type *>::iterator &TI);
/// isZeroInitializable - Return whether a type can be
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
bool isZeroInitializable(QualType T);
/// isZeroInitializable - Return whether a record type can be
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
bool isZeroInitializable(const RecordDecl *RD);
bool isRecordLayoutComplete(const Type *Ty) const;
bool noRecordsBeingLaidOut() const {
return RecordsBeingLaidOut.empty();
}
bool isRecordBeingLaidOut(const Type *Ty) const {
return RecordsBeingLaidOut.count(Ty);
}
};
} // end namespace CodeGen
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGStmtOpenMP.cpp | //===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
using namespace clang;
using namespace CodeGen;
//===----------------------------------------------------------------------===//
// OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
llvm::Value *DestAddr, llvm::Value *SrcAddr, QualType OriginalType,
const llvm::function_ref<void(llvm::Value *, llvm::Value *)> &CopyGen) {
// Perform element-by-element initialization.
QualType ElementTy;
auto SrcBegin = SrcAddr;
auto DestBegin = DestAddr;
auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestBegin);
// Cast from pointer to array type to pointer to single element.
SrcBegin = Builder.CreatePointerBitCastOrAddrSpaceCast(SrcBegin,
DestBegin->getType());
auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
// The basic structure here is a while-do loop.
auto BodyBB = createBasicBlock("omp.arraycpy.body");
auto DoneBB = createBasicBlock("omp.arraycpy.done");
auto IsEmpty =
Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
// Enter the loop body, making that address the current address.
auto EntryBB = Builder.GetInsertBlock();
EmitBlock(BodyBB);
auto SrcElementCurrent =
Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
SrcElementCurrent->addIncoming(SrcBegin, EntryBB);
auto DestElementCurrent = Builder.CreatePHI(DestBegin->getType(), 2,
"omp.arraycpy.destElementPast");
DestElementCurrent->addIncoming(DestBegin, EntryBB);
// Emit copy.
CopyGen(DestElementCurrent, SrcElementCurrent);
// Shift the address forward by one element.
auto DestElementNext = Builder.CreateConstGEP1_32(
DestElementCurrent, /*Idx0=*/1, "omp.arraycpy.dest.element");
auto SrcElementNext = Builder.CreateConstGEP1_32(
SrcElementCurrent, /*Idx0=*/1, "omp.arraycpy.src.element");
// Check whether we've reached the end.
auto Done =
Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
Builder.CreateCondBr(Done, DoneBB, BodyBB);
DestElementCurrent->addIncoming(DestElementNext, Builder.GetInsertBlock());
SrcElementCurrent->addIncoming(SrcElementNext, Builder.GetInsertBlock());
// Done.
EmitBlock(DoneBB, /*IsFinished=*/true);
}
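// Editorial sketch of the control flow built above (pseudo-C):
//   dest = DestBegin; src = SrcBegin;
//   if (dest == DestEnd) goto done;
//   do {
//     CopyGen(dest, src);
//     ++dest; ++src;
//   } while (dest != DestEnd);
//   done:;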
void CodeGenFunction::EmitOMPCopy(CodeGenFunction &CGF,
QualType OriginalType, llvm::Value *DestAddr,
llvm::Value *SrcAddr, const VarDecl *DestVD,
const VarDecl *SrcVD, const Expr *Copy) {
if (OriginalType->isArrayType()) {
auto *BO = dyn_cast<BinaryOperator>(Copy);
if (BO && BO->getOpcode() == BO_Assign) {
// Perform simple memcpy for simple copying.
CGF.EmitAggregateAssign(DestAddr, SrcAddr, OriginalType);
} else {
// For arrays with complex element types perform element by element
// copying.
CGF.EmitOMPAggregateAssign(
DestAddr, SrcAddr, OriginalType,
[&CGF, Copy, SrcVD, DestVD](llvm::Value *DestElement,
llvm::Value *SrcElement) {
// Working with the single array element, so have to remap
// destination and source variables to corresponding array
// elements.
CodeGenFunction::OMPPrivateScope Remap(CGF);
Remap.addPrivate(DestVD, [DestElement]() -> llvm::Value *{
return DestElement;
});
Remap.addPrivate(
SrcVD, [SrcElement]() -> llvm::Value *{ return SrcElement; });
(void)Remap.Privatize();
CGF.EmitIgnoredExpr(Copy);
});
}
} else {
// Remap pseudo source variable to private copy.
CodeGenFunction::OMPPrivateScope Remap(CGF);
Remap.addPrivate(SrcVD, [SrcAddr]() -> llvm::Value *{ return SrcAddr; });
Remap.addPrivate(DestVD, [DestAddr]() -> llvm::Value *{ return DestAddr; });
(void)Remap.Privatize();
// Emit copying of the whole variable.
CGF.EmitIgnoredExpr(Copy);
}
}
bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope) {
llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
for (auto &&I = D.getClausesOfKind(OMPC_firstprivate); I; ++I) {
auto *C = cast<OMPFirstprivateClause>(*I);
auto IRef = C->varlist_begin();
auto InitsRef = C->inits().begin();
for (auto IInit : C->private_copies()) {
auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
if (EmittedAsFirstprivate.count(OrigVD) == 0) {
EmittedAsFirstprivate.insert(OrigVD);
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
bool IsRegistered;
DeclRefExpr DRE(
const_cast<VarDecl *>(OrigVD),
/*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
OrigVD) != nullptr,
(*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
auto *OriginalAddr = EmitLValue(&DRE).getAddress();
QualType Type = OrigVD->getType();
if (Type->isArrayType()) {
// Emit VarDecl with copy init for arrays.
// Get the address of the original variable captured in current
// captured region.
IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
auto Emission = EmitAutoVarAlloca(*VD);
auto *Init = VD->getInit();
if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
// Perform simple memcpy.
EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr,
Type);
} else {
EmitOMPAggregateAssign(
Emission.getAllocatedAddress(), OriginalAddr, Type,
[this, VDInit, Init](llvm::Value *DestElement,
llvm::Value *SrcElement) {
// Clean up any temporaries needed by the initialization.
RunCleanupsScope InitScope(*this);
// Emit initialization for single element.
LocalDeclMap[VDInit] = SrcElement;
EmitAnyExprToMem(Init, DestElement,
Init->getType().getQualifiers(),
/*IsInitializer*/ false);
LocalDeclMap.erase(VDInit);
});
}
EmitAutoVarCleanups(Emission);
return Emission.getAllocatedAddress();
});
} else {
IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
// Emit private VarDecl with copy init.
// Remap the temp VDInit variable to the address of the original variable
// (for proper handling of captured global variables).
LocalDeclMap[VDInit] = OriginalAddr;
EmitDecl(*VD);
LocalDeclMap.erase(VDInit);
return GetAddrOfLocalVar(VD);
});
}
assert(IsRegistered &&
"firstprivate var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
}
++IRef, ++InitsRef;
}
}
return !EmittedAsFirstprivate.empty();
}
void CodeGenFunction::EmitOMPPrivateClause(
const OMPExecutableDirective &D,
CodeGenFunction::OMPPrivateScope &PrivateScope) {
llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
for (auto &&I = D.getClausesOfKind(OMPC_private); I; ++I) {
auto *C = cast<OMPPrivateClause>(*I);
auto IRef = C->varlist_begin();
for (auto IInit : C->private_copies()) {
auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
bool IsRegistered =
PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
// Emit private VarDecl with copy init.
EmitDecl(*VD);
return GetAddrOfLocalVar(VD);
});
assert(IsRegistered && "private var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
}
++IRef;
}
}
}
bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
// threadprivate_var1 = master_threadprivate_var1;
// operator=(threadprivate_var2, master_threadprivate_var2);
// ...
// __kmpc_barrier(&loc, global_tid);
llvm::DenseSet<const VarDecl *> CopiedVars;
llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
for (auto &&I = D.getClausesOfKind(OMPC_copyin); I; ++I) {
auto *C = cast<OMPCopyinClause>(*I);
auto IRef = C->varlist_begin();
auto ISrcRef = C->source_exprs().begin();
auto IDestRef = C->destination_exprs().begin();
for (auto *AssignOp : C->assignment_ops()) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
QualType Type = VD->getType();
if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
// Get the address of the master variable. If we are emitting code with
// TLS support, the address is passed from the master as field in the
// captured declaration.
llvm::Value *MasterAddr;
if (getLangOpts().OpenMPUseTLS &&
getContext().getTargetInfo().isTLSSupported()) {
assert(CapturedStmtInfo->lookup(VD) &&
"Copyin threadprivates should have been captured!");
DeclRefExpr DRE(const_cast<VarDecl *>(VD), true, (*IRef)->getType(),
VK_LValue, (*IRef)->getExprLoc());
MasterAddr = EmitLValue(&DRE).getAddress();
} else {
MasterAddr = VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
: CGM.GetAddrOfGlobal(VD);
}
// Get the address of the threadprivate variable.
auto *PrivateAddr = EmitLValue(*IRef).getAddress();
if (CopiedVars.size() == 1) {
// First check whether the current thread is the master thread; if it is,
// there is no need to copy the data.
CopyBegin = createBasicBlock("copyin.not.master");
CopyEnd = createBasicBlock("copyin.not.master.end");
Builder.CreateCondBr(
Builder.CreateICmpNE(
Builder.CreatePtrToInt(MasterAddr, CGM.IntPtrTy),
Builder.CreatePtrToInt(PrivateAddr, CGM.IntPtrTy)),
CopyBegin, CopyEnd);
EmitBlock(CopyBegin);
}
auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
EmitOMPCopy(*this, Type, PrivateAddr, MasterAddr, DestVD, SrcVD,
AssignOp);
}
++IRef;
++ISrcRef;
++IDestRef;
}
}
if (CopyEnd) {
// Exit out of copying procedure for non-master thread.
EmitBlock(CopyEnd, /*IsFinished=*/true);
return true;
}
return false;
}
bool CodeGenFunction::EmitOMPLastprivateClauseInit(
const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
bool HasAtLeastOneLastprivate = false;
llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
HasAtLeastOneLastprivate = true;
auto *C = cast<OMPLastprivateClause>(*I);
auto IRef = C->varlist_begin();
auto IDestRef = C->destination_exprs().begin();
for (auto *IInit : C->private_copies()) {
// Keep the address of the original variable for future update at the end
// of the loop.
auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> llvm::Value *{
DeclRefExpr DRE(
const_cast<VarDecl *>(OrigVD),
/*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup(
OrigVD) != nullptr,
(*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
return EmitLValue(&DRE).getAddress();
});
// Check if the variable is also a firstprivate: in this case IInit is
// not generated. Initialization of this variable will happen in the codegen
// for the 'firstprivate' clause.
if (IInit) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
bool IsRegistered =
PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{
// Emit private VarDecl with copy init.
EmitDecl(*VD);
return GetAddrOfLocalVar(VD);
});
assert(IsRegistered &&
"lastprivate var already registered as private");
(void)IsRegistered;
}
}
++IRef, ++IDestRef;
}
}
return HasAtLeastOneLastprivate;
}
void CodeGenFunction::EmitOMPLastprivateClauseFinal(
const OMPExecutableDirective &D, llvm::Value *IsLastIterCond) {
// Emit following code:
// if (<IsLastIterCond>) {
// orig_var1 = private_orig_var1;
// ...
// orig_varn = private_orig_varn;
// }
llvm::BasicBlock *ThenBB = nullptr;
llvm::BasicBlock *DoneBB = nullptr;
if (IsLastIterCond) {
ThenBB = createBasicBlock(".omp.lastprivate.then");
DoneBB = createBasicBlock(".omp.lastprivate.done");
Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
EmitBlock(ThenBB);
}
llvm::DenseMap<const Decl *, const Expr *> LoopCountersAndUpdates;
const Expr *LastIterVal = nullptr;
const Expr *IVExpr = nullptr;
const Expr *IncExpr = nullptr;
if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
if (isOpenMPWorksharingDirective(D.getDirectiveKind())) {
LastIterVal = cast<VarDecl>(cast<DeclRefExpr>(
LoopDirective->getUpperBoundVariable())
->getDecl())
->getAnyInitializer();
IVExpr = LoopDirective->getIterationVariable();
IncExpr = LoopDirective->getInc();
auto IUpdate = LoopDirective->updates().begin();
for (auto *E : LoopDirective->counters()) {
auto *D = cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
LoopCountersAndUpdates[D] = *IUpdate;
++IUpdate;
}
}
}
{
llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
bool FirstLCV = true;
for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
auto *C = cast<OMPLastprivateClause>(*I);
auto IRef = C->varlist_begin();
auto ISrcRef = C->source_exprs().begin();
auto IDestRef = C->destination_exprs().begin();
for (auto *AssignOp : C->assignment_ops()) {
auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
QualType Type = PrivateVD->getType();
auto *CanonicalVD = PrivateVD->getCanonicalDecl();
if (AlreadyEmittedVars.insert(CanonicalVD).second) {
// If the lastprivate variable is a loop control variable of a loop-based
// directive, update its value before copying it back to the original
// variable.
if (auto *UpExpr = LoopCountersAndUpdates.lookup(CanonicalVD)) {
if (FirstLCV && LastIterVal) {
EmitAnyExprToMem(LastIterVal, EmitLValue(IVExpr).getAddress(),
IVExpr->getType().getQualifiers(),
/*IsInitializer=*/false);
EmitIgnoredExpr(IncExpr);
FirstLCV = false;
}
EmitIgnoredExpr(UpExpr);
}
auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
// Get the address of the original variable.
auto *OriginalAddr = GetAddrOfLocalVar(DestVD);
// Get the address of the private variable.
auto *PrivateAddr = GetAddrOfLocalVar(PrivateVD);
EmitOMPCopy(*this, Type, OriginalAddr, PrivateAddr, DestVD, SrcVD,
AssignOp);
}
++IRef;
++ISrcRef;
++IDestRef;
}
}
}
if (IsLastIterCond) {
EmitBlock(DoneBB, /*IsFinished=*/true);
}
}
void CodeGenFunction::EmitOMPReductionClauseInit(
const OMPExecutableDirective &D,
CodeGenFunction::OMPPrivateScope &PrivateScope) {
for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
auto *C = cast<OMPReductionClause>(*I);
auto ILHS = C->lhs_exprs().begin();
auto IRHS = C->rhs_exprs().begin();
for (auto IRef : C->varlists()) {
auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
// Store the address of the original variable associated with the LHS
// implicit variable.
PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> llvm::Value *{
DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
CapturedStmtInfo->lookup(OrigVD) != nullptr,
IRef->getType(), VK_LValue, IRef->getExprLoc());
return EmitLValue(&DRE).getAddress();
});
// Emit reduction copy.
bool IsRegistered =
PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> llvm::Value *{
// Emit private VarDecl with reduction init.
EmitDecl(*PrivateVD);
return GetAddrOfLocalVar(PrivateVD);
});
assert(IsRegistered && "private var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
++ILHS, ++IRHS;
}
}
}
void CodeGenFunction::EmitOMPReductionClauseFinal(
const OMPExecutableDirective &D) {
llvm::SmallVector<const Expr *, 8> LHSExprs;
llvm::SmallVector<const Expr *, 8> RHSExprs;
llvm::SmallVector<const Expr *, 8> ReductionOps;
bool HasAtLeastOneReduction = false;
for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
HasAtLeastOneReduction = true;
auto *C = cast<OMPReductionClause>(*I);
LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
}
if (HasAtLeastOneReduction) {
// Emit a nowait reduction if the nowait clause is present or the directive
// is a parallel directive (it always has an implicit barrier).
CGM.getOpenMPRuntime().emitReduction(
*this, D.getLocEnd(), LHSExprs, RHSExprs, ReductionOps,
D.getSingleClause(OMPC_nowait) ||
isOpenMPParallelDirective(D.getDirectiveKind()) ||
D.getDirectiveKind() == OMPD_simd,
D.getDirectiveKind() == OMPD_simd);
}
}
static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
const OMPExecutableDirective &S,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) {
auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
auto CapturedStruct = CGF.GenerateCapturedStmtArgument(*CS);
auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
if (auto C = S.getSingleClause(OMPC_num_threads)) {
CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
auto NumThreadsClause = cast<OMPNumThreadsClause>(C);
auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
/*IgnoreResultAssign*/ true);
CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
CGF, NumThreads, NumThreadsClause->getLocStart());
}
if (auto *C = S.getSingleClause(OMPC_proc_bind)) {
CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
auto *ProcBindClause = cast<OMPProcBindClause>(C);
CGF.CGM.getOpenMPRuntime().emitProcBindClause(
CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getLocStart());
}
const Expr *IfCond = nullptr;
if (auto C = S.getSingleClause(OMPC_if)) {
IfCond = cast<OMPIfClause>(C)->getCondition();
}
CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
CapturedStruct, IfCond);
}
void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
LexicalScope Scope(*this, S.getSourceRange());
// Emit parallel region as a standalone region.
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
OMPPrivateScope PrivateScope(CGF);
bool Copyins = CGF.EmitOMPCopyinClause(S);
bool Firstprivates = CGF.EmitOMPFirstprivateClause(S, PrivateScope);
if (Copyins || Firstprivates) {
// Emit an implicit barrier to synchronize threads and avoid data races on
// initialization of firstprivate variables, or on propagation of the
// master thread's values of threadprivate variables to the local instances
// of those variables in all other implicit threads.
CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
OMPD_unknown);
}
CGF.EmitOMPPrivateClause(S, PrivateScope);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
CGF.EmitOMPReductionClauseFinal(S);
// Emit implicit barrier at the end of the 'parallel' directive.
CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
OMPD_unknown);
};
emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen);
}
void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
JumpDest LoopExit) {
RunCleanupsScope BodyScope(*this);
// Update counter values on the current iteration.
for (auto I : D.updates()) {
EmitIgnoredExpr(I);
}
// Update the linear variables.
for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
auto *C = cast<OMPLinearClause>(*I);
for (auto U : C->updates()) {
EmitIgnoredExpr(U);
}
}
// On a continue in the body, jump to the end.
auto Continue = getJumpDestInCurrentScope("omp.body.continue");
BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
// Emit loop body.
EmitStmt(D.getBody());
// The end (updates/cleanups).
EmitBlock(Continue.getBlock());
BreakContinueStack.pop_back();
// TODO: Update lastprivates if the SeparateIter flag is true.
// This will be implemented in a follow-up OMPLastprivateClause patch, but
// the result should still be correct without it, as we do not make these
// variables private yet.
}
void CodeGenFunction::EmitOMPInnerLoop(
const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
const Expr *IncExpr,
const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
// Start the loop with a block that tests the condition.
auto CondBlock = createBasicBlock("omp.inner.for.cond");
EmitBlock(CondBlock);
LoopStack.push(CondBlock);
// If there are any cleanups between here and the loop-exit scope,
// create a block to stage a loop exit along.
auto ExitBlock = LoopExit.getBlock();
if (RequiresCleanup)
ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
auto LoopBody = createBasicBlock("omp.inner.for.body");
// Emit condition.
EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
if (ExitBlock != LoopExit.getBlock()) {
EmitBlock(ExitBlock);
EmitBranchThroughCleanup(LoopExit);
}
EmitBlock(LoopBody);
incrementProfileCounter(&S);
// Create a block for the increment.
auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
BodyGen(*this);
// Emit "IV = IV + 1" and a back-edge to the condition block.
EmitBlock(Continue.getBlock());
EmitIgnoredExpr(IncExpr);
PostIncGen(*this);
BreakContinueStack.pop_back();
EmitBranch(CondBlock);
LoopStack.pop();
// Emit the fall-through block.
EmitBlock(LoopExit.getBlock());
}
void CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
// Emit inits for the linear variables.
for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
auto *C = cast<OMPLinearClause>(*I);
for (auto Init : C->inits()) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
auto *OrigVD = cast<VarDecl>(
cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())->getDecl());
DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
CapturedStmtInfo->lookup(OrigVD) != nullptr,
VD->getInit()->getType(), VK_LValue,
VD->getInit()->getExprLoc());
AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
EmitExprAsInit(&DRE, VD,
MakeAddrLValue(Emission.getAllocatedAddress(),
VD->getType(), Emission.Alignment),
/*capturedByInit=*/false);
EmitAutoVarCleanups(Emission);
}
// Emit the linear steps for the linear clauses.
// If a step is not constant, it is pre-calculated before the loop.
if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
// Emit calculation of the linear step.
EmitIgnoredExpr(CS);
}
}
}
static void emitLinearClauseFinal(CodeGenFunction &CGF,
const OMPLoopDirective &D) {
// Emit the final values of the linear variables.
for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
auto *C = cast<OMPLinearClause>(*I);
auto IC = C->varlist_begin();
for (auto F : C->finals()) {
auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
(*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
auto *OrigAddr = CGF.EmitLValue(&DRE).getAddress();
CodeGenFunction::OMPPrivateScope VarScope(CGF);
VarScope.addPrivate(OrigVD,
[OrigAddr]() -> llvm::Value *{ return OrigAddr; });
(void)VarScope.Privatize();
CGF.EmitIgnoredExpr(F);
++IC;
}
}
}
static void emitAlignedClause(CodeGenFunction &CGF,
const OMPExecutableDirective &D) {
for (auto &&I = D.getClausesOfKind(OMPC_aligned); I; ++I) {
auto *Clause = cast<OMPAlignedClause>(*I);
unsigned ClauseAlignment = 0;
if (auto AlignmentExpr = Clause->getAlignment()) {
auto AlignmentCI =
cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
ClauseAlignment = static_cast<unsigned>(AlignmentCI->getZExtValue());
}
for (auto E : Clause->varlists()) {
unsigned Alignment = ClauseAlignment;
if (Alignment == 0) {
// OpenMP [2.8.1, Description]
// If no optional parameter is specified, implementation-defined default
// alignments for SIMD instructions on the target platforms are assumed.
Alignment =
CGF.getContext()
.toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
E->getType()->getPointeeType()))
.getQuantity();
}
assert((Alignment == 0 || llvm::isPowerOf2_32(Alignment)) &&
"alignment is not power of 2");
if (Alignment != 0) {
llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
CGF.EmitAlignmentAssumption(PtrValue, Alignment);
}
}
}
}
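// Illustrative source-level example (editorial): for
//   #pragma omp simd aligned(p : 32)
// this emits an alignment assumption on 'p' (via EmitAlignmentAssumption,
// ultimately an llvm.assume), letting the vectorizer assume 32-byte
// alignment; with no explicit value, the target's default SIMD alignment
// for the pointee type is used instead.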
static void emitPrivateLoopCounters(CodeGenFunction &CGF,
CodeGenFunction::OMPPrivateScope &LoopScope,
ArrayRef<Expr *> Counters) {
for (auto *E : Counters) {
auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
(void)LoopScope.addPrivate(VD, [&]() -> llvm::Value *{
// Emit var without initialization.
auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
CGF.EmitAutoVarCleanups(VarEmission);
return VarEmission.getAllocatedAddress();
});
}
}
static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
const Expr *Cond, llvm::BasicBlock *TrueBlock,
llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
{
CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
emitPrivateLoopCounters(CGF, PreCondScope, S.counters());
(void)PreCondScope.Privatize();
// Get initial values of real counters.
for (auto I : S.inits()) {
CGF.EmitIgnoredExpr(I);
}
}
// Check that the loop is executed at least once.
CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
}
static void
emitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D,
CodeGenFunction::OMPPrivateScope &PrivateScope) {
for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) {
auto *C = cast<OMPLinearClause>(*I);
for (auto *E : C->varlists()) {
auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
bool IsRegistered = PrivateScope.addPrivate(VD, [&]()->llvm::Value * {
// Emit var without initialization.
auto VarEmission = CGF.EmitAutoVarAlloca(*VD);
CGF.EmitAutoVarCleanups(VarEmission);
return VarEmission.getAllocatedAddress();
});
assert(IsRegistered && "linear var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
}
}
}
static void emitSafelenClause(CodeGenFunction &CGF,
const OMPExecutableDirective &D) {
if (auto *C =
cast_or_null<OMPSafelenClause>(D.getSingleClause(OMPC_safelen))) {
RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
/*ignoreResult=*/true);
llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
CGF.LoopStack.setVectorizerWidth(Val->getZExtValue());
// In the presence of a finite 'safelen', it may be unsafe to mark all
// the memory instructions parallel, because loop-carried
// dependences of 'safelen' iterations are possible.
CGF.LoopStack.setParallel(false);
}
}
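// Illustrative source-level example (editorial): for
//   #pragma omp simd safelen(8)
// the vectorizer width hint becomes 8 and the loop is conservatively not
// marked parallel, since iterations 'safelen' or more apart may still
// carry dependences.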
void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) {
// Walk the clauses and process the safelen clause.
LoopStack.setParallel();
LoopStack.setVectorizerEnable(true);
emitSafelenClause(*this, D);
}
void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &D) {
auto IC = D.counters().begin();
for (auto F : D.finals()) {
auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
if (LocalDeclMap.lookup(OrigVD) || CapturedStmtInfo->lookup(OrigVD)) {
DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
CapturedStmtInfo->lookup(OrigVD) != nullptr,
(*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
auto *OrigAddr = EmitLValue(&DRE).getAddress();
OMPPrivateScope VarScope(*this);
VarScope.addPrivate(OrigVD,
[OrigAddr]() -> llvm::Value *{ return OrigAddr; });
(void)VarScope.Privatize();
EmitIgnoredExpr(F);
}
++IC;
}
emitLinearClauseFinal(*this, D);
}
void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
// if (PreCond) {
// for (IV in 0..LastIteration) BODY;
// <Final counter/linear vars updates>;
// }
//
// Emit: if (PreCond) - begin.
// If the condition constant folds and can be elided, avoid emitting the
// whole loop.
bool CondConstant;
llvm::BasicBlock *ContBlock = nullptr;
if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
if (!CondConstant)
return;
} else {
auto *ThenBlock = CGF.createBasicBlock("simd.if.then");
ContBlock = CGF.createBasicBlock("simd.if.end");
emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
CGF.getProfileCount(&S));
CGF.EmitBlock(ThenBlock);
CGF.incrementProfileCounter(&S);
}
// Emit the loop iteration variable.
const Expr *IVExpr = S.getIterationVariable();
const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
CGF.EmitVarDecl(*IVDecl);
CGF.EmitIgnoredExpr(S.getInit());
// Emit the iterations count variable.
// If it is not a variable, Sema decided to recalculate the iteration count
// wherever it is needed (e.g., it is foldable into a constant).
if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
// Emit calculation of the iterations count.
CGF.EmitIgnoredExpr(S.getCalcLastIteration());
}
CGF.EmitOMPSimdInit(S);
emitAlignedClause(CGF, S);
CGF.EmitOMPLinearClauseInit(S);
bool HasLastprivateClause;
{
OMPPrivateScope LoopScope(CGF);
emitPrivateLoopCounters(CGF, LoopScope, S.counters());
emitPrivateLinearVars(CGF, S, LoopScope);
CGF.EmitOMPPrivateClause(S, LoopScope);
CGF.EmitOMPReductionClauseInit(S, LoopScope);
HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
(void)LoopScope.Privatize();
CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
S.getInc(),
[&S](CodeGenFunction &CGF) {
CGF.EmitOMPLoopBody(S, JumpDest());
CGF.EmitStopPoint(&S);
},
[](CodeGenFunction &) {});
// Emit final copy of the lastprivate variables at the end of loops.
if (HasLastprivateClause) {
CGF.EmitOMPLastprivateClauseFinal(S);
}
CGF.EmitOMPReductionClauseFinal(S);
}
CGF.EmitOMPSimdFinal(S);
// Emit: if (PreCond) - end.
if (ContBlock) {
CGF.EmitBranch(ContBlock);
CGF.EmitBlock(ContBlock, true);
}
};
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}
void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
const OMPLoopDirective &S,
OMPPrivateScope &LoopScope,
bool Ordered, llvm::Value *LB,
llvm::Value *UB, llvm::Value *ST,
llvm::Value *IL, llvm::Value *Chunk) {
auto &RT = CGM.getOpenMPRuntime();
// Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind);
assert((Ordered ||
!RT.isStaticNonchunked(ScheduleKind, /*Chunked=*/Chunk != nullptr)) &&
"static non-chunked schedule does not need outer loop");
// Emit outer loop.
//
// OpenMP [2.7.1, Loop Construct, Description, table 2-1]
// When schedule(dynamic,chunk_size) is specified, the iterations are
// distributed to threads in the team in chunks as the threads request them.
// Each thread executes a chunk of iterations, then requests another chunk,
// until no chunks remain to be distributed. Each chunk contains chunk_size
// iterations, except for the last chunk to be distributed, which may have
// fewer iterations. When no chunk_size is specified, it defaults to 1.
//
// When schedule(guided,chunk_size) is specified, the iterations are assigned
// to threads in the team in chunks as the executing threads request them.
// Each thread executes a chunk of iterations, then requests another chunk,
// until no chunks remain to be assigned. For a chunk_size of 1, the size of
// each chunk is proportional to the number of unassigned iterations divided
// by the number of threads in the team, decreasing to 1. For a chunk_size
// with value k (greater than 1), the size of each chunk is determined in the
// same way, with the restriction that the chunks do not contain fewer than k
// iterations (except for the last chunk to be assigned, which may have fewer
// than k iterations).
//
// When schedule(auto) is specified, the decision regarding scheduling is
// delegated to the compiler and/or runtime system. The programmer gives the
// implementation the freedom to choose any possible mapping of iterations to
// threads in the team.
//
// When schedule(runtime) is specified, the decision regarding scheduling is
// deferred until run time, and the schedule and chunk size are taken from the
// run-sched-var ICV. If the ICV is set to auto, the schedule is
// implementation defined
//
// while(__kmpc_dispatch_next(&LB, &UB)) {
// idx = LB;
// while (idx <= UB) { BODY; ++idx;
// __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
// } // inner loop
// }
//
// OpenMP [2.7.1, Loop Construct, Description, table 2-1]
// When schedule(static, chunk_size) is specified, iterations are divided into
// chunks of size chunk_size, and the chunks are assigned to the threads in
// the team in a round-robin fashion in the order of the thread number.
//
// while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
// while (idx <= UB) { BODY; ++idx; } // inner loop
// LB = LB + ST;
// UB = UB + ST;
// }
//
const Expr *IVExpr = S.getIterationVariable();
const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
RT.emitForInit(
*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned, Ordered, IL, LB,
(DynamicOrOrdered ? EmitAnyExpr(S.getLastIteration()).getScalarVal()
: UB),
ST, Chunk);
auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
// Start the loop with a block that tests the condition.
auto CondBlock = createBasicBlock("omp.dispatch.cond");
EmitBlock(CondBlock);
LoopStack.push(CondBlock);
llvm::Value *BoolCondVal = nullptr;
if (!DynamicOrOrdered) {
// UB = min(UB, GlobalUB)
EmitIgnoredExpr(S.getEnsureUpperBound());
// IV = LB
EmitIgnoredExpr(S.getInit());
// IV < UB
BoolCondVal = EvaluateExprAsBool(S.getCond());
} else {
BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned,
IL, LB, UB, ST);
}
// If there are any cleanups between here and the loop-exit scope,
// create a block to stage a loop exit along.
auto ExitBlock = LoopExit.getBlock();
if (LoopScope.requiresCleanups())
ExitBlock = createBasicBlock("omp.dispatch.cleanup");
auto LoopBody = createBasicBlock("omp.dispatch.body");
Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
if (ExitBlock != LoopExit.getBlock()) {
EmitBlock(ExitBlock);
EmitBranchThroughCleanup(LoopExit);
}
EmitBlock(LoopBody);
// Emit "IV = LB" (in case of static schedule, we have already calculated new
// LB for loop condition and emitted it above).
if (DynamicOrOrdered)
EmitIgnoredExpr(S.getInit());
// Create a block for the increment.
auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
// Generate !llvm.loop.parallel metadata for loads and stores for loops
// with dynamic/guided scheduling and without an ordered clause.
if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
LoopStack.setParallel((ScheduleKind == OMPC_SCHEDULE_dynamic ||
ScheduleKind == OMPC_SCHEDULE_guided) &&
!Ordered);
} else {
EmitOMPSimdInit(S);
}
SourceLocation Loc = S.getLocStart();
EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
[&S, LoopExit](CodeGenFunction &CGF) {
CGF.EmitOMPLoopBody(S, LoopExit);
CGF.EmitStopPoint(&S);
},
[Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
if (Ordered) {
CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
CGF, Loc, IVSize, IVSigned);
}
});
EmitBlock(Continue.getBlock());
BreakContinueStack.pop_back();
if (!DynamicOrOrdered) {
// Emit "LB = LB + Stride", "UB = UB + Stride".
EmitIgnoredExpr(S.getNextLowerBound());
EmitIgnoredExpr(S.getNextUpperBound());
}
EmitBranch(CondBlock);
LoopStack.pop();
// Emit the fall-through block.
EmitBlock(LoopExit.getBlock());
// Tell the runtime we are done.
if (!DynamicOrOrdered)
RT.emitForStaticFinish(*this, S.getLocEnd());
}
/// \brief Emit a helper variable and return corresponding lvalue.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
const DeclRefExpr *Helper) {
auto VDecl = cast<VarDecl>(Helper->getDecl());
CGF.EmitVarDecl(*VDecl);
return CGF.EmitLValue(Helper);
}
static std::pair<llvm::Value * /*Chunk*/, OpenMPScheduleClauseKind>
emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
bool OuterRegion) {
// Detect the loop schedule kind and chunk.
auto ScheduleKind = OMPC_SCHEDULE_unknown;
llvm::Value *Chunk = nullptr;
if (auto *C =
cast_or_null<OMPScheduleClause>(S.getSingleClause(OMPC_schedule))) {
ScheduleKind = C->getScheduleKind();
if (const auto *Ch = C->getChunkSize()) {
if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) {
if (OuterRegion) {
const VarDecl *ImpVar = cast<VarDecl>(ImpRef->getDecl());
CGF.EmitVarDecl(*ImpVar);
CGF.EmitStoreThroughLValue(
CGF.EmitAnyExpr(Ch),
CGF.MakeNaturalAlignAddrLValue(CGF.GetAddrOfLocalVar(ImpVar),
ImpVar->getType()));
} else {
Ch = ImpRef;
}
}
if (!C->getHelperChunkSize() || !OuterRegion) {
Chunk = CGF.EmitScalarExpr(Ch);
Chunk = CGF.EmitScalarConversion(Chunk, Ch->getType(),
S.getIterationVariable()->getType());
}
}
}
return std::make_pair(Chunk, ScheduleKind);
}
bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
// Emit the loop iteration variable.
auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
EmitVarDecl(*IVDecl);
// Emit the iterations count variable.
// If it is not a variable, Sema decided to recalculate the iteration count
// wherever it is needed (e.g., it is foldable into a constant).
if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
// Emit calculation of the iterations count.
EmitIgnoredExpr(S.getCalcLastIteration());
}
auto &RT = CGM.getOpenMPRuntime();
bool HasLastprivateClause;
// Check pre-condition.
{
// Skip the entire loop if we don't meet the precondition.
// If the condition constant folds and can be elided, avoid emitting the
// whole loop.
bool CondConstant;
llvm::BasicBlock *ContBlock = nullptr;
if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
if (!CondConstant)
return false;
} else {
auto *ThenBlock = createBasicBlock("omp.precond.then");
ContBlock = createBasicBlock("omp.precond.end");
emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
getProfileCount(&S));
EmitBlock(ThenBlock);
incrementProfileCounter(&S);
}
emitAlignedClause(*this, S);
EmitOMPLinearClauseInit(S);
// Emit 'then' code.
{
// Emit helper vars inits.
LValue LB =
EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
LValue UB =
EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
LValue ST =
EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
LValue IL =
EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
OMPPrivateScope LoopScope(*this);
if (EmitOMPFirstprivateClause(S, LoopScope)) {
// Emit implicit barrier to synchronize threads and avoid data races on
// initialization of firstprivate variables.
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
OMPD_unknown);
}
EmitOMPPrivateClause(S, LoopScope);
HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
EmitOMPReductionClauseInit(S, LoopScope);
emitPrivateLoopCounters(*this, LoopScope, S.counters());
emitPrivateLinearVars(*this, S, LoopScope);
(void)LoopScope.Privatize();
// Detect the loop schedule kind and chunk.
llvm::Value *Chunk;
OpenMPScheduleClauseKind ScheduleKind;
auto ScheduleInfo =
emitScheduleClause(*this, S, /*OuterRegion=*/false);
Chunk = ScheduleInfo.first;
ScheduleKind = ScheduleInfo.second;
const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
const bool Ordered = S.getSingleClause(OMPC_ordered) != nullptr;
if (RT.isStaticNonchunked(ScheduleKind,
/* Chunked */ Chunk != nullptr) &&
!Ordered) {
if (isOpenMPSimdDirective(S.getDirectiveKind())) {
EmitOMPSimdInit(S);
}
// OpenMP [2.7.1, Loop Construct, Description, table 2-1]
// When no chunk_size is specified, the iteration space is divided into
// chunks that are approximately equal in size, and at most one chunk is
// distributed to each thread. Note that the size of the chunks is
// unspecified in this case.
RT.emitForInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
Ordered, IL.getAddress(), LB.getAddress(),
UB.getAddress(), ST.getAddress());
auto LoopExit = getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
// UB = min(UB, GlobalUB);
EmitIgnoredExpr(S.getEnsureUpperBound());
// IV = LB;
EmitIgnoredExpr(S.getInit());
// while (idx <= UB) { BODY; ++idx; }
EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
S.getInc(),
[&S, LoopExit](CodeGenFunction &CGF) {
CGF.EmitOMPLoopBody(S, LoopExit);
CGF.EmitStopPoint(&S);
},
[](CodeGenFunction &) {});
EmitBlock(LoopExit.getBlock());
// Tell the runtime we are done.
RT.emitForStaticFinish(*this, S.getLocStart());
} else {
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
EmitOMPForOuterLoop(ScheduleKind, S, LoopScope, Ordered,
LB.getAddress(), UB.getAddress(), ST.getAddress(),
IL.getAddress(), Chunk);
}
EmitOMPReductionClauseFinal(S);
// Emit final copy of the lastprivate variables if IsLastIter != 0.
if (HasLastprivateClause)
EmitOMPLastprivateClauseFinal(
S, Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart())));
}
if (isOpenMPSimdDirective(S.getDirectiveKind())) {
EmitOMPSimdFinal(S);
}
// We're now done with the loop, so jump to the continuation block.
if (ContBlock) {
EmitBranch(ContBlock);
EmitBlock(ContBlock, true);
}
}
return HasLastprivateClause;
}
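// Illustrative sketch of the static, non-chunked path above (runtime entry
// names assume the usual libomp ABI; a sketch, not the verbatim output):
//   call @__kmpc_for_static_init_4(loc, tid, <sched>, &IL, &LB, &UB, &ST, 1, 1)
//   UB = min(UB, GlobalUB); IV = LB;
//   while (IV <= UB) { <body>; ++IV; }
//   call @__kmpc_for_static_fini(loc, tid)
// Chunked or 'ordered' schedules instead go through EmitOMPForOuterLoop,
// which requests work chunks from the runtime.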
void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
LexicalScope Scope(*this, S.getSourceRange());
bool HasLastprivates = false;
auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
};
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_for, CodeGen);
// Emit an implicit barrier at the end.
if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) {
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
}
}
void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
LexicalScope Scope(*this, S.getSourceRange());
bool HasLastprivates = false;
auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) {
HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
};
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
// Emit an implicit barrier at the end.
if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) {
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for);
}
}
static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
const Twine &Name,
llvm::Value *Init = nullptr) {
auto LVal = CGF.MakeNaturalAlignAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
if (Init)
CGF.EmitScalarInit(Init, LVal);
return LVal;
}
OpenMPDirectiveKind
CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
auto *CS = dyn_cast<CompoundStmt>(Stmt);
if (CS && CS->size() > 1) {
bool HasLastprivates = false;
auto &&CodeGen = [&S, CS, &HasLastprivates](CodeGenFunction &CGF) {
auto &C = CGF.CGM.getContext();
auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
// Emit helper vars inits.
LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
CGF.Builder.getInt32(0));
auto *GlobalUBVal = CGF.Builder.getInt32(CS->size() - 1);
LValue UB =
createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
CGF.Builder.getInt32(1));
LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
CGF.Builder.getInt32(0));
// Loop counter.
LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue);
CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
// Generate condition for loop.
BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue,
OK_Ordinary, S.getLocStart(),
/*fpContractable=*/false);
// Increment for loop counter.
UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue,
OK_Ordinary, S.getLocStart());
auto BodyGen = [CS, &S, &IV](CodeGenFunction &CGF) {
// Iterate through all sections and emit a switch construct:
// switch (IV) {
// case 0:
// <SectionStmt[0]>;
// break;
// ...
// case <NumSection> - 1:
// <SectionStmt[<NumSection> - 1]>;
// break;
// }
// .omp.sections.exit:
auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
auto *SwitchStmt = CGF.Builder.CreateSwitch(
CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB,
CS->size());
unsigned CaseNumber = 0;
for (auto *SubStmt : CS->children()) {
auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
CGF.EmitBlock(CaseBB);
SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
CGF.EmitStmt(SubStmt);
CGF.EmitBranch(ExitBB);
++CaseNumber;
}
CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
};
CodeGenFunction::OMPPrivateScope LoopScope(CGF);
if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
// Emit implicit barrier to synchronize threads and avoid data races on
// initialization of firstprivate variables.
CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
OMPD_unknown);
}
CGF.EmitOMPPrivateClause(S, LoopScope);
HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
CGF.EmitOMPReductionClauseInit(S, LoopScope);
(void)LoopScope.Privatize();
// Emit static non-chunked loop.
CGF.CGM.getOpenMPRuntime().emitForInit(
CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32,
/*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
LB.getAddress(), UB.getAddress(), ST.getAddress());
// UB = min(UB, GlobalUB);
auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart());
auto *MinUBGlobalUB = CGF.Builder.CreateSelect(
CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
// IV = LB;
CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV);
// while (idx <= UB) { BODY; ++idx; }
CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen,
[](CodeGenFunction &) {});
// Tell the runtime we are done.
CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart());
CGF.EmitOMPReductionClauseFinal(S);
// Emit final copy of the lastprivate variables if IsLastIter != 0.
if (HasLastprivates)
CGF.EmitOMPLastprivateClauseFinal(
S, CGF.Builder.CreateIsNotNull(
CGF.EmitLoadOfScalar(IL, S.getLocStart())));
};
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen);
// Emit barrier for lastprivates only if 'sections' directive has 'nowait'
// clause. Otherwise the barrier will be generated by the codegen for the
// directive.
if (HasLastprivates && S.getSingleClause(OMPC_nowait)) {
// Emit implicit barrier to synchronize threads and avoid data races on
// initialization of firstprivate variables.
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(),
OMPD_unknown);
}
return OMPD_sections;
}
// If only one section is found - no need to generate loop, emit as a single
// region.
bool HasFirstprivates;
// No need to generate reductions for sections with single section region, we
// can use original shared variables for all operations.
bool HasReductions = !S.getClausesOfKind(OMPC_reduction).empty();
// No need to generate lastprivates for sections with single section region,
// we can use original shared variable for all calculations with barrier at
// the end of the sections.
bool HasLastprivates = !S.getClausesOfKind(OMPC_lastprivate).empty();
auto &&CodeGen = [Stmt, &S, &HasFirstprivates](CodeGenFunction &CGF) {
CodeGenFunction::OMPPrivateScope SingleScope(CGF);
HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
CGF.EmitOMPPrivateClause(S, SingleScope);
(void)SingleScope.Privatize();
CGF.EmitStmt(Stmt);
};
CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
llvm::None, llvm::None, llvm::None,
llvm::None);
// Emit barrier for firstprivates, lastprivates or reductions only if
// 'sections' directive has 'nowait' clause. Otherwise the barrier will be
// generated by the codegen for the directive.
if ((HasFirstprivates || HasLastprivates || HasReductions) &&
S.getSingleClause(OMPC_nowait)) {
// Emit implicit barrier to synchronize threads and avoid data races on
// initialization of firstprivate variables.
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_unknown);
}
return OMPD_single;
}
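// Illustrative example of the two paths above: for
//   #pragma omp sections
//   { { A(); } ... }
// with several sections, the construct is lowered as a static worksharing
// loop over the section index IV in [0, NumSections - 1] whose body is the
// switch emitted by BodyGen; with a single section it degenerates to a
// 'single'-style region, so no loop, reductions, or lastprivates are needed.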
void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
LexicalScope Scope(*this, S.getSourceRange());
OpenMPDirectiveKind EmittedAs = EmitSections(S);
// Emit an implicit barrier at the end.
if (!S.getSingleClause(OMPC_nowait)) {
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), EmittedAs);
}
}
void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
LexicalScope Scope(*this, S.getSourceRange());
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
CGF.EnsureInsertPoint();
};
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_section, CodeGen);
}
void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
llvm::SmallVector<const Expr *, 8> CopyprivateVars;
llvm::SmallVector<const Expr *, 8> DestExprs;
llvm::SmallVector<const Expr *, 8> SrcExprs;
llvm::SmallVector<const Expr *, 8> AssignmentOps;
  // Check if there are any 'copyprivate' clauses associated with this
  // 'single' construct.
  // Build a list of copyprivate variables along with helper expressions
  // (<source>, <destination>, <destination> = <source> expressions).
for (auto &&I = S.getClausesOfKind(OMPC_copyprivate); I; ++I) {
auto *C = cast<OMPCopyprivateClause>(*I);
CopyprivateVars.append(C->varlists().begin(), C->varlists().end());
DestExprs.append(C->destination_exprs().begin(),
C->destination_exprs().end());
SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
AssignmentOps.append(C->assignment_ops().begin(),
C->assignment_ops().end());
}
LexicalScope Scope(*this, S.getSourceRange());
// Emit code for 'single' region along with 'copyprivate' clauses
bool HasFirstprivates;
auto &&CodeGen = [&S, &HasFirstprivates](CodeGenFunction &CGF) {
CodeGenFunction::OMPPrivateScope SingleScope(CGF);
HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope);
CGF.EmitOMPPrivateClause(S, SingleScope);
(void)SingleScope.Privatize();
CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
CGF.EnsureInsertPoint();
};
CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(),
CopyprivateVars, DestExprs, SrcExprs,
AssignmentOps);
  // Emit an implicit barrier at the end (to avoid a data race on firstprivate
  // init, or if no 'nowait' clause was specified) unless there is a
  // 'copyprivate' clause.
if ((!S.getSingleClause(OMPC_nowait) || HasFirstprivates) &&
CopyprivateVars.empty()) {
CGM.getOpenMPRuntime().emitBarrierCall(
*this, S.getLocStart(),
S.getSingleClause(OMPC_nowait) ? OMPD_unknown : OMPD_single);
}
}
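// Illustrative example: for
//   #pragma omp single copyprivate(a)
//   a = compute();
// the vectors above collect 'a', its destination/source helper expressions,
// and the 'dst = src' assignment op; the runtime uses them to broadcast the
// executing thread's value of 'a' to the other threads in the team.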
void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
LexicalScope Scope(*this, S.getSourceRange());
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
CGF.EnsureInsertPoint();
};
CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart());
}
void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
LexicalScope Scope(*this, S.getSourceRange());
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
CGF.EnsureInsertPoint();
};
CGM.getOpenMPRuntime().emitCriticalRegion(
*this, S.getDirectiveName().getAsString(), CodeGen, S.getLocStart());
}
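// Illustrative note: the body of
//   #pragma omp critical(name)
// is bracketed by runtime enter/exit calls keyed by the directive's name, so
// all 'critical' regions sharing a name exclude one another program-wide.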
void CodeGenFunction::EmitOMPParallelForDirective(
const OMPParallelForDirective &S) {
// Emit directive as a combined directive that consists of two implicit
// directives: 'parallel' with 'for' directive.
LexicalScope Scope(*this, S.getSourceRange());
(void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.EmitOMPWorksharingLoop(S);
// Emit implicit barrier at the end of parallel region, but this barrier
// is at the end of 'for' directive, so emit it as the implicit barrier for
// this 'for' directive.
CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
OMPD_parallel);
};
emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
}
void CodeGenFunction::EmitOMPParallelForSimdDirective(
const OMPParallelForSimdDirective &S) {
// Emit directive as a combined directive that consists of two implicit
// directives: 'parallel' with 'for' directive.
LexicalScope Scope(*this, S.getSourceRange());
(void)emitScheduleClause(*this, S, /*OuterRegion=*/true);
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.EmitOMPWorksharingLoop(S);
// Emit implicit barrier at the end of parallel region, but this barrier
// is at the end of 'for' directive, so emit it as the implicit barrier for
// this 'for' directive.
CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
OMPD_parallel);
};
emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen);
}
void CodeGenFunction::EmitOMPParallelSectionsDirective(
const OMPParallelSectionsDirective &S) {
// Emit directive as a combined directive that consists of two implicit
// directives: 'parallel' with 'sections' directive.
LexicalScope Scope(*this, S.getSourceRange());
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
(void)CGF.EmitSections(S);
// Emit implicit barrier at the end of parallel region.
CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
OMPD_parallel);
};
emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen);
}
void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
// Emit outlined function for task construct.
LexicalScope Scope(*this, S.getSourceRange());
auto CS = cast<CapturedStmt>(S.getAssociatedStmt());
auto CapturedStruct = GenerateCapturedStmtArgument(*CS);
auto *I = CS->getCapturedDecl()->param_begin();
auto *PartId = std::next(I);
  // The first function argument for tasks is a thread id, the second one is a
  // part id (0 for tied tasks, >= 0 for untied tasks).
llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
// Get list of private variables.
llvm::SmallVector<const Expr *, 8> PrivateVars;
llvm::SmallVector<const Expr *, 8> PrivateCopies;
for (auto &&I = S.getClausesOfKind(OMPC_private); I; ++I) {
auto *C = cast<OMPPrivateClause>(*I);
auto IRef = C->varlist_begin();
for (auto *IInit : C->private_copies()) {
auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
PrivateVars.push_back(*IRef);
PrivateCopies.push_back(IInit);
}
++IRef;
}
}
EmittedAsPrivate.clear();
// Get list of firstprivate variables.
llvm::SmallVector<const Expr *, 8> FirstprivateVars;
llvm::SmallVector<const Expr *, 8> FirstprivateCopies;
llvm::SmallVector<const Expr *, 8> FirstprivateInits;
for (auto &&I = S.getClausesOfKind(OMPC_firstprivate); I; ++I) {
auto *C = cast<OMPFirstprivateClause>(*I);
auto IRef = C->varlist_begin();
auto IElemInitRef = C->inits().begin();
for (auto *IInit : C->private_copies()) {
auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
FirstprivateVars.push_back(*IRef);
FirstprivateCopies.push_back(IInit);
FirstprivateInits.push_back(*IElemInitRef);
}
++IRef, ++IElemInitRef;
}
}
// Build list of dependences.
llvm::SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 8>
Dependences;
for (auto &&I = S.getClausesOfKind(OMPC_depend); I; ++I) {
auto *C = cast<OMPDependClause>(*I);
for (auto *IRef : C->varlists()) {
Dependences.push_back(std::make_pair(C->getDependencyKind(), IRef));
}
}
auto &&CodeGen = [PartId, &S, &PrivateVars, &FirstprivateVars](
CodeGenFunction &CGF) {
// Set proper addresses for generated private copies.
auto *CS = cast<CapturedStmt>(S.getAssociatedStmt());
OMPPrivateScope Scope(CGF);
if (!PrivateVars.empty() || !FirstprivateVars.empty()) {
auto *CopyFn = CGF.Builder.CreateAlignedLoad(
CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)),
CGF.PointerAlignInBytes);
auto *PrivatesPtr = CGF.Builder.CreateAlignedLoad(
CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)),
CGF.PointerAlignInBytes);
// Map privates.
llvm::SmallVector<std::pair<const VarDecl *, llvm::Value *>, 16>
PrivatePtrs;
llvm::SmallVector<llvm::Value *, 16> CallArgs;
CallArgs.push_back(PrivatesPtr);
for (auto *E : PrivateVars) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
auto *PrivatePtr =
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
CallArgs.push_back(PrivatePtr);
}
for (auto *E : FirstprivateVars) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
auto *PrivatePtr =
CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()));
PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr));
CallArgs.push_back(PrivatePtr);
}
CGF.EmitRuntimeCall(CopyFn, CallArgs);
for (auto &&Pair : PrivatePtrs) {
auto *Replacement =
CGF.Builder.CreateAlignedLoad(Pair.second, CGF.PointerAlignInBytes);
Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
}
}
(void)Scope.Privatize();
if (*PartId) {
// TODO: emit code for untied tasks.
}
CGF.EmitStmt(CS->getCapturedStmt());
};
auto OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
S, *I, OMPD_task, CodeGen);
// Check if we should emit tied or untied task.
bool Tied = !S.getSingleClause(OMPC_untied);
// Check if the task is final
llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
if (auto *Clause = S.getSingleClause(OMPC_final)) {
// If the condition constant folds and can be elided, try to avoid emitting
// the condition and the dead arm of the if/else.
auto *Cond = cast<OMPFinalClause>(Clause)->getCondition();
bool CondConstant;
if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
Final.setInt(CondConstant);
else
Final.setPointer(EvaluateExprAsBool(Cond));
} else {
// By default the task is not final.
Final.setInt(/*IntVal=*/false);
}
auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
const Expr *IfCond = nullptr;
if (auto C = S.getSingleClause(OMPC_if)) {
IfCond = cast<OMPIfClause>(C)->getCondition();
}
CGM.getOpenMPRuntime().emitTaskCall(
*this, S.getLocStart(), S, Tied, Final, OutlinedFn, SharedsTy,
CapturedStruct, IfCond, PrivateVars, PrivateCopies, FirstprivateVars,
FirstprivateCopies, FirstprivateInits, Dependences);
}
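// Illustrative example: for
//   #pragma omp task firstprivate(x) depend(in: y)
//   use(x);
// the lists built above carry {x, its private copy, its init expression} and
// the dependence pair {OMPC_DEPEND_in, y}. In the outlined body, CodeGen
// loads the runtime-provided privates block (captured parameter 2), calls the
// task's copy function (parameter 3) to fill one pointer per private, and
// remaps each captured variable to its private copy before emitting the body.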
void CodeGenFunction::EmitOMPTaskyieldDirective(
const OMPTaskyieldDirective &S) {
CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart());
}
void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier);
}
void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart());
}
void CodeGenFunction::EmitOMPTaskgroupDirective(
const OMPTaskgroupDirective &S) {
LexicalScope Scope(*this, S.getSourceRange());
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
CGF.EnsureInsertPoint();
};
CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getLocStart());
}
void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> {
if (auto C = S.getSingleClause(/*K*/ OMPC_flush)) {
auto FlushClause = cast<OMPFlushClause>(C);
return llvm::makeArrayRef(FlushClause->varlist_begin(),
FlushClause->varlist_end());
}
return llvm::None;
}(), S.getLocStart());
}
void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
LexicalScope Scope(*this, S.getSourceRange());
auto &&CodeGen = [&S](CodeGenFunction &CGF) {
CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
CGF.EnsureInsertPoint();
};
CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart());
}
static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
QualType SrcType, QualType DestType) {
assert(CGF.hasScalarEvaluationKind(DestType) &&
"DestType must have scalar evaluation kind.");
assert(!Val.isAggregate() && "Must be a scalar or complex.");
return Val.isScalar()
? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType)
: CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType,
DestType);
}
static CodeGenFunction::ComplexPairTy
convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
QualType DestType) {
assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
"DestType must have complex evaluation kind.");
CodeGenFunction::ComplexPairTy ComplexVal;
if (Val.isScalar()) {
// Convert the input element to the element type of the complex.
auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
auto ScalarVal =
CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestElementType);
ComplexVal = CodeGenFunction::ComplexPairTy(
ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
} else {
assert(Val.isComplex() && "Must be a scalar or complex.");
auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
auto DestElementType = DestType->castAs<ComplexType>()->getElementType();
ComplexVal.first = CGF.EmitScalarConversion(
Val.getComplexVal().first, SrcElementType, DestElementType);
ComplexVal.second = CGF.EmitScalarConversion(
Val.getComplexVal().second, SrcElementType, DestElementType);
}
return ComplexVal;
}
static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst,
LValue LVal, RValue RVal) {
if (LVal.isGlobalReg()) {
CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
} else {
CGF.EmitAtomicStore(RVal, LVal, IsSeqCst ? llvm::SequentiallyConsistent
: llvm::Monotonic,
LVal.isVolatile(), /*IsInit=*/false);
}
}
static void emitSimpleStore(CodeGenFunction &CGF, LValue LVal, RValue RVal,
QualType RValTy) {
switch (CGF.getEvaluationKind(LVal.getType())) {
case TEK_Scalar:
CGF.EmitStoreThroughLValue(
RValue::get(convertToScalarValue(CGF, RVal, RValTy, LVal.getType())),
LVal);
break;
case TEK_Complex:
CGF.EmitStoreOfComplex(
convertToComplexValue(CGF, RVal, RValTy, LVal.getType()), LVal,
/*isInit=*/false);
break;
case TEK_Aggregate:
llvm_unreachable("Must be a scalar or complex.");
}
}
static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
const Expr *X, const Expr *V,
SourceLocation Loc) {
// v = x;
assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
LValue XLValue = CGF.EmitLValue(X);
LValue VLValue = CGF.EmitLValue(V);
RValue Res = XLValue.isGlobalReg()
? CGF.EmitLoadOfLValue(XLValue, Loc)
: CGF.EmitAtomicLoad(XLValue, Loc,
IsSeqCst ? llvm::SequentiallyConsistent
: llvm::Monotonic,
XLValue.isVolatile());
// OpenMP, 2.12.6, atomic Construct
// Any atomic construct with a seq_cst clause forces the atomically
// performed operation to include an implicit flush operation without a
// list.
if (IsSeqCst)
CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
  emitSimpleStore(CGF, VLValue, Res, X->getType().getNonReferenceType());
}
static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst,
const Expr *X, const Expr *E,
SourceLocation Loc) {
// x = expr;
assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
// OpenMP, 2.12.6, atomic Construct
// Any atomic construct with a seq_cst clause forces the atomically
// performed operation to include an implicit flush operation without a
// list.
if (IsSeqCst)
CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}
static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
RValue Update,
BinaryOperatorKind BO,
llvm::AtomicOrdering AO,
bool IsXLHSInRHSPart) {
auto &Context = CGF.CGM.getContext();
  // Allow atomicrmw only if 'x' and 'update' are integer values, the lvalue
  // for the 'x' expression is simple, and atomics are allowed for the given
  // type on the target platform.
if (BO == BO_Comma || !Update.isScalar() ||
!Update.getScalarVal()->getType()->isIntegerTy() ||
!X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
(Update.getScalarVal()->getType() !=
X.getAddress()->getType()->getPointerElementType())) ||
!X.getAddress()->getType()->getPointerElementType()->isIntegerTy() ||
!Context.getTargetInfo().hasBuiltinAtomic(
Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
return std::make_pair(false, RValue::get(nullptr));
llvm::AtomicRMWInst::BinOp RMWOp;
switch (BO) {
case BO_Add:
RMWOp = llvm::AtomicRMWInst::Add;
break;
case BO_Sub:
if (!IsXLHSInRHSPart)
return std::make_pair(false, RValue::get(nullptr));
RMWOp = llvm::AtomicRMWInst::Sub;
break;
case BO_And:
RMWOp = llvm::AtomicRMWInst::And;
break;
case BO_Or:
RMWOp = llvm::AtomicRMWInst::Or;
break;
case BO_Xor:
RMWOp = llvm::AtomicRMWInst::Xor;
break;
case BO_LT:
RMWOp = X.getType()->hasSignedIntegerRepresentation()
? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
: llvm::AtomicRMWInst::Max)
: (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
: llvm::AtomicRMWInst::UMax);
break;
case BO_GT:
RMWOp = X.getType()->hasSignedIntegerRepresentation()
? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
: llvm::AtomicRMWInst::Min)
: (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
: llvm::AtomicRMWInst::UMin);
break;
case BO_Assign:
RMWOp = llvm::AtomicRMWInst::Xchg;
break;
case BO_Mul:
case BO_Div:
case BO_Rem:
case BO_Shl:
case BO_Shr:
case BO_LAnd:
case BO_LOr:
return std::make_pair(false, RValue::get(nullptr));
case BO_PtrMemD:
case BO_PtrMemI:
case BO_LE:
case BO_GE:
case BO_EQ:
case BO_NE:
case BO_AddAssign:
case BO_SubAssign:
case BO_AndAssign:
case BO_OrAssign:
case BO_XorAssign:
case BO_MulAssign:
case BO_DivAssign:
case BO_RemAssign:
case BO_ShlAssign:
case BO_ShrAssign:
case BO_Comma:
llvm_unreachable("Unsupported atomic update operation");
}
auto *UpdateVal = Update.getScalarVal();
if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
UpdateVal = CGF.Builder.CreateIntCast(
IC, X.getAddress()->getType()->getPointerElementType(),
X.getType()->hasSignedIntegerRepresentation());
}
auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
return std::make_pair(true, RValue::get(Res));
}
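// Illustrative example: for
//   #pragma omp atomic update
//   x += n;   // x and n of matching integer type
// the fast path above emits a single instruction, roughly
//   %old = atomicrmw add i32* %x, i32 %n monotonic
// (a 'seq_cst' clause upgrades the ordering); operations or operand types
// that do not map onto atomicrmw fall back to the compare-and-swap loop in
// EmitOMPAtomicSimpleUpdateExpr below.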
std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
llvm::AtomicOrdering AO, SourceLocation Loc,
const llvm::function_ref<RValue(RValue)> &CommonGen) {
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval binop expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr;
  // x = expr Op x; -> expr binop xrval;
auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
if (!Res.first) {
if (X.isGlobalReg()) {
// Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
// 'xrval'.
EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
} else {
// Perform compare-and-swap procedure.
EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
}
}
return Res;
}
static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
const Expr *X, const Expr *E,
const Expr *UE, bool IsXLHSInRHSPart,
SourceLocation Loc) {
assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
"Update expr in 'atomic update' must be a binary operator.");
auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
  // Update expressions are allowed to have the following forms:
  // x binop= expr; -> xrval binop expr;
  // x++, ++x -> xrval + 1;
  // x--, --x -> xrval - 1;
  // x = x binop expr; -> xrval binop expr;
  // x = expr Op x; -> expr binop xrval;
assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
LValue XLValue = CGF.EmitLValue(X);
RValue ExprRValue = CGF.EmitAnyExpr(E);
auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
auto Gen =
[&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
return CGF.EmitAnyExpr(UE);
};
(void)CGF.EmitOMPAtomicSimpleUpdateExpr(
XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
// OpenMP, 2.12.6, atomic Construct
// Any atomic construct with a seq_cst clause forces the atomically
// performed operation to include an implicit flush operation without a
// list.
if (IsSeqCst)
CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}
static RValue convertToType(CodeGenFunction &CGF, RValue Value,
QualType SourceType, QualType ResType) {
switch (CGF.getEvaluationKind(ResType)) {
case TEK_Scalar:
return RValue::get(convertToScalarValue(CGF, Value, SourceType, ResType));
case TEK_Complex: {
auto Res = convertToComplexValue(CGF, Value, SourceType, ResType);
return RValue::getComplex(Res.first, Res.second);
}
case TEK_Aggregate:
break;
}
llvm_unreachable("Must be a scalar or complex.");
}
static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
bool IsPostfixUpdate, const Expr *V,
const Expr *X, const Expr *E,
const Expr *UE, bool IsXLHSInRHSPart,
SourceLocation Loc) {
assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
RValue NewVVal;
LValue VLValue = CGF.EmitLValue(V);
LValue XLValue = CGF.EmitLValue(X);
RValue ExprRValue = CGF.EmitAnyExpr(E);
auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
QualType NewVValType;
if (UE) {
// 'x' is updated with some additional value.
assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
"Update expr in 'atomic capture' must be a binary operator.");
auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
    // Update expressions are allowed to have the following forms:
    // x binop= expr; -> xrval binop expr;
    // x++, ++x -> xrval + 1;
    // x--, --x -> xrval - 1;
    // x = x binop expr; -> xrval binop expr;
    // x = expr Op x; -> expr binop xrval;
auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
NewVValType = XRValExpr->getType();
auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
IsPostfixUpdate](RValue XRValue) -> RValue {
CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
RValue Res = CGF.EmitAnyExpr(UE);
NewVVal = IsPostfixUpdate ? XRValue : Res;
return Res;
};
auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
if (Res.first) {
// 'atomicrmw' instruction was generated.
if (IsPostfixUpdate) {
// Use old value from 'atomicrmw'.
NewVVal = Res.second;
} else {
// 'atomicrmw' does not provide new value, so evaluate it using old
// value of 'x'.
CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
NewVVal = CGF.EmitAnyExpr(UE);
}
}
} else {
// 'x' is simply rewritten with some 'expr'.
NewVValType = X->getType().getNonReferenceType();
ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
X->getType().getNonReferenceType());
auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) -> RValue {
NewVVal = XRValue;
return ExprRValue;
};
// Try to perform atomicrmw xchg, otherwise simple exchange.
auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
Loc, Gen);
if (Res.first) {
// 'atomicrmw' instruction was generated.
NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
}
}
// Emit post-update store to 'v' of old/new 'x' value.
emitSimpleStore(CGF, VLValue, NewVVal, NewVValType);
// OpenMP, 2.12.6, atomic Construct
// Any atomic construct with a seq_cst clause forces the atomically
// performed operation to include an implicit flush operation without a
// list.
if (IsSeqCst)
CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
}
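// Illustrative example of the postfix/prefix distinction handled above:
//   #pragma omp atomic capture
//   v = x++;   // IsPostfixUpdate: 'v' receives the old value of 'x'
//   v = ++x;   // otherwise: 'v' receives the updated value
// When an atomicrmw was emitted, the old value comes back in Res.second and
// the new value, when needed, is recomputed from it via the opaque mappings.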
static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
bool IsSeqCst, bool IsPostfixUpdate,
const Expr *X, const Expr *V, const Expr *E,
const Expr *UE, bool IsXLHSInRHSPart,
SourceLocation Loc) {
switch (Kind) {
case OMPC_read:
EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc);
break;
case OMPC_write:
EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc);
break;
case OMPC_unknown:
case OMPC_update:
EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc);
break;
case OMPC_capture:
EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE,
IsXLHSInRHSPart, Loc);
break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
case OMPC_private:
case OMPC_firstprivate:
case OMPC_lastprivate:
case OMPC_reduction:
case OMPC_safelen:
case OMPC_collapse:
case OMPC_default:
case OMPC_seq_cst:
case OMPC_shared:
case OMPC_linear:
case OMPC_aligned:
case OMPC_copyin:
case OMPC_copyprivate:
case OMPC_flush:
case OMPC_proc_bind:
case OMPC_schedule:
case OMPC_ordered:
case OMPC_nowait:
case OMPC_untied:
case OMPC_threadprivate:
case OMPC_depend:
case OMPC_mergeable:
llvm_unreachable("Clause is not allowed in 'omp atomic'.");
}
}
void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
bool IsSeqCst = S.getSingleClause(/*K=*/OMPC_seq_cst);
OpenMPClauseKind Kind = OMPC_unknown;
for (auto *C : S.clauses()) {
// Find first clause (skip seq_cst clause, if it is first).
if (C->getClauseKind() != OMPC_seq_cst) {
Kind = C->getClauseKind();
break;
}
}
const auto *CS =
S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) {
enterFullExpression(EWC);
}
// Processing for statements under 'atomic capture'.
if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) {
for (const auto *C : Compound->body()) {
if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) {
enterFullExpression(EWC);
}
}
}
LexicalScope Scope(*this, S.getSourceRange());
auto &&CodeGen = [&S, Kind, IsSeqCst](CodeGenFunction &CGF) {
EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(),
S.getV(), S.getExpr(), S.getUpdateExpr(),
S.isXLHSInRHSPart(), S.getLocStart());
};
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_atomic, CodeGen);
}
void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &) {
llvm_unreachable("CodeGen for 'omp target' is not supported yet.");
}
void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &) {
llvm_unreachable("CodeGen for 'omp teams' is not supported yet.");
}
void CodeGenFunction::EmitOMPCancellationPointDirective(
const OMPCancellationPointDirective &S) {
CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getLocStart(),
S.getCancelRegion());
}
void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
CGM.getOpenMPRuntime().emitCancelCall(*this, S.getLocStart(),
S.getCancelRegion());
}
CodeGenFunction::JumpDest
CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
if (Kind == OMPD_parallel || Kind == OMPD_task)
return ReturnBlock;
else if (Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections)
return BreakContinueStack.empty() ? JumpDest()
: BreakContinueStack.back().BreakBlock;
return JumpDest();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGExprCXX.cpp | //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGHLSLRuntime.h" // HLSL Change
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
static RequiredArgs commonEmitCXXMemberOrOperatorCall(
CodeGenFunction &CGF, const CXXMethodDecl *MD, llvm::Value *Callee,
ReturnValueSlot ReturnValue, llvm::Value *This, llvm::Value *ImplicitParam,
QualType ImplicitParamTy, const CallExpr *CE, CallArgList &Args,
ArrayRef<const Stmt *> argList// HLSL Change - use updated argList for out parameter.
) {
assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
isa<CXXOperatorCallExpr>(CE));
assert(MD->isInstance() &&
"Trying to emit a member or operator call expr on a static method!");
// C++11 [class.mfct.non-static]p2:
// If a non-static member function of a class X is called for an object that
// is not of type X, or of a type derived from X, the behavior is undefined.
SourceLocation CallLoc;
if (CE)
CallLoc = CE->getExprLoc();
CGF.EmitTypeCheck(
isa<CXXConstructorDecl>(MD) ? CodeGenFunction::TCK_ConstructorCall
: CodeGenFunction::TCK_MemberCall,
CallLoc, This, CGF.getContext().getRecordType(MD->getParent()));
// Push the this ptr.
Args.add(RValue::get(This), MD->getThisType(CGF.getContext()));
// If there is an implicit parameter (e.g. VTT), emit it.
if (ImplicitParam) {
Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
}
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
// And the rest of the call args.
if (CE) {
// Special case: skip first argument of CXXOperatorCall (it is "this").
unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
CGF.EmitCallArgs(Args, FPT,
argList.begin() + ArgsToSkip, // HLSL Change - use updated argList for out parameter.
argList.end(), // HLSL Change - use updated argList for out parameter.
CE->getDirectCallee());
} else {
assert(
FPT->getNumParams() == 0 &&
"No CallExpr specified for function with non-zero number of arguments");
}
return required;
}
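// Illustrative note: for an overloaded operator call such as 'a == b' on a
// class type, the first CallExpr argument is the implicit object 'a'; it is
// skipped above because 'This' was already pushed, so only 'b' is emitted as
// a regular argument.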
RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
const CallExpr *CE) {
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
CallArgList Args;
// HLSL Change Begins
llvm::SmallVector<LValue, 8> castArgList;
llvm::SmallVector<LValue, 8> lifetimeCleanupList;
  // The argList of the CallExpr; it may be updated for out parameters.
llvm::SmallVector<const Stmt *, 8> argList(CE->arg_begin(), CE->arg_end());
// out param conversion
CodeGenFunction::HLSLOutParamScope OutParamScope(*this);
auto MapTemp = [&](const VarDecl *LocalVD, llvm::Value *TmpArg) {
OutParamScope.addTemp(LocalVD, TmpArg);
};
if (getLangOpts().HLSL) {
if (const FunctionDecl *FD = CE->getDirectCallee())
CGM.getHLSLRuntime().EmitHLSLOutParamConversionInit(*this, FD, CE,
castArgList, argList, lifetimeCleanupList, MapTemp);
}
// HLSL Change Ends
RequiredArgs required = commonEmitCXXMemberOrOperatorCall(
*this, MD, Callee, ReturnValue, This, ImplicitParam, ImplicitParamTy, CE,
Args, argList); // HLSL Change - use updated argList.
RValue CallVal = EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
Callee, ReturnValue, Args, MD);
// HLSL Change Begins
// out param conversion
// conversion and copy back after the call
if (getLangOpts().HLSL)
CGM.getHLSLRuntime().EmitHLSLOutParamConversionCopyBack(*this, castArgList, lifetimeCleanupList);
// HLSL Change Ends
return CallVal;
}
RValue CodeGenFunction::EmitCXXStructorCall(
const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
const CallExpr *CE, StructorType Type) {
CallArgList Args;
// HLSL Change Begins
llvm::SmallVector<LValue, 8> castArgList;
llvm::SmallVector<LValue, 8> lifetimeCleanupList;
  // The argList of the CallExpr; it may be updated for out parameters.
llvm::SmallVector<const Stmt *, 8> argList(CE->arg_begin(), CE->arg_end());
// out param conversion
CodeGenFunction::HLSLOutParamScope OutParamScope(*this);
auto MapTemp = [&](const VarDecl *LocalVD, llvm::Value *TmpArg) {
OutParamScope.addTemp(LocalVD, TmpArg);
};
if (getLangOpts().HLSL) {
if (const FunctionDecl *FD = CE->getDirectCallee())
CGM.getHLSLRuntime().EmitHLSLOutParamConversionInit(*this, FD, CE,
castArgList, argList, lifetimeCleanupList, MapTemp);
}
// HLSL Change Ends
commonEmitCXXMemberOrOperatorCall(*this, MD, Callee, ReturnValue, This,
ImplicitParam, ImplicitParamTy, CE, Args,
argList); // HLSL Change - use updated argList.
RValue CallVal = EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(MD, Type),
Callee, ReturnValue, Args, MD);
// HLSL Change Begins
// out param conversion
// conversion and copy back after the call
if (getLangOpts().HLSL)
CGM.getHLSLRuntime().EmitHLSLOutParamConversionCopyBack(*this, castArgList, lifetimeCleanupList);
// HLSL Change Ends
return CallVal;
}
static CXXRecordDecl *getCXXRecord(const Expr *E) {
QualType T = E->getType();
if (const PointerType *PTy = T->getAs<PointerType>())
T = PTy->getPointeeType();
const RecordType *Ty = T->castAs<RecordType>();
return cast<CXXRecordDecl>(Ty->getDecl());
}
// Note: This function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
ReturnValueSlot ReturnValue) {
const Expr *callee = CE->getCallee()->IgnoreParens();
if (isa<BinaryOperator>(callee))
return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
const MemberExpr *ME = cast<MemberExpr>(callee);
const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
if (MD->isStatic()) {
// The method is static, emit it as we would a regular call.
llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
return EmitCall(getContext().getPointerType(MD->getType()), Callee, CE,
ReturnValue);
}
bool HasQualifier = ME->hasQualifier();
NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
bool IsArrow = ME->isArrow();
const Expr *Base = ME->getBase();
return EmitCXXMemberOrOperatorMemberCallExpr(
CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
}
RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
const Expr *Base) {
assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));
// HLSL Change Begins
if (hlsl::IsHLSLMatType(Base->getType())) {
if (const CXXOperatorCallExpr *opCall = dyn_cast<CXXOperatorCallExpr>(CE)) {
assert(opCall->getOperator() == OverloadedOperatorKind::OO_Subscript &&
"must be subscript");
llvm::Value *This = nullptr;
if (Base->getValueKind() != ExprValueKind::VK_RValue) {
This = EmitLValue(Base).getAddress();
} else {
llvm::Value *Val = EmitScalarExpr(Base);
This = CreateTempAlloca(Val->getType());
CGM.getHLSLRuntime().EmitHLSLMatrixStore(*this, Val, This, Base->getType());
}
llvm::Value *Idx = EmitScalarExpr(CE->getArg(1));
llvm::Type *RetTy =
ConvertType(getContext().getLValueReferenceType(CE->getType()));
llvm::Value *matSub = CGM.getHLSLRuntime().EmitHLSLMatrixSubscript(
*this, RetTy, This, Idx, Base->getType());
return RValue::get(matSub);
}
}
if (hlsl::IsHLSLVecType(Base->getType())) {
if (const CXXOperatorCallExpr *opCall = dyn_cast<CXXOperatorCallExpr>(CE)) {
assert(opCall->getOperator() == OverloadedOperatorKind::OO_Subscript &&
"must be subscript");
llvm::Value *This = nullptr;
if (Base->getValueKind() != ExprValueKind::VK_RValue) {
LValue LV = EmitLValue(Base);
if (LV.isSimple()) {
This = LV.getAddress();
if (isa<ExtMatrixElementExpr>(Base)) {
llvm::Value *Val = Builder.CreateLoad(This);
This = CreateTempAlloca(Val->getType());
Builder.CreateStore(Val, This);
}
} else {
assert(LV.isExtVectorElt() && "must be ext vector here");
This = LV.getExtVectorAddr();
llvm::Constant *Elts = LV.getExtVectorElts();
llvm::Type *Ty = ConvertType(LV.getType());
llvm::Constant *zero = Builder.getInt32(0);
llvm::Value *TmpThis = CreateTempAlloca(Ty);
QualType ElTy = hlsl::GetElementTypeOrType(Base->getType());
bool IsBool = ElTy->isSpecificBuiltinType(BuiltinType::Bool);
for (unsigned i = 0; i < Ty->getVectorNumElements(); i++) {
llvm::Value *EltIdx = Elts->getAggregateElement(i);
llvm::Value *EltGEP = Builder.CreateGEP(This, {zero, EltIdx});
llvm::Value *TmpEltGEP =
Builder.CreateGEP(TmpThis, {zero, Builder.getInt32(i)});
llvm::Value *Elt = Builder.CreateLoad(EltGEP);
if (IsBool)
Elt = Builder.CreateTrunc(
Elt, llvm::Type::getInt1Ty(getLLVMContext()));
Builder.CreateStore(Elt, TmpEltGEP);
}
This = TmpThis;
}
} else {
llvm::Value *Val = EmitScalarExpr(Base);
This = CreateTempAlloca(Val->getType());
Builder.CreateStore(Val, This);
}
bool isBool = false;
if (llvm::IntegerType *IT =
dyn_cast<llvm::IntegerType>(This->getType()
->getPointerElementType()
->getVectorElementType())) {
if (IT->getBitWidth() == 1) {
isBool = true;
}
}
llvm::Value *Idx = EmitScalarExpr(CE->getArg(1));
llvm::Constant *zero = llvm::ConstantInt::get(Idx->getType(), 0);
llvm::Value *Elt = Builder.CreateGEP(This, {zero, Idx});
if (isBool) {
// bool pointer is not i1 *.
llvm::Type *BoolTy = llvm::IntegerType::get(
getLLVMContext(), getContext().getTypeSize(CE->getType()));
Elt = Builder.CreateBitCast(
Elt, llvm::PointerType::get(
BoolTy, Elt->getType()->getPointerAddressSpace()));
}
return RValue::get(Elt);
}
}
if (hlsl::IsHLSLOutputPatchType(Base->getType()) ||
hlsl::IsHLSLInputPatchType(Base->getType())) {
if (const CXXOperatorCallExpr *opCall = dyn_cast<CXXOperatorCallExpr>(CE)) {
assert(opCall->getOperator() == OverloadedOperatorKind::OO_Subscript &&
"must be subscript");
llvm::Value *This = EmitLValue(Base).getAddress();
llvm::Value *Idx = EmitScalarExpr(CE->getArg(1));
llvm::Constant *zero = llvm::ConstantInt::get(Idx->getType(), 0);
llvm::Value *Elt = Builder.CreateGEP(This, { zero, Idx });
return RValue::get(Elt);
}
}
// HLSL Change Ends
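  // Illustrative note (HLSL): for a non-boolean vector, 'v[i]' above lowers
  // to an element GEP of the (possibly temporary) vector alloca, roughly
  //   %elt = getelementptr <4 x float>, <4 x float>* %v, i32 0, i32 %i
  // Booleans additionally bitcast the element pointer, because HLSL stores
  // bools with their in-memory width (i32) while the vector element is i1.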
// Compute the object pointer.
bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;
const CXXMethodDecl *DevirtualizedMethod = nullptr;
if (CanUseVirtualCall && CanDevirtualizeMemberFunctionCall(Base, MD)) {
const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
assert(DevirtualizedMethod);
const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
const Expr *Inner = Base->ignoreParenBaseCasts();
if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
MD->getReturnType().getCanonicalType())
// If the return types are not the same, this might be a case where more
// code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return
      // type of MD and has a prefix.
// For now we just avoid devirtualizing these covariant cases.
DevirtualizedMethod = nullptr;
else if (getCXXRecord(Inner) == DevirtualizedClass)
// If the class of the Inner expression is where the dynamic method
// is defined, build the this pointer from it.
Base = Inner;
else if (getCXXRecord(Base) != DevirtualizedClass) {
// If the method is defined in a class that is not the best dynamic
// one or the one of the full expression, we would have to build
// a derived-to-base cast to compute the correct this pointer, but
// we don't have support for that yet, so do a virtual call.
DevirtualizedMethod = nullptr;
}
}
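  // Illustrative example of the devirtualization above (a sketch):
  //   struct B { virtual int f(); };
  //   struct D final : B { int f() override; };
  //   int g(D &d) { return d.f(); }  // emitted as a direct call to D::f
  // The call is devirtualized when the dynamic type is provably known (e.g.
  // 'D' is final); covariant return types or a mismatched 'this' class
  // defeat it, as handled in the branches above.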
llvm::Value *This;
if (IsArrow)
This = EmitScalarExpr(Base);
else
This = EmitLValue(Base).getAddress();
if (MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion())) {
if (isa<CXXDestructorDecl>(MD)) return RValue::get(nullptr);
if (isa<CXXConstructorDecl>(MD) &&
cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
return RValue::get(nullptr);
if (!MD->getParent()->mayInsertExtraPadding()) {
if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
// We don't like to generate the trivial copy/move assignment operator
// when it isn't necessary; just produce the proper effect here.
// Special case: skip first argument of CXXOperatorCall (it is "this").
unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
llvm::Value *RHS =
EmitLValue(*(CE->arg_begin() + ArgsToSkip)).getAddress();
EmitAggregateAssign(This, RHS, CE->getType());
return RValue::get(This);
}
if (isa<CXXConstructorDecl>(MD) &&
cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
// Trivial move and copy ctor are the same.
assert(CE->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
EmitAggregateCopy(This, RHS, CE->arg_begin()->getType());
return RValue::get(This);
}
llvm_unreachable("unknown trivial member function");
}
}
// Compute the function type we're calling.
const CXXMethodDecl *CalleeDecl =
DevirtualizedMethod ? DevirtualizedMethod : MD;
const CGFunctionInfo *FInfo = nullptr;
if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
Dtor, StructorType::Complete);
else if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
Ctor, StructorType::Complete);
else
FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);
// C++ [class.virtual]p12:
// Explicit qualification with the scope operator (5.1) suppresses the
// virtual call mechanism.
//
// We also don't emit a virtual call if the base expression has a record type
// because then we know what the type is.
bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
llvm::Value *Callee;
if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
assert(CE->arg_begin() == CE->arg_end() &&
"Destructor shouldn't have explicit parameters");
assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
if (UseVirtualCall) {
CGM.getCXXABI().EmitVirtualDestructorCall(
*this, Dtor, Dtor_Complete, This, cast<CXXMemberCallExpr>(CE));
} else {
if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
else if (!DevirtualizedMethod)
Callee =
CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty);
else {
const CXXDestructorDecl *DDtor =
cast<CXXDestructorDecl>(DevirtualizedMethod);
Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
}
EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This,
/*ImplicitParam=*/nullptr, QualType(), CE);
}
return RValue::get(nullptr);
}
if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
} else if (UseVirtualCall) {
Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty,
CE->getLocStart());
} else {
if (SanOpts.has(SanitizerKind::CFINVCall) &&
MD->getParent()->isDynamicClass()) {
llvm::Value *VTable = GetVTablePtr(This, Int8PtrTy);
EmitVTablePtrCheckForCall(MD, VTable, CFITCK_NVCall, CE->getLocStart());
}
if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
else if (!DevirtualizedMethod)
Callee = CGM.GetAddrOfFunction(MD, Ty);
else {
Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
}
}
if (MD->isVirtual()) {
This = CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
*this, MD, This, UseVirtualCall);
}
return EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This,
/*ImplicitParam=*/nullptr, QualType(), CE);
}
RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
ReturnValueSlot ReturnValue) {
const BinaryOperator *BO =
cast<BinaryOperator>(E->getCallee()->IgnoreParens());
const Expr *BaseExpr = BO->getLHS();
const Expr *MemFnExpr = BO->getRHS();
const MemberPointerType *MPT =
MemFnExpr->getType()->castAs<MemberPointerType>();
const FunctionProtoType *FPT =
MPT->getPointeeType()->castAs<FunctionProtoType>();
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
// Get the member function pointer.
llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
// Emit the 'this' pointer.
llvm::Value *This;
if (BO->getOpcode() == BO_PtrMemI)
This = EmitScalarExpr(BaseExpr);
else
This = EmitLValue(BaseExpr).getAddress();
EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This,
QualType(MPT->getClass(), 0));
// Ask the ABI to load the callee. Note that This is modified.
llvm::Value *Callee =
CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This, MemFnPtr, MPT);
CallArgList Args;
QualType ThisType =
getContext().getPointerType(getContext().getTagDeclType(RD));
// Push the this ptr.
Args.add(RValue::get(This), ThisType);
RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
// And the rest of the call args
EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end(), E->getDirectCallee());
return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
Callee, ReturnValue, Args);
}
RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
const CXXMethodDecl *MD,
ReturnValueSlot ReturnValue) {
assert(MD->isInstance() &&
"Trying to emit a member call expr on a static method!");
return EmitCXXMemberOrOperatorMemberCallExpr(
E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
/*IsArrow=*/false, E->getArg(0));
}
RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
ReturnValueSlot ReturnValue) {
return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}
// HLSL Change Begins
RValue CodeGenFunction::EmitHLSLBuiltinCallExpr(const FunctionDecl *FD,
const CallExpr *E,
ReturnValueSlot ReturnValue) {
return CGM.getHLSLRuntime().EmitHLSLBuiltinCallExpr(*this, FD, E,
ReturnValue);
}
// HLSL Change Ends
static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
llvm::Value *DestPtr,
const CXXRecordDecl *Base) {
if (Base->isEmpty())
return;
DestPtr = CGF.EmitCastToVoidPtr(DestPtr);
const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
CharUnits Size = Layout.getNonVirtualSize();
CharUnits Align = Layout.getNonVirtualAlignment();
llvm::Value *SizeVal = CGF.CGM.getSize(Size);
// If the type contains a pointer to data member we can't memset it to zero.
// Instead, create a null constant and copy it to the destination.
// TODO: there are other patterns besides zero that we can usefully memset,
// like -1, which happens to be the pattern used by member-pointers.
// TODO: isZeroInitializable can be over-conservative in the case where a
// virtual base contains a member pointer.
if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);
llvm::GlobalVariable *NullVariable =
new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
/*isConstant=*/true,
llvm::GlobalVariable::PrivateLinkage,
NullConstant, Twine());
NullVariable->setAlignment(Align.getQuantity());
llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);
// Get and call the appropriate llvm.memcpy overload.
CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
return;
}
// Otherwise, just memset the whole thing to zero. This is legal
// because in LLVM, all default initializers (other than the ones we just
// handled above) are guaranteed to have a bit pattern of all zeros.
CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
Align.getQuantity());
}
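// Illustrative note (Itanium ABI behavior): a null data member pointer is
// represented as -1, not 0, so a base subobject containing one cannot be
// zero-filled; that is exactly the case the memcpy-from-a-null-constant
// path above exists to handle.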
void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
AggValueSlot Dest) {
assert(!Dest.isIgnored() && "Must have a destination!");
const CXXConstructorDecl *CD = E->getConstructor();
// If we require zero initialization before (or instead of) calling the
// constructor, as can be the case with a non-user-provided default
// constructor, emit the zero initialization now, unless destination is
// already zeroed.
if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
switch (E->getConstructionKind()) {
case CXXConstructExpr::CK_Delegating:
case CXXConstructExpr::CK_Complete:
EmitNullInitialization(Dest.getAddr(), E->getType());
break;
case CXXConstructExpr::CK_VirtualBase:
case CXXConstructExpr::CK_NonVirtualBase:
EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
break;
}
}
// If this is a call to a trivial default constructor, do nothing.
if (CD->isTrivial() && CD->isDefaultConstructor())
return;
// Elide the constructor if we're constructing from a temporary.
// The temporary check is required because Sema sets this on NRVO
// returns.
if (getLangOpts().ElideConstructors && E->isElidable()) {
assert(getContext().hasSameUnqualifiedType(E->getType(),
E->getArg(0)->getType()));
if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
EmitAggExpr(E->getArg(0), Dest);
return;
}
}
if (const ConstantArrayType *arrayType
= getContext().getAsConstantArrayType(E->getType())) {
EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(), E);
} else {
CXXCtorType Type = Ctor_Complete;
bool ForVirtualBase = false;
bool Delegating = false;
switch (E->getConstructionKind()) {
case CXXConstructExpr::CK_Delegating:
// We should be emitting a constructor; GlobalDecl will assert this
Type = CurGD.getCtorType();
Delegating = true;
break;
case CXXConstructExpr::CK_Complete:
Type = Ctor_Complete;
break;
case CXXConstructExpr::CK_VirtualBase:
ForVirtualBase = true;
LLVM_FALLTHROUGH; // HLSL Change
case CXXConstructExpr::CK_NonVirtualBase:
Type = Ctor_Base;
}
// Call the constructor.
EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest.getAddr(),
E);
}
}
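// Illustrative example of the elision check above: for 'S s = S();' with
// '-felide-constructors' (the default), the temporary is evaluated directly
// into 's' via EmitAggExpr, and no copy constructor call is emitted.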
void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
llvm::Value *Src,
const Expr *Exp) {
if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
Exp = E->getSubExpr();
assert(isa<CXXConstructExpr>(Exp) &&
"EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
const CXXConstructorDecl *CD = E->getConstructor();
RunCleanupsScope Scope(*this);
// If we require zero initialization before (or instead of) calling the
// constructor, as can be the case with a non-user-provided default
// constructor, emit the zero initialization now.
// FIXME. Do I still need this for a copy ctor synthesis?
if (E->requiresZeroInitialization())
EmitNullInitialization(Dest, E->getType());
assert(!getContext().getAsConstantArrayType(E->getType())
&& "EmitSynthesizedCXXCopyCtor - Copied-in Array");
EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
const CXXNewExpr *E) {
if (!E->isArray())
return CharUnits::Zero();
// No cookie is required if the operator new[] being used is the
// reserved placement operator new[].
if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
return CharUnits::Zero();
return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}
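// For illustration (Itanium ABI behavior; the exact size is ABI-defined):
// 'new T[n]' for a T with a non-trivial destructor typically prepends a
// size_t cookie recording 'n' so that 'delete[]' knows how many destructors
// to run; on a 64-bit target the allocation is then n * sizeof(T) + 8, and
// the pointer handed back to the program points just past the cookie.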
static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
const CXXNewExpr *e,
unsigned minElements,
llvm::Value *&numElements,
llvm::Value *&sizeWithoutCookie) {
QualType type = e->getAllocatedType();
if (!e->isArray()) {
CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
sizeWithoutCookie
= llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
return sizeWithoutCookie;
}
// The width of size_t.
unsigned sizeWidth = CGF.SizeTy->getBitWidth();
// Figure out the cookie size.
llvm::APInt cookieSize(sizeWidth,
CalculateCookiePadding(CGF, e).getQuantity());
// Emit the array size expression.
// We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
numElements = CGF.EmitScalarExpr(e->getArraySize());
assert(isa<llvm::IntegerType>(numElements->getType()));
  // The number of elements can have an arbitrary integer type;
// essentially, we need to multiply it by a constant factor, add a
// cookie size, and verify that the result is representable as a
// size_t. That's just a gloss, though, and it's wrong in one
// important way: if the count is negative, it's an error even if
// the cookie size would bring the total size >= 0.
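  // For example (illustrative): 'new T[-1]' for a one-byte T with an 8-byte
  // cookie would wrap around to a total of 7 if only the final sum were
  // checked, which is why the negative count is rejected explicitly below.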
bool isSigned
= e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
llvm::IntegerType *numElementsType
= cast<llvm::IntegerType>(numElements->getType());
unsigned numElementsWidth = numElementsType->getBitWidth();
// Compute the constant factor.
llvm::APInt arraySizeMultiplier(sizeWidth, 1);
while (const ConstantArrayType *CAT
= CGF.getContext().getAsConstantArrayType(type)) {
type = CAT->getElementType();
arraySizeMultiplier *= CAT->getSize();
}
CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
typeSizeMultiplier *= arraySizeMultiplier;
// This will be a size_t.
llvm::Value *size;
// If someone is doing 'new int[42]' there is no need to do a dynamic check.
// Don't bloat the -O0 code.
if (llvm::ConstantInt *numElementsC =
dyn_cast<llvm::ConstantInt>(numElements)) {
const llvm::APInt &count = numElementsC->getValue();
bool hasAnyOverflow = false;
// If 'count' was a negative number, it's an overflow.
if (isSigned && count.isNegative())
hasAnyOverflow = true;
// We want to do all this arithmetic in size_t. If numElements is
// wider than that, check whether it's already too big, and if so,
// overflow.
else if (numElementsWidth > sizeWidth &&
numElementsWidth - sizeWidth > count.countLeadingZeros())
hasAnyOverflow = true;
// Okay, compute a count at the right width.
llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
// If there is a brace-initializer, we cannot allocate fewer elements than
// there are initializers. If we do, that's treated like an overflow.
if (adjustedCount.ult(minElements))
hasAnyOverflow = true;
// Scale numElements by that. This might overflow, but we don't
// care because it only overflows if allocationSize does, too, and
// if that overflows then we shouldn't use this.
numElements = llvm::ConstantInt::get(CGF.SizeTy,
adjustedCount * arraySizeMultiplier);
// Compute the size before cookie, and track whether it overflowed.
bool overflow;
llvm::APInt allocationSize
= adjustedCount.umul_ov(typeSizeMultiplier, overflow);
hasAnyOverflow |= overflow;
// Add in the cookie, and check whether it's overflowed.
if (cookieSize != 0) {
// Save the current size without a cookie. This shouldn't be
// used if there was overflow.
sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
hasAnyOverflow |= overflow;
}
// On overflow, produce a -1 so operator new will fail.
if (hasAnyOverflow) {
size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
} else {
size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
}
// Otherwise, we might need to use the overflow intrinsics.
} else {
// There are up to five conditions we need to test for:
// 1) if isSigned, we need to check whether numElements is negative;
// 2) if numElementsWidth > sizeWidth, we need to check whether
// numElements is larger than something representable in size_t;
// 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that;
// 4) we need to compute
// sizeWithoutCookie := numElements * typeSizeMultiplier
// and check whether it overflows; and
// 5) if we need a cookie, we need to compute
// size := sizeWithoutCookie + cookieSize
// and check whether it overflows.
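    // Illustrative sketch of the result (hypothetical IR, 64-bit target):
    // for 'new S[n]' with a signed 32-bit 'n' and sizeof(S) == 12, the code
    // below boils down to roughly
    //   %n64 = sext i32 %n to i64
    //   %mul = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %n64, i64 12)
    //   %size = select i1 %ovf, i64 -1, i64 %prod
    // where a negative 'n' forces the multiply to overflow, yielding -1.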
llvm::Value *hasOverflow = nullptr;
// If numElementsWidth > sizeWidth, then one way or another, we're
// going to have to do a comparison for (2), and this happens to
// take care of (1), too.
if (numElementsWidth > sizeWidth) {
llvm::APInt threshold(numElementsWidth, 1);
threshold <<= sizeWidth;
llvm::Value *thresholdV
= llvm::ConstantInt::get(numElementsType, threshold);
hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
// Otherwise, if we're signed, we want to sext up to size_t.
} else if (isSigned) {
if (numElementsWidth < sizeWidth)
numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
// If there's a non-1 type size multiplier, then we can do the
// signedness check at the same time as we do the multiply
// because a negative number times anything will cause an
// unsigned overflow. Otherwise, we have to do it here. But at least
// in this case, we can subsume the >= minElements check.
if (typeSizeMultiplier == 1)
hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
llvm::ConstantInt::get(CGF.SizeTy, minElements));
// Otherwise, zext up to size_t if necessary.
} else if (numElementsWidth < sizeWidth) {
numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
}
assert(numElements->getType() == CGF.SizeTy);
if (minElements) {
// Don't allow allocation of fewer elements than we have initializers.
if (!hasOverflow) {
hasOverflow = CGF.Builder.CreateICmpULT(numElements,
llvm::ConstantInt::get(CGF.SizeTy, minElements));
} else if (numElementsWidth > sizeWidth) {
// The other existing overflow subsumes this check.
// We do an unsigned comparison, since any signed value < -1 is
// taken care of either above or below.
hasOverflow = CGF.Builder.CreateOr(hasOverflow,
CGF.Builder.CreateICmpULT(numElements,
llvm::ConstantInt::get(CGF.SizeTy, minElements)));
}
}
size = numElements;
// Multiply by the type size if necessary. This multiplier
// includes all the factors for nested arrays.
//
// This step also causes numElements to be scaled up by the
// nested-array factor if necessary. Overflow on this computation
// can be ignored because the result shouldn't be used if
// allocation fails.
if (typeSizeMultiplier != 1) {
llvm::Value *umul_with_overflow
= CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
llvm::Value *tsmV =
llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
llvm::Value *result =
CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});
llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
if (hasOverflow)
hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
else
hasOverflow = overflowed;
size = CGF.Builder.CreateExtractValue(result, 0);
// Also scale up numElements by the array size multiplier.
if (arraySizeMultiplier != 1) {
// If the base element type size is 1, then we can re-use the
// multiply we just did.
if (typeSize.isOne()) {
assert(arraySizeMultiplier == typeSizeMultiplier);
numElements = size;
// Otherwise we need a separate multiply.
} else {
llvm::Value *asmV =
llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
numElements = CGF.Builder.CreateMul(numElements, asmV);
}
}
} else {
// numElements doesn't need to be scaled.
assert(arraySizeMultiplier == 1);
}
// Add in the cookie size if necessary.
if (cookieSize != 0) {
sizeWithoutCookie = size;
llvm::Value *uadd_with_overflow
= CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
llvm::Value *result =
CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});
llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
if (hasOverflow)
hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
else
hasOverflow = overflowed;
size = CGF.Builder.CreateExtractValue(result, 0);
}
// If we had any possibility of dynamic overflow, make a select to
// overwrite 'size' with an all-ones value, which should cause
// operator new to throw.
if (hasOverflow)
size = CGF.Builder.CreateSelect(hasOverflow,
llvm::Constant::getAllOnesValue(CGF.SizeTy),
size);
}
if (cookieSize == 0)
sizeWithoutCookie = size;
else
assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
return size;
}
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
QualType AllocType, llvm::Value *NewPtr) {
// FIXME: Refactor with EmitExprAsInit.
CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
switch (CGF.getEvaluationKind(AllocType)) {
case TEK_Scalar:
CGF.EmitScalarInit(Init, nullptr,
CGF.MakeAddrLValue(NewPtr, AllocType, Alignment), false);
return;
case TEK_Complex:
CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType,
Alignment),
/*isInit*/ true);
return;
case TEK_Aggregate: {
AggValueSlot Slot
= AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
CGF.EmitAggExpr(Init, Slot);
return;
}
}
llvm_unreachable("bad evaluation kind");
}
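// Illustrative examples of the three paths above: 'new int(7)' is
// TEK_Scalar (a plain store), 'new _Complex double(x)' is TEK_Complex, and
// 'new std::string("x")' is TEK_Aggregate, constructed in place through an
// AggValueSlot.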
void CodeGenFunction::EmitNewArrayInitializer(
const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
llvm::Value *BeginPtr, llvm::Value *NumElements,
llvm::Value *AllocSizeWithoutCookie) {
// If we have a type with trivial initialization and no initializer,
// there's nothing to do.
if (!E->hasInitializer())
return;
llvm::Value *CurPtr = BeginPtr;
unsigned InitListElements = 0;
const Expr *Init = E->getInitializer();
llvm::AllocaInst *EndOfInit = nullptr;
QualType::DestructionKind DtorKind = ElementType.isDestructedType();
EHScopeStack::stable_iterator Cleanup;
llvm::Instruction *CleanupDominator = nullptr;
// If the initializer is an initializer list, first do the explicit elements.
if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
InitListElements = ILE->getNumInits();
// If this is a multi-dimensional array new, we will initialize multiple
// elements with each init list element.
QualType AllocType = E->getAllocatedType();
if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
AllocType->getAsArrayTypeUnsafe())) {
unsigned AS = CurPtr->getType()->getPointerAddressSpace();
ElementTy = ConvertTypeForMem(AllocType);
llvm::Type *AllocPtrTy = ElementTy->getPointerTo(AS);
CurPtr = Builder.CreateBitCast(CurPtr, AllocPtrTy);
InitListElements *= getContext().getConstantArrayElementCount(CAT);
}
// Enter a partial-destruction Cleanup if necessary.
if (needsEHCleanup(DtorKind)) {
// In principle we could tell the Cleanup where we are more
// directly, but the control flow can get so varied here that it
// would actually be quite complex. Therefore we go through an
// alloca.
EndOfInit = CreateTempAlloca(BeginPtr->getType(), "array.init.end");
CleanupDominator = Builder.CreateStore(BeginPtr, EndOfInit);
pushIrregularPartialArrayCleanup(BeginPtr, EndOfInit, ElementType,
getDestroyer(DtorKind));
Cleanup = EHStack.stable_begin();
}
for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
// Tell the cleanup that it needs to destroy up to this
// element. TODO: some of these stores can be trivially
// observed to be unnecessary.
if (EndOfInit)
Builder.CreateStore(Builder.CreateBitCast(CurPtr, BeginPtr->getType()),
EndOfInit);
// FIXME: If the last initializer is an incomplete initializer list for
// an array, and we have an array filler, we can fold together the two
// initialization loops.
StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
ILE->getInit(i)->getType(), CurPtr);
CurPtr = Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr, 1,
"array.exp.next");
}
// The remaining elements are filled with the array filler expression.
Init = ILE->getArrayFiller();
// Extract the initializer for the individual array elements by pulling
// out the array filler from all the nested initializer lists. This avoids
// generating a nested loop for the initialization.
while (Init && Init->getType()->isConstantArrayType()) {
auto *SubILE = dyn_cast<InitListExpr>(Init);
if (!SubILE)
break;
assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
Init = SubILE->getArrayFiller();
}
// Switch back to initializing one base element at a time.
CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr->getType());
}
// Attempt to perform zero-initialization using memset.
auto TryMemsetInitialization = [&]() -> bool {
// FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
// we can initialize with a memset to -1.
if (!CGM.getTypes().isZeroInitializable(ElementType))
return false;
// Optimization: since zero initialization will just set the memory
// to all zeroes, generate a single memset to do it in one shot.
// Subtract out the size of any elements we've already initialized.
auto *RemainingSize = AllocSizeWithoutCookie;
if (InitListElements) {
// We know this can't overflow; we check this when doing the allocation.
auto *InitializedSize = llvm::ConstantInt::get(
RemainingSize->getType(),
getContext().getTypeSizeInChars(ElementType).getQuantity() *
InitListElements);
RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
}
// Create the memset.
CharUnits Alignment = getContext().getTypeAlignInChars(ElementType);
Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize,
Alignment.getQuantity(), false);
return true;
};
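  // For instance (illustrative): 'new int[n]()' value-initializes every
  // element, so instead of a per-element store loop this path emits a
  // single memset of n * 4 bytes over the whole allocation.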
// If all elements have already been initialized, skip any further
// initialization.
llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
// If there was a Cleanup, deactivate it.
if (CleanupDominator)
DeactivateCleanupBlock(Cleanup, CleanupDominator);
return;
}
assert(Init && "have trailing elements to initialize but no initializer");
// If this is a constructor call, try to optimize it out, and failing that
// emit a single loop to initialize all remaining elements.
if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
CXXConstructorDecl *Ctor = CCE->getConstructor();
if (Ctor->isTrivial()) {
      // If the new-expression did not specify value-initialization, then
      // there is no initialization.
if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
return;
if (TryMemsetInitialization())
return;
}
// Store the new Cleanup position for irregular Cleanups.
//
// FIXME: Share this cleanup with the constructor call emission rather than
// having it create a cleanup of its own.
if (EndOfInit) Builder.CreateStore(CurPtr, EndOfInit);
// Emit a constructor call loop to initialize the remaining elements.
if (InitListElements)
NumElements = Builder.CreateSub(
NumElements,
llvm::ConstantInt::get(NumElements->getType(), InitListElements));
EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
CCE->requiresZeroInitialization());
return;
}
// If this is value-initialization, we can usually use memset.
ImplicitValueInitExpr IVIE(ElementType);
if (isa<ImplicitValueInitExpr>(Init)) {
if (TryMemsetInitialization())
return;
// Switch to an ImplicitValueInitExpr for the element type. This handles
// only one case: multidimensional array new of pointers to members. In
// all other cases, we already have an initializer for the array element.
Init = &IVIE;
}
// At this point we should have found an initializer for the individual
// elements of the array.
assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
"got wrong type of element to initialize");
// If we have an empty initializer list, we can usually use memset.
if (auto *ILE = dyn_cast<InitListExpr>(Init))
if (ILE->getNumInits() == 0 && TryMemsetInitialization())
return;
// If we have a struct whose every field is value-initialized, we can
// usually use memset.
if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
if (RType->getDecl()->isStruct()) {
unsigned NumFields = 0;
for (auto *Field : RType->getDecl()->fields())
if (!Field->isUnnamedBitfield())
++NumFields;
if (ILE->getNumInits() == NumFields)
for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
--NumFields;
if (ILE->getNumInits() == NumFields && TryMemsetInitialization())
return;
}
}
}
// Create the loop blocks.
llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");
// Find the end of the array, hoisted out of the loop.
llvm::Value *EndPtr =
Builder.CreateInBoundsGEP(BeginPtr, NumElements, "array.end");
// If the number of elements isn't constant, we have to now check if there is
// anything left to initialize.
if (!ConstNum) {
llvm::Value *IsEmpty = Builder.CreateICmpEQ(CurPtr, EndPtr,
"array.isempty");
Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
}
// Enter the loop.
EmitBlock(LoopBB);
// Set up the current-element phi.
llvm::PHINode *CurPtrPhi =
Builder.CreatePHI(CurPtr->getType(), 2, "array.cur");
CurPtrPhi->addIncoming(CurPtr, EntryBB);
CurPtr = CurPtrPhi;
// Store the new Cleanup position for irregular Cleanups.
if (EndOfInit) Builder.CreateStore(CurPtr, EndOfInit);
// Enter a partial-destruction Cleanup if necessary.
if (!CleanupDominator && needsEHCleanup(DtorKind)) {
pushRegularPartialArrayCleanup(BeginPtr, CurPtr, ElementType,
getDestroyer(DtorKind));
Cleanup = EHStack.stable_begin();
CleanupDominator = Builder.CreateUnreachable();
}
// Emit the initializer into this element.
StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr);
// Leave the Cleanup if we entered one.
if (CleanupDominator) {
DeactivateCleanupBlock(Cleanup, CleanupDominator);
CleanupDominator->eraseFromParent();
}
// Advance to the next element by adjusting the pointer type as necessary.
llvm::Value *NextPtr =
Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr, 1, "array.next");
// Check whether we've gotten to the end of the array and, if so,
// exit the loop.
llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());
EmitBlock(ContBB);
}
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
QualType ElementType, llvm::Type *ElementTy,
llvm::Value *NewPtr, llvm::Value *NumElements,
llvm::Value *AllocSizeWithoutCookie) {
ApplyDebugLocation DL(CGF, E);
if (E->isArray())
CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
AllocSizeWithoutCookie);
else if (const Expr *Init = E->getInitializer())
StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
}
/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
const FunctionDecl *Callee,
const FunctionProtoType *CalleeType,
const CallArgList &Args) {
llvm::Instruction *CallOrInvoke;
llvm::Value *CalleeAddr = CGF.CGM.GetAddrOfFunction(Callee);
RValue RV =
CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
Args, CalleeType, /*chainCall=*/false),
CalleeAddr, ReturnValueSlot(), Args, Callee, &CallOrInvoke);
/// C++1y [expr.new]p10:
/// [In a new-expression,] an implementation is allowed to omit a call
/// to a replaceable global allocation function.
///
/// We model such elidable calls with the 'builtin' attribute.
llvm::Function *Fn = dyn_cast<llvm::Function>(CalleeAddr);
if (Callee->isReplaceableGlobalAllocationFunction() &&
Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
// FIXME: Add addAttribute to CallSite.
if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
CI->addAttribute(llvm::AttributeSet::FunctionIndex,
llvm::Attribute::Builtin);
else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
II->addAttribute(llvm::AttributeSet::FunctionIndex,
llvm::Attribute::Builtin);
else
llvm_unreachable("unexpected kind of call instruction");
}
return RV;
}
RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
const Expr *Arg,
bool IsDelete) {
CallArgList Args;
const Stmt *ArgS = Arg;
EmitCallArgs(Args, *Type->param_type_begin(),
ConstExprIterator(&ArgS), ConstExprIterator(&ArgS + 1));
// Find the allocation or deallocation function that we're calling.
ASTContext &Ctx = getContext();
DeclarationName Name = Ctx.DeclarationNames
.getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
if (auto *FD = dyn_cast<FunctionDecl>(Decl))
if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
return EmitNewDeleteCall(*this, cast<FunctionDecl>(Decl), Type, Args);
llvm_unreachable("predeclared global operator new/delete is missing");
}
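// Illustrative usage (an assumption about the caller, which is not shown
// here): this lowers calls such as '__builtin_operator_new(42)' by looking
// up the matching global '::operator new(size_t)' declaration and emitting
// a direct call to it.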
namespace {
/// A cleanup to call the given 'operator delete' function upon
/// abnormal exit from a new expression.
class CallDeleteDuringNew : public EHScopeStack::Cleanup {
size_t NumPlacementArgs;
const FunctionDecl *OperatorDelete;
llvm::Value *Ptr;
llvm::Value *AllocSize;
RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
public:
static size_t getExtraSize(size_t NumPlacementArgs) {
return NumPlacementArgs * sizeof(RValue);
}
CallDeleteDuringNew(size_t NumPlacementArgs,
const FunctionDecl *OperatorDelete,
llvm::Value *Ptr,
llvm::Value *AllocSize)
: NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
Ptr(Ptr), AllocSize(AllocSize) {}
void setPlacementArg(unsigned I, RValue Arg) {
assert(I < NumPlacementArgs && "index out of range");
getPlacementArgs()[I] = Arg;
}
void Emit(CodeGenFunction &CGF, Flags flags) override {
const FunctionProtoType *FPT
= OperatorDelete->getType()->getAs<FunctionProtoType>();
assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
(FPT->getNumParams() == 2 && NumPlacementArgs == 0));
CallArgList DeleteArgs;
// The first argument is always a void*.
FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
DeleteArgs.add(RValue::get(Ptr), *AI++);
// A member 'operator delete' can take an extra 'size_t' argument.
if (FPT->getNumParams() == NumPlacementArgs + 2)
DeleteArgs.add(RValue::get(AllocSize), *AI++);
// Pass the rest of the arguments, which must match exactly.
for (unsigned I = 0; I != NumPlacementArgs; ++I)
DeleteArgs.add(getPlacementArgs()[I], *AI++);
// Call 'operator delete'.
EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
}
};
/// A cleanup to call the given 'operator delete' function upon
/// abnormal exit from a new expression when the new expression is
/// conditional.
class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
size_t NumPlacementArgs;
const FunctionDecl *OperatorDelete;
DominatingValue<RValue>::saved_type Ptr;
DominatingValue<RValue>::saved_type AllocSize;
DominatingValue<RValue>::saved_type *getPlacementArgs() {
return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
}
public:
static size_t getExtraSize(size_t NumPlacementArgs) {
return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
}
CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
const FunctionDecl *OperatorDelete,
DominatingValue<RValue>::saved_type Ptr,
DominatingValue<RValue>::saved_type AllocSize)
: NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
Ptr(Ptr), AllocSize(AllocSize) {}
void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
assert(I < NumPlacementArgs && "index out of range");
getPlacementArgs()[I] = Arg;
}
void Emit(CodeGenFunction &CGF, Flags flags) override {
const FunctionProtoType *FPT
= OperatorDelete->getType()->getAs<FunctionProtoType>();
assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
(FPT->getNumParams() == 2 && NumPlacementArgs == 0));
CallArgList DeleteArgs;
// The first argument is always a void*.
FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
DeleteArgs.add(Ptr.restore(CGF), *AI++);
// A member 'operator delete' can take an extra 'size_t' argument.
if (FPT->getNumParams() == NumPlacementArgs + 2) {
RValue RV = AllocSize.restore(CGF);
DeleteArgs.add(RV, *AI++);
}
// Pass the rest of the arguments, which must match exactly.
for (unsigned I = 0; I != NumPlacementArgs; ++I) {
RValue RV = getPlacementArgs()[I].restore(CGF);
DeleteArgs.add(RV, *AI++);
}
// Call 'operator delete'.
EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
}
};
}
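// Illustrative example of when these cleanups fire (hypothetical types):
//   void *operator new(size_t, Arena &);
//   void operator delete(void *, Arena &);
//   p = new (arena) Widget(args);
// If Widget's constructor throws, the matching placement 'operator delete'
// is invoked with the saved placement argument, i.e. delete(p, arena).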
/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
const CXXNewExpr *E,
llvm::Value *NewPtr,
llvm::Value *AllocSize,
const CallArgList &NewArgs) {
// If we're not inside a conditional branch, then the cleanup will
// dominate and we can do the easier (and more efficient) thing.
if (!CGF.isInConditionalBranch()) {
CallDeleteDuringNew *Cleanup = CGF.EHStack
.pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
E->getNumPlacementArgs(),
E->getOperatorDelete(),
NewPtr, AllocSize);
for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
return;
}
// Otherwise, we need to save all this stuff.
DominatingValue<RValue>::saved_type SavedNewPtr =
DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
DominatingValue<RValue>::saved_type SavedAllocSize =
DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
.pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
E->getNumPlacementArgs(),
E->getOperatorDelete(),
SavedNewPtr,
SavedAllocSize);
for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
Cleanup->setPlacementArg(I,
DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
CGF.initFullExprCleanup();
}
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// The element type being allocated.
QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
// 1. Build a call to the allocation function.
FunctionDecl *allocator = E->getOperatorNew();
const FunctionProtoType *allocatorType =
allocator->getType()->castAs<FunctionProtoType>();
CallArgList allocatorArgs;
// The allocation size is the first argument.
QualType sizeType = getContext().getSizeType();
  // If there is a brace-initializer, we cannot allocate fewer elements than
  // there are initializers.
unsigned minElements = 0;
if (E->isArray() && E->hasInitializer()) {
if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
minElements = ILE->getNumInits();
}
llvm::Value *numElements = nullptr;
llvm::Value *allocSizeWithoutCookie = nullptr;
llvm::Value *allocSize =
EmitCXXNewAllocSize(*this, E, minElements, numElements,
allocSizeWithoutCookie);
allocatorArgs.add(RValue::get(allocSize), sizeType);
// We start at 1 here because the first argument (the allocation size)
// has already been emitted.
EmitCallArgs(allocatorArgs, allocatorType, E->placement_arg_begin(),
E->placement_arg_end(), /* CalleeDecl */ nullptr,
/*ParamsToSkip*/ 1);
// Emit the allocation call. If the allocator is a global placement
// operator, just "inline" it directly.
RValue RV;
if (allocator->isReservedGlobalPlacementOperator()) {
assert(allocatorArgs.size() == 2);
RV = allocatorArgs[1].RV;
// TODO: kill any unnecessary computations done for the size
// argument.
} else {
RV = EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
}
// Emit a null check on the allocation result if the allocation
// function is allowed to return null (because it has a non-throwing
// exception spec or is the reserved placement new) and we have an
// interesting initializer.
bool nullCheck = E->shouldNullCheckAllocation(getContext()) &&
(!allocType.isPODType(getContext()) || E->hasInitializer());
llvm::BasicBlock *nullCheckBB = nullptr;
llvm::BasicBlock *contBB = nullptr;
llvm::Value *allocation = RV.getScalarVal();
unsigned AS = allocation->getType()->getPointerAddressSpace();
// The null-check means that the initializer is conditionally
// evaluated.
ConditionalEvaluation conditional(*this);
if (nullCheck) {
conditional.begin(*this);
nullCheckBB = Builder.GetInsertBlock();
llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
contBB = createBasicBlock("new.cont");
llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
Builder.CreateCondBr(isNull, contBB, notNullBB);
EmitBlock(notNullBB);
}
// If there's an operator delete, enter a cleanup to call it if an
// exception is thrown.
EHScopeStack::stable_iterator operatorDeleteCleanup;
llvm::Instruction *cleanupDominator = nullptr;
if (E->getOperatorDelete() &&
!E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
operatorDeleteCleanup = EHStack.stable_begin();
cleanupDominator = Builder.CreateUnreachable();
}
assert((allocSize == allocSizeWithoutCookie) ==
CalculateCookiePadding(*this, E).isZero());
if (allocSize != allocSizeWithoutCookie) {
assert(E->isArray());
allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
numElements,
E, allocType);
}
llvm::Type *elementTy = ConvertTypeForMem(allocType);
llvm::Type *elementPtrTy = elementTy->getPointerTo(AS);
llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
allocSizeWithoutCookie);
if (E->isArray()) {
// NewPtr is a pointer to the base element type. If we're
// allocating an array of arrays, we'll need to cast back to the
// array pointer type.
llvm::Type *resultType = ConvertTypeForMem(E->getType());
if (result->getType() != resultType)
result = Builder.CreateBitCast(result, resultType);
}
// Deactivate the 'operator delete' cleanup if we finished
// initialization.
if (operatorDeleteCleanup.isValid()) {
DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
cleanupDominator->eraseFromParent();
}
if (nullCheck) {
conditional.end(*this);
llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
EmitBlock(contBB);
llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
PHI->addIncoming(result, notNullBB);
PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
nullCheckBB);
result = PHI;
}
return result;
}
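// Illustrative end-to-end sketch (hypothetical IR, Itanium 64-bit): for
// 'new int(5)' with the default throwing '::operator new', the emitted
// code is roughly
//   %raw = call i8* @_Znwm(i64 4)
//   %p = bitcast i8* %raw to i32*
//   store i32 5, i32* %p
// with no null check, since a throwing allocator never returns null.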
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
llvm::Value *Ptr,
QualType DeleteTy) {
assert(DeleteFD->getOverloadedOperator() == OO_Delete);
const FunctionProtoType *DeleteFTy =
DeleteFD->getType()->getAs<FunctionProtoType>();
CallArgList DeleteArgs;
// Check if we need to pass the size to the delete operator.
llvm::Value *Size = nullptr;
QualType SizeTy;
if (DeleteFTy->getNumParams() == 2) {
SizeTy = DeleteFTy->getParamType(1);
CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
Size = llvm::ConstantInt::get(ConvertType(SizeTy),
DeleteTypeSize.getQuantity());
}
QualType ArgTy = DeleteFTy->getParamType(0);
llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
if (Size)
DeleteArgs.add(RValue::get(Size), SizeTy);
// Emit the call to delete.
EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
}
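// Illustrative note: with a sized deallocation function such as
// 'void operator delete(void *, size_t)', deleting a 'Widget *' passes
// 'sizeof(Widget)' as the second argument, materialized above as an
// integer constant.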
namespace {
/// Calls the given 'operator delete' on a single object.
struct CallObjectDelete : EHScopeStack::Cleanup {
llvm::Value *Ptr;
const FunctionDecl *OperatorDelete;
QualType ElementType;
CallObjectDelete(llvm::Value *Ptr,
const FunctionDecl *OperatorDelete,
QualType ElementType)
: Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
}
};
}
void
CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
llvm::Value *CompletePtr,
QualType ElementType) {
EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
OperatorDelete, ElementType);
}
/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
llvm::Value *Ptr,
QualType ElementType) {
// Find the destructor for the type, if applicable. If the
// destructor is virtual, we'll just emit the vcall and return.
const CXXDestructorDecl *Dtor = nullptr;
if (const RecordType *RT = ElementType->getAs<RecordType>()) {
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
Dtor = RD->getDestructor();
if (Dtor->isVirtual()) {
CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
Dtor);
return;
}
}
}
// Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
// to pop it off in a second.
const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
Ptr, OperatorDelete, ElementType);
if (Dtor)
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false,
/*Delegating=*/false,
Ptr);
else if (CGF.getLangOpts().ObjCAutoRefCount &&
ElementType->isObjCLifetimeType()) {
switch (ElementType.getObjCLifetime()) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
case Qualifiers::OCL_Autoreleasing:
break;
case Qualifiers::OCL_Strong: {
// Load the pointer value.
llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
ElementType.isVolatileQualified());
CGF.EmitARCRelease(PtrValue, ARCPreciseLifetime);
break;
}
case Qualifiers::OCL_Weak:
CGF.EmitARCDestroyWeak(Ptr);
break;
}
}
CGF.PopCleanupBlock();
}
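// Illustrative example: for 'delete p' where 'p' is a 'Base *' with a
// virtual destructor, EmitObjectDelete above emits only a virtual call to
// the deleting destructor (the D0 variant under the Itanium ABI), which
// itself invokes the matching 'operator delete'; the explicit cleanup and
// destructor call of the non-virtual path are skipped entirely.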
namespace {
/// Calls the given 'operator delete' on an array of objects.
struct CallArrayDelete : EHScopeStack::Cleanup {
llvm::Value *Ptr;
const FunctionDecl *OperatorDelete;
llvm::Value *NumElements;
QualType ElementType;
CharUnits CookieSize;
CallArrayDelete(llvm::Value *Ptr,
const FunctionDecl *OperatorDelete,
llvm::Value *NumElements,
QualType ElementType,
CharUnits CookieSize)
: Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
ElementType(ElementType), CookieSize(CookieSize) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
const FunctionProtoType *DeleteFTy =
OperatorDelete->getType()->getAs<FunctionProtoType>();
assert(DeleteFTy->getNumParams() == 1 || DeleteFTy->getNumParams() == 2);
CallArgList Args;
// Pass the pointer as the first argument.
QualType VoidPtrTy = DeleteFTy->getParamType(0);
llvm::Value *DeletePtr
= CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
Args.add(RValue::get(DeletePtr), VoidPtrTy);
// Pass the original requested size as the second argument.
if (DeleteFTy->getNumParams() == 2) {
QualType size_t = DeleteFTy->getParamType(1);
llvm::IntegerType *SizeTy
= cast<llvm::IntegerType>(CGF.ConvertType(size_t));
CharUnits ElementTypeSize =
CGF.CGM.getContext().getTypeSizeInChars(ElementType);
// The size of an element, multiplied by the number of elements.
llvm::Value *Size
= llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
if (NumElements)
Size = CGF.Builder.CreateMul(Size, NumElements);
// Plus the size of the cookie if applicable.
if (!CookieSize.isZero()) {
llvm::Value *CookieSizeV
= llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
}
Args.add(RValue::get(Size), size_t);
}
// Emit the call to delete.
EmitNewDeleteCall(CGF, OperatorDelete, DeleteFTy, Args);
}
};
}
/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *E,
llvm::Value *deletedPtr,
QualType elementType) {
llvm::Value *numElements = nullptr;
llvm::Value *allocatedPtr = nullptr;
CharUnits cookieSize;
CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
numElements, allocatedPtr, cookieSize);
assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
// Make sure that we call delete even if one of the dtors throws.
const FunctionDecl *operatorDelete = E->getOperatorDelete();
CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
allocatedPtr, operatorDelete,
numElements, elementType,
cookieSize);
// Destroy the elements.
if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
assert(numElements && "no element count for a type with a destructor!");
llvm::Value *arrayEnd =
CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");
// Note that it is legal to allocate a zero-length array, and we
// can never fold the check away because the length should always
// come from a cookie.
CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
CGF.getDestroyer(dtorKind),
/*checkZeroLength*/ true,
CGF.needsEHCleanup(dtorKind));
}
// Pop the cleanup block.
CGF.PopCleanupBlock();
}
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
const Expr *Arg = E->getArgument();
llvm::Value *Ptr = EmitScalarExpr(Arg);
// Null check the pointer.
llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
EmitBlock(DeleteNotNull);
  // We might be deleting a pointer to an array. If so, GEP down to the
// first non-array element.
// (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
if (DeleteTy->isConstantArrayType()) {
llvm::Value *Zero = Builder.getInt32(0);
SmallVector<llvm::Value*,8> GEP;
GEP.push_back(Zero); // point at the outermost array
// For each layer of array type we're pointing at:
while (const ConstantArrayType *Arr
= getContext().getAsConstantArrayType(DeleteTy)) {
// 1. Unpeel the array type.
DeleteTy = Arr->getElementType();
// 2. GEP to the first element of the array.
GEP.push_back(Zero);
}
Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
}
assert(ConvertTypeForMem(DeleteTy) ==
cast<llvm::PointerType>(Ptr->getType())->getElementType());
if (E->isArrayForm()) {
EmitArrayDelete(*this, E, Ptr, DeleteTy);
} else {
EmitObjectDelete(*this, E, Ptr, DeleteTy);
}
EmitBlock(DeleteEnd);
}
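// Illustrative example of the array-of-arrays case above (hypothetical IR):
// for 'A (*p)[3][7]', 'delete [] p' GEPs down through both array layers,
//   %del.first = getelementptr inbounds [3 x [7 x %A]], [3 x [7 x %A]]* %p,
//                i32 0, i32 0, i32 0
// so the element type seen by EmitArrayDelete is plain 'A'.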
static bool isGLValueFromPointerDeref(const Expr *E) {
E = E->IgnoreParens();
if (const auto *CE = dyn_cast<CastExpr>(E)) {
if (!CE->getSubExpr()->isGLValue())
return false;
return isGLValueFromPointerDeref(CE->getSubExpr());
}
if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
return isGLValueFromPointerDeref(OVE->getSourceExpr());
if (const auto *BO = dyn_cast<BinaryOperator>(E))
if (BO->getOpcode() == BO_Comma)
return isGLValueFromPointerDeref(BO->getRHS());
if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
isGLValueFromPointerDeref(ACO->getFalseExpr());
// C++11 [expr.sub]p1:
// The expression E1[E2] is identical (by definition) to *((E1)+(E2))
if (isa<ArraySubscriptExpr>(E))
return true;
if (const auto *UO = dyn_cast<UnaryOperator>(E))
if (UO->getOpcode() == UO_Deref)
return true;
return false;
}
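// Illustrative examples: 'typeid(*p)', 'typeid(p[0])', and
// 'typeid(cond ? *p : *q)' all count as glvalues obtained from a pointer
// dereference, so the null check in EmitTypeidFromVTable below gets
// emitted; 'typeid(obj)' on an ordinary lvalue does not, and needs no
// check.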
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
llvm::Type *StdTypeInfoPtrTy) {
// Get the vtable pointer.
llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();
// C++ [expr.typeid]p2:
// If the glvalue expression is obtained by applying the unary * operator to
// a pointer and the pointer is a null pointer value, the typeid expression
// throws the std::bad_typeid exception.
//
// However, this paragraph's intent is not clear. We choose a very generous
  // interpretation which requires us to consider comma operators, conditional
// operators, parentheses and other such constructs.
QualType SrcRecordTy = E->getType();
if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
isGLValueFromPointerDeref(E), SrcRecordTy)) {
llvm::BasicBlock *BadTypeidBlock =
CGF.createBasicBlock("typeid.bad_typeid");
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");
llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
CGF.EmitBlock(BadTypeidBlock);
CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
CGF.EmitBlock(EndBlock);
}
return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
StdTypeInfoPtrTy);
}
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
llvm::Type *StdTypeInfoPtrTy =
ConvertType(E->getType())->getPointerTo();
if (E->isTypeOperand()) {
llvm::Constant *TypeInfo =
CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
}
// C++ [expr.typeid]p2:
// When typeid is applied to a glvalue expression whose type is a
// polymorphic class type, the result refers to a std::type_info object
// representing the type of the most derived object (that is, the dynamic
// type) to which the glvalue refers.
if (E->isPotentiallyEvaluated())
return EmitTypeidFromVTable(*this, E->getExprOperand(),
StdTypeInfoPtrTy);
QualType OperandTy = E->getExprOperand()->getType();
return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
StdTypeInfoPtrTy);
}
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
QualType DestTy) {
llvm::Type *DestLTy = CGF.ConvertType(DestTy);
if (DestTy->isPointerType())
return llvm::Constant::getNullValue(DestLTy);
/// C++ [expr.dynamic.cast]p9:
/// A failed cast to reference type throws std::bad_cast
if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
return nullptr;
CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
return llvm::UndefValue::get(DestLTy);
}
llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
const CXXDynamicCastExpr *DCE) {
QualType DestTy = DCE->getTypeAsWritten();
if (DCE->isAlwaysNull())
if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
return T;
QualType SrcTy = DCE->getSubExpr()->getType();
// C++ [expr.dynamic.cast]p7:
// If T is "pointer to cv void," then the result is a pointer to the most
// derived object pointed to by v.
const PointerType *DestPTy = DestTy->getAs<PointerType>();
bool isDynamicCastToVoid;
QualType SrcRecordTy;
QualType DestRecordTy;
if (DestPTy) {
isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
DestRecordTy = DestPTy->getPointeeType();
} else {
isDynamicCastToVoid = false;
SrcRecordTy = SrcTy;
DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
}
assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
// C++ [expr.dynamic.cast]p4:
// If the value of v is a null pointer value in the pointer case, the result
// is the null pointer value of type T.
bool ShouldNullCheckSrcValue =
CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
SrcRecordTy);
llvm::BasicBlock *CastNull = nullptr;
llvm::BasicBlock *CastNotNull = nullptr;
llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
if (ShouldNullCheckSrcValue) {
CastNull = createBasicBlock("dynamic_cast.null");
CastNotNull = createBasicBlock("dynamic_cast.notnull");
llvm::Value *IsNull = Builder.CreateIsNull(Value);
Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
EmitBlock(CastNotNull);
}
if (isDynamicCastToVoid) {
Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, Value, SrcRecordTy,
DestTy);
} else {
assert(DestRecordTy->isRecordType() &&
"destination type must be a record type!");
Value = CGM.getCXXABI().EmitDynamicCastCall(*this, Value, SrcRecordTy,
DestTy, DestRecordTy, CastEnd);
}
if (ShouldNullCheckSrcValue) {
EmitBranch(CastEnd);
EmitBlock(CastNull);
EmitBranch(CastEnd);
}
EmitBlock(CastEnd);
if (ShouldNullCheckSrcValue) {
llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
PHI->addIncoming(Value, CastNotNull);
PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
Value = PHI;
}
return Value;
}
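// Illustrative sketch (hypothetical IR): for 'dynamic_cast<Derived *>(b)'
// the structure built above is roughly
//   %isnull = icmp eq %Base* %b, null
//   br i1 %isnull, label %dynamic_cast.null, label %dynamic_cast.notnull
// with the not-null arm calling the ABI's runtime entry point (Itanium:
// __dynamic_cast) and a phi merging that result with a null constant.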
void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
RunCleanupsScope Scope(*this);
LValue SlotLV =
MakeAddrLValue(Slot.getAddr(), E->getType(), Slot.getAlignment());
CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
e = E->capture_init_end();
i != e; ++i, ++CurField) {
// Emit initialization
LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
if (CurField->hasCapturedVLAType()) {
auto VAT = CurField->getCapturedVLAType();
EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
} else {
ArrayRef<VarDecl *> ArrayIndexes;
if (CurField->getType()->isArrayType())
ArrayIndexes = E->getCaptureInitIndexVars(i);
EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
}
}
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGExprAgg.cpp | //===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "CGHLSLRuntime.h" // HLSL Change
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
//===----------------------------------------------------------------------===//
// Aggregate Expression Emitter
//===----------------------------------------------------------------------===//
namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
CodeGenFunction &CGF;
CGBuilderTy &Builder;
AggValueSlot Dest;
bool IsResultUnused;
/// We want to use 'dest' as the return slot except under two
/// conditions:
/// - The destination slot requires garbage collection, so we
/// need to use the GC API.
/// - The destination slot is potentially aliased.
bool shouldUseDestForReturnSlot() const {
return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
}
ReturnValueSlot getReturnValueSlot() const {
if (!shouldUseDestForReturnSlot())
return ReturnValueSlot();
return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile(), IsResultUnused);
}
AggValueSlot EnsureSlot(QualType T) {
if (!Dest.isIgnored()) return Dest;
return CGF.CreateAggTemp(T, "agg.tmp.ensured");
}
void EnsureDest(QualType T) {
if (!Dest.isIgnored()) return;
Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
}
public:
AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
: CGF(cgf), Builder(CGF.Builder), Dest(Dest),
IsResultUnused(IsResultUnused) { }
//===--------------------------------------------------------------------===//
// Utilities
//===--------------------------------------------------------------------===//
/// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// is an lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void EmitAggLoadOfLValue(const Expr *E);
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void EmitFinalDestCopy(QualType type, const LValue &src);
void EmitFinalDestCopy(QualType type, RValue src,
CharUnits srcAlignment = CharUnits::Zero());
void EmitCopy(QualType type, const AggValueSlot &dest,
const AggValueSlot &src);
void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
QualType elementType, InitListExpr *E);
AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
return AggValueSlot::NeedsGCBarriers;
return AggValueSlot::DoesNotNeedGCBarriers;
}
bool TypeRequiresGCollection(QualType T);
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
void Visit(Expr *E) {
ApplyDebugLocation DL(CGF, E);
StmtVisitor<AggExprEmitter>::Visit(E);
}
void VisitStmt(Stmt *S) {
CGF.ErrorUnsupported(S, "aggregate expression");
}
void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
Visit(GE->getResultExpr());
}
void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
return Visit(E->getReplacement());
}
// l-values.
void VisitDeclRefExpr(DeclRefExpr *E) {
// For aggregates, we should always be able to emit the variable
// as an l-value unless it's a reference. This is due to the fact
// that we can't actually ever see a normal l2r conversion on an
// aggregate in C++, and in C there's no language standard
// actively preventing us from listing variables in the captures
// list of a block.
if (E->getDecl()->getType()->isReferenceType()) {
if (CodeGenFunction::ConstantEmission result
= CGF.tryEmitAsConstant(E)) {
EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
return;
}
}
EmitAggLoadOfLValue(E);
}
void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
EmitAggLoadOfLValue(E);
}
void VisitPredefinedExpr(const PredefinedExpr *E) {
EmitAggLoadOfLValue(E);
}
// Operators.
void VisitCastExpr(CastExpr *E);
void VisitCallExpr(const CallExpr *E);
void VisitStmtExpr(const StmtExpr *E);
void VisitBinaryOperator(const BinaryOperator *BO);
void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
void VisitBinAssign(const BinaryOperator *E);
void VisitBinComma(const BinaryOperator *E);
void VisitObjCMessageExpr(ObjCMessageExpr *E);
void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
EmitAggLoadOfLValue(E);
}
void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
void VisitChooseExpr(const ChooseExpr *CE);
void VisitInitListExpr(InitListExpr *E);
void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
Visit(DAE->getExpr());
}
void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
Visit(DIE->getExpr());
}
void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
void VisitCXXConstructExpr(const CXXConstructExpr *E);
void VisitLambdaExpr(LambdaExpr *E);
void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
void VisitExprWithCleanups(ExprWithCleanups *E);
void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
void VisitOpaqueValueExpr(OpaqueValueExpr *E);
void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
if (E->isGLValue()) {
LValue LV = CGF.EmitPseudoObjectLValue(E);
return EmitFinalDestCopy(E->getType(), LV);
}
CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
}
void VisitVAArgExpr(VAArgExpr *E);
void EmitInitializationToLValue(Expr *E, LValue Address);
void EmitNullInitializationToLValue(LValue Address);
// case Expr::ChooseExprClass:
void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
void VisitAtomicExpr(AtomicExpr *E) {
CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
}
};
} // end anonymous namespace.
//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//
/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// is an lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
LValue LV = CGF.EmitLValue(E);
// If the type of the l-value is atomic, then do an atomic load.
if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
return;
}
EmitFinalDestCopy(E->getType(), LV);
}
/// \brief True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
// Only record types have members that might require garbage collection.
const RecordType *RecordTy = T->getAs<RecordType>();
if (!RecordTy) return false;
// Don't mess with non-trivial C++ types.
RecordDecl *Record = RecordTy->getDecl();
if (isa<CXXRecordDecl>(Record) &&
(cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
!cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
return false;
// Check whether the type has an object member.
return Record->hasObjectMember();
}
/// \brief Perform the final move to DestPtr if for some reason
/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
/// RValue Result = EmitSomething(..., getReturnValueSlot());
/// EmitMoveFromReturnSlot(E, Result);
///
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot. Otherwise, a final move
/// will be performed.
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
if (shouldUseDestForReturnSlot()) {
// Logically, Dest.getAddr() should equal src.getAggregateAddr().
// The possibility of undef rvalues complicates that a lot,
// though, so we can't really assert.
return;
}
// Otherwise, copy from there to the destination.
assert(Dest.getAddr() != src.getAggregateAddr());
std::pair<CharUnits, CharUnits> typeInfo =
CGF.getContext().getTypeInfoInChars(E->getType());
EmitFinalDestCopy(E->getType(), src, typeInfo.second);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src,
CharUnits srcAlign) {
assert(src.isAggregate() && "value must be aggregate value!");
LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddr(), type, srcAlign);
EmitFinalDestCopy(type, srcLV);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
// If Dest is ignored, then we're evaluating an aggregate expression
// in a context that doesn't care about the result. Note that loads
// from volatile l-values force the existence of a non-ignored
// destination.
if (Dest.isIgnored())
return;
AggValueSlot srcAgg =
AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
needsGC(type), AggValueSlot::IsAliased);
EmitCopy(type, Dest, srcAgg);
}
/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
/// ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
const AggValueSlot &src) {
if (dest.requiresGCollection()) {
CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
dest.getAddr(),
src.getAddr(),
size);
return;
}
// If the result of the assignment is used, copy the LHS there also.
// It's volatile if either side is. Use the minimum alignment of
// the two sides.
CGF.EmitAggregateCopy(dest.getAddr(), src.getAddr(), type,
dest.isVolatile() || src.isVolatile(),
std::min(dest.getAlignment(), src.getAlignment()));
}
/// \brief Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
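///
/// Illustrative example (assumed, not from the original source): for
///   std::initializer_list<int> il = { 1, 2, 3 };
/// this emits a backing array of three ints, then fills in the record's
/// {begin, end} or {begin, length} pair below.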
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
// Emit an array containing the elements. The array is externally destructed
// if the std::initializer_list object is.
ASTContext &Ctx = CGF.getContext();
LValue Array = CGF.EmitLValue(E->getSubExpr());
assert(Array.isSimple() && "initializer_list array not a simple lvalue");
llvm::Value *ArrayPtr = Array.getAddress();
const ConstantArrayType *ArrayType =
Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
assert(ArrayType && "std::initializer_list constructed from non-array");
// FIXME: Perform the checks on the field types in SemaInit.
RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
RecordDecl::field_iterator Field = Record->field_begin();
if (Field == Record->field_end()) {
CGF.ErrorUnsupported(E, "weird std::initializer_list");
return;
}
// Start pointer.
if (!Field->getType()->isPointerType() ||
!Ctx.hasSameType(Field->getType()->getPointeeType(),
ArrayType->getElementType())) {
CGF.ErrorUnsupported(E, "weird std::initializer_list");
return;
}
AggValueSlot Dest = EnsureSlot(E->getType());
LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
Dest.getAlignment());
LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
llvm::Value *IdxStart[] = { Zero, Zero };
llvm::Value *ArrayStart =
Builder.CreateInBoundsGEP(ArrayPtr, IdxStart, "arraystart");
CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
++Field;
if (Field == Record->field_end()) {
CGF.ErrorUnsupported(E, "weird std::initializer_list");
return;
}
llvm::Value *Size = Builder.getInt(ArrayType->getSize());
LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
if (Field->getType()->isPointerType() &&
Ctx.hasSameType(Field->getType()->getPointeeType(),
ArrayType->getElementType())) {
// End pointer.
llvm::Value *IdxEnd[] = { Zero, Size };
llvm::Value *ArrayEnd =
Builder.CreateInBoundsGEP(ArrayPtr, IdxEnd, "arrayend");
CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
} else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
// Length.
CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
} else {
CGF.ErrorUnsupported(E, "weird std::initializer_list");
return;
}
}
/// \brief Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
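///
/// Illustrative example (assumed, not from the original source): in
///   int a[4] = { 1 };
/// the three trailing elements are covered by an implicit value-init
/// filler, which this predicate treats as equivalent to zero-init.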
static bool isTrivialFiller(Expr *E) {
if (!E)
return true;
if (isa<ImplicitValueInitExpr>(E))
return true;
if (auto *ILE = dyn_cast<InitListExpr>(E)) {
if (ILE->getNumInits())
return false;
return isTrivialFiller(ILE->getArrayFiller());
}
if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
return Cons->getConstructor()->isDefaultConstructor() &&
Cons->getConstructor()->isTrivial();
// FIXME: Are there other cases where we can avoid emitting an initializer?
return false;
}
/// \brief Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
QualType elementType, InitListExpr *E) {
uint64_t NumInitElements = E->getNumInits();
uint64_t NumArrayElements = AType->getNumElements();
// HLSL Change Starts
if (CGF.getLangOpts().HLSL &&
!CGF.CGM.getHLSLRuntime().IsTrivalInitListExpr(CGF, E)) {
// Generate dx.hl.init for every array, not only matrix arrays.
CGF.CGM.getHLSLRuntime().EmitHLSLInitListExpr(CGF, E, DestPtr);
return;
}
// HLSL Change Ends
assert(NumInitElements <= NumArrayElements);
// DestPtr is an array*. Construct an elementType* by drilling
// down a level.
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
llvm::Value *indices[] = { zero, zero };
llvm::Value *begin =
Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
// Exception safety requires us to destroy all the
// already-constructed members if an initializer throws.
// For that, we'll need an EH cleanup.
QualType::DestructionKind dtorKind = elementType.isDestructedType();
llvm::AllocaInst *endOfInit = nullptr;
EHScopeStack::stable_iterator cleanup;
llvm::Instruction *cleanupDominator = nullptr;
if (CGF.needsEHCleanup(dtorKind)) {
// In principle we could tell the cleanup where we are more
// directly, but the control flow can get so varied here that it
// would actually be quite complex. Therefore we go through an
// alloca.
endOfInit = CGF.CreateTempAlloca(begin->getType(),
"arrayinit.endOfInit");
cleanupDominator = Builder.CreateStore(begin, endOfInit);
CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
CGF.getDestroyer(dtorKind));
cleanup = CGF.EHStack.stable_begin();
// Otherwise, remember that we didn't need a cleanup.
} else {
dtorKind = QualType::DK_none;
}
llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
// The 'current element to initialize'. The invariants on this
// variable are complicated. Essentially, after each iteration of
// the loop, it points to the last initialized element, except
// that it points to the beginning of the array before any
// elements have been initialized.
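// Illustrative trace (assumed, not from the original source): when
// initializing { x, y }, 'element' points at &dest[0] while emitting x,
// then at &dest[1] while emitting y.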
llvm::Value *element = begin;
// Emit the explicit initializers.
for (uint64_t i = 0; i != NumInitElements; ++i) {
// Advance to the next element.
if (i > 0) {
element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
// Tell the cleanup that it needs to destroy up to this
// element. TODO: some of these stores can be trivially
// observed to be unnecessary.
if (endOfInit) Builder.CreateStore(element, endOfInit);
}
LValue elementLV = CGF.MakeAddrLValue(element, elementType);
EmitInitializationToLValue(E->getInit(i), elementLV);
}
// Check whether there's a non-trivial array-fill expression.
Expr *filler = E->getArrayFiller();
bool hasTrivialFiller = isTrivialFiller(filler);
// Any remaining elements need to be zero-initialized, possibly
// using the filler expression. We can skip this if we're
// emitting to zeroed memory.
if (NumInitElements != NumArrayElements &&
!(Dest.isZeroed() && hasTrivialFiller &&
CGF.getTypes().isZeroInitializable(elementType))) {
// Use an actual loop. This is basically
// do { *array++ = filler; } while (array != end);
// Advance to the start of the rest of the array.
if (NumInitElements) {
element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
if (endOfInit) Builder.CreateStore(element, endOfInit);
}
// Compute the end of the array.
llvm::Value *end = Builder.CreateInBoundsGEP(begin,
llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
"arrayinit.end");
llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
// Jump into the body.
CGF.EmitBlock(bodyBB);
llvm::PHINode *currentElement =
Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
currentElement->addIncoming(element, entryBB);
// Emit the actual filler expression.
LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
if (filler)
EmitInitializationToLValue(filler, elementLV);
else
EmitNullInitializationToLValue(elementLV);
// Move on to the next element.
llvm::Value *nextElement =
Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
// Tell the EH cleanup that we finished with the last element.
if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
// Leave the loop if we're done.
llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
"arrayinit.done");
llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
Builder.CreateCondBr(done, endBB, bodyBB);
currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
CGF.EmitBlock(endBB);
}
// Leave the partial-array cleanup if we entered one.
if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}
//===----------------------------------------------------------------------===//
// Visitor Methods
//===----------------------------------------------------------------------===//
void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
Visit(E->GetTemporaryExpr());
}
void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
}
void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
if (Dest.isPotentiallyAliased() &&
E->getType().isPODType(CGF.getContext())) {
// For a POD type, just emit a load of the lvalue + a copy, because our
// compound literal might alias the destination.
EmitAggLoadOfLValue(E);
return;
}
AggValueSlot Slot = EnsureSlot(E->getType());
CGF.EmitAggExpr(E->getInitializer(), Slot);
}
/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
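///
/// Illustrative example (assumed, not from the original source): given
/// an operand like `(CK_NoOp (CK_NonAtomicToAtomic x))` and the kind
/// CK_NonAtomicToAtomic, the parens and CK_NoOp wrappers are skipped
/// and `x` is returned; anything else yields nullptr.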
static Expr *findPeephole(Expr *op, CastKind kind) {
while (true) {
op = op->IgnoreParens();
if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
if (castE->getCastKind() == kind)
return castE->getSubExpr();
if (castE->getCastKind() == CK_NoOp)
continue;
}
return nullptr;
}
}
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
switch (E->getCastKind()) {
case CK_Dynamic: {
// FIXME: Can this actually happen? We have no test coverage for it.
assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
CodeGenFunction::TCK_Load);
// FIXME: Do we also need to handle property references here?
if (LV.isSimple())
CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
else
CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
if (!Dest.isIgnored())
CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
break;
}
case CK_ToUnion: {
// Evaluate even if the destination is ignored.
if (Dest.isIgnored()) {
CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
/*ignoreResult=*/true);
break;
}
// GCC union extension
QualType Ty = E->getSubExpr()->getType();
QualType PtrTy = CGF.getContext().getPointerType(Ty);
llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
CGF.ConvertType(PtrTy));
EmitInitializationToLValue(E->getSubExpr(),
CGF.MakeAddrLValue(CastPtr, Ty));
break;
}
case CK_DerivedToBase:
case CK_BaseToDerived:
case CK_UncheckedDerivedToBase: {
llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
"should have been unpacked before we got here");
}
case CK_NonAtomicToAtomic:
case CK_AtomicToNonAtomic: {
bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);
// Determine the atomic and value types.
QualType atomicType = E->getSubExpr()->getType();
QualType valueType = E->getType();
if (isToAtomic) std::swap(atomicType, valueType);
assert(atomicType->isAtomicType());
assert(CGF.getContext().hasSameUnqualifiedType(valueType,
atomicType->castAs<AtomicType>()->getValueType()));
// Just recurse normally if we're ignoring the result or the
// atomic type doesn't change representation.
if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
return Visit(E->getSubExpr());
}
CastKind peepholeTarget =
(isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
// These two cases are reverses of each other; try to peephole them.
if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
E->getType()) &&
"peephole significantly changed types?");
return Visit(op);
}
// If we're converting an r-value of non-atomic type to an r-value
// of atomic type, just emit directly into the relevant sub-object.
if (isToAtomic) {
AggValueSlot valueDest = Dest;
if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
// Zero-initialize. (Strictly speaking, we only need to initialize
// the padding at the end, but this is simpler.)
if (!Dest.isZeroed())
CGF.EmitNullInitialization(Dest.getAddr(), atomicType);
// Build a GEP to refer to the subobject.
llvm::Value *valueAddr =
CGF.Builder.CreateStructGEP(nullptr, valueDest.getAddr(), 0);
valueDest = AggValueSlot::forAddr(valueAddr,
valueDest.getAlignment(),
valueDest.getQualifiers(),
valueDest.isExternallyDestructed(),
valueDest.requiresGCollection(),
valueDest.isPotentiallyAliased(),
AggValueSlot::IsZeroed);
}
CGF.EmitAggExpr(E->getSubExpr(), valueDest);
return;
}
// Otherwise, we're converting an atomic type to a non-atomic type.
// Make an atomic temporary, emit into that, and then copy the value out.
AggValueSlot atomicSlot =
CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
llvm::Value *valueAddr =
Builder.CreateStructGEP(nullptr, atomicSlot.getAddr(), 0);
RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
return EmitFinalDestCopy(valueType, rvalue);
}
case CK_LValueToRValue:
// If we're loading from a volatile type, force the destination
// into existence.
if (E->getSubExpr()->getType().isVolatileQualified()) {
EnsureDest(E->getType());
return Visit(E->getSubExpr());
}
LLVM_FALLTHROUGH; // HLSL Change
case CK_NoOp:
case CK_UserDefinedConversion:
case CK_ConstructorConversion:
assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
E->getType()) &&
"Implicit cast types must be compatible");
Visit(E->getSubExpr());
break;
// HLSL Change Begins.
case CK_HLSLDerivedToBase:
case CK_FlatConversion: {
QualType Ty = E->getSubExpr()->getType();
// We must emit the converted subexpression for any side-effects,
// but the conversion itself doesn't have any, so we should not
// emit it if we were not provided a destination aggregate value slot.
if (IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E->getSubExpr())) {
if (Dest.isIgnored()) return;
llvm::Value *SrcVal = llvm::ConstantInt::get(CGF.getLLVMContext(), IL->getValue());
CGF.CGM.getHLSLRuntime().EmitHLSLFlatConversion(
CGF, SrcVal, Dest.getAddr(), E->getType(), Ty);
} else if (FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E->getSubExpr())) {
if (Dest.isIgnored()) return;
llvm::Value *SrcVal = llvm::ConstantFP::get(CGF.getLLVMContext(), FL->getValue());
CGF.CGM.getHLSLRuntime().EmitHLSLFlatConversion(
CGF, SrcVal, Dest.getAddr(), E->getType(), Ty);
} else {
Expr *Src = E->getSubExpr();
switch (CGF.getEvaluationKind(Ty)) {
case TEK_Aggregate: {
if (CastExpr *SrcCast = dyn_cast<CastExpr>(Src)) {
if (SrcCast->getCastKind() == CK_LValueToRValue) {
// Skip the lval to rval cast to reach decl.
Src = SrcCast->getSubExpr();
}
}
// Just use the decl directly if possible to skip a useless copy.
LValue LV;
if (DeclRefExpr *SrcDecl = dyn_cast<DeclRefExpr>(Src))
LV = CGF.EmitLValue(SrcDecl);
else if (ArraySubscriptExpr *ArraySubExpr = dyn_cast<ArraySubscriptExpr>(Src))
LV = CGF.EmitLValue(ArraySubExpr);
else if (ParenExpr *parenExpr = dyn_cast<ParenExpr>(Src))
LV = CGF.EmitLValue(parenExpr->getSubExpr());
else if (isa<CXXThisExpr>(Src))
LV = CGF.EmitLValue(Src);
else
LV = CGF.EmitAggExprToLValue(Src);
if (Dest.isIgnored()) return;
CGF.CGM.getHLSLRuntime().EmitHLSLFlatConversionAggregateCopy(
CGF, LV.getAddress(), Src->getType(), Dest.getAddr(), E->getType());
} break;
case TEK_Scalar: {
llvm::Value *SrcVal = CGF.EmitScalarExpr(Src);
if (Dest.isIgnored()) return;
CGF.CGM.getHLSLRuntime().EmitHLSLFlatConversion(
CGF, SrcVal, Dest.getAddr(), E->getType(), Ty);
} break;
default:
assert(0 && "invalid type for flat cast");
break;
}
}
} break;
// HLSL Change Ends.
case CK_LValueBitCast:
llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
case CK_Dependent:
case CK_BitCast:
case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
case CK_NullToPointer:
case CK_NullToMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_DerivedToBaseMemberPointer:
case CK_MemberPointerToBoolean:
case CK_ReinterpretMemberPointer:
case CK_IntegralToPointer:
case CK_PointerToIntegral:
case CK_PointerToBoolean:
case CK_ToVoid:
case CK_VectorSplat:
case CK_IntegralCast:
case CK_IntegralToBoolean:
case CK_IntegralToFloating:
case CK_FloatingToIntegral:
case CK_FloatingToBoolean:
case CK_FloatingCast:
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
case CK_ObjCObjectLValueCast:
case CK_FloatingRealToComplex:
case CK_FloatingComplexToReal:
case CK_FloatingComplexToBoolean:
case CK_FloatingComplexCast:
case CK_FloatingComplexToIntegralComplex:
case CK_IntegralRealToComplex:
case CK_IntegralComplexToReal:
case CK_IntegralComplexToBoolean:
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
case CK_ARCProduceObject:
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLEvent:
case CK_AddressSpaceConversion:
llvm_unreachable("cast kind invalid for aggregate types");
}
}
void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
EmitAggLoadOfLValue(E);
return;
}
RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
EmitMoveFromReturnSlot(E, RV);
}
void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
EmitMoveFromReturnSlot(E, RV);
}
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
CGF.EmitIgnoredExpr(E->getLHS());
Visit(E->getRHS());
}
void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
CodeGenFunction::StmtExprEvaluation eval(CGF);
CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}
void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
VisitPointerToDataMemberBinaryOperator(E);
else
CGF.ErrorUnsupported(E, "aggregate binary expression");
}
void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
const BinaryOperator *E) {
LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
EmitFinalDestCopy(E->getType(), LV);
}
/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
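///
/// Illustrative example (assumed, not from the original source):
///   __block Agg a;
///   a = makeAgg();
/// Here the RHS call may trigger a block copy that relocates 'a', so
/// VisitBinAssign below evaluates the RHS before taking the LHS address.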
static bool isBlockVarRef(const Expr *E) {
// Make sure we look through parens.
E = E->IgnoreParens();
// Check for a direct reference to a __block variable.
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
return (var && var->hasAttr<BlocksAttr>());
}
// More complicated stuff.
// Binary operators.
if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
// For an assignment or pointer-to-member operation, just care
// about the LHS.
if (op->isAssignmentOp() || op->isPtrMemOp())
return isBlockVarRef(op->getLHS());
// For a comma, just care about the RHS.
if (op->getOpcode() == BO_Comma)
return isBlockVarRef(op->getRHS());
// FIXME: pointer arithmetic?
return false;
// Check both sides of a conditional operator.
} else if (const AbstractConditionalOperator *op
= dyn_cast<AbstractConditionalOperator>(E)) {
return isBlockVarRef(op->getTrueExpr())
|| isBlockVarRef(op->getFalseExpr());
// OVEs are required to support BinaryConditionalOperators.
} else if (const OpaqueValueExpr *op
= dyn_cast<OpaqueValueExpr>(E)) {
if (const Expr *src = op->getSourceExpr())
return isBlockVarRef(src);
// Casts are necessary to get things like (*(int*)&var) = foo().
// We don't really care about the kind of cast here, except
// we don't want to look through l2r casts, because it's okay
// to get the *value* in a __block variable.
} else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
if (cast->getCastKind() == CK_LValueToRValue)
return false;
return isBlockVarRef(cast->getSubExpr());
// Handle unary operators. Again, just aggressively look through
// it, ignoring the operation.
} else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
return isBlockVarRef(uop->getSubExpr());
// Look into the base of a field access.
} else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
return isBlockVarRef(mem->getBase());
// Look into the base of a subscript.
} else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
return isBlockVarRef(sub->getBase());
}
return false;
}
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// For an assignment to work, the value on the right has
// to be compatible with the value on the left.
assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
E->getRHS()->getType())
&& "Invalid assignment");
// If the LHS might be a __block variable, and the RHS can
// potentially cause a block copy, we need to evaluate the RHS first
// so that the assignment goes the right place.
// This is pretty semantically fragile.
if (isBlockVarRef(E->getLHS()) &&
E->getRHS()->HasSideEffects(CGF.getContext())) {
// Ensure that we have a destination, and evaluate the RHS into that.
EnsureDest(E->getRHS()->getType());
Visit(E->getRHS());
// Now emit the LHS and copy into it.
LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
// That copy is an atomic copy if the LHS is atomic.
if (LHS.getType()->isAtomicType() ||
CGF.LValueIsSuitableForInlineAtomic(LHS)) {
CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
return;
}
EmitCopy(E->getLHS()->getType(),
AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
needsGC(E->getLHS()->getType()),
AggValueSlot::IsAliased),
Dest);
return;
}
LValue LHS = CGF.EmitLValue(E->getLHS());
// If we have an atomic type, evaluate into the destination and then
// do an atomic copy.
if (LHS.getType()->isAtomicType() ||
CGF.LValueIsSuitableForInlineAtomic(LHS)) {
EnsureDest(E->getRHS()->getType());
Visit(E->getRHS());
CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
return;
}
// Codegen the RHS so that it stores directly into the LHS.
AggValueSlot LHSSlot =
AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
needsGC(E->getLHS()->getType()),
AggValueSlot::IsAliased);
// A non-volatile aggregate destination might have volatile members.
if (!LHSSlot.isVolatile() &&
CGF.hasVolatileMember(E->getLHS()->getType()))
LHSSlot.setVolatile(true);
CGF.EmitAggExpr(E->getRHS(), LHSSlot);
// Copy into the destination if the assignment isn't ignored.
EmitFinalDestCopy(E->getType(), LHS);
}
void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
// Bind the common expression if necessary.
CodeGenFunction::OpaqueValueMapping binding(CGF, E);
CodeGenFunction::ConditionalEvaluation eval(CGF);
CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
CGF.getProfileCount(E));
// Save whether the destination's lifetime is externally managed.
bool isExternallyDestructed = Dest.isExternallyDestructed();
eval.begin(CGF);
CGF.EmitBlock(LHSBlock);
CGF.incrementProfileCounter(E);
Visit(E->getTrueExpr());
eval.end(CGF);
assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
CGF.Builder.CreateBr(ContBlock);
// If the result of an agg expression is unused, then the emission
// of the LHS might need to create a destination slot. That's fine
// with us, and we can safely emit the RHS into the same slot, but
// we shouldn't claim that it's already being destructed.
Dest.setExternallyDestructed(isExternallyDestructed);
eval.begin(CGF);
CGF.EmitBlock(RHSBlock);
Visit(E->getFalseExpr());
eval.end(CGF);
CGF.EmitBlock(ContBlock);
}
void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
Visit(CE->getChosenSubExpr());
}
void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
if (!ArgPtr) {
// If EmitVAArg fails, we fall back to the LLVM instruction.
llvm::Value *Val =
Builder.CreateVAArg(ArgValue, CGF.ConvertType(VE->getType()));
if (!Dest.isIgnored())
Builder.CreateStore(Val, Dest.getAddr());
return;
}
EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
// Ensure that we have a slot, but if we already do, remember
// whether it was externally destructed.
bool wasExternallyDestructed = Dest.isExternallyDestructed();
EnsureDest(E->getType());
// We're going to push a destructor if there isn't already one.
Dest.setExternallyDestructed();
Visit(E->getSubExpr());
// Push that destructor we promised.
if (!wasExternallyDestructed)
CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
}
void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
AggValueSlot Slot = EnsureSlot(E->getType());
CGF.EmitCXXConstructExpr(E, Slot);
}
void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
AggValueSlot Slot = EnsureSlot(E->getType());
CGF.EmitLambdaExpr(E, Slot);
}
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
CGF.enterFullExpression(E);
CodeGenFunction::RunCleanupsScope cleanups(CGF);
Visit(E->getSubExpr());
}
void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
QualType T = E->getType();
AggValueSlot Slot = EnsureSlot(T);
EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}
void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
QualType T = E->getType();
AggValueSlot Slot = EnsureSlot(T);
EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}
/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true. This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
E = E->IgnoreParens();
// 0
if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
return IL->getValue() == 0;
// +0.0
if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
return FL->getValue().isPosZero();
// int()
if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
CGF.getTypes().isZeroInitializable(E->getType()))
return true;
// (int*)0 - Null pointer expressions.
if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
return ICE->getCastKind() == CK_NullToPointer;
// '\0'
if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
return CL->getValue() == 0;
// Otherwise, hard case: conservatively return false.
return false;
}
void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
QualType type = LV.getType();
// FIXME: Ignore result?
// FIXME: Are initializers affected by volatile?
if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
// Storing "i32 0" to a zero'd memory location is a noop.
return;
} else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
return EmitNullInitializationToLValue(LV);
} else if (isa<NoInitExpr>(E)) {
// Do nothing.
return;
} else if (type->isReferenceType()) {
RValue RV = CGF.EmitReferenceBindingToExpr(E);
return CGF.EmitStoreThroughLValue(RV, LV);
}
switch (CGF.getEvaluationKind(type)) {
case TEK_Complex:
CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
return;
case TEK_Aggregate:
CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
Dest.isZeroed()));
return;
case TEK_Scalar:
// HLSL Change Begins.
if (hlsl::IsHLSLMatType(LV.getType())) {
llvm::Value *V = CGF.EmitScalarExpr(E);
llvm::Value *Ptr = LV.getAddress();
CGF.CGM.getHLSLRuntime().EmitHLSLMatrixStore(CGF, V, Ptr, LV.getType());
} else
// HLSL Change Ends.
if (LV.isSimple()) {
CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
} else {
CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
}
return;
}
llvm_unreachable("bad evaluation kind");
}
void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
QualType type = lv.getType();
// If the destination slot is already zeroed out before the aggregate is
// copied into it, we don't have to emit any zeros here.
if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
return;
if (CGF.hasScalarEvaluationKind(type)) {
// For non-aggregates, we can store the appropriate null constant.
llvm::Value *null = CGF.CGM.EmitNullConstant(type);
// Note that the following is not equivalent to
// EmitStoreThroughBitfieldLValue for ARC types.
if (lv.isBitField()) {
CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
} else {
assert(lv.isSimple());
CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
}
} else {
// There's a potential optimization opportunity in combining
// memsets; that would be easy for arrays, but relatively
// difficult for structures with the current code.
CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
}
}
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
// FIXME: Assess perf here? Figure out what cases are worth optimizing here
// (Length of globals? Chunks of zeroed-out space?).
//
// If we can, prefer a copy from a global; this is a lot less code for long
// globals, and it's easier for the current optimizers to analyze.
if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
llvm::GlobalVariable* GV =
new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
llvm::GlobalValue::InternalLinkage, C, "");
EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
return;
}
#endif
if (E->hadArrayRangeDesignator())
CGF.ErrorUnsupported(E, "GNU array range designator extension");
AggValueSlot Dest = EnsureSlot(E->getType());
LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
Dest.getAlignment());
// Handle initialization of an array.
if (E->getType()->isArrayType()) {
if (E->isStringLiteralInit())
return Visit(E->getInit(0));
QualType elementType =
CGF.getContext().getAsArrayType(E->getType())->getElementType();
llvm::PointerType *APType =
cast<llvm::PointerType>(Dest.getAddr()->getType());
llvm::ArrayType *AType =
cast<llvm::ArrayType>(APType->getElementType());
EmitArrayInit(Dest.getAddr(), AType, elementType, E);
return;
}
if (E->getType()->isAtomicType()) {
// An _Atomic(T) object can be list-initialized from an expression
// of the same type.
assert(E->getNumInits() == 1 &&
CGF.getContext().hasSameUnqualifiedType(E->getInit(0)->getType(),
E->getType()) &&
"unexpected list initialization for atomic object");
return Visit(E->getInit(0));
}
assert(E->getType()->isRecordType() && "Only support structs/unions here!");
// Do struct initialization; this code just sets each individual member
// to the appropriate value. This makes bitfield support automatic;
// the disadvantage is that the generated code is more difficult for
// the optimizer, especially with bitfields.
unsigned NumInitElements = E->getNumInits();
RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
// Prepare a 'this' for CXXDefaultInitExprs.
CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddr());
if (record->isUnion()) {
// Only initialize one field of a union. The field itself is
// specified by the initializer list.
if (!E->getInitializedFieldInUnion()) {
// Empty union; we have nothing to do.
#ifndef NDEBUG
// Make sure that it's really an empty union and not a failure of
// semantic analysis.
for (const auto *Field : record->fields())
assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
return;
}
// FIXME: volatility
FieldDecl *Field = E->getInitializedFieldInUnion();
LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
if (NumInitElements) {
// Store the initializer into the field
EmitInitializationToLValue(E->getInit(0), FieldLoc);
} else {
// Default-initialize to null.
EmitNullInitializationToLValue(FieldLoc);
}
return;
}
// HLSL Change Begins
if (CGF.getLangOpts().HLSL &&
!CGF.CGM.getHLSLRuntime().IsTrivalInitListExpr(CGF, E)) {
CGF.CGM.getHLSLRuntime().EmitHLSLInitListExpr(CGF, E, Dest.getAddr());
return;
}
// HLSL Change Ends
// We'll need to enter cleanup scopes in case any of the member
// initializers throw an exception.
SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
llvm::Instruction *cleanupDominator = nullptr;
// Here we iterate over the fields; this makes it simpler to both
// default-initialize fields and skip over unnamed fields.
unsigned curInitIndex = 0;
for (const auto *field : record->fields()) {
// We're done once we hit the flexible array member.
if (field->getType()->isIncompleteArrayType())
break;
// Always skip anonymous bitfields.
if (field->isUnnamedBitfield())
continue;
// We're done if we reach the end of the explicit initializers, we
// have a zeroed object, and the rest of the fields are
// zero-initializable.
if (curInitIndex == NumInitElements && Dest.isZeroed() &&
CGF.getTypes().isZeroInitializable(E->getType()))
break;
LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
// We never generate write-barriers for initialized fields.
LV.setNonGC(true);
if (curInitIndex < NumInitElements) {
// Store the initializer into the field.
EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
} else {
// We're out of initializers; default-initialize to null.
EmitNullInitializationToLValue(LV);
}
// Push a destructor if necessary.
// FIXME: if we have an array of structures, all explicitly
// initialized, we can end up pushing a linear number of cleanups.
bool pushedCleanup = false;
if (QualType::DestructionKind dtorKind
= field->getType().isDestructedType()) {
assert(LV.isSimple());
if (CGF.needsEHCleanup(dtorKind)) {
if (!cleanupDominator)
cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder
CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
CGF.getDestroyer(dtorKind), false);
cleanups.push_back(CGF.EHStack.stable_begin());
pushedCleanup = true;
}
}
// If the GEP didn't get used because of a dead zero init or something
// else, clean it up for -O0 builds and general tidiness.
if (!pushedCleanup && LV.isSimple())
if (llvm::GetElementPtrInst *GEP =
dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
if (GEP->use_empty())
GEP->eraseFromParent();
}
// Deactivate all the partial cleanups in reverse order, which
// generally means popping them.
for (unsigned i = cleanups.size(); i != 0; --i)
CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
// Destroy the placeholder if we made one.
if (cleanupDominator)
cleanupDominator->eraseFromParent();
}
void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
AggValueSlot Dest = EnsureSlot(E->getType());
LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
Dest.getAlignment());
EmitInitializationToLValue(E->getBase(), DestLV);
VisitInitListExpr(E->getUpdater());
}
//===----------------------------------------------------------------------===//
// Entry Points into this File
//===----------------------------------------------------------------------===//
/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
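///
/// Illustrative example (assumed, not from the original source): for
///   struct { int a, b, c, d; } s = { 1 };
/// only the first field contributes, so the estimate is sizeof(int)
/// rather than sizeof(s).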
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
E = E->IgnoreParens();
// 0 and 0.0 won't require any non-zero stores!
if (isSimpleZero(E, CGF)) return CharUnits::Zero();
// If this is an initlist expr, sum up the sizes of the (present)
// elements. If this is something weird, assume the whole thing is non-zero.
const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
return CGF.getContext().getTypeSizeInChars(E->getType());
// InitListExprs for structs have to be handled carefully. If there are
// reference members, we need to consider the size of the reference, not the
// referencee. InitListExprs for unions and arrays can't have references.
if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
if (!RT->isUnionType()) {
RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
CharUnits NumNonZeroBytes = CharUnits::Zero();
unsigned ILEElement = 0;
for (const auto *Field : SD->fields()) {
// We're done once we hit the flexible array member or run out of
// InitListExpr elements.
if (Field->getType()->isIncompleteArrayType() ||
ILEElement == ILE->getNumInits())
break;
if (Field->isUnnamedBitfield())
continue;
const Expr *E = ILE->getInit(ILEElement++);
// Reference values are always non-null and have the width of a pointer.
if (Field->getType()->isReferenceType())
NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
CGF.getTarget().getPointerWidth(0));
else
NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
}
return NumNonZeroBytes;
}
}
CharUnits NumNonZeroBytes = CharUnits::Zero();
for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
return NumNonZeroBytes;
}
/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
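/// Illustrative example (assumed, not from the original source): for
///   struct { int x; int zeros[63]; } s = { 1 };
/// well over 3/4 of the 256 bytes are known zero, so one memset of the
/// whole object plus a single scalar store is preferred over
/// initializing each element individually.
///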
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
CodeGenFunction &CGF) {
// If the slot is already known to be zeroed, nothing to do. Don't mess with
// volatile stores.
if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == nullptr)
return;
// C++ objects with a user-declared constructor don't need zeroing.
if (CGF.getLangOpts().CPlusPlus)
if (const RecordType *RT = CGF.getContext()
.getBaseElementType(E->getType())->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
if (RD->hasUserDeclaredConstructor())
return;
}
// HLSL Change Begins
// Don't generate memset for hlsl.
if (CGF.getLangOpts().HLSL)
return;
// HLSL Change Ends
// If the type is 16-bytes or smaller, prefer individual stores over memset.
std::pair<CharUnits, CharUnits> TypeInfo =
CGF.getContext().getTypeInfoInChars(E->getType());
if (TypeInfo.first <= CharUnits::fromQuantity(16))
return;
// Check to see if over 3/4 of the initializer are known to be zero. If so,
// we prefer to emit memset + individual stores for the rest.
CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
if (NumNonZeroBytes*4 > TypeInfo.first)
return;
// Okay, it seems like a good idea to use an initial memset, emit the call.
llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
CharUnits Align = TypeInfo.second;
llvm::Value *Loc = Slot.getAddr();
Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
Align.getQuantity(), false);
// Tell the AggExprEmitter that the slot is known zero.
Slot.setZeroed();
}
/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type. The result is computed into the given slot's address. Note that if
/// the slot is ignored, the value of the aggregate expression is not needed;
/// a non-ignored slot must carry a valid address.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
assert(E && hasAggregateEvaluationKind(E->getType()) &&
"Invalid aggregate expression to emit");
assert((Slot.getAddr() != nullptr || Slot.isIgnored()) &&
"slot has bits but no address");
// Optimize the slot if possible.
CheckAggExprForMemSetUse(Slot, E, *this);
AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
}
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
llvm::Value *Temp = CreateMemTemp(E->getType());
LValue LV = MakeAddrLValue(Temp, E->getType());
CGM.getHLSLRuntime().MarkPotentialResourceTemp(*this, Temp, E->getType());
EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
return LV;
}
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
llvm::Value *SrcPtr, QualType Ty,
bool isVolatile,
CharUnits alignment,
bool isAssignment) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
assert((Record->hasTrivialCopyConstructor() ||
Record->hasTrivialCopyAssignment() ||
Record->hasTrivialMoveConstructor() ||
Record->hasTrivialMoveAssignment() ||
Record->isUnion()) &&
"Trying to aggregate-copy a type without a trivial copy/move "
"constructor or assignment operator");
// Ignore empty classes in C++.
if (Record->isEmpty())
return;
}
}
// HLSL Change Begins
if (getLangOpts().HLSL) {
// Don't generate memcpy for hlsl.
CGM.getHLSLRuntime().EmitHLSLAggregateCopy(*this, SrcPtr, DestPtr, Ty);
return;
}
// HLSL Change Ends
// Aggregate assignment turns into llvm.memcpy. This is almost valid per
// C99 6.5.16.1p3, which states "If the value being stored in an object is
// read from another object that overlaps in any way the storage of the first
// object, then the overlap shall be exact and the two objects shall have
// qualified or unqualified versions of a compatible type."
//
// memcpy is not defined if the source and destination pointers are exactly
// equal, but other compilers do this optimization, and almost every memcpy
// implementation handles this case safely. If there is a libc that does not
// safely handle this, we can add a target hook.
// Get data size and alignment info for this aggregate. If this is an
// assignment, don't copy the tail padding. Otherwise copying it is fine.
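// Illustrative example (assumed, not from the original source): a class
// whose size is 8 bytes but whose data size is 5 may have its tail
// padding reused by a derived class's fields, so assignment copies only
// the 5 data bytes.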
std::pair<CharUnits, CharUnits> TypeInfo;
if (isAssignment)
TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
else
TypeInfo = getContext().getTypeInfoInChars(Ty);
if (alignment.isZero())
alignment = TypeInfo.second;
llvm::Value *SizeVal = nullptr;
if (TypeInfo.first.isZero()) {
// But note that getTypeInfo returns 0 for a VLA.
if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
getContext().getAsArrayType(Ty))) {
QualType BaseEltTy;
SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
TypeInfo = getContext().getTypeInfoDataSizeInChars(BaseEltTy);
std::pair<CharUnits, CharUnits> LastElementTypeInfo;
if (!isAssignment)
LastElementTypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
assert(!TypeInfo.first.isZero());
SizeVal = Builder.CreateNUWMul(
SizeVal,
llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
if (!isAssignment) {
SizeVal = Builder.CreateNUWSub(
SizeVal,
llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
SizeVal = Builder.CreateNUWAdd(
SizeVal, llvm::ConstantInt::get(
SizeTy, LastElementTypeInfo.first.getQuantity()));
}
}
}
if (!SizeVal) {
SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
}
// FIXME: If we have a volatile struct, the optimizer can remove what might
// appear to be `extra' memory ops:
//
// volatile struct { int i; } a, b;
//
// int main() {
// a = b;
// a = b;
// }
//
// we need to use a different call here. We use isVolatile to indicate when
// either the source or the destination is volatile.
llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
llvm::Type *DBP =
llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
DestPtr = Builder.CreateBitCast(DestPtr, DBP);
llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
llvm::Type *SBP =
llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);
// Don't do any of the memmove_collectable tests if GC isn't set.
if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
// fall through
} else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
RecordDecl *Record = RecordTy->getDecl();
if (Record->hasObjectMember()) {
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
SizeVal);
return;
}
} else if (Ty->isArrayType()) {
QualType BaseType = getContext().getBaseElementType(Ty);
if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
if (RecordTy->getDecl()->hasObjectMember()) {
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
SizeVal);
return;
}
}
}
// Determine the metadata to describe the position of any padding in this
// memcpy, as well as the TBAA tags for the members of the struct, in case
// the optimizer wishes to expand it in to scalar memory operations.
llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty);
Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, alignment.getQuantity(),
isVolatile, /*TBAATag=*/nullptr, TBAAStructTag);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGOpenMPRuntime.cpp | //===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CGCleanup.h"
#include "clang/AST/Decl.h"
#include "clang/AST/StmtOpenMP.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
using namespace clang;
using namespace CodeGen;
// HLSL Change Starts
// No OpenMP codegen support, so simply skip all of this compilation.
// Here are enough stubs to link the current targets.
#if 0
// HLSL Change Ends
namespace {
/// \brief Base class for handling code generation inside OpenMP regions.
class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
public:
/// \brief Kinds of OpenMP regions used in codegen.
enum CGOpenMPRegionKind {
/// \brief Region with outlined function for standalone 'parallel'
/// directive.
ParallelOutlinedRegion,
/// \brief Region with outlined function for standalone 'task' directive.
TaskOutlinedRegion,
/// \brief Region for constructs that do not require function outlining,
/// like 'for', 'sections', 'atomic' etc. directives.
InlinedRegion,
};
CGOpenMPRegionInfo(const CapturedStmt &CS,
const CGOpenMPRegionKind RegionKind,
const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind)
: CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
CodeGen(CodeGen), Kind(Kind) {}
CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind)
: CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
Kind(Kind) {}
/// \brief Get a variable or parameter for storing global thread id
/// inside OpenMP construct.
virtual const VarDecl *getThreadIDVariable() const = 0;
/// \brief Emit the captured statement body.
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;
/// \brief Get an LValue for the current ThreadID variable.
/// \return LValue for thread id variable. This LValue always has type int32*.
virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
CGOpenMPRegionKind getRegionKind() const { return RegionKind; }
OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
static bool classof(const CGCapturedStmtInfo *Info) {
return Info->getKind() == CR_OpenMP;
}
protected:
CGOpenMPRegionKind RegionKind;
const RegionCodeGenTy &CodeGen;
OpenMPDirectiveKind Kind;
};
/// \brief API for captured statement code generation in OpenMP constructs.
class CGOpenMPOutlinedRegionInfo : public CGOpenMPRegionInfo {
public:
CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
const RegionCodeGenTy &CodeGen,
OpenMPDirectiveKind Kind)
: CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind),
ThreadIDVar(ThreadIDVar) {
assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
}
/// \brief Get a variable or parameter for storing global thread id
/// inside OpenMP construct.
const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
/// \brief Get the name of the capture helper.
StringRef getHelperName() const override { return ".omp_outlined."; }
static bool classof(const CGCapturedStmtInfo *Info) {
return CGOpenMPRegionInfo::classof(Info) &&
cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
ParallelOutlinedRegion;
}
private:
/// \brief A variable or parameter storing global thread id for OpenMP
/// constructs.
const VarDecl *ThreadIDVar;
};
/// \brief API for captured statement code generation in OpenMP constructs.
class CGOpenMPTaskOutlinedRegionInfo : public CGOpenMPRegionInfo {
public:
CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
const VarDecl *ThreadIDVar,
const RegionCodeGenTy &CodeGen,
OpenMPDirectiveKind Kind)
: CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind),
ThreadIDVar(ThreadIDVar) {
assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
}
/// \brief Get a variable or parameter for storing global thread id
/// inside OpenMP construct.
const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }
/// \brief Get an LValue for the current ThreadID variable.
LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;
/// \brief Get the name of the capture helper.
StringRef getHelperName() const override { return ".omp_outlined."; }
static bool classof(const CGCapturedStmtInfo *Info) {
return CGOpenMPRegionInfo::classof(Info) &&
cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
TaskOutlinedRegion;
}
private:
/// \brief A variable or parameter storing global thread id for OpenMP
/// constructs.
const VarDecl *ThreadIDVar;
};
/// \brief API for inlined captured statement code generation in OpenMP
/// constructs.
class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
public:
CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
const RegionCodeGenTy &CodeGen,
OpenMPDirectiveKind Kind)
: CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind), OldCSI(OldCSI),
OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}
/// \brief Retrieve the value of the context parameter.
llvm::Value *getContextValue() const override {
if (OuterRegionInfo)
return OuterRegionInfo->getContextValue();
llvm_unreachable("No context value for inlined OpenMP region");
}
virtual void setContextValue(llvm::Value *V) override {
if (OuterRegionInfo) {
OuterRegionInfo->setContextValue(V);
return;
}
llvm_unreachable("No context value for inlined OpenMP region");
}
/// \brief Lookup the captured field decl for a variable.
const FieldDecl *lookup(const VarDecl *VD) const override {
if (OuterRegionInfo)
return OuterRegionInfo->lookup(VD);
// If there is no outer outlined region, no need to look up in a list of
// captured variables, we can use the original one.
return nullptr;
}
FieldDecl *getThisFieldDecl() const override {
if (OuterRegionInfo)
return OuterRegionInfo->getThisFieldDecl();
return nullptr;
}
/// \brief Get a variable or parameter for storing global thread id
/// inside OpenMP construct.
const VarDecl *getThreadIDVariable() const override {
if (OuterRegionInfo)
return OuterRegionInfo->getThreadIDVariable();
return nullptr;
}
/// \brief Get the name of the capture helper.
StringRef getHelperName() const override {
if (auto *OuterRegionInfo = getOldCSI())
return OuterRegionInfo->getHelperName();
llvm_unreachable("No helper name for inlined OpenMP construct");
}
CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }
static bool classof(const CGCapturedStmtInfo *Info) {
return CGOpenMPRegionInfo::classof(Info) &&
cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
}
private:
/// \brief CodeGen info about outer OpenMP region.
CodeGenFunction::CGCapturedStmtInfo *OldCSI;
CGOpenMPRegionInfo *OuterRegionInfo;
};
/// \brief RAII for emitting code of OpenMP constructs.
class InlinedOpenMPRegionRAII {
CodeGenFunction &CGF;
public:
/// \brief Constructs region for combined constructs.
/// \param CodeGen Code generation sequence for combined directives. Includes
/// a list of functions used for code generation of implicitly inlined
/// regions.
InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
OpenMPDirectiveKind Kind)
: CGF(CGF) {
// Start emission for the construct.
CGF.CapturedStmtInfo =
new CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, CodeGen, Kind);
}
~InlinedOpenMPRegionRAII() {
// Restore original CapturedStmtInfo only if we're done with code emission.
auto *OldCSI =
cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
delete CGF.CapturedStmtInfo;
CGF.CapturedStmtInfo = OldCSI;
}
};
} // namespace
LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
return CGF.MakeNaturalAlignAddrLValue(
CGF.Builder.CreateAlignedLoad(
CGF.GetAddrOfLocalVar(getThreadIDVariable()),
CGF.PointerAlignInBytes),
getThreadIDVariable()
->getType()
->castAs<PointerType>()
->getPointeeType());
}
void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
// The point of exit cannot be a branch out of the structured block.
// longjmp() and throw() must not violate the entry/exit criteria.
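// Illustrative consequence (assumed, not from the original source):
// with the terminate scope pushed below, an exception thrown inside the
// region, e.g.
//   #pragma omp parallel
//   { throw 0; }
// reaches std::terminate instead of any enclosing handler.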
CGF.EHStack.pushTerminate();
{
CodeGenFunction::RunCleanupsScope Scope(CGF);
CodeGen(CGF);
}
CGF.EHStack.popTerminate();
}
LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
CodeGenFunction &CGF) {
return CGF.MakeNaturalAlignAddrLValue(
CGF.GetAddrOfLocalVar(getThreadIDVariable()),
getThreadIDVariable()->getType());
}
CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
: CGM(CGM), DefaultOpenMPPSource(nullptr), KmpRoutineEntryPtrTy(nullptr) {
IdentTy = llvm::StructType::create(
"ident_t", CGM.Int32Ty /* reserved_1 */, CGM.Int32Ty /* flags */,
CGM.Int32Ty /* reserved_2 */, CGM.Int32Ty /* reserved_3 */,
CGM.Int8PtrTy /* psource */, nullptr);
// Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
llvm::PointerType::getUnqual(CGM.Int32Ty)};
Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
}
void CGOpenMPRuntime::clear() {
InternalVars.clear();
}
llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
assert(ThreadIDVar->getType()->isPointerType() &&
"thread id variable must be of type kmp_int32 *");
const CapturedStmt *CS = cast<CapturedStmt>(D.getAssociatedStmt());
CodeGenFunction CGF(CGM, true);
CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
return CGF.GenerateCapturedStmtFunction(*CS);
}
llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
assert(!ThreadIDVar->getType()->isPointerType() &&
"thread id variable must be of type kmp_int32 for tasks");
auto *CS = cast<CapturedStmt>(D.getAssociatedStmt());
CodeGenFunction CGF(CGM, true);
CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
InnermostKind);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
return CGF.GenerateCapturedStmtFunction(*CS);
}
llvm::Value *
CGOpenMPRuntime::getOrCreateDefaultLocation(OpenMPLocationFlags Flags) {
llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags);
if (!Entry) {
if (!DefaultOpenMPPSource) {
// Initialize default location for psource field of ident_t structure of
// all ident_t objects. Format is ";file;function;line;column;;".
// Taken from
// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp_str.c
DefaultOpenMPPSource =
CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;");
DefaultOpenMPPSource =
llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
}
auto DefaultOpenMPLocation = new llvm::GlobalVariable(
CGM.getModule(), IdentTy, /*isConstant*/ true,
llvm::GlobalValue::PrivateLinkage, /*Initializer*/ nullptr);
DefaultOpenMPLocation->setUnnamedAddr(true);
llvm::Constant *Zero = llvm::ConstantInt::get(CGM.Int32Ty, 0, true);
llvm::Constant *Values[] = {Zero,
llvm::ConstantInt::get(CGM.Int32Ty, Flags),
Zero, Zero, DefaultOpenMPPSource};
llvm::Constant *Init = llvm::ConstantStruct::get(IdentTy, Values);
DefaultOpenMPLocation->setInitializer(Init);
OpenMPDefaultLocMap[Flags] = DefaultOpenMPLocation;
return DefaultOpenMPLocation;
}
return Entry;
}
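// Illustratively, for a given Flags value the code above produces IR roughly
// of the form (global names are chosen by the IR emitter; this is a sketch,
// not exact output):
//
//   @.str = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00"
//   @0 = private unnamed_addr constant %ident_t { i32 0, i32 <Flags>, i32 0,
//            i32 0, i8* bitcast ([23 x i8]* @.str to i8*) }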
llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPLocationFlags Flags) {
// If no debug info is generated - return global default location.
if (CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::NoDebugInfo ||
Loc.isInvalid())
return getOrCreateDefaultLocation(Flags);
assert(CGF.CurFn && "No function in current CodeGenFunction.");
llvm::Value *LocValue = nullptr;
auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
if (I != OpenMPLocThreadIDMap.end())
LocValue = I->second.DebugLoc;
  // OpenMPLocThreadIDMap may have a null DebugLoc and a non-null ThreadID if
  // getThreadID was called before this routine.
if (LocValue == nullptr) {
// Generate "ident_t .kmpc_loc.addr;"
llvm::AllocaInst *AI = CGF.CreateTempAlloca(IdentTy, ".kmpc_loc.addr");
AI->setAlignment(CGM.getDataLayout().getPrefTypeAlignment(IdentTy));
auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
Elem.second.DebugLoc = AI;
LocValue = AI;
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
llvm::ConstantExpr::getSizeOf(IdentTy),
CGM.PointerAlignInBytes);
}
// char **psource = &.kmpc_loc_<flags>.addr.psource;
auto *PSource = CGF.Builder.CreateConstInBoundsGEP2_32(IdentTy, LocValue, 0,
IdentField_PSource);
auto OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
if (OMPDebugLoc == nullptr) {
SmallString<128> Buffer2;
llvm::raw_svector_ostream OS2(Buffer2);
// Build debug location
PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
OS2 << ";" << PLoc.getFilename() << ";";
if (const FunctionDecl *FD =
dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl)) {
OS2 << FD->getQualifiedNameAsString();
}
OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
}
// *psource = ";<File>;<Function>;<Line>;<Column>;;";
CGF.Builder.CreateStore(OMPDebugLoc, PSource);
return LocValue;
}
llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
SourceLocation Loc) {
assert(CGF.CurFn && "No function in current CodeGenFunction.");
llvm::Value *ThreadID = nullptr;
// Check whether we've already cached a load of the thread id in this
// function.
auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
if (I != OpenMPLocThreadIDMap.end()) {
ThreadID = I->second.ThreadID;
if (ThreadID != nullptr)
return ThreadID;
}
if (auto OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
if (OMPRegionInfo->getThreadIDVariable()) {
      // Check if this is an outlined function with the thread id passed as
      // an argument.
auto LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
ThreadID = CGF.EmitLoadOfLValue(LVal, Loc).getScalarVal();
      // If the value was loaded in the entry block, cache it and use it
      // everywhere in the function.
if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
Elem.second.ThreadID = ThreadID;
}
return ThreadID;
}
}
  // This is not an outlined function region - need to call kmp_int32
  // __kmpc_global_thread_num(ident_t *loc).
// Generate thread id value and cache this value for use across the
// function.
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
ThreadID =
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
emitUpdateLocation(CGF, Loc));
auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
Elem.second.ThreadID = ThreadID;
return ThreadID;
}
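// In the non-outlined case the cached value amounts to a single entry-block
// call, roughly:
//
//   %tid = call i32 @__kmpc_global_thread_num(%ident_t* %loc)
//
// which is then reused for every later getThreadID query in the function.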
void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
assert(CGF.CurFn && "No function in current CodeGenFunction.");
if (OpenMPLocThreadIDMap.count(CGF.CurFn))
OpenMPLocThreadIDMap.erase(CGF.CurFn);
}
llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
return llvm::PointerType::getUnqual(IdentTy);
}
llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
return llvm::PointerType::getUnqual(Kmpc_MicroTy);
}
llvm::Constant *
CGOpenMPRuntime::createRuntimeFunction(OpenMPRTLFunction Function) {
llvm::Constant *RTLFn = nullptr;
switch (Function) {
case OMPRTL__kmpc_fork_call: {
// Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
// microtask, ...);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
getKmpc_MicroPointerTy()};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
break;
}
case OMPRTL__kmpc_global_thread_num: {
// Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
break;
}
case OMPRTL__kmpc_threadprivate_cached: {
// Build void *__kmpc_threadprivate_cached(ident_t *loc,
// kmp_int32 global_tid, void *data, size_t size, void ***cache);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
CGM.VoidPtrTy, CGM.SizeTy,
CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
break;
}
case OMPRTL__kmpc_critical: {
// Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
// kmp_critical_name *crit);
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), CGM.Int32Ty,
llvm::PointerType::getUnqual(KmpCriticalNameTy)};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
break;
}
case OMPRTL__kmpc_threadprivate_register: {
// Build void __kmpc_threadprivate_register(ident_t *, void *data,
// kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
// typedef void *(*kmpc_ctor)(void *);
auto KmpcCtorTy =
llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
/*isVarArg*/ false)->getPointerTo();
// typedef void *(*kmpc_cctor)(void *, void *);
llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
auto KmpcCopyCtorTy =
llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
/*isVarArg*/ false)->getPointerTo();
// typedef void (*kmpc_dtor)(void *);
auto KmpcDtorTy =
llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
->getPointerTo();
llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
KmpcCopyCtorTy, KmpcDtorTy};
auto FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
/*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
break;
}
case OMPRTL__kmpc_end_critical: {
// Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
// kmp_critical_name *crit);
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), CGM.Int32Ty,
llvm::PointerType::getUnqual(KmpCriticalNameTy)};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
break;
}
case OMPRTL__kmpc_cancel_barrier: {
// Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
// global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
break;
}
case OMPRTL__kmpc_barrier: {
// Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
break;
}
case OMPRTL__kmpc_for_static_fini: {
// Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
break;
}
case OMPRTL__kmpc_push_num_threads: {
// Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
// kmp_int32 num_threads)
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
break;
}
case OMPRTL__kmpc_serialized_parallel: {
// Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
// global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
break;
}
case OMPRTL__kmpc_end_serialized_parallel: {
// Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
// global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
break;
}
case OMPRTL__kmpc_flush: {
// Build void __kmpc_flush(ident_t *loc);
llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
break;
}
case OMPRTL__kmpc_master: {
// Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
break;
}
case OMPRTL__kmpc_end_master: {
// Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
break;
}
case OMPRTL__kmpc_omp_taskyield: {
// Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
// int end_part);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
break;
}
case OMPRTL__kmpc_single: {
// Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
break;
}
case OMPRTL__kmpc_end_single: {
// Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
break;
}
case OMPRTL__kmpc_omp_task_alloc: {
// Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
// kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
// kmp_routine_entry_t *task_entry);
assert(KmpRoutineEntryPtrTy != nullptr &&
"Type kmp_routine_entry_t must be created.");
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
// Return void * and then cast to particular kmp_task_t type.
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
break;
}
case OMPRTL__kmpc_omp_task: {
// Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
// *new_task);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
CGM.VoidPtrTy};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
break;
}
case OMPRTL__kmpc_copyprivate: {
// Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
// size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
// kmp_int32 didit);
llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
auto *CpyFnTy =
llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
break;
}
case OMPRTL__kmpc_reduce: {
// Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
// kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
// (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
/*isVarArg=*/false);
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
llvm::PointerType::getUnqual(KmpCriticalNameTy)};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
break;
}
case OMPRTL__kmpc_reduce_nowait: {
// Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
// global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
// void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
// *lck);
llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
/*isVarArg=*/false);
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
llvm::PointerType::getUnqual(KmpCriticalNameTy)};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
break;
}
case OMPRTL__kmpc_end_reduce: {
// Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
// kmp_critical_name *lck);
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), CGM.Int32Ty,
llvm::PointerType::getUnqual(KmpCriticalNameTy)};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
break;
}
case OMPRTL__kmpc_end_reduce_nowait: {
    // Build void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
// kmp_critical_name *lck);
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), CGM.Int32Ty,
llvm::PointerType::getUnqual(KmpCriticalNameTy)};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn =
CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
break;
}
case OMPRTL__kmpc_omp_task_begin_if0: {
    // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
CGM.VoidPtrTy};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn =
CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
break;
}
case OMPRTL__kmpc_omp_task_complete_if0: {
    // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
CGM.VoidPtrTy};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy,
/*Name=*/"__kmpc_omp_task_complete_if0");
break;
}
case OMPRTL__kmpc_ordered: {
// Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
break;
}
case OMPRTL__kmpc_end_ordered: {
// Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
break;
}
case OMPRTL__kmpc_omp_taskwait: {
// Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
break;
}
case OMPRTL__kmpc_taskgroup: {
// Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
break;
}
case OMPRTL__kmpc_end_taskgroup: {
// Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
break;
}
case OMPRTL__kmpc_push_proc_bind: {
// Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
// int proc_bind)
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
break;
}
case OMPRTL__kmpc_omp_task_with_deps: {
// Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
// kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
// kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), CGM.Int32Ty, CGM.VoidPtrTy, CGM.Int32Ty,
CGM.VoidPtrTy, CGM.Int32Ty, CGM.VoidPtrTy};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
RTLFn =
CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
break;
}
case OMPRTL__kmpc_omp_wait_deps: {
// Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
// kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
// kmp_depend_info_t *noalias_dep_list);
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
CGM.Int32Ty, CGM.VoidPtrTy,
CGM.Int32Ty, CGM.VoidPtrTy};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
break;
}
case OMPRTL__kmpc_cancellationpoint: {
// Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
// global_tid, kmp_int32 cncl_kind)
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
break;
}
case OMPRTL__kmpc_cancel: {
// Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
// kmp_int32 cncl_kind)
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
break;
}
}
return RTLFn;
}
llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
auto Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
: "__kmpc_for_static_init_4u")
: (IVSigned ? "__kmpc_for_static_init_8"
: "__kmpc_for_static_init_8u");
auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
auto PtrTy = llvm::PointerType::getUnqual(ITy);
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), // loc
CGM.Int32Ty, // tid
CGM.Int32Ty, // schedtype
llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
PtrTy, // p_lower
PtrTy, // p_upper
PtrTy, // p_stride
ITy, // incr
ITy // chunk
};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
return CGM.CreateRuntimeFunction(FnTy, Name);
}
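// For example, with IVSize == 32 and IVSigned == true the function above
// declares:
//
//   void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 tid,
//                                 kmp_int32 schedtype, kmp_int32 *p_lastiter,
//                                 kmp_int32 *p_lower, kmp_int32 *p_upper,
//                                 kmp_int32 *p_stride, kmp_int32 incr,
//                                 kmp_int32 chunk);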
llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize,
bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
auto Name =
IVSize == 32
? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
: (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
CGM.Int32Ty, // tid
CGM.Int32Ty, // schedtype
ITy, // lower
ITy, // upper
ITy, // stride
ITy // chunk
};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
return CGM.CreateRuntimeFunction(FnTy, Name);
}
llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize,
bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
auto Name =
IVSize == 32
? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
: (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), // loc
CGM.Int32Ty, // tid
};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(FnTy, Name);
}
llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize,
bool IVSigned) {
assert((IVSize == 32 || IVSize == 64) &&
"IV size is not compatible with the omp runtime");
auto Name =
IVSize == 32
? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
: (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
auto PtrTy = llvm::PointerType::getUnqual(ITy);
llvm::Type *TypeParams[] = {
getIdentTyPointerTy(), // loc
CGM.Int32Ty, // tid
llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
PtrTy, // p_lower
PtrTy, // p_upper
PtrTy // p_stride
};
llvm::FunctionType *FnTy =
llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
return CGM.CreateRuntimeFunction(FnTy, Name);
}
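// For example, with IVSize == 32 and IVSigned == true the function above
// declares:
//
//   kmp_int32 __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 tid,
//                                    kmp_int32 *p_lastiter,
//                                    kmp_int32 *p_lower, kmp_int32 *p_upper,
//                                    kmp_int32 *p_stride);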
llvm::Constant *
CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
assert(!CGM.getLangOpts().OpenMPUseTLS ||
!CGM.getContext().getTargetInfo().isTLSSupported());
// Lookup the entry, lazily creating it if necessary.
return getOrCreateInternalVariable(CGM.Int8PtrPtrTy,
Twine(CGM.getMangledName(VD)) + ".cache.");
}
llvm::Value *CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
const VarDecl *VD,
llvm::Value *VDAddr,
SourceLocation Loc) {
if (CGM.getLangOpts().OpenMPUseTLS &&
CGM.getContext().getTargetInfo().isTLSSupported())
return VDAddr;
auto VarTy = VDAddr->getType()->getPointerElementType();
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
CGF.Builder.CreatePointerCast(VDAddr, CGM.Int8PtrTy),
CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
getOrCreateThreadPrivateCache(VD)};
return CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args);
}
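// For a hypothetical threadprivate "int x;" on a target without usable TLS,
// this emits roughly (the cache global's name follows
// getOrCreateThreadPrivateCache above; "x" is used only for illustration):
//
//   %1 = call i8* @__kmpc_threadprivate_cached(
//            %ident_t* %loc, i32 %tid, i8* bitcast (i32* @x to i8*),
//            i64 4, i8*** @"x.cache.")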
void CGOpenMPRuntime::emitThreadPrivateVarInit(
CodeGenFunction &CGF, llvm::Value *VDAddr, llvm::Value *Ctor,
llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
// Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
// library.
auto OMPLoc = emitUpdateLocation(CGF, Loc);
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
OMPLoc);
// Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
// to register constructor/destructor for variable.
llvm::Value *Args[] = {OMPLoc,
CGF.Builder.CreatePointerCast(VDAddr, CGM.VoidPtrTy),
Ctor, CopyCtor, Dtor};
CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
}
llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
const VarDecl *VD, llvm::Value *VDAddr, SourceLocation Loc,
bool PerformInit, CodeGenFunction *CGF) {
if (CGM.getLangOpts().OpenMPUseTLS &&
CGM.getContext().getTargetInfo().isTLSSupported())
return nullptr;
VD = VD->getDefinition(CGM.getContext());
if (VD && ThreadPrivateWithDefinition.count(VD) == 0) {
ThreadPrivateWithDefinition.insert(VD);
QualType ASTTy = VD->getType();
llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
auto Init = VD->getAnyInitializer();
if (CGM.getLangOpts().CPlusPlus && PerformInit) {
// Generate function that re-emits the declaration's initializer into the
// threadprivate copy of the variable VD
CodeGenFunction CtorCGF(CGM);
FunctionArgList Args;
ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, SourceLocation(),
/*Id=*/nullptr, CGM.getContext().VoidPtrTy);
Args.push_back(&Dst);
auto &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
CGM.getContext().VoidPtrTy, Args, FunctionType::ExtInfo(),
/*isVariadic=*/false);
auto FTy = CGM.getTypes().GetFunctionType(FI);
auto Fn = CGM.CreateGlobalInitOrDestructFunction(
FTy, ".__kmpc_global_ctor_.", Loc);
CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
Args, SourceLocation());
auto ArgVal = CtorCGF.EmitLoadOfScalar(
CtorCGF.GetAddrOfLocalVar(&Dst),
/*Volatile=*/false, CGM.PointerAlignInBytes,
CGM.getContext().VoidPtrTy, Dst.getLocation());
auto Arg = CtorCGF.Builder.CreatePointerCast(
ArgVal,
CtorCGF.ConvertTypeForMem(CGM.getContext().getPointerType(ASTTy)));
CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
/*IsInitializer=*/true);
ArgVal = CtorCGF.EmitLoadOfScalar(
CtorCGF.GetAddrOfLocalVar(&Dst),
/*Volatile=*/false, CGM.PointerAlignInBytes,
CGM.getContext().VoidPtrTy, Dst.getLocation());
CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
CtorCGF.FinishFunction();
Ctor = Fn;
}
if (VD->getType().isDestructedType() != QualType::DK_none) {
// Generate function that emits destructor call for the threadprivate copy
// of the variable VD
CodeGenFunction DtorCGF(CGM);
FunctionArgList Args;
ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, SourceLocation(),
/*Id=*/nullptr, CGM.getContext().VoidPtrTy);
Args.push_back(&Dst);
auto &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
CGM.getContext().VoidTy, Args, FunctionType::ExtInfo(),
/*isVariadic=*/false);
auto FTy = CGM.getTypes().GetFunctionType(FI);
auto Fn = CGM.CreateGlobalInitOrDestructFunction(
FTy, ".__kmpc_global_dtor_.", Loc);
DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
SourceLocation());
auto ArgVal = DtorCGF.EmitLoadOfScalar(
DtorCGF.GetAddrOfLocalVar(&Dst),
/*Volatile=*/false, CGM.PointerAlignInBytes,
CGM.getContext().VoidPtrTy, Dst.getLocation());
DtorCGF.emitDestroy(ArgVal, ASTTy,
DtorCGF.getDestroyer(ASTTy.isDestructedType()),
DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
DtorCGF.FinishFunction();
Dtor = Fn;
}
// Do not emit init function if it is not required.
if (!Ctor && !Dtor)
return nullptr;
llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
auto CopyCtorTy =
llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
/*isVarArg=*/false)->getPointerTo();
    // Copying constructor for the threadprivate variable.
    // Must be NULL: the parameter is reserved by the runtime, which currently
    // asserts that it is always NULL.
CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
if (Ctor == nullptr) {
auto CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
/*isVarArg=*/false)->getPointerTo();
Ctor = llvm::Constant::getNullValue(CtorTy);
}
if (Dtor == nullptr) {
auto DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
/*isVarArg=*/false)->getPointerTo();
Dtor = llvm::Constant::getNullValue(DtorTy);
}
if (!CGF) {
auto InitFunctionTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
auto InitFunction = CGM.CreateGlobalInitOrDestructFunction(
InitFunctionTy, ".__omp_threadprivate_init_.");
CodeGenFunction InitCGF(CGM);
FunctionArgList ArgList;
InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
CGM.getTypes().arrangeNullaryFunction(), ArgList,
Loc);
emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
InitCGF.FinishFunction();
return InitFunction;
}
emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
}
return nullptr;
}
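// Putting the pieces together: for a hypothetical threadprivate variable of a
// class type T with a non-trivial constructor and destructor, the helpers
// emitted above behave roughly like:
//
//   void *.__kmpc_global_ctor_.(void *Dst) { new (Dst) T(init); return Dst; }
//   void .__kmpc_global_dtor_.(void *Dst)  { ((T *)Dst)->~T(); }
//   // and, at initialization time:
//   __kmpc_global_thread_num(&loc);
//   __kmpc_threadprivate_register(&loc, &var, ctor, /*cctor=*/NULL, dtor);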
/// \brief Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
/// ThenGen();
/// } else {
/// ElseGen();
/// }
static void emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond,
const RegionCodeGenTy &ThenGen,
const RegionCodeGenTy &ElseGen) {
CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange());
// If the condition constant folds and can be elided, try to avoid emitting
// the condition and the dead arm of the if/else.
bool CondConstant;
if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
CodeGenFunction::RunCleanupsScope Scope(CGF);
if (CondConstant) {
ThenGen(CGF);
} else {
ElseGen(CGF);
}
return;
}
// Otherwise, the condition did not fold, or we couldn't elide it. Just
// emit the conditional branch.
auto ThenBlock = CGF.createBasicBlock("omp_if.then");
auto ElseBlock = CGF.createBasicBlock("omp_if.else");
auto ContBlock = CGF.createBasicBlock("omp_if.end");
CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0);
// Emit the 'then' code.
CGF.EmitBlock(ThenBlock);
{
CodeGenFunction::RunCleanupsScope ThenScope(CGF);
ThenGen(CGF);
}
CGF.EmitBranch(ContBlock);
// Emit the 'else' code if present.
{
    // There is no need to emit a line number for the unconditional branch.
auto NL = ApplyDebugLocation::CreateEmpty(CGF);
CGF.EmitBlock(ElseBlock);
}
{
    CodeGenFunction::RunCleanupsScope ElseScope(CGF);
ElseGen(CGF);
}
{
    // There is no need to emit a line number for the unconditional branch.
auto NL = ApplyDebugLocation::CreateEmpty(CGF);
CGF.EmitBranch(ContBlock);
}
// Emit the continuation block for code after the if.
CGF.EmitBlock(ContBlock, /*IsFinished=*/true);
}
void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *OutlinedFn,
llvm::Value *CapturedStruct,
const Expr *IfCond) {
auto *RTLoc = emitUpdateLocation(CGF, Loc);
auto &&ThenGen =
[this, OutlinedFn, CapturedStruct, RTLoc](CodeGenFunction &CGF) {
// Build call __kmpc_fork_call(loc, 1, microtask,
// captured_struct/*context*/)
llvm::Value *Args[] = {
RTLoc,
CGF.Builder.getInt32(
1), // Number of arguments after 'microtask' argument
// (there is only one additional argument - 'context')
CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy()),
CGF.EmitCastToVoidPtr(CapturedStruct)};
auto RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_call);
CGF.EmitRuntimeCall(RTLFn, Args);
};
auto &&ElseGen = [this, OutlinedFn, CapturedStruct, RTLoc, Loc](
CodeGenFunction &CGF) {
auto ThreadID = getThreadID(CGF, Loc);
// Build calls:
// __kmpc_serialized_parallel(&Loc, GTid);
llvm::Value *Args[] = {RTLoc, ThreadID};
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_serialized_parallel),
Args);
    // OutlinedFn(&gtid, &zero, CapturedStruct);
auto ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
auto Int32Ty = CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32,
/*Signed*/ true);
auto ZeroAddr = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".zero.addr");
CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
llvm::Value *OutlinedFnArgs[] = {ThreadIDAddr, ZeroAddr, CapturedStruct};
CGF.EmitCallOrInvoke(OutlinedFn, OutlinedFnArgs);
// __kmpc_end_serialized_parallel(&Loc, GTid);
llvm::Value *EndArgs[] = {emitUpdateLocation(CGF, Loc), ThreadID};
CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel), EndArgs);
};
if (IfCond) {
emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
} else {
CodeGenFunction::RunCleanupsScope Scope(CGF);
ThenGen(CGF);
}
}
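// Taken together, "#pragma omp parallel if (Cond)" lowers roughly to:
//
//   if (Cond) {
//     __kmpc_fork_call(&loc, /*argc=*/1, (kmpc_micro)OutlinedFn, &captured);
//   } else {
//     __kmpc_serialized_parallel(&loc, gtid);
//     OutlinedFn(&gtid, &zero, &captured);
//     __kmpc_end_serialized_parallel(&loc, gtid);
//   }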
// If we're inside an (outlined) parallel region, use the region info's
// thread-ID variable (it is passed as the first argument of the outlined
// function, "kmp_int32 *gtid"). Otherwise, if we're in a regular serial code
// region, get the thread ID by calling kmp_int32
// __kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary
// and return the address of that temporary.
llvm::Value *CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
SourceLocation Loc) {
if (auto OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
if (OMPRegionInfo->getThreadIDVariable())
return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
auto ThreadID = getThreadID(CGF, Loc);
auto Int32Ty =
CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
auto ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
CGF.EmitStoreOfScalar(ThreadID,
CGF.MakeNaturalAlignAddrLValue(ThreadIDTemp, Int32Ty));
return ThreadIDTemp;
}
llvm::Constant *
CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty,
const llvm::Twine &Name) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
Out << Name;
auto RuntimeName = Out.str();
auto &Elem = *InternalVars.insert(std::make_pair(RuntimeName, nullptr)).first;
if (Elem.second) {
assert(Elem.second->getType()->getPointerElementType() == Ty &&
"OMP internal variable has different type than requested");
return &*Elem.second;
}
return Elem.second = new llvm::GlobalVariable(
CGM.getModule(), Ty, /*IsConstant*/ false,
llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
Elem.first());
}
llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
llvm::Twine Name(".gomp_critical_user_", CriticalName);
return getOrCreateInternalVariable(KmpCriticalNameTy, Name.concat(".var"));
}
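// e.g. a critical section named "foo" uses a common-linkage lock variable
// named ".gomp_critical_user_foo.var" of type [8 x i32] (KmpCriticalNameTy).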
namespace {
template <size_t N> class CallEndCleanup : public EHScopeStack::Cleanup {
llvm::Value *Callee;
llvm::Value *Args[N];
public:
CallEndCleanup(llvm::Value *Callee, ArrayRef<llvm::Value *> CleanupArgs)
: Callee(Callee) {
assert(CleanupArgs.size() == N);
std::copy(CleanupArgs.begin(), CleanupArgs.end(), std::begin(Args));
}
void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
CGF.EmitRuntimeCall(Callee, Args);
}
};
} // namespace
void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF,
StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc) {
// __kmpc_critical(ident_t *, gtid, Lock);
// CriticalOpGen();
// __kmpc_end_critical(ident_t *, gtid, Lock);
// Prepare arguments and build a call to __kmpc_critical
{
CodeGenFunction::RunCleanupsScope Scope(CGF);
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
getCriticalRegionLock(CriticalName)};
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_critical), Args);
// Build a call to __kmpc_end_critical
CGF.EHStack.pushCleanup<CallEndCleanup<std::extent<decltype(Args)>::value>>(
NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_critical),
llvm::makeArrayRef(Args));
emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen);
}
}
static void emitIfStmt(CodeGenFunction &CGF, llvm::Value *IfCond,
OpenMPDirectiveKind Kind,
const RegionCodeGenTy &BodyOpGen) {
llvm::Value *CallBool = CGF.EmitScalarConversion(
IfCond,
CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true),
CGF.getContext().BoolTy);
auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
auto *ContBlock = CGF.createBasicBlock("omp_if.end");
// Generate the branch (If-stmt)
CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
CGF.EmitBlock(ThenBlock);
CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, Kind, BodyOpGen);
// Emit the rest of bblocks/branches
CGF.EmitBranch(ContBlock);
CGF.EmitBlock(ContBlock, true);
}
void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) {
// if(__kmpc_master(ident_t *, gtid)) {
// MasterOpGen();
// __kmpc_end_master(ident_t *, gtid);
// }
// Prepare arguments and build a call to __kmpc_master
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
auto *IsMaster =
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_master), Args);
typedef CallEndCleanup<std::extent<decltype(Args)>::value>
MasterCallEndCleanup;
emitIfStmt(CGF, IsMaster, OMPD_master, [&](CodeGenFunction &CGF) -> void {
CodeGenFunction::RunCleanupsScope Scope(CGF);
CGF.EHStack.pushCleanup<MasterCallEndCleanup>(
NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_master),
llvm::makeArrayRef(Args));
MasterOpGen(CGF);
});
}
void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
SourceLocation Loc) {
// Build call __kmpc_omp_taskyield(loc, thread_id, 0);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)};
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskyield), Args);
}
void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc) {
// __kmpc_taskgroup(ident_t *, gtid);
// TaskgroupOpGen();
// __kmpc_end_taskgroup(ident_t *, gtid);
// Prepare arguments and build a call to __kmpc_taskgroup
{
CodeGenFunction::RunCleanupsScope Scope(CGF);
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args);
// Build a call to __kmpc_end_taskgroup
CGF.EHStack.pushCleanup<CallEndCleanup<std::extent<decltype(Args)>::value>>(
NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_taskgroup),
llvm::makeArrayRef(Args));
emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen);
}
}
static llvm::Value *emitCopyprivateCopyFunction(
CodeGenModule &CGM, llvm::Type *ArgsType,
ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps) {
auto &C = CGM.getContext();
// void copy_func(void *LHSArg, void *RHSArg);
FunctionArgList Args;
ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, SourceLocation(), /*Id=*/nullptr,
C.VoidPtrTy);
ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, SourceLocation(), /*Id=*/nullptr,
C.VoidPtrTy);
Args.push_back(&LHSArg);
Args.push_back(&RHSArg);
FunctionType::ExtInfo EI;
auto &CGFI = CGM.getTypes().arrangeFreeFunctionDeclaration(
C.VoidTy, Args, EI, /*isVariadic=*/false);
auto *Fn = llvm::Function::Create(
CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
".omp.copyprivate.copy_func", &CGM.getModule());
CGM.SetLLVMFunctionAttributes(/*D=*/nullptr, CGFI, Fn);
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
// Dest = (void*[n])(LHSArg);
// Src = (void*[n])(RHSArg);
auto *LHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&LHSArg),
CGF.PointerAlignInBytes),
ArgsType);
auto *RHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&RHSArg),
CGF.PointerAlignInBytes),
ArgsType);
// *(Type0*)Dst[0] = *(Type0*)Src[0];
// *(Type1*)Dst[1] = *(Type1*)Src[1];
// ...
// *(Typen*)Dst[n] = *(Typen*)Src[n];
for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) {
auto *DestAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder.CreateAlignedLoad(
CGF.Builder.CreateStructGEP(nullptr, LHS, I),
CGM.PointerAlignInBytes),
CGF.ConvertTypeForMem(C.getPointerType(SrcExprs[I]->getType())));
auto *SrcAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder.CreateAlignedLoad(
CGF.Builder.CreateStructGEP(nullptr, RHS, I),
CGM.PointerAlignInBytes),
CGF.ConvertTypeForMem(C.getPointerType(SrcExprs[I]->getType())));
auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl();
QualType Type = VD->getType();
CGF.EmitOMPCopy(CGF, Type, DestAddr, SrcAddr,
cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl()),
cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl()),
AssignmentOps[I]);
}
CGF.FinishFunction();
return Fn;
}
void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen,
SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> DstExprs,
ArrayRef<const Expr *> AssignmentOps) {
assert(CopyprivateVars.size() == SrcExprs.size() &&
CopyprivateVars.size() == DstExprs.size() &&
CopyprivateVars.size() == AssignmentOps.size());
auto &C = CGM.getContext();
// int32 did_it = 0;
// if(__kmpc_single(ident_t *, gtid)) {
// SingleOpGen();
// __kmpc_end_single(ident_t *, gtid);
// did_it = 1;
// }
// call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
// <copy_func>, did_it);
llvm::AllocaInst *DidIt = nullptr;
if (!CopyprivateVars.empty()) {
// int32 did_it = 0;
auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it");
CGF.Builder.CreateAlignedStore(CGF.Builder.getInt32(0), DidIt,
DidIt->getAlignment());
}
// Prepare arguments and build a call to __kmpc_single
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
auto *IsSingle =
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_single), Args);
typedef CallEndCleanup<std::extent<decltype(Args)>::value>
SingleCallEndCleanup;
emitIfStmt(CGF, IsSingle, OMPD_single, [&](CodeGenFunction &CGF) -> void {
CodeGenFunction::RunCleanupsScope Scope(CGF);
CGF.EHStack.pushCleanup<SingleCallEndCleanup>(
NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_single),
llvm::makeArrayRef(Args));
SingleOpGen(CGF);
if (DidIt) {
// did_it = 1;
CGF.Builder.CreateAlignedStore(CGF.Builder.getInt32(1), DidIt,
DidIt->getAlignment());
}
});
// call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>,
// <copy_func>, did_it);
if (DidIt) {
llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
auto CopyprivateArrayTy =
C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
/*IndexTypeQuals=*/0);
// Create a list of all private variables for copyprivate.
auto *CopyprivateList =
CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list");
for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) {
auto *Elem = CGF.Builder.CreateStructGEP(
CopyprivateList->getAllocatedType(), CopyprivateList, I);
CGF.Builder.CreateAlignedStore(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLValue(CopyprivateVars[I]).getAddress(), CGF.VoidPtrTy),
Elem, CGM.PointerAlignInBytes);
}
// Build function that copies private values from single region to all other
// threads in the corresponding parallel region.
auto *CpyFn = emitCopyprivateCopyFunction(
CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
CopyprivateVars, SrcExprs, DstExprs, AssignmentOps);
auto *BufSize = llvm::ConstantInt::get(
CGM.SizeTy, C.getTypeSizeInChars(CopyprivateArrayTy).getQuantity());
auto *CL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
CGF.VoidPtrTy);
auto *DidItVal =
CGF.Builder.CreateAlignedLoad(DidIt, CGF.PointerAlignInBytes);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), // ident_t *<loc>
getThreadID(CGF, Loc), // i32 <gtid>
BufSize, // size_t <buf_size>
CL, // void *<copyprivate list>
CpyFn, // void (*) (void *, void *) <copy_func>
DidItVal // i32 did_it
};
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_copyprivate), Args);
}
}
void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc) {
// __kmpc_ordered(ident_t *, gtid);
// OrderedOpGen();
// __kmpc_end_ordered(ident_t *, gtid);
// Prepare arguments and build a call to __kmpc_ordered
{
CodeGenFunction::RunCleanupsScope Scope(CGF);
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_ordered), Args);
// Build a call to __kmpc_end_ordered
CGF.EHStack.pushCleanup<CallEndCleanup<std::extent<decltype(Args)>::value>>(
NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_ordered),
llvm::makeArrayRef(Args));
emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen);
}
}
void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind,
bool CheckForCancel) {
// Build call __kmpc_cancel_barrier(loc, thread_id);
// Build call __kmpc_barrier(loc, thread_id);
OpenMPLocationFlags Flags = OMP_IDENT_KMPC;
if (Kind == OMPD_for) {
Flags =
static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_IMPL_FOR);
} else if (Kind == OMPD_sections) {
Flags = static_cast<OpenMPLocationFlags>(Flags |
OMP_IDENT_BARRIER_IMPL_SECTIONS);
} else if (Kind == OMPD_single) {
Flags =
static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_IMPL_SINGLE);
} else if (Kind == OMPD_barrier) {
Flags = static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_EXPL);
} else {
Flags = static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_IMPL);
}
// Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc,
// thread_id);
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
getThreadID(CGF, Loc)};
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
auto CancelDestination =
CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
if (CancelDestination.isValid()) {
auto *Result = CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args);
if (CheckForCancel) {
// if (__kmpc_cancel_barrier()) {
// exit from construct;
// }
auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
auto *ContBB = CGF.createBasicBlock(".cancel.continue");
auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
CGF.EmitBlock(ExitBB);
// exit from construct;
CGF.EmitBranchThroughCleanup(CancelDestination);
CGF.EmitBlock(ContBB, /*IsFinished=*/true);
}
return;
}
}
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args);
}
/// \brief Schedule types for 'omp for' loops (these enumerators are taken from
/// the enum sched_type in kmp.h).
enum OpenMPSchedType {
/// \brief Lower bound for default (unordered) versions.
OMP_sch_lower = 32,
OMP_sch_static_chunked = 33,
OMP_sch_static = 34,
OMP_sch_dynamic_chunked = 35,
OMP_sch_guided_chunked = 36,
OMP_sch_runtime = 37,
OMP_sch_auto = 38,
/// \brief Lower bound for 'ordered' versions.
OMP_ord_lower = 64,
OMP_ord_static_chunked = 65,
OMP_ord_static = 66,
OMP_ord_dynamic_chunked = 67,
OMP_ord_guided_chunked = 68,
OMP_ord_runtime = 69,
OMP_ord_auto = 70,
OMP_sch_default = OMP_sch_static,
};
/// \brief Map the OpenMP loop schedule to the runtime enumeration.
static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked, bool Ordered) {
switch (ScheduleKind) {
case OMPC_SCHEDULE_static:
return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked)
: (Ordered ? OMP_ord_static : OMP_sch_static);
case OMPC_SCHEDULE_dynamic:
return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked;
case OMPC_SCHEDULE_guided:
return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked;
case OMPC_SCHEDULE_runtime:
return Ordered ? OMP_ord_runtime : OMP_sch_runtime;
case OMPC_SCHEDULE_auto:
return Ordered ? OMP_ord_auto : OMP_sch_auto;
case OMPC_SCHEDULE_unknown:
assert(!Chunked && "chunk was specified but schedule kind not known");
return Ordered ? OMP_ord_static : OMP_sch_static;
}
llvm_unreachable("Unexpected runtime schedule");
}
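// For example, "schedule(dynamic, 4)" on an unordered loop maps to
// OMP_sch_dynamic_chunked (35), while the same clause on a loop that also
// carries the 'ordered' clause maps to OMP_ord_dynamic_chunked (67).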
bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const {
auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false);
return Schedule == OMP_sch_static;
}
bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const {
auto Schedule =
getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false);
assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here");
return Schedule != OMP_sch_static;
}
void CGOpenMPRuntime::emitForInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPScheduleClauseKind ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
llvm::Value *IL, llvm::Value *LB,
llvm::Value *UB, llvm::Value *ST,
llvm::Value *Chunk) {
OpenMPSchedType Schedule =
getRuntimeSchedule(ScheduleKind, Chunk != nullptr, Ordered);
if (Ordered ||
(Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked)) {
// Call __kmpc_dispatch_init(
// ident_t *loc, kmp_int32 tid, kmp_int32 schedule,
// kmp_int[32|64] lower, kmp_int[32|64] upper,
// kmp_int[32|64] stride, kmp_int[32|64] chunk);
// If the Chunk was not specified in the clause - use default value 1.
if (Chunk == nullptr)
Chunk = CGF.Builder.getIntN(IVSize, 1);
llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
getThreadID(CGF, Loc),
CGF.Builder.getInt32(Schedule), // Schedule type
CGF.Builder.getIntN(IVSize, 0), // Lower
UB, // Upper
CGF.Builder.getIntN(IVSize, 1), // Stride
Chunk // Chunk
};
CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
} else {
// Call __kmpc_for_static_init(
// ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
// kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
// kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
// kmp_int[32|64] incr, kmp_int[32|64] chunk);
if (Chunk == nullptr) {
assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static) &&
"expected static non-chunked schedule");
// If the Chunk was not specified in the clause - use default value 1.
Chunk = CGF.Builder.getIntN(IVSize, 1);
} else
assert((Schedule == OMP_sch_static_chunked ||
Schedule == OMP_ord_static_chunked) &&
"expected static chunked schedule");
llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
getThreadID(CGF, Loc),
CGF.Builder.getInt32(Schedule), // Schedule type
IL, // &isLastIter
LB, // &LB
UB, // &UB
ST, // &Stride
CGF.Builder.getIntN(IVSize, 1), // Incr
Chunk // Chunk
};
CGF.EmitRuntimeCall(createForStaticInitFunction(IVSize, IVSigned), Args);
}
}
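// For instance, a 32-bit IV loop with "schedule(static)" takes the second
// branch above and emits a single call, roughly:
//
//   __kmpc_for_static_init_4(&loc, tid, /*OMP_sch_static*/ 34, &lastiter,
//                            &lb, &ub, &stride, /*incr=*/1, /*chunk=*/1);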
void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
SourceLocation Loc) {
// Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
getThreadID(CGF, Loc)};
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_for_static_fini),
Args);
}
void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc,
unsigned IVSize,
bool IVSigned) {
  // Call __kmpc_dispatch_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
getThreadID(CGF, Loc)};
CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
}
llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
bool IVSigned, llvm::Value *IL,
llvm::Value *LB, llvm::Value *UB,
llvm::Value *ST) {
// Call __kmpc_dispatch_next(
// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
// kmp_int[32|64] *p_stride);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC), getThreadID(CGF, Loc),
IL, // &isLastIter
LB, // &Lower
UB, // &Upper
ST // &Stride
};
llvm::Value *Call =
CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
return CGF.EmitScalarConversion(
Call, CGF.getContext().getIntTypeForBitwidth(32, /* Signed */ true),
CGF.getContext().BoolTy);
}
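// A dynamically scheduled loop then drives its chunks with a loop roughly of
// the form (a sketch of the usual kmpc dispatch pattern, 32-bit signed IV):
//
//   while (__kmpc_dispatch_next_4(&loc, tid, &last, &lb, &ub, &st)) {
//     for (iv = lb; iv <= ub; iv += st)
//       <loop body>;
//   }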
void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc) {
// Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_threads),
Args);
}
void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
OpenMPProcBindClauseKind ProcBind,
SourceLocation Loc) {
// Constants for proc bind value accepted by the runtime.
enum ProcBindTy {
ProcBindFalse = 0,
ProcBindTrue,
ProcBindMaster,
ProcBindClose,
ProcBindSpread,
ProcBindIntel,
ProcBindDefault
} RuntimeProcBind;
switch (ProcBind) {
case OMPC_PROC_BIND_master:
RuntimeProcBind = ProcBindMaster;
break;
case OMPC_PROC_BIND_close:
RuntimeProcBind = ProcBindClose;
break;
case OMPC_PROC_BIND_spread:
RuntimeProcBind = ProcBindSpread;
break;
case OMPC_PROC_BIND_unknown:
llvm_unreachable("Unsupported proc_bind value.");
}
// Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
llvm::ConstantInt::get(CGM.IntTy, RuntimeProcBind, /*isSigned=*/true)};
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_proc_bind), Args);
}
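// e.g. "proc_bind(master)" results in, roughly:
//
//   __kmpc_push_proc_bind(&loc, gtid, /*ProcBindMaster*/ 2);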
void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
SourceLocation Loc) {
// Build call void __kmpc_flush(ident_t *loc)
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_flush),
emitUpdateLocation(CGF, Loc));
}
namespace {
/// \brief Indexes of fields for type kmp_task_t.
enum KmpTaskTFields {
/// \brief List of shared variables.
KmpTaskTShareds,
/// \brief Task routine.
KmpTaskTRoutine,
/// \brief Partition id for the untied tasks.
KmpTaskTPartId,
/// \brief Function with call of destructors for private variables.
KmpTaskTDestructors,
};
} // namespace
void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
if (!KmpRoutineEntryPtrTy) {
// Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
auto &C = CGM.getContext();
QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
FunctionProtoType::ExtProtoInfo EPI;
KmpRoutineEntryPtrQTy = C.getPointerType(
C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
}
}
static void addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
QualType FieldTy) {
auto *Field = FieldDecl::Create(
C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
/*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
Field->setAccess(AS_public);
DC->addDecl(Field);
}
namespace {
struct PrivateHelpersTy {
PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
const VarDecl *PrivateElemInit)
: Original(Original), PrivateCopy(PrivateCopy),
PrivateElemInit(PrivateElemInit) {}
const VarDecl *Original;
const VarDecl *PrivateCopy;
const VarDecl *PrivateElemInit;
};
typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
} // namespace
static RecordDecl *
createPrivatesRecordDecl(CodeGenModule &CGM,
const ArrayRef<PrivateDataTy> Privates) {
if (!Privates.empty()) {
auto &C = CGM.getContext();
// Build struct .kmp_privates_t. {
// /* private vars */
// };
auto *RD = C.buildImplicitRecord(".kmp_privates.t");
RD->startDefinition();
for (auto &&Pair : Privates) {
auto Type = Pair.second.Original->getType();
Type = Type.getNonReferenceType();
addFieldToRecordDecl(C, RD, Type);
}
RD->completeDefinition();
return RD;
}
return nullptr;
}
static RecordDecl *
createKmpTaskTRecordDecl(CodeGenModule &CGM, QualType KmpInt32Ty,
QualType KmpRoutineEntryPointerQTy) {
auto &C = CGM.getContext();
// Build struct kmp_task_t {
// void * shareds;
// kmp_routine_entry_t routine;
// kmp_int32 part_id;
// kmp_routine_entry_t destructors;
// };
auto *RD = C.buildImplicitRecord("kmp_task_t");
RD->startDefinition();
addFieldToRecordDecl(C, RD, C.VoidPtrTy);
addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
addFieldToRecordDecl(C, RD, KmpInt32Ty);
addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
RD->completeDefinition();
return RD;
}
static RecordDecl *
createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
const ArrayRef<PrivateDataTy> Privates) {
auto &C = CGM.getContext();
// Build struct kmp_task_t_with_privates {
// kmp_task_t task_data;
// .kmp_privates_t. privates;
// };
auto *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
RD->startDefinition();
addFieldToRecordDecl(C, RD, KmpTaskTQTy);
if (auto *PrivateRD = createPrivatesRecordDecl(CGM, Privates)) {
addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
}
RD->completeDefinition();
return RD;
}
/// \brief Emit a proxy function which accepts kmp_task_t as the second
/// argument.
/// \code
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map,
/// tt->shareds);
/// return 0;
/// }
/// \endcode
static llvm::Value *
emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
QualType KmpInt32Ty, QualType KmpTaskTWithPrivatesPtrQTy,
QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
QualType SharedsPtrTy, llvm::Value *TaskFunction,
llvm::Value *TaskPrivatesMap) {
auto &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty);
ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc,
/*Id=*/nullptr, KmpTaskTWithPrivatesPtrQTy);
Args.push_back(&GtidArg);
Args.push_back(&TaskTypeArg);
FunctionType::ExtInfo Info;
auto &TaskEntryFnInfo =
CGM.getTypes().arrangeFreeFunctionDeclaration(KmpInt32Ty, Args, Info,
/*isVariadic=*/false);
auto *TaskEntryTy = CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
auto *TaskEntry =
llvm::Function::Create(TaskEntryTy, llvm::GlobalValue::InternalLinkage,
".omp_task_entry.", &CGM.getModule());
CGM.SetLLVMFunctionAttributes(/*D=*/nullptr, TaskEntryFnInfo, TaskEntry);
CodeGenFunction CGF(CGM);
CGF.disableDebugInfo();
CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args);
// TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
// tt->task_data.shareds);
auto *GtidParam = CGF.EmitLoadOfScalar(
CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false,
C.getTypeAlignInChars(KmpInt32Ty).getQuantity(), KmpInt32Ty, Loc);
auto *TaskTypeArgAddr = CGF.Builder.CreateAlignedLoad(
CGF.GetAddrOfLocalVar(&TaskTypeArg), CGM.PointerAlignInBytes);
LValue TDBase =
CGF.MakeNaturalAlignAddrLValue(TaskTypeArgAddr, KmpTaskTWithPrivatesQTy);
auto *KmpTaskTWithPrivatesQTyRD =
cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
LValue Base =
CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
auto PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
auto *PartidParam = CGF.EmitLoadOfLValue(PartIdLVal, Loc).getScalarVal();
auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
auto SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
auto *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLoadOfLValue(SharedsLVal, Loc).getScalarVal(),
CGF.ConvertTypeForMem(SharedsPtrTy));
auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
llvm::Value *PrivatesParam;
if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
auto PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
PrivatesLVal.getAddress(), CGF.VoidPtrTy);
} else {
PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
}
llvm::Value *CallArgs[] = {GtidParam, PartidParam, PrivatesParam,
TaskPrivatesMap, SharedsParam};
CGF.EmitCallOrInvoke(TaskFunction, CallArgs);
CGF.EmitStoreThroughLValue(
RValue::get(CGF.Builder.getInt32(/*C=*/0)),
CGF.MakeNaturalAlignAddrLValue(CGF.ReturnValue, KmpInt32Ty));
CGF.FinishFunction();
return TaskEntry;
}
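/// \brief Emit a helper that runs the destructors for the fields of the
/// privates record; its address is stored in the kmp_task_t destructors entry
/// and invoked by the runtime. Conceptually:
/// \code
/// kmp_int32 .omp_task_destructor.(kmp_int32 gtid, kmp_task_t *tt) {
///   /* run the destructor of each destructible field of tt->privates */
/// }
/// \endcode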
static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
SourceLocation Loc,
QualType KmpInt32Ty,
QualType KmpTaskTWithPrivatesPtrQTy,
QualType KmpTaskTWithPrivatesQTy) {
auto &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty);
ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc,
/*Id=*/nullptr, KmpTaskTWithPrivatesPtrQTy);
Args.push_back(&GtidArg);
Args.push_back(&TaskTypeArg);
FunctionType::ExtInfo Info;
auto &DestructorFnInfo =
CGM.getTypes().arrangeFreeFunctionDeclaration(KmpInt32Ty, Args, Info,
/*isVariadic=*/false);
auto *DestructorFnTy = CGM.getTypes().GetFunctionType(DestructorFnInfo);
auto *DestructorFn =
llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage,
".omp_task_destructor.", &CGM.getModule());
CGM.SetLLVMFunctionAttributes(/*D=*/nullptr, DestructorFnInfo, DestructorFn);
CodeGenFunction CGF(CGM);
CGF.disableDebugInfo();
CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo,
Args);
auto *TaskTypeArgAddr = CGF.Builder.CreateAlignedLoad(
CGF.GetAddrOfLocalVar(&TaskTypeArg), CGM.PointerAlignInBytes);
LValue Base =
CGF.MakeNaturalAlignAddrLValue(TaskTypeArgAddr, KmpTaskTWithPrivatesQTy);
auto *KmpTaskTWithPrivatesQTyRD =
cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
Base = CGF.EmitLValueForField(Base, *FI);
for (auto *Field :
cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) {
if (auto DtorKind = Field->getType().isDestructedType()) {
auto FieldLValue = CGF.EmitLValueForField(Base, Field);
CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
}
}
CGF.FinishFunction();
return DestructorFn;
}
/// \brief Emit a privates mapping function for correct handling of private and
/// firstprivate variables.
/// \code
/// void .omp_task_privates_map.(const .privates. *noalias privs, <ty1>
/// **noalias priv1,..., <tyn> **noalias privn) {
/// *priv1 = &.privates.priv1;
/// ...;
/// *privn = &.privates.privn;
/// }
/// \endcode
static llvm::Value *
emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
const ArrayRef<const Expr *> PrivateVars,
const ArrayRef<const Expr *> FirstprivateVars,
QualType PrivatesQTy,
const ArrayRef<PrivateDataTy> Privates) {
auto &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl TaskPrivatesArg(
C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
C.getPointerType(PrivatesQTy).withConst().withRestrict());
Args.push_back(&TaskPrivatesArg);
llvm::DenseMap<const VarDecl *, unsigned> PrivateVarsPos;
unsigned Counter = 1;
for (auto *E: PrivateVars) {
Args.push_back(ImplicitParamDecl::Create(
C, /*DC=*/nullptr, Loc,
/*Id=*/nullptr, C.getPointerType(C.getPointerType(E->getType()))
.withConst()
.withRestrict()));
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
PrivateVarsPos[VD] = Counter;
++Counter;
}
for (auto *E : FirstprivateVars) {
Args.push_back(ImplicitParamDecl::Create(
C, /*DC=*/nullptr, Loc,
/*Id=*/nullptr, C.getPointerType(C.getPointerType(E->getType()))
.withConst()
.withRestrict()));
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
PrivateVarsPos[VD] = Counter;
++Counter;
}
FunctionType::ExtInfo Info;
auto &TaskPrivatesMapFnInfo =
CGM.getTypes().arrangeFreeFunctionDeclaration(C.VoidTy, Args, Info,
/*isVariadic=*/false);
auto *TaskPrivatesMapTy =
CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo);
auto *TaskPrivatesMap = llvm::Function::Create(
TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage,
".omp_task_privates_map.", &CGM.getModule());
CGM.SetLLVMFunctionAttributes(/*D=*/nullptr, TaskPrivatesMapFnInfo,
TaskPrivatesMap);
TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
CodeGenFunction CGF(CGM);
CGF.disableDebugInfo();
CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap,
TaskPrivatesMapFnInfo, Args);
// *privi = &.privates.privi;
auto *TaskPrivatesArgAddr = CGF.Builder.CreateAlignedLoad(
CGF.GetAddrOfLocalVar(&TaskPrivatesArg), CGM.PointerAlignInBytes);
LValue Base =
CGF.MakeNaturalAlignAddrLValue(TaskPrivatesArgAddr, PrivatesQTy);
auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl());
Counter = 0;
for (auto *Field : PrivatesQTyRD->fields()) {
auto FieldLVal = CGF.EmitLValueForField(Base, Field);
auto *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]];
auto RefLVal = CGF.MakeNaturalAlignAddrLValue(CGF.GetAddrOfLocalVar(VD),
VD->getType());
auto RefLoadRVal = CGF.EmitLoadOfLValue(RefLVal, Loc);
CGF.EmitStoreOfScalar(
FieldLVal.getAddress(),
CGF.MakeNaturalAlignAddrLValue(RefLoadRVal.getScalarVal(),
RefLVal.getType()->getPointeeType()));
++Counter;
}
CGF.FinishFunction();
return TaskPrivatesMap;
}
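// Comparator for llvm::array_pod_sort below. It orders PrivateDataTy entries
// by *descending* alignment (note the inverted return values), so that the
// generated .kmp_privates.t record lays out the most-aligned fields first and
// keeps padding to a minimum.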
static int __cdecl array_pod_sort_comparator(const PrivateDataTy *P1, // HLSL Change - __cdecl
const PrivateDataTy *P2) {
return P1->first < P2->first ? 1 : (P2->first < P1->first ? -1 : 0);
}
void CGOpenMPRuntime::emitTaskCall(
CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D,
bool Tied, llvm::PointerIntPair<llvm::Value *, 1, bool> Final,
llvm::Value *TaskFunction, QualType SharedsTy, llvm::Value *Shareds,
const Expr *IfCond, ArrayRef<const Expr *> PrivateVars,
ArrayRef<const Expr *> PrivateCopies,
ArrayRef<const Expr *> FirstprivateVars,
ArrayRef<const Expr *> FirstprivateCopies,
ArrayRef<const Expr *> FirstprivateInits,
ArrayRef<std::pair<OpenMPDependClauseKind, const Expr *>> Dependences) {
auto &C = CGM.getContext();
llvm::SmallVector<PrivateDataTy, 8> Privates;
// Aggregate privates and sort them by descending alignment.
auto I = PrivateCopies.begin();
for (auto *E : PrivateVars) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Privates.push_back(std::make_pair(
C.getTypeAlignInChars(VD->getType()),
PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
/*PrivateElemInit=*/nullptr)));
++I;
}
I = FirstprivateCopies.begin();
auto IElemInitRef = FirstprivateInits.begin();
for (auto *E : FirstprivateVars) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Privates.push_back(std::make_pair(
C.getTypeAlignInChars(VD->getType()),
PrivateHelpersTy(
VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl()))));
++I, ++IElemInitRef;
}
llvm::array_pod_sort(Privates.begin(), Privates.end(),
array_pod_sort_comparator);
auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
// Build type kmp_routine_entry_t (if not built yet).
emitKmpRoutineEntryT(KmpInt32Ty);
// Build type kmp_task_t (if not built yet).
if (KmpTaskTQTy.isNull()) {
KmpTaskTQTy = C.getRecordType(
createKmpTaskTRecordDecl(CGM, KmpInt32Ty, KmpRoutineEntryPtrQTy));
}
auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
// Build particular struct kmp_task_t for the given task.
auto *KmpTaskTWithPrivatesQTyRD =
createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
auto KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
QualType KmpTaskTWithPrivatesPtrQTy =
C.getPointerType(KmpTaskTWithPrivatesQTy);
auto *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
auto *KmpTaskTWithPrivatesPtrTy = KmpTaskTWithPrivatesTy->getPointerTo();
auto KmpTaskTWithPrivatesTySize =
CGM.getSize(C.getTypeSizeInChars(KmpTaskTWithPrivatesQTy));
QualType SharedsPtrTy = C.getPointerType(SharedsTy);
// Emit initial values for private copies (if any).
llvm::Value *TaskPrivatesMap = nullptr;
auto *TaskPrivatesMapTy =
std::next(cast<llvm::Function>(TaskFunction)->getArgumentList().begin(),
3)
->getType();
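// The outlined task function receives the privates-mapping callback as its
// fourth parameter; reuse that parameter's LLVM type so that either the
// emitted map function or a null pointer of the matching type can be passed.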
if (!Privates.empty()) {
auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
TaskPrivatesMap = emitTaskPrivateMappingFunction(
CGM, Loc, PrivateVars, FirstprivateVars, FI->getType(), Privates);
TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
TaskPrivatesMap, TaskPrivatesMapTy);
} else {
TaskPrivatesMap = llvm::ConstantPointerNull::get(
cast<llvm::PointerType>(TaskPrivatesMapTy));
}
// Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
// kmp_task_t *tt);
auto *TaskEntry = emitProxyTaskFunction(
CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTy,
KmpTaskTQTy, SharedsPtrTy, TaskFunction, TaskPrivatesMap);
// Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
// kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
// kmp_routine_entry_t *task_entry);
// Task flags. Format is taken from
// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h,
// description of kmp_tasking_flags struct.
const unsigned TiedFlag = 0x1;
const unsigned FinalFlag = 0x2;
unsigned Flags = Tied ? TiedFlag : 0;
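// For example, "#pragma omp task final(Cond)" with a non-constant Cond yields
// roughly: or(select(Cond, 0x2, 0x0), <tied bit>), computed below.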
auto *TaskFlags =
Final.getPointer()
? CGF.Builder.CreateSelect(Final.getPointer(),
CGF.Builder.getInt32(FinalFlag),
CGF.Builder.getInt32(/*C=*/0))
: CGF.Builder.getInt32(Final.getInt() ? FinalFlag : 0);
TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
auto SharedsSize = C.getTypeSizeInChars(SharedsTy);
llvm::Value *AllocArgs[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), TaskFlags,
KmpTaskTWithPrivatesTySize, CGM.getSize(SharedsSize),
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskEntry,
KmpRoutineEntryPtrTy)};
auto *NewTask = CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
auto *NewTaskNewTaskTTy = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
NewTask, KmpTaskTWithPrivatesPtrTy);
LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
KmpTaskTWithPrivatesQTy);
LValue TDBase =
CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
// Fill the data in the resulting kmp_task_t record.
// Copy shareds if there are any.
llvm::Value *KmpTaskSharedsPtr = nullptr;
if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
KmpTaskSharedsPtr = CGF.EmitLoadOfScalar(
CGF.EmitLValueForField(
TDBase, *std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds)),
Loc);
CGF.EmitAggregateCopy(KmpTaskSharedsPtr, Shareds, SharedsTy);
}
// Emit initial values for private copies (if any).
bool NeedsCleanup = false;
if (!Privates.empty()) {
auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
auto PrivatesBase = CGF.EmitLValueForField(Base, *FI);
FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
LValue SharedsBase;
if (!FirstprivateVars.empty()) {
SharedsBase = CGF.MakeNaturalAlignAddrLValue(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
SharedsTy);
}
CodeGenFunction::CGCapturedStmtInfo CapturesInfo(
cast<CapturedStmt>(*D.getAssociatedStmt()));
for (auto &&Pair : Privates) {
auto *VD = Pair.second.PrivateCopy;
auto *Init = VD->getAnyInitializer();
LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
if (Init) {
if (auto *Elem = Pair.second.PrivateElemInit) {
auto *OriginalVD = Pair.second.Original;
auto *SharedField = CapturesInfo.lookup(OriginalVD);
auto SharedRefLValue =
CGF.EmitLValueForField(SharedsBase, SharedField);
QualType Type = OriginalVD->getType();
if (Type->isArrayType()) {
// Initialize firstprivate array.
if (!isa<CXXConstructExpr>(Init) ||
CGF.isTrivialInitializer(Init)) {
// Perform simple memcpy.
CGF.EmitAggregateAssign(PrivateLValue.getAddress(),
SharedRefLValue.getAddress(), Type);
} else {
// Initialize firstprivate array using element-by-element
// initialization.
CGF.EmitOMPAggregateAssign(
PrivateLValue.getAddress(), SharedRefLValue.getAddress(),
Type, [&CGF, Elem, Init, &CapturesInfo](
llvm::Value *DestElement, llvm::Value *SrcElement) {
// Clean up any temporaries needed by the initialization.
CodeGenFunction::OMPPrivateScope InitScope(CGF);
InitScope.addPrivate(Elem, [SrcElement]() -> llvm::Value *{
return SrcElement;
});
(void)InitScope.Privatize();
// Emit initialization for single element.
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
CGF, &CapturesInfo);
CGF.EmitAnyExprToMem(Init, DestElement,
Init->getType().getQualifiers(),
/*IsInitializer=*/false);
});
}
} else {
CodeGenFunction::OMPPrivateScope InitScope(CGF);
InitScope.addPrivate(Elem, [SharedRefLValue]() -> llvm::Value *{
return SharedRefLValue.getAddress();
});
(void)InitScope.Privatize();
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
CGF.EmitExprAsInit(Init, VD, PrivateLValue,
/*capturedByInit=*/false);
}
} else {
CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
}
}
NeedsCleanup = NeedsCleanup || FI->getType().isDestructedType();
++FI;
}
}
// Provide pointer to function with destructors for privates.
llvm::Value *DestructorFn =
NeedsCleanup ? emitDestructorsFunction(CGM, Loc, KmpInt32Ty,
KmpTaskTWithPrivatesPtrQTy,
KmpTaskTWithPrivatesQTy)
: llvm::ConstantPointerNull::get(
cast<llvm::PointerType>(KmpRoutineEntryPtrTy));
LValue Destructor = CGF.EmitLValueForField(
TDBase, *std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTDestructors));
CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
DestructorFn, KmpRoutineEntryPtrTy),
Destructor);
// Process list of dependences.
llvm::Value *DependInfo = nullptr;
unsigned DependencesNumber = Dependences.size();
if (!Dependences.empty()) {
// Dependence kind for RTL.
enum RTLDependenceKindTy { DepIn = 1, DepOut = 2, DepInOut = 3 };
enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
RecordDecl *KmpDependInfoRD;
QualType FlagsTy = C.getIntTypeForBitwidth(
C.toBits(C.getTypeSizeInChars(C.BoolTy)), /*Signed=*/false);
llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
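// The flags member is modeled as an unsigned integer whose width matches that
// of bool, which is assumed to match the layout of the runtime's
// kmp_depend_info flags bitfield.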
if (KmpDependInfoTy.isNull()) {
KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
KmpDependInfoRD->startDefinition();
addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
KmpDependInfoRD->completeDefinition();
KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
} else {
KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
}
// Define type kmp_depend_info[<Dependences.size()>];
QualType KmpDependInfoArrayTy = C.getConstantArrayType(
KmpDependInfoTy, llvm::APInt(/*numBits=*/64, Dependences.size()),
ArrayType::Normal, /*IndexTypeQuals=*/0);
// kmp_depend_info[<Dependences.size()>] deps;
DependInfo = CGF.CreateMemTemp(KmpDependInfoArrayTy);
for (unsigned i = 0; i < DependencesNumber; ++i) {
auto Addr = CGF.EmitLValue(Dependences[i].second);
auto *Size = llvm::ConstantInt::get(
CGF.SizeTy,
C.getTypeSizeInChars(Dependences[i].second->getType()).getQuantity());
auto Base = CGF.MakeNaturalAlignAddrLValue(
CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, DependInfo, i),
KmpDependInfoTy);
// deps[i].base_addr = &<Dependences[i].second>;
auto BaseAddrLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
CGF.EmitStoreOfScalar(
CGF.Builder.CreatePtrToInt(Addr.getAddress(), CGF.IntPtrTy),
BaseAddrLVal);
// deps[i].len = sizeof(<Dependences[i].second>);
auto LenLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), Len));
CGF.EmitStoreOfScalar(Size, LenLVal);
// deps[i].flags = <Dependences[i].first>;
RTLDependenceKindTy DepKind;
switch (Dependences[i].first) {
case OMPC_DEPEND_in:
DepKind = DepIn;
break;
case OMPC_DEPEND_out:
DepKind = DepOut;
break;
case OMPC_DEPEND_inout:
DepKind = DepInOut;
break;
case OMPC_DEPEND_unknown:
llvm_unreachable("Unknown task dependence type");
}
auto FlagsLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
FlagsLVal);
}
DependInfo = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, DependInfo, 0),
CGF.VoidPtrTy);
}
// NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
// libcall.
// Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
// *new_task);
// Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
// kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
// kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if dependence
// list is not empty
auto *ThreadID = getThreadID(CGF, Loc);
auto *UpLoc = emitUpdateLocation(CGF, Loc);
llvm::Value *TaskArgs[] = {UpLoc, ThreadID, NewTask};
llvm::Value *DepTaskArgs[] = {
UpLoc,
ThreadID,
NewTask,
DependInfo ? CGF.Builder.getInt32(DependencesNumber) : nullptr,
DependInfo,
DependInfo ? CGF.Builder.getInt32(0) : nullptr,
DependInfo ? llvm::ConstantPointerNull::get(CGF.VoidPtrTy) : nullptr};
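// When there are no dependences the nullptr placeholders above are never
// read: ThenCodeGen selects the plain TaskArgs form in that case.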
auto &&ThenCodeGen = [this, DependInfo, &TaskArgs,
&DepTaskArgs](CodeGenFunction &CGF) {
// TODO: add check for untied tasks.
CGF.EmitRuntimeCall(
createRuntimeFunction(DependInfo ? OMPRTL__kmpc_omp_task_with_deps
: OMPRTL__kmpc_omp_task),
DependInfo ? makeArrayRef(DepTaskArgs) : makeArrayRef(TaskArgs));
};
typedef CallEndCleanup<std::extent<decltype(TaskArgs)>::value>
IfCallEndCleanup;
llvm::Value *DepWaitTaskArgs[] = {
UpLoc,
ThreadID,
DependInfo ? CGF.Builder.getInt32(DependencesNumber) : nullptr,
DependInfo,
DependInfo ? CGF.Builder.getInt32(0) : nullptr,
DependInfo ? llvm::ConstantPointerNull::get(CGF.VoidPtrTy) : nullptr};
auto &&ElseCodeGen = [this, &TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry,
DependInfo, &DepWaitTaskArgs](CodeGenFunction &CGF) {
CodeGenFunction::RunCleanupsScope LocalScope(CGF);
// Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
// kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
// ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
// is specified.
if (DependInfo)
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_wait_deps),
DepWaitTaskArgs);
// Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
// kmp_task_t *new_task);
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task_begin_if0),
TaskArgs);
// Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
// kmp_task_t *new_task);
CGF.EHStack.pushCleanup<IfCallEndCleanup>(
NormalAndEHCleanup,
createRuntimeFunction(OMPRTL__kmpc_omp_task_complete_if0),
llvm::makeArrayRef(TaskArgs));
// Call proxy_task_entry(gtid, new_task);
llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
CGF.EmitCallOrInvoke(TaskEntry, OutlinedFnArgs);
};
if (IfCond) {
emitOMPIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
} else {
CodeGenFunction::RunCleanupsScope Scope(CGF);
ThenCodeGen(CGF);
}
}
static llvm::Value *emitReductionFunction(CodeGenModule &CGM,
llvm::Type *ArgsType,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps) {
auto &C = CGM.getContext();
// void reduction_func(void *LHSArg, void *RHSArg);
FunctionArgList Args;
ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, SourceLocation(), /*Id=*/nullptr,
C.VoidPtrTy);
ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, SourceLocation(), /*Id=*/nullptr,
C.VoidPtrTy);
Args.push_back(&LHSArg);
Args.push_back(&RHSArg);
FunctionType::ExtInfo EI;
auto &CGFI = CGM.getTypes().arrangeFreeFunctionDeclaration(
C.VoidTy, Args, EI, /*isVariadic=*/false);
auto *Fn = llvm::Function::Create(
CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
".omp.reduction.reduction_func", &CGM.getModule());
CGM.SetLLVMFunctionAttributes(/*D=*/nullptr, CGFI, Fn);
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
// Dst = (void*[n])(LHSArg);
// Src = (void*[n])(RHSArg);
auto *LHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&LHSArg),
CGF.PointerAlignInBytes),
ArgsType);
auto *RHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&RHSArg),
CGF.PointerAlignInBytes),
ArgsType);
// ...
// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
// ...
CodeGenFunction::OMPPrivateScope Scope(CGF);
for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I) {
Scope.addPrivate(
cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl()),
[&]() -> llvm::Value *{
return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder.CreateAlignedLoad(
CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, RHS, I),
CGM.PointerAlignInBytes),
CGF.ConvertTypeForMem(C.getPointerType(RHSExprs[I]->getType())));
});
Scope.addPrivate(
cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl()),
[&]() -> llvm::Value *{
return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder.CreateAlignedLoad(
CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, LHS, I),
CGM.PointerAlignInBytes),
CGF.ConvertTypeForMem(C.getPointerType(LHSExprs[I]->getType())));
});
}
Scope.Privatize();
for (auto *E : ReductionOps) {
CGF.EmitIgnoredExpr(E);
}
Scope.ForceCleanup();
CGF.FinishFunction();
return Fn;
}
void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
bool WithNowait, bool SimpleReduction) {
// The following code should be emitted for a reduction:
//
// static kmp_critical_name lock = { 0 };
//
// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
// *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
// ...
// *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
// *(Type<n>-1*)rhs[<n>-1]);
// }
//
// ...
// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
// RedList, reduce_func, &<lock>)) {
// case 1:
// ...
// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
// ...
// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
// break;
// case 2:
// ...
// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
// ...
// [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);]
// break;
// default:;
// }
//
// if SimpleReduction is true, only the following code is generated:
// ...
// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
// ...
auto &C = CGM.getContext();
if (SimpleReduction) {
CodeGenFunction::RunCleanupsScope Scope(CGF);
for (auto *E : ReductionOps) {
CGF.EmitIgnoredExpr(E);
}
return;
}
// 1. Build a list of reduction variables.
// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
llvm::APInt ArraySize(/*unsigned int numBits=*/32, RHSExprs.size());
QualType ReductionArrayTy =
C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
/*IndexTypeQuals=*/0);
auto *ReductionList =
CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I) {
auto *Elem = CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, ReductionList, I);
CGF.Builder.CreateAlignedStore(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLValue(RHSExprs[I]).getAddress(), CGF.VoidPtrTy),
Elem, CGM.PointerAlignInBytes);
}
// 2. Emit reduce_func().
auto *ReductionFn = emitReductionFunction(
CGM, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), LHSExprs,
RHSExprs, ReductionOps);
// 3. Create static kmp_critical_name lock = { 0 };
auto *Lock = getCriticalRegionLock(".reduction");
// 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
// RedList, reduce_func, &<lock>);
auto *IdentTLoc = emitUpdateLocation(
CGF, Loc,
static_cast<OpenMPLocationFlags>(OMP_IDENT_KMPC | OMP_ATOMIC_REDUCE));
auto *ThreadId = getThreadID(CGF, Loc);
auto *ReductionArrayTySize = llvm::ConstantInt::get(
CGM.SizeTy, C.getTypeSizeInChars(ReductionArrayTy).getQuantity());
auto *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(ReductionList,
CGF.VoidPtrTy);
llvm::Value *Args[] = {
IdentTLoc, // ident_t *<loc>
ThreadId, // i32 <gtid>
CGF.Builder.getInt32(RHSExprs.size()), // i32 <n>
ReductionArrayTySize, // size_type sizeof(RedList)
RL, // void *RedList
ReductionFn, // void (*) (void *, void *) <reduce_func>
Lock // kmp_critical_name *&<lock>
};
auto Res = CGF.EmitRuntimeCall(
createRuntimeFunction(WithNowait ? OMPRTL__kmpc_reduce_nowait
: OMPRTL__kmpc_reduce),
Args);
// 5. Build switch(res)
auto *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
auto *SwInst = CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);
// 6. Build case 1:
// ...
// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
// ...
// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
// break;
auto *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
CGF.EmitBlock(Case1BB);
{
CodeGenFunction::RunCleanupsScope Scope(CGF);
// Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
llvm::Value *EndArgs[] = {
IdentTLoc, // ident_t *<loc>
ThreadId, // i32 <gtid>
Lock // kmp_critical_name *&<lock>
};
CGF.EHStack
.pushCleanup<CallEndCleanup<std::extent<decltype(EndArgs)>::value>>(
NormalAndEHCleanup,
createRuntimeFunction(WithNowait ? OMPRTL__kmpc_end_reduce_nowait
: OMPRTL__kmpc_end_reduce),
llvm::makeArrayRef(EndArgs));
for (auto *E : ReductionOps) {
CGF.EmitIgnoredExpr(E);
}
}
CGF.EmitBranch(DefaultBB);
// 7. Build case 2:
// ...
// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
// ...
// break;
auto *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
CGF.EmitBlock(Case2BB);
{
CodeGenFunction::RunCleanupsScope Scope(CGF);
if (!WithNowait) {
// Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
llvm::Value *EndArgs[] = {
IdentTLoc, // ident_t *<loc>
ThreadId, // i32 <gtid>
Lock // kmp_critical_name *&<lock>
};
CGF.EHStack
.pushCleanup<CallEndCleanup<std::extent<decltype(EndArgs)>::value>>(
NormalAndEHCleanup,
createRuntimeFunction(OMPRTL__kmpc_end_reduce),
llvm::makeArrayRef(EndArgs));
}
auto I = LHSExprs.begin();
for (auto *E : ReductionOps) {
const Expr *XExpr = nullptr;
const Expr *EExpr = nullptr;
const Expr *UpExpr = nullptr;
BinaryOperatorKind BO = BO_Comma;
if (auto *BO = dyn_cast<BinaryOperator>(E)) {
if (BO->getOpcode() == BO_Assign) {
XExpr = BO->getLHS();
UpExpr = BO->getRHS();
}
}
// Try to emit update expression as a simple atomic.
auto *RHSExpr = UpExpr;
if (RHSExpr) {
// Analyze RHS part of the whole expression.
if (auto *ACO = dyn_cast<AbstractConditionalOperator>(
RHSExpr->IgnoreParenImpCasts())) {
// If this is a conditional operator, analyze its condition for
// min/max reduction operator.
RHSExpr = ACO->getCond();
}
if (auto *BORHS =
dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
EExpr = BORHS->getRHS();
BO = BORHS->getOpcode();
}
}
if (XExpr) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
LValue X = CGF.EmitLValue(XExpr);
RValue E;
if (EExpr)
E = CGF.EmitAnyExpr(EExpr);
CGF.EmitOMPAtomicSimpleUpdateExpr(
X, E, BO, /*IsXLHSInRHSPart=*/true, llvm::Monotonic, Loc,
[&CGF, UpExpr, VD](RValue XRValue) {
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
PrivateScope.addPrivate(
VD, [&CGF, VD, XRValue]() -> llvm::Value *{
auto *LHSTemp = CGF.CreateMemTemp(VD->getType());
CGF.EmitStoreThroughLValue(
XRValue,
CGF.MakeNaturalAlignAddrLValue(LHSTemp, VD->getType()));
return LHSTemp;
});
(void)PrivateScope.Privatize();
return CGF.EmitAnyExpr(UpExpr);
});
} else {
// Emit as a critical region.
emitCriticalRegion(CGF, ".atomic_reduction", [E](CodeGenFunction &CGF) {
CGF.EmitIgnoredExpr(E);
}, Loc);
}
++I;
}
}
CGF.EmitBranch(DefaultBB);
CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
}
void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
SourceLocation Loc) {
// Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
// global_tid);
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
// Ignore return result until untied tasks are supported.
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskwait), Args);
}
void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnerKind,
const RegionCodeGenTy &CodeGen) {
InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind);
CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
}
namespace {
enum RTCancelKind {
CancelNoreq = 0,
CancelParallel = 1,
CancelLoop = 2,
CancelSections = 3,
CancelTaskgroup = 4
};
}
static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
RTCancelKind CancelKind = CancelNoreq;
if (CancelRegion == OMPD_parallel)
CancelKind = CancelParallel;
else if (CancelRegion == OMPD_for)
CancelKind = CancelLoop;
else if (CancelRegion == OMPD_sections)
CancelKind = CancelSections;
else {
assert(CancelRegion == OMPD_taskgroup);
CancelKind = CancelTaskgroup;
}
return CancelKind;
}
void CGOpenMPRuntime::emitCancellationPointCall(
CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) {
// Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
// global_tid, kmp_int32 cncl_kind);
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
auto CancelDest =
CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
if (CancelDest.isValid()) {
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
// Ignore return result until untied tasks are supported.
auto *Result = CGF.EmitRuntimeCall(
createRuntimeFunction(OMPRTL__kmpc_cancellationpoint), Args);
// if (__kmpc_cancellationpoint()) {
// __kmpc_cancel_barrier();
// exit from construct;
// }
auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
auto *ContBB = CGF.createBasicBlock(".cancel.continue");
auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
CGF.EmitBlock(ExitBB);
// __kmpc_cancel_barrier();
emitBarrierCall(CGF, Loc, OMPD_unknown, /*CheckForCancel=*/false);
// exit from construct;
CGF.EmitBranchThroughCleanup(CancelDest);
CGF.EmitBlock(ContBB, /*IsFinished=*/true);
}
}
}
void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) {
// Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
// kmp_int32 cncl_kind);
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
auto CancelDest =
CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
if (CancelDest.isValid()) {
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
// Ignore return result until untied tasks are supported.
auto *Result =
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_cancel), Args);
// if (__kmpc_cancel()) {
// __kmpc_cancel_barrier();
// exit from construct;
// }
auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
auto *ContBB = CGF.createBasicBlock(".cancel.continue");
auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
CGF.EmitBlock(ExitBB);
// __kmpc_cancel_barrier();
emitBarrierCall(CGF, Loc, OMPD_unknown, /*CheckForCancel=*/false);
// exit from construct;
CGF.EmitBranchThroughCleanup(CancelDest);
CGF.EmitBlock(ContBB, /*IsFinished=*/true);
}
}
}
#endif // HLSL Change
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGExprComplex.cpp | //===--- CGExprComplex.cpp - Emit LLVM Code for Complex Exprs -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with complex types as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include <algorithm>
using namespace clang;
using namespace CodeGen;
//===----------------------------------------------------------------------===//
// Complex Expression Emitter
//===----------------------------------------------------------------------===//
typedef CodeGenFunction::ComplexPairTy ComplexPairTy;
/// Return the complex type that we are meant to emit.
static const ComplexType *getComplexType(QualType type) {
type = type.getCanonicalType();
if (const ComplexType *comp = dyn_cast<ComplexType>(type)) {
return comp;
} else {
return cast<ComplexType>(cast<AtomicType>(type)->getValueType());
}
}
namespace {
class ComplexExprEmitter
: public StmtVisitor<ComplexExprEmitter, ComplexPairTy> {
CodeGenFunction &CGF;
CGBuilderTy &Builder;
bool IgnoreReal;
bool IgnoreImag;
public:
ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false)
: CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii) {
}
//===--------------------------------------------------------------------===//
// Utilities
//===--------------------------------------------------------------------===//
bool TestAndClearIgnoreReal() {
bool I = IgnoreReal;
IgnoreReal = false;
return I;
}
bool TestAndClearIgnoreImag() {
bool I = IgnoreImag;
IgnoreImag = false;
return I;
}
/// EmitLoadOfLValue - Given an expression with complex type that represents a
/// value l-value, this method emits the address of the l-value, then loads
/// and returns the result.
ComplexPairTy EmitLoadOfLValue(const Expr *E) {
return EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc());
}
ComplexPairTy EmitLoadOfLValue(LValue LV, SourceLocation Loc);
/// EmitStoreOfComplex - Store the specified real/imag parts into the
/// specified value pointer.
void EmitStoreOfComplex(ComplexPairTy Val, LValue LV, bool isInit);
/// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
ComplexPairTy EmitComplexToComplexCast(ComplexPairTy Val, QualType SrcType,
QualType DestType);
/// EmitScalarToComplexCast - Emit a cast from scalar value Val to DestType.
ComplexPairTy EmitScalarToComplexCast(llvm::Value *Val, QualType SrcType,
QualType DestType);
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
ComplexPairTy Visit(Expr *E) {
ApplyDebugLocation DL(CGF, E);
return StmtVisitor<ComplexExprEmitter, ComplexPairTy>::Visit(E);
}
ComplexPairTy VisitStmt(Stmt *S) {
S->dump(CGF.getContext().getSourceManager());
llvm_unreachable("Stmt can't have complex result type!");
}
ComplexPairTy VisitExpr(Expr *S);
ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
ComplexPairTy VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
return Visit(GE->getResultExpr());
}
ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL);
ComplexPairTy
VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
return Visit(PE->getReplacement());
}
// l-values.
ComplexPairTy VisitDeclRefExpr(DeclRefExpr *E) {
if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) {
if (result.isReference())
return EmitLoadOfLValue(result.getReferenceLValue(CGF, E),
E->getExprLoc());
llvm::Constant *pair = result.getValue();
return ComplexPairTy(pair->getAggregateElement(0U),
pair->getAggregateElement(1U));
}
return EmitLoadOfLValue(E);
}
ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
return EmitLoadOfLValue(E);
}
ComplexPairTy VisitObjCMessageExpr(ObjCMessageExpr *E) {
return CGF.EmitObjCMessageExpr(E).getComplexVal();
}
ComplexPairTy VisitArraySubscriptExpr(Expr *E) { return EmitLoadOfLValue(E); }
ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); }
ComplexPairTy VisitOpaqueValueExpr(OpaqueValueExpr *E) {
if (E->isGLValue())
return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getExprLoc());
return CGF.getOpaqueRValueMapping(E).getComplexVal();
}
ComplexPairTy VisitPseudoObjectExpr(PseudoObjectExpr *E) {
return CGF.EmitPseudoObjectRValue(E).getComplexVal();
}
// FIXME: CompoundLiteralExpr
ComplexPairTy EmitCast(CastKind CK, Expr *Op, QualType DestTy);
ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
// Unlike for scalars, we don't have to worry about function->ptr demotion
// here.
return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
}
ComplexPairTy VisitCastExpr(CastExpr *E) {
return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
}
ComplexPairTy VisitCallExpr(const CallExpr *E);
ComplexPairTy VisitStmtExpr(const StmtExpr *E);
// Operators.
ComplexPairTy VisitPrePostIncDec(const UnaryOperator *E,
bool isInc, bool isPre) {
LValue LV = CGF.EmitLValue(E->getSubExpr());
return CGF.EmitComplexPrePostIncDec(E, LV, isInc, isPre);
}
ComplexPairTy VisitUnaryPostDec(const UnaryOperator *E) {
return VisitPrePostIncDec(E, /*isInc=*/false, /*isPre=*/false);
}
ComplexPairTy VisitUnaryPostInc(const UnaryOperator *E) {
return VisitPrePostIncDec(E, /*isInc=*/true, /*isPre=*/false);
}
ComplexPairTy VisitUnaryPreDec(const UnaryOperator *E) {
return VisitPrePostIncDec(E, /*isInc=*/false, /*isPre=*/true);
}
ComplexPairTy VisitUnaryPreInc(const UnaryOperator *E) {
return VisitPrePostIncDec(E, /*isInc=*/true, /*isPre=*/true);
}
ComplexPairTy VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
ComplexPairTy VisitUnaryPlus (const UnaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
return Visit(E->getSubExpr());
}
ComplexPairTy VisitUnaryMinus (const UnaryOperator *E);
ComplexPairTy VisitUnaryNot (const UnaryOperator *E);
// LNot,Real,Imag never return complex.
ComplexPairTy VisitUnaryExtension(const UnaryOperator *E) {
return Visit(E->getSubExpr());
}
ComplexPairTy VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
return Visit(DAE->getExpr());
}
ComplexPairTy VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
return Visit(DIE->getExpr());
}
ComplexPairTy VisitExprWithCleanups(ExprWithCleanups *E) {
CGF.enterFullExpression(E);
CodeGenFunction::RunCleanupsScope Scope(CGF);
return Visit(E->getSubExpr());
}
ComplexPairTy VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
assert(E->getType()->isAnyComplexType() && "Expected complex type!");
QualType Elem = E->getType()->castAs<ComplexType>()->getElementType();
llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
return ComplexPairTy(Null, Null);
}
ComplexPairTy VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
assert(E->getType()->isAnyComplexType() && "Expected complex type!");
QualType Elem = E->getType()->castAs<ComplexType>()->getElementType();
llvm::Constant *Null =
llvm::Constant::getNullValue(CGF.ConvertType(Elem));
return ComplexPairTy(Null, Null);
}
struct BinOpInfo {
ComplexPairTy LHS;
ComplexPairTy RHS;
QualType Ty; // Computation Type.
};
BinOpInfo EmitBinOps(const BinaryOperator *E);
LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
ComplexPairTy (ComplexExprEmitter::*Func)
(const BinOpInfo &),
RValue &Val);
ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
ComplexPairTy (ComplexExprEmitter::*Func)
(const BinOpInfo &));
ComplexPairTy EmitBinAdd(const BinOpInfo &Op);
ComplexPairTy EmitBinSub(const BinOpInfo &Op);
ComplexPairTy EmitBinMul(const BinOpInfo &Op);
ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
ComplexPairTy EmitComplexBinOpLibCall(StringRef LibCallName,
const BinOpInfo &Op);
ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
return EmitBinAdd(EmitBinOps(E));
}
ComplexPairTy VisitBinSub(const BinaryOperator *E) {
return EmitBinSub(EmitBinOps(E));
}
ComplexPairTy VisitBinMul(const BinaryOperator *E) {
return EmitBinMul(EmitBinOps(E));
}
ComplexPairTy VisitBinDiv(const BinaryOperator *E) {
return EmitBinDiv(EmitBinOps(E));
}
// Compound assignments.
ComplexPairTy VisitBinAddAssign(const CompoundAssignOperator *E) {
return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinAdd);
}
ComplexPairTy VisitBinSubAssign(const CompoundAssignOperator *E) {
return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinSub);
}
ComplexPairTy VisitBinMulAssign(const CompoundAssignOperator *E) {
return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinMul);
}
ComplexPairTy VisitBinDivAssign(const CompoundAssignOperator *E) {
return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinDiv);
}
// GCC rejects rem/and/or/xor for integer complex.
// Logical and/or always return int, never complex.
// No comparisons produce a complex result.
LValue EmitBinAssignLValue(const BinaryOperator *E,
ComplexPairTy &Val);
ComplexPairTy VisitBinAssign (const BinaryOperator *E);
ComplexPairTy VisitBinComma (const BinaryOperator *E);
ComplexPairTy
VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
ComplexPairTy VisitChooseExpr(ChooseExpr *CE);
ComplexPairTy VisitInitListExpr(InitListExpr *E);
ComplexPairTy VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
return EmitLoadOfLValue(E);
}
ComplexPairTy VisitVAArgExpr(VAArgExpr *E);
ComplexPairTy VisitAtomicExpr(AtomicExpr *E) {
return CGF.EmitAtomicExpr(E).getComplexVal();
}
};
} // end anonymous namespace.
//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//
/// EmitLoadOfLValue - Given an l-value of complex type, emit code to
/// load the real and imaginary pieces, returning them as Real/Imag.
ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue,
SourceLocation loc) {
assert(lvalue.isSimple() && "non-simple complex l-value?");
if (lvalue.getType()->isAtomicType())
return CGF.EmitAtomicLoad(lvalue, loc).getComplexVal();
llvm::Value *SrcPtr = lvalue.getAddress();
bool isVolatile = lvalue.isVolatileQualified();
unsigned AlignR = lvalue.getAlignment().getQuantity();
ASTContext &C = CGF.getContext();
QualType ComplexTy = lvalue.getType();
unsigned ComplexAlign = C.getTypeAlignInChars(ComplexTy).getQuantity();
unsigned AlignI = std::min(AlignR, ComplexAlign);
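// The imaginary part lives at offset sizeof(element), so even when the whole
// complex value is over-aligned, the second load cannot assume more than the
// type's natural alignment.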
llvm::Value *Real=nullptr, *Imag=nullptr;
if (!IgnoreReal || isVolatile) {
llvm::Value *RealP = Builder.CreateStructGEP(nullptr, SrcPtr, 0,
SrcPtr->getName() + ".realp");
Real = Builder.CreateAlignedLoad(RealP, AlignR, isVolatile,
SrcPtr->getName() + ".real");
}
if (!IgnoreImag || isVolatile) {
llvm::Value *ImagP = Builder.CreateStructGEP(nullptr, SrcPtr, 1,
SrcPtr->getName() + ".imagp");
Imag = Builder.CreateAlignedLoad(ImagP, AlignI, isVolatile,
SrcPtr->getName() + ".imag");
}
return ComplexPairTy(Real, Imag);
}
/// EmitStoreOfComplex - Store the specified real/imag parts into the
/// specified value pointer.
void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
bool isInit) {
if (lvalue.getType()->isAtomicType() ||
(!isInit && CGF.LValueIsSuitableForInlineAtomic(lvalue)))
return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit);
llvm::Value *Ptr = lvalue.getAddress();
llvm::Value *RealPtr = Builder.CreateStructGEP(nullptr, Ptr, 0, "real");
llvm::Value *ImagPtr = Builder.CreateStructGEP(nullptr, Ptr, 1, "imag");
unsigned AlignR = lvalue.getAlignment().getQuantity();
ASTContext &C = CGF.getContext();
QualType ComplexTy = lvalue.getType();
unsigned ComplexAlign = C.getTypeAlignInChars(ComplexTy).getQuantity();
unsigned AlignI = std::min(AlignR, ComplexAlign);
Builder.CreateAlignedStore(Val.first, RealPtr, AlignR,
lvalue.isVolatileQualified());
Builder.CreateAlignedStore(Val.second, ImagPtr, AlignI,
lvalue.isVolatileQualified());
}
//===----------------------------------------------------------------------===//
// Visitor Methods
//===----------------------------------------------------------------------===//
ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
CGF.ErrorUnsupported(E, "complex expression");
llvm::Type *EltTy =
CGF.ConvertType(getComplexType(E->getType())->getElementType());
llvm::Value *U = llvm::UndefValue::get(EltTy);
return ComplexPairTy(U, U);
}
ComplexPairTy ComplexExprEmitter::
VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
llvm::Value *Imag = CGF.EmitScalarExpr(IL->getSubExpr());
return ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
}
ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
if (E->getCallReturnType(CGF.getContext())->isReferenceType())
return EmitLoadOfLValue(E);
return CGF.EmitCallExpr(E).getComplexVal();
}
ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
CodeGenFunction::StmtExprEvaluation eval(CGF);
llvm::Value *RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(), /*GetLast=*/true);
assert(RetAlloca && "Expected complex return value");
return EmitLoadOfLValue(CGF.MakeAddrLValue(RetAlloca, E->getType()),
E->getExprLoc());
}
/// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
QualType SrcType,
QualType DestType) {
// Get the src/dest element type.
SrcType = SrcType->castAs<ComplexType>()->getElementType();
DestType = DestType->castAs<ComplexType>()->getElementType();
// C99 6.3.1.6: When a value of complex type is converted to another
// complex type, both the real and imaginary parts follow the conversion
// rules for the corresponding real types.
Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType);
Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType);
return Val;
}
ComplexPairTy ComplexExprEmitter::EmitScalarToComplexCast(llvm::Value *Val,
QualType SrcType,
QualType DestType) {
// Convert the input element to the element type of the complex.
DestType = DestType->castAs<ComplexType>()->getElementType();
Val = CGF.EmitScalarConversion(Val, SrcType, DestType);
// Return (realval, 0).
return ComplexPairTy(Val, llvm::Constant::getNullValue(Val->getType()));
}
ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
QualType DestTy) {
switch (CK) {
case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
// Atomic to non-atomic casts may be more than a no-op for some platforms and
// for some types.
case CK_AtomicToNonAtomic:
case CK_NonAtomicToAtomic:
case CK_NoOp:
case CK_LValueToRValue:
case CK_UserDefinedConversion:
return Visit(Op);
case CK_LValueBitCast: {
LValue origLV = CGF.EmitLValue(Op);
llvm::Value *V = origLV.getAddress();
V = Builder.CreateBitCast(V,
CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy,
origLV.getAlignment()),
Op->getExprLoc());
}
case CK_BitCast:
case CK_BaseToDerived:
case CK_DerivedToBase:
case CK_UncheckedDerivedToBase:
case CK_Dynamic:
case CK_ToUnion:
case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
case CK_NullToPointer:
case CK_NullToMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_DerivedToBaseMemberPointer:
case CK_MemberPointerToBoolean:
case CK_ReinterpretMemberPointer:
case CK_ConstructorConversion:
case CK_IntegralToPointer:
case CK_PointerToIntegral:
case CK_PointerToBoolean:
case CK_ToVoid:
case CK_VectorSplat:
case CK_IntegralCast:
case CK_IntegralToBoolean:
case CK_IntegralToFloating:
case CK_FloatingToIntegral:
case CK_FloatingToBoolean:
case CK_FloatingCast:
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
case CK_ObjCObjectLValueCast:
case CK_FloatingComplexToReal:
case CK_FloatingComplexToBoolean:
case CK_IntegralComplexToReal:
case CK_IntegralComplexToBoolean:
case CK_ARCProduceObject:
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLEvent:
case CK_AddressSpaceConversion:
// HLSL Change Start
case CK_FlatConversion:
case CK_HLSLVectorSplat:
case CK_HLSLMatrixSplat:
case CK_HLSLVectorToScalarCast:
case CK_HLSLMatrixToScalarCast:
case CK_HLSLVectorTruncationCast:
case CK_HLSLMatrixTruncationCast:
case CK_HLSLVectorToMatrixCast:
case CK_HLSLMatrixToVectorCast:
case CK_HLSLDerivedToBase:
case CK_HLSLCC_IntegralCast:
case CK_HLSLCC_IntegralToBoolean:
case CK_HLSLCC_IntegralToFloating:
case CK_HLSLCC_FloatingToIntegral:
case CK_HLSLCC_FloatingToBoolean:
case CK_HLSLCC_FloatingCast:
// HLSL Change End
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
case CK_IntegralRealToComplex:
return EmitScalarToComplexCast(CGF.EmitScalarExpr(Op),
Op->getType(), DestTy);
case CK_FloatingComplexCast:
case CK_FloatingComplexToIntegralComplex:
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
}
llvm_unreachable("unknown cast resulting in complex value");
}
ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
ComplexPairTy Op = Visit(E->getSubExpr());
llvm::Value *ResR, *ResI;
if (Op.first->getType()->isFloatingPointTy()) {
ResR = Builder.CreateFNeg(Op.first, "neg.r");
ResI = Builder.CreateFNeg(Op.second, "neg.i");
} else {
ResR = Builder.CreateNeg(Op.first, "neg.r");
ResI = Builder.CreateNeg(Op.second, "neg.i");
}
return ComplexPairTy(ResR, ResI);
}
ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
// ~(a+ib) = a + i*-b
ComplexPairTy Op = Visit(E->getSubExpr());
llvm::Value *ResI;
if (Op.second->getType()->isFloatingPointTy())
ResI = Builder.CreateFNeg(Op.second, "conj.i");
else
ResI = Builder.CreateNeg(Op.second, "conj.i");
return ComplexPairTy(Op.first, ResI);
}
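// Note: in the floating-point cases below one operand may carry a null
// imaginary part, because a plain real operand is emitted as
// (scalar, nullptr) — see EmitBinOps; the integer path always sees two full
// complex values.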
ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
llvm::Value *ResR, *ResI;
if (Op.LHS.first->getType()->isFloatingPointTy()) {
ResR = Builder.CreateFAdd(Op.LHS.first, Op.RHS.first, "add.r");
if (Op.LHS.second && Op.RHS.second)
ResI = Builder.CreateFAdd(Op.LHS.second, Op.RHS.second, "add.i");
else
ResI = Op.LHS.second ? Op.LHS.second : Op.RHS.second;
assert(ResI && "Only one operand may be real!");
} else {
ResR = Builder.CreateAdd(Op.LHS.first, Op.RHS.first, "add.r");
assert(Op.LHS.second && Op.RHS.second &&
"Both operands of integer complex operators must be complex!");
ResI = Builder.CreateAdd(Op.LHS.second, Op.RHS.second, "add.i");
}
return ComplexPairTy(ResR, ResI);
}
ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) {
llvm::Value *ResR, *ResI;
if (Op.LHS.first->getType()->isFloatingPointTy()) {
ResR = Builder.CreateFSub(Op.LHS.first, Op.RHS.first, "sub.r");
if (Op.LHS.second && Op.RHS.second)
ResI = Builder.CreateFSub(Op.LHS.second, Op.RHS.second, "sub.i");
else
ResI = Op.LHS.second ? Op.LHS.second
: Builder.CreateFNeg(Op.RHS.second, "sub.i");
assert(ResI && "Only one operand may be real!");
} else {
ResR = Builder.CreateSub(Op.LHS.first, Op.RHS.first, "sub.r");
assert(Op.LHS.second && Op.RHS.second &&
"Both operands of integer complex operators must be complex!");
ResI = Builder.CreateSub(Op.LHS.second, Op.RHS.second, "sub.i");
}
return ComplexPairTy(ResR, ResI);
}
/// \brief Emit a libcall for a binary operation on complex types.
ComplexPairTy ComplexExprEmitter::EmitComplexBinOpLibCall(StringRef LibCallName,
const BinOpInfo &Op) {
CallArgList Args;
Args.add(RValue::get(Op.LHS.first),
Op.Ty->castAs<ComplexType>()->getElementType());
Args.add(RValue::get(Op.LHS.second),
Op.Ty->castAs<ComplexType>()->getElementType());
Args.add(RValue::get(Op.RHS.first),
Op.Ty->castAs<ComplexType>()->getElementType());
Args.add(RValue::get(Op.RHS.second),
Op.Ty->castAs<ComplexType>()->getElementType());
// We *must* use the full CG function call building logic here because the
  // complex type has special ABI handling. We also must not forget the
  // special calling convention that may be used for compiler builtins.
const CGFunctionInfo &FuncInfo =
CGF.CGM.getTypes().arrangeFreeFunctionCall(
Op.Ty, Args, FunctionType::ExtInfo(/* No CC here - will be added later */),
RequiredArgs::All);
llvm::FunctionType *FTy = CGF.CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGF.CGM.CreateBuiltinFunction(FTy, LibCallName);
llvm::Instruction *Call;
RValue Res = CGF.EmitCall(FuncInfo, Func, ReturnValueSlot(), Args,
nullptr, &Call);
cast<llvm::CallInst>(Call)->setCallingConv(CGF.CGM.getBuiltinCC());
cast<llvm::CallInst>(Call)->setDoesNotThrow();
return Res.getComplexVal();
}
/// \brief Look up the libcall name for a given floating point type complex
/// multiply.
static StringRef getComplexMultiplyLibCallName(llvm::Type *Ty) {
switch (Ty->getTypeID()) {
default:
llvm_unreachable("Unsupported floating point type!");
case llvm::Type::HalfTyID:
return "__mulhc3";
case llvm::Type::FloatTyID:
return "__mulsc3";
case llvm::Type::DoubleTyID:
return "__muldc3";
case llvm::Type::PPC_FP128TyID:
return "__multc3";
case llvm::Type::X86_FP80TyID:
return "__mulxc3";
case llvm::Type::FP128TyID:
return "__multc3";
}
}
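// As a concrete sketch of the mapping above: a 'float _Complex' multiply that
// reaches the slow path ends up calling, schematically,
//   { float, float } @__mulsc3(float %a, float %b, float %c, float %d)
// though the actual declaration and ABI handling are built generically by
// EmitComplexBinOpLibCall.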
// See C11 Annex G.5.1 for the semantics of multiplicative operators on complex
// typed values.
ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
using llvm::Value;
Value *ResR, *ResI;
llvm::MDBuilder MDHelper(CGF.getLLVMContext());
if (Op.LHS.first->getType()->isFloatingPointTy()) {
// The general formulation is:
// (a + ib) * (c + id) = (a * c - b * d) + i(a * d + b * c)
//
// But we can fold away components which would be zero due to a real
// operand according to C11 Annex G.5.1p2.
// FIXME: C11 also provides for imaginary types which would allow folding
// still more of this within the type system.
if (Op.LHS.second && Op.RHS.second) {
// If both operands are complex, emit the core math directly, and then
// test for NaNs. If we find NaNs in the result, we delegate to a libcall
// to carefully re-compute the correct infinity representation if
// possible. The expectation is that the presence of NaNs here is
// *extremely* rare, and so the cost of the libcall is almost irrelevant.
// This is good, because the libcall re-computes the core multiplication
// exactly the same as we do here and re-tests for NaNs in order to be
// a generic complex*complex libcall.
// First compute the four products.
Value *AC = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul_ac");
Value *BD = Builder.CreateFMul(Op.LHS.second, Op.RHS.second, "mul_bd");
Value *AD = Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul_ad");
Value *BC = Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul_bc");
      // The real part is the difference of the first two products, and the
      // imaginary part is the sum of the last two.
ResR = Builder.CreateFSub(AC, BD, "mul_r");
ResI = Builder.CreateFAdd(AD, BC, "mul_i");
// Emit the test for the real part becoming NaN and create a branch to
// handle it. We test for NaN by comparing the number to itself.
Value *IsRNaN = Builder.CreateFCmpUNO(ResR, ResR, "isnan_cmp");
llvm::BasicBlock *ContBB = CGF.createBasicBlock("complex_mul_cont");
llvm::BasicBlock *INaNBB = CGF.createBasicBlock("complex_mul_imag_nan");
llvm::Instruction *Branch = Builder.CreateCondBr(IsRNaN, INaNBB, ContBB);
llvm::BasicBlock *OrigBB = Branch->getParent();
      // Give a hint that we very much don't expect to see NaNs.
// Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
llvm::MDNode *BrWeight = MDHelper.createBranchWeights(1, (1U << 20) - 1);
Branch->setMetadata(llvm::LLVMContext::MD_prof, BrWeight);
// Now test the imaginary part and create its branch.
CGF.EmitBlock(INaNBB);
Value *IsINaN = Builder.CreateFCmpUNO(ResI, ResI, "isnan_cmp");
llvm::BasicBlock *LibCallBB = CGF.createBasicBlock("complex_mul_libcall");
Branch = Builder.CreateCondBr(IsINaN, LibCallBB, ContBB);
Branch->setMetadata(llvm::LLVMContext::MD_prof, BrWeight);
// Now emit the libcall on this slowest of the slow paths.
CGF.EmitBlock(LibCallBB);
Value *LibCallR, *LibCallI;
std::tie(LibCallR, LibCallI) = EmitComplexBinOpLibCall(
getComplexMultiplyLibCallName(Op.LHS.first->getType()), Op);
Builder.CreateBr(ContBB);
// Finally continue execution by phi-ing together the different
// computation paths.
CGF.EmitBlock(ContBB);
llvm::PHINode *RealPHI = Builder.CreatePHI(ResR->getType(), 3, "real_mul_phi");
RealPHI->addIncoming(ResR, OrigBB);
RealPHI->addIncoming(ResR, INaNBB);
RealPHI->addIncoming(LibCallR, LibCallBB);
llvm::PHINode *ImagPHI = Builder.CreatePHI(ResI->getType(), 3, "imag_mul_phi");
ImagPHI->addIncoming(ResI, OrigBB);
ImagPHI->addIncoming(ResI, INaNBB);
ImagPHI->addIncoming(LibCallI, LibCallBB);
return ComplexPairTy(RealPHI, ImagPHI);
}
assert((Op.LHS.second || Op.RHS.second) &&
"At least one operand must be complex!");
// If either of the operands is a real rather than a complex, the
// imaginary component is ignored when computing the real component of the
// result.
ResR = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul.rl");
ResI = Op.LHS.second
? Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul.il")
: Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul.ir");
} else {
assert(Op.LHS.second && Op.RHS.second &&
"Both operands of integer complex operators must be complex!");
Value *ResRl = Builder.CreateMul(Op.LHS.first, Op.RHS.first, "mul.rl");
Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second, "mul.rr");
ResR = Builder.CreateSub(ResRl, ResRr, "mul.r");
Value *ResIl = Builder.CreateMul(Op.LHS.second, Op.RHS.first, "mul.il");
Value *ResIr = Builder.CreateMul(Op.LHS.first, Op.RHS.second, "mul.ir");
ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i");
}
return ComplexPairTy(ResR, ResI);
}
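// For reference, the NaN slow path in EmitBinMul above produces a CFG shaped
// roughly like this (block names match the createBasicBlock calls):
//   entry:                ResR/ResI computed; br %isnan_cmp,
//                         complex_mul_imag_nan / complex_mul_cont
//   complex_mul_imag_nan: br %isnan_cmp, complex_mul_libcall / complex_mul_cont
//   complex_mul_libcall:  call __mul?c3; br complex_mul_cont
//   complex_mul_cont:     phi over the three incoming blocks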
// See C11 Annex G.5.1 for the semantics of multiplicative operators on complex
// typed values.
ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
llvm::Value *DSTr, *DSTi;
if (LHSr->getType()->isFloatingPointTy()) {
// If we have a complex operand on the RHS, we delegate to a libcall to
// handle all of the complexities and minimize underflow/overflow cases.
//
// FIXME: We would be able to avoid the libcall in many places if we
// supported imaginary types in addition to complex types.
if (RHSi) {
BinOpInfo LibCallOp = Op;
// If LHS was a real, supply a null imaginary part.
if (!LHSi)
LibCallOp.LHS.second = llvm::Constant::getNullValue(LHSr->getType());
StringRef LibCallName;
switch (LHSr->getType()->getTypeID()) {
default:
llvm_unreachable("Unsupported floating point type!");
case llvm::Type::HalfTyID:
return EmitComplexBinOpLibCall("__divhc3", LibCallOp);
case llvm::Type::FloatTyID:
return EmitComplexBinOpLibCall("__divsc3", LibCallOp);
case llvm::Type::DoubleTyID:
return EmitComplexBinOpLibCall("__divdc3", LibCallOp);
case llvm::Type::PPC_FP128TyID:
return EmitComplexBinOpLibCall("__divtc3", LibCallOp);
case llvm::Type::X86_FP80TyID:
return EmitComplexBinOpLibCall("__divxc3", LibCallOp);
case llvm::Type::FP128TyID:
return EmitComplexBinOpLibCall("__divtc3", LibCallOp);
}
}
assert(LHSi && "Can have at most one non-complex operand!");
DSTr = Builder.CreateFDiv(LHSr, RHSr);
DSTi = Builder.CreateFDiv(LHSi, RHSr);
} else {
assert(Op.LHS.second && Op.RHS.second &&
"Both operands of integer complex operators must be complex!");
// (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
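    // For example, (4+2i) / (1+1i): ac+bd = 6 and cc+dd = 2 give a real part
    // of 3; bc-ad = -2 gives an imaginary part of -1. Check: (3-1i) * (1+1i)
    // == 4+2i.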
llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr); // a*c
llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi); // b*d
llvm::Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2); // ac+bd
llvm::Value *Tmp4 = Builder.CreateMul(RHSr, RHSr); // c*c
llvm::Value *Tmp5 = Builder.CreateMul(RHSi, RHSi); // d*d
llvm::Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5); // cc+dd
llvm::Value *Tmp7 = Builder.CreateMul(LHSi, RHSr); // b*c
llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi); // a*d
llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8); // bc-ad
if (Op.Ty->castAs<ComplexType>()->getElementType()->isUnsignedIntegerType()) {
DSTr = Builder.CreateUDiv(Tmp3, Tmp6);
DSTi = Builder.CreateUDiv(Tmp9, Tmp6);
} else {
DSTr = Builder.CreateSDiv(Tmp3, Tmp6);
DSTi = Builder.CreateSDiv(Tmp9, Tmp6);
}
}
return ComplexPairTy(DSTr, DSTi);
}
ComplexExprEmitter::BinOpInfo
ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
BinOpInfo Ops;
if (E->getLHS()->getType()->isRealFloatingType())
Ops.LHS = ComplexPairTy(CGF.EmitScalarExpr(E->getLHS()), nullptr);
else
Ops.LHS = Visit(E->getLHS());
if (E->getRHS()->getType()->isRealFloatingType())
Ops.RHS = ComplexPairTy(CGF.EmitScalarExpr(E->getRHS()), nullptr);
else
Ops.RHS = Visit(E->getRHS());
Ops.Ty = E->getType();
return Ops;
}
LValue ComplexExprEmitter::
EmitCompoundAssignLValue(const CompoundAssignOperator *E,
ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&),
RValue &Val) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
QualType LHSTy = E->getLHS()->getType();
if (const AtomicType *AT = LHSTy->getAs<AtomicType>())
LHSTy = AT->getValueType();
BinOpInfo OpInfo;
// Load the RHS and LHS operands.
// __block variables need to have the rhs evaluated first, plus this should
// improve codegen a little.
OpInfo.Ty = E->getComputationResultType();
QualType ComplexElementTy = cast<ComplexType>(OpInfo.Ty)->getElementType();
// The RHS should have been converted to the computation type.
if (E->getRHS()->getType()->isRealFloatingType()) {
assert(
CGF.getContext()
.hasSameUnqualifiedType(ComplexElementTy, E->getRHS()->getType()));
OpInfo.RHS = ComplexPairTy(CGF.EmitScalarExpr(E->getRHS()), nullptr);
} else {
assert(CGF.getContext()
.hasSameUnqualifiedType(OpInfo.Ty, E->getRHS()->getType()));
OpInfo.RHS = Visit(E->getRHS());
}
LValue LHS = CGF.EmitLValue(E->getLHS());
// Load from the l-value and convert it.
if (LHSTy->isAnyComplexType()) {
ComplexPairTy LHSVal = EmitLoadOfLValue(LHS, E->getExprLoc());
OpInfo.LHS = EmitComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
} else {
llvm::Value *LHSVal = CGF.EmitLoadOfScalar(LHS, E->getExprLoc());
// For floating point real operands we can directly pass the scalar form
// to the binary operator emission and potentially get more efficient code.
if (LHSTy->isRealFloatingType()) {
if (!CGF.getContext().hasSameUnqualifiedType(ComplexElementTy, LHSTy))
LHSVal = CGF.EmitScalarConversion(LHSVal, LHSTy, ComplexElementTy);
OpInfo.LHS = ComplexPairTy(LHSVal, nullptr);
} else {
OpInfo.LHS = EmitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
}
}
// Expand the binary operator.
ComplexPairTy Result = (this->*Func)(OpInfo);
// Truncate the result and store it into the LHS lvalue.
if (LHSTy->isAnyComplexType()) {
ComplexPairTy ResVal = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
EmitStoreOfComplex(ResVal, LHS, /*isInit*/ false);
Val = RValue::getComplex(ResVal);
} else {
llvm::Value *ResVal =
CGF.EmitComplexToScalarConversion(Result, OpInfo.Ty, LHSTy);
CGF.EmitStoreOfScalar(ResVal, LHS, /*isInit*/ false);
Val = RValue::get(ResVal);
}
return LHS;
}
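// As a sketch of the scalar-LHS path above: given 'double d; d *= 2.0i;' the
// LHS is loaded as a scalar double, the multiply is performed in the
// computation type _Complex double, and the result is converted back to
// double (dropping the imaginary part) before being stored through the LHS.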
// Compound assignments.
ComplexPairTy ComplexExprEmitter::
EmitCompoundAssign(const CompoundAssignOperator *E,
ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
RValue Val;
LValue LV = EmitCompoundAssignLValue(E, Func, Val);
// The result of an assignment in C is the assigned r-value.
if (!CGF.getLangOpts().CPlusPlus)
return Val.getComplexVal();
// If the lvalue is non-volatile, return the computed value of the assignment.
if (!LV.isVolatileQualified())
return Val.getComplexVal();
return EmitLoadOfLValue(LV, E->getExprLoc());
}
LValue ComplexExprEmitter::EmitBinAssignLValue(const BinaryOperator *E,
ComplexPairTy &Val) {
assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
E->getRHS()->getType()) &&
"Invalid assignment");
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
// Emit the RHS. __block variables need the RHS evaluated first.
Val = Visit(E->getRHS());
// Compute the address to store into.
LValue LHS = CGF.EmitLValue(E->getLHS());
// Store the result value into the LHS lvalue.
EmitStoreOfComplex(Val, LHS, /*isInit*/ false);
return LHS;
}
ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
ComplexPairTy Val;
LValue LV = EmitBinAssignLValue(E, Val);
// The result of an assignment in C is the assigned r-value.
if (!CGF.getLangOpts().CPlusPlus)
return Val;
// If the lvalue is non-volatile, return the computed value of the assignment.
if (!LV.isVolatileQualified())
return Val;
return EmitLoadOfLValue(LV, E->getExprLoc());
}
ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
CGF.EmitIgnoredExpr(E->getLHS());
return Visit(E->getRHS());
}
ComplexPairTy ComplexExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
// Bind the common expression if necessary.
CodeGenFunction::OpaqueValueMapping binding(CGF, E);
CodeGenFunction::ConditionalEvaluation eval(CGF);
CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
CGF.getProfileCount(E));
eval.begin(CGF);
CGF.EmitBlock(LHSBlock);
CGF.incrementProfileCounter(E);
ComplexPairTy LHS = Visit(E->getTrueExpr());
LHSBlock = Builder.GetInsertBlock();
CGF.EmitBranch(ContBlock);
eval.end(CGF);
eval.begin(CGF);
CGF.EmitBlock(RHSBlock);
ComplexPairTy RHS = Visit(E->getFalseExpr());
RHSBlock = Builder.GetInsertBlock();
CGF.EmitBlock(ContBlock);
eval.end(CGF);
// Create a PHI node for the real part.
llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), 2, "cond.r");
RealPN->addIncoming(LHS.first, LHSBlock);
RealPN->addIncoming(RHS.first, RHSBlock);
// Create a PHI node for the imaginary part.
llvm::PHINode *ImagPN = Builder.CreatePHI(LHS.first->getType(), 2, "cond.i");
ImagPN->addIncoming(LHS.second, LHSBlock);
ImagPN->addIncoming(RHS.second, RHSBlock);
return ComplexPairTy(RealPN, ImagPN);
}
ComplexPairTy ComplexExprEmitter::VisitChooseExpr(ChooseExpr *E) {
return Visit(E->getChosenSubExpr());
}
ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
  bool Ignore = TestAndClearIgnoreReal();
  (void)Ignore;
  assert(!Ignore && "init list ignored");
  Ignore = TestAndClearIgnoreImag();
  (void)Ignore;
  assert(!Ignore && "init list ignored");
if (E->getNumInits() == 2) {
llvm::Value *Real = CGF.EmitScalarExpr(E->getInit(0));
llvm::Value *Imag = CGF.EmitScalarExpr(E->getInit(1));
return ComplexPairTy(Real, Imag);
} else if (E->getNumInits() == 1) {
return Visit(E->getInit(0));
}
  // An empty init list zero-initializes the complex value.
  assert(E->getNumInits() == 0 && "Unexpected number of inits");
  QualType Ty = E->getType()->castAs<ComplexType>()->getElementType();
  llvm::Type *LTy = CGF.ConvertType(Ty);
  llvm::Value *zeroConstant = llvm::Constant::getNullValue(LTy);
return ComplexPairTy(zeroConstant, zeroConstant);
}
ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
llvm::Value *ArgValue = CGF.EmitVAListRef(E->getSubExpr());
llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, E->getType());
if (!ArgPtr) {
CGF.ErrorUnsupported(E, "complex va_arg expression");
llvm::Type *EltTy =
CGF.ConvertType(E->getType()->castAs<ComplexType>()->getElementType());
llvm::Value *U = llvm::UndefValue::get(EltTy);
return ComplexPairTy(U, U);
}
return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(ArgPtr, E->getType()),
E->getExprLoc());
}
//===----------------------------------------------------------------------===//
// Entry Point into this File
//===----------------------------------------------------------------------===//
/// EmitComplexExpr - Emit the computation of the specified expression of
/// complex type, returning the result (the IgnoreReal/IgnoreImag flags allow
/// parts of it to be ignored).
ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal,
bool IgnoreImag) {
assert(E && getComplexType(E->getType()) &&
"Invalid complex expression to emit");
return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag)
.Visit(const_cast<Expr *>(E));
}
void CodeGenFunction::EmitComplexExprIntoLValue(const Expr *E, LValue dest,
bool isInit) {
assert(E && getComplexType(E->getType()) &&
"Invalid complex expression to emit");
ComplexExprEmitter Emitter(*this);
ComplexPairTy Val = Emitter.Visit(const_cast<Expr*>(E));
Emitter.EmitStoreOfComplex(Val, dest, isInit);
}
/// EmitStoreOfComplex - Store a complex number into the specified l-value.
void CodeGenFunction::EmitStoreOfComplex(ComplexPairTy V, LValue dest,
bool isInit) {
ComplexExprEmitter(*this).EmitStoreOfComplex(V, dest, isInit);
}
/// EmitLoadOfComplex - Load a complex number from the specified address.
ComplexPairTy CodeGenFunction::EmitLoadOfComplex(LValue src,
SourceLocation loc) {
return ComplexExprEmitter(*this).EmitLoadOfLValue(src, loc);
}
LValue CodeGenFunction::EmitComplexAssignmentLValue(const BinaryOperator *E) {
assert(E->getOpcode() == BO_Assign);
ComplexPairTy Val; // ignored
return ComplexExprEmitter(*this).EmitBinAssignLValue(E, Val);
}
typedef ComplexPairTy (ComplexExprEmitter::*CompoundFunc)(
const ComplexExprEmitter::BinOpInfo &);
static CompoundFunc getComplexOp(BinaryOperatorKind Op) {
switch (Op) {
case BO_MulAssign: return &ComplexExprEmitter::EmitBinMul;
case BO_DivAssign: return &ComplexExprEmitter::EmitBinDiv;
case BO_SubAssign: return &ComplexExprEmitter::EmitBinSub;
case BO_AddAssign: return &ComplexExprEmitter::EmitBinAdd;
default:
llvm_unreachable("unexpected complex compound assignment");
}
}
LValue CodeGenFunction::
EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E) {
CompoundFunc Op = getComplexOp(E->getOpcode());
RValue Val;
return ComplexExprEmitter(*this).EmitCompoundAssignLValue(E, Op, Val);
}
LValue CodeGenFunction::
EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
llvm::Value *&Result) {
CompoundFunc Op = getComplexOp(E->getOpcode());
RValue Val;
LValue Ret = ComplexExprEmitter(*this).EmitCompoundAssignLValue(E, Op, Val);
Result = Val.getScalarVal();
return Ret;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGDeclCXX.cpp | //===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ declarations
//
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Path.h"
using namespace clang;
using namespace CodeGen;
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
llvm::Constant *DeclPtr) {
assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
assert(!D.getType()->isReferenceType() &&
"Should not call EmitDeclInit on a reference!");
ASTContext &Context = CGF.getContext();
CharUnits alignment = Context.getDeclAlign(&D);
QualType type = D.getType();
LValue lv = CGF.MakeAddrLValue(DeclPtr, type, alignment);
const Expr *Init = D.getInit();
switch (CGF.getEvaluationKind(type)) {
case TEK_Scalar: {
CodeGenModule &CGM = CGF.CGM;
if (lv.isObjCStrong())
CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
DeclPtr, D.getTLSKind());
else if (lv.isObjCWeak())
CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
DeclPtr);
else
CGF.EmitScalarInit(Init, &D, lv, false);
return;
}
case TEK_Complex:
CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
return;
case TEK_Aggregate:
CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv,AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
return;
}
llvm_unreachable("bad evaluation kind");
}
/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
llvm::Constant *addr) {
CodeGenModule &CGM = CGF.CGM;
// FIXME: __attribute__((cleanup)) ?
QualType type = D.getType();
QualType::DestructionKind dtorKind = type.isDestructedType();
switch (dtorKind) {
case QualType::DK_none:
return;
case QualType::DK_cxx_destructor:
break;
case QualType::DK_objc_strong_lifetime:
case QualType::DK_objc_weak_lifetime:
// We don't care about releasing objects during process teardown.
assert(!D.getTLSKind() && "should have rejected this");
return;
}
llvm::Constant *function;
llvm::Constant *argument;
// Special-case non-array C++ destructors, where there's a function
// with the right signature that we can just call.
const CXXRecordDecl *record = nullptr;
if (dtorKind == QualType::DK_cxx_destructor &&
(record = type->getAsCXXRecordDecl())) {
assert(!record->hasTrivialDestructor());
CXXDestructorDecl *dtor = record->getDestructor();
function = CGM.getAddrOfCXXStructor(dtor, StructorType::Complete);
argument = llvm::ConstantExpr::getBitCast(
addr, CGF.getTypes().ConvertType(type)->getPointerTo());
// Otherwise, the standard logic requires a helper function.
} else {
function = CodeGenFunction(CGM)
.generateDestroyHelper(addr, type, CGF.getDestroyer(dtorKind),
CGF.needsEHCleanup(dtorKind), &D);
argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
}
CGM.getCXXABI().registerGlobalDtor(CGF, D, function, argument);
}
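// Schematically, for 'static Widget w;' the non-array case above ends up
// registering (through the C++ ABI; e.g. __cxa_atexit on Itanium) roughly:
//   __cxa_atexit((void (*)(void *))Widget::~Widget, (void *)&w, &__dso_handle);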
/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
llvm::Constant *Addr) {
// Don't emit the intrinsic if we're not optimizing.
if (!CGF.CGM.getCodeGenOpts().OptimizationLevel)
return;
// HLSL Change Begins.
  // Don't emit the intrinsic for HLSL.
  // Enabling this would require SROA_HLSL to support the intrinsic.
  // We will do that later, when HLSL supports the invariant marker.
if (CGF.CGM.getLangOpts().HLSL)
return;
// HLSL Change Ends.
// Grab the llvm.invariant.start intrinsic.
llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID);
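  // In this LLVM version the intrinsic has the signature (a sketch for
  // reference):
  //   declare {}* @llvm.invariant.start(i64 <size>, i8* nocapture <ptr>)
  // so a 4-byte constant global @g ends up with a call like:
  //   call {}* @llvm.invariant.start(i64 4, i8* bitcast (i32* @g to i8*))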
// Emit a call with the size in bytes of the object.
CharUnits WidthChars = CGF.getContext().getTypeSizeInChars(D.getType());
uint64_t Width = WidthChars.getQuantity();
llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(CGF.Int64Ty, Width),
llvm::ConstantExpr::getBitCast(Addr, CGF.Int8PtrTy)};
CGF.Builder.CreateCall(InvariantStart, Args);
}
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
llvm::Constant *DeclPtr,
bool PerformInit) {
const Expr *Init = D.getInit();
QualType T = D.getType();
// The address space of a static local variable (DeclPtr) may be different
// from the address space of the "this" argument of the constructor. In that
// case, we need an addrspacecast before calling the constructor.
//
// struct StructWithCtor {
// __device__ StructWithCtor() {...}
// };
// __device__ void foo() {
// __shared__ StructWithCtor s;
// ...
// }
//
// For example, in the above CUDA code, the static local variable s has a
// "shared" address space qualifier, but the constructor of StructWithCtor
// expects "this" in the "generic" address space.
unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T);
unsigned ActualAddrSpace = DeclPtr->getType()->getPointerAddressSpace();
if (ActualAddrSpace != ExpectedAddrSpace) {
llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(T);
llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
}
if (!T->isReferenceType()) {
if (getLangOpts().OpenMP && D.hasAttr<OMPThreadPrivateDeclAttr>())
(void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
&D, DeclPtr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
PerformInit, this);
if (PerformInit)
EmitDeclInit(*this, D, DeclPtr);
if (CGM.isTypeConstant(D.getType(), true))
EmitDeclInvariant(*this, D, DeclPtr);
else
EmitDeclDestroy(*this, D, DeclPtr);
return;
}
assert(PerformInit && "cannot have constant initializer which needs "
"destruction for reference");
unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
RValue RV = EmitReferenceBindingToExpr(Init);
EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
}
/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
llvm::Constant *CodeGenFunction::createAtExitStub(const VarDecl &VD,
llvm::Constant *dtor,
llvm::Constant *addr) {
// Get the destructor function type, void(*)(void).
llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
SmallString<256> FnName;
{
llvm::raw_svector_ostream Out(FnName);
CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
}
llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(ty, FnName.str(),
VD.getLocation());
CodeGenFunction CGF(CGM);
CGF.StartFunction(&VD, CGM.getContext().VoidTy, fn,
CGM.getTypes().arrangeNullaryFunction(), FunctionArgList());
llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);
// Make sure the call and the callee agree on calling convention.
if (llvm::Function *dtorFn =
dyn_cast<llvm::Function>(dtor->stripPointerCasts()))
call->setCallingConv(dtorFn->getCallingConv());
CGF.FinishFunction();
return fn;
}
/// Register a global destructor using the C atexit runtime function.
void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
llvm::Constant *dtor,
llvm::Constant *addr) {
// Create a function which calls the destructor.
llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
// extern "C" int atexit(void (*f)(void));
llvm::FunctionType *atexitTy =
llvm::FunctionType::get(IntTy, dtorStub->getType(), false);
llvm::Constant *atexit =
CGM.CreateRuntimeFunction(atexitTy, "atexit");
if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit))
atexitFn->setDoesNotThrow();
EmitNounwindRuntimeCall(atexit, dtorStub);
}
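// Schematically, for a global 'Foo f;' this emits (stub name illustrative;
// the real one comes from mangleDynamicAtExitDestructor):
//   define internal void @f.dtor.stub() {
//     call void @Foo.dtor(%struct.Foo* @f)
//     ret void
//   }
// and registers it with: call i32 @atexit(void ()* @f.dtor.stub)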
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
llvm::GlobalVariable *DeclPtr,
bool PerformInit) {
// If we've been asked to forbid guard variables, emit an error now.
// This diagnostic is hard-coded for Darwin's use case; we can find
// better phrasing if someone else needs it.
if (CGM.getCodeGenOpts().ForbidGuardVariables)
CGM.Error(D.getLocation(),
"this initialization requires a guard variable, which "
"the kernel does not support");
CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}
llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
llvm::FunctionType *FTy, const Twine &Name, SourceLocation Loc, bool TLS) {
llvm::Function *Fn =
llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
Name, &getModule());
if (!getLangOpts().AppleKext && !TLS) {
// Set the section if needed.
if (const char *Section = getTarget().getStaticInitSectionSpecifier())
Fn->setSection(Section);
}
SetLLVMFunctionAttributes(nullptr, getTypes().arrangeNullaryFunction(), Fn);
Fn->setCallingConv(getRuntimeCC());
if (!getLangOpts().Exceptions)
Fn->setDoesNotThrow();
if (!isInSanitizerBlacklist(Fn, Loc)) {
if (getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
SanitizerKind::KernelAddress))
Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
if (getLangOpts().Sanitize.has(SanitizerKind::Thread))
Fn->addFnAttr(llvm::Attribute::SanitizeThread);
if (getLangOpts().Sanitize.has(SanitizerKind::Memory))
Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack))
Fn->addFnAttr(llvm::Attribute::SafeStack);
}
return Fn;
}
/// Create a global pointer to a function that will initialize a global
/// variable. The user has requested that this pointer be emitted in a specific
/// section.
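///
/// For example (assuming the usual MSVC pragma mapping to InitSegAttr), a TU
/// containing
///   #pragma init_seg(".CRT$XCL")
///   int x = f();
/// gets a "__cxx_init_fn_ptr" pointing at x's initializer placed in the
/// ".CRT$XCL" section.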
void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
llvm::GlobalVariable *GV,
llvm::Function *InitFunc,
InitSegAttr *ISA) {
llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
TheModule, InitFunc->getType(), /*isConstant=*/true,
llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
PtrArray->setSection(ISA->getSection());
addUsedGlobal(PtrArray);
// If the GV is already in a comdat group, then we have to join it.
if (llvm::Comdat *C = GV->getComdat())
PtrArray->setComdat(C);
}
void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
llvm::GlobalVariable *Addr,
bool PerformInit) {
// Check if we've already initialized this decl.
auto I = DelayedCXXInitPosition.find(D);
if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
return;
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
SmallString<256> FnName;
{
llvm::raw_svector_ostream Out(FnName);
getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
}
// Create a variable initialization function.
llvm::Function *Fn =
CreateGlobalInitOrDestructFunction(FTy, FnName.str(), D->getLocation());
auto *ISA = D->getAttr<InitSegAttr>();
CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
PerformInit);
llvm::GlobalVariable *COMDATKey =
supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;
if (D->getTLSKind()) {
// FIXME: Should we support init_priority for thread_local?
// FIXME: Ideally, initialization of instantiated thread_local static data
// members of class templates should not trigger initialization of other
// entities in the TU.
// FIXME: We only need to register one __cxa_thread_atexit function for the
// entire TU.
CXXThreadLocalInits.push_back(Fn);
CXXThreadLocalInitVars.push_back(Addr);
} else if (PerformInit && ISA) {
EmitPointerToInitFunc(D, Addr, Fn, ISA);
} else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
OrderGlobalInits Key(IPA->getPriority(), PrioritizedCXXGlobalInits.size());
PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
} else if (isTemplateInstantiation(D->getTemplateSpecializationKind())) {
// C++ [basic.start.init]p2:
// Definitions of explicitly specialized class template static data
// members have ordered initialization. Other class template static data
// members (i.e., implicitly or explicitly instantiated specializations)
// have unordered initialization.
//
// As a consequence, we can put them into their own llvm.global_ctors entry.
//
// If the global is externally visible, put the initializer into a COMDAT
// group with the global being initialized. On most platforms, this is a
// minor startup time optimization. In the MS C++ ABI, there are no guard
// variables, so this COMDAT key is required for correctness.
AddGlobalCtor(Fn, 65535, COMDATKey);
} else if (D->hasAttr<SelectAnyAttr>()) {
// SelectAny globals will be comdat-folded. Put the initializer into a
// COMDAT group associated with the global, so the initializers get folded
// too.
AddGlobalCtor(Fn, 65535, COMDATKey);
} else {
I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
if (I == DelayedCXXInitPosition.end()) {
CXXGlobalInits.push_back(Fn);
} else if (I->second != ~0U) {
assert(I->second < CXXGlobalInits.size() &&
CXXGlobalInits[I->second] == nullptr);
CXXGlobalInits[I->second] = Fn;
}
}
// Remember that we already emitted the initializer for this global.
DelayedCXXInitPosition[D] = ~0U;
}
void CodeGenModule::EmitCXXThreadLocalInitFunc() {
getCXXABI().EmitThreadLocalInitFuncs(
*this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);
CXXThreadLocalInits.clear();
CXXThreadLocalInitVars.clear();
CXXThreadLocals.clear();
}
void
CodeGenModule::EmitCXXGlobalInitFunc() {
while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
CXXGlobalInits.pop_back();
if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
return;
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
// Create our global initialization function.
if (!PrioritizedCXXGlobalInits.empty()) {
SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
PrioritizedCXXGlobalInits.end());
    // Iterate over "chunks" of ctors with the same priority and emit each
    // chunk into a separate function. Note that everything is sorted first by
    // priority and second by lexical order, so we emit the ctor functions in
    // the proper order.
for (SmallVectorImpl<GlobalInitData >::iterator
I = PrioritizedCXXGlobalInits.begin(),
E = PrioritizedCXXGlobalInits.end(); I != E; ) {
SmallVectorImpl<GlobalInitData >::iterator
PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());
LocalCXXGlobalInits.clear();
unsigned Priority = I->first.priority;
      // Compute the function suffix from the priority. Prepend zeroes so that
      // the function names also sort in priority order.
std::string PrioritySuffix = llvm::utostr(Priority);
// Priority is always <= 65535 (enforced by sema).
PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
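      // E.g. priority 200 yields "_GLOBAL__I_000200", which sorts before
      // "_GLOBAL__I_065535" and before the "_GLOBAL__sub_I_<file>" function
      // emitted below.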
llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
FTy, "_GLOBAL__I_" + PrioritySuffix);
for (; I < PrioE; ++I)
LocalCXXGlobalInits.push_back(I->second);
CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
AddGlobalCtor(Fn, Priority);
}
PrioritizedCXXGlobalInits.clear();
}
SmallString<128> FileName;
SourceManager &SM = Context.getSourceManager();
if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
// Include the filename in the symbol name. Including "sub_" matches gcc and
// makes sure these symbols appear lexicographically behind the symbols with
// priority emitted above.
FileName = llvm::sys::path::filename(MainFile->getName());
} else {
FileName = "<null>";
}
for (size_t i = 0; i < FileName.size(); ++i) {
// Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
// to be the set of C preprocessing numbers.
if (!isPreprocessingNumberBody(FileName[i]))
FileName[i] = '_';
}
llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
FTy, llvm::Twine("_GLOBAL__sub_I_", FileName));
CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
AddGlobalCtor(Fn);
CXXGlobalInits.clear();
}
void CodeGenModule::EmitCXXGlobalDtorFunc() {
if (CXXGlobalDtors.empty())
return;
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
// Create our global destructor function.
llvm::Function *Fn = CreateGlobalInitOrDestructFunction(FTy, "_GLOBAL__D_a");
CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
AddGlobalDtor(Fn);
}
/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
llvm::GlobalVariable *Addr,
bool PerformInit) {
// Check if we need to emit debug info for variable initializer.
if (D->hasAttr<NoDebugAttr>())
DebugInfo = nullptr; // disable debug info indefinitely for this function
CurEHLocation = D->getLocStart();
StartFunction(GlobalDecl(D), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(),
FunctionArgList(), D->getLocation(),
D->getInit()->getExprLoc());
// Use guarded initialization if the global variable is weak. This
// occurs for, e.g., instantiated static data members and
// definitions explicitly marked weak.
if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage()) {
EmitCXXGuardedInit(*D, Addr, PerformInit);
} else {
EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
}
FinishFunction();
}
void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
ArrayRef<llvm::Function *> Decls,
llvm::GlobalVariable *Guard) {
{
auto NL = ApplyDebugLocation::CreateEmpty(*this);
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(), FunctionArgList());
// Emit an artificial location for this function.
auto AL = ApplyDebugLocation::CreateArtificial(*this);
llvm::BasicBlock *ExitBlock = nullptr;
if (Guard) {
// If we have a guard variable, check whether we've already performed
// these initializations. This happens for TLS initialization functions.
llvm::Value *GuardVal = Builder.CreateLoad(Guard);
llvm::Value *Uninit = Builder.CreateIsNull(GuardVal,
"guard.uninitialized");
// Mark as initialized before initializing anything else. If the
// initializers use previously-initialized thread_local vars, that's
// probably supposed to be OK, but the standard doesn't say.
Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(),1), Guard);
llvm::BasicBlock *InitBlock = createBasicBlock("init");
ExitBlock = createBasicBlock("exit");
Builder.CreateCondBr(Uninit, InitBlock, ExitBlock);
EmitBlock(InitBlock);
}
RunCleanupsScope Scope(*this);
// When building in Objective-C++ ARC mode, create an autorelease pool
// around the global initializers.
#if 0 // HLSL Change - no ObjC support
if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
llvm::Value *token = EmitObjCAutoreleasePoolPush();
EmitObjCAutoreleasePoolCleanup(token);
}
#endif // HLSL Change - no ObjC support
for (unsigned i = 0, e = Decls.size(); i != e; ++i)
if (Decls[i])
EmitRuntimeCall(Decls[i]);
Scope.ForceCleanup();
if (ExitBlock) {
Builder.CreateBr(ExitBlock);
EmitBlock(ExitBlock);
}
}
FinishFunction();
}
void CodeGenFunction::GenerateCXXGlobalDtorsFunc(
llvm::Function *Fn,
const std::vector<std::pair<llvm::WeakTrackingVH, llvm::Constant *>>
&DtorsAndObjects) {
{
auto NL = ApplyDebugLocation::CreateEmpty(*this);
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(), FunctionArgList());
// Emit an artificial location for this function.
auto AL = ApplyDebugLocation::CreateArtificial(*this);
// Emit the dtors, in reverse order from construction.
for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
llvm::Value *Callee = DtorsAndObjects[e - i - 1].first;
llvm::CallInst *CI = Builder.CreateCall(Callee,
DtorsAndObjects[e - i - 1].second);
// Make sure the call and the callee agree on calling convention.
if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
CI->setCallingConv(F->getCallingConv());
}
}
FinishFunction();
}
/// generateDestroyHelper - Generates a helper function which, when
/// invoked, destroys the given object.
llvm::Function *CodeGenFunction::generateDestroyHelper(
llvm::Constant *addr, QualType type, Destroyer *destroyer,
bool useEHCleanupForArray, const VarDecl *VD) {
FunctionArgList args;
ImplicitParamDecl dst(getContext(), nullptr, SourceLocation(), nullptr,
getContext().VoidPtrTy);
args.push_back(&dst);
const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
getContext().VoidTy, args, FunctionType::ExtInfo(), /*variadic=*/false);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
FTy, "__cxx_global_array_dtor", VD->getLocation());
CurEHLocation = VD->getLocStart();
StartFunction(VD, getContext().VoidTy, fn, FI, args);
emitDestroy(addr, type, destroyer, useEHCleanupForArray);
FinishFunction();
return fn;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CMakeLists.txt | set(HLSL_IGNORE_SOURCES CGObjCGNU.cpp CGObjCMac.cpp CGObjCRuntime.cpp CGOpenCLRuntime.cpp CGOpenMPRuntime.cpp)
set(LLVM_LINK_COMPONENTS
Analysis
BitReader
BitWriter
Core
DXIL
DxilRootSignature
DxcBindingTable
IPO
IRReader
InstCombine
# Instrumentation # HLSL Change
Linker
# MC # HLSL Change
# ObjCARCOpts # HLSL Change
# Object # HLSL Change
ProfileData
ScalarOpts
Support
Target
TransformUtils
)
# In a standard Clang+LLVM build, we need to generate intrinsics before
# building codegen. In a standalone build, LLVM is already built and we don't
# need this dependency. Furthermore, LLVM doesn't export it so we can't have
# this dependency.
set(codegen_deps intrinsics_gen)
if (CLANG_BUILT_STANDALONE)
set(codegen_deps)
endif()
add_clang_library(clangCodeGen
BackendUtil.cpp
CGAtomic.cpp
CGBlocks.cpp
CGBuiltin.cpp
CGCUDANV.cpp
CGCUDARuntime.cpp
CGCXX.cpp
CGCXXABI.cpp
CGCall.cpp
CGClass.cpp
CGCleanup.cpp
CGDebugInfo.cpp
CGDecl.cpp
CGDeclCXX.cpp
CGException.cpp
CGExpr.cpp
CGExprAgg.cpp
CGExprCXX.cpp
CGExprComplex.cpp
CGExprConstant.cpp
CGExprScalar.cpp
CGHLSLRuntime.cpp
CGHLSLMS.cpp
CGHLSLMSFinishCodeGen.cpp
CGHLSLRootSignature.cpp
CGLoopInfo.cpp
CGObjC.cpp
# CGObjCGNU.cpp # HLSL Change - no definitions used
# CGObjCMac.cpp # HLSL Change - no definitions used
# CGObjCRuntime.cpp # HLSL Change - no definitions used
# CGOpenCLRuntime.cpp # HLSL Change - no definitions used
# CGOpenMPRuntime.cpp # HLSL Change - no definitions used
CGRecordLayoutBuilder.cpp
CGStmt.cpp
CGStmtOpenMP.cpp
CGVTT.cpp
CGVTables.cpp
CodeGenABITypes.cpp
CodeGenAction.cpp
CodeGenFunction.cpp
CodeGenModule.cpp
CodeGenPGO.cpp
CodeGenTBAA.cpp
CodeGenTypes.cpp
CoverageMappingGen.cpp
ItaniumCXXABI.cpp
MicrosoftCXXABI.cpp
ModuleBuilder.cpp
ObjectFilePCHContainerOperations.cpp
SanitizerMetadata.cpp
TargetInfo.cpp
DEPENDS
${codegen_deps}
LINK_LIBS
clangAST
clangBasic
clangFrontend
clangLex
)
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGRecordLayout.h | //===--- CGRecordLayout.h - LLVM Record Layout Information ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGRECORDLAYOUT_H
#define LLVM_CLANG_LIB_CODEGEN_CGRECORDLAYOUT_H
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DerivedTypes.h"
namespace llvm {
class StructType;
}
namespace clang {
namespace CodeGen {
/// \brief Structure with information about how a bitfield should be accessed.
///
/// Often we lay out a sequence of bitfields as a contiguous sequence of bits.
/// When the AST record layout does this, we represent it in the LLVM IR's type
/// as either a sequence of i8 members or a byte array to reserve the number of
/// bytes touched without forcing any particular alignment beyond the basic
/// character alignment.
///
/// Then accessing a particular bitfield involves converting this byte array
/// into a single integer of that size (i24 or i40 -- may not be power-of-two
/// size), loading it, and shifting and masking to extract the particular
/// subsequence of bits which make up that particular bitfield. This structure
/// encodes the information used to construct the extraction code sequences.
/// The CGRecordLayout also has a field index which encodes which byte-sequence
/// this bitfield falls within. Let's assume the following C struct:
///
/// struct S {
/// char a, b, c;
/// unsigned bits : 3;
/// unsigned more_bits : 4;
/// unsigned still_more_bits : 7;
/// };
///
/// This will end up as the following LLVM type. The two i8 members at indices
/// 3 and 4 hold the bitfield storage, and the trailing array is the padding
/// out to a 4-byte alignment.
///
/// %t = type { i8, i8, i8, i8, i8, [3 x i8] }
///
/// When generating code to access more_bits, we'll generate something
/// essentially like this:
///
/// define i32 @foo(%t* %base) {
/// %0 = gep %t* %base, i32 0, i32 3
///   %1 = load i8* %0
///   %2 = lshr i8 %1, 3
///   %3 = and i8 %2, 15
///   %4 = zext i8 %3 to i32
///   ret i32 %4
/// }
///
struct CGBitFieldInfo {
/// The offset within a contiguous run of bitfields that are represented as
/// a single "field" within the LLVM struct type. This offset is in bits.
unsigned Offset : 16;
/// The total size of the bit-field, in bits.
unsigned Size : 15;
/// Whether the bit-field is signed.
unsigned IsSigned : 1;
/// The storage size in bits which should be used when accessing this
/// bitfield.
unsigned StorageSize;
/// The offset of the bitfield storage from the start of the struct.
CharUnits StorageOffset;
CGBitFieldInfo()
: Offset(), Size(), IsSigned(), StorageSize(), StorageOffset() {}
CGBitFieldInfo(unsigned Offset, unsigned Size, bool IsSigned,
unsigned StorageSize, CharUnits StorageOffset)
: Offset(Offset), Size(Size), IsSigned(IsSigned),
StorageSize(StorageSize), StorageOffset(StorageOffset) {}
void print(raw_ostream &OS) const;
void dump() const;
/// \brief Given a bit-field decl, build an appropriate helper object for
/// accessing that field (which is expected to have the given offset and
/// size).
static CGBitFieldInfo MakeInfo(class CodeGenTypes &Types,
const FieldDecl *FD,
uint64_t Offset, uint64_t Size,
uint64_t StorageSize,
CharUnits StorageOffset);
};
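// As a sketch (the exact values depend on how the record builder groups the
// run): for 'more_bits' in the example above, MakeInfo would produce roughly
// Offset = 3, Size = 4, IsSigned = 0, with StorageSize/StorageOffset
// describing the storage unit at byte offset 3 that holds the bitfield run.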
/// CGRecordLayout - This class handles struct and union layout info while
/// lowering AST types to LLVM types.
///
/// These layout objects are only created on demand as IR generation requires.
class CGRecordLayout {
friend class CodeGenTypes;
CGRecordLayout(const CGRecordLayout &) = delete;
void operator=(const CGRecordLayout &) = delete;
private:
/// The LLVM type corresponding to this record layout; used when
/// laying it out as a complete object.
llvm::StructType *CompleteObjectType;
/// The LLVM type for the non-virtual part of this record layout;
/// used when laying it out as a base subobject.
llvm::StructType *BaseSubobjectType;
/// Map from (non-bit-field) struct field to the corresponding llvm struct
/// type field no. This info is populated by record builder.
llvm::DenseMap<const FieldDecl *, unsigned> FieldInfo;
/// Map from (bit-field) struct field to the corresponding llvm struct type
/// field no. This info is populated by record builder.
llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
// FIXME: Maybe we could use a CXXBaseSpecifier as the key and use a single
// map for both virtual and non-virtual bases.
llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
/// Map from virtual bases to their field index in the complete object.
llvm::DenseMap<const CXXRecordDecl *, unsigned> CompleteObjectVirtualBases;
/// False if any direct or indirect subobject of this class, when
/// considered as a complete object, requires a non-zero bitpattern
/// when zero-initialized.
bool IsZeroInitializable : 1;
/// False if any direct or indirect subobject of this class, when
/// considered as a base subobject, requires a non-zero bitpattern
/// when zero-initialized.
bool IsZeroInitializableAsBase : 1;
public:
CGRecordLayout(llvm::StructType *CompleteObjectType,
llvm::StructType *BaseSubobjectType,
bool IsZeroInitializable,
bool IsZeroInitializableAsBase)
: CompleteObjectType(CompleteObjectType),
BaseSubobjectType(BaseSubobjectType),
IsZeroInitializable(IsZeroInitializable),
IsZeroInitializableAsBase(IsZeroInitializableAsBase) {}
/// \brief Return the "complete object" LLVM type associated with
/// this record.
llvm::StructType *getLLVMType() const {
return CompleteObjectType;
}
/// \brief Return the "base subobject" LLVM type associated with
/// this record.
llvm::StructType *getBaseSubobjectLLVMType() const {
return BaseSubobjectType;
}
/// \brief Check whether this struct can be C++ zero-initialized
/// with a zeroinitializer.
bool isZeroInitializable() const {
return IsZeroInitializable;
}
/// \brief Check whether this struct can be C++ zero-initialized
/// with a zeroinitializer when considered as a base subobject.
bool isZeroInitializableAsBase() const {
return IsZeroInitializableAsBase;
}
/// \brief Return llvm::StructType element number that corresponds to the
/// field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const {
FD = FD->getCanonicalDecl();
assert(FieldInfo.count(FD) && "Invalid field for record!");
return FieldInfo.lookup(FD);
}
unsigned getNonVirtualBaseLLVMFieldNo(const CXXRecordDecl *RD) const {
assert(NonVirtualBases.count(RD) && "Invalid non-virtual base!");
return NonVirtualBases.lookup(RD);
}
/// \brief Return the LLVM field index corresponding to the given
/// virtual base. Only valid when operating on the complete object.
unsigned getVirtualBaseIndex(const CXXRecordDecl *base) const {
assert(CompleteObjectVirtualBases.count(base) && "Invalid virtual base!");
return CompleteObjectVirtualBases.lookup(base);
}
/// \brief Return the BitFieldInfo that corresponds to the field FD.
const CGBitFieldInfo &getBitFieldInfo(const FieldDecl *FD) const {
FD = FD->getCanonicalDecl();
assert(FD->isBitField() && "Invalid call for non-bit-field decl!");
llvm::DenseMap<const FieldDecl *, CGBitFieldInfo>::const_iterator
it = BitFields.find(FD);
assert(it != BitFields.end() && "Unable to find bitfield info");
return it->second;
}
void print(raw_ostream &OS) const;
void dump() const;
};
} // end namespace CodeGen
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGCleanup.h | //===-- CGCleanup.h - Classes for cleanups IR generation --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes support the generation of LLVM IR for cleanups.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGCLEANUP_H
#define LLVM_CLANG_LIB_CODEGEN_CGCLEANUP_H
#include "EHScopeStack.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
namespace llvm {
class BasicBlock;
class Value;
class ConstantInt;
class AllocaInst;
}
namespace clang {
namespace CodeGen {
/// A protected scope for zero-cost EH handling.
class EHScope {
llvm::BasicBlock *CachedLandingPad;
llvm::BasicBlock *CachedEHDispatchBlock;
EHScopeStack::stable_iterator EnclosingEHScope;
class CommonBitFields {
friend class EHScope;
unsigned Kind : 2;
};
enum { NumCommonBits = 2 };
protected:
class CatchBitFields {
friend class EHCatchScope;
unsigned : NumCommonBits;
unsigned NumHandlers : 32 - NumCommonBits;
};
class CleanupBitFields {
friend class EHCleanupScope;
unsigned : NumCommonBits;
/// Whether this cleanup needs to be run along normal edges.
unsigned IsNormalCleanup : 1;
/// Whether this cleanup needs to be run along exception edges.
unsigned IsEHCleanup : 1;
/// Whether this cleanup is currently active.
unsigned IsActive : 1;
/// Whether this cleanup is a lifetime marker
unsigned IsLifetimeMarker : 1;
/// Whether the normal cleanup should test the activation flag.
unsigned TestFlagInNormalCleanup : 1;
/// Whether the EH cleanup should test the activation flag.
unsigned TestFlagInEHCleanup : 1;
/// The amount of extra storage needed by the Cleanup.
/// Always a multiple of the scope-stack alignment.
unsigned CleanupSize : 12;
/// The number of fixups required by enclosing scopes (not including
/// this one). If this is the top cleanup scope, all the fixups
/// from this index onwards belong to this scope.
    unsigned FixupDepth : 32 - 18 - NumCommonBits; // currently 12
};
class FilterBitFields {
friend class EHFilterScope;
unsigned : NumCommonBits;
unsigned NumFilters : 32 - NumCommonBits;
};
union {
CommonBitFields CommonBits;
CatchBitFields CatchBits;
CleanupBitFields CleanupBits;
FilterBitFields FilterBits;
};
public:
enum Kind { Cleanup, Catch, Terminate, Filter };
EHScope(Kind kind, EHScopeStack::stable_iterator enclosingEHScope)
: CachedLandingPad(nullptr), CachedEHDispatchBlock(nullptr),
EnclosingEHScope(enclosingEHScope) {
CommonBits.Kind = kind;
}
Kind getKind() const { return static_cast<Kind>(CommonBits.Kind); }
llvm::BasicBlock *getCachedLandingPad() const {
return CachedLandingPad;
}
void setCachedLandingPad(llvm::BasicBlock *block) {
CachedLandingPad = block;
}
llvm::BasicBlock *getCachedEHDispatchBlock() const {
return CachedEHDispatchBlock;
}
void setCachedEHDispatchBlock(llvm::BasicBlock *block) {
CachedEHDispatchBlock = block;
}
bool hasEHBranches() const {
if (llvm::BasicBlock *block = getCachedEHDispatchBlock())
return !block->use_empty();
return false;
}
EHScopeStack::stable_iterator getEnclosingEHScope() const {
return EnclosingEHScope;
}
};
/// A scope which attempts to handle some, possibly all, types of
/// exceptions.
///
/// Objective C \@finally blocks are represented using a cleanup scope
/// after the catch scope.
class EHCatchScope : public EHScope {
// In effect, we have a flexible array member
// Handler Handlers[0];
// But that's only standard in C99, not C++, so we have to do
// annoying pointer arithmetic instead.
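  // Concretely, callers allocate getSizeForNumHandlers(N) bytes, so the N
  // Handler records live immediately after the EHCatchScope object itself;
  // getHandlers() below just computes 'this + 1'.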
public:
struct Handler {
/// A type info value, or null (C++ null, not an LLVM null pointer)
/// for a catch-all.
llvm::Constant *Type;
/// The catch handler for this type.
llvm::BasicBlock *Block;
bool isCatchAll() const { return Type == nullptr; }
};
private:
friend class EHScopeStack;
Handler *getHandlers() {
return reinterpret_cast<Handler*>(this+1);
}
const Handler *getHandlers() const {
return reinterpret_cast<const Handler*>(this+1);
}
public:
static size_t getSizeForNumHandlers(unsigned N) {
return sizeof(EHCatchScope) + N * sizeof(Handler);
}
EHCatchScope(unsigned numHandlers,
EHScopeStack::stable_iterator enclosingEHScope)
: EHScope(Catch, enclosingEHScope) {
CatchBits.NumHandlers = numHandlers;
}
unsigned getNumHandlers() const {
return CatchBits.NumHandlers;
}
void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) {
setHandler(I, /*catchall*/ nullptr, Block);
}
void setHandler(unsigned I, llvm::Constant *Type, llvm::BasicBlock *Block) {
assert(I < getNumHandlers());
getHandlers()[I].Type = Type;
getHandlers()[I].Block = Block;
}
const Handler &getHandler(unsigned I) const {
assert(I < getNumHandlers());
return getHandlers()[I];
}
// Clear all handler blocks.
// FIXME: it's better to always call clearHandlerBlocks in DTOR and have a
// 'takeHandler' or some such function which removes ownership from the
// EHCatchScope object if the handlers should live longer than EHCatchScope.
void clearHandlerBlocks() {
for (unsigned I = 0, N = getNumHandlers(); I != N; ++I)
delete getHandler(I).Block;
}
typedef const Handler *iterator;
iterator begin() const { return getHandlers(); }
iterator end() const { return getHandlers() + getNumHandlers(); }
static bool classof(const EHScope *Scope) {
return Scope->getKind() == Catch;
}
};
/// A cleanup scope which generates the cleanup blocks lazily.
class EHCleanupScope : public EHScope {
/// The nearest normal cleanup scope enclosing this one.
EHScopeStack::stable_iterator EnclosingNormal;
/// The nearest EH scope enclosing this one.
EHScopeStack::stable_iterator EnclosingEH;
/// The dual entry/exit block along the normal edge. This is lazily
/// created if needed before the cleanup is popped.
llvm::BasicBlock *NormalBlock;
/// An optional i1 variable indicating whether this cleanup has been
/// activated yet.
llvm::AllocaInst *ActiveFlag;
/// Extra information required for cleanups that have resolved
/// branches through them. This has to be allocated on the side
  /// because everything on the cleanup stack has to be trivially
/// movable.
struct ExtInfo {
/// The destinations of normal branch-afters and branch-throughs.
llvm::SmallPtrSet<llvm::BasicBlock*, 4> Branches;
/// Normal branch-afters.
SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
BranchAfters;
};
mutable struct ExtInfo *ExtInfo;
struct ExtInfo &getExtInfo() {
if (!ExtInfo) ExtInfo = new struct ExtInfo();
return *ExtInfo;
}
const struct ExtInfo &getExtInfo() const {
if (!ExtInfo) ExtInfo = new struct ExtInfo();
return *ExtInfo;
}
public:
/// Gets the size required for a lazy cleanup scope with the given
/// cleanup-data requirements.
static size_t getSizeForCleanupSize(size_t Size) {
return sizeof(EHCleanupScope) + Size;
}
size_t getAllocatedSize() const {
return sizeof(EHCleanupScope) + CleanupBits.CleanupSize;
}
EHCleanupScope(bool isNormal, bool isEH, bool isActive,
unsigned cleanupSize, unsigned fixupDepth,
EHScopeStack::stable_iterator enclosingNormal,
EHScopeStack::stable_iterator enclosingEH)
: EHScope(EHScope::Cleanup, enclosingEH), EnclosingNormal(enclosingNormal),
NormalBlock(nullptr), ActiveFlag(nullptr), ExtInfo(nullptr) {
CleanupBits.IsNormalCleanup = isNormal;
CleanupBits.IsEHCleanup = isEH;
CleanupBits.IsActive = isActive;
CleanupBits.IsLifetimeMarker = false;
CleanupBits.TestFlagInNormalCleanup = false;
CleanupBits.TestFlagInEHCleanup = false;
CleanupBits.CleanupSize = cleanupSize;
CleanupBits.FixupDepth = fixupDepth;
assert(CleanupBits.CleanupSize == cleanupSize && "cleanup size overflow");
}
void Destroy() {
delete ExtInfo;
}
// Objects of EHCleanupScope are not destructed. Use Destroy().
~EHCleanupScope() = delete;
bool isNormalCleanup() const { return CleanupBits.IsNormalCleanup; }
llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; }
bool isEHCleanup() const { return CleanupBits.IsEHCleanup; }
bool isActive() const { return CleanupBits.IsActive; }
void setActive(bool A) { CleanupBits.IsActive = A; }
bool isLifetimeMarker() const { return CleanupBits.IsLifetimeMarker; }
void setLifetimeMarker() { CleanupBits.IsLifetimeMarker = true; }
llvm::AllocaInst *getActiveFlag() const { return ActiveFlag; }
void setActiveFlag(llvm::AllocaInst *Var) { ActiveFlag = Var; }
void setTestFlagInNormalCleanup() {
CleanupBits.TestFlagInNormalCleanup = true;
}
bool shouldTestFlagInNormalCleanup() const {
return CleanupBits.TestFlagInNormalCleanup;
}
void setTestFlagInEHCleanup() {
CleanupBits.TestFlagInEHCleanup = true;
}
bool shouldTestFlagInEHCleanup() const {
return CleanupBits.TestFlagInEHCleanup;
}
unsigned getFixupDepth() const { return CleanupBits.FixupDepth; }
EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
return EnclosingNormal;
}
size_t getCleanupSize() const { return CleanupBits.CleanupSize; }
void *getCleanupBuffer() { return this + 1; }
EHScopeStack::Cleanup *getCleanup() {
return reinterpret_cast<EHScopeStack::Cleanup*>(getCleanupBuffer());
}
/// True if this cleanup scope has any branch-afters or branch-throughs.
bool hasBranches() const { return ExtInfo && !ExtInfo->Branches.empty(); }
/// Add a branch-after to this cleanup scope. A branch-after is a
/// branch from a point protected by this (normal) cleanup to a
/// point in the normal cleanup scope immediately containing it.
/// For example,
/// for (;;) { A a; break; }
/// contains a branch-after.
///
/// Branch-afters each have their own destination out of the
/// cleanup, guaranteed distinct from anything else threaded through
/// it. Therefore branch-afters usually force a switch after the
/// cleanup.
void addBranchAfter(llvm::ConstantInt *Index,
llvm::BasicBlock *Block) {
struct ExtInfo &ExtInfo = getExtInfo();
if (ExtInfo.Branches.insert(Block).second)
ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index));
}
/// Return the number of unique branch-afters on this scope.
unsigned getNumBranchAfters() const {
return ExtInfo ? ExtInfo->BranchAfters.size() : 0;
}
llvm::BasicBlock *getBranchAfterBlock(unsigned I) const {
assert(I < getNumBranchAfters());
return ExtInfo->BranchAfters[I].first;
}
llvm::ConstantInt *getBranchAfterIndex(unsigned I) const {
assert(I < getNumBranchAfters());
return ExtInfo->BranchAfters[I].second;
}
/// Add a branch-through to this cleanup scope. A branch-through is
/// a branch from a scope protected by this (normal) cleanup to an
/// enclosing scope other than the immediately-enclosing normal
/// cleanup scope.
///
/// In the following example, the branch through B's scope is a
/// branch-through, while the branch through A's scope is a
/// branch-after:
/// for (;;) { A a; B b; break; }
///
/// All branch-throughs have a common destination out of the
/// cleanup, one possibly shared with the fall-through. Therefore
/// branch-throughs usually don't force a switch after the cleanup.
///
/// \return true if the branch-through was new to this scope
bool addBranchThrough(llvm::BasicBlock *Block) {
return getExtInfo().Branches.insert(Block).second;
}
/// Determines if this cleanup scope has any branch throughs.
bool hasBranchThroughs() const {
if (!ExtInfo) return false;
return (ExtInfo->BranchAfters.size() != ExtInfo->Branches.size());
}
static bool classof(const EHScope *Scope) {
return (Scope->getKind() == Cleanup);
}
};
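// To make the distinction concrete: when a normal cleanup with branch-afters
// is popped, its exit is expanded roughly as follows (schematic IR only; block
// and slot names are illustrative):
//
//   cleanup:                                   ; the NormalBlock
//     ; ...the Cleanup's Emit() output...
//     %dest = load i32, i32* %cleanup.dest.slot
//     switch i32 %dest, label %branch.through.dest [
//       i32 1, label %branch.after.0           ; index from addBranchAfter
//       i32 2, label %branch.after.1
//     ]
//
// All branch-throughs share the single default edge, which is why they do not
// by themselves force the switch.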
/// An exception scope which filters exceptions thrown through it.
/// Only exceptions matching the filter types will be permitted to be
/// thrown.
///
/// This is used to implement C++ exception specifications.
class EHFilterScope : public EHScope {
// Essentially ends in a flexible array member:
// llvm::Value *FilterTypes[0];
llvm::Value **getFilters() {
return reinterpret_cast<llvm::Value**>(this+1);
}
llvm::Value * const *getFilters() const {
return reinterpret_cast<llvm::Value* const *>(this+1);
}
public:
EHFilterScope(unsigned numFilters)
: EHScope(Filter, EHScopeStack::stable_end()) {
FilterBits.NumFilters = numFilters;
}
static size_t getSizeForNumFilters(unsigned numFilters) {
return sizeof(EHFilterScope) + numFilters * sizeof(llvm::Value*);
}
unsigned getNumFilters() const { return FilterBits.NumFilters; }
void setFilter(unsigned i, llvm::Value *filterValue) {
assert(i < getNumFilters());
getFilters()[i] = filterValue;
}
llvm::Value *getFilter(unsigned i) const {
assert(i < getNumFilters());
return getFilters()[i];
}
static bool classof(const EHScope *scope) {
return scope->getKind() == Filter;
}
};
/// An exception scope which calls std::terminate if any exception
/// reaches it.
class EHTerminateScope : public EHScope {
public:
EHTerminateScope(EHScopeStack::stable_iterator enclosingEHScope)
: EHScope(Terminate, enclosingEHScope) {}
static size_t getSize() { return sizeof(EHTerminateScope); }
static bool classof(const EHScope *scope) {
return scope->getKind() == Terminate;
}
};
/// A non-stable pointer into the scope stack.
class EHScopeStack::iterator {
char *Ptr;
friend class EHScopeStack;
explicit iterator(char *Ptr) : Ptr(Ptr) {}
public:
iterator() : Ptr(nullptr) {}
EHScope *get() const {
return reinterpret_cast<EHScope*>(Ptr);
}
EHScope *operator->() const { return get(); }
EHScope &operator*() const { return *get(); }
iterator &operator++() {
switch (get()->getKind()) {
case EHScope::Catch:
Ptr += EHCatchScope::getSizeForNumHandlers(
static_cast<const EHCatchScope*>(get())->getNumHandlers());
break;
case EHScope::Filter:
Ptr += EHFilterScope::getSizeForNumFilters(
static_cast<const EHFilterScope*>(get())->getNumFilters());
break;
case EHScope::Cleanup:
Ptr += static_cast<const EHCleanupScope*>(get())
->getAllocatedSize();
break;
case EHScope::Terminate:
Ptr += EHTerminateScope::getSize();
break;
}
return *this;
}
iterator next() {
iterator copy = *this;
++copy;
return copy;
}
iterator operator++(int) {
iterator copy = *this;
operator++();
return copy;
}
bool encloses(iterator other) const { return Ptr >= other.Ptr; }
bool strictlyEncloses(iterator other) const { return Ptr > other.Ptr; }
bool operator==(iterator other) const { return Ptr == other.Ptr; }
bool operator!=(iterator other) const { return Ptr != other.Ptr; }
};
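// A minimal sketch of walking the stack with this iterator (the same pattern
// EmitLandingPad uses); operator++ advances by each record's own size because
// the scope records are variable-length:
//
//   for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end();
//        I != E; ++I) {
//     if (isa<EHCleanupScope>(*I)) {
//       // innermost scopes are visited first
//     }
//   }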
inline EHScopeStack::iterator EHScopeStack::begin() const {
return iterator(StartOfData);
}
inline EHScopeStack::iterator EHScopeStack::end() const {
return iterator(EndOfBuffer);
}
inline void EHScopeStack::popCatch() {
assert(!empty() && "popping exception stack when not empty");
EHCatchScope &scope = cast<EHCatchScope>(*begin());
InnermostEHScope = scope.getEnclosingEHScope();
StartOfData += EHCatchScope::getSizeForNumHandlers(scope.getNumHandlers());
}
inline void EHScopeStack::popTerminate() {
assert(!empty() && "popping exception stack when not empty");
EHTerminateScope &scope = cast<EHTerminateScope>(*begin());
InnermostEHScope = scope.getEnclosingEHScope();
StartOfData += EHTerminateScope::getSize();
}
inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const {
assert(sp.isValid() && "finding invalid savepoint");
assert(sp.Size <= stable_begin().Size && "finding savepoint after pop");
return iterator(EndOfBuffer - sp.Size);
}
inline EHScopeStack::stable_iterator
EHScopeStack::stabilize(iterator ir) const {
assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer);
return stable_iterator(EndOfBuffer - ir.Ptr);
}
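// Because a stable_iterator is stored as an offset from EndOfBuffer rather
// than as a raw pointer, it stays valid across pushes that may reallocate the
// buffer. A sketch of the round-trip between the two iterator kinds:
//
//   EHScopeStack::stable_iterator sp = EHStack.stabilize(EHStack.begin());
//   // ...push more scopes; the buffer may move...
//   EHScope &scope = *EHStack.find(sp); // still names the same scope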
}
}
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGOpenCLRuntime.h | //===----- CGOpenCLRuntime.h - Interface to OpenCL Runtimes -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides an abstract class for OpenCL code generation. Concrete
// subclasses of this implement code generation for specific OpenCL
// runtime libraries.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENCLRUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGOPENCLRUNTIME_H
#include "clang/AST/Type.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
namespace clang {
class VarDecl;
namespace CodeGen {
class CodeGenFunction;
class CodeGenModule;
class CGOpenCLRuntime {
protected:
CodeGenModule &CGM;
public:
CGOpenCLRuntime(CodeGenModule &CGM) : CGM(CGM) {}
virtual ~CGOpenCLRuntime();
/// Emit the IR required for a work-group-local variable declaration, and add
/// an entry to CGF's LocalDeclMap for D. The base class does this using
/// CodeGenFunction::EmitStaticVarDecl to emit an internal global for D.
virtual void EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF,
const VarDecl &D);
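  // For example (an illustrative case, not extra API): for a kernel such as
  //   __kernel void k() { __local int tmp[64]; ... }
  // the base implementation of EmitWorkGroupLocalVarDecl lowers 'tmp' to an
  // internal global and records it in the LocalDeclMap, much as a C static
  // local would be emitted.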
virtual llvm::Type *convertOpenCLSpecificType(const Type *T);
};
}
}
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGObjCRuntime.cpp | //==- CGObjCRuntime.cpp - Interface to Shared Objective-C Runtime Features ==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This abstract class defines the interface for Objective-C runtime-specific
// code generation. It provides some concrete helper methods for functionality
// shared between all (or most) of the Objective-C runtimes supported by clang.
//
//===----------------------------------------------------------------------===//
#include "CGObjCRuntime.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/CallSite.h"
using namespace clang;
using namespace CodeGen;
// HLSL Change Starts
// No ObjC codegen support, so simply skip all of this compilation.
// Here are enough stubs to link the current targets.
#if 0
// HLSL Change Ends
static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
const ObjCInterfaceDecl *OID,
const ObjCImplementationDecl *ID,
const ObjCIvarDecl *Ivar) {
const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();
// FIXME: We should eliminate the need to have ObjCImplementationDecl passed
// in here; it should never be necessary because that should be the lexical
// decl context for the ivar.
  // If we have an implementation (and the ivar is in it), then look it up in
  // the implementation layout.
const ASTRecordLayout *RL;
if (ID && declaresSameEntity(ID->getClassInterface(), Container))
RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
else
RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);
// Compute field index.
//
// FIXME: The index here is closely tied to how ASTContext::getObjCLayout is
// implemented. This should be fixed to get the information from the layout
// directly.
unsigned Index = 0;
for (const ObjCIvarDecl *IVD = Container->all_declared_ivar_begin();
IVD; IVD = IVD->getNextIvar()) {
if (Ivar == IVD)
break;
++Index;
}
assert(Index < RL->getFieldCount() && "Ivar is not inside record layout!");
return RL->getFieldOffset(Index);
}
uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
const ObjCInterfaceDecl *OID,
const ObjCIvarDecl *Ivar) {
return LookupFieldBitOffset(CGM, OID, nullptr, Ivar) /
CGM.getContext().getCharWidth();
}
uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
const ObjCImplementationDecl *OID,
const ObjCIvarDecl *Ivar) {
return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) /
CGM.getContext().getCharWidth();
}
unsigned CGObjCRuntime::ComputeBitfieldBitOffset(
CodeGen::CodeGenModule &CGM,
const ObjCInterfaceDecl *ID,
const ObjCIvarDecl *Ivar) {
return LookupFieldBitOffset(CGM, ID, ID->getImplementation(), Ivar);
}
LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
const ObjCInterfaceDecl *OID,
llvm::Value *BaseValue,
const ObjCIvarDecl *Ivar,
unsigned CVRQualifiers,
llvm::Value *Offset) {
// Compute (type*) ( (char *) BaseValue + Offset)
QualType IvarTy = Ivar->getType();
llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");
if (!Ivar->isBitField()) {
V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
LV.getQuals().addCVRQualifiers(CVRQualifiers);
return LV;
}
// We need to compute an access strategy for this bit-field. We are given the
// offset to the first byte in the bit-field, the sub-byte offset is taken
// from the original layout. We reuse the normal bit-field access strategy by
// treating this as an access to a struct where the bit-field is in byte 0,
// and adjust the containing type size as appropriate.
//
// FIXME: Note that currently we make a very conservative estimate of the
// alignment of the bit-field, because (a) it is not clear what guarantees the
  // runtime makes to us, and (b) we don't have a way to specify that the struct is
// at an alignment plus offset.
//
  // Note, there is a subtle invariant here: LookupFieldBitOffset may only be
  // called on non-synthesized ivars, yet this routine may be called for
  // synthesized ivars. However, a synthesized ivar can never be a bit-field, so
  // it never reaches this point and the lookup below stays safe.
uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, nullptr, Ivar);
uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
uint64_t AlignmentBits = CGF.CGM.getTarget().getCharAlign();
uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
CharUnits StorageSize =
CGF.CGM.getContext().toCharUnitsFromBits(
llvm::RoundUpToAlignment(BitOffset + BitFieldSize, AlignmentBits));
CharUnits Alignment = CGF.CGM.getContext().toCharUnitsFromBits(AlignmentBits);
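  // A worked example of the arithmetic above (illustrative values): with
  // BitOffset = 3, BitFieldSize = 9, and AlignmentBits = 8, the storage must
  // cover RoundUpToAlignment(3 + 9, 8) = 16 bits, so StorageSize is 2 chars
  // and the access below is emitted through an i16* at char alignment.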
// Allocate a new CGBitFieldInfo object to describe this access.
//
// FIXME: This is incredibly wasteful, these should be uniqued or part of some
// layout object. However, this is blocked on other cleanups to the
// Objective-C code, so for now we just live with allocating a bunch of these
// objects.
CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
CGF.CGM.getContext().toBits(StorageSize),
CharUnits::fromQuantity(0)));
V = CGF.Builder.CreateBitCast(V,
llvm::Type::getIntNPtrTy(CGF.getLLVMContext(),
Info->StorageSize));
return LValue::MakeBitfield(V, *Info,
IvarTy.withCVRQualifiers(CVRQualifiers),
Alignment);
}
namespace {
struct CatchHandler {
const VarDecl *Variable;
const Stmt *Body;
llvm::BasicBlock *Block;
llvm::Constant *TypeInfo;
};
struct CallObjCEndCatch : EHScopeStack::Cleanup {
CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
MightThrow(MightThrow), Fn(Fn) {}
bool MightThrow;
llvm::Value *Fn;
void Emit(CodeGenFunction &CGF, Flags flags) override {
if (!MightThrow) {
CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
return;
}
CGF.EmitRuntimeCallOrInvoke(Fn);
}
};
}
void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
const ObjCAtTryStmt &S,
llvm::Constant *beginCatchFn,
llvm::Constant *endCatchFn,
llvm::Constant *exceptionRethrowFn) {
// Jump destination for falling out of catch bodies.
CodeGenFunction::JumpDest Cont;
if (S.getNumCatchStmts())
Cont = CGF.getJumpDestInCurrentScope("eh.cont");
CodeGenFunction::FinallyInfo FinallyInfo;
if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
FinallyInfo.enter(CGF, Finally->getFinallyBody(),
beginCatchFn, endCatchFn, exceptionRethrowFn);
SmallVector<CatchHandler, 8> Handlers;
// Enter the catch, if there is one.
if (S.getNumCatchStmts()) {
for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
Handlers.push_back(CatchHandler());
CatchHandler &Handler = Handlers.back();
Handler.Variable = CatchDecl;
Handler.Body = CatchStmt->getCatchBody();
Handler.Block = CGF.createBasicBlock("catch");
// @catch(...) always matches.
if (!CatchDecl) {
Handler.TypeInfo = nullptr; // catch-all
// Don't consider any other catches.
break;
}
Handler.TypeInfo = GetEHType(CatchDecl->getType());
}
EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
}
// Emit the try body.
CGF.EmitStmt(S.getTryBody());
// Leave the try.
if (S.getNumCatchStmts())
CGF.popCatchScope();
// Remember where we were.
CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
// Emit the handlers.
for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
CatchHandler &Handler = Handlers[I];
CGF.EmitBlock(Handler.Block);
llvm::Value *RawExn = CGF.getExceptionFromSlot();
// Enter the catch.
llvm::Value *Exn = RawExn;
if (beginCatchFn) {
Exn = CGF.Builder.CreateCall(beginCatchFn, RawExn, "exn.adjusted");
cast<llvm::CallInst>(Exn)->setDoesNotThrow();
}
CodeGenFunction::LexicalScope cleanups(CGF, Handler.Body->getSourceRange());
if (endCatchFn) {
// Add a cleanup to leave the catch.
bool EndCatchMightThrow = (Handler.Variable == nullptr);
CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
EndCatchMightThrow,
endCatchFn);
}
// Bind the catch parameter if it exists.
if (const VarDecl *CatchParam = Handler.Variable) {
llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
CGF.EmitAutoVarDecl(*CatchParam);
llvm::Value *CatchParamAddr = CGF.GetAddrOfLocalVar(CatchParam);
switch (CatchParam->getType().getQualifiers().getObjCLifetime()) {
case Qualifiers::OCL_Strong:
CastExn = CGF.EmitARCRetainNonBlock(CastExn);
// fallthrough
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
case Qualifiers::OCL_Autoreleasing:
CGF.Builder.CreateStore(CastExn, CatchParamAddr);
break;
case Qualifiers::OCL_Weak:
CGF.EmitARCInitWeak(CatchParamAddr, CastExn);
break;
}
}
CGF.ObjCEHValueStack.push_back(Exn);
CGF.EmitStmt(Handler.Body);
CGF.ObjCEHValueStack.pop_back();
// Leave any cleanups associated with the catch.
cleanups.ForceCleanup();
CGF.EmitBranchThroughCleanup(Cont);
}
// Go back to the try-statement fallthrough.
CGF.Builder.restoreIP(SavedIP);
// Pop out of the finally.
if (S.getFinallyStmt())
FinallyInfo.exit(CGF);
if (Cont.isValid())
CGF.EmitBlock(Cont.getBlock());
}
namespace {
struct CallSyncExit : EHScopeStack::Cleanup {
llvm::Value *SyncExitFn;
llvm::Value *SyncArg;
CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
: SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
}
};
}
void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
const ObjCAtSynchronizedStmt &S,
llvm::Function *syncEnterFn,
llvm::Function *syncExitFn) {
CodeGenFunction::RunCleanupsScope cleanups(CGF);
// Evaluate the lock operand. This is guaranteed to dominate the
// ARC release and lock-release cleanups.
const Expr *lockExpr = S.getSynchExpr();
llvm::Value *lock;
if (CGF.getLangOpts().ObjCAutoRefCount) {
lock = CGF.EmitARCRetainScalarExpr(lockExpr);
lock = CGF.EmitObjCConsumeObject(lockExpr->getType(), lock);
} else {
lock = CGF.EmitScalarExpr(lockExpr);
}
lock = CGF.Builder.CreateBitCast(lock, CGF.VoidPtrTy);
// Acquire the lock.
CGF.Builder.CreateCall(syncEnterFn, lock)->setDoesNotThrow();
// Register an all-paths cleanup to release the lock.
CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, syncExitFn, lock);
// Emit the body of the statement.
CGF.EmitStmt(S.getSynchBody());
}
/// Compute the pointer-to-function type to which a message send
/// should be cast in order to correctly call the given method
/// with the given arguments.
///
/// \param method - may be null
/// \param resultType - the result type to use if there's no method
/// \param callArgs - the actual arguments, including implicit ones
CGObjCRuntime::MessageSendInfo
CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
QualType resultType,
CallArgList &callArgs) {
// If there's a method, use information from that.
if (method) {
const CGFunctionInfo &signature =
CGM.getTypes().arrangeObjCMessageSendSignature(method, callArgs[0].Ty);
llvm::PointerType *signatureType =
CGM.getTypes().GetFunctionType(signature)->getPointerTo();
// If that's not variadic, there's no need to recompute the ABI
// arrangement.
if (!signature.isVariadic())
return MessageSendInfo(signature, signatureType);
// Otherwise, there is.
FunctionType::ExtInfo einfo = signature.getExtInfo();
const CGFunctionInfo &argsInfo =
CGM.getTypes().arrangeFreeFunctionCall(resultType, callArgs, einfo,
signature.getRequiredArgs());
return MessageSendInfo(argsInfo, signatureType);
}
// There's no method; just use a default CC.
const CGFunctionInfo &argsInfo =
CGM.getTypes().arrangeFreeFunctionCall(resultType, callArgs,
FunctionType::ExtInfo(),
RequiredArgs::All);
// Derive the signature to call from that.
llvm::PointerType *signatureType =
CGM.getTypes().GetFunctionType(argsInfo)->getPointerTo();
return MessageSendInfo(argsInfo, signatureType);
}
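// An illustrative case (hypothetical method, not extra code): for a variadic
// method like -(void)appendFormat:(NSString *)format, ..., signatureType is
// still derived from the declared prototype, while argsInfo re-arranges the
// ABI for the actual promoted arguments; for a non-variadic method the early
// return above makes the two coincide.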
#endif // HLSL Change
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGException.cpp | //===--- CGException.cpp - Emit LLVM Code for C++ exceptions --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ exception related code generation.
//
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGObjCRuntime.h"
#include "TargetInfo.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace CodeGen;
// HLSL Change Starts
// No exception codegen support, so simply skip all of this compilation.
// Here are enough stubs to link the current targets.
#if 1
// No GCC compat requirements, no notion of personality for HLSL.
void CodeGenModule::SimplifyPersonality() { }
// No exception support.
void CodeGenFunction::EmitStartEHSpec(const Decl *) { }
void CodeGenFunction::EmitEndEHSpec(const Decl *) { }
void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *, bool) {
llvm_unreachable("HLSL does not generate throw expressions");
}
llvm::Constant *CodeGenModule::getTerminateFn() {
llvm_unreachable("HLSL does not terminate");
return nullptr;
}
llvm::Value *CodeGenFunction::getExceptionFromSlot() {
llvm_unreachable("HLSL does not support exceptions");
return nullptr;
}
llvm::Value *CodeGenFunction::getSelectorFromSlot() {
llvm_unreachable("HLSL does not support exceptions");
return nullptr;
}
#else
// HLSL Change Ends
static llvm::Constant *getFreeExceptionFn(CodeGenModule &CGM) {
// void __cxa_free_exception(void *thrown_exception);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception");
}
static llvm::Constant *getUnexpectedFn(CodeGenModule &CGM) {
// void __cxa_call_unexpected(void *thrown_exception);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
return CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
}
llvm::Constant *CodeGenModule::getTerminateFn() {
// void __terminate();
llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, /*IsVarArgs=*/false);
StringRef name;
// In C++, use std::terminate().
if (getLangOpts().CPlusPlus &&
getTarget().getCXXABI().isItaniumFamily()) {
name = "_ZSt9terminatev";
} else if (getLangOpts().CPlusPlus &&
getTarget().getCXXABI().isMicrosoft()) {
if (getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015))
name = "__std_terminate";
else
name = "\01?terminate@@YAXXZ";
} else if (getLangOpts().ObjC1 &&
getLangOpts().ObjCRuntime.hasTerminate())
name = "objc_terminate";
else
name = "abort";
return CreateRuntimeFunction(FTy, name);
}
static llvm::Constant *getCatchallRethrowFn(CodeGenModule &CGM,
StringRef Name) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
return CGM.CreateRuntimeFunction(FTy, Name);
}
namespace {
/// The exceptions personality for a function.
struct EHPersonality {
const char *PersonalityFn;
// If this is non-null, this personality requires a non-standard
// function for rethrowing an exception after a catchall cleanup.
// This function must have prototype void(void*).
const char *CatchallRethrowFn;
static const EHPersonality &get(CodeGenModule &CGM,
const FunctionDecl *FD);
static const EHPersonality &get(CodeGenFunction &CGF) {
return get(CGF.CGM, dyn_cast_or_null<FunctionDecl>(CGF.CurCodeDecl));
}
static const EHPersonality GNU_C;
static const EHPersonality GNU_C_SJLJ;
static const EHPersonality GNU_C_SEH;
static const EHPersonality GNU_ObjC;
static const EHPersonality GNUstep_ObjC;
static const EHPersonality GNU_ObjCXX;
static const EHPersonality NeXT_ObjC;
static const EHPersonality GNU_CPlusPlus;
static const EHPersonality GNU_CPlusPlus_SJLJ;
static const EHPersonality GNU_CPlusPlus_SEH;
static const EHPersonality MSVC_except_handler;
static const EHPersonality MSVC_C_specific_handler;
static const EHPersonality MSVC_CxxFrameHandler3;
};
}
const EHPersonality EHPersonality::GNU_C = { "__gcc_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNU_C_SJLJ = { "__gcc_personality_sj0", nullptr };
const EHPersonality
EHPersonality::GNU_C_SEH = { "__gcc_personality_seh0", nullptr };
const EHPersonality
EHPersonality::NeXT_ObjC = { "__objc_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNU_CPlusPlus = { "__gxx_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNU_CPlusPlus_SJLJ = { "__gxx_personality_sj0", nullptr };
const EHPersonality
EHPersonality::GNU_CPlusPlus_SEH = { "__gxx_personality_seh0", nullptr };
const EHPersonality
EHPersonality::GNU_ObjC = {"__gnu_objc_personality_v0", "objc_exception_throw"};
const EHPersonality
EHPersonality::GNU_ObjCXX = { "__gnustep_objcxx_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNUstep_ObjC = { "__gnustep_objc_personality_v0", nullptr };
const EHPersonality
EHPersonality::MSVC_except_handler = { "_except_handler3", nullptr };
const EHPersonality
EHPersonality::MSVC_C_specific_handler = { "__C_specific_handler", nullptr };
const EHPersonality
EHPersonality::MSVC_CxxFrameHandler3 = { "__CxxFrameHandler3", nullptr };
/// On Win64, use libgcc's SEH personality function. We fall back to dwarf on
/// other platforms, unless the user asked for SjLj exceptions.
static bool useLibGCCSEHPersonality(const llvm::Triple &T) {
return T.isOSWindows() && T.getArch() == llvm::Triple::x86_64;
}
static const EHPersonality &getCPersonality(const llvm::Triple &T,
const LangOptions &L) {
if (L.SjLjExceptions)
return EHPersonality::GNU_C_SJLJ;
else if (useLibGCCSEHPersonality(T))
return EHPersonality::GNU_C_SEH;
return EHPersonality::GNU_C;
}
static const EHPersonality &getObjCPersonality(const llvm::Triple &T,
const LangOptions &L) {
switch (L.ObjCRuntime.getKind()) {
case ObjCRuntime::FragileMacOSX:
return getCPersonality(T, L);
case ObjCRuntime::MacOSX:
case ObjCRuntime::iOS:
return EHPersonality::NeXT_ObjC;
case ObjCRuntime::GNUstep:
if (L.ObjCRuntime.getVersion() >= VersionTuple(1, 7))
return EHPersonality::GNUstep_ObjC;
// fallthrough
case ObjCRuntime::GCC:
case ObjCRuntime::ObjFW:
return EHPersonality::GNU_ObjC;
}
llvm_unreachable("bad runtime kind");
}
static const EHPersonality &getCXXPersonality(const llvm::Triple &T,
const LangOptions &L) {
if (L.SjLjExceptions)
return EHPersonality::GNU_CPlusPlus_SJLJ;
else if (useLibGCCSEHPersonality(T))
return EHPersonality::GNU_CPlusPlus_SEH;
return EHPersonality::GNU_CPlusPlus;
}
/// Determines the personality function to use when both C++
/// and Objective-C exceptions are being caught.
static const EHPersonality &getObjCXXPersonality(const llvm::Triple &T,
const LangOptions &L) {
switch (L.ObjCRuntime.getKind()) {
// The ObjC personality defers to the C++ personality for non-ObjC
// handlers. Unlike the C++ case, we use the same personality
// function on targets using (backend-driven) SJLJ EH.
case ObjCRuntime::MacOSX:
case ObjCRuntime::iOS:
return EHPersonality::NeXT_ObjC;
// In the fragile ABI, just use C++ exception handling and hope
// they're not doing crazy exception mixing.
case ObjCRuntime::FragileMacOSX:
return getCXXPersonality(T, L);
// The GCC runtime's personality function inherently doesn't support
// mixed EH. Use the C++ personality just to avoid returning null.
case ObjCRuntime::GCC:
case ObjCRuntime::ObjFW: // XXX: this will change soon
return EHPersonality::GNU_ObjC;
case ObjCRuntime::GNUstep:
return EHPersonality::GNU_ObjCXX;
}
llvm_unreachable("bad runtime kind");
}
static const EHPersonality &getSEHPersonalityMSVC(const llvm::Triple &T) {
if (T.getArch() == llvm::Triple::x86)
return EHPersonality::MSVC_except_handler;
return EHPersonality::MSVC_C_specific_handler;
}
const EHPersonality &EHPersonality::get(CodeGenModule &CGM,
const FunctionDecl *FD) {
const llvm::Triple &T = CGM.getTarget().getTriple();
const LangOptions &L = CGM.getLangOpts();
// Try to pick a personality function that is compatible with MSVC if we're
// not compiling Obj-C. Obj-C users better have an Obj-C runtime that supports
// the GCC-style personality function.
if (T.isWindowsMSVCEnvironment() && !L.ObjC1) {
if (L.SjLjExceptions)
return EHPersonality::GNU_CPlusPlus_SJLJ;
else if (FD && FD->usesSEHTry())
return getSEHPersonalityMSVC(T);
else
return EHPersonality::MSVC_CxxFrameHandler3;
}
if (L.CPlusPlus && L.ObjC1)
return getObjCXXPersonality(T, L);
else if (L.CPlusPlus)
return getCXXPersonality(T, L);
else if (L.ObjC1)
return getObjCPersonality(T, L);
else
return getCPersonality(T, L);
}
static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
const EHPersonality &Personality) {
llvm::Constant *Fn =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, true),
Personality.PersonalityFn);
return Fn;
}
static llvm::Constant *getOpaquePersonalityFn(CodeGenModule &CGM,
const EHPersonality &Personality) {
llvm::Constant *Fn = getPersonalityFn(CGM, Personality);
return llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
}
/// Check whether a personality function could reasonably be swapped
/// for a C++ personality function.
static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
for (llvm::User *U : Fn->users()) {
// Conditionally white-list bitcasts.
if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(U)) {
if (CE->getOpcode() != llvm::Instruction::BitCast) return false;
if (!PersonalityHasOnlyCXXUses(CE))
return false;
continue;
}
// Otherwise, it has to be a landingpad instruction.
llvm::LandingPadInst *LPI = dyn_cast<llvm::LandingPadInst>(U);
if (!LPI) return false;
for (unsigned I = 0, E = LPI->getNumClauses(); I != E; ++I) {
// Look for something that would've been returned by the ObjC
// runtime's GetEHType() method.
llvm::Value *Val = LPI->getClause(I)->stripPointerCasts();
if (LPI->isCatch(I)) {
// Check if the catch value has the ObjC prefix.
if (llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Val))
// ObjC EH selector entries are always global variables with
// names starting like this.
if (GV->getName().startswith("OBJC_EHTYPE"))
return false;
} else {
// Check if any of the filter values have the ObjC prefix.
llvm::Constant *CVal = cast<llvm::Constant>(Val);
for (llvm::User::op_iterator
II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II) {
if (llvm::GlobalVariable *GV =
cast<llvm::GlobalVariable>((*II)->stripPointerCasts()))
// ObjC EH selector entries are always global variables with
// names starting like this.
if (GV->getName().startswith("OBJC_EHTYPE"))
return false;
}
}
}
}
return true;
}
/// Try to use the C++ personality function in ObjC++. Not doing this
/// can cause some incompatibilities with gcc, which is more
/// aggressive about only using the ObjC++ personality in a function
/// when it really needs it.
void CodeGenModule::SimplifyPersonality() {
// If we're not in ObjC++ -fexceptions, there's nothing to do.
if (!LangOpts.CPlusPlus || !LangOpts.ObjC1 || !LangOpts.Exceptions)
return;
// Both the problem this endeavors to fix and the way the logic
// above works is specific to the NeXT runtime.
if (!LangOpts.ObjCRuntime.isNeXTFamily())
return;
const EHPersonality &ObjCXX = EHPersonality::get(*this, /*FD=*/nullptr);
const EHPersonality &CXX =
getCXXPersonality(getTarget().getTriple(), LangOpts);
if (&ObjCXX == &CXX)
return;
assert(std::strcmp(ObjCXX.PersonalityFn, CXX.PersonalityFn) != 0 &&
"Different EHPersonalities using the same personality function.");
llvm::Function *Fn = getModule().getFunction(ObjCXX.PersonalityFn);
// Nothing to do if it's unused.
if (!Fn || Fn->use_empty()) return;
// Can't do the optimization if it has non-C++ uses.
if (!PersonalityHasOnlyCXXUses(Fn)) return;
// Create the C++ personality function and kill off the old
// function.
llvm::Constant *CXXFn = getPersonalityFn(*this, CXX);
// This can happen if the user is screwing with us.
if (Fn->getType() != CXXFn->getType()) return;
Fn->replaceAllUsesWith(CXXFn);
Fn->eraseFromParent();
}
/// Returns the value to inject into a selector to indicate the
/// presence of a catch-all.
static llvm::Constant *getCatchAllValue(CodeGenFunction &CGF) {
// Possibly we should use @llvm.eh.catch.all.value here.
return llvm::ConstantPointerNull::get(CGF.Int8PtrTy);
}
namespace {
/// A cleanup to free the exception object if its initialization
/// throws.
struct FreeException : EHScopeStack::Cleanup {
llvm::Value *exn;
FreeException(llvm::Value *exn) : exn(exn) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.EmitNounwindRuntimeCall(getFreeExceptionFn(CGF.CGM), exn);
}
};
}
// Emits an exception expression into the given location. This
// differs from EmitAnyExprToMem only in that, if a final copy-ctor
// call is required, an exception within that copy ctor causes
// std::terminate to be invoked.
void CodeGenFunction::EmitAnyExprToExn(const Expr *e, llvm::Value *addr) {
// Make sure the exception object is cleaned up if there's an
// exception during initialization.
pushFullExprCleanup<FreeException>(EHCleanup, addr);
EHScopeStack::stable_iterator cleanup = EHStack.stable_begin();
// __cxa_allocate_exception returns a void*; we need to cast this
// to the appropriate type for the object.
llvm::Type *ty = ConvertTypeForMem(e->getType())->getPointerTo();
llvm::Value *typedAddr = Builder.CreateBitCast(addr, ty);
// FIXME: this isn't quite right! If there's a final unelided call
// to a copy constructor, then according to [except.terminate]p1 we
// must call std::terminate() if that constructor throws, because
// technically that copy occurs after the exception expression is
// evaluated but before the exception is caught. But the best way
// to handle that is to teach EmitAggExpr to do the final copy
// differently if it can't be elided.
EmitAnyExprToMem(e, typedAddr, e->getType().getQualifiers(),
/*IsInit*/ true);
// Deactivate the cleanup block.
DeactivateCleanupBlock(cleanup, cast<llvm::Instruction>(typedAddr));
}
llvm::Value *CodeGenFunction::getExceptionSlot() {
if (!ExceptionSlot)
ExceptionSlot = CreateTempAlloca(Int8PtrTy, "exn.slot");
return ExceptionSlot;
}
llvm::Value *CodeGenFunction::getEHSelectorSlot() {
if (!EHSelectorSlot)
EHSelectorSlot = CreateTempAlloca(Int32Ty, "ehselector.slot");
return EHSelectorSlot;
}
llvm::Value *CodeGenFunction::getExceptionFromSlot() {
return Builder.CreateLoad(getExceptionSlot(), "exn");
}
llvm::Value *CodeGenFunction::getSelectorFromSlot() {
return Builder.CreateLoad(getEHSelectorSlot(), "sel");
}
void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E,
bool KeepInsertionPoint) {
if (const Expr *SubExpr = E->getSubExpr()) {
QualType ThrowType = SubExpr->getType();
if (ThrowType->isObjCObjectPointerType()) {
const Stmt *ThrowStmt = E->getSubExpr();
const ObjCAtThrowStmt S(E->getExprLoc(), const_cast<Stmt *>(ThrowStmt));
CGM.getObjCRuntime().EmitThrowStmt(*this, S, false);
} else {
CGM.getCXXABI().emitThrow(*this, E);
}
} else {
CGM.getCXXABI().emitRethrow(*this, /*isNoReturn=*/true);
}
// throw is an expression, and the expression emitters expect us
// to leave ourselves at a valid insertion point.
if (KeepInsertionPoint)
EmitBlock(createBasicBlock("throw.cont"));
}
void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
if (!CGM.getLangOpts().CXXExceptions)
return;
const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD) {
    // Check if the CapturedDecl is nothrow and create a terminate scope for it.
if (const CapturedDecl* CD = dyn_cast_or_null<CapturedDecl>(D)) {
if (CD->isNothrow())
EHStack.pushTerminate();
}
return;
}
const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
if (!Proto)
return;
ExceptionSpecificationType EST = Proto->getExceptionSpecType();
if (isNoexceptExceptionSpec(EST)) {
if (Proto->getNoexceptSpec(getContext()) == FunctionProtoType::NR_Nothrow) {
// noexcept functions are simple terminate scopes.
EHStack.pushTerminate();
}
} else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
// TODO: Revisit exception specifications for the MS ABI. There is a way to
// encode these in an object file but MSVC doesn't do anything with it.
if (getTarget().getCXXABI().isMicrosoft())
return;
unsigned NumExceptions = Proto->getNumExceptions();
EHFilterScope *Filter = EHStack.pushFilter(NumExceptions);
for (unsigned I = 0; I != NumExceptions; ++I) {
QualType Ty = Proto->getExceptionType(I);
QualType ExceptType = Ty.getNonReferenceType().getUnqualifiedType();
llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType,
/*ForEH=*/true);
Filter->setFilter(I, EHType);
}
}
}
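// To make EmitStartEHSpec's two interesting cases concrete (a sketch, assuming
// an Itanium-family target where dynamic specs are honored):
//
//   void f() noexcept;    // NR_Nothrow -> EHStack.pushTerminate()
//   void g() throw(A, B); // EST_Dynamic -> EHStack.pushFilter(2), with the
//                         // RTTI descriptors for A and B installed as filters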
/// Emit the dispatch block for a filter scope if necessary.
static void emitFilterDispatchBlock(CodeGenFunction &CGF,
EHFilterScope &filterScope) {
llvm::BasicBlock *dispatchBlock = filterScope.getCachedEHDispatchBlock();
if (!dispatchBlock) return;
if (dispatchBlock->use_empty()) {
delete dispatchBlock;
return;
}
CGF.EmitBlockAfterUses(dispatchBlock);
// If this isn't a catch-all filter, we need to check whether we got
// here because the filter triggered.
if (filterScope.getNumFilters()) {
// Load the selector value.
llvm::Value *selector = CGF.getSelectorFromSlot();
llvm::BasicBlock *unexpectedBB = CGF.createBasicBlock("ehspec.unexpected");
llvm::Value *zero = CGF.Builder.getInt32(0);
llvm::Value *failsFilter =
CGF.Builder.CreateICmpSLT(selector, zero, "ehspec.fails");
CGF.Builder.CreateCondBr(failsFilter, unexpectedBB,
CGF.getEHResumeBlock(false));
CGF.EmitBlock(unexpectedBB);
}
// Call __cxa_call_unexpected. This doesn't need to be an invoke
// because __cxa_call_unexpected magically filters exceptions
// according to the last landing pad the exception was thrown
// into. Seriously.
llvm::Value *exn = CGF.getExceptionFromSlot();
CGF.EmitRuntimeCall(getUnexpectedFn(CGF.CGM), exn)
->setDoesNotReturn();
CGF.Builder.CreateUnreachable();
}
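// Schematically, the dispatch emitted above looks like this (illustrative IR;
// names are approximate):
//
//   %sel = load i32, i32* %ehselector.slot
//   %ehspec.fails = icmp slt i32 %sel, 0 ; negative selector => filter failed
//   br i1 %ehspec.fails, label %ehspec.unexpected, label %eh.resume
// ehspec.unexpected:
//   call void @__cxa_call_unexpected(i8* %exn)
//   unreachable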
void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
if (!CGM.getLangOpts().CXXExceptions)
return;
const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD) {
    // Check if the CapturedDecl is nothrow and pop its terminate scope.
if (const CapturedDecl* CD = dyn_cast_or_null<CapturedDecl>(D)) {
if (CD->isNothrow())
EHStack.popTerminate();
}
return;
}
const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
if (!Proto)
return;
ExceptionSpecificationType EST = Proto->getExceptionSpecType();
if (isNoexceptExceptionSpec(EST)) {
if (Proto->getNoexceptSpec(getContext()) == FunctionProtoType::NR_Nothrow) {
EHStack.popTerminate();
}
} else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
// TODO: Revisit exception specifications for the MS ABI. There is a way to
// encode these in an object file but MSVC doesn't do anything with it.
if (getTarget().getCXXABI().isMicrosoft())
return;
EHFilterScope &filterScope = cast<EHFilterScope>(*EHStack.begin());
emitFilterDispatchBlock(*this, filterScope);
EHStack.popFilter();
}
}
void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
EnterCXXTryStmt(S);
EmitStmt(S.getTryBlock());
ExitCXXTryStmt(S);
}
void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
unsigned NumHandlers = S.getNumHandlers();
EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers);
for (unsigned I = 0; I != NumHandlers; ++I) {
const CXXCatchStmt *C = S.getHandler(I);
llvm::BasicBlock *Handler = createBasicBlock("catch");
if (C->getExceptionDecl()) {
      // FIXME: Dropping the reference type on the type info makes it
// impossible to correctly implement catch-by-reference
// semantics for pointers. Unfortunately, this is what all
// existing compilers do, and it's not clear that the standard
// personality routine is capable of doing this right. See C++ DR 388:
// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#388
Qualifiers CaughtTypeQuals;
QualType CaughtType = CGM.getContext().getUnqualifiedArrayType(
C->getCaughtType().getNonReferenceType(), CaughtTypeQuals);
llvm::Constant *TypeInfo = nullptr;
if (CaughtType->isObjCObjectPointerType())
TypeInfo = CGM.getObjCRuntime().GetEHType(CaughtType);
else
TypeInfo =
CGM.getAddrOfCXXCatchHandlerType(CaughtType, C->getCaughtType());
CatchScope->setHandler(I, TypeInfo, Handler);
} else {
// No exception decl indicates '...', a catch-all.
CatchScope->setCatchAllHandler(I, Handler);
}
}
}
llvm::BasicBlock *
CodeGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) {
// The dispatch block for the end of the scope chain is a block that
// just resumes unwinding.
if (si == EHStack.stable_end())
return getEHResumeBlock(true);
// Otherwise, we should look at the actual scope.
EHScope &scope = *EHStack.find(si);
llvm::BasicBlock *dispatchBlock = scope.getCachedEHDispatchBlock();
if (!dispatchBlock) {
switch (scope.getKind()) {
case EHScope::Catch: {
// Apply a special case to a single catch-all.
EHCatchScope &catchScope = cast<EHCatchScope>(scope);
if (catchScope.getNumHandlers() == 1 &&
catchScope.getHandler(0).isCatchAll()) {
dispatchBlock = catchScope.getHandler(0).Block;
// Otherwise, make a dispatch block.
} else {
dispatchBlock = createBasicBlock("catch.dispatch");
}
break;
}
case EHScope::Cleanup:
dispatchBlock = createBasicBlock("ehcleanup");
break;
case EHScope::Filter:
dispatchBlock = createBasicBlock("filter.dispatch");
break;
case EHScope::Terminate:
dispatchBlock = getTerminateHandler();
break;
}
scope.setCachedEHDispatchBlock(dispatchBlock);
}
return dispatchBlock;
}
/// Check whether this is a non-EH scope, i.e. a scope which doesn't
/// affect exception handling. Currently, the only non-EH scopes are
/// normal-only cleanup scopes.
static bool isNonEHScope(const EHScope &S) {
switch (S.getKind()) {
case EHScope::Cleanup:
return !cast<EHCleanupScope>(S).isEHCleanup();
case EHScope::Filter:
case EHScope::Catch:
case EHScope::Terminate:
return false;
}
llvm_unreachable("Invalid EHScope Kind!");
}
llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
assert(EHStack.requiresLandingPad());
assert(!EHStack.empty());
// If exceptions are disabled, there are usually no landingpads. However, when
// SEH is enabled, functions using SEH still get landingpads.
const LangOptions &LO = CGM.getLangOpts();
if (!LO.Exceptions) {
if (!LO.Borland && !LO.MicrosoftExt)
return nullptr;
if (!currentFunctionUsesSEHTry())
return nullptr;
}
// Check the innermost scope for a cached landing pad. If this is
// a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad.
llvm::BasicBlock *LP = EHStack.begin()->getCachedLandingPad();
if (LP) return LP;
// Build the landing pad for this scope.
LP = EmitLandingPad();
assert(LP);
// Cache the landing pad on the innermost scope. If this is a
// non-EH scope, cache the landing pad on the enclosing scope, too.
for (EHScopeStack::iterator ir = EHStack.begin(); true; ++ir) {
ir->setCachedLandingPad(LP);
if (!isNonEHScope(*ir)) break;
}
return LP;
}
llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
assert(EHStack.requiresLandingPad());
EHScope &innermostEHScope = *EHStack.find(EHStack.getInnermostEHScope());
switch (innermostEHScope.getKind()) {
case EHScope::Terminate:
return getTerminateLandingPad();
case EHScope::Catch:
case EHScope::Cleanup:
case EHScope::Filter:
if (llvm::BasicBlock *lpad = innermostEHScope.getCachedLandingPad())
return lpad;
}
// Save the current IR generation state.
CGBuilderTy::InsertPoint savedIP = Builder.saveAndClearIP();
auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, CurEHLocation);
const EHPersonality &personality = EHPersonality::get(*this);
if (!CurFn->hasPersonalityFn())
CurFn->setPersonalityFn(getOpaquePersonalityFn(CGM, personality));
// Create and configure the landing pad.
llvm::BasicBlock *lpad = createBasicBlock("lpad");
EmitBlock(lpad);
llvm::LandingPadInst *LPadInst = Builder.CreateLandingPad(
llvm::StructType::get(Int8PtrTy, Int32Ty, nullptr), 0);
llvm::Value *LPadExn = Builder.CreateExtractValue(LPadInst, 0);
Builder.CreateStore(LPadExn, getExceptionSlot());
llvm::Value *LPadSel = Builder.CreateExtractValue(LPadInst, 1);
Builder.CreateStore(LPadSel, getEHSelectorSlot());
// Save the exception pointer. It's safe to use a single exception
// pointer per function because EH cleanups can never have nested
// try/catches.
// Build the landingpad instruction.
// Accumulate all the handlers in scope.
bool hasCatchAll = false;
bool hasCleanup = false;
bool hasFilter = false;
SmallVector<llvm::Value*, 4> filterTypes;
llvm::SmallPtrSet<llvm::Value*, 4> catchTypes;
for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end(); I != E;
++I) {
switch (I->getKind()) {
case EHScope::Cleanup:
// If we have a cleanup, remember that.
hasCleanup = (hasCleanup || cast<EHCleanupScope>(*I).isEHCleanup());
continue;
case EHScope::Filter: {
assert(I.next() == EHStack.end() && "EH filter is not end of EH stack");
assert(!hasCatchAll && "EH filter reached after catch-all");
// Filter scopes get added to the landingpad in weird ways.
EHFilterScope &filter = cast<EHFilterScope>(*I);
hasFilter = true;
// Add all the filter values.
for (unsigned i = 0, e = filter.getNumFilters(); i != e; ++i)
filterTypes.push_back(filter.getFilter(i));
goto done;
}
case EHScope::Terminate:
// Terminate scopes are basically catch-alls.
assert(!hasCatchAll);
hasCatchAll = true;
goto done;
case EHScope::Catch:
break;
}
EHCatchScope &catchScope = cast<EHCatchScope>(*I);
for (unsigned hi = 0, he = catchScope.getNumHandlers(); hi != he; ++hi) {
EHCatchScope::Handler handler = catchScope.getHandler(hi);
// If this is a catch-all, register that and abort.
if (!handler.Type) {
assert(!hasCatchAll);
hasCatchAll = true;
goto done;
}
// Check whether we already have a handler for this type.
if (catchTypes.insert(handler.Type).second)
// If not, add it directly to the landingpad.
LPadInst->addClause(handler.Type);
}
}
done:
// If we have a catch-all, add null to the landingpad.
assert(!(hasCatchAll && hasFilter));
if (hasCatchAll) {
LPadInst->addClause(getCatchAllValue(*this));
// If we have an EH filter, we need to add those handlers in the
// right place in the landingpad, which is to say, at the end.
} else if (hasFilter) {
// Create a filter expression: a constant array indicating which filter
// types there are. The personality routine only lands here if the filter
// doesn't match.
SmallVector<llvm::Constant*, 8> Filters;
llvm::ArrayType *AType =
llvm::ArrayType::get(!filterTypes.empty() ?
filterTypes[0]->getType() : Int8PtrTy,
filterTypes.size());
for (unsigned i = 0, e = filterTypes.size(); i != e; ++i)
Filters.push_back(cast<llvm::Constant>(filterTypes[i]));
llvm::Constant *FilterArray = llvm::ConstantArray::get(AType, Filters);
LPadInst->addClause(FilterArray);
// Also check whether we need a cleanup.
if (hasCleanup)
LPadInst->setCleanup(true);
// Otherwise, signal that we at least have cleanups.
} else if (hasCleanup) {
LPadInst->setCleanup(true);
}
assert((LPadInst->getNumClauses() > 0 || LPadInst->isCleanup()) &&
"landingpad instruction has no clauses!");
// Tell the backend how to generate the landing pad.
Builder.CreateBr(getEHDispatchBlock(EHStack.getInnermostEHScope()));
// Restore the old IR generation state.
Builder.restoreIP(savedIP);
return lpad;
}
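// The overall shape of the landing pad built above, for a stack holding one EH
// cleanup plus one typed catch handler (schematic IR; the @_ZTI name stands in
// for the real RTTI global):
//
// lpad:
//   %lp = landingpad { i8*, i32 }
//           cleanup
//           catch i8* bitcast ({ ... }* @_ZTI_T to i8*)
//   %exn = extractvalue { i8*, i32 } %lp, 0
//   store i8* %exn, i8** %exn.slot
//   %sel = extractvalue { i8*, i32 } %lp, 1
//   store i32 %sel, i32* %ehselector.slot
//   br label %catch.dispatch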
/// Emit the structure of the dispatch block for the given catch scope.
/// It is an invariant that the dispatch block already exists.
static void emitCatchDispatchBlock(CodeGenFunction &CGF,
EHCatchScope &catchScope) {
llvm::BasicBlock *dispatchBlock = catchScope.getCachedEHDispatchBlock();
assert(dispatchBlock);
// If there's only a single catch-all, getEHDispatchBlock returned
// that catch-all as the dispatch block.
if (catchScope.getNumHandlers() == 1 &&
catchScope.getHandler(0).isCatchAll()) {
assert(dispatchBlock == catchScope.getHandler(0).Block);
return;
}
CGBuilderTy::InsertPoint savedIP = CGF.Builder.saveIP();
CGF.EmitBlockAfterUses(dispatchBlock);
// Select the right handler.
llvm::Value *llvm_eh_typeid_for =
CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
// Load the selector value.
llvm::Value *selector = CGF.getSelectorFromSlot();
// Test against each of the exception types we claim to catch.
for (unsigned i = 0, e = catchScope.getNumHandlers(); ; ++i) {
assert(i < e && "ran off end of handlers!");
const EHCatchScope::Handler &handler = catchScope.getHandler(i);
llvm::Value *typeValue = handler.Type;
assert(typeValue && "fell into catch-all case!");
typeValue = CGF.Builder.CreateBitCast(typeValue, CGF.Int8PtrTy);
// Figure out the next block.
bool nextIsEnd;
llvm::BasicBlock *nextBlock;
// If this is the last handler, we're at the end, and the next
// block is the block for the enclosing EH scope.
if (i + 1 == e) {
nextBlock = CGF.getEHDispatchBlock(catchScope.getEnclosingEHScope());
nextIsEnd = true;
// If the next handler is a catch-all, we're at the end, and the
// next block is that handler.
} else if (catchScope.getHandler(i+1).isCatchAll()) {
nextBlock = catchScope.getHandler(i+1).Block;
nextIsEnd = true;
// Otherwise, we're not at the end and we need a new block.
} else {
nextBlock = CGF.createBasicBlock("catch.fallthrough");
nextIsEnd = false;
}
// Figure out the catch type's index in the LSDA's type table.
llvm::CallInst *typeIndex =
CGF.Builder.CreateCall(llvm_eh_typeid_for, typeValue);
typeIndex->setDoesNotThrow();
llvm::Value *matchesTypeIndex =
CGF.Builder.CreateICmpEQ(selector, typeIndex, "matches");
CGF.Builder.CreateCondBr(matchesTypeIndex, handler.Block, nextBlock);
// If the next handler is a catch-all, we're completely done.
if (nextIsEnd) {
CGF.Builder.restoreIP(savedIP);
return;
}
// Otherwise we need to emit and continue at that block.
CGF.EmitBlock(nextBlock);
}
}
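// For two typed handlers, the chain built above is, schematically (block and
// @_ZTI names are illustrative):
//
// catch.dispatch:
//   %sel = load i32, i32* %ehselector.slot
//   %tid.a = call i32 @llvm.eh.typeid.for(i8* bitcast (... @_ZTI_A ...))
//   %matches.a = icmp eq i32 %sel, %tid.a
//   br i1 %matches.a, label %catch.a, label %catch.fallthrough
// catch.fallthrough:
//   %tid.b = call i32 @llvm.eh.typeid.for(i8* bitcast (... @_ZTI_B ...))
//   %matches.b = icmp eq i32 %sel, %tid.b
//   br i1 %matches.b, label %catch.b, label %enclosing.dispatch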
void CodeGenFunction::popCatchScope() {
EHCatchScope &catchScope = cast<EHCatchScope>(*EHStack.begin());
if (catchScope.hasEHBranches())
emitCatchDispatchBlock(*this, catchScope);
EHStack.popCatch();
}
void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
unsigned NumHandlers = S.getNumHandlers();
EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin());
assert(CatchScope.getNumHandlers() == NumHandlers);
// If the catch was not required, bail out now.
if (!CatchScope.hasEHBranches()) {
CatchScope.clearHandlerBlocks();
EHStack.popCatch();
return;
}
// Emit the structure of the EH dispatch for this catch.
emitCatchDispatchBlock(*this, CatchScope);
// Copy the handler blocks off before we pop the EH stack. Emitting
// the handlers might scribble on this memory.
SmallVector<EHCatchScope::Handler, 8> Handlers(NumHandlers);
memcpy(Handlers.data(), CatchScope.begin(),
NumHandlers * sizeof(EHCatchScope::Handler));
EHStack.popCatch();
// The fall-through block.
llvm::BasicBlock *ContBB = createBasicBlock("try.cont");
// We just emitted the body of the try; jump to the continue block.
if (HaveInsertPoint())
Builder.CreateBr(ContBB);
// Determine if we need an implicit rethrow for all these catch handlers;
// see the comment below.
bool doImplicitRethrow = false;
if (IsFnTryBlock)
doImplicitRethrow = isa<CXXDestructorDecl>(CurCodeDecl) ||
isa<CXXConstructorDecl>(CurCodeDecl);
// Perversely, we emit the handlers backwards precisely because we
// want them to appear in source order. In all of these cases, the
// catch block will have exactly one predecessor, which will be a
// particular block in the catch dispatch. However, in the case of
// a catch-all, one of the dispatch blocks will branch to two
// different handlers, and EmitBlockAfterUses will cause the second
// handler to be moved before the first.
for (unsigned I = NumHandlers; I != 0; --I) {
llvm::BasicBlock *CatchBlock = Handlers[I-1].Block;
EmitBlockAfterUses(CatchBlock);
// Catch the exception if this isn't a catch-all.
const CXXCatchStmt *C = S.getHandler(I-1);
// Enter a cleanup scope, including the catch variable and the
// end-catch.
RunCleanupsScope CatchScope(*this);
// Initialize the catch variable and set up the cleanups.
CGM.getCXXABI().emitBeginCatch(*this, C);
// Emit the PGO counter increment.
incrementProfileCounter(C);
// Perform the body of the catch.
EmitStmt(C->getHandlerBlock());
// [except.handle]p11:
// The currently handled exception is rethrown if control
// reaches the end of a handler of the function-try-block of a
// constructor or destructor.
// It is important that we only do this on fallthrough and not on
// return. Note that it's illegal to put a return in a
// constructor function-try-block's catch handler (p14), so this
// really only applies to destructors.
if (doImplicitRethrow && HaveInsertPoint()) {
CGM.getCXXABI().emitRethrow(*this, /*isNoReturn*/false);
Builder.CreateUnreachable();
Builder.ClearInsertionPoint();
}
// Fall out through the catch cleanups.
CatchScope.ForceCleanup();
// Branch out of the try.
if (HaveInsertPoint())
Builder.CreateBr(ContBB);
}
EmitBlock(ContBB);
incrementProfileCounter(&S);
}
namespace {
struct CallEndCatchForFinally : EHScopeStack::Cleanup {
llvm::Value *ForEHVar;
llvm::Value *EndCatchFn;
CallEndCatchForFinally(llvm::Value *ForEHVar, llvm::Value *EndCatchFn)
: ForEHVar(ForEHVar), EndCatchFn(EndCatchFn) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
llvm::BasicBlock *EndCatchBB = CGF.createBasicBlock("finally.endcatch");
llvm::BasicBlock *CleanupContBB =
CGF.createBasicBlock("finally.cleanup.cont");
llvm::Value *ShouldEndCatch =
CGF.Builder.CreateLoad(ForEHVar, "finally.endcatch");
CGF.Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
CGF.EmitBlock(EndCatchBB);
CGF.EmitRuntimeCallOrInvoke(EndCatchFn); // catch-all, so might throw
CGF.EmitBlock(CleanupContBB);
}
};
struct PerformFinally : EHScopeStack::Cleanup {
const Stmt *Body;
llvm::Value *ForEHVar;
llvm::Value *EndCatchFn;
llvm::Value *RethrowFn;
llvm::Value *SavedExnVar;
PerformFinally(const Stmt *Body, llvm::Value *ForEHVar,
llvm::Value *EndCatchFn,
llvm::Value *RethrowFn, llvm::Value *SavedExnVar)
: Body(Body), ForEHVar(ForEHVar), EndCatchFn(EndCatchFn),
RethrowFn(RethrowFn), SavedExnVar(SavedExnVar) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Enter a cleanup to call the end-catch function if one was provided.
if (EndCatchFn)
CGF.EHStack.pushCleanup<CallEndCatchForFinally>(NormalAndEHCleanup,
ForEHVar, EndCatchFn);
// Save the current cleanup destination in case there are
// cleanups in the finally block.
llvm::Value *SavedCleanupDest =
CGF.Builder.CreateLoad(CGF.getNormalCleanupDestSlot(),
"cleanup.dest.saved");
// Emit the finally block.
CGF.EmitStmt(Body);
// If the end of the finally is reachable, check whether this was
// for EH. If so, rethrow.
if (CGF.HaveInsertPoint()) {
llvm::BasicBlock *RethrowBB = CGF.createBasicBlock("finally.rethrow");
llvm::BasicBlock *ContBB = CGF.createBasicBlock("finally.cont");
llvm::Value *ShouldRethrow =
CGF.Builder.CreateLoad(ForEHVar, "finally.shouldthrow");
CGF.Builder.CreateCondBr(ShouldRethrow, RethrowBB, ContBB);
CGF.EmitBlock(RethrowBB);
if (SavedExnVar) {
CGF.EmitRuntimeCallOrInvoke(RethrowFn,
CGF.Builder.CreateLoad(SavedExnVar));
} else {
CGF.EmitRuntimeCallOrInvoke(RethrowFn);
}
CGF.Builder.CreateUnreachable();
CGF.EmitBlock(ContBB);
// Restore the cleanup destination.
CGF.Builder.CreateStore(SavedCleanupDest,
CGF.getNormalCleanupDestSlot());
}
// Leave the end-catch cleanup. As an optimization, pretend that
// the fallthrough path was inaccessible; we've dynamically proven
// that we're not in the EH case along that path.
if (EndCatchFn) {
CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
CGF.PopCleanupBlock();
CGF.Builder.restoreIP(SavedIP);
}
// Now make sure we actually have an insertion point or the
// cleanup gods will hate us.
CGF.EnsureInsertPoint();
}
};
}
/// Enters a finally block for an implementation using zero-cost
/// exceptions. This is mostly general, but hard-codes some
/// language/ABI-specific behavior in the catch-all sections.
void CodeGenFunction::FinallyInfo::enter(CodeGenFunction &CGF,
const Stmt *body,
llvm::Constant *beginCatchFn,
llvm::Constant *endCatchFn,
llvm::Constant *rethrowFn) {
assert((beginCatchFn != nullptr) == (endCatchFn != nullptr) &&
"begin/end catch functions not paired");
assert(rethrowFn && "rethrow function is required");
BeginCatchFn = beginCatchFn;
// The rethrow function has one of the following two types:
// void (*)()
// void (*)(void*)
// In the latter case we need to pass it the exception object.
// But we can't use the exception slot because the @finally might
// have a landing pad (which would overwrite the exception slot).
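  // Illustrative only (the function names here are placeholders, not real
  // runtime entry points) -- the two call shapes are roughly:
  //   call void @rethrow()            ; void (*)()
  //   call void @rethrow(i8* %exn)    ; void (*)(void*), uses SavedExnVar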
llvm::FunctionType *rethrowFnTy =
cast<llvm::FunctionType>(
cast<llvm::PointerType>(rethrowFn->getType())->getElementType());
SavedExnVar = nullptr;
if (rethrowFnTy->getNumParams())
SavedExnVar = CGF.CreateTempAlloca(CGF.Int8PtrTy, "finally.exn");
// A finally block is a statement which must be executed on any edge
// out of a given scope. Unlike a cleanup, the finally block may
// contain arbitrary control flow leading out of itself. In
// addition, finally blocks should always be executed, even if there
// are no catch handlers higher on the stack. Therefore, we
// surround the protected scope with a combination of a normal
// cleanup (to catch attempts to break out of the block via normal
// control flow) and an EH catch-all (semantically "outside" any try
// statement to which the finally block might have been attached).
// The finally block itself is generated in the context of a cleanup
// which conditionally leaves the catch-all.
// Jump destination for performing the finally block on an exception
// edge. We'll never actually reach this block, so unreachable is
// fine.
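  // A rough sketch of the resulting nesting (illustrative, not emitted
  // verbatim):
  //   push catch-all scope             ; EH edges from the body land here
  //     push normal cleanup            ; PerformFinally: runs the @finally body
  //       emit the protected statement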
RethrowDest = CGF.getJumpDestInCurrentScope(CGF.getUnreachableBlock());
// Whether the finally block is being executed for EH purposes.
ForEHVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "finally.for-eh");
CGF.Builder.CreateStore(CGF.Builder.getFalse(), ForEHVar);
// Enter a normal cleanup which will perform the @finally block.
CGF.EHStack.pushCleanup<PerformFinally>(NormalCleanup, body,
ForEHVar, endCatchFn,
rethrowFn, SavedExnVar);
// Enter a catch-all scope.
llvm::BasicBlock *catchBB = CGF.createBasicBlock("finally.catchall");
EHCatchScope *catchScope = CGF.EHStack.pushCatch(1);
catchScope->setCatchAllHandler(0, catchBB);
}
void CodeGenFunction::FinallyInfo::exit(CodeGenFunction &CGF) {
// Leave the finally catch-all.
EHCatchScope &catchScope = cast<EHCatchScope>(*CGF.EHStack.begin());
llvm::BasicBlock *catchBB = catchScope.getHandler(0).Block;
CGF.popCatchScope();
// If the catch-all block is unreferenced, delete it; otherwise emit it.
if (catchBB->use_empty()) {
delete catchBB;
} else {
CGBuilderTy::InsertPoint savedIP = CGF.Builder.saveAndClearIP();
CGF.EmitBlock(catchBB);
llvm::Value *exn = nullptr;
// If there's a begin-catch function, call it.
if (BeginCatchFn) {
exn = CGF.getExceptionFromSlot();
CGF.EmitNounwindRuntimeCall(BeginCatchFn, exn);
}
// If we need to remember the exception pointer to rethrow later, do so.
if (SavedExnVar) {
if (!exn) exn = CGF.getExceptionFromSlot();
CGF.Builder.CreateStore(exn, SavedExnVar);
}
// Tell the cleanups in the finally block that we're doing this for EH.
CGF.Builder.CreateStore(CGF.Builder.getTrue(), ForEHVar);
// Thread a jump through the finally cleanup.
CGF.EmitBranchThroughCleanup(RethrowDest);
CGF.Builder.restoreIP(savedIP);
}
// Finally, leave the @finally cleanup.
CGF.PopCleanupBlock();
}
llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
if (TerminateLandingPad)
return TerminateLandingPad;
CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
// This will get inserted at the end of the function.
TerminateLandingPad = createBasicBlock("terminate.lpad");
Builder.SetInsertPoint(TerminateLandingPad);
// Tell the backend that this is a landing pad.
const EHPersonality &Personality = EHPersonality::get(*this);
if (!CurFn->hasPersonalityFn())
CurFn->setPersonalityFn(getOpaquePersonalityFn(CGM, Personality));
llvm::LandingPadInst *LPadInst = Builder.CreateLandingPad(
llvm::StructType::get(Int8PtrTy, Int32Ty, nullptr), 0);
LPadInst->addClause(getCatchAllValue(*this));
llvm::Value *Exn = nullptr;
if (getLangOpts().CPlusPlus)
Exn = Builder.CreateExtractValue(LPadInst, 0);
llvm::CallInst *terminateCall =
CGM.getCXXABI().emitTerminateForUnexpectedException(*this, Exn);
terminateCall->setDoesNotReturn();
Builder.CreateUnreachable();
// Restore the saved insertion state.
Builder.restoreIP(SavedIP);
return TerminateLandingPad;
}
llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
if (TerminateHandler)
return TerminateHandler;
CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
// Set up the terminate handler. This block is inserted at the very
// end of the function by FinishFunction.
TerminateHandler = createBasicBlock("terminate.handler");
Builder.SetInsertPoint(TerminateHandler);
llvm::Value *Exn = nullptr;
if (getLangOpts().CPlusPlus)
Exn = getExceptionFromSlot();
llvm::CallInst *terminateCall =
CGM.getCXXABI().emitTerminateForUnexpectedException(*this, Exn);
terminateCall->setDoesNotReturn();
Builder.CreateUnreachable();
// Restore the saved insertion state.
Builder.restoreIP(SavedIP);
return TerminateHandler;
}
llvm::BasicBlock *CodeGenFunction::getEHResumeBlock(bool isCleanup) {
if (EHResumeBlock) return EHResumeBlock;
CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();
// We emit a jump to a notional label at the outermost unwind state.
EHResumeBlock = createBasicBlock("eh.resume");
Builder.SetInsertPoint(EHResumeBlock);
const EHPersonality &Personality = EHPersonality::get(*this);
// This can always be a call because we necessarily didn't find
// anything on the EH stack which needs our help.
const char *RethrowName = Personality.CatchallRethrowFn;
if (RethrowName != nullptr && !isCleanup) {
EmitRuntimeCall(getCatchallRethrowFn(CGM, RethrowName),
getExceptionFromSlot())->setDoesNotReturn();
Builder.CreateUnreachable();
Builder.restoreIP(SavedIP);
return EHResumeBlock;
}
// Recreate the landingpad's return value for the 'resume' instruction.
llvm::Value *Exn = getExceptionFromSlot();
llvm::Value *Sel = getSelectorFromSlot();
llvm::Type *LPadType = llvm::StructType::get(Exn->getType(),
Sel->getType(), nullptr);
llvm::Value *LPadVal = llvm::UndefValue::get(LPadType);
LPadVal = Builder.CreateInsertValue(LPadVal, Exn, 0, "lpad.val");
LPadVal = Builder.CreateInsertValue(LPadVal, Sel, 1, "lpad.val");
Builder.CreateResume(LPadVal);
Builder.restoreIP(SavedIP);
return EHResumeBlock;
}
void CodeGenFunction::EmitSEHTryStmt(const SEHTryStmt &S) {
EnterSEHTryStmt(S);
{
JumpDest TryExit = getJumpDestInCurrentScope("__try.__leave");
SEHTryEpilogueStack.push_back(&TryExit);
EmitStmt(S.getTryBlock());
SEHTryEpilogueStack.pop_back();
if (!TryExit.getBlock()->use_empty())
EmitBlock(TryExit.getBlock(), /*IsFinished=*/true);
else
delete TryExit.getBlock();
}
ExitSEHTryStmt(S);
}
namespace {
struct PerformSEHFinally : EHScopeStack::Cleanup {
llvm::Function *OutlinedFinally;
PerformSEHFinally(llvm::Function *OutlinedFinally)
: OutlinedFinally(OutlinedFinally) {}
void Emit(CodeGenFunction &CGF, Flags F) override {
ASTContext &Context = CGF.getContext();
CodeGenModule &CGM = CGF.CGM;
CallArgList Args;
// Compute the two argument values.
QualType ArgTys[2] = {Context.UnsignedCharTy, Context.VoidPtrTy};
llvm::Value *LocalAddrFn = CGM.getIntrinsic(llvm::Intrinsic::localaddress);
llvm::Value *FP = CGF.Builder.CreateCall(LocalAddrFn);
llvm::Value *IsForEH =
llvm::ConstantInt::get(CGF.ConvertType(ArgTys[0]), F.isForEHCleanup());
Args.add(RValue::get(IsForEH), ArgTys[0]);
Args.add(RValue::get(FP), ArgTys[1]);
// Arrange a two-arg function info and type.
FunctionProtoType::ExtProtoInfo EPI;
const auto *FPT = cast<FunctionProtoType>(
Context.getFunctionType(Context.VoidTy, ArgTys, EPI));
const CGFunctionInfo &FnInfo =
CGM.getTypes().arrangeFreeFunctionCall(Args, FPT,
/*chainCall=*/false);
CGF.EmitCall(FnInfo, OutlinedFinally, ReturnValueSlot(), Args);
}
};
}
namespace {
/// Find all local variable captures in the statement.
struct CaptureFinder : ConstStmtVisitor<CaptureFinder> {
CodeGenFunction &ParentCGF;
const VarDecl *ParentThis;
SmallVector<const VarDecl *, 4> Captures;
llvm::Value *SEHCodeSlot = nullptr;
CaptureFinder(CodeGenFunction &ParentCGF, const VarDecl *ParentThis)
: ParentCGF(ParentCGF), ParentThis(ParentThis) {}
// Return true if we need to do any capturing work.
bool foundCaptures() {
return !Captures.empty() || SEHCodeSlot;
}
void Visit(const Stmt *S) {
// See if this is a capture, then recurse.
ConstStmtVisitor<CaptureFinder>::Visit(S);
for (const Stmt *Child : S->children())
if (Child)
Visit(Child);
}
void VisitDeclRefExpr(const DeclRefExpr *E) {
// If this is already a capture, just make sure we capture 'this'.
if (E->refersToEnclosingVariableOrCapture()) {
Captures.push_back(ParentThis);
return;
}
const auto *D = dyn_cast<VarDecl>(E->getDecl());
if (D && D->isLocalVarDeclOrParm() && D->hasLocalStorage())
Captures.push_back(D);
}
void VisitCXXThisExpr(const CXXThisExpr *E) {
Captures.push_back(ParentThis);
}
void VisitCallExpr(const CallExpr *E) {
// We only need to add parent frame allocations for these builtins on x86.
if (ParentCGF.getTarget().getTriple().getArch() != llvm::Triple::x86)
return;
unsigned ID = E->getBuiltinCallee();
switch (ID) {
case Builtin::BI__exception_code:
case Builtin::BI_exception_code:
// This is the simple case where we are the outermost finally. All we
// have to do here is make sure we escape this and recover it in the
// outlined handler.
if (!SEHCodeSlot)
SEHCodeSlot = ParentCGF.SEHCodeSlotStack.back();
break;
}
}
};
}
llvm::Value *CodeGenFunction::recoverAddrOfEscapedLocal(
CodeGenFunction &ParentCGF, llvm::Value *ParentVar, llvm::Value *ParentFP) {
llvm::CallInst *RecoverCall = nullptr;
CGBuilderTy Builder(AllocaInsertPt);
if (auto *ParentAlloca = dyn_cast<llvm::AllocaInst>(ParentVar)) {
// Mark the variable escaped if nobody else referenced it and compute the
// localescape index.
auto InsertPair = ParentCGF.EscapedLocals.insert(
std::make_pair(ParentAlloca, ParentCGF.EscapedLocals.size()));
int FrameEscapeIdx = InsertPair.first->second;
// call i8* @llvm.localrecover(i8* bitcast(@parentFn), i8* %fp, i32 N)
llvm::Function *FrameRecoverFn = llvm::Intrinsic::getDeclaration(
&CGM.getModule(), llvm::Intrinsic::localrecover);
llvm::Constant *ParentI8Fn =
llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
RecoverCall = Builder.CreateCall(
FrameRecoverFn, {ParentI8Fn, ParentFP,
llvm::ConstantInt::get(Int32Ty, FrameEscapeIdx)});
} else {
// If the parent didn't have an alloca, we're doing some nested outlining.
// Just clone the existing localrecover call, but tweak the FP argument to
// use our FP value. All other arguments are constants.
auto *ParentRecover =
cast<llvm::IntrinsicInst>(ParentVar->stripPointerCasts());
assert(ParentRecover->getIntrinsicID() == llvm::Intrinsic::localrecover &&
"expected alloca or localrecover in parent LocalDeclMap");
RecoverCall = cast<llvm::CallInst>(ParentRecover->clone());
RecoverCall->setArgOperand(1, ParentFP);
RecoverCall->insertBefore(AllocaInsertPt);
}
// Bitcast the variable, rename it, and insert it in the local decl map.
llvm::Value *ChildVar =
Builder.CreateBitCast(RecoverCall, ParentVar->getType());
ChildVar->setName(ParentVar->getName());
return ChildVar;
}
void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
const Stmt *OutlinedStmt,
bool IsFilter) {
// Find all captures in the Stmt.
CaptureFinder Finder(ParentCGF, ParentCGF.CXXABIThisDecl);
Finder.Visit(OutlinedStmt);
// We can exit early on x86_64 when there are no captures. We just have to
// save the exception code in filters so that __exception_code() works.
if (!Finder.foundCaptures() &&
CGM.getTarget().getTriple().getArch() != llvm::Triple::x86) {
if (IsFilter)
EmitSEHExceptionCodeSave(ParentCGF, nullptr, nullptr);
return;
}
llvm::Value *EntryEBP = nullptr;
llvm::Value *ParentFP;
if (IsFilter && CGM.getTarget().getTriple().getArch() == llvm::Triple::x86) {
// 32-bit SEH filters need to be careful about FP recovery. The end of the
// EH registration is passed in as the EBP physical register. We can
// recover that with llvm.frameaddress(1), and adjust that to recover the
// parent's true frame pointer.
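    // Illustrative IR for the recovery sequence (value names are
    // placeholders):
    //   %ebp = call i8* @llvm.frameaddress(i32 1)
    //   %fp  = call i8* @llvm.x86.seh.recoverfp(i8* bitcast (@parent to i8*),
    //                                           i8* %ebp)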
CGBuilderTy Builder(AllocaInsertPt);
EntryEBP = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::frameaddress), {Builder.getInt32(1)});
llvm::Function *RecoverFPIntrin =
CGM.getIntrinsic(llvm::Intrinsic::x86_seh_recoverfp);
llvm::Constant *ParentI8Fn =
llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
ParentFP = Builder.CreateCall(RecoverFPIntrin, {ParentI8Fn, EntryEBP});
} else {
// Otherwise, for x64 and 32-bit finally functions, the parent FP is the
// second parameter.
auto AI = CurFn->arg_begin();
++AI;
ParentFP = AI;
}
// Create llvm.localrecover calls for all captures.
for (const VarDecl *VD : Finder.Captures) {
if (isa<ImplicitParamDecl>(VD)) {
CGM.ErrorUnsupported(VD, "'this' captured by SEH");
CXXThisValue = llvm::UndefValue::get(ConvertTypeForMem(VD->getType()));
continue;
}
if (VD->getType()->isVariablyModifiedType()) {
CGM.ErrorUnsupported(VD, "VLA captured by SEH");
continue;
}
assert((isa<ImplicitParamDecl>(VD) || VD->isLocalVarDeclOrParm()) &&
"captured non-local variable");
// If this decl hasn't been declared yet, it will be declared in the
// OutlinedStmt.
auto I = ParentCGF.LocalDeclMap.find(VD);
if (I == ParentCGF.LocalDeclMap.end())
continue;
llvm::Value *ParentVar = I->second;
LocalDeclMap[VD] =
recoverAddrOfEscapedLocal(ParentCGF, ParentVar, ParentFP);
}
if (Finder.SEHCodeSlot) {
SEHCodeSlotStack.push_back(
recoverAddrOfEscapedLocal(ParentCGF, Finder.SEHCodeSlot, ParentFP));
}
if (IsFilter)
EmitSEHExceptionCodeSave(ParentCGF, ParentFP, EntryEBP);
}
/// Arrange a function prototype that can be called by Windows exception
/// handling personalities. On Win64, the prototype looks like:
/// RetTy func(void *EHPtrs, void *ParentFP);
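/// For a __finally helper it looks like:
///   void func(unsigned char AbnormalTermination, void *ParentFP);
/// and 32-bit filters take no explicit parameters (see the argument setup
/// below).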
void CodeGenFunction::startOutlinedSEHHelper(CodeGenFunction &ParentCGF,
bool IsFilter,
const Stmt *OutlinedStmt) {
SourceLocation StartLoc = OutlinedStmt->getLocStart();
// Get the mangled function name.
SmallString<128> Name;
{
llvm::raw_svector_ostream OS(Name);
const Decl *ParentCodeDecl = ParentCGF.CurCodeDecl;
const NamedDecl *Parent = dyn_cast_or_null<NamedDecl>(ParentCodeDecl);
assert(Parent && "FIXME: handle unnamed decls (lambdas, blocks) with SEH");
MangleContext &Mangler = CGM.getCXXABI().getMangleContext();
if (IsFilter)
Mangler.mangleSEHFilterExpression(Parent, OS);
else
Mangler.mangleSEHFinallyBlock(Parent, OS);
}
FunctionArgList Args;
if (CGM.getTarget().getTriple().getArch() != llvm::Triple::x86 || !IsFilter) {
// All SEH finally functions take two parameters. Win64 filters take two
// parameters. Win32 filters take no parameters.
if (IsFilter) {
Args.push_back(ImplicitParamDecl::Create(
getContext(), nullptr, StartLoc,
&getContext().Idents.get("exception_pointers"),
getContext().VoidPtrTy));
} else {
Args.push_back(ImplicitParamDecl::Create(
getContext(), nullptr, StartLoc,
&getContext().Idents.get("abnormal_termination"),
getContext().UnsignedCharTy));
}
Args.push_back(ImplicitParamDecl::Create(
getContext(), nullptr, StartLoc,
&getContext().Idents.get("frame_pointer"), getContext().VoidPtrTy));
}
QualType RetTy = IsFilter ? getContext().LongTy : getContext().VoidTy;
llvm::Function *ParentFn = ParentCGF.CurFn;
const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionDeclaration(
RetTy, Args, FunctionType::ExtInfo(), /*isVariadic=*/false);
llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
llvm::Function *Fn = llvm::Function::Create(
FnTy, llvm::GlobalValue::InternalLinkage, Name.str(), &CGM.getModule());
// The filter is either in the same comdat as the function, or it's internal.
if (llvm::Comdat *C = ParentFn->getComdat()) {
Fn->setComdat(C);
} else if (ParentFn->hasWeakLinkage() || ParentFn->hasLinkOnceLinkage()) {
llvm::Comdat *C = CGM.getModule().getOrInsertComdat(ParentFn->getName());
ParentFn->setComdat(C);
Fn->setComdat(C);
} else {
Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
}
IsOutlinedSEHHelper = true;
StartFunction(GlobalDecl(), RetTy, Fn, FnInfo, Args,
OutlinedStmt->getLocStart(), OutlinedStmt->getLocStart());
CGM.SetLLVMFunctionAttributes(nullptr, FnInfo, CurFn);
EmitCapturedLocals(ParentCGF, OutlinedStmt, IsFilter);
}
/// Create a stub filter function that will ultimately hold the code of the
/// filter expression. The EH preparation passes in LLVM will outline the code
/// from the main function body into this stub.
llvm::Function *
CodeGenFunction::GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
const SEHExceptStmt &Except) {
const Expr *FilterExpr = Except.getFilterExpr();
startOutlinedSEHHelper(ParentCGF, true, FilterExpr);
// Emit the original filter expression, convert to i32, and return.
llvm::Value *R = EmitScalarExpr(FilterExpr);
R = Builder.CreateIntCast(R, ConvertType(getContext().LongTy),
FilterExpr->getType()->isSignedIntegerType());
Builder.CreateStore(R, ReturnValue);
FinishFunction(FilterExpr->getLocEnd());
return CurFn;
}
llvm::Function *
CodeGenFunction::GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
const SEHFinallyStmt &Finally) {
const Stmt *FinallyBlock = Finally.getBlock();
startOutlinedSEHHelper(ParentCGF, false, FinallyBlock);
// Mark finally block calls as nounwind and noinline to make LLVM's job a
// little easier.
// FIXME: Remove these restrictions in the future.
CurFn->addFnAttr(llvm::Attribute::NoUnwind);
CurFn->addFnAttr(llvm::Attribute::NoInline);
// Emit the original finally block.
EmitStmt(FinallyBlock);
FinishFunction(FinallyBlock->getLocEnd());
return CurFn;
}
void CodeGenFunction::EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
llvm::Value *ParentFP,
llvm::Value *EntryEBP) {
// Get the pointer to the EXCEPTION_POINTERS struct. This is returned by the
// __exception_info intrinsic.
if (CGM.getTarget().getTriple().getArch() != llvm::Triple::x86) {
// On Win64, the info is passed as the first parameter to the filter.
auto AI = CurFn->arg_begin();
SEHInfo = AI;
SEHCodeSlotStack.push_back(
CreateMemTemp(getContext().IntTy, "__exception_code"));
} else {
// On Win32, the EBP on entry to the filter points to the end of an
// exception registration object. It contains 6 32-bit fields, and the info
// pointer is stored in the second field. So, GEP 20 bytes backwards and
// load the pointer.
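    // Illustrative layout (EBP points at the end of the six-field object, so
    // the info pointer in field 1 sits at EBP - 20):
    //   offset from EBP: -24  -20   -16  -12  -8   -4
    //   field index:      0    1     2    3    4    5
    //                          ^--- EXCEPTION_POINTERS*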
SEHInfo = Builder.CreateConstInBoundsGEP1_32(Int8Ty, EntryEBP, -20);
SEHInfo = Builder.CreateBitCast(SEHInfo, Int8PtrTy->getPointerTo());
SEHInfo = Builder.CreateLoad(Int8PtrTy, SEHInfo);
SEHCodeSlotStack.push_back(recoverAddrOfEscapedLocal(
ParentCGF, ParentCGF.SEHCodeSlotStack.back(), ParentFP));
}
// Save the exception code in the exception slot to unify exception access in
// the filter function and the landing pad.
// struct EXCEPTION_POINTERS {
// EXCEPTION_RECORD *ExceptionRecord;
// CONTEXT *ContextRecord;
// };
// int exceptioncode = exception_pointers->ExceptionRecord->ExceptionCode;
llvm::Type *RecordTy = CGM.Int32Ty->getPointerTo();
llvm::Type *PtrsTy = llvm::StructType::get(RecordTy, CGM.VoidPtrTy, nullptr);
llvm::Value *Ptrs = Builder.CreateBitCast(SEHInfo, PtrsTy->getPointerTo());
llvm::Value *Rec = Builder.CreateStructGEP(PtrsTy, Ptrs, 0);
Rec = Builder.CreateLoad(Rec);
llvm::Value *Code = Builder.CreateLoad(Rec);
assert(!SEHCodeSlotStack.empty() && "emitting EH code outside of __except");
Builder.CreateStore(Code, SEHCodeSlotStack.back());
}
llvm::Value *CodeGenFunction::EmitSEHExceptionInfo() {
// Sema should diagnose calling this builtin outside of a filter context, but
// don't crash if we screw up.
if (!SEHInfo)
return llvm::UndefValue::get(Int8PtrTy);
assert(SEHInfo->getType() == Int8PtrTy);
return SEHInfo;
}
llvm::Value *CodeGenFunction::EmitSEHExceptionCode() {
assert(!SEHCodeSlotStack.empty() && "emitting EH code outside of __except");
return Builder.CreateLoad(Int32Ty, SEHCodeSlotStack.back());
}
llvm::Value *CodeGenFunction::EmitSEHAbnormalTermination() {
// Abnormal termination is just the first parameter to the outlined finally
// helper.
auto AI = CurFn->arg_begin();
return Builder.CreateZExt(&*AI, Int32Ty);
}
void CodeGenFunction::EnterSEHTryStmt(const SEHTryStmt &S) {
CodeGenFunction HelperCGF(CGM, /*suppressNewContext=*/true);
if (const SEHFinallyStmt *Finally = S.getFinallyHandler()) {
// Outline the finally block.
llvm::Function *FinallyFunc =
HelperCGF.GenerateSEHFinallyFunction(*this, *Finally);
// Push a cleanup for __finally blocks.
EHStack.pushCleanup<PerformSEHFinally>(NormalAndEHCleanup, FinallyFunc);
return;
}
// Otherwise, we must have an __except block.
const SEHExceptStmt *Except = S.getExceptHandler();
assert(Except);
EHCatchScope *CatchScope = EHStack.pushCatch(1);
SEHCodeSlotStack.push_back(
CreateMemTemp(getContext().IntTy, "__exception_code"));
// If the filter is known to evaluate to 1, then we can use the clause
// "catch i8* null". We can't do this on x86 because the filter has to save
// the exception code.
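  // In that case the handler is illustratively just:
  //   %lp = landingpad { i8*, i32 }
  //           catch i8* null        ; filter constant-folded to 1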
llvm::Constant *C =
CGM.EmitConstantExpr(Except->getFilterExpr(), getContext().IntTy, this);
if (CGM.getTarget().getTriple().getArch() != llvm::Triple::x86 && C &&
C->isOneValue()) {
CatchScope->setCatchAllHandler(0, createBasicBlock("__except"));
return;
}
// In general, we have to emit an outlined filter function. Use the function
// in place of the RTTI typeinfo global that C++ EH uses.
llvm::Function *FilterFunc =
HelperCGF.GenerateSEHFilterFunction(*this, *Except);
llvm::Constant *OpaqueFunc =
llvm::ConstantExpr::getBitCast(FilterFunc, Int8PtrTy);
CatchScope->setHandler(0, OpaqueFunc, createBasicBlock("__except"));
}
void CodeGenFunction::ExitSEHTryStmt(const SEHTryStmt &S) {
// Just pop the cleanup if it's a __finally block.
if (S.getFinallyHandler()) {
PopCleanupBlock();
return;
}
// Otherwise, we must have an __except block.
const SEHExceptStmt *Except = S.getExceptHandler();
assert(Except && "__try must have __finally xor __except");
EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin());
// Don't emit the __except block if the __try block lacked invokes.
// TODO: Model unwind edges from instructions, either with iload / istore or
// a try body function.
if (!CatchScope.hasEHBranches()) {
CatchScope.clearHandlerBlocks();
EHStack.popCatch();
SEHCodeSlotStack.pop_back();
return;
}
// The fall-through block.
llvm::BasicBlock *ContBB = createBasicBlock("__try.cont");
// We just emitted the body of the __try; jump to the continue block.
if (HaveInsertPoint())
Builder.CreateBr(ContBB);
// Check if our filter function returned true.
emitCatchDispatchBlock(*this, CatchScope);
// Grab the block before we pop the handler.
llvm::BasicBlock *ExceptBB = CatchScope.getHandler(0).Block;
EHStack.popCatch();
EmitBlockAfterUses(ExceptBB);
// On Win64, the exception pointer is the exception code. Copy it to the slot.
if (CGM.getTarget().getTriple().getArch() != llvm::Triple::x86) {
llvm::Value *Code =
Builder.CreatePtrToInt(getExceptionFromSlot(), IntPtrTy);
Code = Builder.CreateTrunc(Code, Int32Ty);
Builder.CreateStore(Code, SEHCodeSlotStack.back());
}
// Emit the __except body.
EmitStmt(Except->getBlock());
// End the lifetime of the exception code.
SEHCodeSlotStack.pop_back();
if (HaveInsertPoint())
Builder.CreateBr(ContBB);
EmitBlock(ContBB);
}
void CodeGenFunction::EmitSEHLeaveStmt(const SEHLeaveStmt &S) {
// If this code is reachable then emit a stop point (if generating
// debug info). We have to do this ourselves because we are on the
// "simple" statement path.
if (HaveInsertPoint())
EmitStopPoint(&S);
// This must be a __leave from a __finally block, which we warn on and
// which is undefined behavior. Just emit unreachable.
if (!isSEHTryScope()) {
Builder.CreateUnreachable();
Builder.ClearInsertionPoint();
return;
}
EmitBranchThroughCleanup(*SEHTryEpilogueStack.back());
}
#endif // HLSL Change
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGValue.h | //===-- CGValue.h - LLVM CodeGen wrappers for llvm::Value* ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes implement wrappers around llvm::Value in order to
// fully represent the range of values for C L- and R- values.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGVALUE_H
#define LLVM_CLANG_LIB_CODEGEN_CGVALUE_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Type.h"
namespace llvm {
class Constant;
class MDNode;
}
namespace clang {
namespace CodeGen {
class AggValueSlot;
struct CGBitFieldInfo;
/// RValue - This trivial value class is used to represent the result of an
/// expression that is evaluated. It can be one of three things: either a
/// simple LLVM SSA value, a pair of SSA values for complex numbers, or the
/// address of an aggregate value in memory.
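/// For example (illustrative): an 'int' expression yields RValue::get(V), a
/// '_Complex double' yields RValue::getComplex(Real, Imag), and a
/// struct-typed expression yields RValue::getAggregate(Addr), where Addr is
/// the address of a temporary.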
class RValue {
enum Flavor { Scalar, Complex, Aggregate };
// Stores first value and flavor.
llvm::PointerIntPair<llvm::Value *, 2, Flavor> V1;
// Stores second value and volatility.
llvm::PointerIntPair<llvm::Value *, 1, bool> V2;
public:
bool isScalar() const { return V1.getInt() == Scalar; }
bool isComplex() const { return V1.getInt() == Complex; }
bool isAggregate() const { return V1.getInt() == Aggregate; }
bool isVolatileQualified() const { return V2.getInt(); }
/// getScalarVal() - Return the Value* of this scalar value.
llvm::Value *getScalarVal() const {
assert(isScalar() && "Not a scalar!");
return V1.getPointer();
}
/// getComplexVal - Return the real/imag components of this complex value.
///
std::pair<llvm::Value *, llvm::Value *> getComplexVal() const {
return std::make_pair(V1.getPointer(), V2.getPointer());
}
/// getAggregateAddr() - Return the Value* of the address of the aggregate.
llvm::Value *getAggregateAddr() const {
assert(isAggregate() && "Not an aggregate!");
return V1.getPointer();
}
static RValue get(llvm::Value *V) {
RValue ER;
ER.V1.setPointer(V);
ER.V1.setInt(Scalar);
ER.V2.setInt(false);
return ER;
}
static RValue getComplex(llvm::Value *V1, llvm::Value *V2) {
RValue ER;
ER.V1.setPointer(V1);
ER.V2.setPointer(V2);
ER.V1.setInt(Complex);
ER.V2.setInt(false);
return ER;
}
static RValue getComplex(const std::pair<llvm::Value *, llvm::Value *> &C) {
return getComplex(C.first, C.second);
}
// FIXME: Aggregate rvalues need to retain information about whether they are
// volatile or not. Remove default to find all places that probably get this
// wrong.
static RValue getAggregate(llvm::Value *V, bool Volatile = false) {
RValue ER;
ER.V1.setPointer(V);
ER.V1.setInt(Aggregate);
ER.V2.setInt(Volatile);
return ER;
}
};
/// Does an ARC strong l-value have precise lifetime?
enum ARCPreciseLifetime_t {
ARCImpreciseLifetime, ARCPreciseLifetime
};
/// LValue - This represents an lvalue reference. Because C/C++ allow
/// bitfields, this is not a simple LLVM pointer; it may be a pointer plus a
/// bitrange.
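/// For example (illustrative): 's.bf' where 'bf' is a bit-field is built
/// with MakeBitfield, 'v[i]' on a vector type with MakeVectorElt, and
/// 'v.xyz' with MakeExtVectorElt; an ordinary object uses MakeAddr.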
class LValue {
enum {
Simple, // This is a normal l-value, use getAddress().
VectorElt, // This is a vector element l-value (V[i]), use getVector*
BitField, // This is a bitfield l-value, use getBitfield*.
ExtVectorElt, // This is an extended vector subset, use getExtVectorComp
ExtMatrixElt, // This is an extended matrix subset, use getExtMatrixComp - HLSL Change
GlobalReg // This is a register l-value, use getGlobalReg()
} LVType;
llvm::Value *V;
union {
// Index into a vector subscript: V[i]
llvm::Value *VectorIdx;
// ExtVector element subset: V.xyx
llvm::Constant *VectorElts;
// BitField start bit and size
const CGBitFieldInfo *BitFieldInfo;
};
QualType Type;
// 'const' is unused here
Qualifiers Quals;
// The alignment to use when accessing this lvalue. (For vector elements,
// this is the alignment of the whole vector.)
int64_t Alignment;
// objective-c's ivar
bool Ivar:1;
// objective-c's ivar is an array
bool ObjIsArray:1;
// LValue is non-gc'able for any reason, including being a parameter or local
// variable.
bool NonGC: 1;
// Lvalue is a global reference of an objective-c object
bool GlobalObjCRef : 1;
// Lvalue is a thread local reference
bool ThreadLocalRef : 1;
// Lvalue has ARC imprecise lifetime. We store this inverted to try
// to make the default bitfield pattern all-zeroes.
bool ImpreciseLifetime : 1;
Expr *BaseIvarExp;
/// Used by struct-path-aware TBAA.
QualType TBAABaseType;
/// Offset relative to the base type.
uint64_t TBAAOffset;
/// TBAAInfo - TBAA information to attach to dereferences of this LValue.
llvm::MDNode *TBAAInfo;
private:
void Initialize(QualType Type, Qualifiers Quals,
CharUnits Alignment,
llvm::MDNode *TBAAInfo = nullptr) {
this->Type = Type;
this->Quals = Quals;
this->Alignment = Alignment.getQuantity();
assert(this->Alignment == Alignment.getQuantity() &&
"Alignment exceeds allowed max!");
// Initialize Objective-C flags.
this->Ivar = this->ObjIsArray = this->NonGC = this->GlobalObjCRef = false;
this->ImpreciseLifetime = false;
this->ThreadLocalRef = false;
this->BaseIvarExp = nullptr;
// Initialize fields for TBAA.
this->TBAABaseType = Type;
this->TBAAOffset = 0;
this->TBAAInfo = TBAAInfo;
}
public:
bool isSimple() const { return LVType == Simple; }
bool isVectorElt() const { return LVType == VectorElt; }
bool isBitField() const { return LVType == BitField; }
bool isExtVectorElt() const { return LVType == ExtVectorElt; }
bool isGlobalReg() const { return LVType == GlobalReg; }
bool isExtMatrixElt() const { return LVType == ExtMatrixElt; } // HLSL Change
bool isVolatileQualified() const { return Quals.hasVolatile(); }
bool isRestrictQualified() const { return Quals.hasRestrict(); }
unsigned getVRQualifiers() const {
return Quals.getCVRQualifiers() & ~Qualifiers::Const;
}
QualType getType() const { return Type; }
Qualifiers::ObjCLifetime getObjCLifetime() const {
return Quals.getObjCLifetime();
}
bool isObjCIvar() const { return Ivar; }
void setObjCIvar(bool Value) { Ivar = Value; }
bool isObjCArray() const { return ObjIsArray; }
void setObjCArray(bool Value) { ObjIsArray = Value; }
bool isNonGC () const { return NonGC; }
void setNonGC(bool Value) { NonGC = Value; }
bool isGlobalObjCRef() const { return GlobalObjCRef; }
void setGlobalObjCRef(bool Value) { GlobalObjCRef = Value; }
bool isThreadLocalRef() const { return ThreadLocalRef; }
void setThreadLocalRef(bool Value) { ThreadLocalRef = Value;}
ARCPreciseLifetime_t isARCPreciseLifetime() const {
return ARCPreciseLifetime_t(!ImpreciseLifetime);
}
void setARCPreciseLifetime(ARCPreciseLifetime_t value) {
ImpreciseLifetime = (value == ARCImpreciseLifetime);
}
bool isObjCWeak() const {
return Quals.getObjCGCAttr() == Qualifiers::Weak;
}
bool isObjCStrong() const {
return Quals.getObjCGCAttr() == Qualifiers::Strong;
}
bool isVolatile() const {
return Quals.hasVolatile();
}
Expr *getBaseIvarExp() const { return BaseIvarExp; }
void setBaseIvarExp(Expr *V) { BaseIvarExp = V; }
QualType getTBAABaseType() const { return TBAABaseType; }
void setTBAABaseType(QualType T) { TBAABaseType = T; }
uint64_t getTBAAOffset() const { return TBAAOffset; }
void setTBAAOffset(uint64_t O) { TBAAOffset = O; }
llvm::MDNode *getTBAAInfo() const { return TBAAInfo; }
void setTBAAInfo(llvm::MDNode *N) { TBAAInfo = N; }
const Qualifiers &getQuals() const { return Quals; }
Qualifiers &getQuals() { return Quals; }
unsigned getAddressSpace() const { return Quals.getAddressSpace(); }
CharUnits getAlignment() const { return CharUnits::fromQuantity(Alignment); }
void setAlignment(CharUnits A) { Alignment = A.getQuantity(); }
// simple lvalue
llvm::Value *getAddress() const { assert(isSimple()); return V; }
void setAddress(llvm::Value *address) {
assert(isSimple());
V = address;
}
// vector elt lvalue
llvm::Value *getVectorAddr() const { assert(isVectorElt()); return V; }
llvm::Value *getVectorIdx() const { assert(isVectorElt()); return VectorIdx; }
// extended vector elements.
llvm::Value *getExtVectorAddr() const { assert(isExtVectorElt()); return V; }
llvm::Constant *getExtVectorElts() const {
assert(isExtVectorElt());
return VectorElts;
}
// bitfield lvalue
llvm::Value *getBitFieldAddr() const {
assert(isBitField());
return V;
}
const CGBitFieldInfo &getBitFieldInfo() const {
assert(isBitField());
return *BitFieldInfo;
}
// global register lvalue
llvm::Value *getGlobalReg() const { assert(isGlobalReg()); return V; }
static LValue MakeAddr(llvm::Value *address, QualType type,
CharUnits alignment, ASTContext &Context,
llvm::MDNode *TBAAInfo = nullptr) {
Qualifiers qs = type.getQualifiers();
qs.setObjCGCAttr(Context.getObjCGCAttrKind(type));
LValue R;
R.LVType = Simple;
assert(address->getType()->isPointerTy());
R.V = address;
R.Initialize(type, qs, alignment, TBAAInfo);
return R;
}
static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx,
QualType type, CharUnits Alignment) {
LValue R;
R.LVType = VectorElt;
R.V = Vec;
R.VectorIdx = Idx;
R.Initialize(type, type.getQualifiers(), Alignment);
return R;
}
static LValue MakeExtVectorElt(llvm::Value *Vec, llvm::Constant *Elts,
QualType type, CharUnits Alignment) {
LValue R;
R.LVType = ExtVectorElt;
R.V = Vec;
R.VectorElts = Elts;
R.Initialize(type, type.getQualifiers(), Alignment);
return R;
}
/// \brief Create a new object to represent a bit-field access.
///
/// \param Addr - The base address of the bit-field sequence this
/// bit-field refers to.
/// \param Info - The information describing how to perform the bit-field
/// access.
static LValue MakeBitfield(llvm::Value *Addr,
const CGBitFieldInfo &Info,
QualType type, CharUnits Alignment) {
LValue R;
R.LVType = BitField;
R.V = Addr;
R.BitFieldInfo = &Info;
R.Initialize(type, type.getQualifiers(), Alignment);
return R;
}
static LValue MakeGlobalReg(llvm::Value *Reg,
QualType type,
CharUnits Alignment) {
LValue R;
R.LVType = GlobalReg;
R.V = Reg;
R.Initialize(type, type.getQualifiers(), Alignment);
return R;
}
RValue asAggregateRValue() const {
// FIXME: Alignment
return RValue::getAggregate(getAddress(), isVolatileQualified());
}
};
/// An aggregate value slot.
class AggValueSlot {
/// The address.
llvm::Value *Addr;
// Qualifiers
Qualifiers Quals;
unsigned short Alignment;
/// DestructedFlag - This is set to true if some external code is
/// responsible for setting up a destructor for the slot. Otherwise
/// the code which constructs it should push the appropriate cleanup.
bool DestructedFlag : 1;
/// ObjCGCFlag - This is set to true if writing to the memory in the
/// slot might require calling an appropriate Objective-C GC
/// barrier. The exact interaction here is unnecessarily mysterious.
bool ObjCGCFlag : 1;
/// ZeroedFlag - This is set to true if the memory in the slot is
/// known to be zero before the assignment into it. This means that
/// zero fields don't need to be set.
bool ZeroedFlag : 1;
/// AliasedFlag - This is set to true if the slot might be aliased
/// and it's not undefined behavior to access it through such an
/// alias. Note that it's always undefined behavior to access a C++
/// object that's under construction through an alias derived from
/// outside the construction process.
///
/// This flag controls whether calls that produce the aggregate
/// value may be evaluated directly into the slot, or whether they
/// must be evaluated into an unaliased temporary and then memcpy'ed
/// over. Since it's invalid in general to memcpy a non-POD C++
/// object, it's important that this flag never be set when
/// evaluating an expression which constructs such an object.
bool AliasedFlag : 1;
public:
enum IsAliased_t { IsNotAliased, IsAliased };
enum IsDestructed_t { IsNotDestructed, IsDestructed };
enum IsZeroed_t { IsNotZeroed, IsZeroed };
enum NeedsGCBarriers_t { DoesNotNeedGCBarriers, NeedsGCBarriers };
/// ignored - Returns an aggregate value slot indicating that the
/// aggregate value is being ignored.
static AggValueSlot ignored() {
return forAddr(nullptr, CharUnits(), Qualifiers(), IsNotDestructed,
DoesNotNeedGCBarriers, IsNotAliased);
}
/// forAddr - Make a slot for an aggregate value.
///
/// \param quals - The qualifiers that dictate how the slot should
/// be initialized. Only 'volatile' and the Objective-C lifetime
/// qualifiers matter.
///
/// \param isDestructed - true if something else is responsible
/// for calling destructors on this object
/// \param needsGC - true if the slot is potentially located
/// somewhere that ObjC GC calls should be emitted for
static AggValueSlot forAddr(llvm::Value *addr, CharUnits align,
Qualifiers quals,
IsDestructed_t isDestructed,
NeedsGCBarriers_t needsGC,
IsAliased_t isAliased,
IsZeroed_t isZeroed = IsNotZeroed) {
AggValueSlot AV;
AV.Addr = addr;
AV.Alignment = align.getQuantity();
AV.Quals = quals;
AV.DestructedFlag = isDestructed;
AV.ObjCGCFlag = needsGC;
AV.ZeroedFlag = isZeroed;
AV.AliasedFlag = isAliased;
return AV;
}
static AggValueSlot forLValue(const LValue &LV,
IsDestructed_t isDestructed,
NeedsGCBarriers_t needsGC,
IsAliased_t isAliased,
IsZeroed_t isZeroed = IsNotZeroed) {
return forAddr(LV.getAddress(), LV.getAlignment(),
LV.getQuals(), isDestructed, needsGC, isAliased, isZeroed);
}
IsDestructed_t isExternallyDestructed() const {
return IsDestructed_t(DestructedFlag);
}
void setExternallyDestructed(bool destructed = true) {
DestructedFlag = destructed;
}
Qualifiers getQualifiers() const { return Quals; }
bool isVolatile() const {
return Quals.hasVolatile();
}
void setVolatile(bool flag) {
Quals.setVolatile(flag);
}
Qualifiers::ObjCLifetime getObjCLifetime() const {
return Quals.getObjCLifetime();
}
NeedsGCBarriers_t requiresGCollection() const {
return NeedsGCBarriers_t(ObjCGCFlag);
}
llvm::Value *getAddr() const {
return Addr;
}
bool isIgnored() const {
return Addr == nullptr;
}
CharUnits getAlignment() const {
return CharUnits::fromQuantity(Alignment);
}
IsAliased_t isPotentiallyAliased() const {
return IsAliased_t(AliasedFlag);
}
// FIXME: Alignment?
RValue asRValue() const {
return RValue::getAggregate(getAddr(), isVolatile());
}
void setZeroed(bool V = true) { ZeroedFlag = V; }
IsZeroed_t isZeroed() const {
return IsZeroed_t(ZeroedFlag);
}
};
} // end namespace CodeGen
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGVTT.cpp | //===--- CGVTT.cpp - Emit LLVM Code for C++ VTTs --------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of VTTs (vtable tables).
//
//===----------------------------------------------------------------------===//
#include "CodeGenModule.h"
#include "CGCXXABI.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/VTTBuilder.h"
using namespace clang;
using namespace CodeGen;
static llvm::GlobalVariable *
GetAddrOfVTTVTable(CodeGenVTables &CGVT, CodeGenModule &CGM,
const CXXRecordDecl *MostDerivedClass,
const VTTVTable &VTable,
llvm::GlobalVariable::LinkageTypes Linkage,
llvm::DenseMap<BaseSubobject, uint64_t> &AddressPoints) {
if (VTable.getBase() == MostDerivedClass) {
assert(VTable.getBaseOffset().isZero() &&
"Most derived class vtable must have a zero offset!");
// This is a regular vtable.
return CGM.getCXXABI().getAddrOfVTable(MostDerivedClass, CharUnits());
}
return CGVT.GenerateConstructionVTable(MostDerivedClass,
VTable.getBaseSubobject(),
VTable.isVirtual(),
Linkage,
AddressPoints);
}
void
CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
llvm::GlobalVariable::LinkageTypes Linkage,
const CXXRecordDecl *RD) {
VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/true);
llvm::Type *Int8PtrTy = CGM.Int8PtrTy, *Int64Ty = CGM.Int64Ty;
llvm::ArrayType *ArrayType =
llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
SmallVector<llvm::GlobalVariable *, 8> VTables;
SmallVector<VTableAddressPointsMapTy, 8> VTableAddressPoints;
for (const VTTVTable *i = Builder.getVTTVTables().begin(),
*e = Builder.getVTTVTables().end(); i != e; ++i) {
VTableAddressPoints.push_back(VTableAddressPointsMapTy());
VTables.push_back(GetAddrOfVTTVTable(*this, CGM, RD, *i, Linkage,
VTableAddressPoints.back()));
}
SmallVector<llvm::Constant *, 8> VTTComponents;
for (const VTTComponent *i = Builder.getVTTComponents().begin(),
*e = Builder.getVTTComponents().end(); i != e; ++i) {
const VTTVTable &VTTVT = Builder.getVTTVTables()[i->VTableIndex];
llvm::GlobalVariable *VTable = VTables[i->VTableIndex];
uint64_t AddressPoint;
if (VTTVT.getBase() == RD) {
// Just get the address point for the regular vtable.
AddressPoint =
getItaniumVTableContext().getVTableLayout(RD).getAddressPoint(
i->VTableBase);
assert(AddressPoint != 0 && "Did not find vtable address point!");
} else {
AddressPoint = VTableAddressPoints[i->VTableIndex].lookup(i->VTableBase);
assert(AddressPoint != 0 && "Did not find ctor vtable address point!");
}
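    // The resulting component is roughly (illustrative, with AddressPoint
    // folded into the constant expression):
    //   i8* bitcast (i8** getelementptr inbounds (@vtable, i64 0,
    //                                             i64 AddressPoint) to i8*)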
llvm::Value *Idxs[] = {
llvm::ConstantInt::get(Int64Ty, 0),
llvm::ConstantInt::get(Int64Ty, AddressPoint)
};
llvm::Constant *Init = llvm::ConstantExpr::getInBoundsGetElementPtr(
VTable->getValueType(), VTable, Idxs);
Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
VTTComponents.push_back(Init);
}
llvm::Constant *Init = llvm::ConstantArray::get(ArrayType, VTTComponents);
VTT->setInitializer(Init);
// Set the correct linkage.
VTT->setLinkage(Linkage);
if (CGM.supportsCOMDAT() && VTT->isWeakForLinker())
VTT->setComdat(CGM.getModule().getOrInsertComdat(VTT->getName()));
// Set the right visibility.
CGM.setGlobalVisibility(VTT, RD);
}
llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
assert(RD->getNumVBases() && "Only classes with virtual bases need a VTT");
SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
cast<ItaniumMangleContext>(CGM.getCXXABI().getMangleContext())
.mangleCXXVTT(RD, Out);
Out.flush();
StringRef Name = OutName.str();
// This will also defer the definition of the VTT.
(void) CGM.getCXXABI().getAddrOfVTable(RD, CharUnits());
VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
llvm::ArrayType *ArrayType =
llvm::ArrayType::get(CGM.Int8PtrTy, Builder.getVTTComponents().size());
llvm::GlobalVariable *GV =
CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
llvm::GlobalValue::ExternalLinkage);
GV->setUnnamedAddr(true);
return GV;
}
uint64_t CodeGenVTables::getSubVTTIndex(const CXXRecordDecl *RD,
BaseSubobject Base) {
BaseSubobjectPairTy ClassSubobjectPair(RD, Base);
SubVTTIndiciesMapTy::iterator I = SubVTTIndicies.find(ClassSubobjectPair);
if (I != SubVTTIndicies.end())
return I->second;
VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
Builder.getSubVTTIndicies().begin(),
E = Builder.getSubVTTIndicies().end(); I != E; ++I) {
// Insert all indices.
BaseSubobjectPairTy ClassSubobjectPair(RD, I->first);
SubVTTIndicies.insert(std::make_pair(ClassSubobjectPair, I->second));
}
I = SubVTTIndicies.find(ClassSubobjectPair);
assert(I != SubVTTIndicies.end() && "Did not find index!");
return I->second;
}
uint64_t
CodeGenVTables::getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
BaseSubobject Base) {
SecondaryVirtualPointerIndicesMapTy::iterator I =
SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base));
if (I != SecondaryVirtualPointerIndices.end())
return I->second;
VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
// Insert all secondary vpointer indices.
for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
Builder.getSecondaryVirtualPointerIndices().begin(),
E = Builder.getSecondaryVirtualPointerIndices().end(); I != E; ++I) {
std::pair<const CXXRecordDecl *, BaseSubobject> Pair =
std::make_pair(RD, I->first);
SecondaryVirtualPointerIndices.insert(std::make_pair(Pair, I->second));
}
I = SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base));
assert(I != SecondaryVirtualPointerIndices.end() && "Did not find index!");
return I->second;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGBuiltin.cpp | //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include <sstream>
using namespace clang;
using namespace CodeGen;
using namespace llvm;
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
unsigned BuiltinID) {
assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
// Get the name, skip over the __builtin_ prefix (if necessary).
StringRef Name;
GlobalDecl D(FD);
// If the builtin has been declared explicitly with an assembler label,
// use the mangled name. This differs from the plain label on platforms
// that prefix labels.
if (FD->hasAttr<AsmLabelAttr>())
Name = getMangledName(D);
else
Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;
llvm::FunctionType *Ty =
cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
QualType T, llvm::IntegerType *IntType) {
V = CGF.EmitToMemory(V, T);
if (V->getType()->isPointerTy())
return CGF.Builder.CreatePtrToInt(V, IntType);
assert(V->getType() == IntType);
return V;
}
static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
QualType T, llvm::Type *ResultType) {
V = CGF.EmitFromMemory(V, T);
if (ResultType->isPointerTy())
return CGF.Builder.CreateIntToPtr(V, ResultType);
assert(V->getType() == ResultType);
return V;
}
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
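/// For example (illustrative), __sync_fetch_and_add(p, v) on an 'int'
/// becomes roughly:
///   %old = atomicrmw add i32* %p, i32 %v seq_cst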
static Value *MakeBinaryAtomicValue(CodeGenFunction &CGF,
llvm::AtomicRMWInst::BinOp Kind,
const CallExpr *E) {
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(T,
E->getArg(0)->getType()->getPointeeType()));
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
CGF.getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
llvm::Value *Args[2];
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
llvm::Value *Result =
CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
llvm::SequentiallyConsistent);
return EmitFromInt(CGF, Result, T, ValueType);
}
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
llvm::AtomicRMWInst::BinOp Kind,
const CallExpr *E) {
return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}
/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
llvm::AtomicRMWInst::BinOp Kind,
const CallExpr *E,
Instruction::BinaryOps Op,
bool Invert = false) {
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(T,
E->getArg(0)->getType()->getPointeeType()));
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
CGF.getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
llvm::Value *Args[2];
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
llvm::Value *Result =
CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
llvm::SequentiallyConsistent);
Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
if (Invert)
Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
llvm::ConstantInt::get(IntType, -1));
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
}
/// @brief Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E Builtin call expression to convert to cmpxchg.
/// arg0 - address to operate on
/// arg1 - value to compare with
/// arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
/// cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
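/// For example (illustrative), __sync_val_compare_and_swap(p, old, new) on
/// an 'int' becomes roughly:
///   %pair = cmpxchg i32* %p, i32 %old, i32 %new seq_cst seq_cst
///   %val  = extractvalue { i32, i1 } %pair, 0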
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
bool ReturnBool) {
QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
llvm::IntegerType *IntType = llvm::IntegerType::get(
CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
Value *Args[3];
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
Value *Pair = CGF.Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
llvm::SequentiallyConsistent,
llvm::SequentiallyConsistent);
if (ReturnBool)
// Extract boolean success flag and zext it to int.
return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
CGF.ConvertType(E->getType()));
else
// Extract old value and emit it using the same type as compare value.
return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
ValueType);
}
/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
Value *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
Call->setDoesNotAccessMemory();
return Call;
}
/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
LLVMContext &C = CGF.CGM.getLLVMContext();
llvm::Type *Ty = V->getType();
int Width = Ty->getPrimitiveSizeInBits();
llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
V = CGF.Builder.CreateBitCast(V, IntTy);
if (Ty->isPPC_FP128Ty()) {
// The higher-order double comes first, and so we need to truncate the
// pair to extract the overall sign. The order of the pair is the same
// in both little- and big-endian modes.
Width >>= 1;
IntTy = llvm::IntegerType::get(C, Width);
V = CGF.Builder.CreateTrunc(V, IntTy);
}
Value *Zero = llvm::Constant::getNullValue(IntTy);
return CGF.Builder.CreateICmpSLT(V, Zero);
}
static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
const CallExpr *E, llvm::Value *calleeValue) {
return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E,
ReturnValueSlot(), Fn);
}
/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
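/// For example (illustrative), with the sadd variant and i32 operands this
/// emits roughly:
///   %tmp   = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %X, i32 %Y)
///   %Carry = extractvalue { i32, i1 } %tmp, 1
///   %res   = extractvalue { i32, i1 } %tmp, 0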
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
const llvm::Intrinsic::ID IntrinsicID,
llvm::Value *X, llvm::Value *Y,
llvm::Value *&Carry) {
// Make sure we have integers of the same width.
assert(X->getType() == Y->getType() &&
"Arguments must be the same type. (Did you forget to make sure both "
"arguments have the same integer width?)");
llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
return CGF.Builder.CreateExtractValue(Tmp, 0);
}
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue) {
// See if we can constant fold this builtin. If so, don't emit it at all.
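  // For example, '__builtin_abs(-3)' evaluates to the constant 3 here and
  // no abs computation is emitted.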
Expr::EvalResult Result;
if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
!Result.hasSideEffects()) {
if (Result.Val.isInt())
return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
Result.Val.getInt()));
if (Result.Val.isFloat())
return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
Result.Val.getFloat()));
}
switch (BuiltinID) {
default: break; // Handle intrinsics and libm functions below.
case Builtin::BI__builtin___CFStringMakeConstantString:
case Builtin::BI__builtin___NSStringMakeConstantString:
return RValue::get(CGM.EmitConstantExpr(E, E->getType(), nullptr));
case Builtin::BI__builtin_stdarg_start:
case Builtin::BI__builtin_va_start:
case Builtin::BI__va_start:
case Builtin::BI__builtin_va_end: {
Value *ArgValue = (BuiltinID == Builtin::BI__va_start)
? EmitScalarExpr(E->getArg(0))
: EmitVAListRef(E->getArg(0));
llvm::Type *DestType = Int8PtrTy;
if (ArgValue->getType() != DestType)
ArgValue = Builder.CreateBitCast(ArgValue, DestType,
ArgValue->getName().data());
Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
Intrinsic::vaend : Intrinsic::vastart;
return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
}
case Builtin::BI__builtin_va_copy: {
Value *DstPtr = EmitVAListRef(E->getArg(0));
Value *SrcPtr = EmitVAListRef(E->getArg(1));
llvm::Type *Type = Int8PtrTy;
DstPtr = Builder.CreateBitCast(DstPtr, Type);
SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
{DstPtr, SrcPtr}));
}
case Builtin::BI__builtin_abs:
case Builtin::BI__builtin_labs:
case Builtin::BI__builtin_llabs: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
Value *CmpResult =
Builder.CreateICmpSGE(ArgValue,
llvm::Constant::getNullValue(ArgValue->getType()),
"abscond");
Value *Result =
Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");
return RValue::get(Result);
}
case Builtin::BI__builtin_fabs:
case Builtin::BI__builtin_fabsf:
case Builtin::BI__builtin_fabsl: {
Value *Arg1 = EmitScalarExpr(E->getArg(0));
Value *Result = EmitFAbs(*this, Arg1);
return RValue::get(Result);
}
case Builtin::BI__builtin_fmod:
case Builtin::BI__builtin_fmodf:
case Builtin::BI__builtin_fmodl: {
Value *Arg1 = EmitScalarExpr(E->getArg(0));
Value *Arg2 = EmitScalarExpr(E->getArg(1));
Value *Result = Builder.CreateFRem(Arg1, Arg2, "fmod");
return RValue::get(Result);
}
case Builtin::BI__builtin_conj:
case Builtin::BI__builtin_conjf:
case Builtin::BI__builtin_conjl: {
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
Value *Real = ComplexVal.first;
Value *Imag = ComplexVal.second;
Value *Zero =
Imag->getType()->isFPOrFPVectorTy()
? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
: llvm::Constant::getNullValue(Imag->getType());
Imag = Builder.CreateFSub(Zero, Imag, "sub");
return RValue::getComplex(std::make_pair(Real, Imag));
}
case Builtin::BI__builtin_creal:
case Builtin::BI__builtin_crealf:
case Builtin::BI__builtin_creall:
case Builtin::BIcreal:
case Builtin::BIcrealf:
case Builtin::BIcreall: {
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
return RValue::get(ComplexVal.first);
}
case Builtin::BI__builtin_cimag:
case Builtin::BI__builtin_cimagf:
case Builtin::BI__builtin_cimagl:
case Builtin::BIcimag:
case Builtin::BIcimagf:
case Builtin::BIcimagl: {
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
return RValue::get(ComplexVal.second);
}
case Builtin::BI__builtin_ctzs:
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
case Builtin::BI__builtin_ctzll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_clzs:
case Builtin::BI__builtin_clz:
case Builtin::BI__builtin_clzl:
case Builtin::BI__builtin_clzll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_ffs:
case Builtin::BI__builtin_ffsl:
case Builtin::BI__builtin_ffsll: {
// ffs(x) -> x ? cttz(x) + 1 : 0
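    // Note that cttz is emitted with its is-zero-undef flag set (the
    // Builder.getTrue() operand); the x == 0 case is handled explicitly by
    // the select below.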
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp =
Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
llvm::ConstantInt::get(ArgType, 1));
Value *Zero = llvm::Constant::getNullValue(ArgType);
Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_parity:
case Builtin::BI__builtin_parityl:
case Builtin::BI__builtin_parityll: {
// parity(x) -> ctpop(x) & 1
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp = Builder.CreateCall(F, ArgValue);
Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_popcount:
case Builtin::BI__builtin_popcountl:
case Builtin::BI__builtin_popcountll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_expect: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
// Don't generate llvm.expect on -O0 as the backend won't use it for
// anything.
// Note, we still IRGen ExpectedValue because it could have side-effects.
if (CGM.getCodeGenOpts().OptimizationLevel == 0)
return RValue::get(ArgValue);
Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
Value *Result =
Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
return RValue::get(Result);
}
case Builtin::BI__builtin_assume_aligned: {
Value *PtrValue = EmitScalarExpr(E->getArg(0));
Value *OffsetValue =
(E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
unsigned Alignment = (unsigned) AlignmentCI->getZExtValue();
EmitAlignmentAssumption(PtrValue, Alignment, OffsetValue);
return RValue::get(PtrValue);
}
case Builtin::BI__assume:
case Builtin::BI__builtin_assume: {
if (E->getArg(0)->HasSideEffects(getContext()))
return RValue::get(nullptr);
Value *ArgValue = EmitScalarExpr(E->getArg(0));
Value *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
}
case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
return RValue::get(Builder.CreateCall(F, ArgValue));
}
case Builtin::BI__builtin_object_size: {
// We rely on constant folding to deal with expressions with side effects.
assert(!E->getArg(0)->HasSideEffects(getContext()) &&
"should have been constant folded");
// We pass this builtin onto the optimizer so that it can
// figure out the object size in more complex cases.
llvm::Type *ResType = ConvertType(E->getType());
    // LLVM's objectsize intrinsic only distinguishes the 0/1 and 2/3 modes
    // (maximum vs. minimum), so pass that distinction along as a boolean.
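    // For example, __builtin_object_size(p, 2) becomes roughly
    //   call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 true)
    // since (2 & 0x2) >> 1 selects the "minimum" variant.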
Value *Ty = EmitScalarExpr(E->getArg(1));
ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
assert(CI);
uint64_t val = CI->getZExtValue();
CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
// FIXME: Get right address space.
llvm::Type *Tys[] = { ResType, Builder.getInt8PtrTy(0) };
Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys);
return RValue::get(
Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0)), CI}));
}
case Builtin::BI__builtin_prefetch: {
Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
llvm::ConstantInt::get(Int32Ty, 0);
Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
llvm::ConstantInt::get(Int32Ty, 3);
Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
}
case Builtin::BI__builtin_readcyclecounter: {
Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin___clear_cache: {
Value *Begin = EmitScalarExpr(E->getArg(0));
Value *End = EmitScalarExpr(E->getArg(1));
Value *F = CGM.getIntrinsic(Intrinsic::clear_cache);
return RValue::get(Builder.CreateCall(F, {Begin, End}));
}
case Builtin::BI__builtin_trap:
return RValue::get(EmitTrapCall(Intrinsic::trap));
case Builtin::BI__debugbreak:
return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
case Builtin::BI__builtin_unreachable: {
if (SanOpts.has(SanitizerKind::Unreachable)) {
SanitizerScope SanScope(this);
EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
SanitizerKind::Unreachable),
"builtin_unreachable", EmitCheckSourceLocation(E->getExprLoc()),
None);
} else
Builder.CreateUnreachable();
// We do need to preserve an insertion point.
EmitBlock(createBasicBlock("unreachable.cont"));
return RValue::get(nullptr);
}
case Builtin::BI__builtin_powi:
case Builtin::BI__builtin_powif:
case Builtin::BI__builtin_powil: {
Value *Base = EmitScalarExpr(E->getArg(0));
Value *Exponent = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = Base->getType();
Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
return RValue::get(Builder.CreateCall(F, {Base, Exponent}));
}
case Builtin::BI__builtin_isgreater:
case Builtin::BI__builtin_isgreaterequal:
case Builtin::BI__builtin_isless:
case Builtin::BI__builtin_islessequal:
case Builtin::BI__builtin_islessgreater:
case Builtin::BI__builtin_isunordered: {
// Ordered comparisons: we know the arguments to these are matching scalar
// floating point values.
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
switch (BuiltinID) {
default: llvm_unreachable("Unknown ordered comparison");
case Builtin::BI__builtin_isgreater:
LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
break;
case Builtin::BI__builtin_isgreaterequal:
LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
break;
case Builtin::BI__builtin_isless:
LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
break;
case Builtin::BI__builtin_islessequal:
LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
break;
case Builtin::BI__builtin_islessgreater:
LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
break;
case Builtin::BI__builtin_isunordered:
LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
break;
}
// ZExt bool to int type.
return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
}
case Builtin::BI__builtin_isnan: {
Value *V = EmitScalarExpr(E->getArg(0));
V = Builder.CreateFCmpUNO(V, V, "cmp");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
case Builtin::BI__builtin_isinf: {
// isinf(x) --> fabs(x) == infinity
Value *V = EmitScalarExpr(E->getArg(0));
V = EmitFAbs(*this, V);
V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
case Builtin::BI__builtin_isinf_sign: {
// isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
Value *Arg = EmitScalarExpr(E->getArg(0));
Value *AbsArg = EmitFAbs(*this, Arg);
Value *IsInf = Builder.CreateFCmpOEQ(
AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
Value *IsNeg = EmitSignBit(*this, Arg);
llvm::Type *IntTy = ConvertType(E->getType());
Value *Zero = Constant::getNullValue(IntTy);
Value *One = ConstantInt::get(IntTy, 1);
Value *NegativeOne = ConstantInt::get(IntTy, -1);
Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
return RValue::get(Result);
}
case Builtin::BI__builtin_isnormal: {
// isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
Value *V = EmitScalarExpr(E->getArg(0));
Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
Value *Abs = EmitFAbs(*this, V);
Value *IsLessThanInf =
Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
APFloat Smallest = APFloat::getSmallestNormalized(
getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
Value *IsNormal =
Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
"isnormal");
V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
V = Builder.CreateAnd(V, IsNormal, "and");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
case Builtin::BI__builtin_isfinite: {
// isfinite(x) --> x == x && fabs(x) != infinity;
Value *V = EmitScalarExpr(E->getArg(0));
Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
Value *Abs = EmitFAbs(*this, V);
Value *IsNotInf =
Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
V = Builder.CreateAnd(Eq, IsNotInf, "and");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
case Builtin::BI__builtin_fpclassify: {
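    // Lowered as a chain of tests feeding a single phi node: zero, then NaN,
    // then infinity, and finally normal vs. subnormal.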
Value *V = EmitScalarExpr(E->getArg(5));
llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
// Create Result
BasicBlock *Begin = Builder.GetInsertBlock();
BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
Builder.SetInsertPoint(End);
PHINode *Result =
Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
"fpclassify_result");
// if (V==0) return FP_ZERO
Builder.SetInsertPoint(Begin);
Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
"iszero");
Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
Builder.CreateCondBr(IsZero, End, NotZero);
Result->addIncoming(ZeroLiteral, Begin);
// if (V != V) return FP_NAN
Builder.SetInsertPoint(NotZero);
Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
Value *NanLiteral = EmitScalarExpr(E->getArg(0));
BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
Builder.CreateCondBr(IsNan, End, NotNan);
Result->addIncoming(NanLiteral, NotZero);
// if (fabs(V) == infinity) return FP_INFINITY
Builder.SetInsertPoint(NotNan);
Value *VAbs = EmitFAbs(*this, V);
Value *IsInf =
Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
"isinf");
Value *InfLiteral = EmitScalarExpr(E->getArg(1));
BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
Builder.CreateCondBr(IsInf, End, NotInf);
Result->addIncoming(InfLiteral, NotNan);
// if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
Builder.SetInsertPoint(NotInf);
APFloat Smallest = APFloat::getSmallestNormalized(
getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
Value *IsNormal =
Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
"isnormal");
Value *NormalResult =
Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
EmitScalarExpr(E->getArg(3)));
Builder.CreateBr(End);
Result->addIncoming(NormalResult, NotInf);
// return Result
Builder.SetInsertPoint(End);
return RValue::get(Result);
}
case Builtin::BIalloca:
case Builtin::BI_alloca:
case Builtin::BI__builtin_alloca: {
Value *Size = EmitScalarExpr(E->getArg(0));
return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
}
case Builtin::BIbzero:
case Builtin::BI__builtin_bzero: {
std::pair<llvm::Value*, unsigned> Dest =
EmitPointerWithAlignment(E->getArg(0));
Value *SizeVal = EmitScalarExpr(E->getArg(1));
EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
Dest.second, false);
return RValue::get(Dest.first);
}
case Builtin::BImemcpy:
case Builtin::BI__builtin_memcpy: {
std::pair<llvm::Value*, unsigned> Dest =
EmitPointerWithAlignment(E->getArg(0));
std::pair<llvm::Value*, unsigned> Src =
EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
unsigned Align = std::min(Dest.second, Src.second);
EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
EmitNonNullArgCheck(RValue::get(Src.first), E->getArg(1)->getType(),
E->getArg(1)->getExprLoc(), FD, 1);
Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
return RValue::get(Dest.first);
}
case Builtin::BI__builtin___memcpy_chk: {
// fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
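    // If either size is not a known constant, or the copy could overflow the
    // destination, break out so the ordinary library-call path at the bottom
    // of this function emits the call instead.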
llvm::APSInt Size, DstSize;
if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
!E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
if (Size.ugt(DstSize))
break;
std::pair<llvm::Value*, unsigned> Dest =
EmitPointerWithAlignment(E->getArg(0));
std::pair<llvm::Value*, unsigned> Src =
EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
unsigned Align = std::min(Dest.second, Src.second);
Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
return RValue::get(Dest.first);
}
case Builtin::BI__builtin_objc_memmove_collectable: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *SrcAddr = EmitScalarExpr(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
Address, SrcAddr, SizeVal);
return RValue::get(Address);
}
case Builtin::BI__builtin___memmove_chk: {
// fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
llvm::APSInt Size, DstSize;
if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
!E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
if (Size.ugt(DstSize))
break;
std::pair<llvm::Value*, unsigned> Dest =
EmitPointerWithAlignment(E->getArg(0));
std::pair<llvm::Value*, unsigned> Src =
EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
unsigned Align = std::min(Dest.second, Src.second);
Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
return RValue::get(Dest.first);
}
case Builtin::BImemmove:
case Builtin::BI__builtin_memmove: {
std::pair<llvm::Value*, unsigned> Dest =
EmitPointerWithAlignment(E->getArg(0));
std::pair<llvm::Value*, unsigned> Src =
EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
unsigned Align = std::min(Dest.second, Src.second);
EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
EmitNonNullArgCheck(RValue::get(Src.first), E->getArg(1)->getType(),
E->getArg(1)->getExprLoc(), FD, 1);
Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
return RValue::get(Dest.first);
}
case Builtin::BImemset:
case Builtin::BI__builtin_memset: {
std::pair<llvm::Value*, unsigned> Dest =
EmitPointerWithAlignment(E->getArg(0));
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = EmitScalarExpr(E->getArg(2));
EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
return RValue::get(Dest.first);
}
case Builtin::BI__builtin___memset_chk: {
// fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
llvm::APSInt Size, DstSize;
if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
!E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
if (Size.ugt(DstSize))
break;
std::pair<llvm::Value*, unsigned> Dest =
EmitPointerWithAlignment(E->getArg(0));
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
return RValue::get(Dest.first);
}
case Builtin::BI__builtin_dwarf_cfa: {
// The offset in bytes from the first argument to the CFA.
//
// Why on earth is this in the frontend? Is there any reason at
// all that the backend can't reasonably determine this while
// lowering llvm.eh.dwarf.cfa()?
//
// TODO: If there's a satisfactory reason, add a target hook for
// this instead of hard-coding 0, which is correct for most targets.
int32_t Offset = 0;
Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
return RValue::get(Builder.CreateCall(F,
llvm::ConstantInt::get(Int32Ty, Offset)));
}
case Builtin::BI__builtin_return_address: {
Value *Depth =
CGM.EmitConstantExpr(E->getArg(0), getContext().UnsignedIntTy, this);
Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI__builtin_frame_address: {
Value *Depth =
CGM.EmitConstantExpr(E->getArg(0), getContext().UnsignedIntTy, this);
Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI__builtin_extract_return_addr: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
return RValue::get(Result);
}
case Builtin::BI__builtin_frob_return_addr: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
return RValue::get(Result);
}
case Builtin::BI__builtin_dwarf_sp_column: {
llvm::IntegerType *Ty
= cast<llvm::IntegerType>(ConvertType(E->getType()));
int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
if (Column == -1) {
CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
return RValue::get(llvm::UndefValue::get(Ty));
}
return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
}
case Builtin::BI__builtin_init_dwarf_reg_size_table: {
Value *Address = EmitScalarExpr(E->getArg(0));
if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
}
case Builtin::BI__builtin_eh_return: {
Value *Int = EmitScalarExpr(E->getArg(0));
Value *Ptr = EmitScalarExpr(E->getArg(1));
llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
"LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
? Intrinsic::eh_return_i32
: Intrinsic::eh_return_i64);
Builder.CreateCall(F, {Int, Ptr});
Builder.CreateUnreachable();
// We do need to preserve an insertion point.
EmitBlock(createBasicBlock("builtin_eh_return.cont"));
return RValue::get(nullptr);
}
case Builtin::BI__builtin_unwind_init: {
Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin_extend_pointer: {
// Extends a pointer to the size of an _Unwind_Word, which is
// uint64_t on all platforms. Generally this gets poked into a
// register and eventually used as an address, so if the
// addressing registers are wider than pointers and the platform
// doesn't implicitly ignore high-order bits when doing
// addressing, we need to make sure we zext / sext based on
// the platform's expectations.
//
// See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
// Cast the pointer to intptr_t.
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
// If that's 64 bits, we're done.
if (IntPtrTy->getBitWidth() == 64)
return RValue::get(Result);
// Otherwise, ask the codegen data what to do.
if (getTargetHooks().extendPointerWithSExt())
return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
else
return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
}
case Builtin::BI__builtin_setjmp: {
// Buffer is a void**.
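    // Layout sketch for the slots stored below (each slot is pointer-sized):
    //   buf[0] = frame address, buf[2] = saved stack pointer;
    // the EH setjmp intrinsic manages the remaining slots itself.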
Value *Buf = EmitScalarExpr(E->getArg(0));
// Store the frame pointer to the setjmp buffer.
Value *FrameAddr =
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
ConstantInt::get(Int32Ty, 0));
Builder.CreateStore(FrameAddr, Buf);
// Store the stack pointer to the setjmp buffer.
Value *StackAddr =
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
Value *StackSaveSlot =
Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
Builder.CreateStore(StackAddr, StackSaveSlot);
// Call LLVM's EH setjmp, which is lightweight.
Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
return RValue::get(Builder.CreateCall(F, Buf));
}
case Builtin::BI__builtin_longjmp: {
Value *Buf = EmitScalarExpr(E->getArg(0));
Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
// Call LLVM's EH longjmp, which is lightweight.
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
// longjmp doesn't return; mark this as unreachable.
Builder.CreateUnreachable();
// We do need to preserve an insertion point.
EmitBlock(createBasicBlock("longjmp.cont"));
return RValue::get(nullptr);
}
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_sub:
case Builtin::BI__sync_fetch_and_or:
case Builtin::BI__sync_fetch_and_and:
case Builtin::BI__sync_fetch_and_xor:
case Builtin::BI__sync_fetch_and_nand:
case Builtin::BI__sync_add_and_fetch:
case Builtin::BI__sync_sub_and_fetch:
case Builtin::BI__sync_and_and_fetch:
case Builtin::BI__sync_or_and_fetch:
case Builtin::BI__sync_xor_and_fetch:
case Builtin::BI__sync_nand_and_fetch:
case Builtin::BI__sync_val_compare_and_swap:
case Builtin::BI__sync_bool_compare_and_swap:
case Builtin::BI__sync_lock_test_and_set:
case Builtin::BI__sync_lock_release:
case Builtin::BI__sync_swap:
llvm_unreachable("Shouldn't make it through sema");
case Builtin::BI__sync_fetch_and_add_1:
case Builtin::BI__sync_fetch_and_add_2:
case Builtin::BI__sync_fetch_and_add_4:
case Builtin::BI__sync_fetch_and_add_8:
case Builtin::BI__sync_fetch_and_add_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
case Builtin::BI__sync_fetch_and_sub_1:
case Builtin::BI__sync_fetch_and_sub_2:
case Builtin::BI__sync_fetch_and_sub_4:
case Builtin::BI__sync_fetch_and_sub_8:
case Builtin::BI__sync_fetch_and_sub_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
case Builtin::BI__sync_fetch_and_or_1:
case Builtin::BI__sync_fetch_and_or_2:
case Builtin::BI__sync_fetch_and_or_4:
case Builtin::BI__sync_fetch_and_or_8:
case Builtin::BI__sync_fetch_and_or_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
case Builtin::BI__sync_fetch_and_and_1:
case Builtin::BI__sync_fetch_and_and_2:
case Builtin::BI__sync_fetch_and_and_4:
case Builtin::BI__sync_fetch_and_and_8:
case Builtin::BI__sync_fetch_and_and_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
case Builtin::BI__sync_fetch_and_xor_1:
case Builtin::BI__sync_fetch_and_xor_2:
case Builtin::BI__sync_fetch_and_xor_4:
case Builtin::BI__sync_fetch_and_xor_8:
case Builtin::BI__sync_fetch_and_xor_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
case Builtin::BI__sync_fetch_and_nand_1:
case Builtin::BI__sync_fetch_and_nand_2:
case Builtin::BI__sync_fetch_and_nand_4:
case Builtin::BI__sync_fetch_and_nand_8:
case Builtin::BI__sync_fetch_and_nand_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
// Clang extensions: not overloaded yet.
case Builtin::BI__sync_fetch_and_min:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
case Builtin::BI__sync_fetch_and_max:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
case Builtin::BI__sync_fetch_and_umin:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
case Builtin::BI__sync_fetch_and_umax:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
case Builtin::BI__sync_add_and_fetch_1:
case Builtin::BI__sync_add_and_fetch_2:
case Builtin::BI__sync_add_and_fetch_4:
case Builtin::BI__sync_add_and_fetch_8:
case Builtin::BI__sync_add_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
llvm::Instruction::Add);
case Builtin::BI__sync_sub_and_fetch_1:
case Builtin::BI__sync_sub_and_fetch_2:
case Builtin::BI__sync_sub_and_fetch_4:
case Builtin::BI__sync_sub_and_fetch_8:
case Builtin::BI__sync_sub_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
llvm::Instruction::Sub);
case Builtin::BI__sync_and_and_fetch_1:
case Builtin::BI__sync_and_and_fetch_2:
case Builtin::BI__sync_and_and_fetch_4:
case Builtin::BI__sync_and_and_fetch_8:
case Builtin::BI__sync_and_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
llvm::Instruction::And);
case Builtin::BI__sync_or_and_fetch_1:
case Builtin::BI__sync_or_and_fetch_2:
case Builtin::BI__sync_or_and_fetch_4:
case Builtin::BI__sync_or_and_fetch_8:
case Builtin::BI__sync_or_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
llvm::Instruction::Or);
case Builtin::BI__sync_xor_and_fetch_1:
case Builtin::BI__sync_xor_and_fetch_2:
case Builtin::BI__sync_xor_and_fetch_4:
case Builtin::BI__sync_xor_and_fetch_8:
case Builtin::BI__sync_xor_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
llvm::Instruction::Xor);
case Builtin::BI__sync_nand_and_fetch_1:
case Builtin::BI__sync_nand_and_fetch_2:
case Builtin::BI__sync_nand_and_fetch_4:
case Builtin::BI__sync_nand_and_fetch_8:
case Builtin::BI__sync_nand_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
llvm::Instruction::And, true);
case Builtin::BI__sync_val_compare_and_swap_1:
case Builtin::BI__sync_val_compare_and_swap_2:
case Builtin::BI__sync_val_compare_and_swap_4:
case Builtin::BI__sync_val_compare_and_swap_8:
case Builtin::BI__sync_val_compare_and_swap_16:
return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
case Builtin::BI__sync_bool_compare_and_swap_1:
case Builtin::BI__sync_bool_compare_and_swap_2:
case Builtin::BI__sync_bool_compare_and_swap_4:
case Builtin::BI__sync_bool_compare_and_swap_8:
case Builtin::BI__sync_bool_compare_and_swap_16:
return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
case Builtin::BI__sync_swap_1:
case Builtin::BI__sync_swap_2:
case Builtin::BI__sync_swap_4:
case Builtin::BI__sync_swap_8:
case Builtin::BI__sync_swap_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
case Builtin::BI__sync_lock_test_and_set_1:
case Builtin::BI__sync_lock_test_and_set_2:
case Builtin::BI__sync_lock_test_and_set_4:
case Builtin::BI__sync_lock_test_and_set_8:
case Builtin::BI__sync_lock_test_and_set_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
case Builtin::BI__sync_lock_release_1:
case Builtin::BI__sync_lock_release_2:
case Builtin::BI__sync_lock_release_4:
case Builtin::BI__sync_lock_release_8:
case Builtin::BI__sync_lock_release_16: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
QualType ElTy = E->getArg(0)->getType()->getPointeeType();
CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
StoreSize.getQuantity() * 8);
Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
llvm::StoreInst *Store =
Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
Store->setAlignment(StoreSize.getQuantity());
Store->setAtomic(llvm::Release);
return RValue::get(nullptr);
}
case Builtin::BI__sync_synchronize: {
// We assume this is supposed to correspond to a C++0x-style
// sequentially-consistent fence (i.e. this is only usable for
  // synchronization, not device I/O or anything like that). This intrinsic
// is really badly designed in the sense that in theory, there isn't
// any way to safely use it... but in practice, it mostly works
// to use it with non-atomic loads and stores to get acquire/release
// semantics.
Builder.CreateFence(llvm::SequentiallyConsistent);
return RValue::get(nullptr);
}
case Builtin::BI__c11_atomic_is_lock_free:
case Builtin::BI__atomic_is_lock_free: {
// Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
// __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
// _Atomic(T) is always properly-aligned.
const char *LibCallName = "__atomic_is_lock_free";
CallArgList Args;
Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
getContext().getSizeType());
if (BuiltinID == Builtin::BI__atomic_is_lock_free)
Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
getContext().VoidPtrTy);
else
Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
getContext().VoidPtrTy);
const CGFunctionInfo &FuncInfo =
CGM.getTypes().arrangeFreeFunctionCall(E->getType(), Args,
FunctionType::ExtInfo(),
RequiredArgs::All);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
}
case Builtin::BI__atomic_test_and_set: {
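    // Lowered to an atomicrmw xchg of the i8 constant 1. A constant memory
    // order maps directly to an atomic ordering; a runtime order is
    // dispatched through a switch over the five valid orderings.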
// Look at the argument type to determine whether this is a volatile
// operation. The parameter type is always volatile.
QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
bool Volatile =
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Value *Ptr = EmitScalarExpr(E->getArg(0));
unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
Value *NewVal = Builder.getInt8(1);
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
AtomicRMWInst *Result = nullptr;
switch (ord) {
case 0: // memory_order_relaxed
default: // invalid order
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
Ptr, NewVal,
llvm::Monotonic);
break;
case 1: // memory_order_consume
case 2: // memory_order_acquire
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
Ptr, NewVal,
llvm::Acquire);
break;
case 3: // memory_order_release
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
Ptr, NewVal,
llvm::Release);
break;
case 4: // memory_order_acq_rel
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
Ptr, NewVal,
llvm::AcquireRelease);
break;
case 5: // memory_order_seq_cst
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
Ptr, NewVal,
llvm::SequentiallyConsistent);
break;
}
Result->setVolatile(Volatile);
return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
}
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
llvm::BasicBlock *BBs[5] = {
createBasicBlock("monotonic", CurFn),
createBasicBlock("acquire", CurFn),
createBasicBlock("release", CurFn),
createBasicBlock("acqrel", CurFn),
createBasicBlock("seqcst", CurFn)
};
llvm::AtomicOrdering Orders[5] = {
llvm::Monotonic, llvm::Acquire, llvm::Release,
llvm::AcquireRelease, llvm::SequentiallyConsistent
};
Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
Builder.SetInsertPoint(ContBB);
PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
for (unsigned i = 0; i < 5; ++i) {
Builder.SetInsertPoint(BBs[i]);
AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
Ptr, NewVal, Orders[i]);
RMW->setVolatile(Volatile);
Result->addIncoming(RMW, BBs[i]);
Builder.CreateBr(ContBB);
}
SI->addCase(Builder.getInt32(0), BBs[0]);
SI->addCase(Builder.getInt32(1), BBs[1]);
SI->addCase(Builder.getInt32(2), BBs[1]);
SI->addCase(Builder.getInt32(3), BBs[2]);
SI->addCase(Builder.getInt32(4), BBs[3]);
SI->addCase(Builder.getInt32(5), BBs[4]);
Builder.SetInsertPoint(ContBB);
return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
}
case Builtin::BI__atomic_clear: {
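    // Lowered to an atomic store of the i8 constant 0. Only relaxed, release,
    // and seq_cst orderings are valid for a clear.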
QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
bool Volatile =
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Value *Ptr = EmitScalarExpr(E->getArg(0));
unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
Value *NewVal = Builder.getInt8(0);
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
Store->setAlignment(1);
switch (ord) {
case 0: // memory_order_relaxed
default: // invalid order
Store->setOrdering(llvm::Monotonic);
break;
case 3: // memory_order_release
Store->setOrdering(llvm::Release);
break;
case 5: // memory_order_seq_cst
Store->setOrdering(llvm::SequentiallyConsistent);
break;
}
return RValue::get(nullptr);
}
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
llvm::BasicBlock *BBs[3] = {
createBasicBlock("monotonic", CurFn),
createBasicBlock("release", CurFn),
createBasicBlock("seqcst", CurFn)
};
llvm::AtomicOrdering Orders[3] = {
llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
};
Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
for (unsigned i = 0; i < 3; ++i) {
Builder.SetInsertPoint(BBs[i]);
StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
Store->setAlignment(1);
Store->setOrdering(Orders[i]);
Builder.CreateBr(ContBB);
}
SI->addCase(Builder.getInt32(0), BBs[0]);
SI->addCase(Builder.getInt32(3), BBs[1]);
SI->addCase(Builder.getInt32(5), BBs[2]);
Builder.SetInsertPoint(ContBB);
return RValue::get(nullptr);
}
case Builtin::BI__atomic_thread_fence:
case Builtin::BI__atomic_signal_fence:
case Builtin::BI__c11_atomic_thread_fence:
case Builtin::BI__c11_atomic_signal_fence: {
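    // A signal fence only needs single-thread scope; a thread fence must be
    // cross-thread. As above, a constant order maps directly to a single
    // fence, while a runtime order is dispatched through a switch.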
llvm::SynchronizationScope Scope;
if (BuiltinID == Builtin::BI__atomic_signal_fence ||
BuiltinID == Builtin::BI__c11_atomic_signal_fence)
Scope = llvm::SingleThread;
else
Scope = llvm::CrossThread;
Value *Order = EmitScalarExpr(E->getArg(0));
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
switch (ord) {
case 0: // memory_order_relaxed
default: // invalid order
break;
case 1: // memory_order_consume
case 2: // memory_order_acquire
Builder.CreateFence(llvm::Acquire, Scope);
break;
case 3: // memory_order_release
Builder.CreateFence(llvm::Release, Scope);
break;
case 4: // memory_order_acq_rel
Builder.CreateFence(llvm::AcquireRelease, Scope);
break;
case 5: // memory_order_seq_cst
Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
break;
}
return RValue::get(nullptr);
}
llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
AcquireBB = createBasicBlock("acquire", CurFn);
ReleaseBB = createBasicBlock("release", CurFn);
AcqRelBB = createBasicBlock("acqrel", CurFn);
SeqCstBB = createBasicBlock("seqcst", CurFn);
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
Builder.SetInsertPoint(AcquireBB);
Builder.CreateFence(llvm::Acquire, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(1), AcquireBB);
SI->addCase(Builder.getInt32(2), AcquireBB);
Builder.SetInsertPoint(ReleaseBB);
Builder.CreateFence(llvm::Release, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(3), ReleaseBB);
Builder.SetInsertPoint(AcqRelBB);
Builder.CreateFence(llvm::AcquireRelease, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(4), AcqRelBB);
Builder.SetInsertPoint(SeqCstBB);
Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(5), SeqCstBB);
Builder.SetInsertPoint(ContBB);
return RValue::get(nullptr);
}
// Library functions with special handling.
case Builtin::BIsqrt:
case Builtin::BIsqrtf:
case Builtin::BIsqrtl: {
// Transform a call to sqrt* into a @llvm.sqrt.* intrinsic call, but only
// in finite- or unsafe-math mode (the intrinsic has different semantics
// for handling negative numbers compared to the library function, so
// -fmath-errno=0 is not enough).
if (!FD->hasAttr<ConstAttr>())
break;
if (!(CGM.getCodeGenOpts().UnsafeFPMath ||
CGM.getCodeGenOpts().NoNaNsFPMath))
break;
Value *Arg0 = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = Arg0->getType();
Value *F = CGM.getIntrinsic(Intrinsic::sqrt, ArgType);
return RValue::get(Builder.CreateCall(F, Arg0));
}
case Builtin::BI__builtin_pow:
case Builtin::BI__builtin_powf:
case Builtin::BI__builtin_powl:
case Builtin::BIpow:
case Builtin::BIpowf:
case Builtin::BIpowl: {
// Transform a call to pow* into a @llvm.pow.* intrinsic call.
if (!FD->hasAttr<ConstAttr>())
break;
Value *Base = EmitScalarExpr(E->getArg(0));
Value *Exponent = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = Base->getType();
Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
return RValue::get(Builder.CreateCall(F, {Base, Exponent}));
}
case Builtin::BIfma:
case Builtin::BIfmaf:
case Builtin::BIfmal:
case Builtin::BI__builtin_fma:
case Builtin::BI__builtin_fmaf:
case Builtin::BI__builtin_fmal: {
// Rewrite fma to intrinsic.
Value *FirstArg = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = FirstArg->getType();
Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
return RValue::get(
Builder.CreateCall(F, {FirstArg, EmitScalarExpr(E->getArg(1)),
EmitScalarExpr(E->getArg(2))}));
}
case Builtin::BI__builtin_signbit:
case Builtin::BI__builtin_signbitf:
case Builtin::BI__builtin_signbitl: {
return RValue::get(
Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
ConvertType(E->getType())));
}
case Builtin::BI__builtin_annotation: {
llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
AnnVal->getType());
// Get the annotation string, go through casts. Sema requires this to be a
    // non-wide string literal, potentially cast, so the cast<> is safe.
const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
}
case Builtin::BI__builtin_addcb:
case Builtin::BI__builtin_addcs:
case Builtin::BI__builtin_addc:
case Builtin::BI__builtin_addcl:
case Builtin::BI__builtin_addcll:
case Builtin::BI__builtin_subcb:
case Builtin::BI__builtin_subcs:
case Builtin::BI__builtin_subc:
case Builtin::BI__builtin_subcl:
case Builtin::BI__builtin_subcll: {
// We translate all of these builtins from expressions of the form:
// int x = ..., y = ..., carryin = ..., carryout, result;
// result = __builtin_addc(x, y, carryin, &carryout);
//
// to LLVM IR of the form:
//
// %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
// %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
// %carry1 = extractvalue {i32, i1} %tmp1, 1
// %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
// i32 %carryin)
// %result = extractvalue {i32, i1} %tmp2, 0
// %carry2 = extractvalue {i32, i1} %tmp2, 1
// %tmp3 = or i1 %carry1, %carry2
// %tmp4 = zext i1 %tmp3 to i32
// store i32 %tmp4, i32* %carryout
// Scalarize our inputs.
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
std::pair<llvm::Value*, unsigned> CarryOutPtr =
EmitPointerWithAlignment(E->getArg(3));
// Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
llvm::Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
default: llvm_unreachable("Unknown multiprecision builtin id.");
case Builtin::BI__builtin_addcb:
case Builtin::BI__builtin_addcs:
case Builtin::BI__builtin_addc:
case Builtin::BI__builtin_addcl:
case Builtin::BI__builtin_addcll:
IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
break;
case Builtin::BI__builtin_subcb:
case Builtin::BI__builtin_subcs:
case Builtin::BI__builtin_subc:
case Builtin::BI__builtin_subcl:
case Builtin::BI__builtin_subcll:
IntrinsicId = llvm::Intrinsic::usub_with_overflow;
break;
}
// Construct our resulting LLVM IR expression.
llvm::Value *Carry1;
llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
X, Y, Carry1);
llvm::Value *Carry2;
llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
Sum1, Carryin, Carry2);
llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
X->getType());
llvm::StoreInst *CarryOutStore = Builder.CreateStore(CarryOut,
CarryOutPtr.first);
CarryOutStore->setAlignment(CarryOutPtr.second);
return RValue::get(Sum2);
}
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:
case Builtin::BI__builtin_usub_overflow:
case Builtin::BI__builtin_usubl_overflow:
case Builtin::BI__builtin_usubll_overflow:
case Builtin::BI__builtin_umul_overflow:
case Builtin::BI__builtin_umull_overflow:
case Builtin::BI__builtin_umulll_overflow:
case Builtin::BI__builtin_sadd_overflow:
case Builtin::BI__builtin_saddl_overflow:
case Builtin::BI__builtin_saddll_overflow:
case Builtin::BI__builtin_ssub_overflow:
case Builtin::BI__builtin_ssubl_overflow:
case Builtin::BI__builtin_ssubll_overflow:
case Builtin::BI__builtin_smul_overflow:
case Builtin::BI__builtin_smull_overflow:
case Builtin::BI__builtin_smulll_overflow: {
// We translate all of these builtins directly to the relevant llvm IR node.
// Scalarize our inputs.
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
std::pair<llvm::Value *, unsigned> SumOutPtr =
EmitPointerWithAlignment(E->getArg(2));
// Decide which of the overflow intrinsics we are lowering to:
llvm::Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
default: llvm_unreachable("Unknown security overflow builtin id.");
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:
IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
break;
case Builtin::BI__builtin_usub_overflow:
case Builtin::BI__builtin_usubl_overflow:
case Builtin::BI__builtin_usubll_overflow:
IntrinsicId = llvm::Intrinsic::usub_with_overflow;
break;
case Builtin::BI__builtin_umul_overflow:
case Builtin::BI__builtin_umull_overflow:
case Builtin::BI__builtin_umulll_overflow:
IntrinsicId = llvm::Intrinsic::umul_with_overflow;
break;
case Builtin::BI__builtin_sadd_overflow:
case Builtin::BI__builtin_saddl_overflow:
case Builtin::BI__builtin_saddll_overflow:
IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
break;
case Builtin::BI__builtin_ssub_overflow:
case Builtin::BI__builtin_ssubl_overflow:
case Builtin::BI__builtin_ssubll_overflow:
IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
break;
case Builtin::BI__builtin_smul_overflow:
case Builtin::BI__builtin_smull_overflow:
case Builtin::BI__builtin_smulll_overflow:
IntrinsicId = llvm::Intrinsic::smul_with_overflow;
break;
}
llvm::Value *Carry;
llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
llvm::StoreInst *SumOutStore = Builder.CreateStore(Sum, SumOutPtr.first);
SumOutStore->setAlignment(SumOutPtr.second);
return RValue::get(Carry);
}
case Builtin::BI__builtin_addressof:
return RValue::get(EmitLValue(E->getArg(0)).getAddress());
case Builtin::BI__builtin_operator_new:
return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
E->getArg(0), false);
case Builtin::BI__builtin_operator_delete:
return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
E->getArg(0), true);
case Builtin::BI__noop:
// __noop always evaluates to an integer literal zero.
return RValue::get(ConstantInt::get(IntTy, 0));
case Builtin::BI__builtin_call_with_static_chain: {
const CallExpr *Call = cast<CallExpr>(E->getArg(0));
const Expr *Chain = E->getArg(1);
return EmitCall(Call->getCallee()->getType(),
EmitScalarExpr(Call->getCallee()), Call, ReturnValue,
Call->getCalleeDecl(), EmitScalarExpr(Chain));
}
case Builtin::BI_InterlockedExchange:
case Builtin::BI_InterlockedExchangePointer:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
case Builtin::BI_InterlockedCompareExchangePointer: {
llvm::Type *RTy;
llvm::IntegerType *IntType =
IntegerType::get(getLLVMContext(),
getContext().getTypeSize(E->getType()));
llvm::Type *IntPtrType = IntType->getPointerTo();
llvm::Value *Destination =
Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
RTy = Exchange->getType();
Exchange = Builder.CreatePtrToInt(Exchange, IntType);
llvm::Value *Comparand =
Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
SequentiallyConsistent,
SequentiallyConsistent);
Result->setVolatile(true);
return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
0),
RTy));
}
case Builtin::BI_InterlockedCompareExchange: {
AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
EmitScalarExpr(E->getArg(0)),
EmitScalarExpr(E->getArg(2)),
EmitScalarExpr(E->getArg(1)),
SequentiallyConsistent,
SequentiallyConsistent);
CXI->setVolatile(true);
return RValue::get(Builder.CreateExtractValue(CXI, 0));
}
case Builtin::BI_InterlockedIncrement: {
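    // atomicrmw yields the value the memory held before the operation, so
    // add 1 below to produce the incremented value that _InterlockedIncrement
    // returns.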
AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
AtomicRMWInst::Add,
EmitScalarExpr(E->getArg(0)),
ConstantInt::get(Int32Ty, 1),
llvm::SequentiallyConsistent);
RMWI->setVolatile(true);
return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(Int32Ty, 1)));
}
case Builtin::BI_InterlockedDecrement: {
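    // As above, atomicrmw yields the old value; subtract 1 to produce the
    // decremented result _InterlockedDecrement returns.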
AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
AtomicRMWInst::Sub,
EmitScalarExpr(E->getArg(0)),
ConstantInt::get(Int32Ty, 1),
llvm::SequentiallyConsistent);
RMWI->setVolatile(true);
return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(Int32Ty, 1)));
}
case Builtin::BI_InterlockedExchangeAdd: {
AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
AtomicRMWInst::Add,
EmitScalarExpr(E->getArg(0)),
EmitScalarExpr(E->getArg(1)),
llvm::SequentiallyConsistent);
RMWI->setVolatile(true);
return RValue::get(RMWI);
}
case Builtin::BI__readfsdword: {
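    // Address space 257 is x86's FS segment, so this emits a volatile
    // FS-relative dword load at the given offset.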
Value *IntToPtr =
Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
llvm::PointerType::get(CGM.Int32Ty, 257));
LoadInst *Load =
Builder.CreateAlignedLoad(IntToPtr, /*Align=*/4, /*isVolatile=*/true);
return RValue::get(Load);
}
#if 0 // HLSL Change Start - no support for exception handling
case Builtin::BI__exception_code:
case Builtin::BI_exception_code:
return RValue::get(EmitSEHExceptionCode());
case Builtin::BI__exception_info:
case Builtin::BI_exception_info:
return RValue::get(EmitSEHExceptionInfo());
case Builtin::BI__abnormal_termination:
case Builtin::BI_abnormal_termination:
return RValue::get(EmitSEHAbnormalTermination());
case Builtin::BI_setjmpex: {
if (getTarget().getTriple().isOSMSVCRT()) {
llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy};
llvm::AttributeSet ReturnsTwiceAttr =
AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
llvm::Attribute::ReturnsTwice);
llvm::Constant *SetJmpEx = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false),
"_setjmpex", ReturnsTwiceAttr);
llvm::Value *Buf = Builder.CreateBitOrPointerCast(
EmitScalarExpr(E->getArg(0)), Int8PtrTy);
llvm::Value *FrameAddr =
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
ConstantInt::get(Int32Ty, 0));
llvm::Value *Args[] = {Buf, FrameAddr};
llvm::CallSite CS = EmitRuntimeCallOrInvoke(SetJmpEx, Args);
CS.setAttributes(ReturnsTwiceAttr);
return RValue::get(CS.getInstruction());
}
break;
}
case Builtin::BI_setjmp: {
if (getTarget().getTriple().isOSMSVCRT()) {
llvm::AttributeSet ReturnsTwiceAttr =
AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
llvm::Attribute::ReturnsTwice);
llvm::Value *Buf = Builder.CreateBitOrPointerCast(
EmitScalarExpr(E->getArg(0)), Int8PtrTy);
llvm::CallSite CS;
if (getTarget().getTriple().getArch() == llvm::Triple::x86) {
llvm::Type *ArgTypes[] = {Int8PtrTy, IntTy};
llvm::Constant *SetJmp3 = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/true),
"_setjmp3", ReturnsTwiceAttr);
llvm::Value *Count = ConstantInt::get(IntTy, 0);
llvm::Value *Args[] = {Buf, Count};
CS = EmitRuntimeCallOrInvoke(SetJmp3, Args);
} else {
llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy};
llvm::Constant *SetJmp = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false),
"_setjmp", ReturnsTwiceAttr);
llvm::Value *FrameAddr =
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
ConstantInt::get(Int32Ty, 0));
llvm::Value *Args[] = {Buf, FrameAddr};
CS = EmitRuntimeCallOrInvoke(SetJmp, Args);
}
CS.setAttributes(ReturnsTwiceAttr);
return RValue::get(CS.getInstruction());
}
break;
}
case Builtin::BI__GetExceptionInfo: {
if (llvm::GlobalVariable *GV =
CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
break;
}
#endif // HLSL Change Ends - no support for exception handling
}
// If this is an alias for a lib function (e.g. __builtin_sin), emit
// the call using the normal call path, but using the unmangled
// version of the function name.
if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
return emitLibraryCall(*this, FD, E,
CGM.getBuiltinLibFunction(FD, BuiltinID));
// If this is a predefined lib function (e.g. malloc), emit the call
// using exactly the normal call path.
if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));
// See if we have a target specific intrinsic.
const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
if (const char *Prefix =
llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch())) {
IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
    // NOTE: we don't need to perform a compatibility flag check here since
    // the MS builtins are declared in Builtins*.def via LANGBUILTIN with
    // ALL_MS_LANGUAGES, so they are filtered out earlier.
if (IntrinsicID == Intrinsic::not_intrinsic)
IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix, Name);
}
if (IntrinsicID != Intrinsic::not_intrinsic) {
SmallVector<Value*, 16> Args;
// Find out if any arguments are required to be integer constant
// expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
Function *F = CGM.getIntrinsic(IntrinsicID);
llvm::FunctionType *FTy = F->getFunctionType();
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
Value *ArgValue;
// If this is a normal argument, just emit it as a scalar.
if ((ICEArguments & (1 << i)) == 0) {
ArgValue = EmitScalarExpr(E->getArg(i));
} else {
// If this is required to be a constant, constant fold it so that we
// know that the generated intrinsic gets a ConstantInt.
llvm::APSInt Result;
bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
assert(IsConst && "Constant arg isn't actually constant?");
(void)IsConst;
ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
}
// If the intrinsic arg type is different from the builtin arg type
// we need to do a bit cast.
llvm::Type *PTy = FTy->getParamType(i);
if (PTy != ArgValue->getType()) {
assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
"Must be able to losslessly bit cast to param");
ArgValue = Builder.CreateBitCast(ArgValue, PTy);
}
Args.push_back(ArgValue);
}
Value *V = Builder.CreateCall(F, Args);
QualType BuiltinRetType = E->getType();
llvm::Type *RetTy = VoidTy;
if (!BuiltinRetType->isVoidType())
RetTy = ConvertType(BuiltinRetType);
if (RetTy != V->getType()) {
assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
"Must be able to losslessly bit cast result type");
V = Builder.CreateBitCast(V, RetTy);
}
return RValue::get(V);
}
// See if we have a target specific builtin that needs to be lowered.
if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
return RValue::get(V);
ErrorUnsupported(E, "builtin function");
// Unknown builtin, for now just dump it out and return undef.
return GetUndefRValue(E->getType());
}
Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
#if 1 // HLSL Change Starts
return nullptr;
#else
switch (getTarget().getTriple().getArch()) {
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
return EmitARMBuiltinExpr(BuiltinID, E);
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
return EmitAArch64BuiltinExpr(BuiltinID, E);
case llvm::Triple::x86:
case llvm::Triple::x86_64:
return EmitX86BuiltinExpr(BuiltinID, E);
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
return EmitPPCBuiltinExpr(BuiltinID, E);
case llvm::Triple::r600:
case llvm::Triple::amdgcn:
return EmitAMDGPUBuiltinExpr(BuiltinID, E);
case llvm::Triple::systemz:
return EmitSystemZBuiltinExpr(BuiltinID, E);
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
return EmitNVPTXBuiltinExpr(BuiltinID, E);
default:
return nullptr;
}
#endif // HLSL Change Ends
}
#if 0 // HLSL Change Starts
static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
NeonTypeFlags TypeFlags,
bool V1Ty=false) {
int IsQuad = TypeFlags.isQuad();
switch (TypeFlags.getEltType()) {
case NeonTypeFlags::Int8:
case NeonTypeFlags::Poly8:
return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
case NeonTypeFlags::Int16:
case NeonTypeFlags::Poly16:
case NeonTypeFlags::Float16:
return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Int32:
return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
case NeonTypeFlags::Int64:
case NeonTypeFlags::Poly64:
return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
case NeonTypeFlags::Poly128:
    // FIXME: i128 and f128 don't get full support in Clang and LLVM;
    // a lot of the i128 and f128 API is missing, so we use v16i8 to
    // represent poly128 and let it get pattern-matched.
return llvm::VectorType::get(CGF->Int8Ty, 16);
case NeonTypeFlags::Float32:
return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
case NeonTypeFlags::Float64:
return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
}
llvm_unreachable("Unknown vector element type!");
}
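/// EmitNeonSplat - Broadcast the lane of V selected by the constant C across
/// every lane, using a shufflevector with a splatted index mask.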
Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
Value* SV = llvm::ConstantVector::getSplat(nElts, C);
return Builder.CreateShuffleVector(V, V, SV, "lane");
}
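/// EmitNeonCall - Emit a call to the NEON intrinsic F, bitcasting each
/// operand to the matching parameter type first. When 'shift' is nonzero,
/// that operand is instead expanded into a constant shift vector (negated
/// for right shifts).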
Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
const char *name,
unsigned shift, bool rightshift) {
unsigned j = 0;
for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
ai != ae; ++ai, ++j)
if (shift > 0 && shift == j)
Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
else
Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
return Builder.CreateCall(F, Ops, name);
}
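/// EmitNeonShiftVector - Splat the scalar shift amount V across a vector of
/// type Ty, negating it when 'neg' is set (i.e. for right shifts).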
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
bool neg) {
int SV = cast<ConstantInt>(V)->getSExtValue();
llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
}
/// \brief Right-shift a vector by a constant.
Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
llvm::Type *Ty, bool usgn,
const char *name) {
llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
int EltSize = VTy->getScalarSizeInBits();
Vec = Builder.CreateBitCast(Vec, Ty);
// lshr/ashr are undefined when the shift amount is equal to the vector
// element size.
if (ShiftAmt == EltSize) {
if (usgn) {
// Right-shifting an unsigned value by its size yields 0.
llvm::Constant *Zero = ConstantInt::get(VTy->getElementType(), 0);
return llvm::ConstantVector::getSplat(VTy->getNumElements(), Zero);
} else {
// Right-shifting a signed value by its size is equivalent
// to a shift of size-1.
--ShiftAmt;
Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
}
}
Shift = EmitNeonShiftVector(Shift, Ty, false);
if (usgn)
return Builder.CreateLShr(Vec, Shift, name);
else
return Builder.CreateAShr(Vec, Shift, name);
}
#endif // HLSL Change Ends
/// EmitPointerWithAlignment - Given an expression with a pointer type, find
/// the alignment of the type referenced by the pointer. Skip over implicit
/// casts.
std::pair<llvm::Value*, unsigned>
CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
assert(Addr->getType()->isPointerType());
Addr = Addr->IgnoreParens();
if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Addr)) {
if ((ICE->getCastKind() == CK_BitCast || ICE->getCastKind() == CK_NoOp) &&
ICE->getSubExpr()->getType()->isPointerType()) {
std::pair<llvm::Value*, unsigned> Ptr =
EmitPointerWithAlignment(ICE->getSubExpr());
Ptr.first = Builder.CreateBitCast(Ptr.first,
ConvertType(Addr->getType()));
return Ptr;
} else if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
LValue LV = EmitLValue(ICE->getSubExpr());
unsigned Align = LV.getAlignment().getQuantity();
if (!Align) {
// FIXME: Once LValues are fixed to always set alignment,
// zap this code.
QualType PtTy = ICE->getSubExpr()->getType();
if (!PtTy->isIncompleteType())
Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
else
Align = 1;
}
return std::make_pair(LV.getAddress(), Align);
}
}
if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Addr)) {
if (UO->getOpcode() == UO_AddrOf) {
LValue LV = EmitLValue(UO->getSubExpr());
unsigned Align = LV.getAlignment().getQuantity();
if (!Align) {
// FIXME: Once LValues are fixed to always set alignment,
// zap this code.
QualType PtTy = UO->getSubExpr()->getType();
if (!PtTy->isIncompleteType())
Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
else
Align = 1;
}
return std::make_pair(LV.getAddress(), Align);
}
}
unsigned Align = 1;
QualType PtTy = Addr->getType()->getPointeeType();
if (!PtTy->isIncompleteType())
Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
return std::make_pair(EmitScalarExpr(Addr), Align);
}
#if 0 // HLSL Change Starts
enum {
AddRetType = (1 << 0),
Add1ArgType = (1 << 1),
Add2ArgTypes = (1 << 2),
VectorizeRetType = (1 << 3),
VectorizeArgTypes = (1 << 4),
InventFloatType = (1 << 5),
UnsignedAlts = (1 << 6),
Use64BitVectors = (1 << 7),
Use128BitVectors = (1 << 8),
Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
VectorRet = AddRetType | VectorizeRetType,
VectorRetGetArgs01 =
AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
FpCmpzModifiers =
AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
};
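// Each entry maps one NEON builtin to the LLVM intrinsic(s) implementing it,
// together with TypeModifier flags (from the enum above) that describe how
// the intrinsic's overloaded types are derived from the call.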
struct NeonIntrinsicInfo {
unsigned BuiltinID;
unsigned LLVMIntrinsic;
unsigned AltLLVMIntrinsic;
const char *NameHint;
unsigned TypeModifier;
bool operator<(unsigned RHSBuiltinID) const {
return BuiltinID < RHSBuiltinID;
}
};
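// NEONMAP0/1/2 build table entries with zero, one, or two LLVM intrinsics;
// the second intrinsic of a NEONMAP2 entry is the alternate (e.g. signed)
// form selected via the modifier flags.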
#define NEONMAP0(NameBase) \
{ NEON::BI__builtin_neon_ ## NameBase, 0, 0, #NameBase, 0 }
#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
{ NEON:: BI__builtin_neon_ ## NameBase, \
Intrinsic::LLVMIntrinsic, 0, #NameBase, TypeModifier }
#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
{ NEON:: BI__builtin_neon_ ## NameBase, \
Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
#NameBase, TypeModifier }
static NeonIntrinsicInfo ARMSIMDIntrinsicMap[] = {
NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
NEONMAP1(vabs_v, arm_neon_vabs, 0),
NEONMAP1(vabsq_v, arm_neon_vabs, 0),
NEONMAP0(vaddhn_v),
NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
NEONMAP1(vaeseq_v, arm_neon_aese, 0),
NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
NEONMAP1(vcage_v, arm_neon_vacge, 0),
NEONMAP1(vcageq_v, arm_neon_vacge, 0),
NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
NEONMAP1(vcale_v, arm_neon_vacge, 0),
NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
NEONMAP1(vclz_v, ctlz, Add1ArgType),
NEONMAP1(vclzq_v, ctlz, Add1ArgType),
NEONMAP1(vcnt_v, ctpop, Add1ArgType),
NEONMAP1(vcntq_v, ctpop, Add1ArgType),
NEONMAP1(vcvt_f16_v, arm_neon_vcvtfp2hf, 0),
NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
NEONMAP0(vcvt_f32_v),
NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP0(vcvt_s32_v),
NEONMAP0(vcvt_s64_v),
NEONMAP0(vcvt_u32_v),
NEONMAP0(vcvt_u64_v),
NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
NEONMAP0(vcvtq_f32_v),
NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP0(vcvtq_s32_v),
NEONMAP0(vcvtq_s64_v),
NEONMAP0(vcvtq_u32_v),
NEONMAP0(vcvtq_u64_v),
NEONMAP0(vext_v),
NEONMAP0(vextq_v),
NEONMAP0(vfma_v),
NEONMAP0(vfmaq_v),
NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
NEONMAP0(vld1_dup_v),
NEONMAP1(vld1_v, arm_neon_vld1, 0),
NEONMAP0(vld1q_dup_v),
NEONMAP1(vld1q_v, arm_neon_vld1, 0),
NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
NEONMAP1(vld2_v, arm_neon_vld2, 0),
NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
NEONMAP1(vld2q_v, arm_neon_vld2, 0),
NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
NEONMAP1(vld3_v, arm_neon_vld3, 0),
NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
NEONMAP1(vld3q_v, arm_neon_vld3, 0),
NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
NEONMAP1(vld4_v, arm_neon_vld4, 0),
NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
NEONMAP1(vld4q_v, arm_neon_vld4, 0),
NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
NEONMAP0(vmull_v),
NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
NEONMAP2(vqadd_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
NEONMAP2(vqaddq_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
NEONMAP2(vqdmlal_v, arm_neon_vqdmull, arm_neon_vqadds, 0),
NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, arm_neon_vqsubs, 0),
NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
NEONMAP2(vqsub_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
NEONMAP2(vqsubq_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
NEONMAP0(vshl_n_v),
NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
NEONMAP0(vshll_n_v),
NEONMAP0(vshlq_n_v),
NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
NEONMAP0(vshr_n_v),
NEONMAP0(vshrn_n_v),
NEONMAP0(vshrq_n_v),
NEONMAP1(vst1_v, arm_neon_vst1, 0),
NEONMAP1(vst1q_v, arm_neon_vst1, 0),
NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
NEONMAP1(vst2_v, arm_neon_vst2, 0),
NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
NEONMAP1(vst2q_v, arm_neon_vst2, 0),
NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
NEONMAP1(vst3_v, arm_neon_vst3, 0),
NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
NEONMAP1(vst3q_v, arm_neon_vst3, 0),
NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
NEONMAP1(vst4_v, arm_neon_vst4, 0),
NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
NEONMAP1(vst4q_v, arm_neon_vst4, 0),
NEONMAP0(vsubhn_v),
NEONMAP0(vtrn_v),
NEONMAP0(vtrnq_v),
NEONMAP0(vtst_v),
NEONMAP0(vtstq_v),
NEONMAP0(vuzp_v),
NEONMAP0(vuzpq_v),
NEONMAP0(vzip_v),
NEONMAP0(vzipq_v)
};
static NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vabs_v, aarch64_neon_abs, 0),
NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
NEONMAP0(vaddhn_v),
NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
NEONMAP1(vcage_v, aarch64_neon_facge, 0),
NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
NEONMAP1(vcale_v, aarch64_neon_facge, 0),
NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
NEONMAP1(vclz_v, ctlz, Add1ArgType),
NEONMAP1(vclzq_v, ctlz, Add1ArgType),
NEONMAP1(vcnt_v, ctpop, Add1ArgType),
NEONMAP1(vcntq_v, ctpop, Add1ArgType),
NEONMAP1(vcvt_f16_v, aarch64_neon_vcvtfp2hf, 0),
NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
NEONMAP0(vcvt_f32_v),
NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP0(vcvtq_f32_v),
NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
NEONMAP0(vext_v),
NEONMAP0(vextq_v),
NEONMAP0(vfma_v),
NEONMAP0(vfmaq_v),
NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
NEONMAP0(vshl_n_v),
NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
NEONMAP0(vshll_n_v),
NEONMAP0(vshlq_n_v),
NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
NEONMAP0(vshr_n_v),
NEONMAP0(vshrn_n_v),
NEONMAP0(vshrq_n_v),
NEONMAP0(vsubhn_v),
NEONMAP0(vtst_v),
NEONMAP0(vtstq_v),
};
static NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
};
#undef NEONMAP0
#undef NEONMAP1
#undef NEONMAP2
static bool NEONSIMDIntrinsicsProvenSorted = false;
static bool AArch64SIMDIntrinsicsProvenSorted = false;
static bool AArch64SISDIntrinsicsProvenSorted = false;
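// Binary-search IntrinsicMap for BuiltinID, checking once per map (in
// asserts builds) that the table really is sorted by BuiltinID.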
static const NeonIntrinsicInfo *
findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
unsigned BuiltinID, bool &MapProvenSorted) {
#ifndef NDEBUG
if (!MapProvenSorted) {
// FIXME: use std::is_sorted once C++11 is allowed
for (unsigned i = 0; i < IntrinsicMap.size() - 1; ++i)
assert(IntrinsicMap[i].BuiltinID <= IntrinsicMap[i + 1].BuiltinID);
MapProvenSorted = true;
}
#endif
const NeonIntrinsicInfo *Builtin =
std::lower_bound(IntrinsicMap.begin(), IntrinsicMap.end(), BuiltinID);
if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
return Builtin;
return nullptr;
}
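/// LookupNeonLLVMIntrinsic - Build the overloaded type list implied by
/// Modifier (return type, argument types, and any forced vectorization) and
/// look up the corresponding declaration of the given intrinsic.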
Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
unsigned Modifier,
llvm::Type *ArgType,
const CallExpr *E) {
int VectorSize = 0;
if (Modifier & Use64BitVectors)
VectorSize = 64;
else if (Modifier & Use128BitVectors)
VectorSize = 128;
// Return type.
SmallVector<llvm::Type *, 3> Tys;
if (Modifier & AddRetType) {
llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
if (Modifier & VectorizeRetType)
Ty = llvm::VectorType::get(
Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
Tys.push_back(Ty);
}
// Arguments.
if (Modifier & VectorizeArgTypes) {
int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
ArgType = llvm::VectorType::get(ArgType, Elts);
}
if (Modifier & (Add1ArgType | Add2ArgTypes))
Tys.push_back(ArgType);
if (Modifier & Add2ArgTypes)
Tys.push_back(ArgType);
if (Modifier & InventFloatType)
Tys.push_back(FloatTy);
return CGM.getIntrinsic(IntrinsicID, Tys);
}
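// Emit a scalar (SISD) NEON builtin through the shared table: scalar
// operands are widened into one-element vectors where the intrinsic demands
// it, and a one-element vector result is extracted back to a scalar.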
static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
const NeonIntrinsicInfo &SISDInfo,
SmallVectorImpl<Value *> &Ops,
const CallExpr *E) {
unsigned BuiltinID = SISDInfo.BuiltinID;
  unsigned Int = SISDInfo.LLVMIntrinsic;
unsigned Modifier = SISDInfo.TypeModifier;
const char *s = SISDInfo.NameHint;
switch (BuiltinID) {
case NEON::BI__builtin_neon_vcled_s64:
case NEON::BI__builtin_neon_vcled_u64:
case NEON::BI__builtin_neon_vcles_f32:
case NEON::BI__builtin_neon_vcled_f64:
case NEON::BI__builtin_neon_vcltd_s64:
case NEON::BI__builtin_neon_vcltd_u64:
case NEON::BI__builtin_neon_vclts_f32:
case NEON::BI__builtin_neon_vcltd_f64:
case NEON::BI__builtin_neon_vcales_f32:
case NEON::BI__builtin_neon_vcaled_f64:
case NEON::BI__builtin_neon_vcalts_f32:
case NEON::BI__builtin_neon_vcaltd_f64:
    // Only one direction of comparisons actually exists: cmle is actually a
    // cmge with swapped operands. The table gives us the right intrinsic, but
    // we still need to do the swap.
std::swap(Ops[0], Ops[1]);
break;
}
assert(Int && "Generic code assumes a valid intrinsic");
// Determine the type(s) of this overloaded AArch64 intrinsic.
const Expr *Arg = E->getArg(0);
llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
int j = 0;
ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
ai != ae; ++ai, ++j) {
llvm::Type *ArgTy = ai->getType();
if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
ArgTy->getPrimitiveSizeInBits())
continue;
assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
// The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
// it before inserting.
Ops[j] =
CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
Ops[j] =
CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
}
Value *Result = CGF.EmitNeonCall(F, Ops, s);
llvm::Type *ResultType = CGF.ConvertType(E->getType());
if (ResultType->getPrimitiveSizeInBits() <
Result->getType()->getPrimitiveSizeInBits())
return CGF.Builder.CreateExtractElement(Result, C0);
return CGF.Builder.CreateBitCast(Result, ResultType, s);
}
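/// EmitCommonNeonBuiltinExpr - Shared lowering for NEON builtins that behave
/// identically on ARM and AArch64. The last call argument encodes the
/// NeonTypeFlags selecting the overloaded vector type.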
Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
const char *NameHint, unsigned Modifier, const CallExpr *E,
SmallVectorImpl<llvm::Value *> &Ops, llvm::Value *Align) {
// Get the last argument, which specifies the vector type.
llvm::APSInt NeonTypeConst;
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext()))
return nullptr;
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(NeonTypeConst.getZExtValue());
bool Usgn = Type.isUnsigned();
bool Quad = Type.isQuad();
llvm::VectorType *VTy = GetNeonType(this, Type);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
unsigned Int = LLVMIntrinsic;
if ((Modifier & UnsignedAlts) && !Usgn)
Int = AltLLVMIntrinsic;
switch (BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vabs_v:
case NEON::BI__builtin_neon_vabsq_v:
if (VTy->getElementType()->isFloatingPointTy())
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
case NEON::BI__builtin_neon_vaddhn_v: {
llvm::VectorType *SrcTy =
llvm::VectorType::getExtendedElementVectorType(VTy);
// %sum = add <4 x i32> %lhs, %rhs
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
// %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
SrcTy->getScalarSizeInBits() / 2);
ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
// %res = trunc <4 x i32> %high to <4 x i16>
return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
}
case NEON::BI__builtin_neon_vcale_v:
case NEON::BI__builtin_neon_vcaleq_v:
case NEON::BI__builtin_neon_vcalt_v:
case NEON::BI__builtin_neon_vcaltq_v:
std::swap(Ops[0], Ops[1]);
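    // Fall through: vcale/vcalt are lowered as vcage/vcagt with the operands
    // swapped above.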
case NEON::BI__builtin_neon_vcage_v:
case NEON::BI__builtin_neon_vcageq_v:
case NEON::BI__builtin_neon_vcagt_v:
case NEON::BI__builtin_neon_vcagtq_v: {
llvm::Type *VecFlt = llvm::VectorType::get(
VTy->getScalarSizeInBits() == 32 ? FloatTy : DoubleTy,
VTy->getNumElements());
llvm::Type *Tys[] = { VTy, VecFlt };
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
return EmitNeonCall(F, Ops, NameHint);
}
case NEON::BI__builtin_neon_vclz_v:
case NEON::BI__builtin_neon_vclzq_v:
    // We generate a target-independent intrinsic, which needs a second
    // argument for whether or not clz of zero is undefined; on ARM it isn't.
Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
break;
case NEON::BI__builtin_neon_vcvt_f32_v:
case NEON::BI__builtin_neon_vcvtq_f32_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad));
return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case NEON::BI__builtin_neon_vcvt_n_f32_v:
case NEON::BI__builtin_neon_vcvt_n_f64_v:
case NEON::BI__builtin_neon_vcvtq_n_f32_v:
case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
bool Double =
(cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
llvm::Type *FloatTy =
GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
: NeonTypeFlags::Float32,
false, Quad));
llvm::Type *Tys[2] = { FloatTy, Ty };
Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
case NEON::BI__builtin_neon_vcvt_n_s32_v:
case NEON::BI__builtin_neon_vcvt_n_u32_v:
case NEON::BI__builtin_neon_vcvt_n_s64_v:
case NEON::BI__builtin_neon_vcvt_n_u64_v:
case NEON::BI__builtin_neon_vcvtq_n_s32_v:
case NEON::BI__builtin_neon_vcvtq_n_u32_v:
case NEON::BI__builtin_neon_vcvtq_n_s64_v:
case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
bool Double =
(cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
llvm::Type *FloatTy =
GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
: NeonTypeFlags::Float32,
false, Quad));
llvm::Type *Tys[2] = { Ty, FloatTy };
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
case NEON::BI__builtin_neon_vcvt_s32_v:
case NEON::BI__builtin_neon_vcvt_u32_v:
case NEON::BI__builtin_neon_vcvt_s64_v:
case NEON::BI__builtin_neon_vcvt_u64_v:
case NEON::BI__builtin_neon_vcvtq_s32_v:
case NEON::BI__builtin_neon_vcvtq_u32_v:
case NEON::BI__builtin_neon_vcvtq_s64_v:
case NEON::BI__builtin_neon_vcvtq_u64_v: {
bool Double =
(cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
llvm::Type *FloatTy =
GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
: NeonTypeFlags::Float32,
false, Quad));
Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
: Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
}
case NEON::BI__builtin_neon_vcvta_s32_v:
case NEON::BI__builtin_neon_vcvta_s64_v:
case NEON::BI__builtin_neon_vcvta_u32_v:
case NEON::BI__builtin_neon_vcvta_u64_v:
case NEON::BI__builtin_neon_vcvtaq_s32_v:
case NEON::BI__builtin_neon_vcvtaq_s64_v:
case NEON::BI__builtin_neon_vcvtaq_u32_v:
case NEON::BI__builtin_neon_vcvtaq_u64_v:
case NEON::BI__builtin_neon_vcvtn_s32_v:
case NEON::BI__builtin_neon_vcvtn_s64_v:
case NEON::BI__builtin_neon_vcvtn_u32_v:
case NEON::BI__builtin_neon_vcvtn_u64_v:
case NEON::BI__builtin_neon_vcvtnq_s32_v:
case NEON::BI__builtin_neon_vcvtnq_s64_v:
case NEON::BI__builtin_neon_vcvtnq_u32_v:
case NEON::BI__builtin_neon_vcvtnq_u64_v:
case NEON::BI__builtin_neon_vcvtp_s32_v:
case NEON::BI__builtin_neon_vcvtp_s64_v:
case NEON::BI__builtin_neon_vcvtp_u32_v:
case NEON::BI__builtin_neon_vcvtp_u64_v:
case NEON::BI__builtin_neon_vcvtpq_s32_v:
case NEON::BI__builtin_neon_vcvtpq_s64_v:
case NEON::BI__builtin_neon_vcvtpq_u32_v:
case NEON::BI__builtin_neon_vcvtpq_u64_v:
case NEON::BI__builtin_neon_vcvtm_s32_v:
case NEON::BI__builtin_neon_vcvtm_s64_v:
case NEON::BI__builtin_neon_vcvtm_u32_v:
case NEON::BI__builtin_neon_vcvtm_u64_v:
case NEON::BI__builtin_neon_vcvtmq_s32_v:
case NEON::BI__builtin_neon_vcvtmq_s64_v:
case NEON::BI__builtin_neon_vcvtmq_u32_v:
case NEON::BI__builtin_neon_vcvtmq_u64_v: {
bool Double =
(cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
llvm::Type *InTy =
GetNeonType(this,
NeonTypeFlags(Double ? NeonTypeFlags::Float64
: NeonTypeFlags::Float32, false, Quad));
llvm::Type *Tys[2] = { Ty, InTy };
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
}
case NEON::BI__builtin_neon_vext_v:
case NEON::BI__builtin_neon_vextq_v: {
int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Value *SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
}
case NEON::BI__builtin_neon_vfma_v:
case NEON::BI__builtin_neon_vfmaq_v: {
Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    // The NEON intrinsic puts the accumulator first, unlike the LLVM fma.
return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
}
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v:
Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vld1");
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v:
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v:
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v: {
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
LoadInst *Ld = Builder.CreateLoad(Ops[0]);
Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
return EmitNeonSplat(Ops[0], CI);
}
case NEON::BI__builtin_neon_vld2_lane_v:
case NEON::BI__builtin_neon_vld2q_lane_v:
case NEON::BI__builtin_neon_vld3_lane_v:
case NEON::BI__builtin_neon_vld3q_lane_v:
case NEON::BI__builtin_neon_vld4_lane_v:
case NEON::BI__builtin_neon_vld4q_lane_v: {
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
for (unsigned I = 2; I < Ops.size() - 1; ++I)
Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
Ops.push_back(Align);
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vmovl_v: {
    llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
if (Usgn)
return Builder.CreateZExt(Ops[0], Ty, "vmovl");
return Builder.CreateSExt(Ops[0], Ty, "vmovl");
}
case NEON::BI__builtin_neon_vmovn_v: {
llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
}
case NEON::BI__builtin_neon_vmull_v:
    // FIXME: the integer vmull operations could be emitted in terms of pure
    // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
    // hoisting the exts outside loops; until GlobalISel comes along and can
    // see through such movement, this leads to bad CodeGen. So we need an
    // intrinsic for now.
Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
case NEON::BI__builtin_neon_vpadal_v:
case NEON::BI__builtin_neon_vpadalq_v: {
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
llvm::Type *EltTy =
llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
llvm::Type *NarrowTy =
llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
llvm::Type *Tys[2] = { Ty, NarrowTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
}
case NEON::BI__builtin_neon_vpaddl_v:
case NEON::BI__builtin_neon_vpaddlq_v: {
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
llvm::Type *NarrowTy =
llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
llvm::Type *Tys[2] = { Ty, NarrowTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
}
case NEON::BI__builtin_neon_vqdmlal_v:
case NEON::BI__builtin_neon_vqdmlsl_v: {
SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
Value *Mul = EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty),
MulOps, "vqdmlal");
SmallVector<Value *, 2> AccumOps;
AccumOps.push_back(Ops[0]);
AccumOps.push_back(Mul);
return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty),
AccumOps, NameHint);
}
case NEON::BI__builtin_neon_vqshl_n_v:
case NEON::BI__builtin_neon_vqshlq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
1, false);
case NEON::BI__builtin_neon_vqshlu_n_v:
case NEON::BI__builtin_neon_vqshluq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
1, false);
case NEON::BI__builtin_neon_vrecpe_v:
case NEON::BI__builtin_neon_vrecpeq_v:
case NEON::BI__builtin_neon_vrsqrte_v:
case NEON::BI__builtin_neon_vrsqrteq_v:
Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
case NEON::BI__builtin_neon_vrshr_n_v:
case NEON::BI__builtin_neon_vrshrq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
1, true);
case NEON::BI__builtin_neon_vshl_n_v:
case NEON::BI__builtin_neon_vshlq_n_v:
Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
"vshl_n");
case NEON::BI__builtin_neon_vshll_n_v: {
llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
if (Usgn)
Ops[0] = Builder.CreateZExt(Ops[0], VTy);
else
Ops[0] = Builder.CreateSExt(Ops[0], VTy);
Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
}
case NEON::BI__builtin_neon_vshrn_n_v: {
llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
if (Usgn)
Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
else
Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
}
case NEON::BI__builtin_neon_vshr_n_v:
case NEON::BI__builtin_neon_vshrq_n_v:
return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
case NEON::BI__builtin_neon_vst2_v:
case NEON::BI__builtin_neon_vst2q_v:
case NEON::BI__builtin_neon_vst3_v:
case NEON::BI__builtin_neon_vst3q_v:
case NEON::BI__builtin_neon_vst4_v:
case NEON::BI__builtin_neon_vst4q_v:
case NEON::BI__builtin_neon_vst2_lane_v:
case NEON::BI__builtin_neon_vst2q_lane_v:
case NEON::BI__builtin_neon_vst3_lane_v:
case NEON::BI__builtin_neon_vst3q_lane_v:
case NEON::BI__builtin_neon_vst4_lane_v:
case NEON::BI__builtin_neon_vst4q_lane_v:
Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "");
case NEON::BI__builtin_neon_vsubhn_v: {
llvm::VectorType *SrcTy =
llvm::VectorType::getExtendedElementVectorType(VTy);
// %sum = add <4 x i32> %lhs, %rhs
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
// %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
SrcTy->getScalarSizeInBits() / 2);
ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
// %res = trunc <4 x i32> %high to <4 x i16>
return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
}
case NEON::BI__builtin_neon_vtrn_v:
case NEON::BI__builtin_neon_vtrnq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back(Builder.getInt32(i+vi));
Indices.push_back(Builder.getInt32(i+e+vi));
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
SV = Builder.CreateStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vtst_v:
case NEON::BI__builtin_neon_vtstq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
ConstantAggregateZero::get(Ty));
return Builder.CreateSExt(Ops[0], Ty, "vtst");
}
case NEON::BI__builtin_neon_vuzp_v:
case NEON::BI__builtin_neon_vuzpq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
SV = Builder.CreateStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vzip_v:
case NEON::BI__builtin_neon_vzipq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
SV = Builder.CreateStore(SV, Addr);
}
return SV;
}
}
assert(Int && "Expected valid intrinsic number");
  // Determine the type(s) of this overloaded NEON intrinsic.
Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
Value *Result = EmitNeonCall(F, Ops, NameHint);
llvm::Type *ResultType = ConvertType(E->getType());
  // An AArch64 intrinsic may return a one-element vector; bitcast it back to
  // the scalar type the builtin expects.
return Builder.CreateBitCast(Result, ResultType, NameHint);
}
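/// EmitAArch64CompareBuiltinExpr - Emit a compare-against-zero builtin,
/// using the floating-point predicate Fp or the integer predicate Ip
/// depending on the operand type, and sign-extend the i1 result to Ty.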
Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
const CmpInst::Predicate Ip, const Twine &Name) {
llvm::Type *OTy = Op->getType();
// FIXME: this is utterly horrific. We should not be looking at previous
// codegen context to find out what needs doing. Unfortunately TableGen
// currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
// (etc).
if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
OTy = BI->getOperand(0)->getType();
Op = Builder.CreateBitCast(Op, OTy);
if (OTy->getScalarType()->isFloatingPointTy()) {
Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
} else {
Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
}
return Builder.CreateSExt(Op, Ty, Name);
}
static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
Value *ExtOp, Value *IndexOp,
llvm::Type *ResTy, unsigned IntID,
const char *Name) {
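// Pack a list of 64-bit table vectors into 128-bit tables by concatenating
// them pairwise, then emit the given TBL/TBX intrinsic on the result. For
// example, a vtbl3 with three d-register tables becomes a single tbl2 call
// on two 128-bit tables, the second padded with a zero high half.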
SmallVector<Value *, 2> TblOps;
if (ExtOp)
TblOps.push_back(ExtOp);
// Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
SmallVector<Constant*, 16> Indices;
llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i));
Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i+1));
}
Value *SV = llvm::ConstantVector::get(Indices);
int PairPos = 0, End = Ops.size() - 1;
while (PairPos < End) {
TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
Ops[PairPos+1], SV, Name));
PairPos += 2;
}
// If there's an odd number of 64-bit lookup tables, fill the high 64 bits
// of the last 128-bit lookup table with zero.
if (PairPos == End) {
Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
ZeroTbl, SV, Name));
}
Function *TblF;
TblOps.push_back(IndexOp);
TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
return CGF.EmitNeonCall(TblF, TblOps, Name);
}
Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
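// Map the ACLE hint builtins onto the immediate operand of llvm.arm.hint:
// 0 = nop, 1 = yield, 2 = wfe, 3 = wfi, 4 = sev, 5 = sevl.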
switch (BuiltinID) {
default:
return nullptr;
case ARM::BI__builtin_arm_nop:
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
llvm::ConstantInt::get(Int32Ty, 0));
case ARM::BI__builtin_arm_yield:
case ARM::BI__yield:
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
llvm::ConstantInt::get(Int32Ty, 1));
case ARM::BI__builtin_arm_wfe:
case ARM::BI__wfe:
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
llvm::ConstantInt::get(Int32Ty, 2));
case ARM::BI__builtin_arm_wfi:
case ARM::BI__wfi:
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
llvm::ConstantInt::get(Int32Ty, 3));
case ARM::BI__builtin_arm_sev:
case ARM::BI__sev:
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
llvm::ConstantInt::get(Int32Ty, 4));
case ARM::BI__builtin_arm_sevl:
case ARM::BI__sevl:
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
llvm::ConstantInt::get(Int32Ty, 5));
}
}
// Generates the IR for the read/write special register builtin.
// ValueType is the type of the value that is to be written or read;
// RegisterType is the type of the register being written to or read from.
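// For example, a 64-bit read lowers to roughly
//   %0 = call i64 @llvm.read_register.i64(metadata !{!"<sysreg>"})
// while the write forms call llvm.write_register with the value as an
// extra operand.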
static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
llvm::Type *RegisterType,
llvm::Type *ValueType, bool IsRead) {
// Read and write register intrinsics only support 32- and 64-bit operations.
assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
&& "Unsupported size for register.");
CodeGen::CGBuilderTy &Builder = CGF.Builder;
CodeGen::CodeGenModule &CGM = CGF.CGM;
LLVMContext &Context = CGM.getLLVMContext();
const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
StringRef SysReg = cast<StringLiteral>(SysRegStrExpr)->getString();
llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
llvm::Type *Types[] = { RegisterType };
bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
&& "Can't fit 64-bit value in 32-bit register");
if (IsRead) {
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
llvm::Value *Call = Builder.CreateCall(F, Metadata);
if (MixedTypes)
// Read into 64 bit register and then truncate result to 32 bit.
return Builder.CreateTrunc(Call, ValueType);
if (ValueType->isPointerTy())
// Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
return Builder.CreateIntToPtr(Call, ValueType);
return Call;
}
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
if (MixedTypes) {
// Extend 32 bit write value to 64 bit to pass to write.
ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
return Builder.CreateCall(F, { Metadata, ArgValue });
}
if (ValueType->isPointerTy()) {
// Have VoidPtrTy ArgValue but want to return an i32/i64.
ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
return Builder.CreateCall(F, { Metadata, ArgValue });
}
return Builder.CreateCall(F, { Metadata, ArgValue });
}
/// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
/// argument that specifies the vector type.
static bool HasExtraNeonArgument(unsigned BuiltinID) {
switch (BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vget_lane_i64:
case NEON::BI__builtin_neon_vget_lane_f32:
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vgetq_lane_i16:
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vgetq_lane_i64:
case NEON::BI__builtin_neon_vgetq_lane_f32:
case NEON::BI__builtin_neon_vset_lane_i8:
case NEON::BI__builtin_neon_vset_lane_i16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
case NEON::BI__builtin_neon_vsetq_lane_f32:
case NEON::BI__builtin_neon_vsha1h_u32:
case NEON::BI__builtin_neon_vsha1cq_u32:
case NEON::BI__builtin_neon_vsha1pq_u32:
case NEON::BI__builtin_neon_vsha1mq_u32:
case ARM::BI_MoveToCoprocessor:
case ARM::BI_MoveToCoprocessor2:
return false;
}
return true;
}
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
if (auto Hint = GetValueForARMHint(BuiltinID))
return Hint;
if (BuiltinID == ARM::BI__emit) {
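// __emit encodes a literal instruction as inline asm: a 16-bit .inst.n
// directive in Thumb mode, a 32-bit .inst otherwise.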
bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
APSInt Value;
if (!E->getArg(0)->EvaluateAsInt(Value, CGM.getContext()))
llvm_unreachable("Sema will ensure that the parameter is constant");
uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
llvm::InlineAsm *Emit =
IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
/*SideEffects=*/true)
: InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
/*SideEffects=*/true);
return Builder.CreateCall(Emit);
}
if (BuiltinID == ARM::BI__builtin_arm_dbg) {
Value *Option = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
}
if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *RW = EmitScalarExpr(E->getArg(1));
Value *IsData = EmitScalarExpr(E->getArg(2));
// Locality is not supported on the ARM target.
Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return Builder.CreateCall(F, {Address, RW, Locality, IsData});
}
if (BuiltinID == ARM::BI__builtin_arm_rbit) {
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_rbit),
EmitScalarExpr(E->getArg(0)),
"rbit");
}
if (BuiltinID == ARM::BI__clear_cache) {
assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
SmallVector<Value*, 2> Ops;
for (unsigned i = 0; i < 2; i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
StringRef Name = FD->getName();
return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
((BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex) &&
getContext().getTypeSize(E->getType()) == 64) ||
BuiltinID == ARM::BI__ldrexd) {
Function *F;
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin");
case ARM::BI__builtin_arm_ldaex:
F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
break;
case ARM::BI__builtin_arm_ldrexd:
case ARM::BI__builtin_arm_ldrex:
case ARM::BI__ldrexd:
F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
break;
}
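// ldrexd returns the doubleword as two i32 halves; recombine them below as
// (half1 << 32) | half0 and bitcast to the expected result type.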
Value *LdPtr = EmitScalarExpr(E->getArg(0));
Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
"ldrexd");
Value *Val0 = Builder.CreateExtractValue(Val, 1);
Value *Val1 = Builder.CreateExtractValue(Val, 0);
Val0 = Builder.CreateZExt(Val0, Int64Ty);
Val1 = Builder.CreateZExt(Val1, Int64Ty);
Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
Val = Builder.CreateOr(Val, Val1);
return Builder.CreateBitCast(Val, ConvertType(E->getType()));
}
if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex) {
Value *LoadAddr = EmitScalarExpr(E->getArg(0));
QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(Ty));
LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
? Intrinsic::arm_ldaex
: Intrinsic::arm_ldrex,
LoadAddr->getType());
Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
else {
Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
return Builder.CreateBitCast(Val, RealResTy);
}
}
if (BuiltinID == ARM::BI__builtin_arm_strexd ||
((BuiltinID == ARM::BI__builtin_arm_stlex ||
BuiltinID == ARM::BI__builtin_arm_strex) &&
getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
? Intrinsic::arm_stlexd
: Intrinsic::arm_strexd);
llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, nullptr);
Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
Value *Val = EmitScalarExpr(E->getArg(0));
Builder.CreateStore(Val, Tmp);
Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
Val = Builder.CreateLoad(LdPtr);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
Value *Arg1 = Builder.CreateExtractValue(Val, 1);
Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
}
if (BuiltinID == ARM::BI__builtin_arm_strex ||
BuiltinID == ARM::BI__builtin_arm_stlex) {
Value *StoreVal = EmitScalarExpr(E->getArg(0));
Value *StoreAddr = EmitScalarExpr(E->getArg(1));
QualType Ty = E->getArg(0)->getType();
llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(Ty));
StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
if (StoreVal->getType()->isPointerTy())
StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
else {
StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
}
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
? Intrinsic::arm_stlex
: Intrinsic::arm_strex,
StoreAddr->getType());
return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
}
if (BuiltinID == ARM::BI__builtin_arm_clrex) {
Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
return Builder.CreateCall(F);
}
// CRC32
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
case ARM::BI__builtin_arm_crc32b:
CRCIntrinsicID = Intrinsic::arm_crc32b; break;
case ARM::BI__builtin_arm_crc32cb:
CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
case ARM::BI__builtin_arm_crc32h:
CRCIntrinsicID = Intrinsic::arm_crc32h; break;
case ARM::BI__builtin_arm_crc32ch:
CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
case ARM::BI__builtin_arm_crc32w:
case ARM::BI__builtin_arm_crc32d:
CRCIntrinsicID = Intrinsic::arm_crc32w; break;
case ARM::BI__builtin_arm_crc32cw:
case ARM::BI__builtin_arm_crc32cd:
CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
}
if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
Value *Arg0 = EmitScalarExpr(E->getArg(0));
Value *Arg1 = EmitScalarExpr(E->getArg(1));
// crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
// intrinsics, hence we need different codegen for these cases.
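// e.g. crc32d(crc, x) == crc32w(crc32w(crc, lo32(x)), hi32(x)).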
if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
BuiltinID == ARM::BI__builtin_arm_crc32cd) {
Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
Value *Arg1b = Builder.CreateLShr(Arg1, C1);
Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
Function *F = CGM.getIntrinsic(CRCIntrinsicID);
Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
return Builder.CreateCall(F, {Res, Arg1b});
} else {
Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
Function *F = CGM.getIntrinsic(CRCIntrinsicID);
return Builder.CreateCall(F, {Arg0, Arg1});
}
}
if (BuiltinID == ARM::BI__builtin_arm_rsr ||
BuiltinID == ARM::BI__builtin_arm_rsr64 ||
BuiltinID == ARM::BI__builtin_arm_rsrp ||
BuiltinID == ARM::BI__builtin_arm_wsr ||
BuiltinID == ARM::BI__builtin_arm_wsr64 ||
BuiltinID == ARM::BI__builtin_arm_wsrp) {
bool IsRead = BuiltinID == ARM::BI__builtin_arm_rsr ||
BuiltinID == ARM::BI__builtin_arm_rsr64 ||
BuiltinID == ARM::BI__builtin_arm_rsrp;
bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
BuiltinID == ARM::BI__builtin_arm_wsrp;
bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
BuiltinID == ARM::BI__builtin_arm_wsr64;
llvm::Type *ValueType;
llvm::Type *RegisterType;
if (IsPointerBuiltin) {
ValueType = VoidPtrTy;
RegisterType = Int32Ty;
} else if (Is64Bit) {
ValueType = RegisterType = Int64Ty;
} else {
ValueType = RegisterType = Int32Ty;
}
return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
}
// Find out if any arguments are required to be integer constant
// expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
SmallVector<Value*, 4> Ops;
llvm::Value *Align = nullptr;
bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
for (unsigned i = 0, e = NumArgs; i != e; i++) {
if (i == 0) {
switch (BuiltinID) {
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v:
case NEON::BI__builtin_neon_vld1q_lane_v:
case NEON::BI__builtin_neon_vld1_lane_v:
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v:
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
case NEON::BI__builtin_neon_vst1q_lane_v:
case NEON::BI__builtin_neon_vst1_lane_v:
case NEON::BI__builtin_neon_vst2_v:
case NEON::BI__builtin_neon_vst2q_v:
case NEON::BI__builtin_neon_vst2_lane_v:
case NEON::BI__builtin_neon_vst2q_lane_v:
case NEON::BI__builtin_neon_vst3_v:
case NEON::BI__builtin_neon_vst3q_v:
case NEON::BI__builtin_neon_vst3_lane_v:
case NEON::BI__builtin_neon_vst3q_lane_v:
case NEON::BI__builtin_neon_vst4_v:
case NEON::BI__builtin_neon_vst4q_v:
case NEON::BI__builtin_neon_vst4_lane_v:
case NEON::BI__builtin_neon_vst4q_lane_v:
// Get the alignment for the argument in addition to the value;
// we'll use it later.
std::pair<llvm::Value*, unsigned> Src =
EmitPointerWithAlignment(E->getArg(0));
Ops.push_back(Src.first);
Align = Builder.getInt32(Src.second);
continue;
}
}
if (i == 1) {
switch (BuiltinID) {
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v:
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v:
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v:
case NEON::BI__builtin_neon_vld2_lane_v:
case NEON::BI__builtin_neon_vld2q_lane_v:
case NEON::BI__builtin_neon_vld3_lane_v:
case NEON::BI__builtin_neon_vld3q_lane_v:
case NEON::BI__builtin_neon_vld4_lane_v:
case NEON::BI__builtin_neon_vld4q_lane_v:
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld4_dup_v:
// Get the alignment for the argument in addition to the value;
// we'll use it later.
std::pair<llvm::Value*, unsigned> Src =
EmitPointerWithAlignment(E->getArg(1));
Ops.push_back(Src.first);
Align = Builder.getInt32(Src.second);
continue;
}
}
if ((ICEArguments & (1 << i)) == 0) {
Ops.push_back(EmitScalarExpr(E->getArg(i)));
} else {
// If this is required to be a constant, constant fold it so that we know
// that the generated intrinsic gets a ConstantInt.
llvm::APSInt Result;
bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
}
}
switch (BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vget_lane_i64:
case NEON::BI__builtin_neon_vget_lane_f32:
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vgetq_lane_i16:
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vgetq_lane_i64:
case NEON::BI__builtin_neon_vgetq_lane_f32:
return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
case NEON::BI__builtin_neon_vset_lane_i8:
case NEON::BI__builtin_neon_vset_lane_i16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
case NEON::BI__builtin_neon_vsetq_lane_f32:
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vsha1h_u32:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
"vsha1h");
case NEON::BI__builtin_neon_vsha1cq_u32:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
"vsha1h");
case NEON::BI__builtin_neon_vsha1pq_u32:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
"vsha1h");
case NEON::BI__builtin_neon_vsha1mq_u32:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
"vsha1h");
// The ARM _MoveToCoprocessor builtins put the input register value as
// the first argument, but the LLVM intrinsic expects it as the third one.
case ARM::BI_MoveToCoprocessor:
case ARM::BI_MoveToCoprocessor2: {
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
Ops[3], Ops[4], Ops[5]});
}
}
// Get the last argument, which specifies the vector type.
assert(HasExtraArg);
llvm::APSInt Result;
const Expr *Arg = E->getArg(E->getNumArgs()-1);
if (!Arg->isIntegerConstantExpr(Result, getContext()))
return nullptr;
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
// Determine the overloaded type of this builtin.
llvm::Type *Ty;
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
Ty = FloatTy;
else
Ty = DoubleTy;
// Determine whether this is an unsigned conversion or not.
bool usgn = Result.getZExtValue() == 1;
unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
// Call the appropriate intrinsic.
Function *F = CGM.getIntrinsic(Int, Ty);
return Builder.CreateCall(F, Ops, "vcvtr");
}
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(Result.getZExtValue());
bool usgn = Type.isUnsigned();
bool rightShift = false;
llvm::VectorType *VTy = GetNeonType(this, Type);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
// Many NEON builtins have identical semantics and uses in ARM and
// AArch64. Emit these in a single function.
auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
if (Builtin)
return EmitCommonNeonBuiltinExpr(
Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
Builtin->NameHint, Builtin->TypeModifier, E, Ops, Align);
unsigned Int;
switch (BuiltinID) {
default: return nullptr;
case NEON::BI__builtin_neon_vld1q_lane_v:
// Handle 64-bit integer elements as a special case. Use shuffles of
// one-element vectors to avoid poor code for i64 in the backend.
if (VTy->getElementType()->isIntegerTy(64)) {
// Extract the other lane.
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
// Load the value as a one-element vector.
Ty = llvm::VectorType::get(VTy->getElementType(), 1);
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
// Combine them.
SmallVector<Constant*, 2> Indices;
Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane));
Indices.push_back(ConstantInt::get(Int32Ty, Lane));
SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
}
// fall through
case NEON::BI__builtin_neon_vld1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
LoadInst *Ld = Builder.CreateLoad(Ops[0]);
Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
}
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld4_dup_v: {
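// vldN_dup loads one element per result vector and replicates it across
// all lanes. In the general case below, lane 0 is loaded with a vldNlane
// intrinsic into undef vectors and then splatted.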
// Handle 64-bit elements as a special-case. There is no "dup" needed.
if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
switch (BuiltinID) {
case NEON::BI__builtin_neon_vld2_dup_v:
Int = Intrinsic::arm_neon_vld2;
break;
case NEON::BI__builtin_neon_vld3_dup_v:
Int = Intrinsic::arm_neon_vld3;
break;
case NEON::BI__builtin_neon_vld4_dup_v:
Int = Intrinsic::arm_neon_vld4;
break;
default: llvm_unreachable("unknown vld_dup intrinsic?");
}
Function *F = CGM.getIntrinsic(Int, Ty);
Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, "vld_dup");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
}
switch (BuiltinID) {
case NEON::BI__builtin_neon_vld2_dup_v:
Int = Intrinsic::arm_neon_vld2lane;
break;
case NEON::BI__builtin_neon_vld3_dup_v:
Int = Intrinsic::arm_neon_vld3lane;
break;
case NEON::BI__builtin_neon_vld4_dup_v:
Int = Intrinsic::arm_neon_vld4lane;
break;
default: llvm_unreachable("unknown vld_dup intrinsic?");
}
Function *F = CGM.getIntrinsic(Int, Ty);
llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
SmallVector<Value*, 6> Args;
Args.push_back(Ops[1]);
Args.append(STy->getNumElements(), UndefValue::get(Ty));
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Args.push_back(CI);
Args.push_back(Align);
Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
// Splat lane 0 to all elements in each vector of the result.
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Value *Val = Builder.CreateExtractValue(Ops[1], i);
Value *Elt = Builder.CreateBitCast(Val, Ty);
Elt = EmitNeonSplat(Elt, CI);
Elt = Builder.CreateBitCast(Elt, Val->getType());
Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
}
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vqrshrn_n_v:
Int =
usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
1, true);
case NEON::BI__builtin_neon_vqrshrun_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
Ops, "vqrshrun_n", 1, true);
case NEON::BI__builtin_neon_vqshrn_n_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
1, true);
case NEON::BI__builtin_neon_vqshrun_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
Ops, "vqshrun_n", 1, true);
case NEON::BI__builtin_neon_vrecpe_v:
case NEON::BI__builtin_neon_vrecpeq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
Ops, "vrecpe");
case NEON::BI__builtin_neon_vrshrn_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
Ops, "vrshrn_n", 1, true);
case NEON::BI__builtin_neon_vrsra_n_v:
case NEON::BI__builtin_neon_vrsraq_n_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
case NEON::BI__builtin_neon_vsri_n_v:
case NEON::BI__builtin_neon_vsriq_n_v:
rightShift = true;
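// fall through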
case NEON::BI__builtin_neon_vsli_n_v:
case NEON::BI__builtin_neon_vsliq_n_v:
Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
Ops, "vsli_n");
case NEON::BI__builtin_neon_vsra_n_v:
case NEON::BI__builtin_neon_vsraq_n_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
return Builder.CreateAdd(Ops[0], Ops[1]);
case NEON::BI__builtin_neon_vst1q_lane_v:
// Handle 64-bit integer elements as a special case. Use a shuffle to get
// a one-element vector and avoid poor code for i64 in the backend.
if (VTy->getElementType()->isIntegerTy(64)) {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
Ops[2] = Align;
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
Ops[1]->getType()), Ops);
}
// fall through
case NEON::BI__builtin_neon_vst1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
StoreInst *St = Builder.CreateStore(Ops[1],
Builder.CreateBitCast(Ops[0], Ty));
St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
return St;
}
case NEON::BI__builtin_neon_vtbl1_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
Ops, "vtbl1");
case NEON::BI__builtin_neon_vtbl2_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
Ops, "vtbl2");
case NEON::BI__builtin_neon_vtbl3_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
Ops, "vtbl3");
case NEON::BI__builtin_neon_vtbl4_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
Ops, "vtbl4");
case NEON::BI__builtin_neon_vtbx1_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
Ops, "vtbx1");
case NEON::BI__builtin_neon_vtbx2_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
Ops, "vtbx2");
case NEON::BI__builtin_neon_vtbx3_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
Ops, "vtbx3");
case NEON::BI__builtin_neon_vtbx4_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
Ops, "vtbx4");
}
}
static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
const CallExpr *E,
SmallVectorImpl<Value *> &Ops) {
unsigned int Int = 0;
const char *s = nullptr;
switch (BuiltinID) {
default:
return nullptr;
case NEON::BI__builtin_neon_vtbl1_v:
case NEON::BI__builtin_neon_vqtbl1_v:
case NEON::BI__builtin_neon_vqtbl1q_v:
case NEON::BI__builtin_neon_vtbl2_v:
case NEON::BI__builtin_neon_vqtbl2_v:
case NEON::BI__builtin_neon_vqtbl2q_v:
case NEON::BI__builtin_neon_vtbl3_v:
case NEON::BI__builtin_neon_vqtbl3_v:
case NEON::BI__builtin_neon_vqtbl3q_v:
case NEON::BI__builtin_neon_vtbl4_v:
case NEON::BI__builtin_neon_vqtbl4_v:
case NEON::BI__builtin_neon_vqtbl4q_v:
break;
case NEON::BI__builtin_neon_vtbx1_v:
case NEON::BI__builtin_neon_vqtbx1_v:
case NEON::BI__builtin_neon_vqtbx1q_v:
case NEON::BI__builtin_neon_vtbx2_v:
case NEON::BI__builtin_neon_vqtbx2_v:
case NEON::BI__builtin_neon_vqtbx2q_v:
case NEON::BI__builtin_neon_vtbx3_v:
case NEON::BI__builtin_neon_vqtbx3_v:
case NEON::BI__builtin_neon_vqtbx3q_v:
case NEON::BI__builtin_neon_vtbx4_v:
case NEON::BI__builtin_neon_vqtbx4_v:
case NEON::BI__builtin_neon_vqtbx4q_v:
break;
}
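// TBL yields a zero byte for an out-of-range index, while TBX leaves the
// corresponding destination byte unchanged; the vtbx1 and vtbx3 cases below
// emulate TBX on top of TBL with a compare-and-select mask.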
assert(E->getNumArgs() >= 3);
// Get the last argument, which specifies the vector type.
llvm::APSInt Result;
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
return nullptr;
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(Result.getZExtValue());
llvm::VectorType *VTy = GetNeonType(&CGF, Type);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
unsigned nElts = VTy->getNumElements();
CodeGen::CGBuilderTy &Builder = CGF.Builder;
// AArch64 scalar builtins are not overloaded; they do not have an extra
// argument that specifies the vector type, so we need to handle each case.
SmallVector<Value *, 2> TblOps;
switch (BuiltinID) {
case NEON::BI__builtin_neon_vtbl1_v: {
TblOps.push_back(Ops[0]);
return packTBLDVectorList(CGF, TblOps, nullptr, Ops[1], Ty,
Intrinsic::aarch64_neon_tbl1, "vtbl1");
}
case NEON::BI__builtin_neon_vtbl2_v: {
TblOps.push_back(Ops[0]);
TblOps.push_back(Ops[1]);
return packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
Intrinsic::aarch64_neon_tbl1, "vtbl1");
}
case NEON::BI__builtin_neon_vtbl3_v: {
TblOps.push_back(Ops[0]);
TblOps.push_back(Ops[1]);
TblOps.push_back(Ops[2]);
return packTBLDVectorList(CGF, TblOps, nullptr, Ops[3], Ty,
Intrinsic::aarch64_neon_tbl2, "vtbl2");
}
case NEON::BI__builtin_neon_vtbl4_v: {
TblOps.push_back(Ops[0]);
TblOps.push_back(Ops[1]);
TblOps.push_back(Ops[2]);
TblOps.push_back(Ops[3]);
return packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
Intrinsic::aarch64_neon_tbl2, "vtbl2");
}
case NEON::BI__builtin_neon_vtbx1_v: {
TblOps.push_back(Ops[1]);
Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
Intrinsic::aarch64_neon_tbl1, "vtbl1");
llvm::Constant *Eight = ConstantInt::get(VTy->getElementType(), 8);
Value* EightV = llvm::ConstantVector::getSplat(nElts, Eight);
Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
CmpRes = Builder.CreateSExt(CmpRes, Ty);
Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
}
case NEON::BI__builtin_neon_vtbx2_v: {
TblOps.push_back(Ops[1]);
TblOps.push_back(Ops[2]);
return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[3], Ty,
Intrinsic::aarch64_neon_tbx1, "vtbx1");
}
case NEON::BI__builtin_neon_vtbx3_v: {
TblOps.push_back(Ops[1]);
TblOps.push_back(Ops[2]);
TblOps.push_back(Ops[3]);
Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
Intrinsic::aarch64_neon_tbl2, "vtbl2");
llvm::Constant *TwentyFour = ConstantInt::get(VTy->getElementType(), 24);
Value* TwentyFourV = llvm::ConstantVector::getSplat(nElts, TwentyFour);
Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
TwentyFourV);
CmpRes = Builder.CreateSExt(CmpRes, Ty);
Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
}
case NEON::BI__builtin_neon_vtbx4_v: {
TblOps.push_back(Ops[1]);
TblOps.push_back(Ops[2]);
TblOps.push_back(Ops[3]);
TblOps.push_back(Ops[4]);
return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[5], Ty,
Intrinsic::aarch64_neon_tbx2, "vtbx2");
}
case NEON::BI__builtin_neon_vqtbl1_v:
case NEON::BI__builtin_neon_vqtbl1q_v:
Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
case NEON::BI__builtin_neon_vqtbl2_v:
case NEON::BI__builtin_neon_vqtbl2q_v:
Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
case NEON::BI__builtin_neon_vqtbl3_v:
case NEON::BI__builtin_neon_vqtbl3q_v:
Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
case NEON::BI__builtin_neon_vqtbl4_v:
case NEON::BI__builtin_neon_vqtbl4q_v:
Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
case NEON::BI__builtin_neon_vqtbx1_v:
case NEON::BI__builtin_neon_vqtbx1q_v:
Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
case NEON::BI__builtin_neon_vqtbx2_v:
case NEON::BI__builtin_neon_vqtbx2q_v:
Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
case NEON::BI__builtin_neon_vqtbx3_v:
case NEON::BI__builtin_neon_vqtbx3q_v:
Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
case NEON::BI__builtin_neon_vqtbx4_v:
case NEON::BI__builtin_neon_vqtbx4q_v:
Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
}
if (!Int)
return nullptr;
Function *F = CGF.CGM.getIntrinsic(Int, Ty);
return CGF.EmitNeonCall(F, Ops, s);
}
Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
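// Place a scalar i16 into lane 0 of an undef <4 x i16> so that vector-only
// NEON intrinsics (e.g. sqdmull for vqdmlalh below) can consume scalar
// operands.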
llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
Op = Builder.CreateBitCast(Op, Int16Ty);
Value *V = UndefValue::get(VTy);
llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
Op = Builder.CreateInsertElement(V, Op, CI);
return Op;
}
Value *CodeGenFunction::vectorWrapScalar8(Value *Op) {
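// Same idea for i8: place the scalar into lane 0 of an undef <8 x i8>.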
llvm::Type *VTy = llvm::VectorType::get(Int8Ty, 8);
Op = Builder.CreateBitCast(Op, Int8Ty);
Value *V = UndefValue::get(VTy);
llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
Op = Builder.CreateInsertElement(V, Op, CI);
return Op;
}
Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
unsigned HintID = static_cast<unsigned>(-1);
switch (BuiltinID) {
default: break;
case AArch64::BI__builtin_arm_nop:
HintID = 0;
break;
case AArch64::BI__builtin_arm_yield:
HintID = 1;
break;
case AArch64::BI__builtin_arm_wfe:
HintID = 2;
break;
case AArch64::BI__builtin_arm_wfi:
HintID = 3;
break;
case AArch64::BI__builtin_arm_sev:
HintID = 4;
break;
case AArch64::BI__builtin_arm_sevl:
HintID = 5;
break;
}
if (HintID != static_cast<unsigned>(-1)) {
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
}
if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *RW = EmitScalarExpr(E->getArg(1));
Value *CacheLevel = EmitScalarExpr(E->getArg(2));
Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
Value *IsData = EmitScalarExpr(E->getArg(4));
Value *Locality = nullptr;
if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
// Temporal fetch: convert the cache level to an LLVM locality value.
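// e.g. cache level 0 maps to locality 3 (most temporal), level 2 to
// locality 1.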
Locality = llvm::ConstantInt::get(Int32Ty,
-cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
} else {
// Streaming fetch.
Locality = llvm::ConstantInt::get(Int32Ty, 0);
}
// FIXME: We need AArch64 specific LLVM intrinsic if we want to specify
// PLDL3STRM or PLDL2STRM.
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return Builder.CreateCall(F, {Address, RW, Locality, IsData});
}
if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
assert((getContext().getTypeSize(E->getType()) == 32) &&
"rbit of unusual size!");
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::aarch64_rbit, Arg->getType()), Arg, "rbit");
}
if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
assert((getContext().getTypeSize(E->getType()) == 64) &&
"rbit of unusual size!");
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::aarch64_rbit, Arg->getType()), Arg, "rbit");
}
if (BuiltinID == AArch64::BI__clear_cache) {
assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
SmallVector<Value*, 2> Ops;
for (unsigned i = 0; i < 2; i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
StringRef Name = FD->getName();
return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
getContext().getTypeSize(E->getType()) == 128) {
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
? Intrinsic::aarch64_ldaxp
: Intrinsic::aarch64_ldxp);
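// ldxp/ldaxp return the pair as two i64 values; recombine them below into
// an i128 as (elt1 << 64) | elt0.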
Value *LdPtr = EmitScalarExpr(E->getArg(0));
Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
"ldxp");
Value *Val0 = Builder.CreateExtractValue(Val, 1);
Value *Val1 = Builder.CreateExtractValue(Val, 0);
llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
Val0 = Builder.CreateZExt(Val0, Int128Ty);
Val1 = Builder.CreateZExt(Val1, Int128Ty);
Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
Val = Builder.CreateOr(Val, Val1);
return Builder.CreateBitCast(Val, ConvertType(E->getType()));
} else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
BuiltinID == AArch64::BI__builtin_arm_ldaex) {
Value *LoadAddr = EmitScalarExpr(E->getArg(0));
QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(Ty));
LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
? Intrinsic::aarch64_ldaxr
: Intrinsic::aarch64_ldxr,
LoadAddr->getType());
Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
return Builder.CreateBitCast(Val, RealResTy);
}
if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
BuiltinID == AArch64::BI__builtin_arm_stlex) &&
getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
? Intrinsic::aarch64_stlxp
: Intrinsic::aarch64_stxp);
llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, nullptr);
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
Value *Tmp = Builder.CreateAlloca(ConvertType(E->getArg(0)->getType()),
One);
Value *Val = EmitScalarExpr(E->getArg(0));
Builder.CreateStore(Val, Tmp);
Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
Val = Builder.CreateLoad(LdPtr);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
Value *Arg1 = Builder.CreateExtractValue(Val, 1);
Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
Int8PtrTy);
return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
}
if (BuiltinID == AArch64::BI__builtin_arm_strex ||
BuiltinID == AArch64::BI__builtin_arm_stlex) {
Value *StoreVal = EmitScalarExpr(E->getArg(0));
Value *StoreAddr = EmitScalarExpr(E->getArg(1));
QualType Ty = E->getArg(0)->getType();
llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(Ty));
StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
if (StoreVal->getType()->isPointerTy())
StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
else {
StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
}
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
? Intrinsic::aarch64_stlxr
: Intrinsic::aarch64_stxr,
StoreAddr->getType());
return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
}
if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
return Builder.CreateCall(F);
}
// CRC32
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
case AArch64::BI__builtin_arm_crc32b:
CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
case AArch64::BI__builtin_arm_crc32cb:
CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
case AArch64::BI__builtin_arm_crc32h:
CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
case AArch64::BI__builtin_arm_crc32ch:
CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
case AArch64::BI__builtin_arm_crc32w:
CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
case AArch64::BI__builtin_arm_crc32cw:
CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
case AArch64::BI__builtin_arm_crc32d:
CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
case AArch64::BI__builtin_arm_crc32cd:
CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
}
if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
Value *Arg0 = EmitScalarExpr(E->getArg(0));
Value *Arg1 = EmitScalarExpr(E->getArg(1));
Function *F = CGM.getIntrinsic(CRCIntrinsicID);
llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
return Builder.CreateCall(F, {Arg0, Arg1});
}
if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
BuiltinID == AArch64::BI__builtin_arm_rsrp ||
BuiltinID == AArch64::BI__builtin_arm_wsr ||
BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
BuiltinID == AArch64::BI__builtin_arm_wsrp) {
bool IsRead = BuiltinID == AArch64::BI__builtin_arm_rsr ||
BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
BuiltinID == AArch64::BI__builtin_arm_rsrp;
bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
BuiltinID == AArch64::BI__builtin_arm_wsrp;
bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
BuiltinID != AArch64::BI__builtin_arm_wsr;
llvm::Type *ValueType;
llvm::Type *RegisterType = Int64Ty;
if (IsPointerBuiltin) {
ValueType = VoidPtrTy;
} else if (Is64Bit) {
ValueType = Int64Ty;
} else {
ValueType = Int32Ty;
}
return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
}
// Find out if any arguments are required to be integer constant
// expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
llvm::SmallVector<Value*, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
if ((ICEArguments & (1 << i)) == 0) {
Ops.push_back(EmitScalarExpr(E->getArg(i)));
} else {
// If this is required to be a constant, constant fold it so that we know
// that the generated intrinsic gets a ConstantInt.
llvm::APSInt Result;
bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
assert(IsConst && "Constant arg isn't actually constant?");
(void)IsConst;
Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
}
}
auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
if (Builtin) {
Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
assert(Result && "SISD intrinsic should have been handled");
return Result;
}
llvm::APSInt Result;
const Expr *Arg = E->getArg(E->getNumArgs()-1);
NeonTypeFlags Type(0);
if (Arg->isIntegerConstantExpr(Result, getContext()))
// Determine the type of this overloaded NEON intrinsic.
Type = NeonTypeFlags(Result.getZExtValue());
bool usgn = Type.isUnsigned();
bool quad = Type.isQuad();
// Handle non-overloaded intrinsics first.
switch (BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vldrq_p128: {
llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
return Builder.CreateLoad(Ptr);
}
case NEON::BI__builtin_neon_vstrq_p128: {
llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
return Builder.CreateStore(EmitScalarExpr(E->getArg(1)), Ptr);
}
case NEON::BI__builtin_neon_vcvts_u32_f32:
case NEON::BI__builtin_neon_vcvtd_u64_f64:
usgn = true;
// FALL THROUGH
case NEON::BI__builtin_neon_vcvts_s32_f32:
case NEON::BI__builtin_neon_vcvtd_s64_f64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
Ops[0] = Builder.CreateBitCast(Ops[0], FTy);
if (usgn)
return Builder.CreateFPToUI(Ops[0], InTy);
return Builder.CreateFPToSI(Ops[0], InTy);
}
case NEON::BI__builtin_neon_vcvts_f32_u32:
case NEON::BI__builtin_neon_vcvtd_f64_u64:
usgn = true;
// FALL THROUGH
case NEON::BI__builtin_neon_vcvts_f32_s32:
case NEON::BI__builtin_neon_vcvtd_f64_s64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
if (usgn)
return Builder.CreateUIToFP(Ops[0], FTy);
return Builder.CreateSIToFP(Ops[0], FTy);
}
case NEON::BI__builtin_neon_vpaddd_s64: {
llvm::Type *Ty =
llvm::VectorType::get(llvm::Type::getInt64Ty(getLLVMContext()), 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2i64, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
// Pairwise addition of a v2i64 into a scalar i64.
return Builder.CreateAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vpaddd_f64: {
llvm::Type *Ty =
llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2f64, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
// Pairwise addition of a v2f64 into a scalar f64.
return Builder.CreateFAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vpadds_f32: {
llvm::Type *Ty =
llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2f32, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
// Pairwise addition of a v2f32 into a scalar f32.
return Builder.CreateFAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vceqzd_s64:
case NEON::BI__builtin_neon_vceqzd_f64:
case NEON::BI__builtin_neon_vceqzs_f32:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
case NEON::BI__builtin_neon_vcgezd_s64:
case NEON::BI__builtin_neon_vcgezd_f64:
case NEON::BI__builtin_neon_vcgezs_f32:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
case NEON::BI__builtin_neon_vclezd_s64:
case NEON::BI__builtin_neon_vclezd_f64:
case NEON::BI__builtin_neon_vclezs_f32:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
case NEON::BI__builtin_neon_vcgtzd_s64:
case NEON::BI__builtin_neon_vcgtzd_f64:
case NEON::BI__builtin_neon_vcgtzs_f32:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
case NEON::BI__builtin_neon_vcltzd_s64:
case NEON::BI__builtin_neon_vcltzd_f64:
case NEON::BI__builtin_neon_vcltzs_f32:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
case NEON::BI__builtin_neon_vceqzd_u64: {
llvm::Type *Ty = llvm::Type::getInt64Ty(getLLVMContext());
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[0] = Builder.CreateICmp(llvm::ICmpInst::ICMP_EQ, Ops[0],
llvm::Constant::getNullValue(Ty));
return Builder.CreateSExt(Ops[0], Ty, "vceqzd");
}
case NEON::BI__builtin_neon_vceqd_f64:
case NEON::BI__builtin_neon_vcled_f64:
case NEON::BI__builtin_neon_vcltd_f64:
case NEON::BI__builtin_neon_vcged_f64:
case NEON::BI__builtin_neon_vcgtd_f64: {
llvm::CmpInst::Predicate P;
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
}
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
}
case NEON::BI__builtin_neon_vceqs_f32:
case NEON::BI__builtin_neon_vcles_f32:
case NEON::BI__builtin_neon_vclts_f32:
case NEON::BI__builtin_neon_vcges_f32:
case NEON::BI__builtin_neon_vcgts_f32: {
llvm::CmpInst::Predicate P;
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
}
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
}
case NEON::BI__builtin_neon_vceqd_s64:
case NEON::BI__builtin_neon_vceqd_u64:
case NEON::BI__builtin_neon_vcgtd_s64:
case NEON::BI__builtin_neon_vcgtd_u64:
case NEON::BI__builtin_neon_vcltd_s64:
case NEON::BI__builtin_neon_vcltd_u64:
case NEON::BI__builtin_neon_vcged_u64:
case NEON::BI__builtin_neon_vcged_s64:
case NEON::BI__builtin_neon_vcled_u64:
case NEON::BI__builtin_neon_vcled_s64: {
llvm::CmpInst::Predicate P;
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vceqd_s64:
case NEON::BI__builtin_neon_vceqd_u64: P = llvm::ICmpInst::ICMP_EQ; break;
case NEON::BI__builtin_neon_vcgtd_s64: P = llvm::ICmpInst::ICMP_SGT; break;
case NEON::BI__builtin_neon_vcgtd_u64: P = llvm::ICmpInst::ICMP_UGT; break;
case NEON::BI__builtin_neon_vcltd_s64: P = llvm::ICmpInst::ICMP_SLT; break;
case NEON::BI__builtin_neon_vcltd_u64: P = llvm::ICmpInst::ICMP_ULT; break;
case NEON::BI__builtin_neon_vcged_u64: P = llvm::ICmpInst::ICMP_UGE; break;
case NEON::BI__builtin_neon_vcged_s64: P = llvm::ICmpInst::ICMP_SGE; break;
case NEON::BI__builtin_neon_vcled_u64: P = llvm::ICmpInst::ICMP_ULE; break;
case NEON::BI__builtin_neon_vcled_s64: P = llvm::ICmpInst::ICMP_SLE; break;
}
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
}
case NEON::BI__builtin_neon_vtstd_s64:
case NEON::BI__builtin_neon_vtstd_u64: {
llvm::Type *Ty = llvm::Type::getInt64Ty(getLLVMContext());
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
llvm::Constant::getNullValue(Ty));
return Builder.CreateSExt(Ops[0], Ty, "vtstd");
}
case NEON::BI__builtin_neon_vset_lane_i8:
case NEON::BI__builtin_neon_vset_lane_i16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
case NEON::BI__builtin_neon_vsetq_lane_f32:
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vset_lane_f64:
// The vector type needs a cast for the v1f64 variant.
Ops[1] = Builder.CreateBitCast(Ops[1],
llvm::VectorType::get(DoubleTy, 1));
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vsetq_lane_f64:
// The vector type needs a cast for the v2f64 variant.
Ops[1] = Builder.CreateBitCast(Ops[1],
llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2));
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vdupb_lane_i8:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vdupb_laneq_i8:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vduph_lane_i16:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_i16:
case NEON::BI__builtin_neon_vduph_laneq_i16:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vdups_lane_i32:
Ops[0] = Builder.CreateBitCast(
Ops[0],
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 32), 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vdups_lane_f32:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vdups_lane");
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vdups_laneq_i32:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 32), 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i64:
case NEON::BI__builtin_neon_vdupd_lane_i64:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 64), 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vdupd_lane_f64:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vdupd_lane");
case NEON::BI__builtin_neon_vgetq_lane_i64:
case NEON::BI__builtin_neon_vdupd_laneq_i64:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 64), 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_f32:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vget_lane_f64:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_f32:
case NEON::BI__builtin_neon_vdups_laneq_f32:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vgetq_lane_f64:
case NEON::BI__builtin_neon_vdupd_laneq_f64:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vaddd_s64:
case NEON::BI__builtin_neon_vaddd_u64:
return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
case NEON::BI__builtin_neon_vsubd_s64:
case NEON::BI__builtin_neon_vsubd_u64:
return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
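  // Scalar saturating doubling multiply-accumulate: wrap the i16 operands
  // into v4i16 vectors, form the doubling product with sqdmull, extract
  // lane 0, and saturating-add or -subtract it into the accumulator.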
case NEON::BI__builtin_neon_vqdmlalh_s16:
case NEON::BI__builtin_neon_vqdmlslh_s16: {
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(vectorWrapScalar16(Ops[1]));
ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
ProductOps, "vqdmlXl");
Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
? Intrinsic::aarch64_neon_sqadd
: Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vqshlud_n_s64: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
Ops, "vqshlu_n");
}
case NEON::BI__builtin_neon_vqshld_n_u64:
case NEON::BI__builtin_neon_vqshld_n_s64: {
unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
? Intrinsic::aarch64_neon_uqshl
: Intrinsic::aarch64_neon_sqshl;
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
}
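  // There is no scalar rounding-shift-right intrinsic; emit a rounding
  // shift left (urshl/srshl) with a negated shift amount instead.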
case NEON::BI__builtin_neon_vrshrd_n_u64:
case NEON::BI__builtin_neon_vrshrd_n_s64: {
unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
? Intrinsic::aarch64_neon_urshl
: Intrinsic::aarch64_neon_srshl;
Ops.push_back(EmitScalarExpr(E->getArg(1)));
int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
Ops[1] = ConstantInt::get(Int64Ty, -SV);
return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
}
case NEON::BI__builtin_neon_vrsrad_n_u64:
case NEON::BI__builtin_neon_vrsrad_n_s64: {
unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
? Intrinsic::aarch64_neon_urshl
: Intrinsic::aarch64_neon_srshl;
Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
{Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
}
case NEON::BI__builtin_neon_vshld_n_s64:
case NEON::BI__builtin_neon_vshld_n_u64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
return Builder.CreateShl(
Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
}
case NEON::BI__builtin_neon_vshrd_n_s64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
return Builder.CreateAShr(
Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
Amt->getZExtValue())),
"shrd_n");
}
case NEON::BI__builtin_neon_vshrd_n_u64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
uint64_t ShiftAmt = Amt->getZExtValue();
// Right-shifting an unsigned value by its size yields 0.
if (ShiftAmt == 64)
return ConstantInt::get(Int64Ty, 0);
return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
"shrd_n");
}
case NEON::BI__builtin_neon_vsrad_n_s64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
Ops[1] = Builder.CreateAShr(
Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
Amt->getZExtValue())),
"shrd_n");
return Builder.CreateAdd(Ops[0], Ops[1]);
}
case NEON::BI__builtin_neon_vsrad_n_u64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
uint64_t ShiftAmt = Amt->getZExtValue();
// Right-shifting an unsigned value by its size yields 0.
// As Op + 0 = Op, return Ops[0] directly.
if (ShiftAmt == 64)
return Ops[0];
Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
"shrd_n");
return Builder.CreateAdd(Ops[0], Ops[1]);
}
case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
"lane");
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(vectorWrapScalar16(Ops[1]));
ProductOps.push_back(vectorWrapScalar16(Ops[2]));
llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
ProductOps, "vqdmlXl");
Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
Ops.pop_back();
unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
? Intrinsic::aarch64_neon_sqadd
: Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vqdmlals_s32:
case NEON::BI__builtin_neon_vqdmlsls_s32: {
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(Ops[1]);
ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
Ops[1] =
EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
ProductOps, "vqdmlXl");
unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
? Intrinsic::aarch64_neon_sqadd
: Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vqdmlals_lane_s32:
case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
"lane");
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(Ops[1]);
ProductOps.push_back(Ops[2]);
Ops[1] =
EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
ProductOps, "vqdmlXl");
Ops.pop_back();
unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
? Intrinsic::aarch64_neon_sqadd
: Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
}
}
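  // Everything past this point operates on a full NEON vector type derived
  // from the builtin's type flags.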
llvm::VectorType *VTy = GetNeonType(this, Type);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
// Not all intrinsics handled by the common case work for AArch64 yet, so only
// defer to common code if it's been added to our special map.
Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
AArch64SIMDIntrinsicsProvenSorted);
if (Builtin)
return EmitCommonNeonBuiltinExpr(
Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
Builtin->NameHint, Builtin->TypeModifier, E, Ops, nullptr);
if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops))
return V;
unsigned Int;
switch (BuiltinID) {
default: return nullptr;
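  // vbsl has no LLVM intrinsic; expand the bitwise select directly as
  // (mask & a) | (~mask & b) on the integer form of the vectors.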
case NEON::BI__builtin_neon_vbsl_v:
case NEON::BI__builtin_neon_vbslq_v: {
llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
return Builder.CreateBitCast(Ops[0], Ty);
}
case NEON::BI__builtin_neon_vfma_lane_v:
case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
// The ARM builtins (and instructions) have the addend as the first
// operand, but the 'fma' intrinsics have it last. Swap it around here.
Value *Addend = Ops[0];
Value *Multiplicand = Ops[1];
Value *LaneSource = Ops[2];
Ops[0] = Multiplicand;
Ops[1] = LaneSource;
Ops[2] = Addend;
// Now adjust things to handle the lane access.
llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ?
llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) :
VTy;
llvm::Constant *cst = cast<Constant>(Ops[3]);
Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst);
Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
Ops.pop_back();
Int = Intrinsic::fma;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
}
case NEON::BI__builtin_neon_vfma_laneq_v: {
llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
// v1f64 fma should be mapped to Neon scalar f64 fma
if (VTy && VTy->getElementType() == DoubleTy) {
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
llvm::Type *VTy = GetNeonType(this,
NeonTypeFlags(NeonTypeFlags::Float64, false, true));
Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
return Builder.CreateBitCast(Result, Ty);
}
Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
VTy->getNumElements() * 2);
Ops[2] = Builder.CreateBitCast(Ops[2], STy);
Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
cast<ConstantInt>(Ops[3]));
Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
}
case NEON::BI__builtin_neon_vfmaq_laneq_v: {
Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
}
case NEON::BI__builtin_neon_vfmas_lane_f32:
case NEON::BI__builtin_neon_vfmas_laneq_f32:
case NEON::BI__builtin_neon_vfmad_lane_f64:
case NEON::BI__builtin_neon_vfmad_laneq_f64: {
Ops.push_back(EmitScalarExpr(E->getArg(3)));
llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
}
case NEON::BI__builtin_neon_vfms_v:
case NEON::BI__builtin_neon_vfmsq_v: { // Only used for FP types
// FIXME: probably remove when we no longer support aarch64_simd.h
// (arm_neon.h delegates to vfma).
// The ARM builtins (and instructions) have the addend as the first
// operand, but the 'fma' intrinsics have it last. Swap it around here.
Value *Subtrahend = Ops[0];
Value *Multiplicand = Ops[2];
Ops[0] = Multiplicand;
Ops[2] = Subtrahend;
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
Ops[1] = Builder.CreateFNeg(Ops[1]);
Int = Intrinsic::fma;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmls");
}
case NEON::BI__builtin_neon_vmull_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
case NEON::BI__builtin_neon_vmax_v:
case NEON::BI__builtin_neon_vmaxq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
case NEON::BI__builtin_neon_vmin_v:
case NEON::BI__builtin_neon_vminq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
case NEON::BI__builtin_neon_vabd_v:
case NEON::BI__builtin_neon_vabdq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
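  // Pairwise add-and-accumulate: emit the widening pairwise add
  // (uaddlp/saddlp) on the source vector, then add the accumulator to it.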
case NEON::BI__builtin_neon_vpadal_v:
case NEON::BI__builtin_neon_vpadalq_v: {
unsigned ArgElts = VTy->getNumElements();
llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
unsigned BitWidth = EltTy->getBitWidth();
llvm::Type *ArgTy = llvm::VectorType::get(
llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts);
llvm::Type* Tys[2] = { VTy, ArgTy };
Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
SmallVector<llvm::Value*, 1> TmpOps;
TmpOps.push_back(Ops[1]);
Function *F = CGM.getIntrinsic(Int, Tys);
llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
return Builder.CreateAdd(tmp, addend);
}
case NEON::BI__builtin_neon_vpmin_v:
case NEON::BI__builtin_neon_vpminq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
case NEON::BI__builtin_neon_vpmax_v:
case NEON::BI__builtin_neon_vpmaxq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
case NEON::BI__builtin_neon_vminnm_v:
case NEON::BI__builtin_neon_vminnmq_v:
Int = Intrinsic::aarch64_neon_fminnm;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
case NEON::BI__builtin_neon_vmaxnm_v:
case NEON::BI__builtin_neon_vmaxnmq_v:
Int = Intrinsic::aarch64_neon_fmaxnm;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
case NEON::BI__builtin_neon_vrecpss_f32: {
llvm::Type *f32Type = llvm::Type::getFloatTy(getLLVMContext());
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f32Type),
Ops, "vrecps");
}
case NEON::BI__builtin_neon_vrecpsd_f64: {
llvm::Type *f64Type = llvm::Type::getDoubleTy(getLLVMContext());
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f64Type),
Ops, "vrecps");
}
case NEON::BI__builtin_neon_vqshrun_n_v:
Int = Intrinsic::aarch64_neon_sqshrun;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
case NEON::BI__builtin_neon_vqrshrun_n_v:
Int = Intrinsic::aarch64_neon_sqrshrun;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
case NEON::BI__builtin_neon_vqshrn_n_v:
Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
case NEON::BI__builtin_neon_vrshrn_n_v:
Int = Intrinsic::aarch64_neon_rshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
case NEON::BI__builtin_neon_vqrshrn_n_v:
Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
case NEON::BI__builtin_neon_vrnda_v:
case NEON::BI__builtin_neon_vrndaq_v: {
Int = Intrinsic::round;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
}
case NEON::BI__builtin_neon_vrndi_v:
case NEON::BI__builtin_neon_vrndiq_v: {
Int = Intrinsic::nearbyint;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndi");
}
case NEON::BI__builtin_neon_vrndm_v:
case NEON::BI__builtin_neon_vrndmq_v: {
Int = Intrinsic::floor;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
}
case NEON::BI__builtin_neon_vrndn_v:
case NEON::BI__builtin_neon_vrndnq_v: {
Int = Intrinsic::aarch64_neon_frintn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
}
case NEON::BI__builtin_neon_vrndp_v:
case NEON::BI__builtin_neon_vrndpq_v: {
Int = Intrinsic::ceil;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
}
case NEON::BI__builtin_neon_vrndx_v:
case NEON::BI__builtin_neon_vrndxq_v: {
Int = Intrinsic::rint;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
}
case NEON::BI__builtin_neon_vrnd_v:
case NEON::BI__builtin_neon_vrndq_v: {
Int = Intrinsic::trunc;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
}
case NEON::BI__builtin_neon_vceqz_v:
case NEON::BI__builtin_neon_vceqzq_v:
return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
ICmpInst::ICMP_EQ, "vceqz");
case NEON::BI__builtin_neon_vcgez_v:
case NEON::BI__builtin_neon_vcgezq_v:
return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
ICmpInst::ICMP_SGE, "vcgez");
case NEON::BI__builtin_neon_vclez_v:
case NEON::BI__builtin_neon_vclezq_v:
return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
ICmpInst::ICMP_SLE, "vclez");
case NEON::BI__builtin_neon_vcgtz_v:
case NEON::BI__builtin_neon_vcgtzq_v:
return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
ICmpInst::ICMP_SGT, "vcgtz");
case NEON::BI__builtin_neon_vcltz_v:
case NEON::BI__builtin_neon_vcltzq_v:
return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
ICmpInst::ICMP_SLT, "vcltz");
case NEON::BI__builtin_neon_vcvt_f64_v:
case NEON::BI__builtin_neon_vcvtq_f64_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case NEON::BI__builtin_neon_vcvt_f64_f32: {
assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
"unexpected vcvt_f64_f32 builtin");
NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
}
case NEON::BI__builtin_neon_vcvt_f32_f64: {
assert(Type.getEltType() == NeonTypeFlags::Float32 &&
"unexpected vcvt_f32_f64 builtin");
NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
}
case NEON::BI__builtin_neon_vcvt_s32_v:
case NEON::BI__builtin_neon_vcvt_u32_v:
case NEON::BI__builtin_neon_vcvt_s64_v:
case NEON::BI__builtin_neon_vcvt_u64_v:
case NEON::BI__builtin_neon_vcvtq_s32_v:
case NEON::BI__builtin_neon_vcvtq_u32_v:
case NEON::BI__builtin_neon_vcvtq_s64_v:
case NEON::BI__builtin_neon_vcvtq_u64_v: {
bool Double =
(cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
llvm::Type *InTy =
GetNeonType(this,
NeonTypeFlags(Double ? NeonTypeFlags::Float64
: NeonTypeFlags::Float32, false, quad));
Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
if (usgn)
return Builder.CreateFPToUI(Ops[0], Ty);
return Builder.CreateFPToSI(Ops[0], Ty);
}
case NEON::BI__builtin_neon_vcvta_s32_v:
case NEON::BI__builtin_neon_vcvtaq_s32_v:
case NEON::BI__builtin_neon_vcvta_u32_v:
case NEON::BI__builtin_neon_vcvtaq_u32_v:
case NEON::BI__builtin_neon_vcvta_s64_v:
case NEON::BI__builtin_neon_vcvtaq_s64_v:
case NEON::BI__builtin_neon_vcvta_u64_v:
case NEON::BI__builtin_neon_vcvtaq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
bool Double =
(cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
llvm::Type *InTy =
GetNeonType(this,
NeonTypeFlags(Double ? NeonTypeFlags::Float64
: NeonTypeFlags::Float32, false, quad));
llvm::Type *Tys[2] = { Ty, InTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
}
case NEON::BI__builtin_neon_vcvtm_s32_v:
case NEON::BI__builtin_neon_vcvtmq_s32_v:
case NEON::BI__builtin_neon_vcvtm_u32_v:
case NEON::BI__builtin_neon_vcvtmq_u32_v:
case NEON::BI__builtin_neon_vcvtm_s64_v:
case NEON::BI__builtin_neon_vcvtmq_s64_v:
case NEON::BI__builtin_neon_vcvtm_u64_v:
case NEON::BI__builtin_neon_vcvtmq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
bool Double =
(cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
llvm::Type *InTy =
GetNeonType(this,
NeonTypeFlags(Double ? NeonTypeFlags::Float64
: NeonTypeFlags::Float32, false, quad));
llvm::Type *Tys[2] = { Ty, InTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
}
case NEON::BI__builtin_neon_vcvtn_s32_v:
case NEON::BI__builtin_neon_vcvtnq_s32_v:
case NEON::BI__builtin_neon_vcvtn_u32_v:
case NEON::BI__builtin_neon_vcvtnq_u32_v:
case NEON::BI__builtin_neon_vcvtn_s64_v:
case NEON::BI__builtin_neon_vcvtnq_s64_v:
case NEON::BI__builtin_neon_vcvtn_u64_v:
case NEON::BI__builtin_neon_vcvtnq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
bool Double =
(cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
llvm::Type *InTy =
GetNeonType(this,
NeonTypeFlags(Double ? NeonTypeFlags::Float64
: NeonTypeFlags::Float32, false, quad));
llvm::Type *Tys[2] = { Ty, InTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
}
case NEON::BI__builtin_neon_vcvtp_s32_v:
case NEON::BI__builtin_neon_vcvtpq_s32_v:
case NEON::BI__builtin_neon_vcvtp_u32_v:
case NEON::BI__builtin_neon_vcvtpq_u32_v:
case NEON::BI__builtin_neon_vcvtp_s64_v:
case NEON::BI__builtin_neon_vcvtpq_s64_v:
case NEON::BI__builtin_neon_vcvtp_u64_v:
case NEON::BI__builtin_neon_vcvtpq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
bool Double =
(cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
llvm::Type *InTy =
GetNeonType(this,
NeonTypeFlags(Double ? NeonTypeFlags::Float64
: NeonTypeFlags::Float32, false, quad));
llvm::Type *Tys[2] = { Ty, InTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
}
case NEON::BI__builtin_neon_vmulx_v:
case NEON::BI__builtin_neon_vmulxq_v: {
Int = Intrinsic::aarch64_neon_fmulx;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
}
case NEON::BI__builtin_neon_vmul_lane_v:
case NEON::BI__builtin_neon_vmul_laneq_v: {
// v1f64 vmul_lane should be mapped to Neon scalar mul lane
    bool Quad = BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v;
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
llvm::Type *VTy = GetNeonType(this,
NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
return Builder.CreateBitCast(Result, Ty);
}
case NEON::BI__builtin_neon_vnegd_s64:
return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
case NEON::BI__builtin_neon_vpmaxnm_v:
case NEON::BI__builtin_neon_vpmaxnmq_v: {
Int = Intrinsic::aarch64_neon_fmaxnmp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
}
case NEON::BI__builtin_neon_vpminnm_v:
case NEON::BI__builtin_neon_vpminnmq_v: {
Int = Intrinsic::aarch64_neon_fminnmp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
}
case NEON::BI__builtin_neon_vsqrt_v:
case NEON::BI__builtin_neon_vsqrtq_v: {
Int = Intrinsic::sqrt;
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
}
case NEON::BI__builtin_neon_vrbit_v:
case NEON::BI__builtin_neon_vrbitq_v: {
Int = Intrinsic::aarch64_neon_rbit;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
}
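  // The across-vector reductions below all use an intrinsic that returns
  // an i32, which is truncated back to the builtin's narrower return type
  // where necessary.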
case NEON::BI__builtin_neon_vaddv_u8:
// FIXME: These are handled by the AArch64 scalar code.
usgn = true;
// FALLTHROUGH
case NEON::BI__builtin_neon_vaddv_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 8));
}
case NEON::BI__builtin_neon_vaddv_u16:
usgn = true;
// FALLTHROUGH
case NEON::BI__builtin_neon_vaddv_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 16));
}
case NEON::BI__builtin_neon_vaddvq_u8:
usgn = true;
// FALLTHROUGH
case NEON::BI__builtin_neon_vaddvq_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 8));
}
case NEON::BI__builtin_neon_vaddvq_u16:
usgn = true;
// FALLTHROUGH
case NEON::BI__builtin_neon_vaddvq_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 16));
}
case NEON::BI__builtin_neon_vmaxv_u8: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 8));
}
case NEON::BI__builtin_neon_vmaxv_u16: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 16));
}
case NEON::BI__builtin_neon_vmaxvq_u8: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 8));
}
case NEON::BI__builtin_neon_vmaxvq_u16: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 16));
}
case NEON::BI__builtin_neon_vmaxv_s8: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 8));
}
case NEON::BI__builtin_neon_vmaxv_s16: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 16));
}
case NEON::BI__builtin_neon_vmaxvq_s8: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 8));
}
case NEON::BI__builtin_neon_vmaxvq_s16: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 16));
}
case NEON::BI__builtin_neon_vminv_u8: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 8));
}
case NEON::BI__builtin_neon_vminv_u16: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 16));
}
case NEON::BI__builtin_neon_vminvq_u8: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 8));
}
case NEON::BI__builtin_neon_vminvq_u16: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 16));
}
case NEON::BI__builtin_neon_vminv_s8: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 8));
}
case NEON::BI__builtin_neon_vminv_s16: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 16));
}
case NEON::BI__builtin_neon_vminvq_s8: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 8));
}
case NEON::BI__builtin_neon_vminvq_s16: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 16));
}
case NEON::BI__builtin_neon_vmul_n_f64: {
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
return Builder.CreateFMul(Ops[0], RHS);
}
case NEON::BI__builtin_neon_vaddlv_u8: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 16));
}
case NEON::BI__builtin_neon_vaddlv_u16: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vaddlvq_u8: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 16));
}
case NEON::BI__builtin_neon_vaddlvq_u16: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vaddlv_s8: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 16));
}
case NEON::BI__builtin_neon_vaddlv_s16: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vaddlvq_s8: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0],
llvm::IntegerType::get(getLLVMContext(), 16));
}
case NEON::BI__builtin_neon_vaddlvq_s16: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = llvm::IntegerType::get(getLLVMContext(), 32);
VTy =
llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vsri_n_v:
case NEON::BI__builtin_neon_vsriq_n_v: {
Int = Intrinsic::aarch64_neon_vsri;
llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
return EmitNeonCall(Intrin, Ops, "vsri_n");
}
case NEON::BI__builtin_neon_vsli_n_v:
case NEON::BI__builtin_neon_vsliq_n_v: {
Int = Intrinsic::aarch64_neon_vsli;
llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
return EmitNeonCall(Intrin, Ops, "vsli_n");
}
case NEON::BI__builtin_neon_vsra_n_v:
case NEON::BI__builtin_neon_vsraq_n_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
return Builder.CreateAdd(Ops[0], Ops[1]);
case NEON::BI__builtin_neon_vrsra_n_v:
case NEON::BI__builtin_neon_vrsraq_n_v: {
Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
SmallVector<llvm::Value*,2> TmpOps;
TmpOps.push_back(Ops[1]);
TmpOps.push_back(Ops[2]);
Function* F = CGM.getIntrinsic(Int, Ty);
llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
return Builder.CreateAdd(Ops[0], tmp);
}
// FIXME: Sharing loads & stores with 32-bit is complicated by the absence
// of an Align parameter here.
case NEON::BI__builtin_neon_vld1_x2_v:
case NEON::BI__builtin_neon_vld1q_x2_v:
case NEON::BI__builtin_neon_vld1_x3_v:
case NEON::BI__builtin_neon_vld1q_x3_v:
case NEON::BI__builtin_neon_vld1_x4_v:
case NEON::BI__builtin_neon_vld1q_x4_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
unsigned Int;
switch (BuiltinID) {
case NEON::BI__builtin_neon_vld1_x2_v:
case NEON::BI__builtin_neon_vld1q_x2_v:
Int = Intrinsic::aarch64_neon_ld1x2;
break;
case NEON::BI__builtin_neon_vld1_x3_v:
case NEON::BI__builtin_neon_vld1q_x3_v:
Int = Intrinsic::aarch64_neon_ld1x3;
break;
case NEON::BI__builtin_neon_vld1_x4_v:
case NEON::BI__builtin_neon_vld1q_x4_v:
Int = Intrinsic::aarch64_neon_ld1x4;
break;
}
Function *F = CGM.getIntrinsic(Int, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vst1_x2_v:
case NEON::BI__builtin_neon_vst1q_x2_v:
case NEON::BI__builtin_neon_vst1_x3_v:
case NEON::BI__builtin_neon_vst1q_x3_v:
case NEON::BI__builtin_neon_vst1_x4_v:
case NEON::BI__builtin_neon_vst1q_x4_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
llvm::Type *Tys[2] = { VTy, PTy };
unsigned Int;
switch (BuiltinID) {
case NEON::BI__builtin_neon_vst1_x2_v:
case NEON::BI__builtin_neon_vst1q_x2_v:
Int = Intrinsic::aarch64_neon_st1x2;
break;
case NEON::BI__builtin_neon_vst1_x3_v:
case NEON::BI__builtin_neon_vst1q_x3_v:
Int = Intrinsic::aarch64_neon_st1x3;
break;
case NEON::BI__builtin_neon_vst1_x4_v:
case NEON::BI__builtin_neon_vst1q_x4_v:
Int = Intrinsic::aarch64_neon_st1x4;
break;
}
SmallVector<Value *, 4> IntOps(Ops.begin()+1, Ops.end());
IntOps.push_back(Ops[0]);
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), IntOps, "");
}
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
return Builder.CreateLoad(Ops[0]);
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
return Builder.CreateStore(Ops[1], Ops[0]);
case NEON::BI__builtin_neon_vld1_lane_v:
case NEON::BI__builtin_neon_vld1q_lane_v:
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[0] = Builder.CreateLoad(Ops[0]);
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
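  // vld1_dup: load a single element, insert it into lane 0 of an undef
  // vector, and splat it across all lanes.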
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[0] = Builder.CreateLoad(Ops[0]);
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
return EmitNeonSplat(Ops[0], CI);
}
case NEON::BI__builtin_neon_vst1_lane_v:
case NEON::BI__builtin_neon_vst1q_lane_v:
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld2q_dup_v: {
llvm::Type *PTy =
llvm::PointerType::getUnqual(VTy->getElementType());
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld3q_dup_v: {
llvm::Type *PTy =
llvm::PointerType::getUnqual(VTy->getElementType());
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_dup_v:
case NEON::BI__builtin_neon_vld4q_dup_v: {
llvm::Type *PTy =
llvm::PointerType::getUnqual(VTy->getElementType());
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_lane_v:
case NEON::BI__builtin_neon_vld2q_lane_v: {
llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
Ops.push_back(Ops[1]);
Ops.erase(Ops.begin()+1);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateZExt(Ops[3],
llvm::IntegerType::get(getLLVMContext(), 64));
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_lane_v:
case NEON::BI__builtin_neon_vld3q_lane_v: {
llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
Ops.push_back(Ops[1]);
Ops.erase(Ops.begin()+1);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateZExt(Ops[4],
llvm::IntegerType::get(getLLVMContext(), 64));
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_lane_v:
case NEON::BI__builtin_neon_vld4q_lane_v: {
llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
Ops.push_back(Ops[1]);
Ops.erase(Ops.begin()+1);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateZExt(Ops[5],
llvm::IntegerType::get(getLLVMContext(), 64));
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vst2_v:
case NEON::BI__builtin_neon_vst2q_v: {
Ops.push_back(Ops[0]);
Ops.erase(Ops.begin());
llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst2_lane_v:
case NEON::BI__builtin_neon_vst2q_lane_v: {
Ops.push_back(Ops[0]);
Ops.erase(Ops.begin());
Ops[2] = Builder.CreateZExt(Ops[2],
llvm::IntegerType::get(getLLVMContext(), 64));
llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst3_v:
case NEON::BI__builtin_neon_vst3q_v: {
Ops.push_back(Ops[0]);
Ops.erase(Ops.begin());
llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst3_lane_v:
case NEON::BI__builtin_neon_vst3q_lane_v: {
Ops.push_back(Ops[0]);
Ops.erase(Ops.begin());
Ops[3] = Builder.CreateZExt(Ops[3],
llvm::IntegerType::get(getLLVMContext(), 64));
llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst4_v:
case NEON::BI__builtin_neon_vst4q_v: {
Ops.push_back(Ops[0]);
Ops.erase(Ops.begin());
llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst4_lane_v:
case NEON::BI__builtin_neon_vst4q_lane_v: {
Ops.push_back(Ops[0]);
Ops.erase(Ops.begin());
Ops[4] = Builder.CreateZExt(Ops[4],
llvm::IntegerType::get(getLLVMContext(), 64));
llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
Ops, "");
}
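  // vtrn/vuzp/vzip produce two result vectors; each half is emitted as a
  // shufflevector and stored to consecutive slots of the result pointer
  // in Ops[0].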
case NEON::BI__builtin_neon_vtrn_v:
case NEON::BI__builtin_neon_vtrnq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back(ConstantInt::get(Int32Ty, i+vi));
Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
SV = Builder.CreateStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vuzp_v:
case NEON::BI__builtin_neon_vuzpq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
SV = Builder.CreateStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vzip_v:
case NEON::BI__builtin_neon_vzipq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<Constant*, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
SV = Builder.CreateStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vqtbl1q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
Ops, "vtbl1");
}
case NEON::BI__builtin_neon_vqtbl2q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
Ops, "vtbl2");
}
case NEON::BI__builtin_neon_vqtbl3q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
Ops, "vtbl3");
}
case NEON::BI__builtin_neon_vqtbl4q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
Ops, "vtbl4");
}
case NEON::BI__builtin_neon_vqtbx1q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
Ops, "vtbx1");
}
case NEON::BI__builtin_neon_vqtbx2q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
Ops, "vtbx2");
}
case NEON::BI__builtin_neon_vqtbx3q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
Ops, "vtbx3");
}
case NEON::BI__builtin_neon_vqtbx4q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
Ops, "vtbx4");
}
case NEON::BI__builtin_neon_vsqadd_v:
case NEON::BI__builtin_neon_vsqaddq_v: {
Int = Intrinsic::aarch64_neon_usqadd;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
}
case NEON::BI__builtin_neon_vuqadd_v:
case NEON::BI__builtin_neon_vuqaddq_v: {
Int = Intrinsic::aarch64_neon_suqadd;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
}
}
}
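// Build an IR vector from the given scalar operands: a ConstantVector when
// every operand is a constant, otherwise a chain of insertelements into an
// undef vector.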
llvm::Value *CodeGenFunction::
BuildVector(ArrayRef<llvm::Value*> Ops) {
assert((Ops.size() & (Ops.size() - 1)) == 0 &&
"Not a power-of-two sized vector!");
bool AllConstants = true;
for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
AllConstants &= isa<Constant>(Ops[i]);
// If this is a constant vector, create a ConstantVector.
if (AllConstants) {
SmallVector<llvm::Constant*, 16> CstOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
CstOps.push_back(cast<Constant>(Ops[i]));
return llvm::ConstantVector::get(CstOps);
}
// Otherwise, insertelement the values to build the vector.
Value *Result =
llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
return Result;
}
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
SmallVector<Value*, 4> Ops;
// Find out if any arguments are required to be integer constant expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
// If this is a normal argument, just emit it as a scalar.
if ((ICEArguments & (1 << i)) == 0) {
Ops.push_back(EmitScalarExpr(E->getArg(i)));
continue;
}
// If this is required to be a constant, constant fold it so that we know
// that the generated intrinsic gets a ConstantInt.
llvm::APSInt Result;
bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
}
switch (BuiltinID) {
default: return nullptr;
case X86::BI__builtin_cpu_supports: {
const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
    // TODO: If this ever needs to work for targets other than x86, switch to
    // a TargetInfo-based mapping.
// Processor features and mapping to processor feature value.
enum X86Features {
CMOV = 0,
MMX,
POPCNT,
SSE,
SSE2,
SSE3,
SSSE3,
SSE4_1,
SSE4_2,
AVX,
AVX2,
SSE4_A,
FMA4,
XOP,
FMA,
AVX512F,
BMI,
BMI2,
MAX
};
X86Features Feature = StringSwitch<X86Features>(FeatureStr)
.Case("cmov", X86Features::CMOV)
.Case("mmx", X86Features::MMX)
.Case("popcnt", X86Features::POPCNT)
.Case("sse", X86Features::SSE)
.Case("sse2", X86Features::SSE2)
.Case("sse3", X86Features::SSE3)
.Case("sse4.1", X86Features::SSE4_1)
.Case("sse4.2", X86Features::SSE4_2)
.Case("avx", X86Features::AVX)
.Case("avx2", X86Features::AVX2)
.Case("sse4a", X86Features::SSE4_A)
.Case("fma4", X86Features::FMA4)
.Case("xop", X86Features::XOP)
.Case("fma", X86Features::FMA)
.Case("avx512f", X86Features::AVX512F)
.Case("bmi", X86Features::BMI)
.Case("bmi2", X86Features::BMI2)
.Default(X86Features::MAX);
assert(Feature != X86Features::MAX && "Invalid feature!");
// Matching the struct layout from the compiler-rt/libgcc structure that is
// filled in:
// unsigned int __cpu_vendor;
// unsigned int __cpu_type;
// unsigned int __cpu_subtype;
// unsigned int __cpu_features[1];
llvm::Type *STy = llvm::StructType::get(
Int32Ty, Int32Ty, Int32Ty, llvm::ArrayType::get(Int32Ty, 1), nullptr);
// Grab the global __cpu_model.
llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
// Grab the first (0th) element from the field __cpu_features off of the
// global in the struct STy.
Value *Idxs[] = {
ConstantInt::get(Int32Ty, 0),
ConstantInt::get(Int32Ty, 3),
ConstantInt::get(Int32Ty, 0)
};
Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
Value *Features = Builder.CreateLoad(CpuFeatures);
// Check the value of the bit corresponding to the feature requested.
Value *Bitset = Builder.CreateAnd(
Features, llvm::ConstantInt::get(Int32Ty, 1 << Feature));
return Builder.CreateICmpNE(Bitset, llvm::ConstantInt::get(Int32Ty, 0));
}
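  // _mm_prefetch lowers to llvm.prefetch with the read/write flag fixed to
  // "read" (0) and the cache-type flag fixed to "data" (1); only the
  // locality hint is taken from the call.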
case X86::BI_mm_prefetch: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *RW = ConstantInt::get(Int32Ty, 0);
Value *Locality = EmitScalarExpr(E->getArg(1));
Value *Data = ConstantInt::get(Int32Ty, 1);
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return Builder.CreateCall(F, {Address, RW, Locality, Data});
}
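  // The MMX vec_init builtins build an ordinary IR vector from their scalar
  // arguments and then bitcast it to the opaque x86_mmx type.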
case X86::BI__builtin_ia32_vec_init_v8qi:
case X86::BI__builtin_ia32_vec_init_v4hi:
case X86::BI__builtin_ia32_vec_init_v2si:
return Builder.CreateBitCast(BuildVector(Ops),
llvm::Type::getX86_MMXTy(getLLVMContext()));
case X86::BI__builtin_ia32_vec_ext_v2si:
return Builder.CreateExtractElement(Ops[0],
llvm::ConstantInt::get(Ops[1]->getType(), 0));
case X86::BI__builtin_ia32_ldmxcsr: {
Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
Builder.CreateBitCast(Tmp, Int8PtrTy));
}
case X86::BI__builtin_ia32_stmxcsr: {
Value *Tmp = CreateMemTemp(E->getType());
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
Builder.CreateBitCast(Tmp, Int8PtrTy));
return Builder.CreateLoad(Tmp, "stmxcsr");
}
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
    // Cast the value to v2i64.
Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
    // Extract element 0 (storelps) or 1 (storehps).
unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
llvm::Value *Idx = llvm::ConstantInt::get(SizeTy, Index);
Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
    // Cast the pointer to i64* and store.
Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
return Builder.CreateStore(Ops[1], Ops[0]);
}
case X86::BI__builtin_ia32_palignr128:
case X86::BI__builtin_ia32_palignr256: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
unsigned NumElts =
cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
assert(NumElts % 16 == 0);
unsigned NumLanes = NumElts / 16;
unsigned NumLaneElts = NumElts / NumLanes;
// If palignr is shifting the pair of vectors more than the size of two
// lanes, emit zero.
if (ShiftVal >= (2 * NumLaneElts))
return llvm::Constant::getNullValue(ConvertType(E->getType()));
// If palignr is shifting the pair of input vectors more than one lane,
// but less than two lanes, convert to shifting in zeroes.
if (ShiftVal > NumLaneElts) {
ShiftVal -= NumLaneElts;
Ops[1] = Ops[0];
Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
}
SmallVector<llvm::Constant*, 32> Indices;
// 256-bit palignr operates on 128-bit lanes so we need to handle that
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
for (unsigned i = 0; i != NumLaneElts; ++i) {
unsigned Idx = ShiftVal + i;
if (Idx >= NumLaneElts)
Idx += NumElts - NumLaneElts; // End of lane, switch operand.
Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx + l));
}
}
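    // E.g. for 128-bit palignr with ShiftVal == 5 this yields indices 5..20:
    // bytes 5..15 of Ops[1] followed by bytes 0..4 of Ops[0], i.e. the
    // concatenation Ops[0]:Ops[1] shifted right by 5 bytes.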
Value* SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
}
case X86::BI__builtin_ia32_pslldqi256: {
// Shift value is in bits so divide by 8.
unsigned shiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() >> 3;
// If pslldq is shifting the vector more than 15 bytes, emit zero.
if (shiftVal >= 16)
return llvm::Constant::getNullValue(ConvertType(E->getType()));
SmallVector<llvm::Constant*, 32> Indices;
// 256-bit pslldq operates on 128-bit lanes so we need to handle that
for (unsigned l = 0; l != 32; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
unsigned Idx = 32 + i - shiftVal;
if (Idx < 32) Idx -= 16; // end of lane, switch operand.
Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx + l));
}
}
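    // E.g. for a 4-byte shift each lane becomes four zero bytes followed by
    // bytes 0..11 of the corresponding lane of Ops[0].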
llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, 32);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Value *Zero = llvm::Constant::getNullValue(VecTy);
Value *SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Zero, Ops[0], SV, "pslldq");
llvm::Type *ResultType = ConvertType(E->getType());
return Builder.CreateBitCast(SV, ResultType, "cast");
}
case X86::BI__builtin_ia32_psrldqi256: {
// Shift value is in bits so divide by 8.
unsigned shiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() >> 3;
// If psrldq is shifting the vector more than 15 bytes, emit zero.
if (shiftVal >= 16)
return llvm::Constant::getNullValue(ConvertType(E->getType()));
SmallVector<llvm::Constant*, 32> Indices;
// 256-bit psrldq operates on 128-bit lanes so we need to handle that
for (unsigned l = 0; l != 32; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
unsigned Idx = i + shiftVal;
if (Idx >= 16) Idx += 16; // end of lane, switch operand.
Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx + l));
}
}
llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, 32);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Value *Zero = llvm::Constant::getNullValue(VecTy);
Value *SV = llvm::ConstantVector::get(Indices);
SV = Builder.CreateShuffleVector(Ops[0], Zero, SV, "psrldq");
llvm::Type *ResultType = ConvertType(E->getType());
return Builder.CreateBitCast(SV, ResultType, "cast");
}
case X86::BI__builtin_ia32_movntps:
case X86::BI__builtin_ia32_movntps256:
case X86::BI__builtin_ia32_movntpd:
case X86::BI__builtin_ia32_movntpd256:
case X86::BI__builtin_ia32_movntdq:
case X86::BI__builtin_ia32_movntdq256:
case X86::BI__builtin_ia32_movnti:
case X86::BI__builtin_ia32_movnti64: {
llvm::MDNode *Node = llvm::MDNode::get(
getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
// Convert the type of the pointer to a pointer to the stored type.
Value *BC = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()),
"cast");
StoreInst *SI = Builder.CreateStore(Ops[1], BC);
SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
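    // The result is an ordinary store tagged as non-temporal, e.g.:
    //   store <4 x float> %val, <4 x float>* %ptr, align 16, !nontemporal !0
    //   !0 = !{i32 1}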
// If the operand is an integer, we can't assume alignment. Otherwise,
// assume natural alignment.
QualType ArgTy = E->getArg(1)->getType();
unsigned Align;
if (ArgTy->isIntegerType())
Align = 1;
else
Align = getContext().getTypeSizeInChars(ArgTy).getQuantity();
SI->setAlignment(Align);
return SI;
}
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
case X86::BI__builtin_ia32_pswapdsi: {
llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
return Builder.CreateCall(F, Ops, "pswapd");
}
case X86::BI__builtin_ia32_rdrand16_step:
case X86::BI__builtin_ia32_rdrand32_step:
case X86::BI__builtin_ia32_rdrand64_step:
case X86::BI__builtin_ia32_rdseed16_step:
case X86::BI__builtin_ia32_rdseed32_step:
case X86::BI__builtin_ia32_rdseed64_step: {
Intrinsic::ID ID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_rdrand16_step:
ID = Intrinsic::x86_rdrand_16;
break;
case X86::BI__builtin_ia32_rdrand32_step:
ID = Intrinsic::x86_rdrand_32;
break;
case X86::BI__builtin_ia32_rdrand64_step:
ID = Intrinsic::x86_rdrand_64;
break;
case X86::BI__builtin_ia32_rdseed16_step:
ID = Intrinsic::x86_rdseed_16;
break;
case X86::BI__builtin_ia32_rdseed32_step:
ID = Intrinsic::x86_rdseed_32;
break;
case X86::BI__builtin_ia32_rdseed64_step:
ID = Intrinsic::x86_rdseed_64;
break;
}
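    // Each of these intrinsics returns a { iN, i32 } pair: the random value
    // and the carry flag (1 on success). Store the value through the out
    // pointer argument and return the flag.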
Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
return Builder.CreateExtractValue(Call, 1);
}
  // SSE comparison intrinsics
case X86::BI__builtin_ia32_cmpeqps:
case X86::BI__builtin_ia32_cmpltps:
case X86::BI__builtin_ia32_cmpleps:
case X86::BI__builtin_ia32_cmpunordps:
case X86::BI__builtin_ia32_cmpneqps:
case X86::BI__builtin_ia32_cmpnltps:
case X86::BI__builtin_ia32_cmpnleps:
case X86::BI__builtin_ia32_cmpordps:
case X86::BI__builtin_ia32_cmpeqss:
case X86::BI__builtin_ia32_cmpltss:
case X86::BI__builtin_ia32_cmpless:
case X86::BI__builtin_ia32_cmpunordss:
case X86::BI__builtin_ia32_cmpneqss:
case X86::BI__builtin_ia32_cmpnltss:
case X86::BI__builtin_ia32_cmpnless:
case X86::BI__builtin_ia32_cmpordss:
case X86::BI__builtin_ia32_cmpeqpd:
case X86::BI__builtin_ia32_cmpltpd:
case X86::BI__builtin_ia32_cmplepd:
case X86::BI__builtin_ia32_cmpunordpd:
case X86::BI__builtin_ia32_cmpneqpd:
case X86::BI__builtin_ia32_cmpnltpd:
case X86::BI__builtin_ia32_cmpnlepd:
case X86::BI__builtin_ia32_cmpordpd:
case X86::BI__builtin_ia32_cmpeqsd:
case X86::BI__builtin_ia32_cmpltsd:
case X86::BI__builtin_ia32_cmplesd:
case X86::BI__builtin_ia32_cmpunordsd:
case X86::BI__builtin_ia32_cmpneqsd:
case X86::BI__builtin_ia32_cmpnltsd:
case X86::BI__builtin_ia32_cmpnlesd:
case X86::BI__builtin_ia32_cmpordsd:
// These exist so that the builtin that takes an immediate can be bounds
// checked by clang to avoid passing bad immediates to the backend. Since
// AVX has a larger immediate than SSE we would need separate builtins to
  // do the different bounds checking. Rather than create a clang-specific
  // SSE-only builtin, this implements eight separate builtins to match the
  // gcc implementation.
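  // E.g. __builtin_ia32_cmpltps(a, b) lowers to
  //   call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a, <4 x float> %b, i8 1)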
// Choose the immediate.
unsigned Imm;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_cmpeqps:
case X86::BI__builtin_ia32_cmpeqss:
case X86::BI__builtin_ia32_cmpeqpd:
case X86::BI__builtin_ia32_cmpeqsd:
Imm = 0;
break;
case X86::BI__builtin_ia32_cmpltps:
case X86::BI__builtin_ia32_cmpltss:
case X86::BI__builtin_ia32_cmpltpd:
case X86::BI__builtin_ia32_cmpltsd:
Imm = 1;
break;
case X86::BI__builtin_ia32_cmpleps:
case X86::BI__builtin_ia32_cmpless:
case X86::BI__builtin_ia32_cmplepd:
case X86::BI__builtin_ia32_cmplesd:
Imm = 2;
break;
case X86::BI__builtin_ia32_cmpunordps:
case X86::BI__builtin_ia32_cmpunordss:
case X86::BI__builtin_ia32_cmpunordpd:
case X86::BI__builtin_ia32_cmpunordsd:
Imm = 3;
break;
case X86::BI__builtin_ia32_cmpneqps:
case X86::BI__builtin_ia32_cmpneqss:
case X86::BI__builtin_ia32_cmpneqpd:
case X86::BI__builtin_ia32_cmpneqsd:
Imm = 4;
break;
case X86::BI__builtin_ia32_cmpnltps:
case X86::BI__builtin_ia32_cmpnltss:
case X86::BI__builtin_ia32_cmpnltpd:
case X86::BI__builtin_ia32_cmpnltsd:
Imm = 5;
break;
case X86::BI__builtin_ia32_cmpnleps:
case X86::BI__builtin_ia32_cmpnless:
case X86::BI__builtin_ia32_cmpnlepd:
case X86::BI__builtin_ia32_cmpnlesd:
Imm = 6;
break;
case X86::BI__builtin_ia32_cmpordps:
case X86::BI__builtin_ia32_cmpordss:
case X86::BI__builtin_ia32_cmpordpd:
case X86::BI__builtin_ia32_cmpordsd:
Imm = 7;
break;
}
// Choose the intrinsic ID.
const char *name;
Intrinsic::ID ID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_cmpeqps:
case X86::BI__builtin_ia32_cmpltps:
case X86::BI__builtin_ia32_cmpleps:
case X86::BI__builtin_ia32_cmpunordps:
case X86::BI__builtin_ia32_cmpneqps:
case X86::BI__builtin_ia32_cmpnltps:
case X86::BI__builtin_ia32_cmpnleps:
case X86::BI__builtin_ia32_cmpordps:
name = "cmpps";
ID = Intrinsic::x86_sse_cmp_ps;
break;
case X86::BI__builtin_ia32_cmpeqss:
case X86::BI__builtin_ia32_cmpltss:
case X86::BI__builtin_ia32_cmpless:
case X86::BI__builtin_ia32_cmpunordss:
case X86::BI__builtin_ia32_cmpneqss:
case X86::BI__builtin_ia32_cmpnltss:
case X86::BI__builtin_ia32_cmpnless:
case X86::BI__builtin_ia32_cmpordss:
name = "cmpss";
ID = Intrinsic::x86_sse_cmp_ss;
break;
case X86::BI__builtin_ia32_cmpeqpd:
case X86::BI__builtin_ia32_cmpltpd:
case X86::BI__builtin_ia32_cmplepd:
case X86::BI__builtin_ia32_cmpunordpd:
case X86::BI__builtin_ia32_cmpneqpd:
case X86::BI__builtin_ia32_cmpnltpd:
case X86::BI__builtin_ia32_cmpnlepd:
case X86::BI__builtin_ia32_cmpordpd:
name = "cmppd";
ID = Intrinsic::x86_sse2_cmp_pd;
break;
case X86::BI__builtin_ia32_cmpeqsd:
case X86::BI__builtin_ia32_cmpltsd:
case X86::BI__builtin_ia32_cmplesd:
case X86::BI__builtin_ia32_cmpunordsd:
case X86::BI__builtin_ia32_cmpneqsd:
case X86::BI__builtin_ia32_cmpnltsd:
case X86::BI__builtin_ia32_cmpnlesd:
case X86::BI__builtin_ia32_cmpordsd:
name = "cmpsd";
ID = Intrinsic::x86_sse2_cmp_sd;
break;
}
Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, name);
}
}
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
SmallVector<Value*, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
default: return nullptr;
// vec_ld, vec_lvsl, vec_lvsr
case PPC::BI__builtin_altivec_lvx:
case PPC::BI__builtin_altivec_lvxl:
case PPC::BI__builtin_altivec_lvebx:
case PPC::BI__builtin_altivec_lvehx:
case PPC::BI__builtin_altivec_lvewx:
case PPC::BI__builtin_altivec_lvsl:
case PPC::BI__builtin_altivec_lvsr:
case PPC::BI__builtin_vsx_lxvd2x:
case PPC::BI__builtin_vsx_lxvw4x:
{
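    // These builtins take (offset, base pointer) while the intrinsics take a
    // single effective address, so fold the offset into the pointer with a
    // byte-sized GEP.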
Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
Ops.pop_back();
switch (BuiltinID) {
default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
case PPC::BI__builtin_altivec_lvx:
ID = Intrinsic::ppc_altivec_lvx;
break;
case PPC::BI__builtin_altivec_lvxl:
ID = Intrinsic::ppc_altivec_lvxl;
break;
case PPC::BI__builtin_altivec_lvebx:
ID = Intrinsic::ppc_altivec_lvebx;
break;
case PPC::BI__builtin_altivec_lvehx:
ID = Intrinsic::ppc_altivec_lvehx;
break;
case PPC::BI__builtin_altivec_lvewx:
ID = Intrinsic::ppc_altivec_lvewx;
break;
case PPC::BI__builtin_altivec_lvsl:
ID = Intrinsic::ppc_altivec_lvsl;
break;
case PPC::BI__builtin_altivec_lvsr:
ID = Intrinsic::ppc_altivec_lvsr;
break;
case PPC::BI__builtin_vsx_lxvd2x:
ID = Intrinsic::ppc_vsx_lxvd2x;
break;
case PPC::BI__builtin_vsx_lxvw4x:
ID = Intrinsic::ppc_vsx_lxvw4x;
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
}
// vec_st
case PPC::BI__builtin_altivec_stvx:
case PPC::BI__builtin_altivec_stvxl:
case PPC::BI__builtin_altivec_stvebx:
case PPC::BI__builtin_altivec_stvehx:
case PPC::BI__builtin_altivec_stvewx:
case PPC::BI__builtin_vsx_stxvd2x:
case PPC::BI__builtin_vsx_stxvw4x:
{
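    // These builtins take (value, offset, base pointer); fold the offset into
    // the pointer the same way as for the loads above.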
Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
Ops.pop_back();
switch (BuiltinID) {
default: llvm_unreachable("Unsupported st intrinsic!");
case PPC::BI__builtin_altivec_stvx:
ID = Intrinsic::ppc_altivec_stvx;
break;
case PPC::BI__builtin_altivec_stvxl:
ID = Intrinsic::ppc_altivec_stvxl;
break;
case PPC::BI__builtin_altivec_stvebx:
ID = Intrinsic::ppc_altivec_stvebx;
break;
case PPC::BI__builtin_altivec_stvehx:
ID = Intrinsic::ppc_altivec_stvehx;
break;
case PPC::BI__builtin_altivec_stvewx:
ID = Intrinsic::ppc_altivec_stvewx;
break;
case PPC::BI__builtin_vsx_stxvd2x:
ID = Intrinsic::ppc_vsx_stxvd2x;
break;
case PPC::BI__builtin_vsx_stxvw4x:
ID = Intrinsic::ppc_vsx_stxvw4x;
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
}
// Square root
case PPC::BI__builtin_vsx_xvsqrtsp:
case PPC::BI__builtin_vsx_xvsqrtdp: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
ID = Intrinsic::sqrt;
llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.CreateCall(F, X);
}
// Count leading zeros
case PPC::BI__builtin_altivec_vclzb:
case PPC::BI__builtin_altivec_vclzh:
case PPC::BI__builtin_altivec_vclzw:
case PPC::BI__builtin_altivec_vclzd: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
// Copy sign
case PPC::BI__builtin_vsx_xvcpsgnsp:
case PPC::BI__builtin_vsx_xvcpsgndp: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
ID = Intrinsic::copysign;
llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.CreateCall(F, {X, Y});
}
// Rounding/truncation
case PPC::BI__builtin_vsx_xvrspip:
case PPC::BI__builtin_vsx_xvrdpip:
case PPC::BI__builtin_vsx_xvrdpim:
case PPC::BI__builtin_vsx_xvrspim:
case PPC::BI__builtin_vsx_xvrdpi:
case PPC::BI__builtin_vsx_xvrspi:
case PPC::BI__builtin_vsx_xvrdpic:
case PPC::BI__builtin_vsx_xvrspic:
case PPC::BI__builtin_vsx_xvrdpiz:
case PPC::BI__builtin_vsx_xvrspiz: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
BuiltinID == PPC::BI__builtin_vsx_xvrspim)
ID = Intrinsic::floor;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
BuiltinID == PPC::BI__builtin_vsx_xvrspi)
ID = Intrinsic::round;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
BuiltinID == PPC::BI__builtin_vsx_xvrspic)
ID = Intrinsic::nearbyint;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
BuiltinID == PPC::BI__builtin_vsx_xvrspip)
ID = Intrinsic::ceil;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
ID = Intrinsic::trunc;
llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.CreateCall(F, X);
}
// FMA variations
case PPC::BI__builtin_vsx_xvmaddadp:
case PPC::BI__builtin_vsx_xvmaddasp:
case PPC::BI__builtin_vsx_xvnmaddadp:
case PPC::BI__builtin_vsx_xvnmaddasp:
case PPC::BI__builtin_vsx_xvmsubadp:
case PPC::BI__builtin_vsx_xvmsubasp:
case PPC::BI__builtin_vsx_xvnmsubadp:
case PPC::BI__builtin_vsx_xvnmsubasp: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
llvm::Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
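    // The four variants map onto llvm.fma with negations of the addend
    // and/or the result:
    //   xvmadda*:  fma(X, Y, Z)
    //   xvnmadda*: -fma(X, Y, Z)
    //   xvmsuba*:  fma(X, Y, -Z)
    //   xvnmsuba*: -fma(X, Y, -Z)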
switch (BuiltinID) {
case PPC::BI__builtin_vsx_xvmaddadp:
case PPC::BI__builtin_vsx_xvmaddasp:
return Builder.CreateCall(F, {X, Y, Z});
case PPC::BI__builtin_vsx_xvnmaddadp:
case PPC::BI__builtin_vsx_xvnmaddasp:
return Builder.CreateFSub(Zero,
Builder.CreateCall(F, {X, Y, Z}), "sub");
case PPC::BI__builtin_vsx_xvmsubadp:
case PPC::BI__builtin_vsx_xvmsubasp:
return Builder.CreateCall(F,
{X, Y, Builder.CreateFSub(Zero, Z, "sub")});
case PPC::BI__builtin_vsx_xvnmsubadp:
case PPC::BI__builtin_vsx_xvnmsubasp:
Value *FsubRes =
Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
return Builder.CreateFSub(Zero, FsubRes, "sub");
}
llvm_unreachable("Unknown FMA operation");
return nullptr; // Suppress no-return warning
}
}
}
// Emit an intrinsic that has 1 float or double operand.
static Value *emitUnaryFPBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
unsigned IntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, Src0);
}
// Emit an intrinsic that has 3 float or double operands.
static Value *emitTernaryFPBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
unsigned IntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, {Src0, Src1, Src2});
}
// Emit an intrinsic that has 1 float or double operand, and 1 integer.
static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
unsigned IntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, {Src0, Src1});
}
Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
case AMDGPU::BI__builtin_amdgpu_div_scale:
case AMDGPU::BI__builtin_amdgpu_div_scalef: {
    // Translate from the intrinsic's struct return to the builtin's out
// argument.
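    // The intrinsic returns { value, flag }; extract both, widen the i1 flag
    // to the out parameter's integer type, and store it through arg 3.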
std::pair<llvm::Value *, unsigned> FlagOutPtr
= EmitPointerWithAlignment(E->getArg(3));
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
llvm::Value *Z = EmitScalarExpr(E->getArg(2));
llvm::Value *Callee = CGM.getIntrinsic(Intrinsic::AMDGPU_div_scale,
X->getType());
llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
llvm::Type *RealFlagType
= FlagOutPtr.first->getType()->getPointerElementType();
llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
llvm::StoreInst *FlagStore = Builder.CreateStore(FlagExt, FlagOutPtr.first);
FlagStore->setAlignment(FlagOutPtr.second);
return Result;
}
case AMDGPU::BI__builtin_amdgpu_div_fmas:
case AMDGPU::BI__builtin_amdgpu_div_fmasf: {
llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
llvm::Value *F = CGM.getIntrinsic(Intrinsic::AMDGPU_div_fmas,
Src0->getType());
llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
}
case AMDGPU::BI__builtin_amdgpu_div_fixup:
case AMDGPU::BI__builtin_amdgpu_div_fixupf:
return emitTernaryFPBuiltin(*this, E, Intrinsic::AMDGPU_div_fixup);
case AMDGPU::BI__builtin_amdgpu_trig_preop:
case AMDGPU::BI__builtin_amdgpu_trig_preopf:
return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_trig_preop);
case AMDGPU::BI__builtin_amdgpu_rcp:
case AMDGPU::BI__builtin_amdgpu_rcpf:
return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rcp);
case AMDGPU::BI__builtin_amdgpu_rsq:
case AMDGPU::BI__builtin_amdgpu_rsqf:
return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rsq);
case AMDGPU::BI__builtin_amdgpu_rsq_clamped:
case AMDGPU::BI__builtin_amdgpu_rsq_clampedf:
return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rsq_clamped);
case AMDGPU::BI__builtin_amdgpu_ldexp:
case AMDGPU::BI__builtin_amdgpu_ldexpf:
return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_ldexp);
case AMDGPU::BI__builtin_amdgpu_class:
case AMDGPU::BI__builtin_amdgpu_classf:
return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_class);
default:
return nullptr;
}
}
/// Handle a SystemZ function in which the final argument is a pointer
/// to an int that receives the post-instruction CC value. At the LLVM level
/// this is represented as a function that returns a {result, cc} pair.
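/// E.g. a call 'res = __builtin_s390_vceqbs(a, b, &cc)' is emitted roughly as
///   %pair = call { <16 x i8>, i32 } @llvm.s390.vceqbs(%a, %b)
/// followed by a store of element 1 to *&cc and a return of element 0.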
static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
unsigned IntrinsicID,
const CallExpr *E) {
unsigned NumArgs = E->getNumArgs() - 1;
SmallVector<Value *, 8> Args(NumArgs);
for (unsigned I = 0; I < NumArgs; ++I)
Args[I] = CGF.EmitScalarExpr(E->getArg(I));
Value *CCPtr = CGF.EmitScalarExpr(E->getArg(NumArgs));
Value *F = CGF.CGM.getIntrinsic(IntrinsicID);
Value *Call = CGF.Builder.CreateCall(F, Args);
Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
CGF.Builder.CreateStore(CC, CCPtr);
return CGF.Builder.CreateExtractValue(Call, 0);
}
Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
case SystemZ::BI__builtin_tbegin: {
Value *TDB = EmitScalarExpr(E->getArg(0));
Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
return Builder.CreateCall(F, {TDB, Control});
}
case SystemZ::BI__builtin_tbegin_nofloat: {
Value *TDB = EmitScalarExpr(E->getArg(0));
Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
return Builder.CreateCall(F, {TDB, Control});
}
case SystemZ::BI__builtin_tbeginc: {
Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
Value *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
return Builder.CreateCall(F, {TDB, Control});
}
case SystemZ::BI__builtin_tabort: {
Value *Data = EmitScalarExpr(E->getArg(0));
Value *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
}
case SystemZ::BI__builtin_non_tx_store: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *Data = EmitScalarExpr(E->getArg(1));
Value *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
return Builder.CreateCall(F, {Data, Address});
}
// Vector builtins. Note that most vector builtins are mapped automatically
// to target-specific LLVM intrinsics. The ones handled specially here can
// be represented via standard LLVM IR, which is preferable to enable common
// LLVM optimizations.
case SystemZ::BI__builtin_s390_vpopctb:
case SystemZ::BI__builtin_s390_vpopcth:
case SystemZ::BI__builtin_s390_vpopctf:
case SystemZ::BI__builtin_s390_vpopctg: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
return Builder.CreateCall(F, X);
}
case SystemZ::BI__builtin_s390_vclzb:
case SystemZ::BI__builtin_s390_vclzh:
case SystemZ::BI__builtin_s390_vclzf:
case SystemZ::BI__builtin_s390_vclzg: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
case SystemZ::BI__builtin_s390_vctzb:
case SystemZ::BI__builtin_s390_vctzh:
case SystemZ::BI__builtin_s390_vctzf:
case SystemZ::BI__builtin_s390_vctzg: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
case SystemZ::BI__builtin_s390_vfsqdb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
return Builder.CreateCall(F, X);
}
case SystemZ::BI__builtin_s390_vfmadb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
return Builder.CreateCall(F, {X, Y, Z});
}
case SystemZ::BI__builtin_s390_vfmsdb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
return Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
}
case SystemZ::BI__builtin_s390_vflpdb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
return Builder.CreateCall(F, X);
}
case SystemZ::BI__builtin_s390_vflndb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
return Builder.CreateFSub(Zero, Builder.CreateCall(F, X), "sub");
}
case SystemZ::BI__builtin_s390_vfidb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
// Constant-fold the M4 and M5 mask arguments.
llvm::APSInt M4, M5;
bool IsConstM4 = E->getArg(1)->isIntegerConstantExpr(M4, getContext());
bool IsConstM5 = E->getArg(2)->isIntegerConstantExpr(M5, getContext());
assert(IsConstM4 && IsConstM5 && "Constant arg isn't actually constant?");
(void)IsConstM4; (void)IsConstM5;
    // Check whether this instance of vfidb can be represented via a standard
    // LLVM intrinsic. We only support some combinations of M4 and M5.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (M4.getZExtValue()) {
default: break;
case 0: // IEEE-inexact exception allowed
switch (M5.getZExtValue()) {
default: break;
case 0: ID = Intrinsic::rint; break;
}
break;
case 4: // IEEE-inexact exception suppressed
switch (M5.getZExtValue()) {
default: break;
case 0: ID = Intrinsic::nearbyint; break;
case 1: ID = Intrinsic::round; break;
case 5: ID = Intrinsic::trunc; break;
case 6: ID = Intrinsic::ceil; break;
case 7: ID = Intrinsic::floor; break;
}
break;
}
if (ID != Intrinsic::not_intrinsic) {
Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.CreateCall(F, X);
}
Function *F = CGM.getIntrinsic(Intrinsic::s390_vfidb);
Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
return Builder.CreateCall(F, {X, M4Value, M5Value});
}
  // Vector intrinsics that output the post-instruction CC value.
#define INTRINSIC_WITH_CC(NAME) \
case SystemZ::BI__builtin_##NAME: \
return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
INTRINSIC_WITH_CC(s390_vpkshs);
INTRINSIC_WITH_CC(s390_vpksfs);
INTRINSIC_WITH_CC(s390_vpksgs);
INTRINSIC_WITH_CC(s390_vpklshs);
INTRINSIC_WITH_CC(s390_vpklsfs);
INTRINSIC_WITH_CC(s390_vpklsgs);
INTRINSIC_WITH_CC(s390_vceqbs);
INTRINSIC_WITH_CC(s390_vceqhs);
INTRINSIC_WITH_CC(s390_vceqfs);
INTRINSIC_WITH_CC(s390_vceqgs);
INTRINSIC_WITH_CC(s390_vchbs);
INTRINSIC_WITH_CC(s390_vchhs);
INTRINSIC_WITH_CC(s390_vchfs);
INTRINSIC_WITH_CC(s390_vchgs);
INTRINSIC_WITH_CC(s390_vchlbs);
INTRINSIC_WITH_CC(s390_vchlhs);
INTRINSIC_WITH_CC(s390_vchlfs);
INTRINSIC_WITH_CC(s390_vchlgs);
INTRINSIC_WITH_CC(s390_vfaebs);
INTRINSIC_WITH_CC(s390_vfaehs);
INTRINSIC_WITH_CC(s390_vfaefs);
INTRINSIC_WITH_CC(s390_vfaezbs);
INTRINSIC_WITH_CC(s390_vfaezhs);
INTRINSIC_WITH_CC(s390_vfaezfs);
INTRINSIC_WITH_CC(s390_vfeebs);
INTRINSIC_WITH_CC(s390_vfeehs);
INTRINSIC_WITH_CC(s390_vfeefs);
INTRINSIC_WITH_CC(s390_vfeezbs);
INTRINSIC_WITH_CC(s390_vfeezhs);
INTRINSIC_WITH_CC(s390_vfeezfs);
INTRINSIC_WITH_CC(s390_vfenebs);
INTRINSIC_WITH_CC(s390_vfenehs);
INTRINSIC_WITH_CC(s390_vfenefs);
INTRINSIC_WITH_CC(s390_vfenezbs);
INTRINSIC_WITH_CC(s390_vfenezhs);
INTRINSIC_WITH_CC(s390_vfenezfs);
INTRINSIC_WITH_CC(s390_vistrbs);
INTRINSIC_WITH_CC(s390_vistrhs);
INTRINSIC_WITH_CC(s390_vistrfs);
INTRINSIC_WITH_CC(s390_vstrcbs);
INTRINSIC_WITH_CC(s390_vstrchs);
INTRINSIC_WITH_CC(s390_vstrcfs);
INTRINSIC_WITH_CC(s390_vstrczbs);
INTRINSIC_WITH_CC(s390_vstrczhs);
INTRINSIC_WITH_CC(s390_vstrczfs);
INTRINSIC_WITH_CC(s390_vfcedbs);
INTRINSIC_WITH_CC(s390_vfchdbs);
INTRINSIC_WITH_CC(s390_vfchedbs);
INTRINSIC_WITH_CC(s390_vftcidb);
#undef INTRINSIC_WITH_CC
default:
return nullptr;
}
}
Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
case NVPTX::BI__nvvm_atom_add_gen_i:
case NVPTX::BI__nvvm_atom_add_gen_l:
case NVPTX::BI__nvvm_atom_add_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
case NVPTX::BI__nvvm_atom_sub_gen_i:
case NVPTX::BI__nvvm_atom_sub_gen_l:
case NVPTX::BI__nvvm_atom_sub_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
case NVPTX::BI__nvvm_atom_and_gen_i:
case NVPTX::BI__nvvm_atom_and_gen_l:
case NVPTX::BI__nvvm_atom_and_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
case NVPTX::BI__nvvm_atom_or_gen_i:
case NVPTX::BI__nvvm_atom_or_gen_l:
case NVPTX::BI__nvvm_atom_or_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
case NVPTX::BI__nvvm_atom_xor_gen_i:
case NVPTX::BI__nvvm_atom_xor_gen_l:
case NVPTX::BI__nvvm_atom_xor_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
case NVPTX::BI__nvvm_atom_xchg_gen_i:
case NVPTX::BI__nvvm_atom_xchg_gen_l:
case NVPTX::BI__nvvm_atom_xchg_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
case NVPTX::BI__nvvm_atom_max_gen_i:
case NVPTX::BI__nvvm_atom_max_gen_l:
case NVPTX::BI__nvvm_atom_max_gen_ll:
case NVPTX::BI__nvvm_atom_max_gen_ui:
case NVPTX::BI__nvvm_atom_max_gen_ul:
case NVPTX::BI__nvvm_atom_max_gen_ull:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
case NVPTX::BI__nvvm_atom_min_gen_i:
case NVPTX::BI__nvvm_atom_min_gen_l:
case NVPTX::BI__nvvm_atom_min_gen_ll:
case NVPTX::BI__nvvm_atom_min_gen_ui:
case NVPTX::BI__nvvm_atom_min_gen_ul:
case NVPTX::BI__nvvm_atom_min_gen_ull:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
case NVPTX::BI__nvvm_atom_cas_gen_i:
case NVPTX::BI__nvvm_atom_cas_gen_l:
case NVPTX::BI__nvvm_atom_cas_gen_ll:
return MakeAtomicCmpXchgValue(*this, E, true);
case NVPTX::BI__nvvm_atom_add_gen_f: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
    // atomicrmw only handles integer operands, so we need to use LLVM's
    // nvvm_atomic_load_add_f32 intrinsic instead.
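    // This emits roughly:
    //   %r = call float @llvm.nvvm.atomic.load.add.f32.p0f32(float* %p, float %v)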
Value *FnALAF32 =
CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_add_f32, Ptr->getType());
return Builder.CreateCall(FnALAF32, {Ptr, Val});
}
default:
return nullptr;
}
}
#endif // HLSL Change Ends
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGDecl.cpp | //===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "CGHLSLRuntime.h" // HLSL Change
#include "dxc/DXIL/DxilMetadataHelper.h" // HLSL Change
using namespace clang;
using namespace CodeGen;
void CodeGenFunction::EmitDecl(const Decl &D) {
switch (D.getKind()) {
case Decl::TranslationUnit:
case Decl::ExternCContext:
case Decl::Namespace:
case Decl::UnresolvedUsingTypename:
case Decl::ClassTemplateSpecialization:
case Decl::ClassTemplatePartialSpecialization:
case Decl::VarTemplateSpecialization:
case Decl::VarTemplatePartialSpecialization:
case Decl::TemplateTypeParm:
case Decl::UnresolvedUsingValue:
case Decl::NonTypeTemplateParm:
case Decl::CXXMethod:
case Decl::CXXConstructor:
case Decl::CXXDestructor:
case Decl::CXXConversion:
case Decl::Field:
case Decl::MSProperty:
case Decl::IndirectField:
case Decl::ObjCIvar:
case Decl::ObjCAtDefsField:
case Decl::ParmVar:
case Decl::ImplicitParam:
case Decl::ClassTemplate:
case Decl::VarTemplate:
case Decl::FunctionTemplate:
case Decl::TypeAliasTemplate:
case Decl::TemplateTemplateParm:
case Decl::ObjCMethod:
case Decl::ObjCCategory:
case Decl::ObjCProtocol:
case Decl::ObjCInterface:
case Decl::ObjCCategoryImpl:
case Decl::ObjCImplementation:
case Decl::ObjCProperty:
case Decl::ObjCCompatibleAlias:
case Decl::AccessSpec:
case Decl::LinkageSpec:
case Decl::ObjCPropertyImpl:
case Decl::FileScopeAsm:
case Decl::Friend:
case Decl::FriendTemplate:
case Decl::Block:
case Decl::Captured:
case Decl::ClassScopeFunctionSpecialization:
case Decl::UsingShadow:
case Decl::ObjCTypeParam:
llvm_unreachable("Declaration should not be in declstmts!");
case Decl::Function: // void X();
case Decl::Record: // struct/union/class X;
case Decl::Enum: // enum X;
case Decl::EnumConstant: // enum ? { X = ? }
case Decl::CXXRecord: // struct/union/class X; [C++]
case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
case Decl::Label: // __label__ x;
case Decl::Import:
case Decl::OMPThreadPrivate:
case Decl::Empty:
case Decl::HLSLBuffer: // HLSL Change
// None of these decls require codegen support.
return;
case Decl::NamespaceAlias:
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
return;
case Decl::Using: // using X; [C++]
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitUsingDecl(cast<UsingDecl>(D));
return;
case Decl::UsingDirective: // using namespace X; [C++]
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
return;
case Decl::Var: {
const VarDecl &VD = cast<VarDecl>(D);
assert(VD.isLocalVarDecl() &&
"Should not see file-scope variables inside a function!");
return EmitVarDecl(VD);
}
case Decl::Typedef: // typedef int X;
case Decl::TypeAlias: { // using X = int; [C++0x]
const TypedefNameDecl &TD = cast<TypedefNameDecl>(D);
QualType Ty = TD.getUnderlyingType();
if (Ty->isVariablyModifiedType())
EmitVariablyModifiedType(Ty);
}
}
}
/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
if (D.isStaticLocal()) {
llvm::GlobalValue::LinkageTypes Linkage =
CGM.getLLVMLinkageVarDefinition(&D, /*isConstant=*/false);
// FIXME: We need to force the emission/use of a guard variable for
// some variables even if we can constant-evaluate them because
// we can't guarantee every translation unit will constant-evaluate them.
return EmitStaticVarDecl(D, Linkage);
}
// HLSL Change Begin - treat local constant as static.
  // A global variable will be generated instead of an alloca.
if (D.getType().isConstQualified() &&
(D.isLocalVarDecl() && D.getKind() != Decl::ParmVar &&
!D.isNRVOVariable())) {
    // Only create a global when the variable has a constant initializer.
if (!isTrivialInitializer(D.getInit()) && CGM.EmitConstantInit(D, this)) {
llvm::GlobalValue::LinkageTypes Linkage =
CGM.getLLVMLinkageVarDefinition(&D, /*isConstant=*/false);
return EmitStaticVarDecl(D, Linkage);
}
}
// HLSL Change End.
if (D.hasExternalStorage())
// Don't emit it now, allow it to be emitted lazily on its first use.
return;
if (D.getStorageClass() == SC_OpenCLWorkGroupLocal)
return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);
assert(D.hasLocalStorage());
return EmitAutoVarDecl(D);
}
static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
if (CGM.getLangOpts().CPlusPlus)
return CGM.getMangledName(&D).str();
// If this isn't C++, we don't need a mangled name, just a pretty one.
assert(!D.isExternallyVisible() && "name shouldn't matter");
std::string ContextName;
const DeclContext *DC = D.getDeclContext();
if (auto *CD = dyn_cast<CapturedDecl>(DC))
DC = cast<DeclContext>(CD->getNonClosureContext());
if (const auto *FD = dyn_cast<FunctionDecl>(DC))
ContextName = CGM.getMangledName(FD);
else if (const auto *BD = dyn_cast<BlockDecl>(DC))
ContextName = CGM.getBlockMangledName(GlobalDecl(), BD);
else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
ContextName = OMD->getSelector().getAsString();
else
llvm_unreachable("Unknown context for static var decl");
ContextName += "." + D.getNameAsString();
return ContextName;
}
llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
// In general, we don't always emit static var decls once before we reference
// them. It is possible to reference them before emitting the function that
// contains them, and it is possible to emit the containing function multiple
// times.
if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
return ExistingGV;
QualType Ty = D.getType();
assert(Ty->isConstantSizeType() && "VLAs can't be static");
// Use the label if the variable is renamed with the asm-label extension.
std::string Name;
if (D.hasAttr<AsmLabelAttr>())
Name = getMangledName(&D);
else
Name = getStaticDeclName(*this, D);
llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
unsigned AddrSpace =
GetGlobalVarAddressSpace(&D, getContext().getTargetAddressSpace(Ty));
// Local address space cannot have an initializer.
llvm::Constant *Init = nullptr;
if (Ty.getAddressSpace() != LangAS::opencl_local)
Init = EmitNullConstant(Ty);
else
Init = llvm::UndefValue::get(LTy);
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(getModule(), LTy,
Ty.isConstant(getContext()), Linkage,
Init, Name, nullptr,
llvm::GlobalVariable::NotThreadLocal,
AddrSpace);
GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
setGlobalVisibility(GV, &D);
if (supportsCOMDAT() && GV->isWeakForLinker())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
if (D.getTLSKind())
setTLSMode(GV, D);
if (D.isExternallyVisible()) {
if (D.hasAttr<DLLImportAttr>())
GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
else if (D.hasAttr<DLLExportAttr>())
GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
}
// Make sure the result is of the correct type.
unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(Ty);
llvm::Constant *Addr = GV;
if (AddrSpace != ExpectedAddrSpace) {
llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
Addr = llvm::ConstantExpr::getAddrSpaceCast(GV, PTy);
}
setStaticLocalDeclAddress(&D, Addr);
// Ensure that the static local gets initialized by making sure the parent
// function gets emitted eventually.
const Decl *DC = cast<Decl>(D.getDeclContext());
// We can't name blocks or captured statements directly, so try to emit their
// parents.
if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
DC = DC->getNonClosureContext();
// FIXME: Ensure that global blocks get emitted.
if (!DC)
return Addr;
}
GlobalDecl GD;
if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
GD = GlobalDecl(CD, Ctor_Base);
else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
GD = GlobalDecl(DD, Dtor_Base);
else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
GD = GlobalDecl(FD);
else {
// Don't do anything for Obj-C method decls or global closures. We should
// never defer them.
assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
}
if (GD.getDecl())
(void)GetAddrOfGlobal(GD);
return Addr;
}
/// hasNontrivialDestruction - Determine whether a type's destruction is
/// non-trivial. If so, and the variable uses static initialization, we must
/// register its destructor to run on exit.
static bool hasNontrivialDestruction(QualType T) {
CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
return RD && !RD->hasTrivialDestructor();
}
/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
llvm::GlobalVariable *GV) {
llvm::Constant *Init = CGM.EmitConstantInit(D, this);
// If constant emission failed, then this should be a C++ static
// initializer.
if (!Init) {
if (!getLangOpts().CPlusPlus)
CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
else if (Builder.GetInsertBlock()) {
// Since we have a static initializer, this global variable can't
// be constant.
GV->setConstant(false);
EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
}
return GV;
}
// The initializer may differ in type from the global. Rewrite
// the global to match the initializer. (We have to do this
// because some types, like unions, can't be completely represented
// in the LLVM type system.)
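  // E.g. a union lowered as { i32 } may be initialized through a float
  // member, producing a { float } constant of the same size.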
if (GV->getType()->getElementType() != Init->getType()) {
llvm::GlobalVariable *OldGV = GV;
GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
OldGV->isConstant(),
OldGV->getLinkage(), Init, "",
/*InsertBefore*/ OldGV,
OldGV->getThreadLocalMode(),
CGM.getContext().getTargetAddressSpace(D.getType()));
GV->setVisibility(OldGV->getVisibility());
// Steal the name of the old global
GV->takeName(OldGV);
// Replace all uses of the old global with the new global
llvm::Constant *NewPtrForOldDecl =
llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
OldGV->replaceAllUsesWith(NewPtrForOldDecl);
// Erase the old global, since it is no longer used.
OldGV->eraseFromParent();
}
GV->setConstant(CGM.isTypeConstant(D.getType(), true));
GV->setInitializer(Init);
if (hasNontrivialDestruction(D.getType())) {
// We have a constant initializer, but a nontrivial destructor. We still
// need to perform a guarded "initialization" in order to register the
// destructor.
EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
}
return GV;
}
void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage) {
llvm::Value *&DMEntry = LocalDeclMap[&D];
assert(!DMEntry && "Decl already exists in localdeclmap!");
// Check to see if we already have a global variable for this
// declaration. This can happen when double-emitting function
// bodies, e.g. with complete and base constructors.
llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
// Store into LocalDeclMap before generating initializer to handle
// circular references.
DMEntry = addr;
// We can't have a VLA here, but we can have a pointer to a VLA,
// even though that doesn't really make any sense.
// Make sure to evaluate VLA bounds now so that we have them for later.
if (D.getType()->isVariablyModifiedType())
EmitVariablyModifiedType(D.getType());
// Save the type in case adding the initializer forces a type change.
llvm::Type *expectedType = addr->getType();
llvm::GlobalVariable *var =
cast<llvm::GlobalVariable>(addr->stripPointerCasts());
// If this value has an initializer, emit it.
if (D.getInit())
var = AddInitializerToStaticVarDecl(D, var);
var->setAlignment(getContext().getDeclAlign(&D).getQuantity());
if (D.hasAttr<AnnotateAttr>())
CGM.AddGlobalAnnotations(&D, var);
if (const SectionAttr *SA = D.getAttr<SectionAttr>())
var->setSection(SA->getName());
if (D.hasAttr<UsedAttr>())
CGM.addUsedGlobal(var);
// We may have to cast the constant because of the initializer
// mismatch above.
//
// FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV, uses of this constant will be invalid.
llvm::Constant *castedAddr =
llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
DMEntry = castedAddr;
CGM.setStaticLocalDeclAddress(&D, castedAddr);
CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);
// Emit global variable debug descriptor for static vars.
CGDebugInfo *DI = getDebugInfo();
if (DI &&
CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
DI->EmitGlobalVariable(var, &D);
}
}
namespace {
struct DestroyObject : EHScopeStack::Cleanup {
DestroyObject(llvm::Value *addr, QualType type,
CodeGenFunction::Destroyer *destroyer,
bool useEHCleanupForArray)
: addr(addr), type(type), destroyer(destroyer),
useEHCleanupForArray(useEHCleanupForArray) {}
llvm::Value *addr;
QualType type;
CodeGenFunction::Destroyer *destroyer;
bool useEHCleanupForArray;
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Don't use an EH cleanup recursively from an EH cleanup.
bool useEHCleanupForArray =
flags.isForNormalCleanup() && this->useEHCleanupForArray;
CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
}
};
struct DestroyNRVOVariable : EHScopeStack::Cleanup {
DestroyNRVOVariable(llvm::Value *addr,
const CXXDestructorDecl *Dtor,
llvm::Value *NRVOFlag)
: Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(addr) {}
const CXXDestructorDecl *Dtor;
llvm::Value *NRVOFlag;
llvm::Value *Loc;
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Along the exceptions path we always execute the dtor.
bool NRVO = flags.isForNormalCleanup() && NRVOFlag;
llvm::BasicBlock *SkipDtorBB = nullptr;
if (NRVO) {
// If we exited via NRVO, we skip the destructor call.
llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
llvm::Value *DidNRVO = CGF.Builder.CreateLoad(NRVOFlag, "nrvo.val");
CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
CGF.EmitBlock(RunDtorBB);
}
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
/*ForVirtualBase=*/false,
/*Delegating=*/false,
Loc);
if (NRVO) CGF.EmitBlock(SkipDtorBB);
}
};
struct CallStackRestore : EHScopeStack::Cleanup {
llvm::Value *Stack;
CallStackRestore(llvm::Value *Stack) : Stack(Stack) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
llvm::Value *V = CGF.Builder.CreateLoad(Stack);
llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
CGF.Builder.CreateCall(F, V);
}
};
struct ExtendGCLifetime : EHScopeStack::Cleanup {
const VarDecl &Var;
ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Compute the address of the local variable, in case it's a
// byref or something.
DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
Var.getType(), VK_LValue, SourceLocation());
llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
SourceLocation());
CGF.EmitExtendGCLifetime(value);
}
};
struct CallCleanupFunction : EHScopeStack::Cleanup {
llvm::Constant *CleanupFn;
const CGFunctionInfo &FnInfo;
const VarDecl &Var;
CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
const VarDecl *Var)
: CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
Var.getType(), VK_LValue, SourceLocation());
// Compute the address of the local variable, in case it's a byref
// or something.
llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getAddress();
// In some cases, the type of the function argument will be different from
// the type of the pointer. An example of this is
// void f(void* arg);
// __attribute__((cleanup(f))) void *g;
//
// To fix this we insert a bitcast here.
QualType ArgTy = FnInfo.arg_begin()->type;
llvm::Value *Arg =
CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));
CallArgList Args;
Args.add(RValue::get(Arg),
CGF.getContext().getPointerType(Var.getType()));
CGF.EmitCall(FnInfo, CleanupFn, ReturnValueSlot(), Args);
}
};
/// A cleanup to call @llvm.lifetime.end.
class CallLifetimeEnd : public EHScopeStack::Cleanup {
llvm::Value *Addr;
llvm::Value *Size;
public:
CallLifetimeEnd(llvm::Value *addr, llvm::Value *size)
: Addr(addr), Size(size) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.EmitLifetimeEnd(Size, Addr);
}
};
}
/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
llvm::Value *addr,
Qualifiers::ObjCLifetime lifetime) {
switch (lifetime) {
case Qualifiers::OCL_None:
llvm_unreachable("present but none");
case Qualifiers::OCL_ExplicitNone:
// nothing to do
break;
case Qualifiers::OCL_Strong: {
CodeGenFunction::Destroyer *destroyer =
(var.hasAttr<ObjCPreciseLifetimeAttr>()
? CodeGenFunction::destroyARCStrongPrecise
: CodeGenFunction::destroyARCStrongImprecise);
CleanupKind cleanupKind = CGF.getARCCleanupKind();
CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
cleanupKind & EHCleanup);
break;
}
case Qualifiers::OCL_Autoreleasing:
// nothing to do
break;
case Qualifiers::OCL_Weak:
// __weak objects always get EH cleanups; otherwise, exceptions
// could cause really nasty crashes instead of mere leaks.
CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
CodeGenFunction::destroyARCWeak,
/*useEHCleanup*/ true);
break;
}
}
static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
if (const Expr *e = dyn_cast<Expr>(s)) {
// Skip the most common kinds of expressions that make
// hierarchy-walking expensive.
s = e = e->IgnoreParenCasts();
if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
return (ref->getDecl() == &var);
if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
const BlockDecl *block = be->getBlockDecl();
for (const auto &I : block->captures()) {
if (I.getVariable() == &var)
return true;
}
}
}
for (const Stmt *SubStmt : s->children())
    // SubStmt might be null, e.g. a missing decl or the conditional of an if-stmt.
if (SubStmt && isAccessedBy(var, SubStmt))
return true;
return false;
}
static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
if (!decl) return false;
if (!isa<VarDecl>(decl)) return false;
const VarDecl *var = cast<VarDecl>(decl);
return isAccessedBy(*var, e);
}
static void drillIntoBlockVariable(CodeGenFunction &CGF,
LValue &lvalue,
const VarDecl *var) {
lvalue.setAddress(CGF.BuildBlockByrefAddress(lvalue.getAddress(), var));
}
void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
LValue lvalue, bool capturedByInit) {
Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
if (!lifetime) {
llvm::Value *value = EmitScalarExpr(init);
if (capturedByInit)
drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
EmitStoreThroughLValue(RValue::get(value), lvalue, true);
return;
}
if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
init = DIE->getExpr();
// If we're emitting a value with lifetime, we have to do the
// initialization *before* we leave the cleanup scopes.
if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init)) {
enterFullExpression(ewc);
init = ewc->getSubExpr();
}
CodeGenFunction::RunCleanupsScope Scope(*this);
// We have to maintain the illusion that the variable is
// zero-initialized. If the variable might be accessed in its
// initializer, zero-initialize before running the initializer, then
// actually perform the initialization with an assign.
bool accessedByInit = false;
if (lifetime != Qualifiers::OCL_ExplicitNone)
accessedByInit = (capturedByInit || isAccessedBy(D, init));
if (accessedByInit) {
LValue tempLV = lvalue;
// Drill down to the __block object if necessary.
if (capturedByInit) {
// We can use a simple GEP for this because it can't have been
// moved yet.
tempLV.setAddress(Builder.CreateStructGEP(
nullptr, tempLV.getAddress(),
getByRefValueLLVMField(cast<VarDecl>(D)).second));
}
llvm::PointerType *ty
= cast<llvm::PointerType>(tempLV.getAddress()->getType());
ty = cast<llvm::PointerType>(ty->getElementType());
llvm::Value *zero = llvm::ConstantPointerNull::get(ty);
// If __weak, we want to use a barrier under certain conditions.
if (lifetime == Qualifiers::OCL_Weak)
EmitARCInitWeak(tempLV.getAddress(), zero);
// Otherwise just do a simple store.
else
EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
}
// Emit the initializer.
llvm::Value *value = nullptr;
switch (lifetime) {
case Qualifiers::OCL_None:
llvm_unreachable("present but none");
case Qualifiers::OCL_ExplicitNone:
// nothing to do
value = EmitScalarExpr(init);
break;
case Qualifiers::OCL_Strong: {
value = EmitARCRetainScalarExpr(init);
break;
}
case Qualifiers::OCL_Weak: {
// No way to optimize a producing initializer into this. It's not
// worth optimizing for, because the value will immediately
// disappear in the common case.
value = EmitScalarExpr(init);
if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
if (accessedByInit)
EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
else
EmitARCInitWeak(lvalue.getAddress(), value);
return;
}
case Qualifiers::OCL_Autoreleasing:
value = EmitARCRetainAutoreleaseScalarExpr(init);
break;
}
if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
// If the variable might have been accessed by its initializer, we
// might have to initialize with a barrier. We have to do this for
// both __weak and __strong, but __weak got filtered out above.
if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
EmitARCRelease(oldValue, ARCImpreciseLifetime);
return;
}
EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}
/// EmitScalarInit - Initialize the given lvalue with the given object.
void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
if (!lifetime)
return EmitStoreThroughLValue(RValue::get(init), lvalue, true);
switch (lifetime) {
case Qualifiers::OCL_None:
llvm_unreachable("present but none");
case Qualifiers::OCL_ExplicitNone:
// nothing to do
break;
case Qualifiers::OCL_Strong:
init = EmitARCRetain(lvalue.getType(), init);
break;
case Qualifiers::OCL_Weak:
// Initialize and then skip the primitive store.
EmitARCInitWeak(lvalue.getAddress(), init);
return;
case Qualifiers::OCL_Autoreleasing:
init = EmitARCRetainAutorelease(lvalue.getType(), init);
break;
}
EmitStoreOfScalar(init, lvalue, /* isInitialization */ true);
}
/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
/// non-zero parts of the specified initializer with equal or fewer than
/// NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
if (isa<llvm::ConstantAggregateZero>(Init) ||
isa<llvm::ConstantPointerNull>(Init) ||
isa<llvm::UndefValue>(Init))
return true;
if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
isa<llvm::ConstantExpr>(Init))
return Init->isNullValue() || NumStores--;
// See if we can emit each element.
if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
return false;
}
return true;
}
if (llvm::ConstantDataSequential *CDS =
dyn_cast<llvm::ConstantDataSequential>(Init)) {
for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
llvm::Constant *Elt = CDS->getElementAsConstant(i);
if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
return false;
}
return true;
}
// Anything else is hard and scary.
return false;
}
/// emitStoresForInitAfterMemset - For inits that
/// canEmitInitWithFewStoresAfterMemset returned true for, emit the scalar
/// stores that would be required.
static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
bool isVolatile, CGBuilderTy &Builder) {
assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
"called emitStoresForInitAfterMemset for zero or undef value.");
if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
isa<llvm::ConstantExpr>(Init)) {
Builder.CreateStore(Init, Loc, isVolatile);
return;
}
if (llvm::ConstantDataSequential *CDS =
dyn_cast<llvm::ConstantDataSequential>(Init)) {
for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
llvm::Constant *Elt = CDS->getElementAsConstant(i);
// If necessary, get a pointer to the element and emit it.
if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
emitStoresForInitAfterMemset(
Elt, Builder.CreateConstGEP2_32(Init->getType(), Loc, 0, i),
isVolatile, Builder);
}
return;
}
assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
"Unknown value type!");
for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
// If necessary, get a pointer to the element and emit it.
if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
emitStoresForInitAfterMemset(
Elt, Builder.CreateConstGEP2_32(Init->getType(), Loc, 0, i),
isVolatile, Builder);
}
}
/// shouldUseMemSetPlusStoresToInitialize - Decide whether we should use memset
/// plus some stores to initialize a local variable instead of using a memcpy
/// from a constant global. It is beneficial to use memset if the global is all
/// zeros, or mostly zeros and large.
static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
uint64_t GlobalSize) {
// If a global is all zeros, always use a memset.
if (isa<llvm::ConstantAggregateZero>(Init)) return true;
// If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
// do it if it will require 6 or fewer scalar stores.
  // TODO: Should the budget depend on the size? Avoiding a large global
  // warrants plopping in more stores.
unsigned StoreBudget = 6;
uint64_t SizeLimit = 32;
return GlobalSize > SizeLimit &&
canEmitInitWithFewStoresAfterMemset(Init, StoreBudget);
}
/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
AutoVarEmission emission = EmitAutoVarAlloca(D);
EmitAutoVarInit(emission);
EmitAutoVarCleanups(emission);
}
/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return a pointer to the temporary size Value if a marker was emitted, null
/// otherwise
llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
llvm::Value *Addr) {
// For now, only in optimized builds.
if (CGM.getCodeGenOpts().OptimizationLevel == 0)
return nullptr;
// HLSL Change Begins
// Don't emit the intrinsic for hlsl for now unless it is explicitly enabled
if (!CGM.getCodeGenOpts().HLSLEnableLifetimeMarkers)
return nullptr;
// HLSL Change Ends
// Disable lifetime markers in msan builds.
// FIXME: Remove this when msan works with lifetime markers.
if (getLangOpts().Sanitize.has(SanitizerKind::Memory))
return nullptr;
llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size);
Addr = Builder.CreateBitCast(Addr, Int8PtrTy);
llvm::CallInst *C =
Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
C->setDoesNotThrow();
  // HLSL Change Begins - Returning nullptr prevents generating lifetime.end
  if (CGM.getCodeGenOpts().HLSLOptimizationToggles.IsEnabled(
          hlsl::options::TOGGLE_PARTIAL_LIFETIME_MARKERS))
    return nullptr;
  // HLSL Change Ends
return SizeV;
}
void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
Addr = Builder.CreateBitCast(Addr, Int8PtrTy);
llvm::CallInst *C =
Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
C->setDoesNotThrow();
}
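// Taken together, the two helpers above bracket a local's live range in the
// emitted IR, roughly like this (sketch; the operands are whatever
// EmitLifetimeStart computed):
//   %p = bitcast i32* %local to i8*
//   call void @llvm.lifetime.start(i64 4, i8* %p)
//   ... uses of %local ...
//   call void @llvm.lifetime.end(i64 4, i8* %p)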
/// EmitAutoVarAlloca - Emit the alloca and debug information for a
/// local variable. Does not emit initialization or destruction.
CodeGenFunction::AutoVarEmission
CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
QualType Ty = D.getType();
AutoVarEmission emission(D);
bool isByRef = D.hasAttr<BlocksAttr>();
emission.IsByRef = isByRef;
CharUnits alignment = getContext().getDeclAlign(&D);
emission.Alignment = alignment;
// If the type is variably-modified, emit all the VLA sizes for it.
if (Ty->isVariablyModifiedType())
EmitVariablyModifiedType(Ty);
llvm::Value *DeclPtr;
if (Ty->isConstantSizeType()) {
bool NRVO = getLangOpts().ElideConstructors &&
D.isNRVOVariable();
// If this value is an array or struct with a statically determinable
// constant initializer, there are optimizations we can do.
//
// TODO: We should constant-evaluate the initializer of any variable,
// as long as it is initialized by a constant expression. Currently,
// isConstantInitializer produces wrong answers for structs with
// reference or bitfield members, and a few other cases, and checking
// for POD-ness protects us from some of these.
if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
// HLSL Change Begins.
// HLSL will not evaluate constant array init list.
// So skip it here.
!getLangOpts().HLSL &&
// HLSL Change Ends.
(D.isConstexpr() ||
((Ty.isPODType(getContext()) ||
getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
D.getInit()->isConstantInitializer(getContext(), false)))) {
// If the variable's a const type, and it's neither an NRVO
// candidate nor a __block variable and has no mutable members,
// emit it as a global instead.
if (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
CGM.isTypeConstant(Ty, true)) {
EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
emission.Address = nullptr; // signal this condition to later callbacks
assert(emission.wasEmittedAsGlobal());
return emission;
}
// Otherwise, tell the initialization code that we're in this case.
emission.IsConstantAggregate = true;
}
// A normal fixed sized variable becomes an alloca in the entry block,
// unless it's an NRVO variable.
llvm::Type *LTy = ConvertTypeForMem(Ty);
if (NRVO) {
// The named return value optimization: allocate this variable in the
// return slot, so that we can elide the copy when returning this
// variable (C++0x [class.copy]p34).
DeclPtr = ReturnValue;
if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
// Create a flag that is used to indicate when the NRVO was applied
// to this variable. Set it to zero to indicate that NRVO was not
// applied.
llvm::Value *Zero = Builder.getFalse();
llvm::Value *NRVOFlag = CreateTempAlloca(Zero->getType(), "nrvo");
EnsureInsertPoint();
Builder.CreateStore(Zero, NRVOFlag);
// Record the NRVO flag for this variable.
NRVOFlags[&D] = NRVOFlag;
emission.NRVOFlag = NRVOFlag;
}
}
} else {
if (isByRef)
LTy = BuildByRefType(&D);
llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
Alloc->setName(D.getNameForIR()); // HLSL Change: use getNameForIR rather than getName
CharUnits allocaAlignment = alignment;
if (isByRef)
allocaAlignment = std::max(allocaAlignment,
getContext().toCharUnitsFromBits(getTarget().getPointerAlign(0)));
Alloc->setAlignment(allocaAlignment.getQuantity());
DeclPtr = Alloc;
// Emit a lifetime intrinsic if meaningful. There's no point
// in doing this if we don't have a valid insertion point (?).
uint64_t size = CGM.getDataLayout().getTypeAllocSize(LTy);
if (HaveInsertPoint()) {
emission.SizeForLifetimeMarkers = EmitLifetimeStart(size, Alloc);
} else {
assert(!emission.useLifetimeMarkers());
}
}
} else {
EnsureInsertPoint();
if (!DidCallStackSave) {
// Save the stack.
llvm::Value *Stack = CreateTempAlloca(Int8PtrTy, "saved_stack");
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
llvm::Value *V = Builder.CreateCall(F);
Builder.CreateStore(V, Stack);
DidCallStackSave = true;
// Push a cleanup block and restore the stack there.
// FIXME: in general circumstances, this should be an EH cleanup.
pushStackRestore(NormalCleanup, Stack);
}
llvm::Value *elementCount;
QualType elementType;
std::tie(elementCount, elementType) = getVLASize(Ty);
llvm::Type *llvmTy = ConvertTypeForMem(elementType);
// Allocate memory for the array.
llvm::AllocaInst *vla = Builder.CreateAlloca(llvmTy, elementCount, "vla");
vla->setAlignment(alignment.getQuantity());
DeclPtr = vla;
}
llvm::Value *&DMEntry = LocalDeclMap[&D];
assert(!DMEntry && "Decl already exists in localdeclmap!");
DMEntry = DeclPtr;
emission.Address = DeclPtr;
// Emit debug info for local var declaration.
if (HaveInsertPoint())
if (CGDebugInfo *DI = getDebugInfo()) {
if (CGM.getCodeGenOpts().getDebugInfo()
>= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
}
}
if (D.hasAttr<AnnotateAttr>())
EmitVarAnnotations(&D, emission.Address);
CGM.getHLSLRuntime().FinishAutoVar(*this, D, emission.Address); // HLSL Change
return emission;
}
/// Determines whether the given __block variable is potentially
/// captured by the given expression.
static bool isCapturedBy(const VarDecl &var, const Expr *e) {
// Skip the most common kinds of expressions that make
// hierarchy-walking expensive.
e = e->IgnoreParenCasts();
if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
const BlockDecl *block = be->getBlockDecl();
for (const auto &I : block->captures()) {
if (I.getVariable() == &var)
return true;
}
// No need to walk into the subexpressions.
return false;
}
if (const StmtExpr *SE = dyn_cast<StmtExpr>(e)) {
const CompoundStmt *CS = SE->getSubStmt();
for (const auto *BI : CS->body())
if (const auto *E = dyn_cast<Expr>(BI)) {
if (isCapturedBy(var, E))
return true;
}
else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
// special case declarations
for (const auto *I : DS->decls()) {
if (const auto *VD = dyn_cast<VarDecl>((I))) {
const Expr *Init = VD->getInit();
if (Init && isCapturedBy(var, Init))
return true;
}
}
}
else
      // FIXME: Make the safe assumption that arbitrary statements cause
      // capturing. Later, provide code to poke into statements for capture
      // analysis.
return true;
return false;
}
for (const Stmt *SubStmt : e->children())
if (isCapturedBy(var, cast<Expr>(SubStmt)))
return true;
return false;
}
/// \brief Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
if (!Init)
return true;
if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
if (CXXConstructorDecl *Constructor = Construct->getConstructor())
if (Constructor->isTrivial() &&
Constructor->isDefaultConstructor() &&
!Construct->requiresZeroInitialization())
return true;
return false;
}
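// For example, given `struct S { int x; }; S s;` the implicit default
// constructor is trivial and no zero-initialization is required, so no
// initialization code needs to be emitted for 's'.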
void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
assert(emission.Variable && "emission was not valid!");
// If this was emitted as a global constant, we're done.
if (emission.wasEmittedAsGlobal()) return;
const VarDecl &D = *emission.Variable;
auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
QualType type = D.getType();
// If this local has an initializer, emit it now.
const Expr *Init = D.getInit();
// If we are at an unreachable point, we don't need to emit the initializer
// unless it contains a label.
if (!HaveInsertPoint()) {
if (!Init || !ContainsLabel(Init)) return;
EnsureInsertPoint();
}
// Initialize the structure of a __block variable.
if (emission.IsByRef)
emitByrefStructureInit(emission);
if (isTrivialInitializer(Init))
return;
CharUnits alignment = emission.Alignment;
// Check whether this is a byref variable that's potentially
// captured and moved by its own initializer. If so, we'll need to
// emit the initializer first, then copy into the variable.
bool capturedByInit = emission.IsByRef && isCapturedBy(D, Init);
llvm::Value *Loc =
capturedByInit ? emission.Address : emission.getObjectAddress(*this);
llvm::Constant *constant = nullptr;
if (emission.IsConstantAggregate || D.isConstexpr()) {
assert(!capturedByInit && "constant init contains a capturing block?");
constant = CGM.EmitConstantInit(D, this);
}
if (!constant) {
LValue lv = MakeAddrLValue(Loc, type, alignment);
lv.setNonGC(true);
return EmitExprAsInit(Init, &D, lv, capturedByInit);
}
if (!emission.IsConstantAggregate) {
// For simple scalar/complex initialization, store the value directly.
LValue lv = MakeAddrLValue(Loc, type, alignment);
lv.setNonGC(true);
return EmitStoreThroughLValue(RValue::get(constant), lv, true);
}
// HLSL Change Begins
if (getLangOpts().HLSL) {
// create a temporary global with the initializer then
// Store from the global to the alloca.
std::string Name = getStaticDeclName(CGM, D);
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
llvm::GlobalValue::PrivateLinkage,
constant, Name);
GV->setAlignment(alignment.getQuantity());
GV->setUnnamedAddr(true);
// Don't generate memcpy for hlsl.
CGM.getHLSLRuntime().EmitHLSLAggregateCopy(*this, GV, Loc, type);
return;
}
// HLSL Change Ends
// If this is a simple aggregate initialization, we can optimize it
// in various ways.
bool isVolatile = type.isVolatileQualified();
llvm::Value *SizeVal =
llvm::ConstantInt::get(IntPtrTy,
getContext().getTypeSizeInChars(type).getQuantity());
llvm::Type *BP = Int8PtrTy;
if (Loc->getType() != BP)
Loc = Builder.CreateBitCast(Loc, BP);
// If the initializer is all or mostly zeros, codegen with memset then do
// a few stores afterward.
if (shouldUseMemSetPlusStoresToInitialize(constant,
CGM.getDataLayout().getTypeAllocSize(constant->getType()))) {
Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
alignment.getQuantity(), isVolatile);
    // Zero and undef don't require any stores.
if (!constant->isNullValue() && !isa<llvm::UndefValue>(constant)) {
Loc = Builder.CreateBitCast(Loc, constant->getType()->getPointerTo());
emitStoresForInitAfterMemset(constant, Loc, isVolatile, Builder);
}
} else {
// Otherwise, create a temporary global with the initializer then
// memcpy from the global to the alloca.
std::string Name = getStaticDeclName(CGM, D);
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
llvm::GlobalValue::PrivateLinkage,
constant, Name);
GV->setAlignment(alignment.getQuantity());
GV->setUnnamedAddr(true);
llvm::Value *SrcPtr = GV;
if (SrcPtr->getType() != BP)
SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, alignment.getQuantity(),
isVolatile);
}
}
/// Emit an expression as an initializer for a variable at the given
/// location. The expression is not necessarily the normal
/// initializer for the variable, and the address is not necessarily
/// its normal location.
///
/// \param init the initializing expression
/// \param var the variable to act as if we're initializing
/// \param loc the address to initialize; its type is a pointer
/// to the LLVM mapping of the variable's type
/// \param alignment the alignment of the address
/// \param capturedByInit true if the variable is a __block variable
/// whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
LValue lvalue, bool capturedByInit) {
QualType type = D->getType();
if (type->isReferenceType()) {
RValue rvalue = EmitReferenceBindingToExpr(init);
if (capturedByInit)
drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
EmitStoreThroughLValue(rvalue, lvalue, true);
return;
}
switch (getEvaluationKind(type)) {
case TEK_Scalar:
EmitScalarInit(init, D, lvalue, capturedByInit);
return;
case TEK_Complex: {
ComplexPairTy complex = EmitComplexExpr(init);
if (capturedByInit)
drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
EmitStoreOfComplex(complex, lvalue, /*init*/ true);
return;
}
case TEK_Aggregate:
if (type->isAtomicType()) {
EmitAtomicInit(const_cast<Expr*>(init), lvalue);
} else {
// TODO: how can we delay here if D is captured by its initializer?
EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
}
return;
}
llvm_unreachable("bad evaluation kind");
}
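// As a rough guide to the dispatch above: an `int` or pointer initializer is
// TEK_Scalar, a `_Complex double` initializer is TEK_Complex, and a struct or
// array initializer is TEK_Aggregate.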
/// Enter a destroy cleanup for the given local variable.
void CodeGenFunction::emitAutoVarTypeCleanup(
const CodeGenFunction::AutoVarEmission &emission,
QualType::DestructionKind dtorKind) {
assert(dtorKind != QualType::DK_none);
// Note that for __block variables, we want to destroy the
// original stack object, not the possibly forwarded object.
llvm::Value *addr = emission.getObjectAddress(*this);
const VarDecl *var = emission.Variable;
QualType type = var->getType();
CleanupKind cleanupKind = NormalAndEHCleanup;
CodeGenFunction::Destroyer *destroyer = nullptr;
switch (dtorKind) {
case QualType::DK_none:
llvm_unreachable("no cleanup for trivially-destructible variable");
case QualType::DK_cxx_destructor:
// If there's an NRVO flag on the emission, we need a different
// cleanup.
if (emission.NRVOFlag) {
assert(!type->isArrayType());
CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
EHStack.pushCleanup<DestroyNRVOVariable>(cleanupKind, addr, dtor,
emission.NRVOFlag);
return;
}
break;
case QualType::DK_objc_strong_lifetime:
// Suppress cleanups for pseudo-strong variables.
if (var->isARCPseudoStrong()) return;
// Otherwise, consider whether to use an EH cleanup or not.
cleanupKind = getARCCleanupKind();
// Use the imprecise destroyer by default.
if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
destroyer = CodeGenFunction::destroyARCStrongImprecise;
break;
case QualType::DK_objc_weak_lifetime:
break;
}
// If we haven't chosen a more specific destroyer, use the default.
if (!destroyer) destroyer = getDestroyer(dtorKind);
// Use an EH cleanup in array destructors iff the destructor itself
// is being pushed as an EH cleanup.
bool useEHCleanup = (cleanupKind & EHCleanup);
EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
useEHCleanup);
}
void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
assert(emission.Variable && "emission was not valid!");
// If this was emitted as a global constant, we're done.
if (emission.wasEmittedAsGlobal()) return;
// If we don't have an insertion point, we're done. Sema prevents
// us from jumping into any of these scopes anyway.
if (!HaveInsertPoint()) return;
const VarDecl &D = *emission.Variable;
// Make sure we call @llvm.lifetime.end. This needs to happen
// *last*, so the cleanup needs to be pushed *first*.
if (emission.useLifetimeMarkers()) {
EHStack.pushCleanup<CallLifetimeEnd>(NormalCleanup,
emission.getAllocatedAddress(),
emission.getSizeForLifetimeMarkers());
EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
cleanup.setLifetimeMarker();
}
// Check the type for a cleanup.
if (QualType::DestructionKind dtorKind = D.getType().isDestructedType())
emitAutoVarTypeCleanup(emission, dtorKind);
// In GC mode, honor objc_precise_lifetime.
if (getLangOpts().getGC() != LangOptions::NonGC &&
D.hasAttr<ObjCPreciseLifetimeAttr>()) {
EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
}
// Handle the cleanup attribute.
if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
const FunctionDecl *FD = CA->getFunctionDecl();
llvm::Constant *F = CGM.GetAddrOfFunction(FD);
assert(F && "Could not find function!");
const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
}
// If this is a block variable, call _Block_object_destroy
// (on the unforwarded address).
if (emission.IsByRef)
enterByrefCleanup(emission);
}
CodeGenFunction::Destroyer *
CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
switch (kind) {
case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
case QualType::DK_cxx_destructor:
return destroyCXXObject;
case QualType::DK_objc_strong_lifetime:
return destroyARCStrongPrecise;
case QualType::DK_objc_weak_lifetime:
return destroyARCWeak;
}
llvm_unreachable("Unknown DestructionKind");
}
/// pushEHDestroy - Push the standard destructor for the given type as
/// an EH-only cleanup.
void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
llvm::Value *addr, QualType type) {
assert(dtorKind && "cannot push destructor for trivial type");
assert(needsEHCleanup(dtorKind));
pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
}
/// pushDestroy - Push the standard destructor for the given type as
/// at least a normal cleanup.
void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
llvm::Value *addr, QualType type) {
assert(dtorKind && "cannot push destructor for trivial type");
CleanupKind cleanupKind = getCleanupKind(dtorKind);
pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
cleanupKind & EHCleanup);
}
void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, llvm::Value *addr,
QualType type, Destroyer *destroyer,
bool useEHCleanupForArray) {
pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
destroyer, useEHCleanupForArray);
}
void CodeGenFunction::pushStackRestore(CleanupKind Kind, llvm::Value *SPMem) {
EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}
void CodeGenFunction::pushLifetimeExtendedDestroy(
CleanupKind cleanupKind, llvm::Value *addr, QualType type,
Destroyer *destroyer, bool useEHCleanupForArray) {
assert(!isInConditionalBranch() &&
"performing lifetime extension from within conditional");
// Push an EH-only cleanup for the object now.
// FIXME: When popping normal cleanups, we need to keep this EH cleanup
// around in case a temporary's destructor throws an exception.
if (cleanupKind & EHCleanup)
EHStack.pushCleanup<DestroyObject>(
static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
destroyer, useEHCleanupForArray);
// Remember that we need to push a full cleanup for the object at the
// end of the full-expression.
pushCleanupAfterFullExpr<DestroyObject>(
cleanupKind, addr, type, destroyer, useEHCleanupForArray);
}
/// emitDestroy - Immediately perform the destruction of the given
/// object.
///
/// \param addr - the address of the object; a type*
/// \param type - the type of the object; if an array type, all
/// objects are destroyed in reverse order
/// \param destroyer - the function to call to destroy individual
/// elements
/// \param useEHCleanupForArray - whether an EH cleanup should be
/// used when destroying array elements, in case one of the
/// destructions throws an exception
void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type,
Destroyer *destroyer,
bool useEHCleanupForArray) {
const ArrayType *arrayType = getContext().getAsArrayType(type);
if (!arrayType)
return destroyer(*this, addr, type);
llvm::Value *begin = addr;
llvm::Value *length = emitArrayLength(arrayType, type, begin);
// Normally we have to check whether the array is zero-length.
bool checkZeroLength = true;
// But if the array length is constant, we can suppress that.
if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
// ...and if it's constant zero, we can just skip the entire thing.
if (constLength->isZero()) return;
checkZeroLength = false;
}
llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
emitArrayDestroy(begin, end, type, destroyer,
checkZeroLength, useEHCleanupForArray);
}
/// emitArrayDestroy - Destroys all the elements of the given array,
/// beginning from last to first. The array cannot be zero-length.
///
/// \param begin - a type* denoting the first element of the array
/// \param end - a type* denoting one past the end of the array
/// \param type - the element type of the array
/// \param destroyer - the function to call to destroy elements
/// \param useEHCleanup - whether to push an EH cleanup to destroy
/// the remaining elements in case the destruction of a single
/// element throws
void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
llvm::Value *end,
QualType type,
Destroyer *destroyer,
bool checkZeroLength,
bool useEHCleanup) {
assert(!type->isArrayType());
// The basic structure here is a do-while loop, because we don't
// need to check for the zero-element case.
llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
if (checkZeroLength) {
llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
"arraydestroy.isempty");
Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
}
// Enter the loop body, making that address the current address.
llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
EmitBlock(bodyBB);
llvm::PHINode *elementPast =
Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
elementPast->addIncoming(end, entryBB);
// Shift the address back by one element.
llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne,
"arraydestroy.element");
if (useEHCleanup)
pushRegularPartialArrayCleanup(begin, element, type, destroyer);
// Perform the actual destruction there.
destroyer(*this, element, type);
if (useEHCleanup)
PopCleanupBlock();
// Check whether we've reached the end.
llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
Builder.CreateCondBr(done, doneBB, bodyBB);
elementPast->addIncoming(element, Builder.GetInsertBlock());
// Done.
EmitBlock(doneBB);
}
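// The control flow produced above is a reverse do-while over the elements,
// roughly (sketch of the emitted blocks):
//   entry:              br (begin == end), done, body   ; only if checkZeroLength
//   arraydestroy.body:  elementPast = phi [end, entry], [element, body]
//                       element = elementPast - 1
//                       destroy(element)
//                       br (element == begin), done, body
//   arraydestroy.done: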
/// Perform partial array destruction as if in an EH cleanup. Unlike
/// emitArrayDestroy, the element type here may still be an array type.
static void emitPartialArrayDestroy(CodeGenFunction &CGF,
llvm::Value *begin, llvm::Value *end,
QualType type,
CodeGenFunction::Destroyer *destroyer) {
// If the element type is itself an array, drill down.
unsigned arrayDepth = 0;
while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
// VLAs don't require a GEP index to walk into.
if (!isa<VariableArrayType>(arrayType))
arrayDepth++;
type = arrayType->getElementType();
}
if (arrayDepth) {
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, arrayDepth+1);
SmallVector<llvm::Value*,4> gepIndices(arrayDepth, zero);
begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
}
// Destroy the array. We don't ever need an EH cleanup because we
// assume that we're in an EH cleanup ourselves, so a throwing
// destructor causes an immediate terminate.
CGF.emitArrayDestroy(begin, end, type, destroyer,
/*checkZeroLength*/ true, /*useEHCleanup*/ false);
}
namespace {
/// RegularPartialArrayDestroy - a cleanup which performs a partial
/// array destroy where the end pointer is regularly determined and
/// does not need to be loaded from a local.
class RegularPartialArrayDestroy : public EHScopeStack::Cleanup {
llvm::Value *ArrayBegin;
llvm::Value *ArrayEnd;
QualType ElementType;
CodeGenFunction::Destroyer *Destroyer;
public:
RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
QualType elementType,
CodeGenFunction::Destroyer *destroyer)
: ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
ElementType(elementType), Destroyer(destroyer) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
ElementType, Destroyer);
}
};
/// IrregularPartialArrayDestroy - a cleanup which performs a
/// partial array destroy where the end pointer is irregularly
/// determined and must be loaded from a local.
class IrregularPartialArrayDestroy : public EHScopeStack::Cleanup {
llvm::Value *ArrayBegin;
llvm::Value *ArrayEndPointer;
QualType ElementType;
CodeGenFunction::Destroyer *Destroyer;
public:
IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
llvm::Value *arrayEndPointer,
QualType elementType,
CodeGenFunction::Destroyer *destroyer)
: ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
ElementType(elementType), Destroyer(destroyer) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
ElementType, Destroyer);
}
};
}
/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array. The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
/// possibly still an array type
void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEndPointer,
QualType elementType,
Destroyer *destroyer) {
pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
arrayBegin, arrayEndPointer,
elementType, destroyer);
}
/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array. The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
/// possibly still an array type
void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEnd,
QualType elementType,
Destroyer *destroyer) {
pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
arrayBegin, arrayEnd,
elementType, destroyer);
}
/// Lazily declare the @llvm.lifetime.start intrinsic.
llvm::Constant *CodeGenModule::getLLVMLifetimeStartFn() {
if (LifetimeStartFn) return LifetimeStartFn;
LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
llvm::Intrinsic::lifetime_start);
return LifetimeStartFn;
}
/// Lazily declare the @llvm.lifetime.end intrinsic.
llvm::Constant *CodeGenModule::getLLVMLifetimeEndFn() {
if (LifetimeEndFn) return LifetimeEndFn;
LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
llvm::Intrinsic::lifetime_end);
return LifetimeEndFn;
}
namespace {
/// A cleanup to perform a release of an object at the end of a
/// function. This is used to balance out the incoming +1 of a
/// ns_consumed argument when we can't reasonably do that just by
/// not doing the initial retain for a __block argument.
struct ConsumeARCParameter : EHScopeStack::Cleanup {
ConsumeARCParameter(llvm::Value *param,
ARCPreciseLifetime_t precise)
: Param(param), Precise(precise) {}
llvm::Value *Param;
ARCPreciseLifetime_t Precise;
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.EmitARCRelease(Param, Precise);
}
};
}
/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
bool ArgIsPointer, unsigned ArgNo) {
// FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
"Invalid argument to EmitParmDecl");
Arg->setName(D.getName());
QualType Ty = D.getType();
// HLSL Change Begin - add noalias for all out param.
if (Ty.isRestrictQualified() && isa<llvm::Argument>(Arg)) {
llvm::Argument *AI = cast<llvm::Argument>(Arg);
if (!AI->hasNoAliasAttr())
AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
llvm::Attribute::NoAlias));
}
// HLSL Change End
// Use better IR generation for certain implicit parameters.
if (isa<ImplicitParamDecl>(D)) {
#if 1 // HLSL Change - no support for blocks
assert(!BlockInfo && "HLSL does not support blocks");
#else
// The only implicit argument a block has is its literal.
if (BlockInfo) {
LocalDeclMap[&D] = Arg;
llvm::Value *LocalAddr = nullptr;
if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
// Allocate a stack slot to let the debug info survive the RA.
llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty),
D.getName() + ".addr");
Alloc->setAlignment(getContext().getDeclAlign(&D).getQuantity());
LValue lv = MakeAddrLValue(Alloc, Ty, getContext().getDeclAlign(&D));
EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
LocalAddr = Builder.CreateLoad(Alloc);
}
if (CGDebugInfo *DI = getDebugInfo()) {
if (CGM.getCodeGenOpts().getDebugInfo()
>= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, ArgNo,
LocalAddr, Builder);
}
}
return;
}
#endif // HLSL Change - no support for blocks.
}
llvm::Value *DeclPtr;
bool DoStore = false;
bool IsScalar = hasScalarEvaluationKind(Ty);
CharUnits Align = getContext().getDeclAlign(&D);
// If we already have a pointer to the argument, reuse the input pointer.
if (ArgIsPointer) {
// If we have a prettier pointer type at this point, bitcast to that.
unsigned AS = cast<llvm::PointerType>(Arg->getType())->getAddressSpace();
llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
DeclPtr = Arg->getType() == IRTy ? Arg : Builder.CreateBitCast(Arg, IRTy,
D.getName());
// Push a destructor cleanup for this parameter if the ABI requires it.
// Don't push a cleanup in a thunk for a method that will also emit a
// cleanup.
if (!IsScalar && !CurFuncIsThunk &&
getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
if (RD && RD->hasNonTrivialDestructor())
pushDestroy(QualType::DK_cxx_destructor, DeclPtr, Ty);
}
} else {
// HLSL Change Starts
if (getLangOpts().HLSL && Arg->getType()->isPointerTy()) {
      // HLSL doesn't have pointers. The only cases where a pointer-typed
      // argument is generated are the 'this' pointer for methods and
      // reference-typed out parameters. In both cases, don't store the
      // pointer to a temporary alloca; just use the parameter directly.
      DoStore = false;
DeclPtr = Arg;
}
// HLSL Change Ends
else {
// Otherwise, create a temporary to hold the value.
llvm::AllocaInst *Alloc =
CreateTempAlloca(ConvertTypeForMem(Ty), D.getName() + ".addr");
      Alloc->setMetadata(hlsl::DxilMDHelper::kDxilTempAllocaMDName,
                         llvm::MDTuple::get(Alloc->getContext(), {})); // HLSL Change
Alloc->setAlignment(Align.getQuantity());
DeclPtr = Alloc;
DoStore = true;
}
}
LValue lv = MakeAddrLValue(DeclPtr, Ty, Align);
if (!getLangOpts().HLSL && IsScalar) { // HLSL Change: not ObjC
Qualifiers qs = Ty.getQualifiers();
if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
// We honor __attribute__((ns_consumed)) for types with lifetime.
// For __strong, it's handled by just skipping the initial retain;
// otherwise we have to balance out the initial +1 with an extra
// cleanup to do the release at the end of the function.
bool isConsumed = D.hasAttr<NSConsumedAttr>();
// 'self' is always formally __strong, but if this is not an
// init method then we don't want to retain it.
if (D.isARCPseudoStrong()) {
const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CurCodeDecl);
assert(&D == method->getSelfDecl());
assert(lt == Qualifiers::OCL_Strong);
assert(qs.hasConst());
assert(method->getMethodFamily() != OMF_init);
(void) method;
lt = Qualifiers::OCL_ExplicitNone;
}
if (lt == Qualifiers::OCL_Strong) {
if (!isConsumed) {
if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
// use objc_storeStrong(&dest, value) for retaining the
// object. But first, store a null into 'dest' because
// objc_storeStrong attempts to release its old value.
llvm::Value *Null = CGM.EmitNullConstant(D.getType());
EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
EmitARCStoreStrongCall(lv.getAddress(), Arg, true);
DoStore = false;
}
else
// Don't use objc_retainBlock for block pointers, because we
// don't want to Block_copy something just because we got it
// as a parameter.
Arg = EmitARCRetainNonBlock(Arg);
}
} else {
// Push the cleanup for a consumed parameter.
if (isConsumed) {
ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
? ARCPreciseLifetime : ARCImpreciseLifetime);
EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), Arg,
precise);
}
if (lt == Qualifiers::OCL_Weak) {
EmitARCInitWeak(DeclPtr, Arg);
DoStore = false; // The weak init is a store, no need to do two.
}
}
// Enter the cleanup scope.
EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
}
}
// Store the initial value into the alloca.
if (DoStore)
EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
llvm::Value *&DMEntry = LocalDeclMap[&D];
assert(!DMEntry && "Decl already exists in localdeclmap!");
DMEntry = DeclPtr;
// Emit debug info for param declaration.
if (CGDebugInfo *DI = getDebugInfo()) {
if (CGM.getCodeGenOpts().getDebugInfo()
>= CodeGenOptions::LimitedDebugInfo) {
DI->EmitDeclareOfArgVariable(&D, DeclPtr, ArgNo, Builder);
}
}
if (D.hasAttr<AnnotateAttr>())
EmitVarAnnotations(&D, DeclPtr);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGHLSLRuntime.h | //===----- CGMSHLSLRuntime.h - Interface to HLSL Runtime ----------------===//
///////////////////////////////////////////////////////////////////////////////
// //
// CGHLSLRuntime.h //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
// This provides a class for HLSL code generation. //
//
///////////////////////////////////////////////////////////////////////////////
#pragma once
#include <functional>
#include <llvm/ADT/SmallVector.h> // HLSL Change
namespace CGHLSLMSHelper {
struct Scope;
}
namespace llvm {
class Function;
class Value;
class Constant;
class TerminatorInst;
class GlobalVariable;
class Type;
class BasicBlock;
class BranchInst;
class SwitchInst;
template <typename T> class ArrayRef;
} // namespace llvm
namespace clang {
class Decl;
class QualType;
class ExtVectorType;
class ASTContext;
class FunctionDecl;
class CallExpr;
class InitListExpr;
class Expr;
class Stmt;
class ReturnStmt;
class Attr;
class VarDecl;
class HLSLRootSignatureAttr;
namespace CodeGen {
class CodeGenModule;
class ReturnValueSlot;
class CodeGenFunction;
class RValue;
class LValue;
class CGHLSLRuntime {
protected:
CodeGenModule &CGM;
llvm::SmallVector<llvm::BranchInst *, 16> m_DxBreaks;
public:
CGHLSLRuntime(CodeGenModule &CGM) : CGM(CGM) {}
virtual ~CGHLSLRuntime();
virtual void addResource(Decl *D) = 0;
virtual void addSubobject(Decl *D) = 0;
virtual void FinishCodeGen() = 0;
virtual RValue EmitHLSLBuiltinCallExpr(CodeGenFunction &CGF,
const FunctionDecl *FD,
const CallExpr *E,
ReturnValueSlot ReturnValue) = 0;
  // Is E a C++ init list (not an HLSL init list, which only matches size)?
virtual bool IsTrivalInitListExpr(CodeGenFunction &CGF, InitListExpr *E) = 0;
virtual llvm::Value *
EmitHLSLInitListExpr(CodeGenFunction &CGF, InitListExpr *E,
                       // The DestPtr used when emitting an aggregate init;
                       // in the normal case it will be null.
llvm::Value *DestPtr) = 0;
virtual llvm::Constant *EmitHLSLConstInitListExpr(CodeGenModule &CGM,
InitListExpr *E) = 0;
virtual void EmitHLSLOutParamConversionInit(
CodeGenFunction &CGF, const FunctionDecl *FD, const CallExpr *E,
llvm::SmallVector<LValue, 8> &castArgList,
llvm::SmallVector<const Stmt *, 8> &argList,
llvm::SmallVector<LValue, 8> &lifetimeCleanupList,
const std::function<void(const VarDecl *, llvm::Value *)> &TmpArgMap) = 0;
virtual void EmitHLSLOutParamConversionCopyBack(
CodeGenFunction &CGF, llvm::SmallVector<LValue, 8> &castArgList,
llvm::SmallVector<LValue, 8> &lifetimeCleanupList) = 0;
virtual void MarkPotentialResourceTemp(CodeGenFunction &CGF, llvm::Value *V,
clang::QualType QaulTy) = 0;
virtual llvm::Value *
EmitHLSLMatrixOperationCall(CodeGenFunction &CGF, const clang::Expr *E,
llvm::Type *RetType,
llvm::ArrayRef<llvm::Value *> paramList) = 0;
virtual void EmitHLSLDiscard(CodeGenFunction &CGF) = 0;
virtual llvm::BranchInst *EmitHLSLCondBreak(CodeGenFunction &CGF,
llvm::Function *F,
llvm::BasicBlock *DestBB,
llvm::BasicBlock *AltBB) = 0;
// For [] on matrix
virtual llvm::Value *EmitHLSLMatrixSubscript(CodeGenFunction &CGF,
llvm::Type *RetType,
llvm::Value *Ptr,
llvm::Value *Idx,
clang::QualType Ty) = 0;
// For ._m on matrix
virtual llvm::Value *
EmitHLSLMatrixElement(CodeGenFunction &CGF, llvm::Type *RetType,
llvm::ArrayRef<llvm::Value *> paramList,
clang::QualType Ty) = 0;
virtual llvm::Value *EmitHLSLMatrixLoad(CodeGenFunction &CGF,
llvm::Value *Ptr,
clang::QualType Ty) = 0;
virtual void EmitHLSLMatrixStore(CodeGenFunction &CGF, llvm::Value *Val,
llvm::Value *DestPtr,
clang::QualType Ty) = 0;
virtual void EmitHLSLAggregateCopy(CodeGenFunction &CGF, llvm::Value *SrcPtr,
llvm::Value *DestPtr,
clang::QualType Ty) = 0;
virtual void EmitHLSLFlatConversion(CodeGenFunction &CGF, llvm::Value *Val,
llvm::Value *DestPtr, clang::QualType Ty,
clang::QualType SrcTy) = 0;
virtual void EmitHLSLFlatConversionAggregateCopy(CodeGenFunction &CGF,
llvm::Value *SrcPtr,
clang::QualType SrcTy,
llvm::Value *DestPtr,
clang::QualType DestTy) = 0;
virtual llvm::Value *EmitHLSLLiteralCast(CodeGenFunction &CGF,
llvm::Value *Src,
clang::QualType SrcType,
clang::QualType DstType) = 0;
virtual void AddHLSLFunctionInfo(llvm::Function *,
const FunctionDecl *FD) = 0;
virtual void EmitHLSLFunctionProlog(llvm::Function *,
const FunctionDecl *FD) = 0;
virtual void AddControlFlowHint(CodeGenFunction &CGF, const Stmt &S,
llvm::TerminatorInst *TI,
llvm::ArrayRef<const Attr *> Attrs) = 0;
virtual void FinishAutoVar(CodeGenFunction &CGF, const VarDecl &D,
llvm::Value *V) = 0;
virtual const clang::Expr *CheckReturnStmtGLCMismatch(
CodeGenFunction &CGF, const clang::Expr *RV, const clang::ReturnStmt &S,
clang::QualType FnRetTy,
const std::function<void(const VarDecl *, llvm::Value *)> &TmpArgMap) = 0;
virtual void MarkIfStmt(CodeGenFunction &CGF, llvm::BasicBlock *endIfBB) = 0;
virtual void MarkCleanupBlock(CodeGenFunction &CGF,
llvm::BasicBlock *cleanupBB) = 0;
virtual void MarkSwitchStmt(CodeGenFunction &CGF,
llvm::SwitchInst *switchInst,
llvm::BasicBlock *endSwitch) = 0;
virtual void MarkReturnStmt(CodeGenFunction &CGF,
llvm::BasicBlock *bbWithRet) = 0;
virtual void MarkLoopStmt(CodeGenFunction &CGF,
llvm::BasicBlock *loopContinue,
llvm::BasicBlock *loopExit) = 0;
virtual CGHLSLMSHelper::Scope *MarkScopeEnd(CodeGenFunction &CGF) = 0;
virtual bool NeedHLSLMartrixCastForStoreOp(
const clang::Decl *TD,
llvm::SmallVector<llvm::Value *, 16> &IRCallArgs) = 0;
virtual void EmitHLSLMartrixCastForStoreOp(
CodeGenFunction &CGF, llvm::SmallVector<llvm::Value *, 16> &IRCallArgs,
llvm::SmallVector<clang::QualType, 16> &ArgTys) = 0;
};
/// Create an instance of a HLSL runtime class.
CGHLSLRuntime *CreateMSHLSLRuntime(CodeGenModule &CGM);
} // namespace CodeGen
} // namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGHLSLRootSignature.cpp | //===----- CGHLSLRootSignature.cpp - Compile root signature---------------===//
///////////////////////////////////////////////////////////////////////////////
// //
// CGHLSLRootSignature.cpp //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
// This provides clang::CompileRootSignature. //
// //
///////////////////////////////////////////////////////////////////////////////
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include "dxc/DXIL/DxilConstants.h"
#include "dxc/DxilRootSignature/DxilRootSignature.h"
#include "dxc/Support/WinIncludes.h" // stream support
#include "dxc/dxcapi.h" // stream support
#include "dxc/dxcapi.h"
#include "clang/Parse/ParseHLSL.h" // root sig would be in Parser if part of lang
using namespace llvm;
void clang::CompileRootSignature(StringRef rootSigStr, DiagnosticsEngine &Diags,
SourceLocation SLoc,
hlsl::DxilRootSignatureVersion rootSigVer,
hlsl::DxilRootSignatureCompilationFlags flags,
hlsl::RootSignatureHandle *pRootSigHandle) {
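  // Example (hypothetical input): rootSigStr might hold a signature such as
  //   "RootFlags(ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT), CBV(b0), SRV(t0)"
  // which is parsed, serialized, and, on success, stored in *pRootSigHandle.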
std::string OSStr;
llvm::raw_string_ostream OS(OSStr);
hlsl::DxilVersionedRootSignatureDesc *D = nullptr;
if (ParseHLSLRootSignature(rootSigStr.data(), rootSigStr.size(), rootSigVer,
flags, &D, SLoc, Diags)) {
CComPtr<IDxcBlob> pSignature;
CComPtr<IDxcBlobEncoding> pErrors;
hlsl::SerializeRootSignature(D, &pSignature, &pErrors, false);
if (pSignature == nullptr) {
assert(pErrors != nullptr && "else serialize failed with no msg");
ReportHLSLRootSigError(Diags, SLoc, (char *)pErrors->GetBufferPointer(),
pErrors->GetBufferSize());
hlsl::DeleteRootSignature(D);
} else {
pRootSigHandle->Assign(D, pSignature);
}
}
} |
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/TargetInfo.h | //===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
#define LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
#include "CGValue.h"
#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
namespace llvm {
class Constant;
class GlobalValue;
class Type;
class Value;
}
namespace clang {
class ABIInfo;
class Decl;
namespace CodeGen {
class CallArgList;
class CodeGenModule;
class CodeGenFunction;
class CGFunctionInfo;
}
/// TargetCodeGenInfo - This class organizes various target-specific
/// code-generation issues, like target-specific attributes, builtins and so
/// on.
class TargetCodeGenInfo {
ABIInfo *Info;
public:
// WARNING: Acquires the ownership of ABIInfo.
TargetCodeGenInfo(ABIInfo *info = 0) : Info(info) {}
virtual ~TargetCodeGenInfo();
/// getABIInfo() - Returns ABI info helper for the target.
const ABIInfo &getABIInfo() const { return *Info; }
/// setTargetAttributes - Provides a convenient hook to handle extra
/// target-specific attributes for the given global.
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const {}
/// emitTargetMD - Provides a convenient hook to handle extra
/// target-specific metadata for the given global.
virtual void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const {}
/// Determines the size of struct _Unwind_Exception on this platform,
/// in 8-bit units. The Itanium ABI defines this as:
/// struct _Unwind_Exception {
/// uint64 exception_class;
/// _Unwind_Exception_Cleanup_Fn exception_cleanup;
/// uint64 private_1;
/// uint64 private_2;
/// };
virtual unsigned getSizeOfUnwindException() const;
/// Controls whether __builtin_extend_pointer should sign-extend
/// pointers to uint64_t or zero-extend them (the default). Has
/// no effect for targets:
/// - that have 64-bit pointers, or
/// - that cannot address through registers larger than pointers, or
/// - that implicitly ignore/truncate the top bits when addressing
/// through such registers.
virtual bool extendPointerWithSExt() const { return false; }
/// Determines the DWARF register number for the stack pointer, for
/// exception-handling purposes. Implements __builtin_dwarf_sp_column.
///
/// Returns -1 if the operation is unsupported by this target.
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
return -1;
}
/// Initializes the given DWARF EH register-size table, a char*.
/// Implements __builtin_init_dwarf_reg_size_table.
///
/// Returns true if the operation is unsupported by this target.
virtual bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
return true;
}
/// Performs the code-generation required to convert a return
/// address as stored by the system into the actual address of the
/// next instruction that will be executed.
///
/// Used by __builtin_extract_return_addr().
virtual llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
return Address;
}
/// Performs the code-generation required to convert the address
/// of an instruction into a return address suitable for storage
/// by the system in a return slot.
///
/// Used by __builtin_frob_return_addr().
virtual llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
llvm::Value *Address) const {
return Address;
}
/// Corrects the low-level LLVM type for a given constraint and "usual"
/// type.
///
/// \returns A pointer to a new LLVM type, possibly the same as the original
/// on success; 0 on failure.
virtual llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
StringRef Constraint,
llvm::Type *Ty) const {
return Ty;
}
/// Adds constraints and types for result registers.
virtual void addReturnRegisterOutputs(
CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue,
std::string &Constraints, std::vector<llvm::Type *> &ResultRegTypes,
std::vector<llvm::Type *> &ResultTruncRegTypes,
std::vector<CodeGen::LValue> &ResultRegDests, std::string &AsmString,
unsigned NumOutputs) const {}
/// doesReturnSlotInterfereWithArgs - Return true if the target uses an
/// argument slot for an 'sret' type.
virtual bool doesReturnSlotInterfereWithArgs() const { return true; }
/// Retrieve the address of a function to call immediately before
/// calling objc_retainAutoreleasedReturnValue. The
/// implementation of objc_autoreleaseReturnValue sniffs the
/// instruction stream following its return address to decide
/// whether it's a call to objc_retainAutoreleasedReturnValue.
/// This can be prohibitively expensive, depending on the
/// relocation model, and so on some targets it instead sniffs for
/// a particular instruction sequence. This functions returns
/// that instruction sequence in inline assembly, which will be
/// empty if none is required.
virtual StringRef getARCRetainAutoreleasedReturnValueMarker() const {
return "";
}
/// Return a constant used by UBSan as a signature to identify functions
/// possessing type information, or 0 if the platform is unsupported.
virtual llvm::Constant *
getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const {
return nullptr;
}
/// Determine whether a call to an unprototyped functions under
/// the given calling convention should use the variadic
/// convention or the non-variadic convention.
///
/// There's a good reason to make a platform's variadic calling
/// convention be different from its non-variadic calling
/// convention: the non-variadic arguments can be passed in
/// registers (better for performance), and the variadic arguments
/// can be passed on the stack (also better for performance). If
/// this is done, however, unprototyped functions *must* use the
/// non-variadic convention, because C99 states that a call
/// through an unprototyped function type must succeed if the
/// function was defined with a non-variadic prototype with
/// compatible parameters. Therefore, splitting the conventions
/// makes it impossible to call a variadic function through an
/// unprototyped type. Since function prototypes came out in the
/// late 1970s, this is probably an acceptable trade-off.
  /// Nonetheless, not all platforms are willing to make it, and in
  /// particular x86-64 bends over backwards to make the
/// conventions compatible.
///
/// The default is false. This is correct whenever:
/// - the conventions are exactly the same, because it does not
/// matter and the resulting IR will be somewhat prettier in
/// certain cases; or
/// - the conventions are substantively different in how they pass
/// arguments, because in this case using the variadic convention
/// will lead to C99 violations.
///
/// However, some platforms make the conventions identical except
/// for passing additional out-of-band information to a variadic
/// function: for example, x86-64 passes the number of SSE
/// arguments in %al. On these platforms, it is desirable to
/// call unprototyped functions using the variadic convention so
/// that unprototyped calls to varargs functions still succeed.
///
/// Relatedly, platforms which pass the fixed arguments to this:
/// A foo(B, C, D);
/// differently than they would pass them to this:
/// A foo(B, C, D, ...);
/// may need to adjust the debugger-support code in Sema to do the
  /// right thing when calling a function with no known signature.
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args,
const FunctionNoProtoType *fnType) const;
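  // A hypothetical C example of the constraint described above:
  //   int f();                                // unprototyped declaration
  //   int f(int a, int b) { return a + b; }   // non-variadic definition
  //   ... f(1, 2) ...  // must succeed, so the call cannot use a convention
  //                    // incompatible with the non-variadic definition.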
/// Gets the linker options necessary to link a dependent library on this
/// platform.
virtual void getDependentLibraryOption(llvm::StringRef Lib,
llvm::SmallString<24> &Opt) const;
/// Gets the linker options necessary to detect object file mismatches on
/// this platform.
virtual void getDetectMismatchOption(llvm::StringRef Name,
llvm::StringRef Value,
llvm::SmallString<32> &Opt) const {}
};
}
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/README.txt | IRgen optimization opportunities.
//===---------------------------------------------------------------------===//
The common pattern of
--
short x; // or char, etc
(x == 10)
--
generates a zext/sext of x which can easily be avoided.
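A sketch of the current output (IR shape is approximate and
target-dependent):
--
%x = load i16, i16* %x.addr
%conv = sext i16 %x to i32
%cmp = icmp eq i32 %conv, 10
--
The comparison could instead be performed directly on the i16 value.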
//===---------------------------------------------------------------------===//
Bitfield accesses can be shifted to simplify masking and sign
extension. For example, if the bitfield width is 8 and it is
appropriately aligned then it is a lot shorter to just load the char
directly.
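For instance (hypothetical layout):
--
struct S { int a : 8; };
// With 'a' byte-aligned at offset 0, loading 's.a' can be a plain
// i8 load plus sext instead of a wider load, shift, and mask sequence.
--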
//===---------------------------------------------------------------------===//
It may be worth avoiding creation of alloca's for formal arguments
for the common situation where the argument is never written to or has
its address taken. The idea would be to begin generating code by using
the argument directly and if its address is taken or it is stored to
then generate the alloca and patch up the existing code.
In theory, the same optimization could be a win for block local
variables as long as the declaration dominates all statements in the
block.
NOTE: The main case we care about this for is for -O0 -g compile time
performance, and in that scenario we will need to emit the alloca
anyway currently to emit proper debug info. So this is blocked by
being able to emit debug information which refers to an LLVM
temporary, not an alloca.
//===---------------------------------------------------------------------===//
We should try and avoid generating basic blocks which only contain
jumps. At -O0, this penalizes us all the way from IRgen (malloc &
instruction overhead), all the way down through code generation and
assembly time.
On 176.gcc:expr.ll, it looks like over 12% of basic blocks are just
direct branches!
//===---------------------------------------------------------------------===//
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGLoopInfo.h | //===---- CGLoopInfo.h - LLVM CodeGen for loop metadata -*- C++ -*---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the internal state used for llvm translation for loop statement
// metadata.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGLOOPINFO_H
#define LLVM_CLANG_LIB_CODEGEN_CGLOOPINFO_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
class BasicBlock;
class Instruction;
class MDNode;
} // end namespace llvm
namespace clang {
class Attr;
namespace CodeGen {
/// \brief Attributes that may be specified on loops.
struct LoopAttributes {
explicit LoopAttributes(bool IsParallel = false);
void clear();
/// \brief Generate llvm.loop.parallel metadata for loads and stores.
bool IsParallel;
/// \brief Values of llvm.loop.vectorize.enable metadata.
enum LVEnableState { VecUnspecified, VecEnable, VecDisable };
/// \brief llvm.loop.vectorize.enable
LVEnableState VectorizerEnable;
/// \brief llvm.loop.vectorize.width
unsigned VectorizerWidth;
/// \brief llvm.loop.interleave.count
unsigned VectorizerUnroll;
// HLSL Change Begins.
/// \brief hlsl loop unrolling policy based on [loop] and [unroll] attributes
enum HlslUnrollPolicyEnum { HlslAllowUnroll, HlslDisableUnroll, HlslForceUnroll };
/// \brief hlsl unrolling policy
HlslUnrollPolicyEnum HlslUnrollPolicy;
/// \brief argument to hlsl [unroll] attribute, 0 = full unroll
unsigned HlslUnrollCount;
// HLSL Change Ends.
};
/// \brief Information used when generating a structured loop.
class LoopInfo {
public:
/// \brief Construct a new LoopInfo for the loop with entry Header.
LoopInfo(llvm::BasicBlock *Header, const LoopAttributes &Attrs);
/// \brief Get the loop id metadata for this loop.
llvm::MDNode *getLoopID() const { return LoopID; }
/// \brief Get the header block of this loop.
llvm::BasicBlock *getHeader() const { return Header; }
/// \brief Get the set of attributes active for this loop.
const LoopAttributes &getAttributes() const { return Attrs; }
private:
/// \brief Loop ID metadata.
llvm::MDNode *LoopID;
/// \brief Header block of this loop.
llvm::BasicBlock *Header;
/// \brief The attributes for this loop.
LoopAttributes Attrs;
};
/// \brief A stack of loop information corresponding to loop nesting levels.
/// This stack can be used to prepare attributes which are applied when a loop
/// is emitted.
class LoopInfoStack {
LoopInfoStack(const LoopInfoStack &) = delete;
void operator=(const LoopInfoStack &) = delete;
public:
LoopInfoStack() {}
/// \brief Begin a new structured loop. The set of staged attributes will be
/// applied to the loop and then cleared.
void push(llvm::BasicBlock *Header,
llvm::ArrayRef<const Attr *> Attrs = llvm::None);
/// \brief End the current loop.
void pop();
/// \brief Return the top loop id metadata.
llvm::MDNode *getCurLoopID() const { return getInfo().getLoopID(); }
/// \brief Return true if the top loop is parallel.
bool getCurLoopParallel() const {
return hasInfo() ? getInfo().getAttributes().IsParallel : false;
}
/// \brief Function called by the CodeGenFunction when an instruction is
/// created.
void InsertHelper(llvm::Instruction *I) const;
/// \brief Set the next pushed loop as parallel.
void setParallel(bool Enable = true) { StagedAttrs.IsParallel = Enable; }
/// \brief Set the next pushed loop 'vectorizer.enable'
void setVectorizerEnable(bool Enable = true) {
StagedAttrs.VectorizerEnable =
Enable ? LoopAttributes::VecEnable : LoopAttributes::VecDisable;
}
/// \brief Set the vectorizer width for the next loop pushed.
void setVectorizerWidth(unsigned W) { StagedAttrs.VectorizerWidth = W; }
/// \brief Set the vectorizer unroll for the next loop pushed.
void setVectorizerUnroll(unsigned U) { StagedAttrs.VectorizerUnroll = U; }
// HLSL Change Begins
/// \brief Set the hlsl unroll count for the next loop pushed.
void setHlslUnroll(unsigned U) {
StagedAttrs.HlslUnrollPolicy = LoopAttributes::HlslForceUnroll;
StagedAttrs.HlslUnrollCount = U;
}
/// \brief Set the hlsl loop for the next loop pushed.
void setHlslLoop() {
StagedAttrs.HlslUnrollPolicy = LoopAttributes::HlslDisableUnroll;
}
// HLSL Change Ends
private:
/// \brief Returns true if there is LoopInfo on the stack.
bool hasInfo() const { return !Active.empty(); }
/// \brief Return the LoopInfo for the current loop. HasInfo should be called
/// first to ensure LoopInfo is present.
const LoopInfo &getInfo() const { return Active.back(); }
/// \brief The set of attributes that will be applied to the next pushed loop.
LoopAttributes StagedAttrs;
/// \brief Stack of active loops.
llvm::SmallVector<LoopInfo, 4> Active;
};
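// Illustrative usage sketch (an assumption for exposition, not part of the
// original header; block names are hypothetical):
//
//   LoopInfoStack LIS;
//   LIS.setHlslUnroll(4);      // stage [unroll(4)] for the next loop
//   LIS.push(LoopHeaderBB);    // staged attributes applied, then cleared
//   // ... emit the loop body; InsertHelper tags new instructions ...
//   llvm::MDNode *LoopID = LIS.getCurLoopID();
//   LIS.pop();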
} // end namespace CodeGen
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGCall.cpp | //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//
#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGHLSLRuntime.h" // HLSL Change
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;
/***/
static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
switch (CC) {
default: return llvm::CallingConv::C;
case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
// TODO: Add support for __pascal to LLVM.
case CC_X86Pascal: return llvm::CallingConv::C;
// TODO: Add support for __vectorcall to LLVM.
case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
}
}
/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}
/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
return MD->getType()->getCanonicalTypeUnqualified()
.getAs<FunctionProtoType>();
}
/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}
/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
// When translating an unprototyped function type, always use a
// variadic type.
return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
/*instanceMethod=*/false,
/*chainCall=*/false, None,
FTNP->getExtInfo(), RequiredArgs(0));
}
/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
SmallVectorImpl<CanQualType> &prefix,
CanQual<FunctionProtoType> FTP) {
RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
// FIXME: Kill copy.
prefix.append(FTP->param_type_begin(), FTP->param_type_end());
CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
/*chainCall=*/false, prefix,
FTP->getExtInfo(), required);
}
/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
SmallVector<CanQualType, 16> argTypes;
return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
FTP);
}
static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
// Set the appropriate calling convention for the Function.
if (D->hasAttr<StdCallAttr>())
return CC_X86StdCall;
if (D->hasAttr<FastCallAttr>())
return CC_X86FastCall;
if (D->hasAttr<ThisCallAttr>())
return CC_X86ThisCall;
if (D->hasAttr<VectorCallAttr>())
return CC_X86VectorCall;
if (D->hasAttr<PascalAttr>())
return CC_X86Pascal;
if (PcsAttr *PCS = D->getAttr<PcsAttr>())
return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
if (D->hasAttr<IntelOclBiccAttr>())
return CC_IntelOclBicc;
if (D->hasAttr<MSABIAttr>())
return IsWindows ? CC_C : CC_X86_64Win64;
if (D->hasAttr<SysVABIAttr>())
return IsWindows ? CC_X86_64SysV : CC_C;
return CC_C;
}
/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
const FunctionProtoType *FTP) {
SmallVector<CanQualType, 16> argTypes;
// Add the 'this' pointer.
if (RD)
argTypes.push_back(GetThisType(Context, RD));
else
argTypes.push_back(Context.VoidPtrTy);
return ::arrangeLLVMFunctionInfo(
*this, true, argTypes,
FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}
/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
CanQual<FunctionProtoType> prototype = GetFormalType(MD);
if (MD->isInstance()) {
// The abstract case is perfectly fine.
const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
return arrangeCXXMethodType(ThisType, prototype.getTypePtr());
}
return arrangeFreeFunctionType(prototype);
}
const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
StructorType Type) {
SmallVector<CanQualType, 16> argTypes;
argTypes.push_back(GetThisType(Context, MD->getParent()));
GlobalDecl GD;
if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
GD = GlobalDecl(CD, toCXXCtorType(Type));
} else {
auto *DD = dyn_cast<CXXDestructorDecl>(MD);
GD = GlobalDecl(DD, toCXXDtorType(Type));
}
CanQual<FunctionProtoType> FTP = GetFormalType(MD);
// Add the formal parameters.
argTypes.append(FTP->param_type_begin(), FTP->param_type_end());
TheCXXABI.buildStructorSignature(MD, Type, argTypes);
RequiredArgs required =
(MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);
FunctionType::ExtInfo extInfo = FTP->getExtInfo();
CanQualType resultType = TheCXXABI.HasThisReturn(GD)
? argTypes.front()
: TheCXXABI.hasMostDerivedReturn(GD)
? CGM.getContext().VoidPtrTy
: Context.VoidTy;
return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
/*chainCall=*/false, argTypes, extInfo,
required);
}
/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
const CXXConstructorDecl *D,
CXXCtorType CtorKind,
unsigned ExtraArgs) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> ArgTypes;
for (const auto &Arg : args)
ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
CanQual<FunctionProtoType> FPT = GetFormalType(D);
RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
GlobalDecl GD(D, CtorKind);
CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
? ArgTypes.front()
: TheCXXABI.hasMostDerivedReturn(GD)
? CGM.getContext().VoidPtrTy
: Context.VoidTy;
FunctionType::ExtInfo Info = FPT->getExtInfo();
return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
/*chainCall=*/false, ArgTypes, Info,
Required);
}
/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
if (MD->isInstance())
return arrangeCXXMethodDeclaration(MD);
CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
assert(isa<FunctionType>(FTy));
// When declaring a function without a prototype, always use a
// non-variadic type.
if (isa<FunctionNoProtoType>(FTy)) {
CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
return arrangeLLVMFunctionInfo(
noProto->getReturnType(), /*instanceMethod=*/false,
/*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All);
}
assert(isa<FunctionProtoType>(FTy));
return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}
/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
// It happens that this is the same as a call with no optional
// arguments, except also using the formal 'self' type.
return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}
/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
QualType receiverType) {
SmallVector<CanQualType, 16> argTys;
argTys.push_back(Context.getCanonicalParamType(receiverType));
argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
// FIXME: Kill copy?
for (const auto *I : MD->params()) {
argTys.push_back(Context.getCanonicalParamType(I->getType()));
}
FunctionType::ExtInfo einfo;
bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
if (getContext().getLangOpts().ObjCAutoRefCount &&
MD->hasAttr<NSReturnsRetainedAttr>())
einfo = einfo.withProducesResult(true);
RequiredArgs required =
(MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
return arrangeLLVMFunctionInfo(
GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
/*chainCall=*/false, argTys, einfo, required);
}
const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
// FIXME: Do we need to handle ObjCMethodDecl?
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
return arrangeFunctionDeclaration(FD);
}
/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
assert(MD->isVirtual() && "only virtual memptrs have thunks");
CanQual<FunctionProtoType> FTP = GetFormalType(MD);
CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
/*chainCall=*/false, ArgTys,
FTP->getExtInfo(), RequiredArgs(1));
}
const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
CXXCtorType CT) {
assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
CanQual<FunctionProtoType> FTP = GetFormalType(CD);
SmallVector<CanQualType, 2> ArgTys;
const CXXRecordDecl *RD = CD->getParent();
ArgTys.push_back(GetThisType(Context, RD));
if (CT == Ctor_CopyingClosure)
ArgTys.push_back(*FTP->param_type_begin());
if (RD->getNumVBases() > 0)
ArgTys.push_back(Context.IntTy);
CallingConv CC = Context.getDefaultCallingConvention(
/*IsVariadic=*/false, /*IsCXXMethod=*/true);
return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
/*chainCall=*/false, ArgTys,
FunctionType::ExtInfo(CC), RequiredArgs::All);
}
/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
CodeGenModule &CGM,
const CallArgList &args,
const FunctionType *fnType,
unsigned numExtraRequiredArgs,
bool chainCall) {
assert(args.size() >= numExtraRequiredArgs);
// In most cases, there are no optional arguments.
RequiredArgs required = RequiredArgs::All;
// If we have a variadic prototype, the required arguments are the
// extra prefix plus the arguments in the prototype.
if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
if (proto->isVariadic())
required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
// If we don't have a prototype at all, but we're supposed to
// explicitly use the variadic convention for unprototyped calls,
// treat all of the arguments as required but preserve the nominal
// possibility of variadics.
} else if (CGM.getTargetCodeGenInfo()
.isNoProtoCallVariadic(args,
cast<FunctionNoProtoType>(fnType))) {
required = RequiredArgs(args.size());
}
// FIXME: Kill copy.
SmallVector<CanQualType, 16> argTypes;
for (const auto &arg : args)
argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
/*instanceMethod=*/false, chainCall,
argTypes, fnType->getExtInfo(), required);
}
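// Illustrative sketch (the example call is assumed, not from the original
// source): for a variadic prototype such as
//   int printf(const char *fmt, ...);
// a call 'printf("%d %d", a, b)' with numExtraRequiredArgs == 0 yields
// required == RequiredArgs(1): only 'fmt' is formally required, while
// 'a' and 'b' travel under the variadic convention.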
/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
const FunctionType *fnType,
bool chainCall) {
return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
chainCall ? 1 : 0, chainCall);
}
/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
const FunctionType *fnType) {
return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
/*chainCall=*/false);
}
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
const CallArgList &args,
FunctionType::ExtInfo info,
RequiredArgs required) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> argTypes;
for (const auto &Arg : args)
argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
return arrangeLLVMFunctionInfo(
GetReturnType(resultType), /*instanceMethod=*/false,
/*chainCall=*/false, argTypes, info, required);
}
/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
const FunctionProtoType *FPT,
RequiredArgs required) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> argTypes;
for (const auto &Arg : args)
argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
FunctionType::ExtInfo info = FPT->getExtInfo();
return arrangeLLVMFunctionInfo(
GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true,
/*chainCall=*/false, argTypes, info, required);
}
const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
QualType resultType, const FunctionArgList &args,
const FunctionType::ExtInfo &info, bool isVariadic) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> argTypes;
for (auto Arg : args)
argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));
RequiredArgs required =
(isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
return arrangeLLVMFunctionInfo(
GetReturnType(resultType), /*instanceMethod=*/false,
/*chainCall=*/false, argTypes, info, required);
}
const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
return arrangeLLVMFunctionInfo(
getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
None, FunctionType::ExtInfo(), RequiredArgs::All);
}
/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
bool instanceMethod,
bool chainCall,
ArrayRef<CanQualType> argTypes,
FunctionType::ExtInfo info,
RequiredArgs required) {
// HLSL Change Starts
ASTContext &context = getContext();
auto isCanonicalAsParam = [&context](const CanQualType &Ty) {
return Ty.isCanonicalAsParam() ||
(context.getLangOpts().HLSL && Ty->isArrayType());
};
// HLSL Change Ends
assert(std::all_of(argTypes.begin(), argTypes.end(),
isCanonicalAsParam)); // HLSL Change - skip arrays when
// checking isCanonicalAsParam
(void)isCanonicalAsParam;
unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required,
resultType, argTypes);
void *insertPos = nullptr;
CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
if (FI)
return *FI;
// Construct the function info. We co-allocate the ArgInfos.
FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
resultType, argTypes, required);
FunctionInfos.InsertNode(FI, insertPos);
bool inserted = FunctionsBeingProcessed.insert(FI).second;
(void)inserted;
assert(inserted && "Recursively being processed?");
// Compute ABI information.
getABIInfo().computeInfo(*FI);
// Loop over all of the computed argument and return value info. If any of
// them are direct or extend without a specified coerce type, specify the
// default now.
ABIArgInfo &retInfo = FI->getReturnInfo();
if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
for (auto &I : FI->arguments())
if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
I.info.setCoerceToType(ConvertType(I.type));
bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
assert(erased && "Not in set?");
return *FI;
}
CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
bool instanceMethod,
bool chainCall,
const FunctionType::ExtInfo &info,
CanQualType resultType,
ArrayRef<CanQualType> argTypes,
RequiredArgs required) {
void *buffer = operator new(sizeof(CGFunctionInfo) +
sizeof(ArgInfo) * (argTypes.size() + 1));
CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
FI->CallingConvention = llvmCC;
FI->EffectiveCallingConvention = llvmCC;
FI->ASTCallingConvention = info.getCC();
FI->InstanceMethod = instanceMethod;
FI->ChainCall = chainCall;
FI->NoReturn = info.getNoReturn();
FI->ReturnsRetained = info.getProducesResult();
FI->Required = required;
FI->HasRegParm = info.getHasRegParm();
FI->RegParm = info.getRegParm();
FI->ArgStruct = nullptr;
FI->NumArgs = argTypes.size();
FI->getArgsBuffer()[0].type = resultType;
for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
FI->getArgsBuffer()[i + 1].type = argTypes[i];
return FI;
}
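// Layout sketch of the co-allocation above (illustrative):
//   [ CGFunctionInfo | ArgInfo(result) | ArgInfo(arg 0) | ... | ArgInfo(arg N-1) ]
// getArgsBuffer() points at the trailing array, so slot 0 holds the
// return type and slots 1..N hold the parameter types.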
/***/
namespace {
// ABIArgInfo::Expand implementation.
// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
enum TypeExpansionKind {
// Elements of constant arrays are expanded recursively.
TEK_ConstantArray,
// Record fields are expanded recursively (but if record is a union, only
// the field with the largest size is expanded).
TEK_Record,
// For complex types, real and imaginary parts are expanded recursively.
TEK_Complex,
// All other types are not expandable.
TEK_None
};
const TypeExpansionKind Kind;
TypeExpansion(TypeExpansionKind K) : Kind(K) {}
virtual ~TypeExpansion() {}
};
struct ConstantArrayExpansion : TypeExpansion {
QualType EltTy;
uint64_t NumElts;
ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
: TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
static bool classof(const TypeExpansion *TE) {
return TE->Kind == TEK_ConstantArray;
}
};
struct RecordExpansion : TypeExpansion {
SmallVector<const CXXBaseSpecifier *, 1> Bases;
SmallVector<const FieldDecl *, 1> Fields;
RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
SmallVector<const FieldDecl *, 1> &&Fields)
: TypeExpansion(TEK_Record), Bases(Bases), Fields(Fields) {}
static bool classof(const TypeExpansion *TE) {
return TE->Kind == TEK_Record;
}
};
struct ComplexExpansion : TypeExpansion {
QualType EltTy;
ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
static bool classof(const TypeExpansion *TE) {
return TE->Kind == TEK_Complex;
}
};
struct NoExpansion : TypeExpansion {
NoExpansion() : TypeExpansion(TEK_None) {}
static bool classof(const TypeExpansion *TE) {
return TE->Kind == TEK_None;
}
};
} // namespace
static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
return llvm::make_unique<ConstantArrayExpansion>(
AT->getElementType(), AT->getSize().getZExtValue());
}
if (const RecordType *RT = Ty->getAs<RecordType>()) {
SmallVector<const CXXBaseSpecifier *, 1> Bases;
SmallVector<const FieldDecl *, 1> Fields;
const RecordDecl *RD = RT->getDecl();
assert(!RD->hasFlexibleArrayMember() &&
"Cannot expand structure with flexible array.");
if (RD->isUnion()) {
// Unions can appear here only in degenerate cases - all the fields are the
// same after flattening. Thus we have to use the "largest" field.
const FieldDecl *LargestFD = nullptr;
CharUnits UnionSize = CharUnits::Zero();
for (const auto *FD : RD->fields()) {
// Skip zero length bitfields.
if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
continue;
assert(!FD->isBitField() &&
"Cannot expand structure with bit-field members.");
CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
if (UnionSize < FieldSize) {
UnionSize = FieldSize;
LargestFD = FD;
}
}
if (LargestFD)
Fields.push_back(LargestFD);
} else {
if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
assert(!CXXRD->isDynamicClass() &&
"cannot expand vtable pointers in dynamic classes");
for (const CXXBaseSpecifier &BS : CXXRD->bases())
Bases.push_back(&BS);
}
for (const auto *FD : RD->fields()) {
// Skip zero length bitfields.
if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
continue;
assert(!FD->isBitField() &&
"Cannot expand structure with bit-field members.");
Fields.push_back(FD);
}
}
return llvm::make_unique<RecordExpansion>(std::move(Bases),
std::move(Fields));
}
if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
return llvm::make_unique<ComplexExpansion>(CT->getElementType());
}
return llvm::make_unique<NoExpansion>();
}
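// Illustrative sketch (assumed input, not from the original source): for
//   struct P { int a; _Complex float c; };
// getTypeExpansion returns a RecordExpansion over {a, c}; 'a' is a
// NoExpansion (1 slot) and 'c' a ComplexExpansion (2 slots), so
// getExpansionSize(P) == 3 and the expanded IR parameter types are
// (i32, float, float).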
static int getExpansionSize(QualType Ty, const ASTContext &Context) {
auto Exp = getTypeExpansion(Ty, Context);
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
}
if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
int Res = 0;
for (auto BS : RExp->Bases)
Res += getExpansionSize(BS->getType(), Context);
for (auto FD : RExp->Fields)
Res += getExpansionSize(FD->getType(), Context);
return Res;
}
if (isa<ComplexExpansion>(Exp.get()))
return 2;
assert(isa<NoExpansion>(Exp.get()));
return 1;
}
void
CodeGenTypes::getExpandedTypes(QualType Ty,
SmallVectorImpl<llvm::Type *>::iterator &TI) {
auto Exp = getTypeExpansion(Ty, Context);
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
for (int i = 0, n = CAExp->NumElts; i < n; i++) {
getExpandedTypes(CAExp->EltTy, TI);
}
} else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
for (auto BS : RExp->Bases)
getExpandedTypes(BS->getType(), TI);
for (auto FD : RExp->Fields)
getExpandedTypes(FD->getType(), TI);
} else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
llvm::Type *EltTy = ConvertType(CExp->EltTy);
*TI++ = EltTy;
*TI++ = EltTy;
} else {
assert(isa<NoExpansion>(Exp.get()));
*TI++ = ConvertType(Ty);
}
}
void CodeGenFunction::ExpandTypeFromArgs(
QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
assert(LV.isSimple() &&
"Unexpected non-simple lvalue during struct expansion.");
auto Exp = getTypeExpansion(Ty, getContext());
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
for (int i = 0, n = CAExp->NumElts; i < n; i++) {
llvm::Value *EltAddr =
Builder.CreateConstGEP2_32(nullptr, LV.getAddress(), 0, i);
LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
}
} else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
llvm::Value *This = LV.getAddress();
for (const CXXBaseSpecifier *BS : RExp->Bases) {
// Perform a single step derived-to-base conversion.
llvm::Value *Base =
GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
/*NullCheckValue=*/false, SourceLocation());
LValue SubLV = MakeAddrLValue(Base, BS->getType());
// Recurse onto bases.
ExpandTypeFromArgs(BS->getType(), SubLV, AI);
}
for (auto FD : RExp->Fields) {
// FIXME: What are the right qualifiers here?
LValue SubLV = EmitLValueForField(LV, FD);
ExpandTypeFromArgs(FD->getType(), SubLV, AI);
}
} else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
llvm::Value *RealAddr =
Builder.CreateStructGEP(nullptr, LV.getAddress(), 0, "real");
EmitStoreThroughLValue(RValue::get(*AI++),
MakeAddrLValue(RealAddr, CExp->EltTy));
llvm::Value *ImagAddr =
Builder.CreateStructGEP(nullptr, LV.getAddress(), 1, "imag");
EmitStoreThroughLValue(RValue::get(*AI++),
MakeAddrLValue(ImagAddr, CExp->EltTy));
} else {
assert(isa<NoExpansion>(Exp.get()));
EmitStoreThroughLValue(RValue::get(*AI++), LV);
}
}
void CodeGenFunction::ExpandTypeToArgs(
QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
auto Exp = getTypeExpansion(Ty, getContext());
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
llvm::Value *Addr = RV.getAggregateAddr();
for (int i = 0, n = CAExp->NumElts; i < n; i++) {
llvm::Value *EltAddr = Builder.CreateConstGEP2_32(nullptr, Addr, 0, i);
RValue EltRV =
convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
}
} else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
llvm::Value *This = RV.getAggregateAddr();
for (const CXXBaseSpecifier *BS : RExp->Bases) {
// Perform a single step derived-to-base conversion.
llvm::Value *Base =
GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
/*NullCheckValue=*/false, SourceLocation());
RValue BaseRV = RValue::getAggregate(Base);
// Recurse onto bases.
ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
IRCallArgPos);
}
LValue LV = MakeAddrLValue(This, Ty);
for (auto FD : RExp->Fields) {
RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
IRCallArgPos);
}
} else if (isa<ComplexExpansion>(Exp.get())) {
ComplexPairTy CV = RV.getComplexVal();
IRCallArgs[IRCallArgPos++] = CV.first;
IRCallArgs[IRCallArgPos++] = CV.second;
} else {
assert(isa<NoExpansion>(Exp.get()));
assert(RV.isScalar() &&
"Unexpected non-scalar rvalue during struct expansion.");
// Insert a bitcast as needed.
llvm::Value *V = RV.getScalarVal();
if (IRCallArgPos < IRFuncTy->getNumParams() &&
V->getType() != IRFuncTy->getParamType(IRCallArgPos))
V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
IRCallArgs[IRCallArgPos++] = V;
}
}
/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
llvm::StructType *SrcSTy,
uint64_t DstSize, CodeGenFunction &CGF) {
// We can't dive into a zero-element struct.
if (SrcSTy->getNumElements() == 0) return SrcPtr;
llvm::Type *FirstElt = SrcSTy->getElementType(0);
// If the first elt is at least as large as what we're looking for, or if the
// first element is the same size as the whole struct, we can enter it. The
// comparison must be made on the store size and not the alloca size. Using
// the alloca size may overstate the size of the load.
uint64_t FirstEltSize =
CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
if (FirstEltSize < DstSize &&
FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
return SrcPtr;
// GEP into the first element.
SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcSTy, SrcPtr, 0, 0, "coerce.dive");
// If the first element is a struct, recurse.
llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
return SrcPtr;
}
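// Worked example (illustrative): given SrcSTy = { { i32, i32 }, i8 } and
// DstSize = 4, the first element { i32, i32 } has store size 8 >= 4, so we
// GEP into it; recursing, its first element i32 has store size 4 >= 4,
// so the returned pointer addresses the leading i32 directly.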
/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
llvm::Type *Ty,
CodeGenFunction &CGF) {
if (Val->getType() == Ty)
return Val;
if (isa<llvm::PointerType>(Val->getType())) {
// If this is Pointer->Pointer avoid conversion to and from int.
if (isa<llvm::PointerType>(Ty))
return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
// Convert the pointer to an integer so we can play with its width.
Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
}
llvm::Type *DestIntTy = Ty;
if (isa<llvm::PointerType>(DestIntTy))
DestIntTy = CGF.IntPtrTy;
if (Val->getType() != DestIntTy) {
const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
if (DL.isBigEndian()) {
// Preserve the high bits on big-endian targets.
// That is what memory coercion does.
uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
if (SrcSize > DstSize) {
Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
} else {
Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
}
} else {
// Little-endian targets preserve the low bits. No shifts required.
Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
}
}
if (isa<llvm::PointerType>(Ty))
Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
return Val;
}
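// Worked example (illustrative): coercing an i64 holding
// 0xAABBCCDD11223344 down to i32 yields 0xAABBCCDD on a big-endian
// target (lshr by 32, then trunc) but 0x11223344 on a little-endian one
// (plain trunc) - exactly what storing the i64 and loading an i32 from
// the same address would produce.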
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
llvm::Type *Ty, CharUnits SrcAlign,
CodeGenFunction &CGF) {
llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
// If SrcTy and Ty are the same, just do a load.
if (SrcTy == Ty)
return CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());
uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
}
uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
// If the source and destination are integer or pointer types, just do an
// extension or truncation to the desired type.
if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
(isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
llvm::LoadInst *Load =
CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());
return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
}
// If load is legal, just bitcast the src pointer.
if (SrcSize >= DstSize) {
// Generally SrcSize is never greater than DstSize, since this means we are
// losing bits. However, this can happen in cases where the structure has
// additional padding, for example due to a user specified alignment.
//
// FIXME: Assert that we aren't truncating non-padding bits when we have
// access to that information.
llvm::Value *Casted =
CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
return CGF.Builder.CreateAlignedLoad(Casted, SrcAlign.getQuantity());
}
// Otherwise do coercion through memory. This is stupid, but
// simple.
llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(Ty);
Tmp->setAlignment(SrcAlign.getQuantity());
llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
CGF.Builder.CreateMemCpy(Casted, SrcCasted,
llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
SrcAlign.getQuantity(), false);
return CGF.Builder.CreateAlignedLoad(Tmp, SrcAlign.getQuantity());
}
// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
llvm::Value *DestPtr, bool DestIsVolatile,
CharUnits DestAlign) {
// Prefer scalar stores to first-class aggregate stores.
if (llvm::StructType *STy =
dyn_cast<llvm::StructType>(Val->getType())) {
// HLSL Change Begins
assert(!CGF.getLangOpts().HLSL &&
"HLSL uses SRet so this should not be possible to reach.");
// HLSL Change Ends
const llvm::StructLayout *Layout =
CGF.CGM.getDataLayout().getStructLayout(STy);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(STy, DestPtr, 0, i);
llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
uint64_t EltOffset = Layout->getElementOffset(i);
CharUnits EltAlign =
DestAlign.alignmentAtOffset(CharUnits::fromQuantity(EltOffset));
CGF.Builder.CreateAlignedStore(Elt, EltPtr, EltAlign.getQuantity(),
DestIsVolatile);
}
} else {
CGF.Builder.CreateAlignedStore(Val, DestPtr, DestAlign.getQuantity(),
DestIsVolatile);
}
}
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
llvm::Value *DstPtr,
bool DstIsVolatile,
CharUnits DstAlign,
CodeGenFunction &CGF) {
llvm::Type *SrcTy = Src->getType();
llvm::Type *DstTy =
cast<llvm::PointerType>(DstPtr->getType())->getElementType();
if (SrcTy == DstTy) {
CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(),
DstIsVolatile);
return;
}
uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
}
// If the source and destination are integer or pointer types, just do an
// extension or truncation to the desired type.
if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
(isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(),
DstIsVolatile);
return;
}
uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
// If store is legal, just bitcast the src pointer.
if (SrcSize <= DstSize) {
llvm::Value *Casted =
CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
BuildAggStore(CGF, Src, Casted, DstIsVolatile, DstAlign);
} else {
// Otherwise do coercion through memory. This is stupid, but
// simple.
// Generally SrcSize is never greater than DstSize, since this means we are
// losing bits. However, this can happen in cases where the structure has
// additional padding, for example due to a user specified alignment.
//
// FIXME: Assert that we aren't truncating non-padding bits when we have
// access to that information.
llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(SrcTy);
Tmp->setAlignment(DstAlign.getQuantity());
CGF.Builder.CreateAlignedStore(Src, Tmp, DstAlign.getQuantity());
llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
CGF.Builder.CreateMemCpy(DstCasted, Casted,
llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
DstAlign.getQuantity(), false);
}
}
namespace {
/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
static const unsigned InvalidIndex = ~0U;
unsigned InallocaArgNo;
unsigned SRetArgNo;
unsigned TotalIRArgs;
/// Arguments of LLVM IR function corresponding to single Clang argument.
struct IRArgs {
unsigned PaddingArgIndex;
// Argument is expanded to IR arguments at positions
// [FirstArgIndex, FirstArgIndex + NumberOfArgs).
unsigned FirstArgIndex;
unsigned NumberOfArgs;
IRArgs()
: PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
NumberOfArgs(0) {}
};
SmallVector<IRArgs, 8> ArgInfo;
public:
ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
bool OnlyRequiredArgs = false)
: InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
construct(Context, FI, OnlyRequiredArgs);
}
bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
unsigned getInallocaArgNo() const {
assert(hasInallocaArg());
return InallocaArgNo;
}
bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
unsigned getSRetArgNo() const {
assert(hasSRetArg());
return SRetArgNo;
}
unsigned totalIRArgs() const { return TotalIRArgs; }
bool hasPaddingArg(unsigned ArgNo) const {
assert(ArgNo < ArgInfo.size());
return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
}
unsigned getPaddingArgNo(unsigned ArgNo) const {
assert(hasPaddingArg(ArgNo));
return ArgInfo[ArgNo].PaddingArgIndex;
}
/// Returns the index of the first IR argument corresponding to ArgNo, and
/// the number of IR arguments it expands to.
std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
assert(ArgNo < ArgInfo.size());
return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
ArgInfo[ArgNo].NumberOfArgs);
}
private:
void construct(const ASTContext &Context, const CGFunctionInfo &FI,
bool OnlyRequiredArgs);
};
void ClangToLLVMArgMapping::construct(const ASTContext &Context,
const CGFunctionInfo &FI,
bool OnlyRequiredArgs) {
unsigned IRArgNo = 0;
bool SwapThisWithSRet = false;
const ABIArgInfo &RetAI = FI.getReturnInfo();
if (RetAI.getKind() == ABIArgInfo::Indirect) {
SwapThisWithSRet = RetAI.isSRetAfterThis();
SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
}
unsigned ArgNo = 0;
unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
++I, ++ArgNo) {
assert(I != FI.arg_end());
QualType ArgType = I->type;
const ABIArgInfo &AI = I->info;
// Collect data about IR arguments corresponding to Clang argument ArgNo.
auto &IRArgs = ArgInfo[ArgNo];
if (AI.getPaddingType())
IRArgs.PaddingArgIndex = IRArgNo++;
switch (AI.getKind()) {
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
// FIXME: handle sseregparm someday...
llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
IRArgs.NumberOfArgs = STy->getNumElements();
} else {
IRArgs.NumberOfArgs = 1;
}
break;
}
case ABIArgInfo::Indirect:
IRArgs.NumberOfArgs = 1;
break;
case ABIArgInfo::Ignore:
case ABIArgInfo::InAlloca:
// ignore and inalloca don't have matching LLVM parameters.
IRArgs.NumberOfArgs = 0;
break;
case ABIArgInfo::Expand: {
IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
break;
}
}
if (IRArgs.NumberOfArgs > 0) {
IRArgs.FirstArgIndex = IRArgNo;
IRArgNo += IRArgs.NumberOfArgs;
}
// Skip over the sret parameter when it comes second. We already handled it
// above.
if (IRArgNo == 1 && SwapThisWithSRet)
IRArgNo++;
}
assert(ArgNo == ArgInfo.size());
if (FI.usesInAlloca())
InallocaArgNo = IRArgNo++;
TotalIRArgs = IRArgNo;
}
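// Illustrative sketch (assumed signature, not from the original source):
// for 'Big f(int a, double b)' where 'Big' is returned indirectly, the
// mapping would typically be: IR arg 0 = sret pointer, IR arg 1 = 'a',
// IR arg 2 = 'b'; so totalIRArgs() == 3, getSRetArgNo() == 0, and
// getIRArgs(0) == {1, 1}, getIRArgs(1) == {2, 1}.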
} // namespace
/***/
bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
return FI.getReturnInfo().isIndirect();
}
bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
return ReturnTypeUsesSRet(FI) &&
getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}
bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
switch (BT->getKind()) {
default:
return false;
case BuiltinType::Float:
return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
case BuiltinType::Double:
return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
case BuiltinType::LongDouble:
return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
}
}
return false;
}
bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
if (BT->getKind() == BuiltinType::LongDouble)
return getTarget().useObjCFP2RetForComplexLongDouble();
}
}
return false;
}
llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
return GetFunctionType(FI);
}
llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
(void)Inserted;
assert(Inserted && "Recursively being processed?");
llvm::Type *resultType = nullptr;
const ABIArgInfo &retAI = FI.getReturnInfo();
switch (retAI.getKind()) {
case ABIArgInfo::Expand:
llvm_unreachable("Invalid ABI kind for return argument");
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
resultType = retAI.getCoerceToType();
break;
case ABIArgInfo::InAlloca:
if (retAI.getInAllocaSRet()) {
// sret things on win32 aren't void, they return the sret pointer.
QualType ret = FI.getReturnType();
llvm::Type *ty = ConvertType(ret);
unsigned addressSpace = Context.getTargetAddressSpace(ret);
resultType = llvm::PointerType::get(ty, addressSpace);
} else {
resultType = llvm::Type::getVoidTy(getLLVMContext());
}
break;
case ABIArgInfo::Indirect: {
assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
resultType = llvm::Type::getVoidTy(getLLVMContext());
break;
}
case ABIArgInfo::Ignore:
resultType = llvm::Type::getVoidTy(getLLVMContext());
break;
}
ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
// Add type for sret argument.
if (IRFunctionArgs.hasSRetArg()) {
QualType Ret = FI.getReturnType();
llvm::Type *Ty = ConvertType(Ret);
unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
ArgTypes[IRFunctionArgs.getSRetArgNo()] =
llvm::PointerType::get(Ty, AddressSpace);
}
// Add type for inalloca argument.
if (IRFunctionArgs.hasInallocaArg()) {
auto ArgStruct = FI.getArgStruct();
assert(ArgStruct);
ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
}
// Add in all of the required arguments.
unsigned ArgNo = 0;
CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
ie = it + FI.getNumRequiredArgs();
for (; it != ie; ++it, ++ArgNo) {
const ABIArgInfo &ArgInfo = it->info;
// Insert a padding type to ensure proper alignment.
if (IRFunctionArgs.hasPaddingArg(ArgNo))
ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
ArgInfo.getPaddingType();
unsigned FirstIRArg, NumIRArgs;
std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
switch (ArgInfo.getKind()) {
case ABIArgInfo::Ignore:
case ABIArgInfo::InAlloca:
assert(NumIRArgs == 0);
break;
case ABIArgInfo::Indirect: {
assert(NumIRArgs == 1);
// indirect arguments are always on the stack, which is addr space #0.
llvm::Type *LTy = ConvertTypeForMem(it->type);
ArgTypes[FirstIRArg] = LTy->getPointerTo();
break;
}
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
// Fast-isel and the optimizer generally like scalar values better than
// FCAs, so we flatten them if this is safe to do for this argument.
llvm::Type *argType = ArgInfo.getCoerceToType();
llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
assert(NumIRArgs == st->getNumElements());
for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
ArgTypes[FirstIRArg + i] = st->getElementType(i);
} else {
assert(NumIRArgs == 1);
ArgTypes[FirstIRArg] = argType;
}
break;
}
case ABIArgInfo::Expand:
auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
getExpandedTypes(it->type, ArgTypesIter);
assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
break;
}
}
bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
assert(Erased && "Not in set?");
return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}
llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
if (!isFuncTypeConvertible(FPT))
return llvm::StructType::get(getLLVMContext());
const CGFunctionInfo *Info;
if (isa<CXXDestructorDecl>(MD))
Info =
&arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
else
Info = &arrangeCXXMethodDeclaration(MD);
return GetFunctionType(*Info);
}
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
const Decl *TargetDecl,
AttributeListType &PAL,
unsigned &CallingConv,
bool AttrOnCallSite) {
llvm::AttrBuilder FuncAttrs;
llvm::AttrBuilder RetAttrs;
bool HasOptnone = false;
CallingConv = FI.getEffectiveCallingConvention();
if (FI.isNoReturn())
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
// FIXME: handle sseregparm someday...
if (TargetDecl) {
if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
if (TargetDecl->hasAttr<NoThrowAttr>())
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
if (TargetDecl->hasAttr<NoReturnAttr>())
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
if (TargetDecl->hasAttr<NoDuplicateAttr>())
FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
if (FPT && FPT->isNothrow(getContext()))
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
// Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
// These attributes are not inherited by overloads.
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
}
// 'const' and 'pure' attribute functions are also nounwind.
if (TargetDecl->hasAttr<ConstAttr>()) {
FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
} else if (TargetDecl->hasAttr<PureAttr>()) {
FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}
if (TargetDecl->hasAttr<RestrictAttr>())
RetAttrs.addAttribute(llvm::Attribute::NoAlias);
if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
RetAttrs.addAttribute(llvm::Attribute::NonNull);
HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
}
// OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
if (!HasOptnone) {
if (CodeGenOpts.OptimizeSize)
FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
if (CodeGenOpts.OptimizeSize == 2)
FuncAttrs.addAttribute(llvm::Attribute::MinSize);
}
if (CodeGenOpts.DisableRedZone)
FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
if (CodeGenOpts.NoImplicitFloat)
FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
if (CodeGenOpts.EnableSegmentedStacks &&
!(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
FuncAttrs.addAttribute("split-stack");
if (AttrOnCallSite) {
// Attributes that should go on the call site only.
if (!CodeGenOpts.SimplifyLibCalls)
FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
if (!CodeGenOpts.TrapFuncName.empty())
FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
} else if (!getLangOpts().HLSL) {
// Attributes that should go on the function, but not the call site.
if (!CodeGenOpts.DisableFPElim) {
FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
} else if (CodeGenOpts.OmitLeafFramePointer) {
FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
} else {
FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
}
FuncAttrs.addAttribute("disable-tail-calls",
llvm::toStringRef(CodeGenOpts.DisableTailCalls));
FuncAttrs.addAttribute("less-precise-fpmad",
llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
FuncAttrs.addAttribute("no-infs-fp-math",
llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
FuncAttrs.addAttribute("no-nans-fp-math",
llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
FuncAttrs.addAttribute("unsafe-fp-math",
llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
FuncAttrs.addAttribute("use-soft-float",
llvm::toStringRef(CodeGenOpts.SoftFloat));
FuncAttrs.addAttribute("stack-protector-buffer-size",
llvm::utostr(CodeGenOpts.SSPBufferSize));
if (!CodeGenOpts.StackRealignment)
FuncAttrs.addAttribute("no-realign-stack");
// Add target-cpu and target-features attributes to functions. If
// we have a decl for the function and it has a target attribute then
// parse that and add it to the feature set.
StringRef TargetCPU = getTarget().getTargetOpts().CPU;
// TODO: Features gets us the features on the command line including
// feature dependencies. For canonicalization purposes we might want to
// avoid putting features in the target-features set if we know it'll be
// one of the default features in the backend, e.g. corei7-avx and +avx or
// figure out non-explicit dependencies.
// Canonicalize the existing features in a new feature map.
// TODO: Migrate the existing backends to keep the map around rather than
// the vector.
llvm::StringMap<bool> FeatureMap;
for (auto F : getTarget().getTargetOpts().Features) {
const char *Name = F.c_str();
bool Enabled = Name[0] == '+';
getTarget().setFeatureEnabled(FeatureMap, Name + 1, Enabled);
}
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
if (FD) {
if (const auto *TD = FD->getAttr<TargetAttr>()) {
StringRef FeaturesStr = TD->getFeatures();
SmallVector<StringRef, 1> AttrFeatures;
FeaturesStr.split(AttrFeatures, ",");
// Grab the various features, prepend a "+" to turn the feature on for
// the backend, and add them to our existing set of features.
for (auto &Feature : AttrFeatures) {
// Go ahead and trim whitespace rather than either erroring or
// accepting it weirdly.
Feature = Feature.trim();
// While we're here iterating check for a different target cpu.
if (Feature.startswith("arch="))
TargetCPU = Feature.split("=").second.trim();
else if (Feature.startswith("tune="))
// We don't support cpu tuning this way currently.
;
else if (Feature.startswith("fpmath="))
// TODO: Support the fpmath option this way. It will require checking
// overall feature validity for the function with the rest of the
// attributes on the function.
;
else if (Feature.startswith("mno-"))
getTarget().setFeatureEnabled(FeatureMap, Feature.split("-").second,
false);
else
getTarget().setFeatureEnabled(FeatureMap, Feature, true);
}
}
}
// Produce the canonical string for this set of features.
std::vector<std::string> Features;
for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
ie = FeatureMap.end();
it != ie; ++it)
Features.push_back((it->second ? "+" : "-") + it->first().str());
// Now add the target-cpu and target-features to the function.
if (TargetCPU != "")
FuncAttrs.addAttribute("target-cpu", TargetCPU);
if (!Features.empty()) {
std::sort(Features.begin(), Features.end());
FuncAttrs.addAttribute("target-features",
llvm::join(Features.begin(), Features.end(), ","));
}
}
ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
QualType RetTy = FI.getReturnType();
const ABIArgInfo &RetAI = FI.getReturnInfo();
switch (RetAI.getKind()) {
case ABIArgInfo::Extend:
if (RetTy->hasSignedIntegerRepresentation())
RetAttrs.addAttribute(llvm::Attribute::SExt);
else if (RetTy->hasUnsignedIntegerRepresentation())
RetAttrs.addAttribute(llvm::Attribute::ZExt);
LLVM_FALLTHROUGH; // HLSL Change
case ABIArgInfo::Direct:
if (RetAI.getInReg())
RetAttrs.addAttribute(llvm::Attribute::InReg);
break;
case ABIArgInfo::Ignore:
break;
case ABIArgInfo::InAlloca:
case ABIArgInfo::Indirect: {
// inalloca and sret disable readnone and readonly
FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
.removeAttribute(llvm::Attribute::ReadNone);
break;
}
case ABIArgInfo::Expand:
llvm_unreachable("Invalid ABI kind for return argument");
}
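  // A returned reference always refers to a valid object: mark it
  // dereferenceable when the pointee size is known at compile time, and
  // nonnull (in addrspace(0)) otherwise.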
if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
QualType PTy = RefTy->getPointeeType();
if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
.getQuantity());
else if (getContext().getTargetAddressSpace(PTy) == 0)
RetAttrs.addAttribute(llvm::Attribute::NonNull);
}
// Attach return attributes.
if (RetAttrs.hasAttributes()) {
PAL.push_back(llvm::AttributeSet::get(
getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
}
// Attach attributes to sret.
if (IRFunctionArgs.hasSRetArg()) {
llvm::AttrBuilder SRETAttrs;
SRETAttrs.addAttribute(llvm::Attribute::StructRet);
if (RetAI.getInReg())
SRETAttrs.addAttribute(llvm::Attribute::InReg);
PAL.push_back(llvm::AttributeSet::get(
getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
}
// Attach attributes to inalloca argument.
if (IRFunctionArgs.hasInallocaArg()) {
llvm::AttrBuilder Attrs;
Attrs.addAttribute(llvm::Attribute::InAlloca);
PAL.push_back(llvm::AttributeSet::get(
getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
}
unsigned ArgNo = 0;
for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
E = FI.arg_end();
I != E; ++I, ++ArgNo) {
QualType ParamType = I->type;
const ABIArgInfo &AI = I->info;
llvm::AttrBuilder Attrs;
// Add attribute for padding argument, if necessary.
if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
if (AI.getPaddingInReg())
PAL.push_back(llvm::AttributeSet::get(
getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
llvm::Attribute::InReg));
}
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
// have the corresponding parameter variable. It doesn't make
// sense to do it here because parameters are so messed up.
switch (AI.getKind()) {
case ABIArgInfo::Extend:
if (ParamType->isSignedIntegerOrEnumerationType())
Attrs.addAttribute(llvm::Attribute::SExt);
else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
Attrs.addAttribute(llvm::Attribute::SExt);
else
Attrs.addAttribute(llvm::Attribute::ZExt);
}
LLVM_FALLTHROUGH; // HLSL Change
case ABIArgInfo::Direct:
if (ArgNo == 0 && FI.isChainCall())
Attrs.addAttribute(llvm::Attribute::Nest);
else if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
break;
case ABIArgInfo::Indirect:
if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
if (AI.getIndirectByVal())
Attrs.addAttribute(llvm::Attribute::ByVal);
Attrs.addAlignmentAttr(AI.getIndirectAlign());
// byval disables readnone and readonly.
FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
.removeAttribute(llvm::Attribute::ReadNone);
break;
case ABIArgInfo::Ignore:
case ABIArgInfo::Expand:
continue;
case ABIArgInfo::InAlloca:
// inalloca disables readnone and readonly.
FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
.removeAttribute(llvm::Attribute::ReadNone);
continue;
}
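    // As with the return value, a reference parameter is dereferenceable when
    // the pointee size is known, and otherwise nonnull in addrspace(0).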
if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
QualType PTy = RefTy->getPointeeType();
if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
.getQuantity());
else if (getContext().getTargetAddressSpace(PTy) == 0)
Attrs.addAttribute(llvm::Attribute::NonNull);
}
if (Attrs.hasAttributes()) {
unsigned FirstIRArg, NumIRArgs;
std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
for (unsigned i = 0; i < NumIRArgs; i++)
PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
FirstIRArg + i + 1, Attrs));
}
}
assert(ArgNo == FI.arg_size());
if (FuncAttrs.hasAttributes())
PAL.push_back(llvm::
AttributeSet::get(getLLVMContext(),
llvm::AttributeSet::FunctionIndex,
FuncAttrs));
}
/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
const VarDecl *var,
llvm::Value *value) {
llvm::Type *varType = CGF.ConvertType(var->getType());
// This can happen with promotions that actually don't change the
// underlying type, like the enum promotions.
if (value->getType() == varType) return value;
assert((varType->isIntegerTy() || varType->isFloatingPointTy())
&& "unexpected promotion type");
if (isa<llvm::IntegerType>(varType))
return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
/// Returns the attribute (either parameter attribute, or function
/// attribute), which declares argument ArgNo to be non-null.
static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
QualType ArgType, unsigned ArgNo) {
// FIXME: __attribute__((nonnull)) can also be applied to:
// - references to pointers, where the pointee is known to be
// nonnull (apparently a Clang extension)
// - transparent unions containing pointers
// In the former case, LLVM IR cannot represent the constraint. In
// the latter case, we have no guarantee that the transparent union
// is in fact passed as a pointer.
if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
return nullptr;
// First, check attribute on parameter itself.
if (PVD) {
if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
return ParmNNAttr;
}
// Check function attributes.
if (!FD)
return nullptr;
for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
if (NNAttr->isNonNull(ArgNo))
return NNAttr;
}
return nullptr;
}
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Function *Fn,
const FunctionArgList &Args) {
if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
// Naked functions don't have prologues.
return;
// If this is an implicit-return-zero function, go ahead and
// initialize the return value. TODO: it might be nice to have
// a more general mechanism for this that didn't require synthesized
// return statements.
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
if (FD->hasImplicitReturnZero()) {
QualType RetTy = FD->getReturnType().getUnqualifiedType();
llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
Builder.CreateStore(Zero, ReturnValue);
}
}
// FIXME: We no longer need the types from FunctionArgList; lift up and
// simplify.
ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
// Flattened function arguments.
SmallVector<llvm::Argument *, 16> FnArgs;
FnArgs.reserve(IRFunctionArgs.totalIRArgs());
for (auto &Arg : Fn->args()) {
FnArgs.push_back(&Arg);
}
assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
// If we're using inalloca, all the memory arguments are GEPs off of the last
// parameter, which is a pointer to the complete memory area.
llvm::Value *ArgStruct = nullptr;
if (IRFunctionArgs.hasInallocaArg()) {
ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()];
assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
}
// Name the struct return parameter.
if (IRFunctionArgs.hasSRetArg()) {
auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
AI->setName("agg.result");
AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
llvm::Attribute::NoAlias));
}
  // Track if we received the parameter as a pointer (indirect, byval, or
  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy
  // it into a local alloca for us.
enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
SmallVector<ValueAndIsPtr, 16> ArgVals;
ArgVals.reserve(Args.size());
// Create a pointer value for every parameter declaration. This usually
// entails copying one or more LLVM IR arguments into an alloca. Don't push
// any cleanups or do anything that might unwind. We do that separately, so
// we can push the cleanups in the correct order for the ABI.
assert(FI.arg_size() == Args.size() &&
"Mismatch between function signature & arguments.");
unsigned ArgNo = 0;
CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i, ++info_it, ++ArgNo) {
const VarDecl *Arg = *i;
QualType Ty = info_it->type;
const ABIArgInfo &ArgI = info_it->info;
bool isPromoted = !getLangOpts().HLSL && // HLSL Change - no knr promotion in HLSL
isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
unsigned FirstIRArg, NumIRArgs;
std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
switch (ArgI.getKind()) {
case ABIArgInfo::InAlloca: {
assert(NumIRArgs == 0);
llvm::Value *V =
Builder.CreateStructGEP(FI.getArgStruct(), ArgStruct,
ArgI.getInAllocaFieldIndex(), Arg->getName());
ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
break;
}
case ABIArgInfo::Indirect: {
assert(NumIRArgs == 1);
llvm::Value *V = FnArgs[FirstIRArg];
if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference. All we
        // need to do is realign the value, if requested.
if (ArgI.getIndirectRealign()) {
llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
// Copy from the incoming argument pointer to the temporary with the
// appropriate alignment.
//
// FIXME: We should have a common utility for generating an aggregate
// copy.
llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
CharUnits Size = getContext().getTypeSizeInChars(Ty);
llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
Builder.CreateMemCpy(Dst,
Src,
llvm::ConstantInt::get(IntPtrTy,
Size.getQuantity()),
ArgI.getIndirectAlign(),
false);
V = AlignedTemp;
}
ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
} else {
// Load scalar value from indirect argument.
V = EmitLoadOfScalar(V, false, ArgI.getIndirectAlign(), Ty,
Arg->getLocStart());
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
}
break;
}
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
// HLSL Change Begins
if (hlsl::IsHLSLMatType(Ty)) {
assert(NumIRArgs == 1);
auto AI = FnArgs[FirstIRArg];
llvm::Value *V = AI;
ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
break;
}
// HLSL Change Ends
// If we have the trivial case, handle it with no muss and fuss.
if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
ArgI.getCoerceToType() == ConvertType(Ty) &&
ArgI.getDirectOffset() == 0) {
assert(NumIRArgs == 1);
auto AI = FnArgs[FirstIRArg];
llvm::Value *V = AI;
if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
PVD->getFunctionScopeIndex()))
AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
AI->getArgNo() + 1,
llvm::Attribute::NonNull));
QualType OTy = PVD->getOriginalType();
if (const auto *ArrTy =
getContext().getAsConstantArrayType(OTy)) {
// A C99 array parameter declaration with the static keyword also
// indicates dereferenceability, and if the size is constant we can
// use the dereferenceable attribute (which requires the size in
// bytes).
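            // For example, 'void f(int a[static 4])' yields dereferenceable(16)
            // on 'a' for a target with 4-byte ints.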
if (ArrTy->getSizeModifier() == ArrayType::Static) {
QualType ETy = ArrTy->getElementType();
uint64_t ArrSize = ArrTy->getSize().getZExtValue();
if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
ArrSize) {
llvm::AttrBuilder Attrs;
Attrs.addDereferenceableAttr(
getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
AI->getArgNo() + 1, Attrs));
} else if (getContext().getTargetAddressSpace(ETy) == 0) {
AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
AI->getArgNo() + 1,
llvm::Attribute::NonNull));
}
}
} else if (const auto *ArrTy =
getContext().getAsVariableArrayType(OTy)) {
// For C99 VLAs with the static keyword, we don't know the size so
// we can't use the dereferenceable attribute, but in addrspace(0)
// we know that it must be nonnull.
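            // For example, 'void f(int n, int a[static n])' still implies a
            // nonnull 'a' even though the byte count is unknown.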
if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
!getContext().getTargetAddressSpace(ArrTy->getElementType()))
AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
AI->getArgNo() + 1,
llvm::Attribute::NonNull));
}
const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
if (!AVAttr)
if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
if (AVAttr) {
llvm::Value *AlignmentValue =
EmitScalarExpr(AVAttr->getAlignment());
llvm::ConstantInt *AlignmentCI =
cast<llvm::ConstantInt>(AlignmentValue);
unsigned Alignment =
std::min((unsigned) AlignmentCI->getZExtValue(),
+llvm::Value::MaximumAlignment);
llvm::AttrBuilder Attrs;
Attrs.addAlignmentAttr(Alignment);
AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
AI->getArgNo() + 1, Attrs));
}
}
if (Arg->getType().isRestrictQualified())
AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
AI->getArgNo() + 1,
llvm::Attribute::NoAlias));
// Ensure the argument is the correct type.
if (V->getType() != ArgI.getCoerceToType())
V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
if (const CXXMethodDecl *MD =
dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
if (MD->isVirtual() && Arg == CXXABIThisDecl)
V = CGM.getCXXABI().
adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
}
// Because of merging of function types from multiple decls it is
// possible for the type of an argument to not match the corresponding
// type in the function type. Since we are codegening the callee
// in here, add a cast to the argument type.
llvm::Type *LTy = ConvertType(Arg->getType());
if (V->getType() != LTy)
V = Builder.CreateBitCast(V, LTy);
ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
break;
}
llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
// The alignment we need to use is the max of the requested alignment for
// the argument plus the alignment required by our access code below.
unsigned AlignmentToUse =
CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
AlignmentToUse = std::max(AlignmentToUse,
(unsigned)getContext().getDeclAlign(Arg).getQuantity());
Alloca->setAlignment(AlignmentToUse);
llvm::Value *V = Alloca;
llvm::Value *Ptr = V; // Pointer to store into.
CharUnits PtrAlign = CharUnits::fromQuantity(AlignmentToUse);
// If the value is offset in memory, apply the offset now.
if (unsigned Offs = ArgI.getDirectOffset()) {
Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
Ptr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), Ptr, Offs);
Ptr = Builder.CreateBitCast(Ptr,
llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
PtrAlign = PtrAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
}
// Fast-isel and the optimizer generally like scalar values better than
// FCAs, so we flatten them if this is safe to do for this argument.
llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
STy->getNumElements() > 1) {
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
llvm::Type *DstTy =
cast<llvm::PointerType>(Ptr->getType())->getElementType();
uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
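        // If the flattened pieces fit in the alloca, store them into it
        // directly; otherwise stage them in a correctly-typed temporary and
        // memcpy only DstSize bytes into the alloca.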
if (SrcSize <= DstSize) {
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
assert(STy->getNumElements() == NumIRArgs);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
auto AI = FnArgs[FirstIRArg + i];
AI->setName(Arg->getName() + ".coerce" + Twine(i));
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, Ptr, 0, i);
Builder.CreateStore(AI, EltPtr);
}
} else {
llvm::AllocaInst *TempAlloca =
CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
TempAlloca->setAlignment(AlignmentToUse);
llvm::Value *TempV = TempAlloca;
assert(STy->getNumElements() == NumIRArgs);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
auto AI = FnArgs[FirstIRArg + i];
AI->setName(Arg->getName() + ".coerce" + Twine(i));
llvm::Value *EltPtr =
Builder.CreateConstGEP2_32(ArgI.getCoerceToType(), TempV, 0, i);
Builder.CreateStore(AI, EltPtr);
}
Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
}
} else {
// Simple case, just do a coerced store of the argument into the alloca.
assert(NumIRArgs == 1);
auto AI = FnArgs[FirstIRArg];
AI->setName(Arg->getName() + ".coerce");
CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, PtrAlign, *this);
}
// Match to what EmitParmDecl is expecting for this type.
if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart());
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
} else {
ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
}
break;
}
case ABIArgInfo::Expand: {
// If this structure was expanded into multiple arguments then
// we need to create a temporary and reconstruct it from the
// arguments.
llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
CharUnits Align = getContext().getDeclAlign(Arg);
Alloca->setAlignment(Align.getQuantity());
LValue LV = MakeAddrLValue(Alloca, Ty, Align);
ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));
auto FnArgIter = FnArgs.begin() + FirstIRArg;
ExpandTypeFromArgs(Ty, LV, FnArgIter);
assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
auto AI = FnArgs[FirstIRArg + i];
AI->setName(Arg->getName() + "." + Twine(i));
}
break;
}
case ABIArgInfo::Ignore:
assert(NumIRArgs == 0);
// Initialize the local variable appropriately.
if (!hasScalarEvaluationKind(Ty)) {
ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
} else {
llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
}
break;
}
}
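  // Emit the parameter declarations, and with them the cleanups, in the order
  // the ABI expects: reversed when the callee destroys arguments left to
  // right (the MS C++ ABI), source order otherwise.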
if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
for (int I = Args.size() - 1; I >= 0; --I)
EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
I + 1);
} else {
for (unsigned I = 0, E = Args.size(); I != E; ++I)
EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
I + 1);
}
// HLSL Change Begins.
if (getLangOpts().HLSL) {
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
CGM.getHLSLRuntime().EmitHLSLFunctionProlog(Fn, FD);
}
}
// HLSL Change Ends.
}
#if 0 // HLSL Change Start - no ObjC support
static void eraseUnusedBitCasts(llvm::Instruction *insn) {
while (insn->use_empty()) {
llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
if (!bitcast) return;
// This is "safe" because we would have used a ConstantExpr otherwise.
insn = cast<llvm::Instruction>(bitcast->getOperand(0));
bitcast->eraseFromParent();
}
}
/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
llvm::Value *result) {
  // The insertion point must immediately follow the result (the cast).
llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
if (BB->empty()) return nullptr;
if (&BB->back() != result) return nullptr;
llvm::Type *resultType = result->getType();
// result is in a BasicBlock and is therefore an Instruction.
llvm::Instruction *generator = cast<llvm::Instruction>(result);
SmallVector<llvm::Instruction*,4> insnsToKill;
// Look for:
// %generator = bitcast %type1* %generator2 to %type2*
while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
// We would have emitted this as a constant if the operand weren't
// an Instruction.
generator = cast<llvm::Instruction>(bitcast->getOperand(0));
// Require the generator to be immediately followed by the cast.
if (generator->getNextNode() != bitcast)
return nullptr;
insnsToKill.push_back(bitcast);
}
// Look for:
// %generator = call i8* @objc_retain(i8* %originalResult)
// or
// %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
if (!call) return nullptr;
bool doRetainAutorelease;
if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
doRetainAutorelease = true;
} else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
.objc_retainAutoreleasedReturnValue) {
doRetainAutorelease = false;
// If we emitted an assembly marker for this call (and the
// ARCEntrypoints field should have been set if so), go looking
// for that call. If we can't find it, we can't do this
// optimization. But it should always be the immediately previous
// instruction, unless we needed bitcasts around the call.
if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
llvm::Instruction *prev = call->getPrevNode();
assert(prev);
if (isa<llvm::BitCastInst>(prev)) {
prev = prev->getPrevNode();
assert(prev);
}
assert(isa<llvm::CallInst>(prev));
assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
insnsToKill.push_back(prev);
}
} else {
return nullptr;
}
result = call->getArgOperand(0);
insnsToKill.push_back(call);
// Keep killing bitcasts, for sanity. Note that we no longer care
// about precise ordering as long as there's exactly one use.
while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
if (!bitcast->hasOneUse()) break;
insnsToKill.push_back(bitcast);
result = bitcast->getOperand(0);
}
// Delete all the unnecessary instructions, from latest to earliest.
for (SmallVectorImpl<llvm::Instruction*>::iterator
i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
(*i)->eraseFromParent();
// Do the fused retain/autorelease if we were asked to.
if (doRetainAutorelease)
result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
// Cast back to the result type.
return CGF.Builder.CreateBitCast(result, resultType);
}
/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
llvm::Value *result) {
// This is only applicable to a method with an immutable 'self'.
const ObjCMethodDecl *method =
dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
if (!method) return nullptr;
const VarDecl *self = method->getSelfDecl();
if (!self->getType().isConstQualified()) return nullptr;
// Look for a retain call.
llvm::CallInst *retainCall =
dyn_cast<llvm::CallInst>(result->stripPointerCasts());
if (!retainCall ||
retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
return nullptr;
// Look for an ordinary load of 'self'.
llvm::Value *retainedValue = retainCall->getArgOperand(0);
llvm::LoadInst *load =
dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
if (!load || load->isAtomic() || load->isVolatile() ||
load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
return nullptr;
// Okay! Burn it all down. This relies for correctness on the
// assumption that the retain is emitted as part of the return and
// that thereafter everything is used "linearly".
llvm::Type *resultType = result->getType();
eraseUnusedBitCasts(cast<llvm::Instruction>(result));
assert(retainCall->use_empty());
retainCall->eraseFromParent();
eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
return CGF.Builder.CreateBitCast(load, resultType);
}
/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
llvm::Value *result) {
// If we're returning 'self', kill the initial retain. This is a
// heuristic attempt to "encourage correctness" in the really unfortunate
// case where we have a return of self during a dealloc and we desperately
// need to avoid the possible autorelease.
if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
return self;
// At -O0, try to emit a fused retain/autorelease.
if (CGF.shouldUseFusedARCCalls())
if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
return fused;
return CGF.EmitARCAutoreleaseReturnValue(result);
}
#endif // HLSL Change Ends - no ObjC support
/// Heuristically search for a dominating store to the return-value slot.
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
// If there are multiple uses of the return-value slot, just check
// for something immediately preceding the IP. Sometimes this can
// happen with how we generate implicit-returns; it can also happen
// with noreturn cleanups.
if (!CGF.ReturnValue->hasOneUse()) {
llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
if (IP->empty()) return nullptr;
llvm::Instruction *I = &IP->back();
// Skip lifetime markers
for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
IE = IP->rend();
II != IE; ++II) {
if (llvm::IntrinsicInst *Intrinsic =
dyn_cast<llvm::IntrinsicInst>(&*II)) {
if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
++II;
if (II == IE)
break;
if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
continue;
}
}
I = &*II;
break;
}
llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(I);
if (!store) return nullptr;
if (store->getPointerOperand() != CGF.ReturnValue) return nullptr;
assert(!store->isAtomic() && !store->isVolatile()); // see below
return store;
}
llvm::StoreInst *store =
dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back());
if (!store) return nullptr;
// These aren't actually possible for non-coerced returns, and we
// only care about non-coerced returns on this code path.
assert(!store->isAtomic() && !store->isVolatile());
  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessors chain from the current insertion point.
llvm::BasicBlock *StoreBB = store->getParent();
llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
while (IP != StoreBB) {
if (!(IP = IP->getSinglePredecessor()))
return nullptr;
}
// Okay, the store's basic block dominates the insertion point; we
// can do our thing.
return store;
}
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
bool EmitRetDbgLoc,
SourceLocation EndLoc) {
if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
// Naked functions don't have epilogues.
Builder.CreateUnreachable();
return;
}
// Functions with no result always return void.
if (!ReturnValue) {
Builder.CreateRetVoid();
return;
}
llvm::DebugLoc RetDbgLoc;
llvm::Value *RV = nullptr;
QualType RetTy = FI.getReturnType();
const ABIArgInfo &RetAI = FI.getReturnInfo();
switch (RetAI.getKind()) {
case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination. Sometimes we
// need to return the sret value in a register, though.
assert(hasAggregateEvaluationKind(RetTy));
if (RetAI.getInAllocaSRet()) {
llvm::Function::arg_iterator EI = CurFn->arg_end();
--EI;
llvm::Value *ArgStruct = EI;
llvm::Value *SRet = Builder.CreateStructGEP(
nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
RV = Builder.CreateLoad(SRet, "sret");
}
break;
case ABIArgInfo::Indirect: {
auto AI = CurFn->arg_begin();
if (RetAI.isSRetAfterThis())
++AI;
switch (getEvaluationKind(RetTy)) {
case TEK_Complex: {
ComplexPairTy RT =
EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
EndLoc);
EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(AI, RetTy),
/*isInit*/ true);
break;
}
case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
break;
case TEK_Scalar:
EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
MakeNaturalAlignAddrLValue(AI, RetTy),
/*isInit*/ true);
break;
}
break;
}
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
RetAI.getDirectOffset() == 0) {
// HLSL Change Begin.
// If optimization is disabled, just load return value.
if (CGM.getCodeGenOpts().DisableLLVMOpts) {
// HLSL Change Begins
if (hlsl::IsHLSLMatType(RetTy))
RV = CGM.getHLSLRuntime().EmitHLSLMatrixLoad(*this, ReturnValue,
FnRetTy); // FnRetTy retains attributed type
else
// HLSL Change Ends
RV = Builder.CreateLoad(ReturnValue);
} else {
// HLSL Change End.
      // The internal return value temp will always have
      // pointer-to-return-type type; just do a load.
// If there is a dominating store to ReturnValue, we can elide
// the load, zap the store, and usually zap the alloca.
if (llvm::StoreInst *SI =
findDominatingStoreToReturnValue(*this)) {
// Reuse the debug location from the store unless there is
// cleanup code to be emitted between the store and return
// instruction.
if (EmitRetDbgLoc && !AutoreleaseResult)
RetDbgLoc = SI->getDebugLoc();
// Get the stored value and nuke the now-dead store.
RV = SI->getValueOperand();
SI->eraseFromParent();
// If that was the only use of the return value, nuke it as well now.
if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
ReturnValue = nullptr;
}
// Otherwise, we have to do a simple load.
} else {
// HLSL Change Begins
if (hlsl::IsHLSLMatType(RetTy))
RV = CGM.getHLSLRuntime().EmitHLSLMatrixLoad(*this, ReturnValue,
FnRetTy); // FnRetTy retains attributed type
else
// HLSL Change Ends
RV = Builder.CreateLoad(ReturnValue);
}
} // HLSL Change
} else {
llvm::Value *V = ReturnValue;
CharUnits Align = getContext().getTypeAlignInChars(RetTy);
// If the value is offset in memory, apply the offset now.
if (unsigned Offs = RetAI.getDirectOffset()) {
V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
V = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), V, Offs);
V = Builder.CreateBitCast(V,
llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
Align = Align.alignmentAtOffset(CharUnits::fromQuantity(Offs));
}
RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), Align, *this);
}
// In ARC, end functions that return a retainable type with a call
// to objc_autoreleaseReturnValue.
#if 0 // HLSL Change - no ObjC support
if (AutoreleaseResult) {
assert(getLangOpts().ObjCAutoRefCount &&
!FI.isReturnsRetained() &&
RetTy->isObjCRetainableType());
RV = emitAutoreleaseOfResult(*this, RV);
}
#else
assert(!AutoreleaseResult && "autorelease not supported in HLSL");
#endif // HLSL Change - no ObjC support
break;
case ABIArgInfo::Ignore:
break;
case ABIArgInfo::Expand:
llvm_unreachable("Invalid ABI kind for return argument");
}
llvm::Instruction *Ret;
if (RV) {
if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
if (auto RetNNAttr = CurGD.getDecl()->getAttr<ReturnsNonNullAttr>()) {
SanitizerScope SanScope(this);
llvm::Value *Cond = Builder.CreateICmpNE(
RV, llvm::Constant::getNullValue(RV->getType()));
llvm::Constant *StaticData[] = {
EmitCheckSourceLocation(EndLoc),
EmitCheckSourceLocation(RetNNAttr->getLocation()),
};
EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
"nonnull_return", StaticData, None);
}
}
Ret = Builder.CreateRet(RV);
} else {
Ret = Builder.CreateRetVoid();
}
if (RetDbgLoc)
Ret->setDebugLoc(std::move(RetDbgLoc));
}
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) {
// FIXME: Generate IR in one pass, rather than going back and fixing up these
// placeholders.
llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
llvm::Value *Placeholder =
llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
Placeholder = CGF.Builder.CreateLoad(Placeholder);
return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(),
Ty.getQualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
}
void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
const VarDecl *param,
SourceLocation loc) {
// StartFunction converted the ABI-lowered parameter(s) into a
// local alloca. We need to turn that into an r-value suitable
// for EmitCall.
llvm::Value *local = GetAddrOfLocalVar(param);
QualType type = param->getType();
// For the most part, we just need to load the alloca, except:
// 1) aggregate r-values are actually pointers to temporaries, and
// 2) references to non-scalars are pointers directly to the aggregate.
// I don't know why references to scalars are different here.
if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
if (!hasScalarEvaluationKind(ref->getPointeeType()))
return args.add(RValue::getAggregate(local), type);
// Locals which are references to scalars are represented
// with allocas holding the pointer.
return args.add(RValue::get(Builder.CreateLoad(local)), type);
}
assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
"cannot emit delegate call arguments for inalloca arguments!");
args.add(convertTempToRValue(local, type, loc), type);
}
#if 0 // HLSL Change - no ObjC support
static bool isProvablyNull(llvm::Value *addr) {
return isa<llvm::ConstantPointerNull>(addr);
}
static bool isProvablyNonNull(llvm::Value *addr) {
return isa<llvm::AllocaInst>(addr);
}
/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
const CallArgList::Writeback &writeback) {
const LValue &srcLV = writeback.Source;
llvm::Value *srcAddr = srcLV.getAddress();
assert(!isProvablyNull(srcAddr) &&
"shouldn't have writeback for provably null argument");
llvm::BasicBlock *contBB = nullptr;
// If the argument wasn't provably non-null, we need to null check
// before doing the store.
bool provablyNonNull = isProvablyNonNull(srcAddr);
if (!provablyNonNull) {
llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
contBB = CGF.createBasicBlock("icr.done");
llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
CGF.EmitBlock(writebackBB);
}
// Load the value to writeback.
llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
// Cast it back, in case we're writing an id to a Foo* or something.
value = CGF.Builder.CreateBitCast(value,
cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
"icr.writeback-cast");
// Perform the writeback.
// If we have a "to use" value, it's something we need to emit a use
// of. This has to be carefully threaded in: if it's done after the
// release it's potentially undefined behavior (and the optimizer
// will ignore it), and if it happens before the retain then the
// optimizer could move the release there.
if (writeback.ToUse) {
assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
// Retain the new value. No need to block-copy here: the block's
// being passed up the stack.
value = CGF.EmitARCRetainNonBlock(value);
// Emit the intrinsic use here.
CGF.EmitARCIntrinsicUse(writeback.ToUse);
// Load the old value (primitively).
llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
// Put the new value in place (primitively).
CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
// Release the old value.
CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
// Otherwise, we can just do a normal lvalue store.
} else {
CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
}
// Jump to the continuation block.
if (!provablyNonNull)
CGF.EmitBlock(contBB);
}
static void emitWritebacks(CodeGenFunction &CGF,
const CallArgList &args) {
for (const auto &I : args.writebacks())
emitWriteback(CGF, I);
}
#endif // HLSL Change - no ObjC support
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
const CallArgList &CallArgs) {
assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
ArrayRef<CallArgList::CallArgCleanup> Cleanups =
CallArgs.getCleanupsToDeactivate();
// Iterate in reverse to increase the likelihood of popping the cleanup.
for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
I->IsActiveIP->eraseFromParent();
}
}
#if 0 // HLSL Change - no ObjC support
static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
if (uop->getOpcode() == UO_AddrOf)
return uop->getSubExpr();
return nullptr;
}
/// Emit an argument that's being passed call-by-writeback. That is,
/// we are passing the address of a temporary; the temporary may be
/// copy-initialized from the source l-value, and its value is written
/// back to that l-value after the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
const ObjCIndirectCopyRestoreExpr *CRE) {
LValue srcLV;
// Make an optimistic effort to emit the address as an l-value.
// This can fail if the argument expression is more complicated.
if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
srcLV = CGF.EmitLValue(lvExpr);
// Otherwise, just emit it as a scalar.
} else {
llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
QualType srcAddrType =
CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
}
llvm::Value *srcAddr = srcLV.getAddress();
// The dest and src types don't necessarily match in LLVM terms
// because of the crazy ObjC compatibility rules.
llvm::PointerType *destType =
cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
// If the address is a constant null, just pass the appropriate null.
if (isProvablyNull(srcAddr)) {
args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
CRE->getType());
return;
}
// Create the temporary.
llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
"icr.temp");
// Loading an l-value can introduce a cleanup if the l-value is __weak,
// and that cleanup will be conditional if we can't prove that the l-value
// isn't null, so we need to register a dominating point so that the cleanups
// system will make valid IR.
CodeGenFunction::ConditionalEvaluation condEval(CGF);
// Zero-initialize it if we're not doing a copy-initialization.
bool shouldCopy = CRE->shouldCopy();
if (!shouldCopy) {
llvm::Value *null =
llvm::ConstantPointerNull::get(
cast<llvm::PointerType>(destType->getElementType()));
CGF.Builder.CreateStore(null, temp);
}
llvm::BasicBlock *contBB = nullptr;
llvm::BasicBlock *originBB = nullptr;
// If the address is *not* known to be non-null, we need to switch.
llvm::Value *finalArgument;
bool provablyNonNull = isProvablyNonNull(srcAddr);
if (provablyNonNull) {
finalArgument = temp;
} else {
llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
finalArgument = CGF.Builder.CreateSelect(isNull,
llvm::ConstantPointerNull::get(destType),
temp, "icr.argument");
// If we need to copy, then the load has to be conditional, which
// means we need control flow.
if (shouldCopy) {
originBB = CGF.Builder.GetInsertBlock();
contBB = CGF.createBasicBlock("icr.cont");
llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
CGF.EmitBlock(copyBB);
condEval.begin(CGF);
}
}
llvm::Value *valueToUse = nullptr;
// Perform a copy if necessary.
if (shouldCopy) {
RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
assert(srcRV.isScalar());
llvm::Value *src = srcRV.getScalarVal();
src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
"icr.cast");
// Use an ordinary store, not a store-to-lvalue.
CGF.Builder.CreateStore(src, temp);
// If optimization is enabled, and the value was held in a
// __strong variable, we need to tell the optimizer that this
// value has to stay alive until we're doing the store back.
// This is because the temporary is effectively unretained,
// and so otherwise we can violate the high-level semantics.
if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
valueToUse = src;
}
}
// Finish the control flow if we needed it.
if (shouldCopy && !provablyNonNull) {
llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
CGF.EmitBlock(contBB);
// Make a phi for the value to intrinsically use.
if (valueToUse) {
llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
"icr.to-use");
phiToUse->addIncoming(valueToUse, copyBB);
phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
originBB);
valueToUse = phiToUse;
}
condEval.end(CGF);
}
args.addWriteback(srcLV, temp, valueToUse);
args.add(RValue::get(finalArgument), CRE->getType());
}
#endif // HLSL Change - no ObjC support
void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
assert(!StackBase && !StackCleanup.isValid());
// Save the stack.
llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
// Control gets really tied up in landing pads, so we have to spill the
// stacksave to an alloca to avoid violating SSA form.
// TODO: This is dead if we never emit the cleanup. We should create the
// alloca and store lazily on the first cleanup emission.
StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem");
CGF.Builder.CreateStore(StackBase, StackBaseMem);
CGF.pushStackRestore(EHCleanup, StackBaseMem);
StackCleanup = CGF.EHStack.getInnermostEHScope();
assert(StackCleanup.isValid());
}
void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
if (StackBase) {
CGF.DeactivateCleanupBlock(StackCleanup, StackBase);
llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
// We could load StackBase from StackBaseMem, but in the non-exceptional
// case we can skip it.
CGF.Builder.CreateCall(F, StackBase);
}
}
void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
SourceLocation ArgLoc,
const FunctionDecl *FD,
unsigned ParmNum) {
if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
return;
auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
if (!NNAttr)
return;
SanitizerScope SanScope(this);
assert(RV.isScalar());
llvm::Value *V = RV.getScalarVal();
llvm::Value *Cond =
Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
llvm::Constant *StaticData[] = {
EmitCheckSourceLocation(ArgLoc),
EmitCheckSourceLocation(NNAttr->getLocation()),
llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
};
EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
"nonnull_arg", StaticData, None);
}
void CodeGenFunction::EmitCallArgs(CallArgList &Args,
ArrayRef<QualType> ArgTypes,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd,
const FunctionDecl *CalleeDecl,
unsigned ParamsToSkip) {
// We *have* to evaluate arguments from right to left in the MS C++ ABI,
// because arguments are destroyed left to right in the callee.
if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
// Insert a stack save if we're going to need any inalloca args.
bool HasInAllocaArgs = false;
for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
I != E && !HasInAllocaArgs; ++I)
HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
if (HasInAllocaArgs) {
assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
Args.allocateArgumentMemory(*this);
}
// Evaluate each argument.
size_t CallArgsStart = Args.size();
for (int I = ArgTypes.size() - 1; I >= 0; --I) {
CallExpr::const_arg_iterator Arg = ArgBeg + I;
EmitCallArg(Args, *Arg, ArgTypes[I]);
// HLSL Change begin.
RValue CallArg = Args.back().RV;
if (CallArg.isAggregate())
CGM.getHLSLRuntime().MarkPotentialResourceTemp(
*this, CallArg.getAggregateAddr(), ArgTypes[I]);
// HLSL Change end.
EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
CalleeDecl, ParamsToSkip + I);
}
// Un-reverse the arguments we just evaluated so they match up with the LLVM
// IR function.
std::reverse(Args.begin() + CallArgsStart, Args.end());
return;
}
for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
CallExpr::const_arg_iterator Arg = ArgBeg + I;
assert(Arg != ArgEnd);
EmitCallArg(Args, *Arg, ArgTypes[I]);
EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
CalleeDecl, ParamsToSkip + I);
}
}
namespace {
struct DestroyUnpassedArg : EHScopeStack::Cleanup {
DestroyUnpassedArg(llvm::Value *Addr, QualType Ty)
: Addr(Addr), Ty(Ty) {}
llvm::Value *Addr;
QualType Ty;
void Emit(CodeGenFunction &CGF, Flags flags) override {
const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
assert(!Dtor->isTrivial());
CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
/*Delegating=*/false, Addr);
}
};
}
struct DisableDebugLocationUpdates {
CodeGenFunction &CGF;
bool disabledDebugInfo;
DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
CGF.disableDebugInfo();
}
~DisableDebugLocationUpdates() {
if (disabledDebugInfo)
CGF.enableDebugInfo();
}
};
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
QualType type) {
DisableDebugLocationUpdates Dis(*this, E);
#if 0 // HLSL Change - no ObjC support
if (const ObjCIndirectCopyRestoreExpr *CRE
= dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
assert(getLangOpts().ObjCAutoRefCount);
assert(getContext().hasSameType(E->getType(), type));
return emitWritebackArg(*this, args, CRE);
}
#endif // HLSL Change - no ObjC support
assert(type->isReferenceType() == E->isGLValue() &&
"reference binding to unmaterialized r-value!");
if (E->isGLValue()) {
// HLSL Change Begins.
if (E->getObjectKind() == OK_VectorComponent) {
if (const HLSLVectorElementExpr *VecElt = dyn_cast<HLSLVectorElementExpr>(E)) {
LValue LV = EmitHLSLVectorElementExpr(VecElt);
llvm::Value *Ptr = nullptr;
if (LV.isSimple()) {
// Handle the special case when the vector component access
// is done on a scalar using .x or .r.
//
// Example 1:
// groupshared uint g;
// InterlockedAdd(g.x, 1);
//
// Example 2:
// RWBuffer<uint> buf;
// InterlockedAdd(buf[0].r, 1);
llvm::Value *V = LV.getAddress();
Ptr = Builder.CreateGEP(V, Builder.getInt32(0));
} else {
llvm::Value *V = LV.getExtVectorAddr();
llvm::Constant *Elts = LV.getExtVectorElts();
// Only support scalar for atomic operations.
assert(Elts->getType()->getVectorNumElements() == 1);
llvm::Value *ch = Builder.CreateExtractElement(Elts, (uint64_t)0);
Ptr = Builder.CreateGEP(V, { Builder.getInt32(0), ch });
}
RValue RV = RValue::get(Ptr);
return args.add(RV, type);
} else {
LValue LV = EmitExtMatrixElementExpr(cast<ExtMatrixElementExpr>(E));
llvm::Value *Ptr = LV.getAddress();
// Only support scalar for atomic operations.
assert(Ptr->getType()->getPointerElementType() == Ptr->getType()->getPointerElementType()->getScalarType());
RValue RV = RValue::get(Ptr);
return args.add(RV, type);
}
}
// HLSL Change Ends.
assert(E->getObjectKind() == OK_Ordinary);
return args.add(EmitReferenceBindingToExpr(E), type);
}
bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
// In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
// However, we still have to push an EH-only cleanup in case we unwind before
// we make it to the call.
if (HasAggregateEvalKind &&
!LangOptions().HLSL && // HLSL Change : Do not generate agg.tmp for HLSL
CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
// If we're using inalloca, use the argument memory. Otherwise, use a
// temporary.
AggValueSlot Slot;
if (args.isUsingInAlloca())
Slot = createPlaceholderSlot(*this, type);
else
Slot = CreateAggTemp(type, "agg.tmp");
const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
bool DestroyedInCallee =
RD && RD->hasNonTrivialDestructor() &&
CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
if (DestroyedInCallee)
Slot.setExternallyDestructed();
EmitAggExpr(E, Slot);
RValue RV = Slot.asRValue();
args.add(RV, type);
if (DestroyedInCallee) {
// Create a no-op GEP between the placeholder and the cleanup so we can
// RAUW it successfully. It also serves as a marker of the first
// instruction where the cleanup is active.
pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type);
// This unreachable is a temporary marker which will be removed later.
llvm::Instruction *IsActive = Builder.CreateUnreachable();
args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
}
return;
}
if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
assert(L.isSimple());
if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
// HLSL Change Begin - don't copy input arg.
// Copy for out param is done at CGMSHLSLRuntime::EmitHLSLOutParamConversion*.
args.add(L.asAggregateRValue(), type); // /*NeedsCopy*/true);
// HLSL Change End
} else {
// We can't represent a misaligned lvalue in the CallArgList, so copy
// to an aligned temporary now.
llvm::Value *tmp = CreateMemTemp(type);
EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
L.getAlignment());
args.add(RValue::getAggregate(tmp), type);
}
return;
}
// HLSL Change Begins.
// For DeclRefExpr of aggregate type, don't create temp.
if (HasAggregateEvalKind && LangOptions().HLSL &&
isa<DeclRefExpr>(E)) {
LValue LV = EmitDeclRefLValue(cast<DeclRefExpr>(E));
RValue RV = RValue::getAggregate(LV.getAddress());
args.add(RV, type);
return;
}
// HLSL Change Ends.
args.add(EmitAnyExprToTemp(E), type);
}
QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
// System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
// implicitly widens null pointer constants that are arguments to varargs
// functions to pointer-sized ints.
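  // For example, 'printf("%p", NULL)' on Win64 must pass a pointer-sized
  // zero, so the 4-byte literal 0 is widened to intptr_t here.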
if (!getTarget().getTriple().isOSWindows())
return Arg->getType();
if (Arg->getType()->isIntegerType() &&
getContext().getTypeSize(Arg->getType()) <
getContext().getTargetInfo().getPointerWidth(0) &&
Arg->isNullPointerConstant(getContext(),
Expr::NPC_ValueDependentIsNotNull)) {
return getContext().getIntPtrType();
}
return Arg->getType();
}
// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
!CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
CGM.getNoObjCARCExceptionsMetadata());
}
/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
const llvm::Twine &name) {
return EmitNounwindRuntimeCall(callee, None, name);
}
/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
ArrayRef<llvm::Value*> args,
const llvm::Twine &name) {
llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
call->setDoesNotThrow();
return call;
}
/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
const llvm::Twine &name) {
return EmitRuntimeCall(callee, None, name);
}
/// Emits a simple call (never an invoke) to the given runtime
/// function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
ArrayRef<llvm::Value*> args,
const llvm::Twine &name) {
llvm::CallInst *call = Builder.CreateCall(callee, args, name);
call->setCallingConv(getRuntimeCC());
return call;
}
/// Emits a call or invoke to the given noreturn runtime function.
void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
ArrayRef<llvm::Value*> args) {
if (getInvokeDest()) {
llvm::InvokeInst *invoke =
Builder.CreateInvoke(callee,
getUnreachableBlock(),
getInvokeDest(),
args);
invoke->setDoesNotReturn();
invoke->setCallingConv(getRuntimeCC());
} else {
llvm::CallInst *call = Builder.CreateCall(callee, args);
call->setDoesNotReturn();
call->setCallingConv(getRuntimeCC());
Builder.CreateUnreachable();
}
}
/// Emits a call or invoke instruction to the given nullary runtime
/// function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
const Twine &name) {
return EmitRuntimeCallOrInvoke(callee, None, name);
}
/// Emits a call or invoke instruction to the given runtime function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
ArrayRef<llvm::Value*> args,
const Twine &name) {
llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
callSite.setCallingConv(getRuntimeCC());
return callSite;
}
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
const Twine &Name) {
return EmitCallOrInvoke(Callee, None, Name);
}
/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
ArrayRef<llvm::Value *> Args,
const Twine &Name) {
llvm::BasicBlock *InvokeDest = getInvokeDest();
llvm::Instruction *Inst;
if (!InvokeDest)
Inst = Builder.CreateCall(Callee, Args, Name);
else {
llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
EmitBlock(ContBB);
}
// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
if (CGM.getLangOpts().ObjCAutoRefCount)
AddObjCARCExceptionMetadata(Inst);
return llvm::CallSite(Inst);
}
/// \brief Store a non-aggregate value to an address to initialize it. For
/// initialization, a non-atomic store will be used.
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
LValue Dst) {
if (Src.isScalar())
CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
else
CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
}
void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
llvm::Value *New) {
DeferredReplacements.push_back(std::make_pair(Old, New));
}
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::Value *Callee,
ReturnValueSlot ReturnValue,
const CallArgList &CallArgs,
const Decl *TargetDecl,
llvm::Instruction **callOrInvoke) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
// Handle struct-return functions by passing a pointer to the
// location that we would like to return into.
QualType RetTy = CallInfo.getReturnType();
const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
llvm::FunctionType *IRFuncTy =
cast<llvm::FunctionType>(
cast<llvm::PointerType>(Callee->getType())->getElementType());
// If we're using inalloca, insert the allocation after the stack save.
// FIXME: Do this earlier rather than hacking it in here!
llvm::AllocaInst *ArgMemory = nullptr;
if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
llvm::Instruction *IP = CallArgs.getStackBase();
llvm::AllocaInst *AI;
if (IP) {
IP = IP->getNextNode();
AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
} else {
AI = CreateTempAlloca(ArgStruct, "argmem");
}
AI->setUsedWithInAlloca(true);
assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
ArgMemory = AI;
}
ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
llvm::Value *SRetPtr = nullptr;
size_t UnusedReturnSize = 0;
if (RetAI.isIndirect() || RetAI.isInAlloca()) {
SRetPtr = ReturnValue.getValue();
if (!SRetPtr) {
SRetPtr = CreateMemTemp(RetTy);
if (HaveInsertPoint() && ReturnValue.isUnused()) {
uint64_t size =
CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
if (EmitLifetimeStart(size, SRetPtr))
UnusedReturnSize = size;
}
}
// HLSL Change begin.
CGM.getHLSLRuntime().MarkPotentialResourceTemp(*this, SRetPtr, RetTy);
// HLSL Change end.
if (IRFunctionArgs.hasSRetArg()) {
IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr;
} else {
llvm::Value *Addr =
Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
RetAI.getInAllocaFieldIndex());
Builder.CreateStore(SRetPtr, Addr);
}
}
assert(CallInfo.arg_size() == CallArgs.size() &&
"Mismatch between function signature & arguments.");
unsigned ArgNo = 0;
CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
I != E; ++I, ++info_it, ++ArgNo) {
const ABIArgInfo &ArgInfo = info_it->info;
RValue RV = I->RV;
CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
// Insert a padding argument to ensure proper alignment.
if (IRFunctionArgs.hasPaddingArg(ArgNo))
IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
llvm::UndefValue::get(ArgInfo.getPaddingType());
unsigned FirstIRArg, NumIRArgs;
std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
switch (ArgInfo.getKind()) {
case ABIArgInfo::InAlloca: {
assert(NumIRArgs == 0);
assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
if (RV.isAggregate()) {
// Replace the placeholder with the appropriate argument slot GEP.
llvm::Instruction *Placeholder =
cast<llvm::Instruction>(RV.getAggregateAddr());
CGBuilderTy::InsertPoint IP = Builder.saveIP();
Builder.SetInsertPoint(Placeholder);
llvm::Value *Addr =
Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
ArgInfo.getInAllocaFieldIndex());
Builder.restoreIP(IP);
deferPlaceholderReplacement(Placeholder, Addr);
} else {
// Store the RValue into the argument struct.
llvm::Value *Addr =
Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
ArgInfo.getInAllocaFieldIndex());
unsigned AS = Addr->getType()->getPointerAddressSpace();
llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
// There are some cases where a trivial bitcast is not avoidable. The
// definition of a type later in a translation unit may change its type
// from {}* to (%struct.foo*)*.
if (Addr->getType() != MemType)
Addr = Builder.CreateBitCast(Addr, MemType);
LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
EmitInitStoreOfNonAggregate(*this, RV, argLV);
}
break;
}
case ABIArgInfo::Indirect: {
assert(NumIRArgs == 1);
if (RV.isScalar() || RV.isComplex()) {
// Make a temporary alloca to pass the argument.
llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
if (ArgInfo.getIndirectAlign() > AI->getAlignment())
AI->setAlignment(ArgInfo.getIndirectAlign());
IRCallArgs[FirstIRArg] = AI;
LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign);
EmitInitStoreOfNonAggregate(*this, RV, argLV);
} else {
// We want to avoid creating an unnecessary temporary+copy here;
// however, we need one in three cases:
// 1. If the argument is not byval, and we are required to copy the
// source. (This case doesn't occur on any common architecture.)
// 2. If the argument is byval, RV is not sufficiently aligned, and
// we cannot force it to be sufficiently aligned.
// 3. If the argument is byval, but RV is located in an address space
// different than that of the argument (0).
llvm::Value *Addr = RV.getAggregateAddr();
unsigned Align = ArgInfo.getIndirectAlign();
const llvm::DataLayout *TD = &CGM.getDataLayout();
const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
const unsigned ArgAddrSpace =
(FirstIRArg < IRFuncTy->getNumParams()
? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
: 0);
if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
(ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
llvm::getOrEnforceKnownAlignment(Addr, Align, *TD) < Align) ||
(ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
// Create an aligned temporary, and copy to it.
llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
if (Align > AI->getAlignment())
AI->setAlignment(Align);
IRCallArgs[FirstIRArg] = AI;
EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
} else {
// Skip the extra memcpy call.
// HLSL Change Starts
// Generate AddrSpaceCast for shared memory.
if (RVAddrSpace != ArgAddrSpace) {
Addr = Builder.CreateAddrSpaceCast(
Addr, IRFuncTy->getParamType(FirstIRArg));
}
// HLSL Change Ends
IRCallArgs[FirstIRArg] = Addr;
}
}
break;
}
case ABIArgInfo::Ignore:
assert(NumIRArgs == 0);
break;
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
ArgInfo.getDirectOffset() == 0) {
assert(NumIRArgs == 1);
llvm::Value *V;
if (RV.isScalar())
V = RV.getScalarVal();
else
V = Builder.CreateLoad(RV.getAggregateAddr());
// We might have to widen integers, but we should never truncate.
if (ArgInfo.getCoerceToType() != V->getType() &&
V->getType()->isIntegerTy())
V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
// If the argument doesn't match, perform a bitcast to coerce it. This
// can happen due to trivial type mismatches.
if (FirstIRArg < IRFuncTy->getNumParams() &&
V->getType() != IRFuncTy->getParamType(FirstIRArg)) {
// HLSL Change Starts
// Generate AddrSpaceCast for shared memory.
if (V->getType()->isPointerTy())
V = Builder.CreatePointerBitCastOrAddrSpaceCast(
V, IRFuncTy->getParamType(FirstIRArg));
else
// HLSL Change Ends
V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
}
IRCallArgs[FirstIRArg] = V;
break;
}
// HLSL Change Begins
if (hlsl::IsHLSLMatType(I->Ty)) {
// For matrices, use the value directly.
IRCallArgs[FirstIRArg] = RV.getScalarVal();
continue;
}
// HLSL Change Ends
// FIXME: Avoid the conversion through memory if possible.
llvm::Value *SrcPtr;
CharUnits SrcAlign;
if (RV.isScalar() || RV.isComplex()) {
SrcPtr = CreateMemTemp(I->Ty, "coerce");
SrcAlign = TypeAlign;
LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
} else {
SrcPtr = RV.getAggregateAddr();
// This alignment is guaranteed by EmitCallArg.
SrcAlign = TypeAlign;
}
// If the value is offset in memory, apply the offset now.
if (unsigned Offs = ArgInfo.getDirectOffset()) {
SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
SrcPtr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), SrcPtr, Offs);
SrcPtr = Builder.CreateBitCast(SrcPtr,
llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
SrcAlign = SrcAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
}
// Fast-isel and the optimizer generally like scalar values better than
// FCAs, so we flatten them if this is safe to do for this argument.
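// For example (illustrative), a coerce-to type of { i64, i64 } is passed
// as two separate i64 arguments rather than as one first-class aggregate.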
llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
// If the source type is smaller than the destination type of the
// coerce-to logic, copy the source value into a temp alloca the size
// of the destination type to allow loading all of it. The bits past
// the source value are left undef.
if (SrcSize < DstSize) {
llvm::AllocaInst *TempAlloca
= CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
SrcPtr = TempAlloca;
} else {
SrcPtr = Builder.CreateBitCast(SrcPtr,
llvm::PointerType::getUnqual(STy));
}
assert(NumIRArgs == STy->getNumElements());
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, SrcPtr, 0, i);
llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
// We don't know what we're loading from.
LI->setAlignment(1);
IRCallArgs[FirstIRArg + i] = LI;
}
} else {
// In the simple case, just pass the coerced loaded value.
assert(NumIRArgs == 1);
IRCallArgs[FirstIRArg] =
CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
SrcAlign, *this);
}
break;
}
case ABIArgInfo::Expand:
unsigned IRArgPos = FirstIRArg;
ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
assert(IRArgPos == FirstIRArg + NumIRArgs);
break;
}
}
if (ArgMemory) {
llvm::Value *Arg = ArgMemory;
if (CallInfo.isVariadic()) {
// When passing non-POD arguments by value to variadic functions, we will
// end up with a variadic prototype and an inalloca call site. In such
// cases, we can't do any parameter mismatch checks. Give up and bitcast
// the callee.
unsigned CalleeAS =
cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
Callee = Builder.CreateBitCast(
Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
} else {
llvm::Type *LastParamTy =
IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
// Assert that these structs have equivalent element types.
llvm::StructType *FullTy = CallInfo.getArgStruct();
llvm::StructType *DeclaredTy = cast<llvm::StructType>(
cast<llvm::PointerType>(LastParamTy)->getElementType());
assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
DE = DeclaredTy->element_end(),
FI = FullTy->element_begin();
DI != DE; ++DI, ++FI)
assert(*DI == *FI);
#endif
Arg = Builder.CreateBitCast(Arg, LastParamTy);
}
}
assert(IRFunctionArgs.hasInallocaArg());
IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
}
if (!CallArgs.getCleanupsToDeactivate().empty())
deactivateArgCleanupsBeforeCall(*this, CallArgs);
// If the callee is a bitcast of a function to a varargs pointer to function
// type, check to see if we can remove the bitcast. This handles some cases
// with unprototyped functions.
if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
llvm::FunctionType *CurFT =
cast<llvm::FunctionType>(CurPT->getElementType());
llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
if (CE->getOpcode() == llvm::Instruction::BitCast &&
ActualFT->getReturnType() == CurFT->getReturnType() &&
ActualFT->getNumParams() == CurFT->getNumParams() &&
ActualFT->getNumParams() == IRCallArgs.size() &&
(CurFT->isVarArg() || !ActualFT->isVarArg())) {
bool ArgsMatch = true;
for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
ArgsMatch = false;
break;
}
// Strip the cast if we can get away with it. This is a nice cleanup,
// but also allows us to inline the function at -O0 if it is marked
// always_inline.
if (ArgsMatch)
Callee = CalleeF;
}
}
assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
// Inalloca argument can have different type.
if (IRFunctionArgs.hasInallocaArg() &&
i == IRFunctionArgs.getInallocaArgNo())
continue;
if (i < IRFuncTy->getNumParams())
assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
}
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
CallingConv, true);
llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
AttributeList);
llvm::BasicBlock *InvokeDest = nullptr;
if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
llvm::Attribute::NoUnwind) ||
currentFunctionUsesSEHTry())
InvokeDest = getInvokeDest();
llvm::CallSite CS;
if (!InvokeDest) {
// HLSL changes begin
// When storing a matrix to memory, change its orientation to match the
// in-memory orientation.
if (getLangOpts().HLSL && CGM.getHLSLRuntime().NeedHLSLMartrixCastForStoreOp(TargetDecl, IRCallArgs)) {
llvm::SmallVector<clang::QualType, 16> tyList;
for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); I != E; ++I) {
tyList.emplace_back(I->Ty);
}
CGM.getHLSLRuntime().EmitHLSLMartrixCastForStoreOp(*this, IRCallArgs, tyList);
}
// HLSL changes end
CS = Builder.CreateCall(Callee, IRCallArgs);
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs);
EmitBlock(Cont);
}
if (callOrInvoke)
*callOrInvoke = CS.getInstruction();
if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
!CS.hasFnAttr(llvm::Attribute::NoInline))
Attrs =
Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
llvm::Attribute::AlwaysInline);
// Disable inlining inside SEH __try blocks.
if (isSEHTryScope())
Attrs =
Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
llvm::Attribute::NoInline);
CS.setAttributes(Attrs);
CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
if (CGM.getLangOpts().ObjCAutoRefCount)
AddObjCARCExceptionMetadata(CS.getInstruction());
// If the call doesn't return, finish the basic block and clear the
// insertion point; this allows the rest of IRgen to discard
// unreachable code.
if (CS.doesNotReturn()) {
if (UnusedReturnSize)
EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
SRetPtr);
Builder.CreateUnreachable();
Builder.ClearInsertionPoint();
// FIXME: For now, emit a dummy basic block because expr emitters in
// generally are not ready to handle emitting expressions at unreachable
// points.
EnsureInsertPoint();
// Return a reasonable RValue.
return GetUndefRValue(RetTy);
}
llvm::Instruction *CI = CS.getInstruction();
if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
CI->setName("call");
#if 0 // HLSL Change - no ObjC support
// Emit any writebacks immediately. Arguably this should happen
// after any return-value munging.
if (CallArgs.hasWritebacks())
emitWritebacks(*this, CallArgs);
#else
assert(!CallArgs.hasWritebacks() && "writebacks are unavailable in HLSL");
#endif // HLSL Change - no ObjC support
// The stack cleanup for inalloca arguments has to run out of the normal
// lexical order, so deactivate it and run it manually here.
CallArgs.freeArgumentMemory(*this);
RValue Ret = [&] {
switch (RetAI.getKind()) {
case ABIArgInfo::InAlloca:
case ABIArgInfo::Indirect: {
RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
if (UnusedReturnSize)
EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
SRetPtr);
return ret;
}
case ABIArgInfo::Ignore:
// If we are ignoring the call's result, still construct an appropriate
// (undef) return value for our caller.
return GetUndefRValue(RetTy);
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
llvm::Type *RetIRTy = ConvertType(RetTy);
if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
switch (getEvaluationKind(RetTy)) {
case TEK_Complex: {
llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
return RValue::getComplex(std::make_pair(Real, Imag));
}
case TEK_Aggregate: {
llvm::Value *DestPtr = ReturnValue.getValue();
bool DestIsVolatile = ReturnValue.isVolatile();
CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy);
if (!DestPtr) {
DestPtr = CreateMemTemp(RetTy, "agg.tmp");
DestIsVolatile = false;
}
BuildAggStore(*this, CI, DestPtr, DestIsVolatile, DestAlign);
return RValue::getAggregate(DestPtr);
}
case TEK_Scalar: {
// If the argument doesn't match, perform a bitcast to coerce it. This
// can happen due to trivial type mismatches.
llvm::Value *V = CI;
if (V->getType() != RetIRTy)
V = Builder.CreateBitCast(V, RetIRTy);
return RValue::get(V);
}
}
llvm_unreachable("bad evaluation kind");
}
llvm::Value *DestPtr = ReturnValue.getValue();
bool DestIsVolatile = ReturnValue.isVolatile();
CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy);
if (!DestPtr) {
DestPtr = CreateMemTemp(RetTy, "coerce");
DestIsVolatile = false;
}
// If the value is offset in memory, apply the offset now.
llvm::Value *StorePtr = DestPtr;
CharUnits StoreAlign = DestAlign;
if (unsigned Offs = RetAI.getDirectOffset()) {
StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
StorePtr =
Builder.CreateConstGEP1_32(Builder.getInt8Ty(), StorePtr, Offs);
StorePtr = Builder.CreateBitCast(StorePtr,
llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
StoreAlign =
StoreAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
}
CreateCoercedStore(CI, StorePtr, DestIsVolatile, StoreAlign, *this);
return convertTempToRValue(DestPtr, RetTy, SourceLocation());
}
case ABIArgInfo::Expand:
llvm_unreachable("Invalid ABI kind for return argument");
}
llvm_unreachable("Unhandled ABIArgInfo::Kind");
} ();
if (Ret.isScalar() && TargetDecl) {
if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
llvm::Value *OffsetValue = nullptr;
if (const auto *Offset = AA->getOffset())
OffsetValue = EmitScalarExpr(Offset);
llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
OffsetValue);
}
}
return Ret;
}
/* VarArg handling */
llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/ABIInfo.h | //===----- ABIInfo.h - ABI information access & encapsulation ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
#define LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
#include "clang/AST/Type.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Type.h"
namespace llvm {
class Value;
class LLVMContext;
class DataLayout;
}
namespace clang {
class ASTContext;
class TargetInfo;
namespace CodeGen {
class CGCXXABI;
class CGFunctionInfo;
class CodeGenFunction;
class CodeGenTypes;
}
// FIXME: All of this stuff should be part of the target interface
// somehow. It is currently here because it is not clear how to factor
// the targets to support this, since the Targets currently live in a
// layer below types n'stuff.
/// ABIInfo - Target specific hooks for defining how a type should be
/// passed or returned from functions.
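/// For example (sketch), a target's computeInfo() classifies each argument
/// with an ABIArgInfo kind (Direct, Extend, Indirect, InAlloca, Expand, or
/// Ignore), which call lowering then honors when building the IR call.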
class ABIInfo {
public:
CodeGen::CodeGenTypes &CGT;
protected:
llvm::CallingConv::ID RuntimeCC;
llvm::CallingConv::ID BuiltinCC;
public:
ABIInfo(CodeGen::CodeGenTypes &cgt)
: CGT(cgt),
RuntimeCC(llvm::CallingConv::C),
BuiltinCC(llvm::CallingConv::C) {}
virtual ~ABIInfo();
CodeGen::CGCXXABI &getCXXABI() const;
ASTContext &getContext() const;
llvm::LLVMContext &getVMContext() const;
const llvm::DataLayout &getDataLayout() const;
const TargetInfo &getTarget() const;
/// Return the calling convention to use for system runtime
/// functions.
llvm::CallingConv::ID getRuntimeCC() const {
return RuntimeCC;
}
/// Return the calling convention to use for compiler builtins
llvm::CallingConv::ID getBuiltinCC() const {
return BuiltinCC;
}
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
/// EmitVAArg - Emit the target dependent code to load a value of
/// \arg Ty from the va_list pointed to by \arg VAListAddr.
// FIXME: This is a gaping layering violation if we wanted to drop
// the ABI information any lower than CodeGen. Of course, for
// VAArg handling it has to be at this level; there is no way to
// abstract this out.
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGen::CodeGenFunction &CGF) const = 0;
virtual bool isHomogeneousAggregateBaseType(QualType Ty) const;
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base,
uint64_t Members) const;
virtual bool shouldSignExtUnsignedType(QualType Ty) const;
bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
uint64_t &Members) const;
};
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGHLSLRuntime.cpp | //===----- CGHLSLRuntime.cpp - Interface to HLSL Runtime ----------------===//
///////////////////////////////////////////////////////////////////////////////
// //
// CGHLSLRuntime.cpp //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
// This provides a class for HLSL code generation. //
// //
///////////////////////////////////////////////////////////////////////////////
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/HlslTypes.h"
#include "clang/AST/Type.h"
#include "CGHLSLRuntime.h"
using namespace clang;
using namespace CodeGen;
CGHLSLRuntime::~CGHLSLRuntime() {}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CodeGenFunction.h | //===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the internal per-function state used for llvm translation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
#define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
#include "CGBuilder.h"
#include "CGDebugInfo.h"
#include "CGLoopInfo.h"
#include "CGValue.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "EHScopeStack.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/Type.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
namespace CGHLSLMSHelper
{
struct Scope;
}
namespace llvm {
class BasicBlock;
class LLVMContext;
class MDNode;
class Module;
class SwitchInst;
class Twine;
class Value;
class CallSite;
}
namespace clang {
class ASTContext;
class BlockDecl;
class CXXDestructorDecl;
class CXXForRangeStmt;
class CXXTryStmt;
class Decl;
class LabelDecl;
class EnumConstantDecl;
class FunctionDecl;
class FunctionProtoType;
class LabelStmt;
class ObjCContainerDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
class ObjCMethodDecl;
class ObjCImplementationDecl;
class ObjCPropertyImplDecl;
class TargetInfo;
class TargetCodeGenInfo;
class VarDecl;
class ObjCForCollectionStmt;
class ObjCAtTryStmt;
class ObjCAtThrowStmt;
class ObjCAtSynchronizedStmt;
class ObjCAutoreleasePoolStmt;
namespace CodeGen {
class CodeGenTypes;
class CGFunctionInfo;
class CGRecordLayout;
class CGBlockInfo;
class CGCXXABI;
class BlockFlags;
class BlockFieldFlags;
/// The kind of evaluation to perform on values of a particular
/// type. Basically, is the code in CGExprScalar, CGExprComplex, or
/// CGExprAgg?
///
/// TODO: should vectors maybe be split out into their own thing?
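///
/// Illustrative mapping: 'int' and pointer types evaluate as TEK_Scalar,
/// '_Complex float' as TEK_Complex, and class/struct/array types as
/// TEK_Aggregate.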
enum TypeEvaluationKind {
TEK_Scalar,
TEK_Complex,
TEK_Aggregate
};
/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
class CodeGenFunction : public CodeGenTypeCache {
CodeGenFunction(const CodeGenFunction &) = delete;
void operator=(const CodeGenFunction &) = delete;
friend class CGCXXABI;
public:
/// A jump destination is an abstract label, branching to which may
/// require a jump out through normal cleanups.
struct JumpDest {
JumpDest() : Block(nullptr), ScopeDepth(), Index(0) {}
JumpDest(llvm::BasicBlock *Block,
EHScopeStack::stable_iterator Depth,
unsigned Index)
: Block(Block), ScopeDepth(Depth), Index(Index) {}
bool isValid() const { return Block != nullptr; }
llvm::BasicBlock *getBlock() const { return Block; }
EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
unsigned getDestIndex() const { return Index; }
// This should be used cautiously.
void setScopeDepth(EHScopeStack::stable_iterator depth) {
ScopeDepth = depth;
}
private:
llvm::BasicBlock *Block;
EHScopeStack::stable_iterator ScopeDepth;
unsigned Index;
};
CodeGenModule &CGM; // Per-module state.
const TargetInfo &Target;
typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
LoopInfoStack LoopStack;
CGBuilderTy Builder;
/// \brief CGBuilder insert helper. This function is called after an
/// instruction is created using Builder.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
llvm::BasicBlock *BB,
llvm::BasicBlock::iterator InsertPt) const;
/// CurFuncDecl - Holds the Decl for the current outermost
/// non-closure context.
const Decl *CurFuncDecl;
/// CurCodeDecl - This is the inner-most code context, which includes blocks.
const Decl *CurCodeDecl;
const CGFunctionInfo *CurFnInfo;
QualType FnRetTy;
llvm::Function *CurFn;
/// CurGD - The GlobalDecl for the current function being compiled.
GlobalDecl CurGD;
/// PrologueCleanupDepth - The cleanup depth enclosing all the
/// cleanups associated with the parameters.
EHScopeStack::stable_iterator PrologueCleanupDepth;
/// ReturnBlock - Unified return block.
JumpDest ReturnBlock;
/// ReturnValue - The temporary alloca to hold the return value. This is null
/// iff the function has no return value.
llvm::Value *ReturnValue;
/// AllocaInsertPt - This is an instruction in the entry block before which
/// we prefer to insert allocas.
llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
/// \brief API for captured statement code generation.
class CGCapturedStmtInfo {
public:
explicit CGCapturedStmtInfo(CapturedRegionKind K = CR_Default)
: Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
explicit CGCapturedStmtInfo(const CapturedStmt &S,
CapturedRegionKind K = CR_Default)
: Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
RecordDecl::field_iterator Field =
S.getCapturedRecordDecl()->field_begin();
for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
E = S.capture_end();
I != E; ++I, ++Field) {
if (I->capturesThis())
CXXThisFieldDecl = *Field;
else if (I->capturesVariable())
CaptureFields[I->getCapturedVar()] = *Field;
}
}
virtual ~CGCapturedStmtInfo();
CapturedRegionKind getKind() const { return Kind; }
virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
/// \brief Retrieve the value of the context parameter.
virtual llvm::Value *getContextValue() const { return ThisValue; }
/// \brief Lookup the captured field decl for a variable.
virtual const FieldDecl *lookup(const VarDecl *VD) const {
return CaptureFields.lookup(VD);
}
bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
static bool classof(const CGCapturedStmtInfo *) {
return true;
}
/// \brief Emit the captured statement body.
virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
CGF.incrementProfileCounter(S);
CGF.EmitStmt(S);
}
/// \brief Get the name of the capture helper.
virtual StringRef getHelperName() const { return "__captured_stmt"; }
private:
/// \brief The kind of captured statement being generated.
CapturedRegionKind Kind;
/// \brief Keep the map between VarDecl and FieldDecl.
llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
/// \brief The base address of the captured record, passed in as the first
/// argument of the parallel region function.
llvm::Value *ThisValue;
/// \brief Captured 'this' type.
FieldDecl *CXXThisFieldDecl;
};
CGCapturedStmtInfo *CapturedStmtInfo;
/// \brief RAII for correct setting/restoring of CapturedStmtInfo.
class CGCapturedStmtRAII {
private:
CodeGenFunction &CGF;
CGCapturedStmtInfo *PrevCapturedStmtInfo;
public:
CGCapturedStmtRAII(CodeGenFunction &CGF,
CGCapturedStmtInfo *NewCapturedStmtInfo)
: CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
CGF.CapturedStmtInfo = NewCapturedStmtInfo;
}
~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
};
/// BoundsChecking - Emit run-time bounds checks. Higher values mean
/// potentially higher performance penalties.
unsigned char BoundsChecking;
/// \brief Sanitizers enabled for this function.
SanitizerSet SanOpts;
/// \brief True if CodeGen currently emits code implementing sanitizer checks.
bool IsSanitizerScope;
/// \brief RAII object to set/unset CodeGenFunction::IsSanitizerScope.
class SanitizerScope {
CodeGenFunction *CGF;
public:
SanitizerScope(CodeGenFunction *CGF);
~SanitizerScope();
};
/// In C++, whether we are code generating a thunk. This controls whether we
/// should emit cleanups.
bool CurFuncIsThunk;
/// In ARC, whether we should autorelease the return value.
bool AutoreleaseResult;
/// Whether we processed a Microsoft-style asm block during CodeGen. These can
/// potentially set the return value.
bool SawAsmBlock;
/// True if the current function is an outlined SEH helper. This can be a
/// finally block or filter expression.
bool IsOutlinedSEHHelper;
const CodeGen::CGBlockInfo *BlockInfo;
llvm::Value *BlockPointer;
llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
FieldDecl *LambdaThisCaptureField;
/// \brief A mapping from NRVO variables to the flags used to indicate
/// when the NRVO has been applied to this variable.
llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
EHScopeStack EHStack;
llvm::SmallVector<char, 256> LifetimeExtendedCleanupStack;
llvm::SmallVector<const JumpDest *, 2> SEHTryEpilogueStack;
/// Header for data within LifetimeExtendedCleanupStack.
struct LifetimeExtendedCleanupHeader {
/// The size of the following cleanup object.
unsigned Size;
/// The kind of cleanup to push: a value from the CleanupKind enumeration.
CleanupKind Kind;
size_t getSize() const { return Size; }
CleanupKind getKind() const { return Kind; }
};
/// i32s containing the indexes of the cleanup destinations.
llvm::AllocaInst *NormalCleanupDest;
unsigned NextCleanupDestIndex;
/// FirstBlockInfo - The head of a singly-linked-list of block layouts.
CGBlockInfo *FirstBlockInfo;
/// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
llvm::BasicBlock *EHResumeBlock;
/// The exception slot. All landing pads write the current exception pointer
/// into this alloca.
llvm::Value *ExceptionSlot;
/// The selector slot. Under the MandatoryCleanup model, all landing pads
/// write the current selector value into this alloca.
llvm::AllocaInst *EHSelectorSlot;
/// A stack of exception code slots. Entering an __except block pushes a slot
/// on the stack and leaving pops one. The __exception_code() intrinsic loads
/// a value from the top of the stack.
SmallVector<llvm::Value *, 1> SEHCodeSlotStack;
/// Value returned by __exception_info intrinsic.
llvm::Value *SEHInfo = nullptr;
/// Emits a landing pad for the current EH stack.
llvm::BasicBlock *EmitLandingPad();
llvm::BasicBlock *getInvokeDestImpl();
template <class T>
typename DominatingValue<T>::saved_type saveValueInCond(T value) {
return DominatingValue<T>::save(*this, value);
}
public:
/// ObjCEHValueStack - Stack of Objective-C exception values, used for
/// rethrows.
SmallVector<llvm::Value*, 8> ObjCEHValueStack;
/// A class controlling the emission of a finally block.
class FinallyInfo {
/// Where the catchall's edge through the cleanup should go.
JumpDest RethrowDest;
/// A function to call to enter the catch.
llvm::Constant *BeginCatchFn;
/// An i1 variable indicating whether or not the @finally is
/// running for an exception.
llvm::AllocaInst *ForEHVar;
/// An i8* variable into which the exception pointer to rethrow
/// has been saved.
llvm::AllocaInst *SavedExnVar;
public:
void enter(CodeGenFunction &CGF, const Stmt *Finally,
llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
llvm::Constant *rethrowFn);
void exit(CodeGenFunction &CGF);
};
/// Returns true inside SEH __try blocks.
bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }
/// pushFullExprCleanup - Push a cleanup to be run at the end of the
/// current full-expression. Safe against the possibility that
/// we're currently inside a conditionally-evaluated expression.
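///
/// A minimal usage sketch (MyCleanup is a hypothetical EHScopeStack
/// cleanup class whose constructor takes an llvm::Value*):
/// \code
///   CGF.pushFullExprCleanup<MyCleanup>(NormalAndEHCleanup, Addr);
/// \endcode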
template <class T, class... As>
void pushFullExprCleanup(CleanupKind kind, As... A) {
// If we're not in a conditional branch, or if none of the
// arguments requires saving, then use the unconditional cleanup.
if (!isInConditionalBranch())
return EHStack.pushCleanup<T>(kind, A...);
// Stash values in a tuple so we can guarantee the order of saves.
typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
SavedTuple Saved{saveValueInCond(A)...};
typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
initFullExprCleanup();
}
/// \brief Queue a cleanup to be pushed after finishing the current
/// full-expression.
template <class T, class... As>
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
assert(!isInConditionalBranch() && "can't defer conditional cleanup");
LifetimeExtendedCleanupHeader Header = { sizeof(T), Kind };
size_t OldSize = LifetimeExtendedCleanupStack.size();
LifetimeExtendedCleanupStack.resize(
LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size);
static_assert(sizeof(Header) % llvm::AlignOf<T>::Alignment == 0,
"Cleanup will be allocated on misaligned address");
char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
new (Buffer) LifetimeExtendedCleanupHeader(Header);
new (Buffer + sizeof(Header)) T(A...);
}
/// Set up the last cleanup that was pushed as a conditional
/// full-expression cleanup.
void initFullExprCleanup();
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object destructor of an object of the given type at the
/// given address. Does nothing if T is not a C++ class type with a
/// non-trivial destructor.
void PushDestructorCleanup(QualType T, llvm::Value *Addr);
/// PushDestructorCleanup - Push a cleanup to call the
/// complete-object variant of the given destructor on the object at
/// the given address.
void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
llvm::Value *Addr);
/// PopCleanupBlock - Will pop the cleanup entry on the stack and
/// process all branch fixups.
void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
/// DeactivateCleanupBlock - Deactivates the given cleanup block.
/// The block cannot be reactivated. Pops it if it's the top of the
/// stack.
///
/// \param DominatingIP - An instruction which is known to
/// dominate the current IP (if set) and which lies along
/// all paths of execution between the current IP and
/// the point at which the cleanup comes into scope.
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
llvm::Instruction *DominatingIP);
/// ActivateCleanupBlock - Activates an initially-inactive cleanup.
/// Cannot be used to resurrect a deactivated cleanup.
///
/// \param DominatingIP - An instruction which is known to
/// dominate the current IP (if set) and which lies along
/// all paths of execution between the current IP and
/// the point at which the cleanup comes into scope.
void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
llvm::Instruction *DominatingIP);
/// \brief Enters a new scope for capturing cleanups, all of which
/// will be executed once the scope is exited.
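///
/// Typical usage (illustrative):
/// \code
///   {
///     RunCleanupsScope Scope(CGF);
///     CGF.EmitStmt(S);  // may push cleanups onto EHStack
///   }                   // accumulated cleanups are emitted here
/// \endcode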
class RunCleanupsScope {
EHScopeStack::stable_iterator CleanupStackDepth;
size_t LifetimeExtendedCleanupStackSize;
bool OldDidCallStackSave;
protected:
bool PerformCleanup;
private:
RunCleanupsScope(const RunCleanupsScope &) = delete;
void operator=(const RunCleanupsScope &) = delete;
protected:
CodeGenFunction& CGF;
public:
/// \brief Enter a new cleanup scope.
explicit RunCleanupsScope(CodeGenFunction &CGF)
: PerformCleanup(true), CGF(CGF)
{
CleanupStackDepth = CGF.EHStack.stable_begin();
LifetimeExtendedCleanupStackSize =
CGF.LifetimeExtendedCleanupStack.size();
OldDidCallStackSave = CGF.DidCallStackSave;
CGF.DidCallStackSave = false;
}
/// \brief Exit this cleanup scope, emitting any accumulated
/// cleanups.
~RunCleanupsScope() {
if (PerformCleanup) {
CGF.DidCallStackSave = OldDidCallStackSave;
CGF.PopCleanupBlocks(CleanupStackDepth,
LifetimeExtendedCleanupStackSize);
}
}
/// \brief Determine whether this scope requires any cleanups.
bool requiresCleanups() const {
return CGF.EHStack.stable_begin() != CleanupStackDepth;
}
/// \brief Force the emission of cleanups now, instead of waiting
/// until this object is destroyed.
void ForceCleanup() {
assert(PerformCleanup && "Already forced cleanup");
CGF.DidCallStackSave = OldDidCallStackSave;
CGF.PopCleanupBlocks(CleanupStackDepth,
LifetimeExtendedCleanupStackSize);
PerformCleanup = false;
}
};
class LexicalScope : public RunCleanupsScope {
SourceRange Range;
SmallVector<const LabelDecl*, 4> Labels;
LexicalScope *ParentScope;
LexicalScope(const LexicalScope &) = delete;
void operator=(const LexicalScope &) = delete;
public:
/// \brief Enter a new cleanup scope.
explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
: RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
CGF.CurLexicalScope = this;
if (CGDebugInfo *DI = CGF.getDebugInfo())
DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
}
void addLabel(const LabelDecl *label) {
assert(PerformCleanup && "adding label to dead scope?");
Labels.push_back(label);
}
/// \brief Exit this cleanup scope, emitting any accumulated
/// cleanups.
~LexicalScope() {
if (CGDebugInfo *DI = CGF.getDebugInfo())
DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
// If we should perform a cleanup, force them now. Note that
// this ends the cleanup scope before rescoping any labels.
if (PerformCleanup) {
ApplyDebugLocation DL(CGF, Range.getEnd());
ForceCleanup();
}
}
/// \brief Force the emission of cleanups now, instead of waiting
/// until this object is destroyed.
void ForceCleanup() {
CGF.CurLexicalScope = ParentScope;
RunCleanupsScope::ForceCleanup();
if (!Labels.empty())
rescopeLabels();
}
void rescopeLabels();
};
/// \brief The scope used to remap some variables as private in the OpenMP
/// loop body (or other captured region emitted without outlining), and to
/// restore old vars back on exit.
class OMPPrivateScope : public RunCleanupsScope {
typedef llvm::DenseMap<const VarDecl *, llvm::Value *> VarDeclMapTy;
VarDeclMapTy SavedLocals;
VarDeclMapTy SavedPrivates;
private:
OMPPrivateScope(const OMPPrivateScope &) = delete;
void operator=(const OMPPrivateScope &) = delete;
public:
/// \brief Enter a new OpenMP private scope.
explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
/// \brief Registers \a LocalVD variable as a private and apply \a
/// PrivateGen function for it to generate corresponding private variable.
/// \a PrivateGen returns an address of the generated private variable.
/// \return true if the variable is registered as private, false if it has
/// been privatized already.
bool
addPrivate(const VarDecl *LocalVD,
const std::function<llvm::Value *()> &PrivateGen) {
assert(PerformCleanup && "adding private to dead scope");
if (SavedLocals.count(LocalVD) > 0) return false;
SavedLocals[LocalVD] = CGF.LocalDeclMap.lookup(LocalVD);
CGF.LocalDeclMap.erase(LocalVD);
SavedPrivates[LocalVD] = PrivateGen();
CGF.LocalDeclMap[LocalVD] = SavedLocals[LocalVD];
return true;
}
/// \brief Privatizes local variables previously registered as private.
/// Registration is separate from the actual privatization to allow
/// initializers to use the values of the original variables, not the
/// private ones. This is important, for example, if the private variable
/// is a class variable initialized by a constructor that references other
/// private variables. At initialization, the original variables must be
/// used, not the private copies.
/// \return true if at least one variable was privatized, false otherwise.
bool Privatize() {
for (auto VDPair : SavedPrivates) {
CGF.LocalDeclMap[VDPair.first] = VDPair.second;
}
SavedPrivates.clear();
return !SavedLocals.empty();
}
void ForceCleanup() {
RunCleanupsScope::ForceCleanup();
// Remap vars back to the original values.
for (auto I : SavedLocals) {
CGF.LocalDeclMap[I.first] = I.second;
}
SavedLocals.clear();
}
/// \brief Exit scope - all the mapped variables are restored.
~OMPPrivateScope() {
if (PerformCleanup)
ForceCleanup();
}
};
// HLSL Change Begins
/// \brief The scope used to add temporary variables for 'out' parameters in HLSL.
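/// Typical usage (illustrative): before emitting a call, create a temporary
/// for each 'out' argument and register it with addTemp(); the temporary
/// mappings are dropped automatically when the scope ends.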
class HLSLOutParamScope : public RunCleanupsScope {
typedef llvm::DenseSet<const VarDecl *> VarDeclSetTy;
VarDeclSetTy SavedLocals;
private:
HLSLOutParamScope(const HLSLOutParamScope &) = delete;
void operator=(const HLSLOutParamScope &) = delete;
void ForceCleanup() {
RunCleanupsScope::ForceCleanup();
// Remap vars back to the original values.
for (auto I : SavedLocals) {
CGF.LocalDeclMap.erase(I);
}
SavedLocals.clear();
}
public:
/// \brief Enter a new HLSL out-parameter scope.
explicit HLSLOutParamScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
/// \brief Registers \a LocalVD variable as TmpArg
void addTemp(const VarDecl *LocalVD, llvm::Value *TmpArg) {
CGF.LocalDeclMap[LocalVD] = TmpArg;
SavedLocals.insert(LocalVD);
}
/// \brief Exit scope - all the mapped variables are restored.
~HLSLOutParamScope() { ForceCleanup(); }
};
// HLSL Change Ends
/// \brief Takes the old cleanup stack size and emits the cleanup blocks
/// that have been added.
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
/// \brief Takes the old cleanup stack size and emits the cleanup blocks
/// that have been added, then adds all lifetime-extended cleanups from
/// the given position to the stack.
void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
size_t OldLifetimeExtendedStackSize);
void ResolveBranchFixups(llvm::BasicBlock *Target);
/// The given basic block lies in the current EH scope, but may be a
/// target of a potentially scope-crossing jump; get a stable handle
/// to which we can perform this jump later.
JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
return JumpDest(Target,
EHStack.getInnermostNormalCleanup(),
NextCleanupDestIndex++);
}
/// The given basic block lies in the current EH scope, but may be a
/// target of a potentially scope-crossing jump; get a stable handle
/// to which we can perform this jump later.
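///
/// For example (sketch), loop emission creates its exit destination up
/// front:
/// \code
///   JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
/// \endcode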
JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
return getJumpDestInCurrentScope(createBasicBlock(Name));
}
/// EmitBranchThroughCleanup - Emit a branch from the current insert
/// block through the normal cleanup handling code (if any) and then
/// on to \arg Dest.
// HLSL Change - allow to use pre-generated branch
void EmitBranchThroughCleanup(JumpDest Dest, llvm::BranchInst *PreExistingBr = nullptr);
/// isObviouslyBranchWithoutCleanups - Return true if a branch to the
/// specified destination obviously has no cleanups to run. 'false' is always
/// a conservatively correct answer for this method.
bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
/// popCatchScope - Pops the catch scope at the top of the EHScope
/// stack, emitting any required code (other than the catch handlers
/// themselves).
void popCatchScope();
llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
/// An object to manage conditionally-evaluated expressions.
class ConditionalEvaluation {
llvm::BasicBlock *StartBB;
public:
ConditionalEvaluation(CodeGenFunction &CGF)
: StartBB(CGF.Builder.GetInsertBlock()) {}
void begin(CodeGenFunction &CGF) {
assert(CGF.OutermostConditional != this);
if (!CGF.OutermostConditional)
CGF.OutermostConditional = this;
}
void end(CodeGenFunction &CGF) {
assert(CGF.OutermostConditional != nullptr);
if (CGF.OutermostConditional == this)
CGF.OutermostConditional = nullptr;
}
/// Returns a block which will be executed prior to each
/// evaluation of the conditional code.
llvm::BasicBlock *getStartingBlock() const {
return StartBB;
}
};
/// isInConditionalBranch - Return true if we're currently emitting
/// one branch or the other of a conditional expression.
bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
void setBeforeOutermostConditional(llvm::Value *value, llvm::Value *addr) {
assert(isInConditionalBranch());
llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
new llvm::StoreInst(value, addr, &block->back());
}
/// An RAII object to record that we're evaluating a statement
/// expression.
class StmtExprEvaluation {
CodeGenFunction &CGF;
/// We have to save the outermost conditional: cleanups in a
/// statement expression aren't conditional just because the
/// StmtExpr is.
ConditionalEvaluation *SavedOutermostConditional;
public:
StmtExprEvaluation(CodeGenFunction &CGF)
: CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
CGF.OutermostConditional = nullptr;
}
~StmtExprEvaluation() {
CGF.OutermostConditional = SavedOutermostConditional;
CGF.EnsureInsertPoint();
}
};
/// An object which temporarily prevents a value from being
/// destroyed by aggressive peephole optimizations that assume that
/// all uses of a value have been realized in the IR.
class PeepholeProtection {
llvm::Instruction *Inst;
friend class CodeGenFunction;
public:
PeepholeProtection() : Inst(nullptr) {}
};
/// A non-RAII class containing all the information about a bound
/// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
/// this which makes individual mappings very simple; using this
/// class directly is useful when you have a variable number of
/// opaque values or don't want the RAII functionality for some
/// reason.
class OpaqueValueMappingData {
const OpaqueValueExpr *OpaqueValue;
bool BoundLValue;
CodeGenFunction::PeepholeProtection Protection;
OpaqueValueMappingData(const OpaqueValueExpr *ov,
bool boundLValue)
: OpaqueValue(ov), BoundLValue(boundLValue) {}
public:
OpaqueValueMappingData() : OpaqueValue(nullptr) {}
static bool shouldBindAsLValue(const Expr *expr) {
// gl-values should be bound as l-values for obvious reasons.
// Records should be bound as l-values because IR generation
// always keeps them in memory. Expressions of function type
// act exactly like l-values but are formally required to be
// r-values in C.
return expr->isGLValue() ||
expr->getType()->isFunctionType() ||
hasAggregateEvaluationKind(expr->getType());
}
static OpaqueValueMappingData bind(CodeGenFunction &CGF,
const OpaqueValueExpr *ov,
const Expr *e) {
if (shouldBindAsLValue(ov))
return bind(CGF, ov, CGF.EmitLValue(e));
return bind(CGF, ov, CGF.EmitAnyExpr(e));
}
static OpaqueValueMappingData bind(CodeGenFunction &CGF,
const OpaqueValueExpr *ov,
const LValue &lv) {
assert(shouldBindAsLValue(ov));
CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
return OpaqueValueMappingData(ov, true);
}
static OpaqueValueMappingData bind(CodeGenFunction &CGF,
const OpaqueValueExpr *ov,
const RValue &rv) {
assert(!shouldBindAsLValue(ov));
CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
OpaqueValueMappingData data(ov, false);
// Work around an extremely aggressive peephole optimization in
// EmitScalarConversion which assumes that all other uses of a
// value are extant.
data.Protection = CGF.protectFromPeepholes(rv);
return data;
}
bool isValid() const { return OpaqueValue != nullptr; }
void clear() { OpaqueValue = nullptr; }
void unbind(CodeGenFunction &CGF) {
assert(OpaqueValue && "no data to unbind!");
if (BoundLValue) {
CGF.OpaqueLValues.erase(OpaqueValue);
} else {
CGF.OpaqueRValues.erase(OpaqueValue);
CGF.unprotectFromPeepholes(Protection);
}
}
};
/// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
class OpaqueValueMapping {
CodeGenFunction &CGF;
OpaqueValueMappingData Data;
public:
static bool shouldBindAsLValue(const Expr *expr) {
return OpaqueValueMappingData::shouldBindAsLValue(expr);
}
/// Build the opaque value mapping for the given conditional
/// operator if it's the GNU ?: extension. This is a common
/// enough pattern that the convenience operator is really
/// helpful.
///
OpaqueValueMapping(CodeGenFunction &CGF,
const AbstractConditionalOperator *op) : CGF(CGF) {
if (isa<ConditionalOperator>(op))
// Leave Data empty.
return;
const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
e->getCommon());
}
OpaqueValueMapping(CodeGenFunction &CGF,
const OpaqueValueExpr *opaqueValue,
LValue lvalue)
: CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
}
OpaqueValueMapping(CodeGenFunction &CGF,
const OpaqueValueExpr *opaqueValue,
RValue rvalue)
: CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
}
void pop() {
Data.unbind(CGF);
Data.clear();
}
~OpaqueValueMapping() {
if (Data.isValid()) Data.unbind(CGF);
}
};
/// getByRefValueLLVMField - Given a __block declaration, returns the LLVM
/// type of the byref struct and the field number that holds the value.
std::pair<llvm::Type *, unsigned>
getByRefValueLLVMField(const ValueDecl *VD) const;
/// BuildBlockByrefAddress - Computes address location of the
/// variable which is declared as __block.
llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
const VarDecl *V);
private:
CGDebugInfo *DebugInfo;
bool DisableDebugInfo;
/// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
/// calling llvm.stacksave for multiple VLAs in the same scope.
bool DidCallStackSave;
/// IndirectBranch - The first time an indirect goto is seen we create a block
/// with an indirect branch. Every time we see the address of a label taken,
/// we add the label to the indirect goto. Every subsequent indirect goto is
/// codegen'd as a jump to the IndirectBranch's basic block.
llvm::IndirectBrInst *IndirectBranch;
/// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
/// decls.
typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
DeclMapTy LocalDeclMap;
/// Track escaped local variables with auto storage. Used during SEH
/// outlining to produce a call to llvm.localescape.
llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
/// LabelMap - This keeps track of the LLVM basic block for each C label.
llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
// BreakContinueStack - This keeps track of where break and continue
// statements should jump to.
struct BreakContinue {
BreakContinue(JumpDest Break, JumpDest Continue)
: BreakBlock(Break), ContinueBlock(Continue) {}
JumpDest BreakBlock;
JumpDest ContinueBlock;
};
SmallVector<BreakContinue, 8> BreakContinueStack;
CodeGenPGO PGO;
/// Calculate branch weights appropriate for PGO data
llvm::MDNode *createProfileWeights(uint64_t TrueCount, uint64_t FalseCount);
llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights);
llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
uint64_t LoopCount);
public:
/// Increment the profiler's counter for the given statement.
void incrementProfileCounter(const Stmt *S) {
if (CGM.getCodeGenOpts().ProfileInstrGenerate)
PGO.emitCounterIncrement(Builder, S);
PGO.setCurrentStmt(S);
}
/// Get the profiler's count for the given statement.
uint64_t getProfileCount(const Stmt *S) {
Optional<uint64_t> Count = PGO.getStmtCount(S);
if (!Count.hasValue())
return 0;
return *Count;
}
/// Set the profiler's current count.
void setCurrentProfileCount(uint64_t Count) {
PGO.setCurrentRegionCount(Count);
}
/// Get the profiler's current count. This is generally the count for the most
/// recently incremented counter.
uint64_t getCurrentProfileCount() {
return PGO.getCurrentRegionCount();
}
private:
/// SwitchInsn - This is the nearest enclosing switch instruction. It is
/// null if the current context is not in a switch.
llvm::SwitchInst *SwitchInsn;
/// The branch weights of SwitchInsn when doing instrumentation based PGO.
SmallVector<uint64_t, 16> *SwitchWeights;
/// CaseRangeBlock - This block holds the condition check for the last
/// case-statement range in the current switch instruction.
llvm::BasicBlock *CaseRangeBlock;
/// OpaqueLValues - Keeps track of the current set of opaque value
/// expressions.
llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
// VLASizeMap - This keeps track of the associated size for each VLA type.
// We track this by the size expression rather than the type itself because
// in certain situations, like a const qualifier applied to a VLA typedef,
// multiple VLA types can share the same size expression.
// FIXME: Maybe this could be a stack of maps that is pushed/popped as we
// enter/leave scopes.
llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
/// A block containing a single 'unreachable' instruction. Created
/// lazily by getUnreachableBlock().
llvm::BasicBlock *UnreachableBlock;
/// Count of the number of return expressions in the function.
unsigned NumReturnExprs;
/// Count the number of simple (constant) return expressions in the function.
unsigned NumSimpleReturnExprs;
/// The last regular (non-return) debug location (breakpoint) in the function.
SourceLocation LastStopPoint;
public:
/// A scope within which we are constructing the fields of an object which
/// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
/// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
class FieldConstructionScope {
public:
FieldConstructionScope(CodeGenFunction &CGF, llvm::Value *This)
: CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
CGF.CXXDefaultInitExprThis = This;
}
~FieldConstructionScope() {
CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
}
private:
CodeGenFunction &CGF;
llvm::Value *OldCXXDefaultInitExprThis;
};
/// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
/// is overridden to be the object under construction.
class CXXDefaultInitExprScope {
public:
CXXDefaultInitExprScope(CodeGenFunction &CGF)
: CGF(CGF), OldCXXThisValue(CGF.CXXThisValue) {
CGF.CXXThisValue = CGF.CXXDefaultInitExprThis;
}
~CXXDefaultInitExprScope() {
CGF.CXXThisValue = OldCXXThisValue;
}
public:
CodeGenFunction &CGF;
llvm::Value *OldCXXThisValue;
};
private:
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
ImplicitParamDecl *CXXABIThisDecl;
llvm::Value *CXXABIThisValue;
llvm::Value *CXXThisValue;
/// The value of 'this' to use when evaluating CXXDefaultInitExprs within
/// this expression.
llvm::Value *CXXDefaultInitExprThis;
/// CXXStructorImplicitParamDecl - When generating code for a constructor or
/// destructor, this will hold the implicit argument (e.g. VTT).
ImplicitParamDecl *CXXStructorImplicitParamDecl;
llvm::Value *CXXStructorImplicitParamValue;
/// OutermostConditional - Points to the outermost active
/// conditional control. This is used so that we know if a
/// temporary should be destroyed conditionally.
ConditionalEvaluation *OutermostConditional;
/// The current lexical scope.
LexicalScope *CurLexicalScope;
/// The current source location that should be used for exception
/// handling code.
SourceLocation CurEHLocation;
/// ByRefValueInfo - For each __block variable, contains a pair of the LLVM
/// type as well as the field number that contains the actual data.
llvm::DenseMap<const ValueDecl *, std::pair<llvm::Type *,
unsigned> > ByRefValueInfo;
llvm::BasicBlock *TerminateLandingPad;
llvm::BasicBlock *TerminateHandler;
llvm::BasicBlock *TrapBB;
/// Add a kernel metadata node to the named metadata node 'opencl.kernels'.
/// In the kernel metadata node, reference the kernel function and metadata
/// nodes for its optional attribute qualifiers (OpenCL 1.1 6.7.2):
/// - A node for the vec_type_hint(<type>) qualifier contains string
/// "vec_type_hint", an undefined value of the <type> data type,
/// and a Boolean that is true if the <type> is integer and signed.
/// - A node for the work_group_size_hint(X,Y,Z) qualifier contains string
/// "work_group_size_hint", and three 32-bit integers X, Y and Z.
/// - A node for the reqd_work_group_size(X,Y,Z) qualifier contains string
/// "reqd_work_group_size", and three 32-bit integers X, Y and Z.
void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
llvm::Function *Fn);
public:
CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
~CodeGenFunction();
CodeGenTypes &getTypes() const { return CGM.getTypes(); }
ASTContext &getContext() const { return CGM.getContext(); }
CGDebugInfo *getDebugInfo() {
if (DisableDebugInfo)
return nullptr;
return DebugInfo;
}
void disableDebugInfo() { DisableDebugInfo = true; }
void enableDebugInfo() { DisableDebugInfo = false; }
bool shouldUseFusedARCCalls() {
return CGM.getCodeGenOpts().OptimizationLevel == 0;
}
const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
/// Returns a pointer to the function's exception object and selector slot,
/// which is assigned in every landing pad.
llvm::Value *getExceptionSlot();
llvm::Value *getEHSelectorSlot();
/// Returns the contents of the function's exception object and selector
/// slots.
llvm::Value *getExceptionFromSlot();
llvm::Value *getSelectorFromSlot();
llvm::Value *getNormalCleanupDestSlot();
llvm::BasicBlock *getUnreachableBlock() {
if (!UnreachableBlock) {
UnreachableBlock = createBasicBlock("unreachable");
new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
}
return UnreachableBlock;
}
llvm::BasicBlock *getInvokeDest() {
#if 0 // HLSL Change - no exception handling
if (!EHStack.requiresLandingPad()) return nullptr;
return getInvokeDestImpl();
#else
return nullptr;
#endif // HLSL Change - no exception handling
}
bool currentFunctionUsesSEHTry() const {
const auto *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
return FD && FD->usesSEHTry();
}
const TargetInfo &getTarget() const { return Target; }
llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
//===--------------------------------------------------------------------===//
// Cleanups
//===--------------------------------------------------------------------===//
typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty);
void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEndPointer,
QualType elementType,
Destroyer *destroyer);
void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
llvm::Value *arrayEnd,
QualType elementType,
Destroyer *destroyer);
void pushDestroy(QualType::DestructionKind dtorKind,
llvm::Value *addr, QualType type);
void pushEHDestroy(QualType::DestructionKind dtorKind,
llvm::Value *addr, QualType type);
void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
Destroyer *destroyer, bool useEHCleanupForArray);
void pushLifetimeExtendedDestroy(CleanupKind kind, llvm::Value *addr,
QualType type, Destroyer *destroyer,
bool useEHCleanupForArray);
void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
llvm::Value *CompletePtr,
QualType ElementType);
void pushStackRestore(CleanupKind kind, llvm::Value *SPMem);
void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
bool useEHCleanupForArray);
llvm::Function *generateDestroyHelper(llvm::Constant *addr, QualType type,
Destroyer *destroyer,
bool useEHCleanupForArray,
const VarDecl *VD);
void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
QualType type, Destroyer *destroyer,
bool checkZeroLength, bool useEHCleanup);
Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
/// Determines whether an EH cleanup is required to destroy a type
/// with the given destruction kind.
bool needsEHCleanup(QualType::DestructionKind kind) {
switch (kind) {
case QualType::DK_none:
return false;
case QualType::DK_cxx_destructor:
case QualType::DK_objc_weak_lifetime:
return getLangOpts().Exceptions;
case QualType::DK_objc_strong_lifetime:
return getLangOpts().Exceptions &&
CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
}
llvm_unreachable("bad destruction kind");
}
CleanupKind getCleanupKind(QualType::DestructionKind kind) {
return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
}
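// A minimal caller-side sketch (illustrative, not part of this interface):
// registering a destructor cleanup for a local of type T typically looks like
//   if (QualType::DestructionKind DK = T.isDestructedType())
//     pushDestroy(DK, Addr, T);
// where the DestructionKind overload of pushDestroy derives its CleanupKind
// via getCleanupKind above.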
//===--------------------------------------------------------------------===//
// Objective-C
//===--------------------------------------------------------------------===//
void GenerateObjCMethod(const ObjCMethodDecl *OMD);
void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD);
/// GenerateObjCGetter - Synthesize an Objective-C property getter function.
void GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID);
void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
const ObjCPropertyImplDecl *propImpl,
const ObjCMethodDecl *GetterMethodDecl,
llvm::Constant *AtomicHelperFn);
void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
ObjCMethodDecl *MD, bool ctor);
/// GenerateObjCSetter - Synthesize an Objective-C property setter function
/// for the given property.
void GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID);
void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
const ObjCPropertyImplDecl *propImpl,
llvm::Constant *AtomicHelperFn);
bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
bool IvarTypeWithAggrGCObjects(QualType Ty);
//===--------------------------------------------------------------------===//
// Block Bits
//===--------------------------------------------------------------------===//
llvm::Value *EmitBlockLiteral(const BlockExpr *);
llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
static void destroyBlockInfos(CGBlockInfo *info);
llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
const CGBlockInfo &Info,
llvm::StructType *,
llvm::Constant *BlockVarLayout);
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
const CGBlockInfo &Info,
const DeclMapTy &ldm,
bool IsLambdaConversionToBlock);
llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
const ObjCPropertyImplDecl *PID);
llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
const ObjCPropertyImplDecl *PID);
llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
class AutoVarEmission;
void emitByrefStructureInit(const AutoVarEmission &emission);
void enterByrefCleanup(const AutoVarEmission &emission);
llvm::Value *LoadBlockStruct() {
assert(BlockPointer && "no block pointer set!");
return BlockPointer;
}
void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
void AllocateBlockDecl(const DeclRefExpr *E);
llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
llvm::Type *BuildByRefType(const VarDecl *var);
void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const CGFunctionInfo &FnInfo);
/// \brief Emit code for the start of a function.
/// \param Loc The location to be associated with the function.
/// \param StartLoc The location of the function body.
void StartFunction(GlobalDecl GD,
QualType RetTy,
llvm::Function *Fn,
const CGFunctionInfo &FnInfo,
const FunctionArgList &Args,
SourceLocation Loc = SourceLocation(),
SourceLocation StartLoc = SourceLocation());
void EmitConstructorBody(FunctionArgList &Args);
void EmitDestructorBody(FunctionArgList &Args);
void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
void EmitFunctionBody(FunctionArgList &Args, const Stmt *Body);
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
CallArgList &CallArgs);
void EmitLambdaToBlockPointerBody(FunctionArgList &Args);
void EmitLambdaBlockInvokeBody();
void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
void EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD);
void EmitAsanPrologueOrEpilogue(bool Prologue);
/// \brief Emit the unified return block, trying to avoid its emission when
/// possible.
/// \return The debug location of the user-written return statement if the
/// return block is avoided.
llvm::DebugLoc EmitReturnBlock();
/// FinishFunction - Complete IR generation of the current function. It is
/// legal to call this function even if there is no current insertion point.
void FinishFunction(SourceLocation EndLoc=SourceLocation());
void StartThunk(llvm::Function *Fn, GlobalDecl GD,
const CGFunctionInfo &FnInfo);
void EmitCallAndReturnForThunk(llvm::Value *Callee, const ThunkInfo *Thunk);
/// Emit a musttail call for a thunk with a potentially adjusted this pointer.
void EmitMustTailThunk(const CXXMethodDecl *MD, llvm::Value *AdjustedThisPtr,
llvm::Value *Callee);
/// Generate a thunk for the given method.
void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
GlobalDecl GD, const ThunkInfo &Thunk);
llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
const CGFunctionInfo &FnInfo,
GlobalDecl GD, const ThunkInfo &Thunk);
void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
FunctionArgList &Args);
void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
ArrayRef<VarDecl *> ArrayIndexes);
/// InitializeVTablePointer - Initialize the vtable pointer of the given
/// subobject.
///
void InitializeVTablePointer(BaseSubobject Base,
const CXXRecordDecl *NearestVBase,
CharUnits OffsetFromNearestVBase,
const CXXRecordDecl *VTableClass);
typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
void InitializeVTablePointers(BaseSubobject Base,
const CXXRecordDecl *NearestVBase,
CharUnits OffsetFromNearestVBase,
bool BaseIsNonVirtualPrimaryBase,
const CXXRecordDecl *VTableClass,
VisitedVirtualBasesSetTy& VBases);
void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
/// GetVTablePtr - Return the Value of the vtable pointer member pointed
/// to by This.
llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);
enum CFITypeCheckKind {
CFITCK_VCall,
CFITCK_NVCall,
CFITCK_DerivedCast,
CFITCK_UnrelatedCast,
};
/// \brief Derived is the presumed address of an object of type T after a
/// cast. If T is a polymorphic class type, emit a check that the virtual
/// table for Derived belongs to a class derived from T.
void EmitVTablePtrCheckForCast(QualType T, llvm::Value *Derived,
bool MayBeNull, CFITypeCheckKind TCK,
SourceLocation Loc);
/// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
/// If vptr CFI is enabled, emit a check that VTable is valid.
void EmitVTablePtrCheckForCall(const CXXMethodDecl *MD, llvm::Value *VTable,
CFITypeCheckKind TCK, SourceLocation Loc);
/// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
/// RD using llvm.bitset.test.
void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
CFITypeCheckKind TCK, SourceLocation Loc);
/// CanDevirtualizeMemberFunctionCall - Checks whether a virtual call on the
/// given expr can be devirtualized.
bool CanDevirtualizeMemberFunctionCall(const Expr *Base,
const CXXMethodDecl *MD);
/// EnterDtorCleanups - Enter the cleanups necessary to complete the
/// given phase of destruction for a destructor. The end result
/// should call destructors on members and base classes in reverse
/// order of their construction.
void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool ShouldInstrumentFunction();
/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void EmitFunctionInstrumentation(const char *Fn);
/// EmitMCountInstrumentation - Emit call to .mcount.
void EmitMCountInstrumentation();
/// EmitFunctionProlog - Emit the target specific LLVM code to load the
/// arguments for the given function. This is also responsible for naming the
/// LLVM function arguments.
void EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Function *Fn,
const FunctionArgList &Args);
/// EmitFunctionEpilog - Emit the target specific LLVM code to return the
/// given temporary.
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
SourceLocation EndLoc);
/// EmitStartEHSpec - Emit the start of the exception spec.
void EmitStartEHSpec(const Decl *D);
/// EmitEndEHSpec - Emit the end of the exception spec.
void EmitEndEHSpec(const Decl *D);
/// getTerminateLandingPad - Return a landing pad that just calls terminate.
llvm::BasicBlock *getTerminateLandingPad();
/// getTerminateHandler - Return a handler (not a landing pad, just
/// a catch handler) that just calls terminate. This is used when
/// a terminate scope encloses a try.
llvm::BasicBlock *getTerminateHandler();
llvm::Type *ConvertTypeForMem(QualType T);
llvm::Type *ConvertType(QualType T);
llvm::Type *ConvertType(const TypeDecl *T) {
return ConvertType(getContext().getTypeDeclType(T));
}
/// LoadObjCSelf - Load the value of self. This function is only valid while
/// generating code for an Objective-C method.
llvm::Value *LoadObjCSelf();
/// TypeOfSelfObject - Return type of object that this self represents.
QualType TypeOfSelfObject();
/// getEvaluationKind - Return the TypeEvaluationKind (scalar, complex, or
/// aggregate) to use when emitting a value of the given AST type.
static TypeEvaluationKind getEvaluationKind(QualType T);
static bool hasScalarEvaluationKind(QualType T) {
return getEvaluationKind(T) == TEK_Scalar;
}
static bool hasAggregateEvaluationKind(QualType T) {
return getEvaluationKind(T) == TEK_Aggregate;
}
/// createBasicBlock - Create an LLVM basic block.
llvm::BasicBlock *createBasicBlock(const Twine &name = "",
llvm::Function *parent = nullptr,
llvm::BasicBlock *before = nullptr) {
#ifdef NDEBUG
return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
#else
return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
#endif
}
/// getJumpDestForLabel - Return the JumpDest that the specified label
/// maps to.
JumpDest getJumpDestForLabel(const LabelDecl *S);
/// SimplifyForwardingBlocks - If the given basic block is only a branch to
/// another basic block, simplify it. This assumes that no other code could
/// potentially reference the basic block.
///
/// HLSL CHANGE: Pass the loop scope to update the simplified block pointer.
///
void SimplifyForwardingBlocks(llvm::BasicBlock *BB, CGHLSLMSHelper::Scope *LoopScope);
/// EmitBlock - Emit the given block \arg BB and set it as the insert point,
/// adding a fall-through branch from the current insert block if
/// necessary. It is legal to call this function even if there is no current
/// insertion point.
///
/// IsFinished - If true, indicates that the caller has finished emitting
/// branches to the given block and does not expect to emit code into it. This
/// means the block can be ignored if it is unreachable.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
/// EmitBlockAfterUses - Emit the given block somewhere hopefully
/// near its uses, and leave the insertion point in it.
void EmitBlockAfterUses(llvm::BasicBlock *BB);
/// EmitBranch - Emit a branch to the specified basic block from the current
/// insert block, taking care to avoid creation of branches from dummy
/// blocks. It is legal to call this function even if there is no current
/// insertion point.
///
/// This function clears the current insertion point. The caller should follow
/// calls to this function with calls to Emit*Block prior to generating new
/// code.
void EmitBranch(llvm::BasicBlock *Block);
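// A minimal sketch of how these primitives combine when lowering a simple
// if-statement (names are illustrative):
//   llvm::BasicBlock *ThenBB = createBasicBlock("if.then");
//   llvm::BasicBlock *EndBB = createBasicBlock("if.end");
//   Builder.CreateCondBr(CondV, ThenBB, EndBB);
//   EmitBlock(ThenBB);
//   // ... emit the 'then' body ...
//   EmitBranch(EndBB);
//   EmitBlock(EndBB, /*IsFinished=*/true);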
/// HaveInsertPoint - True if an insertion point is defined. If not, this
/// indicates that the current code being emitted is unreachable.
bool HaveInsertPoint() const {
return Builder.GetInsertBlock() != nullptr;
}
/// EnsureInsertPoint - Ensure that an insertion point is defined so that
/// emitted IR has a place to go. Note that by definition, if this function
/// creates a block then that block is unreachable; callers may do better to
/// detect when no insertion point is defined and simply skip IR generation.
void EnsureInsertPoint() {
if (!HaveInsertPoint())
EmitBlock(createBasicBlock());
}
/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void ErrorUnsupported(const Stmt *S, const char *Type);
//===--------------------------------------------------------------------===//
// Helpers
//===--------------------------------------------------------------------===//
LValue MakeAddrLValue(llvm::Value *V, QualType T,
CharUnits Alignment = CharUnits()) {
return LValue::MakeAddr(V, T, Alignment, getContext(),
CGM.getTBAAInfo(T));
}
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T);
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The caller is responsible for setting an appropriate alignment on
/// the alloca.
llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
const Twine &Name = "tmp");
/// InitTempAlloca - Provide an initial value for the given alloca.
void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
/// CreateIRTemp - Create a temporary IR object of the given type, with
/// appropriate alignment. This routine should only be used when a temporary
/// value needs to be stored into an alloca (for example, to avoid explicit
/// PHI construction), but the type is the IR type, not the type appropriate
/// for storing in memory.
llvm::AllocaInst *CreateIRTemp(QualType T, const Twine &Name = "tmp");
/// CreateMemTemp - Create a temporary memory object of the given type, with
/// appropriate alignment.
llvm::AllocaInst *CreateMemTemp(QualType T, const Twine &Name = "tmp");
/// CreateAggTemp - Create a temporary memory object for the given
/// aggregate type.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
CharUnits Alignment = getContext().getTypeAlignInChars(T);
return AggValueSlot::forAddr(CreateMemTemp(T, Name), Alignment,
T.getQualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
}
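// Illustrative usage sketch (an assumed caller pattern): materializing an
// arbitrary expression into a fresh temporary of its own type:
//   llvm::AllocaInst *Tmp = CreateMemTemp(E->getType(), "agg.tmp");
//   EmitAnyExprToMem(E, Tmp, E->getType().getQualifiers(),
//                    /*IsInitializer=*/true);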
/// CreateInAllocaTmp - Create a temporary memory object for the given
/// aggregate type.
AggValueSlot CreateInAllocaTmp(QualType T, const Twine &Name = "inalloca");
/// Emit a cast to void* in the appropriate address space.
llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *EvaluateExprAsBool(const Expr *E);
/// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
void EmitIgnoredExpr(const Expr *E);
/// EmitAnyExpr - Emit code to compute the specified expression which can have
/// any type. The result is returned as an RValue struct. If this is an
/// aggregate expression, the aggSlot argument indicates where the result
/// should be returned.
///
/// \param ignoreResult True if the resulting value isn't used.
RValue EmitAnyExpr(const Expr *E,
AggValueSlot aggSlot = AggValueSlot::ignored(),
bool ignoreResult = false);
// EmitVAListRef - Emit a "reference" to a va_list; this is either the address
// or the value of the expression, depending on how va_list is defined.
llvm::Value *EmitVAListRef(const Expr *E);
/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), but the result will
/// always be accessible even if no aggregate location is provided.
RValue EmitAnyExprToTemp(const Expr *E);
/// EmitAnyExprToMem - Emits the code necessary to evaluate an
/// arbitrary expression into the given memory location.
void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
Qualifiers Quals, bool IsInitializer);
void EmitAnyExprToExn(const Expr *E, llvm::Value *Addr);
/// EmitExprAsInit - Emits the code necessary to initialize a
/// location in memory with the given initializer.
void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
bool capturedByInit);
/// hasVolatileMember - returns true if aggregate type has a volatile
/// member.
bool hasVolatileMember(QualType T) {
if (const RecordType *RT = T->getAs<RecordType>()) {
const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
return RD->hasVolatileMember();
}
return false;
}
/// EmitAggregateAssign - Emit an aggregate assignment.
///
/// The difference from EmitAggregateCopy is that tail padding is not copied.
/// This is required for correctness when assigning non-POD structures in C++.
void EmitAggregateAssign(llvm::Value *DestPtr, llvm::Value *SrcPtr,
QualType EltTy) {
bool IsVolatile = hasVolatileMember(EltTy);
EmitAggregateCopy(DestPtr, SrcPtr, EltTy, IsVolatile, CharUnits::Zero(),
true);
}
void EmitAggregateCopyCtor(llvm::Value *DestPtr, llvm::Value *SrcPtr,
QualType DestTy, QualType SrcTy) {
CharUnits DestTypeAlign = getContext().getTypeAlignInChars(DestTy);
CharUnits SrcTypeAlign = getContext().getTypeAlignInChars(SrcTy);
EmitAggregateCopy(DestPtr, SrcPtr, SrcTy, /*IsVolatile=*/false,
std::min(DestTypeAlign, SrcTypeAlign),
/*IsAssignment=*/false);
}
/// EmitAggregateCopy - Emit an aggregate copy.
///
/// \param isVolatile - True iff either the source or the destination is
/// volatile.
/// \param isAssignment - If false, allow padding to be copied. This often
/// yields more efficient code.
void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
QualType EltTy, bool isVolatile=false,
CharUnits Alignment = CharUnits::Zero(),
bool isAssignment = false);
/// StartBlock - Start a new block named N. If the insert block is a dummy
/// block, reuse it.
void StartBlock(const char *N);
/// GetAddrOfLocalVar - Return the address of a local variable.
llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
llvm::Value *Res = LocalDeclMap[VD];
assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
return Res;
}
/// getOpaqueLValueMapping - Given an opaque value expression (which
/// must be mapped to an l-value), return its mapping.
const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
assert(OpaqueValueMapping::shouldBindAsLValue(e));
llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
it = OpaqueLValues.find(e);
assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
return it->second;
}
/// getOpaqueRValueMapping - Given an opaque value expression (which
/// must be mapped to an r-value), return its mapping.
const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
assert(!OpaqueValueMapping::shouldBindAsLValue(e));
llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
it = OpaqueRValues.find(e);
assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
return it->second;
}
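// Sketch of the intended pairing (OpaqueValueMapping is declared elsewhere in
// this class): a binding is installed for the duration of a scope, and any
// emission of the OpaqueValueExpr within that scope consults these maps:
//   CodeGenFunction::OpaqueValueMapping Binding(CGF, OVE, LV);
//   // ... emit code that refers to OVE ...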
/// getAccessedFieldNo - Given an encoded value and a result number, return
/// the input field number being accessed.
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
llvm::BasicBlock *GetIndirectGotoBlock();
/// EmitNullInitialization - Generate code to set a value of the given type to
/// null. If the type contains data member pointers, they will be initialized
/// to -1 in accordance with the Itanium C++ ABI.
void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
// EmitVAArg - Generate code to get an argument from the passed in pointer
// and update it accordingly. The return value is a pointer to the argument.
// FIXME: We should be able to get rid of this method and use the va_arg
// instruction in LLVM instead once it works well enough.
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
/// emitArrayLength - Compute the length of an array, even if it's a
/// VLA, and drill down to the base element type.
llvm::Value *emitArrayLength(const ArrayType *arrayType,
QualType &baseType,
llvm::Value *&addr);
/// EmitVariablyModifiedType - Capture all the sizes for the VLA expressions in
/// the given variably-modified type and store them in the VLASizeMap.
///
/// This function can be called with a null (unreachable) insert point.
void EmitVariablyModifiedType(QualType Ty);
/// getVLASize - Returns an LLVM value that corresponds to the size,
/// in non-variably-sized elements, of a variable length array type,
/// plus the largest non-variably-sized element type. Assumes that
/// the type has already been emitted with EmitVariablyModifiedType.
std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
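// Sketch of consuming the recorded sizes after EmitVariablyModifiedType has
// run (illustrative; VAT is an assumed VariableArrayType*):
//   std::pair<llvm::Value *, QualType> VlaSize = getVLASize(VAT);
//   llvm::Value *NumElts = VlaSize.first; // element count
//   QualType EltTy = VlaSize.second;      // base element type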
/// LoadCXXThis - Load the value of 'this'. This function is only valid while
/// generating code for a C++ member function.
llvm::Value *LoadCXXThis() {
assert(CXXThisValue && "no 'this' value for this function");
return CXXThisValue;
}
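// Example sketch: forming an lvalue for a member of '*this' inside a member
// function (ThisTy and Field are assumed to be in scope):
//   LValue ThisLV =
//       MakeNaturalAlignAddrLValue(LoadCXXThis(), ThisTy->getPointeeType());
//   LValue FieldLV = EmitLValueForField(ThisLV, Field);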
/// LoadCXXVTT - Load the VTT parameter to base constructors/destructors that
/// have virtual bases.
// FIXME: Every place that calls LoadCXXVTT is something
// that needs to be abstracted properly.
llvm::Value *LoadCXXVTT() {
assert(CXXStructorImplicitParamValue && "no VTT value for this function");
return CXXStructorImplicitParamValue;
}
/// LoadCXXStructorImplicitParam - Load the implicit parameter
/// for a constructor/destructor.
llvm::Value *LoadCXXStructorImplicitParam() {
assert(CXXStructorImplicitParamValue &&
"no implicit argument value for this function");
return CXXStructorImplicitParamValue;
}
/// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
/// complete class to the given direct base.
llvm::Value *
GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
const CXXRecordDecl *Derived,
const CXXRecordDecl *Base,
bool BaseIsVirtual);
/// GetAddressOfBaseClass - This function will add the necessary delta to the
/// load of 'this' and return the address of the base class.
llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
const CXXRecordDecl *Derived,
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd,
bool NullCheckValue, SourceLocation Loc);
llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
const CXXRecordDecl *Derived,
CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd,
bool NullCheckValue);
/// GetVTTParameter - Return the VTT parameter that should be passed to a
/// base constructor/destructor with virtual bases.
/// FIXME: VTTs are Itanium ABI-specific, so the definition should move
/// to ItaniumCXXABI.cpp together with all the references to VTT.
llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
bool Delegating);
void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
CXXCtorType CtorType,
const FunctionArgList &Args,
SourceLocation Loc);
// It's important not to confuse this and the previous function. Delegating
// constructors are the C++11 feature. The constructor delegate optimization
// is used to reduce duplication in the base and complete constructors where
// they are substantially the same.
void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
const FunctionArgList &Args);
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
bool ForVirtualBase, bool Delegating,
llvm::Value *This, const CXXConstructExpr *E);
void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
llvm::Value *This, llvm::Value *Src,
const CXXConstructExpr *E);
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
const ConstantArrayType *ArrayTy,
llvm::Value *ArrayPtr,
const CXXConstructExpr *E,
bool ZeroInitialization = false);
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
llvm::Value *NumElements,
llvm::Value *ArrayPtr,
const CXXConstructExpr *E,
bool ZeroInitialization = false);
static Destroyer destroyCXXObject;
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
bool ForVirtualBase, bool Delegating,
llvm::Value *This);
void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
llvm::Type *ElementTy, llvm::Value *NewPtr,
llvm::Value *NumElements,
llvm::Value *AllocSizeWithoutCookie);
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
llvm::Value *Ptr);
llvm::Value *EmitLifetimeStart(uint64_t Size, llvm::Value *Addr);
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
QualType DeleteTy);
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
const Expr *Arg, bool IsDelete);
llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
llvm::Value* EmitCXXUuidofExpr(const CXXUuidofExpr *E);
/// \brief Situations in which we might emit a check for the suitability of a
/// pointer or glvalue.
enum TypeCheckKind {
/// Checking the operand of a load. Must be suitably sized and aligned.
TCK_Load,
/// Checking the destination of a store. Must be suitably sized and aligned.
TCK_Store,
/// Checking the bound value in a reference binding. Must be suitably sized
/// and aligned, but is not required to refer to an object (until the
/// reference is used), per core issue 453.
TCK_ReferenceBinding,
/// Checking the object expression in a non-static data member access. Must
/// be an object within its lifetime.
TCK_MemberAccess,
/// Checking the 'this' pointer for a call to a non-static member function.
/// Must be an object within its lifetime.
TCK_MemberCall,
/// Checking the 'this' pointer for a constructor call.
TCK_ConstructorCall,
/// Checking the operand of a static_cast to a derived pointer type. Must be
/// null or an object within its lifetime.
TCK_DowncastPointer,
/// Checking the operand of a static_cast to a derived reference type. Must
/// be an object within its lifetime.
TCK_DowncastReference,
/// Checking the operand of a cast to a base object. Must be suitably sized
/// and aligned.
TCK_Upcast,
/// Checking the operand of a cast to a virtual base object. Must be an
/// object within its lifetime.
TCK_UpcastToVirtualBase
};
/// \brief Whether any type-checking sanitizers are enabled. If \c false,
/// calls to EmitTypeCheck can be skipped.
bool sanitizePerformTypeCheck() const;
/// \brief Emit a check that \p V is the address of storage of the
/// appropriate size and alignment for an object of type \p Type.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
QualType Type, CharUnits Alignment = CharUnits::Zero(),
bool SkipNullCheck = false);
/// \brief Emit a check that \p Base points into an array object, which
/// we can access at index \p Index. \p Accessed should be \c false if
/// this expression is used as an lvalue, for instance in "&Arr[Idx]".
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
QualType IndexType, bool Accessed);
llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre);
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre);
void EmitAlignmentAssumption(llvm::Value *PtrValue, unsigned Alignment,
llvm::Value *OffsetValue = nullptr) {
Builder.CreateAlignmentAssumption(CGM.getDataLayout(), PtrValue, Alignment,
OffsetValue);
}
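// For reference, CreateAlignmentAssumption expands to an llvm.assume over a
// pointer-mask test; the generated IR looks roughly like:
//   %ptrint = ptrtoint i8* %ptr to i64
//   %maskedptr = and i64 %ptrint, <Alignment - 1>
//   %maskcond = icmp eq i64 %maskedptr, 0
//   call void @llvm.assume(i1 %maskcond)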
//===--------------------------------------------------------------------===//
// Declaration Emission
//===--------------------------------------------------------------------===//
/// EmitDecl - Emit a declaration.
///
/// This function can be called with a null (unreachable) insert point.
void EmitDecl(const Decl &D);
/// EmitVarDecl - Emit a local variable declaration.
///
/// This function can be called with a null (unreachable) insert point.
void EmitVarDecl(const VarDecl &D);
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
bool capturedByInit);
void EmitScalarInit(llvm::Value *init, LValue lvalue);
typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
llvm::Value *Address);
/// \brief Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
bool isTrivialInitializer(const Expr *Init);
/// EmitAutoVarDecl - Emit an auto variable declaration.
///
/// This function can be called with a null (unreachable) insert point.
void EmitAutoVarDecl(const VarDecl &D);
class AutoVarEmission {
friend class CodeGenFunction;
const VarDecl *Variable;
/// The alignment of the variable.
CharUnits Alignment;
/// The address of the alloca. Null if the variable was emitted
/// as a global constant.
llvm::Value *Address;
llvm::Value *NRVOFlag;
/// True if the variable is a __block variable.
bool IsByRef;
/// True if the variable is of aggregate type and has a constant
/// initializer.
bool IsConstantAggregate;
/// Non-null if we should use lifetime annotations.
llvm::Value *SizeForLifetimeMarkers;
struct Invalid {};
AutoVarEmission(Invalid) : Variable(nullptr) {}
AutoVarEmission(const VarDecl &variable)
: Variable(&variable), Address(nullptr), NRVOFlag(nullptr),
IsByRef(false), IsConstantAggregate(false),
SizeForLifetimeMarkers(nullptr) {}
bool wasEmittedAsGlobal() const { return Address == nullptr; }
public:
static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
bool useLifetimeMarkers() const {
return SizeForLifetimeMarkers != nullptr;
}
llvm::Value *getSizeForLifetimeMarkers() const {
assert(useLifetimeMarkers());
return SizeForLifetimeMarkers;
}
/// Returns the raw, allocated address, which is not necessarily
/// the address of the object itself.
llvm::Value *getAllocatedAddress() const {
return Address;
}
/// Returns the address of the object within this declaration.
/// Note that this does not chase the forwarding pointer for
/// __block decls.
llvm::Value *getObjectAddress(CodeGenFunction &CGF) const {
if (!IsByRef) return Address;
auto F = CGF.getByRefValueLLVMField(Variable);
return CGF.Builder.CreateStructGEP(F.first, Address, F.second,
Variable->getNameAsString());
}
};
AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
void EmitAutoVarInit(const AutoVarEmission &emission);
void EmitAutoVarCleanups(const AutoVarEmission &emission);
void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
QualType::DestructionKind dtorKind);
void EmitStaticVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage);
/// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, bool ArgIsPointer,
unsigned ArgNo);
/// protectFromPeepholes - Protect a value that we're intending to
/// store to the side, but which will probably be used later, from
/// aggressive peepholing optimizations that might delete it.
///
/// Pass the result to unprotectFromPeepholes to declare that
/// protection is no longer required.
///
/// There's no particular reason why this shouldn't apply to
/// l-values, it's just that no existing peepholes work on pointers.
PeepholeProtection protectFromPeepholes(RValue rvalue);
void unprotectFromPeepholes(PeepholeProtection protection);
//===--------------------------------------------------------------------===//
// Statement Emission
//===--------------------------------------------------------------------===//
/// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
void EmitStopPoint(const Stmt *S);
/// EmitStmt - Emit the code for the statement \arg S. It is legal to call
/// this function even if there is no current insertion point.
///
/// This function may clear the current insertion point; callers should use
/// EnsureInsertPoint if they wish to subsequently generate code without first
/// calling EmitBlock, EmitBranch, or EmitStmt.
void EmitStmt(const Stmt *S);
/// EmitSimpleStmt - Try to emit a "simple" statement which does not
/// necessarily require an insertion point or debug information; typically
/// because the statement amounts to a jump or a container of other
/// statements.
///
/// \return True if the statement was handled.
bool EmitSimpleStmt(const Stmt *S);
llvm::Value *EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
AggValueSlot AVS = AggValueSlot::ignored());
llvm::Value *EmitCompoundStmtWithoutScope(const CompoundStmt &S,
bool GetLast = false,
AggValueSlot AVS =
AggValueSlot::ignored());
/// EmitLabel - Emit the block for the given label. It is legal to call this
/// function even if there is no current insertion point.
void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
void EmitLabelStmt(const LabelStmt &S);
void EmitAttributedStmt(const AttributedStmt &S);
void EmitGotoStmt(const GotoStmt &S);
void EmitDiscardStmt(const DiscardStmt &S); // HLSL Change
void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
void EmitIfStmt(const IfStmt &S,
ArrayRef<const Attr *> Attrs = None); // HLSL Change
void EmitCondBrHints(llvm::LLVMContext &Context, llvm::BranchInst *CondBr,
ArrayRef<const Attr *> Attrs);
void EmitWhileStmt(const WhileStmt &S,
ArrayRef<const Attr *> Attrs = None);
void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = None);
void EmitForStmt(const ForStmt &S,
ArrayRef<const Attr *> Attrs = None);
void EmitReturnStmt(const ReturnStmt &S);
void EmitDeclStmt(const DeclStmt &S);
void EmitBreakStmt(const BreakStmt &S);
void EmitContinueStmt(const ContinueStmt &S);
void EmitSwitchStmt(const SwitchStmt &S,
ArrayRef<const Attr *> Attrs = None); // HLSL Change
void EmitDefaultStmt(const DefaultStmt &S);
void EmitCaseStmt(const CaseStmt &S);
void EmitCaseStmtRange(const CaseStmt &S);
void EmitAsmStmt(const AsmStmt &S);
void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
void EmitCXXTryStmt(const CXXTryStmt &S);
void EmitSEHTryStmt(const SEHTryStmt &S);
void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
void EnterSEHTryStmt(const SEHTryStmt &S);
void ExitSEHTryStmt(const SEHTryStmt &S);
void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter,
const Stmt *OutlinedStmt);
llvm::Function *GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
const SEHExceptStmt &Except);
llvm::Function *GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
const SEHFinallyStmt &Finally);
void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
llvm::Value *ParentFP,
llvm::Value *EntryEBP);
llvm::Value *EmitSEHExceptionCode();
llvm::Value *EmitSEHExceptionInfo();
llvm::Value *EmitSEHAbnormalTermination();
/// Scan the outlined statement for captures from the parent function. For
/// each capture, mark the capture as escaped and emit a call to
/// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt,
bool IsFilter);
/// Recovers the address of a local in a parent function. ParentVar is the
/// address of the variable used in the immediate parent function. It can
/// either be an alloca or a call to llvm.localrecover if there are nested
/// outlined functions. ParentFP is the frame pointer of the outermost parent
/// frame.
llvm::Value *recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
llvm::Value *ParentVar,
llvm::Value *ParentFP);
void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
ArrayRef<const Attr *> Attrs = None);
LValue InitCapturedStruct(const CapturedStmt &S);
llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
void GenerateCapturedStmtFunctionProlog(const CapturedStmt &S);
llvm::Function *GenerateCapturedStmtFunctionEpilog(const CapturedStmt &S);
llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
llvm::Value *GenerateCapturedStmtArgument(const CapturedStmt &S);
/// \brief Perform element-by-element copying of arrays with type \a
/// OriginalType from \a SrcAddr to \a DestAddr using the copying procedure
/// generated by \a CopyGen.
///
/// \param DestAddr Address of the destination array.
/// \param SrcAddr Address of the source array.
/// \param OriginalType Type of destination and source arrays.
/// \param CopyGen Copying procedure that copies the value of a single array
/// element to another single array element.
void EmitOMPAggregateAssign(
llvm::Value *DestAddr, llvm::Value *SrcAddr, QualType OriginalType,
const llvm::function_ref<void(llvm::Value *, llvm::Value *)> &CopyGen);
/// \brief Emit proper copying of data from one variable to another.
///
/// \param OriginalType Original type of the copied variables.
/// \param DestAddr Destination address.
/// \param SrcAddr Source address.
/// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
/// type of the base array element).
/// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
/// the base array element).
/// \param Copy Actual copying expression for copying data from \a SrcVD to \a
/// DestVD.
void EmitOMPCopy(CodeGenFunction &CGF, QualType OriginalType,
llvm::Value *DestAddr, llvm::Value *SrcAddr,
const VarDecl *DestVD, const VarDecl *SrcVD,
const Expr *Copy);
/// \brief Emit atomic update code for constructs: \a X = \a X \a BO \a E or
/// \a X = \a E \a BO \a X.
///
/// \param X Value to be updated.
/// \param E Update value.
/// \param BO Binary operation for update operation.
/// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
/// expression, false otherwise.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param CommonGen Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \returns <true, OldAtomicValue> if simple 'atomicrmw' instruction was
/// generated, <false, RValue::get(nullptr)> otherwise.
std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
llvm::AtomicOrdering AO, SourceLocation Loc,
const llvm::function_ref<RValue(RValue)> &CommonGen);
bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope);
void EmitOMPPrivateClause(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope);
/// \brief Emit code for the copyin clause in directive \a D. The following
/// code is generated at the start of outlined functions for directives:
/// \code
/// threadprivate_var1 = master_threadprivate_var1;
/// operator=(threadprivate_var2, master_threadprivate_var2);
/// ...
/// __kmpc_barrier(&loc, global_tid);
/// \endcode
///
/// \param D OpenMP directive possibly with 'copyin' clause(s).
/// \returns true if at least one copyin variable is found, false otherwise.
bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
/// \brief Emit initial code for lastprivate variables. If some variable is
/// not also firstprivate, then the default initialization is used. Otherwise
/// initialization of this variable is performed by EmitOMPFirstprivateClause
/// method.
///
/// \param D Directive that may have 'lastprivate' clauses.
/// \param PrivateScope Private scope for capturing lastprivate variables for
/// proper codegen in internal captured statement.
///
/// \returns true if there is at least one lastprivate variable, false
/// otherwise.
bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope);
/// \brief Emit final copying of lastprivate values to original variables at
/// the end of the worksharing or simd directive.
///
/// \param D Directive that has at least one 'lastprivate' clause.
/// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
/// it is the last iteration of the loop code in associated directive, or to
/// 'i1 false' otherwise. If this item is nullptr, no final check is required.
void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D,
llvm::Value *IsLastIterCond = nullptr);
/// \brief Emit initial code for reduction variables. Creates reduction copies
/// and initializes them with the values according to OpenMP standard.
///
/// \param D Directive (possibly) with the 'reduction' clause.
/// \param PrivateScope Private scope for capturing reduction variables for
/// proper codegen in internal captured statement.
///
void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope);
/// \brief Emit final update of reduction values to original variables at
/// the end of the directive.
///
/// \param D Directive that has at least one 'reduction' clause.
void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D);
/// \brief Emit initial code for linear variables. Creates private copies
/// and initializes them with the values according to OpenMP standard.
///
/// \param D Directive (possibly) with the 'linear' clause.
void EmitOMPLinearClauseInit(const OMPLoopDirective &D);
void EmitOMPParallelDirective(const OMPParallelDirective &S);
void EmitOMPSimdDirective(const OMPSimdDirective &S);
void EmitOMPForDirective(const OMPForDirective &S);
void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
void EmitOMPSectionDirective(const OMPSectionDirective &S);
void EmitOMPSingleDirective(const OMPSingleDirective &S);
void EmitOMPMasterDirective(const OMPMasterDirective &S);
void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S);
void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S);
void EmitOMPTaskDirective(const OMPTaskDirective &S);
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
void EmitOMPFlushDirective(const OMPFlushDirective &S);
void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
void EmitOMPTargetDirective(const OMPTargetDirective &S);
void EmitOMPTeamsDirective(const OMPTeamsDirective &S);
void
EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S);
void EmitOMPCancelDirective(const OMPCancelDirective &S);
/// \brief Emit inner loop of the worksharing/simd construct.
///
/// \param S Directive, for which the inner loop must be emitted.
/// \param RequiresCleanup true if the directive has some associated private
/// variables.
/// \param LoopCond Boolean condition for loop continuation.
/// \param IncExpr Increment expression for the loop control variable.
/// \param BodyGen Generator for the inner body of the inner loop.
/// \param PostIncGen Generator for post-increment code (required for ordered
/// loop directives).
void EmitOMPInnerLoop(
const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
const Expr *IncExpr,
const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen);
JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind);
private:
/// Helpers for the OpenMP loop directives.
void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
void EmitOMPSimdInit(const OMPLoopDirective &D);
void EmitOMPSimdFinal(const OMPLoopDirective &D);
/// \brief Emit code for the worksharing loop-based directive.
/// \return true if this construct has any lastprivate clause, false
/// otherwise.
bool EmitOMPWorksharingLoop(const OMPLoopDirective &S);
void EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
const OMPLoopDirective &S,
OMPPrivateScope &LoopScope, bool Ordered,
llvm::Value *LB, llvm::Value *UB, llvm::Value *ST,
llvm::Value *IL, llvm::Value *Chunk);
/// \brief Emit code for sections directive.
OpenMPDirectiveKind EmitSections(const OMPExecutableDirective &S);
public:
//===--------------------------------------------------------------------===//
// LValue Expression Emission
//===--------------------------------------------------------------------===//
/// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
RValue GetUndefRValue(QualType Ty);
/// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
/// and issue an ErrorUnsupported style diagnostic (using the
/// provided Name).
RValue EmitUnsupportedRValue(const Expr *E,
const char *Name);
/// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
/// an ErrorUnsupported style diagnostic (using the provided Name).
LValue EmitUnsupportedLValue(const Expr *E,
const char *Name);
/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield
/// reference. In either case, the LLVM Value* in the LValue structure is
/// guaranteed to be an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of
/// the LLVM value is known: For example, it may not be a pointer to an
/// integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed
/// size, this method guarantees that the returned pointer type will point to
/// an LLVM type of the same size of the lvalue's type. If the lvalue has a
/// variable length type, this is not possible.
///
LValue EmitLValue(const Expr *E);
/// \brief Same as EmitLValue but additionally we generate checking code to
/// guard against undefined behavior. This is only suitable when we know
/// that the address will be used to access the object.
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
RValue convertTempToRValue(llvm::Value *addr, QualType type,
SourceLocation Loc);
void EmitAtomicInit(Expr *E, LValue lvalue);
bool LValueIsSuitableForInlineAtomic(LValue Src);
bool typeIsSuitableForInlineAtomic(QualType Ty, bool IsVolatile) const;
RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
AggValueSlot Slot = AggValueSlot::ignored());
RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
llvm::AtomicOrdering AO, bool IsVolatile = false,
AggValueSlot slot = AggValueSlot::ignored());
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
bool IsVolatile, bool isInit);
std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
const llvm::function_ref<RValue(RValue)> &UpdateOp,
bool IsVolatile);
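// Usage sketch (illustrative): performing a read-modify-write that cannot be
// expressed as a single atomicrmw; UpdateOp receives the old value and
// returns the new one (ComputeNew is a hypothetical helper):
//   EmitAtomicUpdate(LV, llvm::SequentiallyConsistent,
//                    [&](RValue Old) { return ComputeNew(Old); },
//                    /*IsVolatile=*/false);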
/// EmitToMemory - Change a scalar value from its value
/// representation to its in-memory representation.
llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
/// EmitFromMemory - Change a scalar value from its memory
/// representation to its value representation.
llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
/// EmitLoadOfScalar - Load a scalar value from an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation.
llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
unsigned Alignment, QualType Ty,
SourceLocation Loc,
llvm::MDNode *TBAAInfo = nullptr,
QualType TBAABaseTy = QualType(),
uint64_t TBAAOffset = 0);
/// EmitLoadOfScalar - Load a scalar value from an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation. The l-value must be a simple
/// l-value.
llvm::Value *EmitLoadOfScalar(LValue lvalue, SourceLocation Loc);
/// EmitStoreOfScalar - Store a scalar value to an address, taking
/// care to appropriately convert from the LLVM value representation
/// to the memory representation.
void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
bool Volatile, unsigned Alignment, QualType Ty,
llvm::MDNode *TBAAInfo = nullptr, bool isInit = false,
QualType TBAABaseTy = QualType(),
uint64_t TBAAOffset = 0);
/// EmitStoreOfScalar - Store a scalar value to an address, taking
/// care to appropriately convert from the LLVM value representation
/// to the memory representation. The l-value must be a simple
/// l-value. The isInit flag indicates whether this is an initialization.
/// If so, atomic qualifiers are ignored and the store is always non-atomic.
void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
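// Illustrative sketch: copying a scalar between two simple lvalues routes
// through the memory<->value conversions these helpers perform:
//   llvm::Value *V = EmitLoadOfScalar(SrcLV, Loc);
//   EmitStoreOfScalar(V, DstLV);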
/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
/// this method emits the address of the lvalue, then loads the result as an
/// rvalue, returning the rvalue.
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc);
RValue EmitLoadOfExtVectorElementLValue(LValue V);
RValue EmitLoadOfBitfieldLValue(LValue LV);
RValue EmitLoadOfGlobalRegLValue(LValue LV);
RValue EmitLoadOfExtMatrixElementLValue(LValue V); // HLSL Change
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst);
/// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
/// as EmitStoreThroughLValue.
///
/// \param Result [out] - If non-null, this will be set to a Value* for the
/// bit-field contents after the store, appropriate for use as the result of
/// an assignment to the bit-field.
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
llvm::Value **Result=nullptr);
/// Emit an l-value for an assignment (simple or compound) of complex type.
LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
llvm::Value *&Result);
// Note: only available for agg return types
LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
// Note: only available for agg return types
LValue EmitCallExprLValue(const CallExpr *E);
// Note: only available for agg return types
LValue EmitVAArgExprLValue(const VAArgExpr *E);
LValue EmitDeclRefLValue(const DeclRefExpr *E);
LValue EmitReadRegister(const VarDecl *VD);
LValue EmitStringLiteralLValue(const StringLiteral *E);
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
LValue EmitPredefinedLValue(const PredefinedExpr *E);
LValue EmitUnaryOpLValue(const UnaryOperator *E);
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
bool Accessed = false);
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
LValue EmitExtMatrixElementExpr(const ExtMatrixElementExpr *E); // HLSL Change
LValue EmitHLSLVectorElementExpr(const HLSLVectorElementExpr *E); // HLSL Change
LValue EmitMemberExpr(const MemberExpr *E);
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
LValue EmitInitListLValue(const InitListExpr *E);
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
LValue EmitCastLValue(const CastExpr *E);
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
llvm::Value *EmitExtVectorElementLValue(LValue V);
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);
class ConstantEmission {
llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
ConstantEmission(llvm::Constant *C, bool isReference)
: ValueAndIsReference(C, isReference) {}
public:
ConstantEmission() {}
static ConstantEmission forReference(llvm::Constant *C) {
return ConstantEmission(C, true);
}
static ConstantEmission forValue(llvm::Constant *C) {
return ConstantEmission(C, false);
}
explicit operator bool() const {
return ValueAndIsReference.getOpaqueValue() != nullptr;
}
bool isReference() const { return ValueAndIsReference.getInt(); }
LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
assert(isReference());
return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
refExpr->getType());
}
llvm::Constant *getValue() const {
assert(!isReference());
return ValueAndIsReference.getPointer();
}
};
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
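// Typical caller pattern (a sketch): when a DeclRefExpr folds to a constant,
// emission can bypass the normal lvalue path:
//   if (ConstantEmission CE = tryEmitAsConstant(RefExpr)) {
//     if (CE.isReference())
//       return CE.getReferenceLValue(*this, RefExpr);
//     llvm::Constant *C = CE.getValue(); // use directly as an rvalue
//   }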
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
AggValueSlot slot = AggValueSlot::ignored());
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
LValue EmitLValueForLambdaField(const FieldDecl *Field);
/// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
/// if the Field is a reference, this will return the address of the reference
/// and not the address of the value stored in the reference.
LValue EmitLValueForFieldInitialization(LValue Base,
const FieldDecl* Field);
LValue EmitLValueForIvar(QualType ObjectTy,
llvm::Value* Base, const ObjCIvarDecl *Ivar,
unsigned CVRQualifiers);
LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
LValue EmitLambdaLValue(const LambdaExpr *E);
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
LValue EmitStmtExprLValue(const StmtExpr *E);
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
//===--------------------------------------------------------------------===//
// Scalar Expression Emission
//===--------------------------------------------------------------------===//
/// EmitCall - Generate a call of the given function, expecting the given
/// result type, and using the given argument list which specifies both the
/// LLVM arguments and the types they were derived from.
///
/// \param TargetDecl - If given, the decl of the function in a direct call;
/// used to set attributes on the call (noreturn, etc.).
RValue EmitCall(const CGFunctionInfo &FnInfo,
llvm::Value *Callee,
ReturnValueSlot ReturnValue,
const CallArgList &Args,
const Decl *TargetDecl = nullptr,
llvm::Instruction **callOrInvoke = nullptr);
RValue EmitCall(QualType FnType, llvm::Value *Callee, const CallExpr *E,
ReturnValueSlot ReturnValue,
const Decl *TargetDecl = nullptr,
llvm::Value *Chain = nullptr);
RValue EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue = ReturnValueSlot());
llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
const Twine &name = "");
llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
ArrayRef<llvm::Value*> args,
const Twine &name = "");
llvm::CallInst *EmitNounwindRuntimeCall(llvm::Value *callee,
const Twine &name = "");
llvm::CallInst *EmitNounwindRuntimeCall(llvm::Value *callee,
ArrayRef<llvm::Value*> args,
const Twine &name = "");
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
ArrayRef<llvm::Value *> Args,
const Twine &Name = "");
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
const Twine &Name = "");
llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee,
ArrayRef<llvm::Value*> args,
const Twine &name = "");
llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee,
const Twine &name = "");
void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
ArrayRef<llvm::Value*> args);
llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
NestedNameSpecifier *Qual,
llvm::Type *Ty);
llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
CXXDtorType Type,
const CXXRecordDecl *RD);
RValue
EmitCXXMemberOrOperatorCall(const CXXMethodDecl *MD, llvm::Value *Callee,
ReturnValueSlot ReturnValue, llvm::Value *This,
llvm::Value *ImplicitParam,
QualType ImplicitParamTy, const CallExpr *E);
RValue EmitCXXStructorCall(const CXXMethodDecl *MD, llvm::Value *Callee,
ReturnValueSlot ReturnValue, llvm::Value *This,
llvm::Value *ImplicitParam,
QualType ImplicitParamTy, const CallExpr *E,
StructorType Type);
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
ReturnValueSlot ReturnValue);
RValue EmitCXXMemberOrOperatorMemberCallExpr(const CallExpr *CE,
const CXXMethodDecl *MD,
ReturnValueSlot ReturnValue,
bool HasQualifier,
NestedNameSpecifier *Qualifier,
bool IsArrow, const Expr *Base);
// Compute the object pointer.
RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
ReturnValueSlot ReturnValue);
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
const CXXMethodDecl *MD,
ReturnValueSlot ReturnValue);
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
ReturnValueSlot ReturnValue);
// HLSL Change Begins
RValue EmitHLSLBuiltinCallExpr(const FunctionDecl *FD, const CallExpr *E,
ReturnValueSlot ReturnValue);
// HLSL Change Ends
RValue EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue);
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
/// EmitTargetBuiltinExpr - Emit the given builtin call. Returns null if the
/// call is unhandled by the current target.
llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
const llvm::CmpInst::Predicate Fp,
const llvm::CmpInst::Predicate Ip,
const llvm::Twine &Name = "");
llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
unsigned LLVMIntrinsic,
unsigned AltLLVMIntrinsic,
const char *NameHint,
unsigned Modifier,
const CallExpr *E,
SmallVectorImpl<llvm::Value *> &Ops,
llvm::Value *Align = nullptr);
llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
unsigned Modifier, llvm::Type *ArgTy,
const CallExpr *E);
llvm::Value *EmitNeonCall(llvm::Function *F,
SmallVectorImpl<llvm::Value*> &O,
const char *name,
unsigned shift = 0, bool rightshift = false);
llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
bool negateForRightShift);
llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
llvm::Type *Ty, bool usgn, const char *name);
// Helper functions for EmitAArch64BuiltinExpr.
llvm::Value *vectorWrapScalar8(llvm::Value *Op);
llvm::Value *vectorWrapScalar16(llvm::Value *Op);
llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
const ObjCMethodDecl *MethodWithObjects);
llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return = ReturnValueSlot());
/// Retrieves the default cleanup kind for an ARC cleanup.
/// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
CleanupKind getARCCleanupKind() {
return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
? NormalAndEHCleanup : NormalCleanup;
}
// ARC primitives.
void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
void EmitARCDestroyWeak(llvm::Value *addr);
llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
bool ignored);
void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
bool resultIgnored);
llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
bool resultIgnored);
llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
void EmitARCDestroyStrong(llvm::Value *addr, ARCPreciseLifetime_t precise);
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
llvm::Value *EmitARCAutorelease(llvm::Value *value);
llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
std::pair<LValue,llvm::Value*>
EmitARCStoreAutoreleasing(const BinaryOperator *e);
std::pair<LValue,llvm::Value*>
EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
llvm::Value *EmitObjCThrowOperand(const Expr *expr);
llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
void EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values);
static Destroyer destroyARCStrongImprecise;
static Destroyer destroyARCStrongPrecise;
static Destroyer destroyARCWeak;
void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
llvm::Value *EmitObjCAutoreleasePoolPush();
llvm::Value *EmitObjCMRRAutoreleasePoolPush();
void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
/// \brief Emits a reference binding to the passed-in expression.
RValue EmitReferenceBindingToExpr(const Expr *E);
//===--------------------------------------------------------------------===//
// Expression Emission
//===--------------------------------------------------------------------===//
// Expressions are broken into three classes: scalar, complex, aggregate.
/// EmitScalarExpr - Emit the computation of the specified expression of LLVM
/// scalar type, returning the result.
llvm::Value *EmitScalarExpr(const Expr *E, bool IgnoreResultAssign = false);
/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
QualType DstTy);
/// EmitComplexToScalarConversion - Emit a conversion from the specified
/// complex type to the specified destination type, where the destination type
/// is an LLVM scalar type.
llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
QualType DstTy);
/// EmitAggExpr - Emit the computation of the specified expression
/// of aggregate type. The result is computed into the given slot,
/// which may be null to indicate that the value is not needed.
void EmitAggExpr(const Expr *E, AggValueSlot AS);
/// EmitAggExprToLValue - Emit the computation of the specified expression of
/// aggregate type into a temporary LValue.
LValue EmitAggExprToLValue(const Expr *E);
/// EmitGCMemmoveCollectable - Emit special API for structs with object
/// pointers.
void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
QualType Ty);
/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void EmitExtendGCLifetime(llvm::Value *object);
/// EmitComplexExpr - Emit the computation of the specified expression of
/// complex type, returning the result.
ComplexPairTy EmitComplexExpr(const Expr *E,
bool IgnoreReal = false,
bool IgnoreImag = false);
/// EmitComplexExprIntoLValue - Emit the given expression of complex
/// type and place its result into the specified l-value.
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);
/// EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);
/// EmitLoadOfComplex - Load a complex number from the specified l-value.
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
AddInitializerToStaticVarDecl(const VarDecl &D,
llvm::GlobalVariable *GV);
/// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
/// variable with global storage.
void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
bool PerformInit);
llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::Constant *Dtor,
llvm::Constant *Addr);
/// Call atexit() with a function that passes the given argument to
/// the given function.
void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::Constant *fn,
llvm::Constant *addr);
/// Emit code in this function to perform a guarded variable
/// initialization. Guarded initializations are used when it's not
/// possible to prove that an initialization will be done exactly
/// once, e.g. with a static local variable or a static data member
/// of a class template.
void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
bool PerformInit);
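// A minimal sketch of the guarded-initialization pattern this emits. The
// guard calls are ABI-specific; the Itanium names below are one example,
// not what every ABI produces:
//
//   if (__cxa_guard_acquire(&guard)) {  // first thread to arrive wins
//     /* run the initializer for D exactly once */
//     __cxa_guard_release(&guard);      // publish "initialized"
//   }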
/// GenerateCXXGlobalInitFunc - Generates code for initializing global
/// variables.
void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
ArrayRef<llvm::Function *> CXXThreadLocals,
llvm::GlobalVariable *Guard = nullptr);
/// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
/// variables.
void GenerateCXXGlobalDtorsFunc(
llvm::Function *Fn,
const std::vector<std::pair<llvm::WeakTrackingVH, llvm::Constant *>>
&DtorsAndObjects);
void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
llvm::GlobalVariable *Addr,
bool PerformInit);
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
const Expr *Exp);
void enterFullExpression(const ExprWithCleanups *E) {
if (E->getNumObjects() == 0) return;
enterNonTrivialFullExpression(E);
}
void enterNonTrivialFullExpression(const ExprWithCleanups *E);
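// Typical caller pattern (a sketch; RunCleanupsScope is this class's RAII
// cleanup helper, and 'EWC' is an assumed ExprWithCleanups*):
//
//   RunCleanupsScope Scope(CGF);
//   CGF.enterFullExpression(EWC);  // cheap no-op when there are no cleanups
//   llvm::Value *V = CGF.EmitScalarExpr(EWC->getSubExpr());
//   // Scope's destructor pops any cleanups the full-expression entered.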
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = nullptr);
//===--------------------------------------------------------------------===//
// Annotations Emission
//===--------------------------------------------------------------------===//
/// Emit an annotation call (intrinsic or builtin).
llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
llvm::Value *AnnotatedVal,
StringRef AnnotationStr,
SourceLocation Location);
/// Emit local annotations for the local variable V, declared by D.
void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
/// Emit field annotations for the given field & value. Returns the
/// annotation result.
llvm::Value *EmitFieldAnnotations(const FieldDecl *D, llvm::Value *V);
//===--------------------------------------------------------------------===//
// Internal Helpers
//===--------------------------------------------------------------------===//
/// ContainsLabel - Return true if the statement contains a label in it. If
/// this statement is not executed normally, the absence of a label means
/// that we can just remove the code.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
static bool containsBreak(const Stmt *S);
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds, return true and set the boolean result in Result.
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result);
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds, return true and set the folded value in Result.
bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result);
/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
/// if statement) to the specified blocks. Based on the condition, this might
/// try to simplify the codegen of the conditional based on the branch.
/// TrueCount should be the number of times we expect the condition to
/// evaluate to true based on PGO data.
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
llvm::BasicBlock *FalseBlock, uint64_t TrueCount);
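// Sketch of how the folding and branching helpers combine ('ThenBlock',
// 'ElseBlock', and 'TrueCount' are assumed names; counts come from PGO
// data when available):
//
//   bool CondConst;
//   if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConst)) {
//     // Only one arm is reachable; emit just that arm, no branch needed.
//   } else {
//     CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount);
//   }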
/// \brief Emit a description of a type in a format suitable for passing to
/// a runtime sanitizer handler.
llvm::Constant *EmitCheckTypeDescriptor(QualType T);
/// \brief Convert a value into a format suitable for passing to a runtime
/// sanitizer handler.
llvm::Value *EmitCheckValue(llvm::Value *V);
/// \brief Emit a description of a source location in a format suitable for
/// passing to a runtime sanitizer handler.
llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
/// \brief Create a basic block that will call a handler function in a
/// sanitizer runtime with the provided arguments, and create a conditional
/// branch to it.
void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
StringRef CheckName, ArrayRef<llvm::Constant *> StaticArgs,
ArrayRef<llvm::Value *> DynamicArgs);
/// \brief Create a basic block that will call the trap intrinsic, and emit a
/// conditional branch to it, for the -ftrapv checks.
void EmitTrapCheck(llvm::Value *Checked);
/// \brief Emit a call to trap or debugtrap and attach function attribute
/// "trap-func-name" if specified.
llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);
/// \brief Create a check for a function parameter that may potentially be
/// declared as non-null.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
const FunctionDecl *FD, unsigned ParmNum);
/// EmitCallArg - Emit a single call argument.
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
/// EmitDelegateCallArg - We are performing a delegate call; that
/// is, the current function is delegating to another one. Produce
/// a r-value suitable for passing the given parameter.
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
SourceLocation loc);
/// SetFPAccuracy - Set the minimum required accuracy of the given floating
/// point operation, expressed as the maximum relative error in ulp.
void SetFPAccuracy(llvm::Value *Val, float Accuracy);
private:
llvm::MDNode *getRangeForLoadFromType(QualType Ty);
void EmitReturnOfRValue(RValue RV, QualType Ty);
void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
llvm::SmallVector<std::pair<llvm::Instruction *, llvm::Value *>, 4>
DeferredReplacements;
/// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
/// from function arguments into \arg Dst. See ABIArgInfo::Expand.
///
/// \param AI - The first function argument of the expansion.
void ExpandTypeFromArgs(QualType Ty, LValue Dst,
SmallVectorImpl<llvm::Argument *>::iterator &AI);
/// ExpandTypeToArgs - Expand an RValue \arg RV, with the LLVM type for \arg
/// Ty, into individual arguments on the provided vector \arg IRCallArgs,
/// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
void ExpandTypeToArgs(QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
SmallVectorImpl<llvm::Value *> &IRCallArgs,
unsigned &IRCallArgPos);
llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
const Expr *InputExpr, std::string &ConstraintStr);
llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
LValue InputValue, QualType InputType,
std::string &ConstraintStr,
SourceLocation Loc);
public:
#ifndef NDEBUG
// Determine whether the given argument is an Objective-C method
// that may have type parameters in its signature.
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
const DeclContext *dc = method->getDeclContext();
if (const ObjCInterfaceDecl *classDecl= dyn_cast<ObjCInterfaceDecl>(dc)) {
return classDecl->getTypeParamListAsWritten();
}
if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
return catDecl->getTypeParamList();
}
return false;
}
template<typename T>
static bool isObjCMethodWithTypeParams(const T *) { return false; }
#endif
/// EmitCallArgs - Emit call arguments for a function.
template <typename T>
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd,
const FunctionDecl *CalleeDecl = nullptr,
unsigned ParamsToSkip = 0) {
SmallVector<QualType, 16> ArgTypes;
CallExpr::const_arg_iterator Arg = ArgBeg;
assert((ParamsToSkip == 0 || CallArgTypeInfo) &&
"Can't skip parameters if type info is not provided");
if (CallArgTypeInfo) {
#ifndef NDEBUG
bool isGenericMethod = isObjCMethodWithTypeParams(CallArgTypeInfo);
#endif
// First, use the argument types that the type info knows about
for (auto I = CallArgTypeInfo->param_type_begin() + ParamsToSkip,
E = CallArgTypeInfo->param_type_end();
I != E; ++I, ++Arg) {
assert(Arg != ArgEnd && "Running over edge of argument list!");
assert((isGenericMethod ||
((*I)->isVariablyModifiedType() ||
(*I).getNonReferenceType()->isObjCRetainableType() ||
getContext()
.getCanonicalType((*I).getNonReferenceType())
.getTypePtr() ==
getContext()
.getCanonicalType(Arg->getType())
.getTypePtr())) &&
"type mismatch in call argument!");
ArgTypes.push_back(*I);
}
}
// Either we've emitted all the call args, or we have a call to a variadic
// function.
assert(
(Arg == ArgEnd || !CallArgTypeInfo || CallArgTypeInfo->isVariadic()) &&
"Extra arguments in non-variadic function!");
// If we still have any arguments, emit them using the type of the argument.
for (; Arg != ArgEnd; ++Arg)
ArgTypes.push_back(getVarArgType(*Arg));
EmitCallArgs(Args, ArgTypes, ArgBeg, ArgEnd, CalleeDecl, ParamsToSkip);
}
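// Example call site (a sketch; 'FPT' is an assumed FunctionProtoType* for a
// direct call, with no parameters skipped):
//
//   CallArgList Args;
//   CGF.EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end(), CalleeDecl);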
void EmitCallArgs(CallArgList &Args, ArrayRef<QualType> ArgTypes,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd,
const FunctionDecl *CalleeDecl = nullptr,
unsigned ParamsToSkip = 0);
private:
QualType getVarArgType(const Expr *Arg);
const TargetCodeGenInfo &getTargetHooks() const {
return CGM.getTargetCodeGenInfo();
}
void EmitDeclMetadata();
CodeGenModule::ByrefHelpers *
buildByrefHelpers(llvm::StructType &byrefType,
const AutoVarEmission &emission);
void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
/// EmitPointerWithAlignment - Given an expression with a pointer type, emit
/// the value and compute our best estimate of the alignment of the pointee.
std::pair<llvm::Value*, unsigned> EmitPointerWithAlignment(const Expr *Addr);
llvm::Value *GetValueForARMHint(unsigned BuiltinID);
};
/// Helper class with most of the code for saving a value for a
/// conditional expression cleanup.
struct DominatingLLVMValue {
typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
/// Answer whether the given value needs extra work to be saved.
static bool needsSaving(llvm::Value *value) {
// If it's not an instruction, we don't need to save.
if (!isa<llvm::Instruction>(value)) return false;
// If it's an instruction in the entry block, we don't need to save.
llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
return (block != &block->getParent()->getEntryBlock());
}
/// Try to save the given value.
static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
if (!needsSaving(value)) return saved_type(value, false);
// Otherwise we need an alloca.
llvm::Value *alloca =
CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
CGF.Builder.CreateStore(value, alloca);
return saved_type(alloca, true);
}
static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
if (!value.getInt()) return value.getPointer();
return CGF.Builder.CreateLoad(value.getPointer());
}
};
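// The save/restore protocol in use (a sketch with assumed names): values
// that already dominate the reload point pass through unchanged; others are
// spilled to a temporary alloca and reloaded.
//
//   DominatingLLVMValue::saved_type Saved =
//       DominatingLLVMValue::save(CGF, V);   // may spill to an alloca
//   /* ... emit the conditionally-executed cleanup code ... */
//   llvm::Value *Reloaded = DominatingLLVMValue::restore(CGF, Saved);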
/// A partial specialization of DominatingPointer for llvm::Values that
/// might be llvm::Instructions.
template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
typedef T *type;
static type restore(CodeGenFunction &CGF, saved_type value) {
return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
}
};
/// A specialization of DominatingValue for RValue.
template <> struct DominatingValue<RValue> {
typedef RValue type;
class saved_type {
enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
AggregateAddress, ComplexAddress };
llvm::Value *Value;
Kind K;
saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}
public:
static bool needsSaving(RValue value);
static saved_type save(CodeGenFunction &CGF, RValue value);
RValue restore(CodeGenFunction &CGF);
// implementations in CGExprCXX.cpp
};
static bool needsSaving(type value) {
return saved_type::needsSaving(value);
}
static saved_type save(CodeGenFunction &CGF, type value) {
return saved_type::save(CGF, value);
}
static type restore(CodeGenFunction &CGF, saved_type value) {
return value.restore(CGF);
}
};
} // end namespace CodeGen
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGOpenMPRuntime.h | //===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#include "clang/AST/Type.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/IR/ValueHandle.h"
namespace llvm {
class ArrayType;
class Constant;
class Function;
class FunctionType;
class GlobalVariable;
class StructType;
class Type;
class Value;
} // namespace llvm
namespace clang {
class Expr;
class OMPExecutableDirective;
class VarDecl;
namespace CodeGen {
class CodeGenFunction;
class CodeGenModule;
typedef llvm::function_ref<void(CodeGenFunction &)> RegionCodeGenTy;
class CGOpenMPRuntime {
private:
enum OpenMPRTLFunction {
/// \brief Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
/// kmpc_micro microtask, ...);
OMPRTL__kmpc_fork_call,
/// \brief Call to void *__kmpc_threadprivate_cached(ident_t *loc,
/// kmp_int32 global_tid, void *data, size_t size, void ***cache);
OMPRTL__kmpc_threadprivate_cached,
/// \brief Call to void __kmpc_threadprivate_register( ident_t *,
/// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
OMPRTL__kmpc_threadprivate_register,
// Call to __kmpc_int32 kmpc_global_thread_num(ident_t *loc);
OMPRTL__kmpc_global_thread_num,
// Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
// kmp_critical_name *crit);
OMPRTL__kmpc_critical,
// Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
// kmp_critical_name *crit);
OMPRTL__kmpc_end_critical,
// Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
// global_tid);
OMPRTL__kmpc_cancel_barrier,
// Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
OMPRTL__kmpc_barrier,
// Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
OMPRTL__kmpc_for_static_fini,
// Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
// global_tid);
OMPRTL__kmpc_serialized_parallel,
// Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
// global_tid);
OMPRTL__kmpc_end_serialized_parallel,
// Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
// kmp_int32 num_threads);
OMPRTL__kmpc_push_num_threads,
// Call to void __kmpc_flush(ident_t *loc);
OMPRTL__kmpc_flush,
// Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
OMPRTL__kmpc_master,
// Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
OMPRTL__kmpc_end_master,
// Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
// int end_part);
OMPRTL__kmpc_omp_taskyield,
// Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
OMPRTL__kmpc_single,
// Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
OMPRTL__kmpc_end_single,
// Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
// kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
// kmp_routine_entry_t *task_entry);
OMPRTL__kmpc_omp_task_alloc,
// Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
// new_task);
OMPRTL__kmpc_omp_task,
// Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
// size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
// kmp_int32 didit);
OMPRTL__kmpc_copyprivate,
// Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
// kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
// (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
OMPRTL__kmpc_reduce,
// Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
// global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
// void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
// *lck);
OMPRTL__kmpc_reduce_nowait,
// Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
// kmp_critical_name *lck);
OMPRTL__kmpc_end_reduce,
// Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
// kmp_critical_name *lck);
OMPRTL__kmpc_end_reduce_nowait,
// Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
// kmp_task_t * new_task);
OMPRTL__kmpc_omp_task_begin_if0,
// Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
// kmp_task_t * new_task);
OMPRTL__kmpc_omp_task_complete_if0,
// Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
OMPRTL__kmpc_ordered,
// Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
OMPRTL__kmpc_end_ordered,
// Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
// global_tid);
OMPRTL__kmpc_omp_taskwait,
// Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
OMPRTL__kmpc_taskgroup,
// Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
OMPRTL__kmpc_end_taskgroup,
// Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
// int proc_bind);
OMPRTL__kmpc_push_proc_bind,
// Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
// gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
// *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
OMPRTL__kmpc_omp_task_with_deps,
// Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
// gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
// ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
OMPRTL__kmpc_omp_wait_deps,
// Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
// global_tid, kmp_int32 cncl_kind);
OMPRTL__kmpc_cancellationpoint,
// Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
// kmp_int32 cncl_kind);
OMPRTL__kmpc_cancel,
};
/// \brief Values for bit flags used in the ident_t to describe the fields.
/// All enumerated elements are named and described in accordance with the
/// code from http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
enum OpenMPLocationFlags {
/// \brief Use trampoline for internal microtask.
OMP_IDENT_IMD = 0x01,
/// \brief Use C-style ident structure.
OMP_IDENT_KMPC = 0x02,
/// \brief Atomic reduction option for kmpc_reduce.
OMP_ATOMIC_REDUCE = 0x10,
/// \brief Explicit 'barrier' directive.
OMP_IDENT_BARRIER_EXPL = 0x20,
/// \brief Implicit barrier in code.
OMP_IDENT_BARRIER_IMPL = 0x40,
/// \brief Implicit barrier in 'for' directive.
OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
/// \brief Implicit barrier in 'sections' directive.
OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
/// \brief Implicit barrier in 'single' directive.
OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140
};
CodeGenModule &CGM;
/// \brief Default const ident_t object used for initialization of all other
/// ident_t objects.
llvm::Constant *DefaultOpenMPPSource;
/// \brief Map of flags and corresponding default locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDefaultLocMapTy;
OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
llvm::Value *getOrCreateDefaultLocation(OpenMPLocationFlags Flags);
/// \brief Describes the ident structure that encodes a source location.
/// All descriptions are taken from
/// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
/// Original structure:
/// typedef struct ident {
/// kmp_int32 reserved_1; /**< might be used in Fortran;
/// see above */
/// kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags;
/// KMP_IDENT_KMPC identifies this union
/// member */
/// kmp_int32 reserved_2; /**< not really used in Fortran any more;
/// see above */
///#if USE_ITT_BUILD
/// /* but currently used for storing
/// region-specific ITT */
/// /* contextual information. */
///#endif /* USE_ITT_BUILD */
/// kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for
/// C++ */
/// char const *psource; /**< String describing the source location.
/// The string is composed of semi-colon separated
/// fields which describe the source file,
/// the function and a pair of line numbers that
/// delimit the construct.
/// */
/// } ident_t;
enum IdentFieldIndex {
/// \brief might be used in Fortran
IdentField_Reserved_1,
/// \brief OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
IdentField_Flags,
/// \brief Not really used in Fortran any more
IdentField_Reserved_2,
/// \brief Source[4] in Fortran, do not use for C++
IdentField_Reserved_3,
/// \brief String describing the source location. The string is composed of
/// semi-colon separated fields which describe the source file, the function
/// and a pair of line numbers that delimit the construct.
IdentField_PSource
};
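// For reference, a plausible default psource string shows the semi-colon
// separated shape documented above (this exact literal is an assumption
// mirroring the upstream runtime's convention, not a contract):
//
//   ";unknown;unknown;0;0;;"  // ;file;function;line;line;;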
llvm::StructType *IdentTy;
/// \brief Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// \brief The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy;
/// \brief Stores debug location and ThreadID for the function.
struct DebugLocThreadIdTy {
llvm::Value *DebugLoc;
llvm::Value *ThreadID;
};
/// \brief Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
OpenMPLocThreadIDMapTy;
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
/// \brief Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
/// \brief An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for caches of threadprivate
/// variables.
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
InternalVars;
/// \brief Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy;
QualType KmpRoutineEntryPtrQTy;
/// \brief Type typedef struct kmp_task {
/// void * shareds; /**< pointer to block of pointers to
/// shared vars */
/// kmp_routine_entry_t routine; /**< pointer to routine to call for
/// executing task */
/// kmp_int32 part_id; /**< part id for the task */
/// kmp_routine_entry_t destructors; /* pointer to function to invoke
/// deconstructors of firstprivate C++ objects */
/// } kmp_task_t;
QualType KmpTaskTQTy;
/// \brief Type typedef struct kmp_depend_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool in:1;
/// bool out:1;
/// } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
/// \brief Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);
/// \brief Emits an object of ident_t type with info for the source location.
/// \param Flags Flags for OpenMP location.
///
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPLocationFlags Flags = OMP_IDENT_KMPC);
/// \brief Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();
/// \brief Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
/// \brief Returns specified OpenMP runtime function.
/// \param Function OpenMP runtime function.
/// \return Specified function.
llvm::Constant *createRuntimeFunction(OpenMPRTLFunction Function);
/// \brief Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::Constant *createForStaticInitFunction(unsigned IVSize, bool IVSigned);
/// \brief Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::Constant *createDispatchInitFunction(unsigned IVSize, bool IVSigned);
/// \brief Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::Constant *createDispatchNextFunction(unsigned IVSize, bool IVSigned);
/// \brief Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::Constant *createDispatchFiniFunction(unsigned IVSize, bool IVSigned);
/// \brief If the specified mangled name is not in the module, create and
/// return a threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
/// \brief Emits address of the word in a memory where current thread id is
/// stored.
virtual llvm::Value *emitThreadIDAddress(CodeGenFunction &CGF,
SourceLocation Loc);
/// \brief Gets thread id value for the current thread.
///
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
/// \brief Gets (if a variable with the given name already exists) or creates
/// an internal global variable with the specified Name. The created variable
/// has CommonLinkage by default and is initialized to a null value.
/// \param Ty Type of the global variable. If the variable already exists, the
/// type must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
const llvm::Twine &Name);
/// \brief Set of threadprivate variables with the generated initializer.
llvm::DenseSet<const VarDecl *> ThreadPrivateWithDefinition;
/// \brief Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, llvm::Value *VDAddr,
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
/// \brief Returns the lock object corresponding to the specified critical
/// region name. If the lock object does not exist, it is created; otherwise a
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
public:
explicit CGOpenMPRuntime(CodeGenModule &CGM);
virtual ~CGOpenMPRuntime() {}
virtual void clear();
/// \brief Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is the directive itself; for combined directives, their innermost
/// directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Value *emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// \brief Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, kmp_int32
/// PartID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is the directive itself; for combined directives, their innermost
/// directive).
/// \param CodeGen Code generation sequence for the \a D directive.
///
virtual llvm::Value *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// \brief Cleans up references to the objects in finished function.
///
void functionFinished(CodeGenFunction &CGF);
/// \brief Emits code for a parallel or serial call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedStruct A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *OutlinedFn,
llvm::Value *CapturedStruct,
const Expr *IfCond);
/// \brief Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc);
/// \brief Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
virtual void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc);
/// \brief Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
/// \brief Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc);
/// \brief Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
virtual void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen,
SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps);
/// \brief Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc);
/// \brief Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param CheckForCancel true if check for possible cancellation must be
/// performed, false otherwise.
///
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind,
bool CheckForCancel = true);
/// \brief Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without an outer loop.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// \brief Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive is emitted without an outer loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
///
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
/// \brief Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before the start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned, as needed to generate the static_chunked scheduled loop.
/// \param Chunk Value of the chunk for the static_chunked scheduled loop.
/// For the default (nullptr) value, the chunk 1 will be used.
///
virtual void emitForInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPScheduleClauseKind SchedKind, unsigned IVSize,
bool IVSigned, bool Ordered, llvm::Value *IL,
llvm::Value *LB, llvm::Value *UB, llvm::Value *ST,
llvm::Value *Chunk = nullptr);
/// \brief Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
bool IVSigned);
/// \brief Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
///
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc);
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned,
llvm::Value *IL, llvm::Value *LB,
llvm::Value *UB, llvm::Value *ST);
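// Pseudocode for the control flow a dynamically scheduled worksharing loop
// builds from these hooks (a sketch; names and block structure are assumed,
// and emitForNext really returns an llvm::Value* condition):
//
//   emitForInit(CGF, Loc, OMPC_SCHEDULE_dynamic, IVSize, IVSigned,
//               Ordered, IL, LB, UB, ST);
//   while (/* condition produced by */ emitForNext(CGF, Loc, IVSize,
//                                                  IVSigned, IL, LB, UB, ST))
//     /* run iterations in [*LB, *UB] */;
//
// Static schedules instead compute their bounds once up front and finish
// with emitForStaticFinish(CGF, Loc).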
/// \brief Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
virtual void emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc);
/// \brief Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF,
OpenMPProcBindClauseKind ProcBind,
SourceLocation Loc);
/// \brief Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
virtual llvm::Value *getAddrOfThreadPrivate(CodeGenFunction &CGF,
const VarDecl *VD,
llvm::Value *VDAddr,
SourceLocation Loc);
/// \brief Emit code for initialization of a threadprivate variable. It emits
/// a call to the runtime library which adds the initial value to the newly
/// created threadprivate variable (if it is not constant) and registers a
/// destructor for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, llvm::Value *VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
/// \brief Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc);
/// \brief Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param Tied true if the task is tied (the task is tied to the thread that
/// can suspend its task region); false if untied (the task is not tied to any
/// thread).
/// \param Final Contains either constant bool value, or llvm::Value * of i1
/// type for final clause. If the value is true, the task forces all of its
/// child tasks to become final and included tasks.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param PrivateVars List of references to private variables for the task
/// directive.
/// \param PrivateCopies List of private copies for each private variable in
/// \p PrivateVars.
/// \param FirstprivateVars List of references to private variables for the
/// task directive.
/// \param FirstprivateCopies List of private copies for each private variable
/// in \p FirstprivateVars.
/// \param FirstprivateInits List of references to auto generated variables
/// used for initialization of a single array element. Used if firstprivate
/// variable is of array type.
/// \param Dependences List of dependences for the 'task' construct, including
/// original expression and dependency type.
virtual void emitTaskCall(
CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D,
bool Tied, llvm::PointerIntPair<llvm::Value *, 1, bool> Final,
llvm::Value *TaskFunction, QualType SharedsTy, llvm::Value *Shareds,
const Expr *IfCond, ArrayRef<const Expr *> PrivateVars,
ArrayRef<const Expr *> PrivateCopies,
ArrayRef<const Expr *> FirstprivateVars,
ArrayRef<const Expr *> FirstprivateCopies,
ArrayRef<const Expr *> FirstprivateInits,
ArrayRef<std::pair<OpenMPDependClauseKind, const Expr *>> Dependences);
/// \brief Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is the directive itself; for combined directives, their innermost
/// directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual void emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen);
/// \brief Emit code for a reduction clause. The following code should be
/// emitted for the reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param WithNowait true if parent directive has also nowait clause, false
/// otherwise.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
bool WithNowait, bool SimpleReduction);
/// \brief Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
/// \brief Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind CancelRegion);
/// \brief Emit code for 'cancel' construct.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion);
};
} // namespace CodeGen
} // namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGCXXABI.h | //===----- CGCXXABI.h - Interface to C++ ABIs -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides an abstract class for C++ code generation. Concrete subclasses
// of this implement code generation for specific C++ ABIs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGCXXABI_H
#define LLVM_CLANG_LIB_CODEGEN_CGCXXABI_H
#include "CodeGenFunction.h"
#include "clang/Basic/LLVM.h"
namespace llvm {
class Constant;
class Type;
class Value;
class CallInst;
}
namespace clang {
class CastExpr;
class CXXConstructorDecl;
class CXXDestructorDecl;
class CXXMethodDecl;
class CXXRecordDecl;
class FieldDecl;
class MangleContext;
namespace CodeGen {
class CodeGenFunction;
class CodeGenModule;
/// \brief Implements C++ ABI-specific code generation functions.
class CGCXXABI {
protected:
CodeGenModule &CGM;
std::unique_ptr<MangleContext> MangleCtx;
CGCXXABI(CodeGenModule &CGM)
: CGM(CGM), MangleCtx(CGM.getContext().createMangleContext()) {}
protected:
ImplicitParamDecl *&getThisDecl(CodeGenFunction &CGF) {
return CGF.CXXABIThisDecl;
}
llvm::Value *&getThisValue(CodeGenFunction &CGF) {
return CGF.CXXABIThisValue;
}
/// Issue a diagnostic about unsupported features in the ABI.
void ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S);
/// Get a null value for unsupported member pointers.
llvm::Constant *GetBogusMemberPointer(QualType T);
ImplicitParamDecl *&getStructorImplicitParamDecl(CodeGenFunction &CGF) {
return CGF.CXXStructorImplicitParamDecl;
}
llvm::Value *&getStructorImplicitParamValue(CodeGenFunction &CGF) {
return CGF.CXXStructorImplicitParamValue;
}
/// Perform prolog initialization of the parameter variable suitable
/// for 'this' emitted by buildThisParam.
void EmitThisParam(CodeGenFunction &CGF);
ASTContext &getContext() const { return CGM.getContext(); }
virtual bool requiresArrayCookie(const CXXDeleteExpr *E, QualType eltType);
virtual bool requiresArrayCookie(const CXXNewExpr *E);
public:
virtual ~CGCXXABI();
/// Gets the mangle context.
MangleContext &getMangleContext() {
return *MangleCtx;
}
/// Returns true if the given constructor or destructor is one of the
/// kinds that the ABI says returns 'this' (only applies when called
/// non-virtually for destructors).
///
/// There currently is no way to indicate if a destructor returns 'this'
/// when called virtually, and code generation does not support the case.
virtual bool HasThisReturn(GlobalDecl GD) const { return false; }
virtual bool hasMostDerivedReturn(GlobalDecl GD) const { return false; }
/// If the C++ ABI requires the given type be returned in a particular way,
/// this method sets RetAI and returns true.
virtual bool classifyReturnType(CGFunctionInfo &FI) const = 0;
/// Specify how one should pass an argument of a record type.
enum RecordArgABI {
/// Pass it using the normal C aggregate rules for the ABI, potentially
/// introducing extra copies and passing some or all of it in registers.
RAA_Default = 0,
/// Pass it on the stack using its defined layout. The argument must be
/// evaluated directly into the correct stack position in the arguments area,
/// and the call machinery must not move it or introduce extra copies.
RAA_DirectInMemory,
/// Pass it as a pointer to temporary memory.
RAA_Indirect
};
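  // Illustrative example (not part of the interface): given
  //   struct S { S(const S &); int x; };
  //   void f(S s);
  // the 32-bit Microsoft ABI typically classifies S as RAA_DirectInMemory
  // (the caller constructs the argument in place in the argument area),
  // while the Itanium ABI classifies it as RAA_Indirect (the caller passes
  // a pointer to a temporary).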
/// Returns true if C++ allows us to copy the memory of an object of type RD
/// when it is passed as an argument.
bool canCopyArgument(const CXXRecordDecl *RD) const;
/// Returns how an argument of the given record type should be passed.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const = 0;
/// Returns true if the implicit 'sret' parameter comes after the implicit
/// 'this' parameter of C++ instance methods.
virtual bool isSRetParameterAfterThis() const { return false; }
/// Find the LLVM type used to represent the given member pointer
/// type.
virtual llvm::Type *
ConvertMemberPointerType(const MemberPointerType *MPT);
/// Load a member function from an object and a member function
/// pointer. Apply the this-adjustment and set 'This' to the
/// adjusted value.
virtual llvm::Value *EmitLoadOfMemberFunctionPointer(
CodeGenFunction &CGF, const Expr *E, llvm::Value *&This,
llvm::Value *MemPtr, const MemberPointerType *MPT);
/// Calculate an l-value from an object and a data member pointer.
virtual llvm::Value *
EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
llvm::Value *Base, llvm::Value *MemPtr,
const MemberPointerType *MPT);
/// Perform a derived-to-base, base-to-derived, or bitcast member
/// pointer conversion.
virtual llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
const CastExpr *E,
llvm::Value *Src);
/// Perform a derived-to-base, base-to-derived, or bitcast member
/// pointer conversion on a constant value.
virtual llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
llvm::Constant *Src);
/// Return true if the given member pointer can be zero-initialized
/// (in the C++ sense) with an LLVM zeroinitializer.
virtual bool isZeroInitializable(const MemberPointerType *MPT);
/// Return whether or not a member pointer type is convertible to an IR type.
virtual bool isMemberPointerConvertible(const MemberPointerType *MPT) const {
return true;
}
virtual bool isTypeInfoCalculable(QualType Ty) const {
return !Ty->isIncompleteType();
}
/// Create a null member pointer of the given type.
virtual llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
/// Create a member pointer for the given method.
virtual llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD);
/// Create a member pointer for the given field.
virtual llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
CharUnits offset);
/// Create a member pointer for the given member pointer constant.
virtual llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT);
/// Emit a comparison between two member pointers. Returns an i1.
virtual llvm::Value *
EmitMemberPointerComparison(CodeGenFunction &CGF,
llvm::Value *L,
llvm::Value *R,
const MemberPointerType *MPT,
bool Inequality);
/// Determine if a member pointer is non-null. Returns an i1.
virtual llvm::Value *
EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
llvm::Value *MemPtr,
const MemberPointerType *MPT);
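  // Worked example (sketch): under the Itanium ABI a data member pointer
  // 'int S::*p' is represented as the member's byte offset, with -1 as the
  // null value, so the is-not-null test reduces to e.g. 'icmp ne i64 %p, -1'.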
protected:
/// A utility method for computing the offset required for the given
/// base-to-derived or derived-to-base member-pointer conversion.
/// Does not handle virtual conversions (in case we ever fully
/// support an ABI that allows this). Returns null if no adjustment
/// is required.
llvm::Constant *getMemberPointerAdjustment(const CastExpr *E);
/// \brief Computes the non-virtual adjustment needed for a member pointer
/// conversion along an inheritance path stored in an APValue. Unlike
/// getMemberPointerAdjustment(), the adjustment can be negative if the path
/// is from a derived type to a base type.
CharUnits getMemberPointerPathAdjustment(const APValue &MP);
public:
virtual void emitVirtualObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
llvm::Value *Ptr, QualType ElementType,
const CXXDestructorDecl *Dtor) = 0;
virtual void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) = 0;
virtual void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) = 0;
virtual llvm::GlobalVariable *getThrowInfo(QualType T) { return nullptr; }
virtual void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) = 0;
virtual llvm::CallInst *
emitTerminateForUnexpectedException(CodeGenFunction &CGF,
llvm::Value *Exn);
virtual llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) = 0;
virtual llvm::Constant *
getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType) = 0;
virtual bool shouldTypeidBeNullChecked(bool IsDeref,
QualType SrcRecordTy) = 0;
virtual void EmitBadTypeidCall(CodeGenFunction &CGF) = 0;
virtual llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
llvm::Value *ThisPtr,
llvm::Type *StdTypeInfoPtrTy) = 0;
virtual bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) = 0;
virtual llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy, llvm::BasicBlock *CastEnd) = 0;
virtual llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF,
llvm::Value *Value,
QualType SrcRecordTy,
QualType DestTy) = 0;
virtual bool EmitBadCastCall(CodeGenFunction &CGF) = 0;
virtual llvm::Value *GetVirtualBaseClassOffset(CodeGenFunction &CGF,
llvm::Value *This,
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) = 0;
virtual llvm::BasicBlock *EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
const CXXRecordDecl *RD);
/// Emit the code to initialize hidden members required
/// to handle virtual inheritance, if needed by the ABI.
virtual void
initializeHiddenVirtualInheritanceMembers(CodeGenFunction &CGF,
const CXXRecordDecl *RD) {}
/// Emit constructor variants required by this ABI.
virtual void EmitCXXConstructors(const CXXConstructorDecl *D) = 0;
/// Build the signature of the given constructor or destructor variant by
/// adding any required parameters. For convenience, ArgTys has been
/// initialized with the type of 'this'.
virtual void buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
SmallVectorImpl<CanQualType> &ArgTys) = 0;
/// Returns true if the given destructor type should be emitted as a linkonce
/// delegating thunk, regardless of whether the dtor is defined in this TU or
/// not.
virtual bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
CXXDtorType DT) const = 0;
/// Emit destructor variants required by this ABI.
virtual void EmitCXXDestructors(const CXXDestructorDecl *D) = 0;
/// Get the type of the implicit "this" parameter used by a method. May return
/// null if no specific type is applicable, e.g. if the ABI expects the "this"
/// parameter to point to some artificial offset in a complete object due to
/// vbases being reordered.
virtual const CXXRecordDecl *
getThisArgumentTypeForMethod(const CXXMethodDecl *MD) {
return MD->getParent();
}
/// Perform ABI-specific "this" argument adjustment required prior to
/// a call of a virtual function.
/// The "VirtualCall" argument is true iff the call itself is virtual.
virtual llvm::Value *
adjustThisArgumentForVirtualFunctionCall(CodeGenFunction &CGF, GlobalDecl GD,
llvm::Value *This,
bool VirtualCall) {
return This;
}
/// Build a parameter variable suitable for 'this'.
void buildThisParam(CodeGenFunction &CGF, FunctionArgList &Params);
/// Insert any ABI-specific implicit parameters into the parameter list for a
/// function. This generally involves extra data for constructors and
/// destructors.
///
/// ABIs may also choose to override the return type, which has been
/// initialized with the type of 'this' if HasThisReturn(CGF.CurGD) is true or
/// the formal return type of the function otherwise.
virtual void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
FunctionArgList &Params) = 0;
/// Perform ABI-specific "this" parameter adjustment in a virtual function
/// prologue.
virtual llvm::Value *adjustThisParameterInVirtualFunctionPrologue(
CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This) {
return This;
}
/// Emit the ABI-specific prolog for the function.
virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF) = 0;
/// Add any ABI-specific implicit arguments needed to call a constructor.
///
/// \return The number of args added to the call, which is typically zero or
/// one.
virtual unsigned
addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
CXXCtorType Type, bool ForVirtualBase,
bool Delegating, CallArgList &Args) = 0;
/// Emit the destructor call.
virtual void EmitDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *DD, CXXDtorType Type,
bool ForVirtualBase, bool Delegating,
llvm::Value *This) = 0;
/// Emits the VTable definitions required for the given record type.
virtual void emitVTableDefinitions(CodeGenVTables &CGVT,
const CXXRecordDecl *RD) = 0;
/// Get the address point of the vtable for the given base subobject while
/// building a constructor or a destructor. On return, NeedsVirtualOffset
/// tells if a virtual base adjustment is needed in order to get the offset
/// of the base subobject.
virtual llvm::Value *getVTableAddressPointInStructor(
CodeGenFunction &CGF, const CXXRecordDecl *RD, BaseSubobject Base,
const CXXRecordDecl *NearestVBase, bool &NeedsVirtualOffset) = 0;
/// Get the address point of the vtable for the given base subobject while
/// building a constexpr.
virtual llvm::Constant *
getVTableAddressPointForConstExpr(BaseSubobject Base,
const CXXRecordDecl *VTableClass) = 0;
/// Get the address of the vtable for the given record decl which should be
/// used for the vptr at the given offset in RD.
virtual llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
CharUnits VPtrOffset) = 0;
/// Build a virtual function pointer in the ABI-specific way.
virtual llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF,
GlobalDecl GD,
llvm::Value *This,
llvm::Type *Ty,
SourceLocation Loc) = 0;
/// Emit the ABI-specific virtual destructor call.
virtual llvm::Value *
EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor,
CXXDtorType DtorType, llvm::Value *This,
const CXXMemberCallExpr *CE) = 0;
virtual void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF,
GlobalDecl GD,
CallArgList &CallArgs) {}
/// Emit any tables needed to implement virtual inheritance. For Itanium,
/// this emits virtual table tables. For the MSVC++ ABI, this emits virtual
/// base tables.
virtual void emitVirtualInheritanceTables(const CXXRecordDecl *RD) = 0;
virtual void setThunkLinkage(llvm::Function *Thunk, bool ForVTable,
GlobalDecl GD, bool ReturnAdjustment) = 0;
virtual llvm::Value *performThisAdjustment(CodeGenFunction &CGF,
llvm::Value *This,
const ThisAdjustment &TA) = 0;
virtual llvm::Value *performReturnAdjustment(CodeGenFunction &CGF,
llvm::Value *Ret,
const ReturnAdjustment &RA) = 0;
virtual void EmitReturnFromThunk(CodeGenFunction &CGF,
RValue RV, QualType ResultType);
virtual size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
FunctionArgList &Args) const = 0;
/// Gets the pure virtual member call function.
virtual StringRef GetPureVirtualCallName() = 0;
/// Gets the deleted virtual member call name.
virtual StringRef GetDeletedVirtualCallName() = 0;
/**************************** Array cookies ******************************/
/// Returns the extra size required in order to store the array
/// cookie for the given new-expression. May return 0 to indicate that no
/// array cookie is required.
///
/// Several cases are filtered out before this method is called:
/// - non-array allocations never need a cookie
/// - calls to \::operator new(size_t, void*) never need a cookie
///
/// \param expr - the new-expression being allocated.
virtual CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
/// Initialize the array cookie for the given allocation.
///
/// \param NewPtr - a char* which is the presumed-non-null
/// return value of the allocation function
/// \param NumElements - the computed number of elements,
/// potentially collapsed from the multidimensional array case;
/// always a size_t
/// \param ElementType - the base element allocated type,
/// i.e. the allocated type after stripping all array types
virtual llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
const CXXNewExpr *expr,
QualType ElementType);
/// Reads the array cookie associated with the given pointer,
/// if it has one.
///
/// \param Ptr - a pointer to the first element in the array
/// \param ElementType - the base element type of elements of the array
/// \param NumElements - an out parameter which will be initialized
/// with the number of elements allocated, or zero if there is no
/// cookie
/// \param AllocPtr - an out parameter which will be initialized
/// with a char* pointing to the address returned by the allocation
/// function
/// \param CookieSize - an out parameter which will be initialized
/// with the size of the cookie, or zero if there is no cookie
virtual void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
const CXXDeleteExpr *expr,
QualType ElementType, llvm::Value *&NumElements,
llvm::Value *&AllocPtr, CharUnits &CookieSize);
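  // Worked example (Itanium-style cookie, sketch only): for 'new T[n]' where
  // T has a non-trivial destructor, the allocation is laid out as
  //
  //   | size_t cookie = n | n elements of T ... |
  //   ^ AllocPtr          ^ NewPtr / Ptr
  //
  // GetArrayCookieSize returns sizeof(size_t) rounded up to the element
  // alignment, and ReadArrayCookie recovers NumElements and AllocPtr by
  // stepping back over that cookie.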
/// Return whether the given global decl needs a VTT parameter.
virtual bool NeedsVTTParameter(GlobalDecl GD);
protected:
/// Returns the extra size required in order to store the array
/// cookie for the given type. Assumes that an array cookie is
/// required.
virtual CharUnits getArrayCookieSizeImpl(QualType elementType);
/// Reads the array cookie for an allocation which is known to have one.
/// This is called by the standard implementation of ReadArrayCookie.
///
/// \param ptr - a pointer to the allocation made for an array, as a char*
/// \param cookieSize - the computed cookie size of an array
///
/// Other parameters are as above.
///
/// \return a size_t
virtual llvm::Value *readArrayCookieImpl(CodeGenFunction &IGF,
llvm::Value *ptr,
CharUnits cookieSize);
public:
/*************************** Static local guards ****************************/
/// Emits the guarded initializer and destructor setup for the given
/// variable, given that it couldn't be emitted as a constant.
/// If \p PerformInit is false, the initialization has been folded to a
/// constant and should not be performed.
///
/// The variable may be:
/// - a static local variable
/// - a static data member of a class template instantiation
virtual void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
llvm::GlobalVariable *DeclPtr,
bool PerformInit) = 0;
/// Emit code to force the execution of a destructor during global
/// teardown. The default implementation of this uses atexit.
///
/// \param Dtor - a function taking a single pointer argument
/// \param Addr - a pointer to pass to the destructor function.
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
llvm::Constant *Dtor,
llvm::Constant *Addr) = 0;
/*************************** thread_local initialization ********************/
/// Emits ABI-required functions necessary to initialize thread_local
/// variables in this translation unit.
///
/// \param CXXThreadLocals - The thread_local declarations in this translation
/// unit.
/// \param CXXThreadLocalInits - If this translation unit contains any
/// non-constant initialization or non-trivial destruction for
/// thread_local variables, a list of functions to perform the
/// initialization.
virtual void EmitThreadLocalInitFuncs(
CodeGenModule &CGM,
ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>>
CXXThreadLocals,
ArrayRef<llvm::Function *> CXXThreadLocalInits,
ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) = 0;
// Determine if references to thread_local global variables can be made
// directly or require access through a thread wrapper function.
virtual bool usesThreadWrapperFunction() const = 0;
/// Emit a reference to a non-local thread_local variable (including
/// triggering the initialization of all thread_local variables in its
/// translation unit).
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
const VarDecl *VD,
QualType LValType) = 0;
/// Emit a single constructor/destructor with the given type from a C++
/// constructor Decl.
virtual void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) = 0;
};
// Create an instance of a C++ ABI class:
/// Creates an Itanium-family ABI.
CGCXXABI *CreateItaniumCXXABI(CodeGenModule &CGM);
/// Creates a Microsoft-family ABI.
CGCXXABI *CreateMicrosoftCXXABI(CodeGenModule &CGM);
}
}
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CodeGenModule.cpp | //===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-module state used while generating code.
//
//===----------------------------------------------------------------------===//
#include "CodeGenModule.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGHLSLRuntime.h" // HLSL Change
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenPGO.h"
#include "CodeGenTBAA.h"
#include "CoverageMappingGen.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Version.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TimeProfiler.h" // HLSL Change
#include "dxc/DXIL/DxilConstants.h" // HLSL Change
using namespace clang;
using namespace CodeGen;
static const char AnnotationSection[] = "llvm.metadata";
static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
switch (CGM.getTarget().getCXXABI().getKind()) {
case TargetCXXABI::GenericAArch64:
case TargetCXXABI::GenericARM:
case TargetCXXABI::iOS:
case TargetCXXABI::iOS64:
case TargetCXXABI::GenericMIPS:
case TargetCXXABI::GenericItanium:
return CreateItaniumCXXABI(CGM);
case TargetCXXABI::Microsoft:
return CreateMicrosoftCXXABI(CGM);
}
llvm_unreachable("invalid C++ ABI kind");
}
CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
const PreprocessorOptions &PPO,
const CodeGenOptions &CGO, llvm::Module &M,
const llvm::DataLayout &TD,
DiagnosticsEngine &diags,
CoverageSourceInfo *CoverageInfo)
: Context(C), LangOpts(C.getLangOpts()), HeaderSearchOpts(HSO),
PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
TheDataLayout(TD), Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
VMContext(M.getContext()), TBAA(nullptr), TheTargetCodeGenInfo(nullptr),
Types(*this), VTables(*this), ObjCRuntime(nullptr),
OpenCLRuntime(nullptr), OpenMPRuntime(nullptr), CUDARuntime(nullptr),
HLSLRuntime(nullptr), DebugInfo(nullptr), ARCData(nullptr), // HLSL Change
NoObjCARCExceptionsMetadata(nullptr), RRData(nullptr), PGOReader(nullptr),
CFConstantStringClassRef(nullptr), ConstantStringClassRef(nullptr),
NSConstantStringType(nullptr), NSConcreteGlobalBlock(nullptr),
NSConcreteStackBlock(nullptr), BlockObjectAssign(nullptr),
BlockObjectDispose(nullptr), BlockDescriptorType(nullptr),
GenericBlockLiteralType(nullptr), LifetimeStartFn(nullptr),
LifetimeEndFn(nullptr), SanitizerMD(new SanitizerMetadata(*this)) {
// Initialize the type cache.
llvm::LLVMContext &LLVMContext = M.getContext();
VoidTy = llvm::Type::getVoidTy(LLVMContext);
Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
FloatTy = llvm::Type::getFloatTy(LLVMContext);
DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
PointerAlignInBytes =
C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
IntPtrTy = llvm::IntegerType::get(LLVMContext, PointerWidthInBits);
Int8PtrTy = Int8Ty->getPointerTo(0);
Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();
BuiltinCC = getTargetCodeGenInfo().getABIInfo().getBuiltinCC();
if (LangOpts.ObjC1)
createObjCRuntime();
if (LangOpts.OpenCL)
createOpenCLRuntime();
if (LangOpts.OpenMP)
createOpenMPRuntime();
if (LangOpts.CUDA)
createCUDARuntime();
// HLSL Change Starts
std::unique_ptr<CGHLSLRuntime> RuntimePtr;
std::unique_ptr<CodeGenTBAA> TBAAPtr;
std::unique_ptr<CGDebugInfo> DebugInfoPtr;
if (LangOpts.HLSL) {
createHLSLRuntime();
RuntimePtr.reset(HLSLRuntime);
}
// HLSL Change Ends
// Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
if (LangOpts.Sanitize.has(SanitizerKind::Thread) ||
(!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
TBAA = new CodeGenTBAA(Context, VMContext, CodeGenOpts, getLangOpts(),
getCXXABI().getMangleContext());
TBAAPtr.reset(TBAA); // HLSL Change
// If debug info or coverage generation is enabled, create the CGDebugInfo
// object.
if (CodeGenOpts.getDebugInfo() != CodeGenOptions::NoDebugInfo ||
CodeGenOpts.EmitGcovArcs ||
CodeGenOpts.EmitGcovNotes)
DebugInfo = new CGDebugInfo(*this);
DebugInfoPtr.reset(DebugInfo); // HLSL Change
Block.GlobalUniqueCount = 0;
#if 0 // HLSL Change Starts - no ARC support
if (C.getLangOpts().ObjCAutoRefCount)
ARCData = new ARCEntrypoints();
RRData = new RREntrypoints();
#endif // HLSL Change Ends - no ARC support
if (!CodeGenOpts.InstrProfileInput.empty()) {
auto ReaderOrErr =
llvm::IndexedInstrProfReader::create(CodeGenOpts.InstrProfileInput);
if (std::error_code EC = ReaderOrErr.getError()) {
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"Could not read profile %0: %1");
getDiags().Report(DiagID) << CodeGenOpts.InstrProfileInput
<< EC.message();
} else
PGOReader = std::move(ReaderOrErr.get());
}
// If coverage mapping generation is enabled, create the
// CoverageMappingModuleGen object.
if (CodeGenOpts.CoverageMapping)
CoverageMapping.reset(new CoverageMappingModuleGen(*this, *CoverageInfo));
// HLSL Change Starts - release acquired pointers
RuntimePtr.release();
TBAAPtr.release();
DebugInfoPtr.release();
// HLSL Change Ends
}
CodeGenModule::~CodeGenModule() {
#if 0 // HLSL Change - no ObjC, OpenCL, OpenMP, or CUDA support
delete ObjCRuntime;
delete OpenCLRuntime;
delete OpenMPRuntime;
delete CUDARuntime;
#endif // HLSL Change
delete HLSLRuntime; // HLSL Change
TheTargetCodeGenInfo.reset(nullptr); // HLSL Change
delete TBAA;
delete DebugInfo;
delete ARCData;
delete RRData;
}
void CodeGenModule::createObjCRuntime() {
#if 0 // HLSL Change - no ObjC support
// This is just isGNUFamily(), but we want to force implementors of
// new ABIs to decide how best to do this.
switch (LangOpts.ObjCRuntime.getKind()) {
case ObjCRuntime::GNUstep:
case ObjCRuntime::GCC:
case ObjCRuntime::ObjFW:
ObjCRuntime = CreateGNUObjCRuntime(*this);
return;
case ObjCRuntime::FragileMacOSX:
case ObjCRuntime::MacOSX:
case ObjCRuntime::iOS:
ObjCRuntime = CreateMacObjCRuntime(*this);
return;
}
llvm_unreachable("bad runtime kind");
#endif // HLSL Change - no ObjC support
}
void CodeGenModule::createOpenCLRuntime() {
#if 0 // HLSL Change - no OpenCL support
OpenCLRuntime = new CGOpenCLRuntime(*this);
#endif // HLSL Change - no OpenCL support
}
void CodeGenModule::createOpenMPRuntime() {
#if 0 // HLSL Change - no OpenMP support
OpenMPRuntime = new CGOpenMPRuntime(*this);
#endif // HLSL Change - no OpenMP support
}
void CodeGenModule::createCUDARuntime() {
#if 0 // HLSL Change - no CUDA support
CUDARuntime = CreateNVCUDARuntime(*this);
#endif // HLSL Change - no CUDA support
}
// HLSL Change Starts
void CodeGenModule::createHLSLRuntime() {
HLSLRuntime = CreateMSHLSLRuntime(*this);
}
void CodeGenModule::FinishCodeGen() {
HLSLRuntime->FinishCodeGen();
}
// HLSL Change Ends
void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
Replacements[Name] = C;
}
void CodeGenModule::applyReplacements() {
for (auto &I : Replacements) {
StringRef MangledName = I.first();
llvm::Constant *Replacement = I.second;
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (!Entry)
continue;
auto *OldF = cast<llvm::Function>(Entry);
auto *NewF = dyn_cast<llvm::Function>(Replacement);
if (!NewF) {
if (auto *Alias = dyn_cast<llvm::GlobalAlias>(Replacement)) {
NewF = dyn_cast<llvm::Function>(Alias->getAliasee());
} else {
auto *CE = cast<llvm::ConstantExpr>(Replacement);
assert(CE->getOpcode() == llvm::Instruction::BitCast ||
CE->getOpcode() == llvm::Instruction::GetElementPtr);
NewF = dyn_cast<llvm::Function>(CE->getOperand(0));
}
}
// Replace old with new, but keep the old order.
OldF->replaceAllUsesWith(Replacement);
if (NewF) {
NewF->removeFromParent();
OldF->getParent()->getFunctionList().insertAfter(OldF, NewF);
}
OldF->eraseFromParent();
}
}
// This is only used on aliases that we created ourselves, which are known to
// have a linear structure.
static const llvm::GlobalObject *getAliasedGlobal(const llvm::GlobalAlias &GA) {
llvm::SmallPtrSet<const llvm::GlobalAlias*, 4> Visited;
const llvm::Constant *C = &GA;
for (;;) {
C = C->stripPointerCasts();
if (auto *GO = dyn_cast<llvm::GlobalObject>(C))
return GO;
// stripPointerCasts will not walk over weak aliases.
auto *GA2 = dyn_cast<llvm::GlobalAlias>(C);
if (!GA2)
return nullptr;
if (!Visited.insert(GA2).second)
return nullptr;
C = GA2->getAliasee();
}
}
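// Schematic example: given a chain @a -> @b -> @g where @g is a defined
// global object, this walk returns @g; a cycle such as @a -> @b -> @a trips
// the Visited check and returns null instead.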
void CodeGenModule::checkAliases() {
// Check if the constructed aliases are well formed. It is really unfortunate
// that we have to do this in CodeGen, but we only construct mangled names
// and aliases during codegen.
bool Error = false;
DiagnosticsEngine &Diags = getDiags();
for (const GlobalDecl &GD : Aliases) {
const auto *D = cast<ValueDecl>(GD.getDecl());
const AliasAttr *AA = D->getAttr<AliasAttr>();
StringRef MangledName = getMangledName(GD);
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
auto *Alias = cast<llvm::GlobalAlias>(Entry);
const llvm::GlobalValue *GV = getAliasedGlobal(*Alias);
if (!GV) {
Error = true;
Diags.Report(AA->getLocation(), diag::err_cyclic_alias);
} else if (GV->isDeclaration()) {
Error = true;
Diags.Report(AA->getLocation(), diag::err_alias_to_undefined);
}
llvm::Constant *Aliasee = Alias->getAliasee();
llvm::GlobalValue *AliaseeGV;
if (auto CE = dyn_cast<llvm::ConstantExpr>(Aliasee))
AliaseeGV = cast<llvm::GlobalValue>(CE->getOperand(0));
else
AliaseeGV = cast<llvm::GlobalValue>(Aliasee);
if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
StringRef AliasSection = SA->getName();
if (AliasSection != AliaseeGV->getSection())
Diags.Report(SA->getLocation(), diag::warn_alias_with_section)
<< AliasSection;
}
// We have to handle aliases to weak aliases here. LLVM itself disallows
// this since the object semantics would not match the IR ones. For
// compatibility with gcc we implement it by just pointing the alias
// to its aliasee's aliasee. We also warn, since the user is probably
// expecting the link to be weak.
if (auto GA = dyn_cast<llvm::GlobalAlias>(AliaseeGV)) {
if (GA->mayBeOverridden()) {
Diags.Report(AA->getLocation(), diag::warn_alias_to_weak_alias)
<< GV->getName() << GA->getName();
Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
GA->getAliasee(), Alias->getType());
Alias->setAliasee(Aliasee);
}
}
}
if (!Error)
return;
for (const GlobalDecl &GD : Aliases) {
StringRef MangledName = getMangledName(GD);
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
auto *Alias = cast<llvm::GlobalAlias>(Entry);
Alias->replaceAllUsesWith(llvm::UndefValue::get(Alias->getType()));
Alias->eraseFromParent();
}
}
void CodeGenModule::clear() {
DeferredDeclsToEmit.clear();
if (OpenMPRuntime)
OpenMPRuntime->clear();
}
void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
StringRef MainFile) {
if (!hasDiagnostics())
return;
if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) {
if (MainFile.empty())
MainFile = "<stdin>";
Diags.Report(diag::warn_profile_data_unprofiled) << MainFile;
} else
Diags.Report(diag::warn_profile_data_out_of_date) << Visited << Missing
<< Mismatched;
}
void CodeGenModule::Release() {
EmitDeferred();
applyReplacements();
checkAliases();
EmitCXXGlobalInitFunc();
EmitCXXGlobalDtorFunc();
EmitCXXThreadLocalInitFunc();
if (ObjCRuntime)
if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
AddGlobalCtor(ObjCInitFunction);
if (Context.getLangOpts().CUDA && !Context.getLangOpts().CUDAIsDevice &&
CUDARuntime) {
if (llvm::Function *CudaCtorFunction = CUDARuntime->makeModuleCtorFunction())
AddGlobalCtor(CudaCtorFunction);
if (llvm::Function *CudaDtorFunction = CUDARuntime->makeModuleDtorFunction())
AddGlobalDtor(CudaDtorFunction);
}
if (PGOReader && PGOStats.hasDiagnostics())
PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
EmitCtorList(GlobalCtors, "llvm.global_ctors");
EmitCtorList(GlobalDtors, "llvm.global_dtors");
EmitGlobalAnnotations();
EmitStaticExternCAliases();
EmitDeferredUnusedCoverageMappings();
if (CoverageMapping)
CoverageMapping->emit();
emitLLVMUsed();
if (CodeGenOpts.Autolink &&
(Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) {
EmitModuleLinkOptions();
}
if (CodeGenOpts.DwarfVersion)
// We actually want the latest version when there are conflicts.
// We can change from Warning to Latest if such mode is supported.
getModule().addModuleFlag(llvm::Module::Warning, "Dwarf Version",
CodeGenOpts.DwarfVersion);
if (DebugInfo)
// We support a single version in the linked module. The LLVM
// parser will drop debug info with a different version number
// (and warn about it, too).
getModule().addModuleFlag(llvm::Module::Warning, "Debug Info Version",
llvm::DEBUG_METADATA_VERSION);
// We need to record the widths of enums and wchar_t, so that we can generate
// the correct build attributes in the ARM backend.
llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
if ( Arch == llvm::Triple::arm
|| Arch == llvm::Triple::armeb
|| Arch == llvm::Triple::thumb
|| Arch == llvm::Triple::thumbeb) {
// Width of wchar_t in bytes
uint64_t WCharWidth =
Context.getTypeSizeInChars(Context.getWideCharType()).getQuantity();
getModule().addModuleFlag(llvm::Module::Error, "wchar_size", WCharWidth);
// The minimum width of an enum in bytes
uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4;
getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth);
}
// HLSL Change Starts
if (Context.getLangOpts().HLSL) {
FinishCodeGen();
}
// HLSL Change Ends
if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
llvm::PICLevel::Level PL = llvm::PICLevel::Default;
switch (PLevel) {
case 0: break;
case 1: PL = llvm::PICLevel::Small; break;
case 2: PL = llvm::PICLevel::Large; break;
default: llvm_unreachable("Invalid PIC Level");
}
getModule().setPICLevel(PL);
}
SimplifyPersonality();
if (getCodeGenOpts().EmitDeclMetadata)
EmitDeclMetadata();
if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
EmitCoverageFile();
if (DebugInfo)
DebugInfo->finalize();
EmitVersionIdentMetadata();
EmitTargetMetadata();
}
void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
// Make sure that this type is translated.
Types.UpdateCompletedType(TD);
}
llvm::MDNode *CodeGenModule::getTBAAInfo(QualType QTy) {
if (!TBAA)
return nullptr;
return TBAA->getTBAAInfo(QTy);
}
llvm::MDNode *CodeGenModule::getTBAAInfoForVTablePtr() {
if (!TBAA)
return nullptr;
return TBAA->getTBAAInfoForVTablePtr();
}
llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
if (!TBAA)
return nullptr;
return TBAA->getTBAAStructInfo(QTy);
}
llvm::MDNode *CodeGenModule::getTBAAStructTypeInfo(QualType QTy) {
if (!TBAA)
return nullptr;
return TBAA->getTBAAStructTypeInfo(QTy);
}
llvm::MDNode *CodeGenModule::getTBAAStructTagInfo(QualType BaseTy,
llvm::MDNode *AccessN,
uint64_t O) {
if (!TBAA)
return nullptr;
return TBAA->getTBAAStructTagInfo(BaseTy, AccessN, O);
}
/// Decorate the instruction with a TBAA tag. For both scalar TBAA
/// and struct-path aware TBAA, the tag has the same format:
/// base type, access type and offset.
/// When ConvertTypeToTag is true, we create a tag based on the scalar type.
void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst,
llvm::MDNode *TBAAInfo,
bool ConvertTypeToTag) {
if (ConvertTypeToTag && TBAA)
Inst->setMetadata(llvm::LLVMContext::MD_tbaa,
TBAA->getTBAAScalarTagInfo(TBAAInfo));
else
Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo);
}
void CodeGenModule::Error(SourceLocation loc, StringRef message) {
unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0");
getDiags().Report(Context.getFullLoc(loc), diagID) << message;
}
/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) {
unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
"cannot compile this %0 yet");
std::string Msg = Type;
getDiags().Report(Context.getFullLoc(S->getLocStart()), DiagID)
<< Msg << S->getSourceRange();
}
/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified decl yet.
void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) {
unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
"cannot compile this %0 yet");
std::string Msg = Type;
getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
}
llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
return llvm::ConstantInt::get(SizeTy, size.getQuantity());
}
void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
const NamedDecl *D) const {
// Internal definitions always have default visibility.
if (GV->hasLocalLinkage()) {
GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
return;
}
// Set visibility for definitions.
LinkageInfo LV = D->getLinkageAndVisibility();
if (LV.isVisibilityExplicit() || !GV->hasAvailableExternallyLinkage())
GV->setVisibility(GetLLVMVisibility(LV.getVisibility()));
}
static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S)
.Case("global-dynamic", llvm::GlobalVariable::GeneralDynamicTLSModel)
.Case("local-dynamic", llvm::GlobalVariable::LocalDynamicTLSModel)
.Case("initial-exec", llvm::GlobalVariable::InitialExecTLSModel)
.Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel);
}
static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(
CodeGenOptions::TLSModel M) {
switch (M) {
case CodeGenOptions::GeneralDynamicTLSModel:
return llvm::GlobalVariable::GeneralDynamicTLSModel;
case CodeGenOptions::LocalDynamicTLSModel:
return llvm::GlobalVariable::LocalDynamicTLSModel;
case CodeGenOptions::InitialExecTLSModel:
return llvm::GlobalVariable::InitialExecTLSModel;
case CodeGenOptions::LocalExecTLSModel:
return llvm::GlobalVariable::LocalExecTLSModel;
}
llvm_unreachable("Invalid TLS model!");
}
void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
llvm::GlobalValue::ThreadLocalMode TLM;
TLM = GetLLVMTLSModel(CodeGenOpts.getDefaultTLSModel());
// Override the TLS model if it is explicitly specified.
if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
TLM = GetLLVMTLSModel(Attr->getModel());
}
GV->setThreadLocalMode(TLM);
}
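// For example (sketch), with '-ftls-model=global-dynamic' a declaration like
//   __thread int x __attribute__((tls_model("initial-exec")));
// overrides the command-line default and the global gets
// InitialExecTLSModel.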
StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
StringRef &FoundStr = MangledDeclNames[GD.getCanonicalDecl()];
if (!FoundStr.empty())
return FoundStr;
const auto *ND = cast<NamedDecl>(GD.getDecl());
// HLSL Change Starts
// Entry point doesn't get mangled
if (ND->getKind() == Decl::Function &&
ND->getDeclContext()->getDeclKind() == Decl::Kind::TranslationUnit &&
ND->getNameAsString() == CodeGenOpts.HLSLEntryFunction) {
return CodeGenOpts.HLSLEntryFunction;
}
// HLSL Change Ends
SmallString<256> Buffer;
StringRef Str;
if (getCXXABI().getMangleContext().shouldMangleDeclName(ND)) {
llvm::raw_svector_ostream Out(Buffer);
if (const auto *D = dyn_cast<CXXConstructorDecl>(ND))
getCXXABI().getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Out);
else if (const auto *D = dyn_cast<CXXDestructorDecl>(ND))
getCXXABI().getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Out);
else
getCXXABI().getMangleContext().mangleName(ND, Out);
Str = Out.str();
} else {
IdentifierInfo *II = ND->getIdentifier();
assert(II && "Attempt to mangle unnamed decl.");
Str = II->getName();
}
// Keep the first result in the case of a mangling collision.
auto Result = Manglings.insert(std::make_pair(Str, GD));
return FoundStr = Result.first->first();
}
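// For example, a namespace-scope 'void foo()' mangles to "_Z3foov" under the
// Itanium ABI and "?foo@@YAXXZ" under the Microsoft ABI, while an HLSL entry
// function keeps its source name as-is per the early-out above.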
StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
const BlockDecl *BD) {
MangleContext &MangleCtx = getCXXABI().getMangleContext();
const Decl *D = GD.getDecl();
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
if (!D)
MangleCtx.mangleGlobalBlock(BD,
dyn_cast_or_null<VarDecl>(initializedGlobalDecl.getDecl()), Out);
else if (const auto *CD = dyn_cast<CXXConstructorDecl>(D))
MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
else if (const auto *DD = dyn_cast<CXXDestructorDecl>(D))
MangleCtx.mangleDtorBlock(DD, GD.getDtorType(), BD, Out);
else
MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);
auto Result = Manglings.insert(std::make_pair(Out.str(), BD));
return Result.first->first();
}
llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
return getModule().getNamedValue(Name);
}
/// AddGlobalCtor - Add a function to the list that will be called before
/// main() runs.
void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority,
llvm::Constant *AssociatedData) {
// FIXME: Type coercion of void()* types.
GlobalCtors.push_back(Structor(Priority, Ctor, AssociatedData));
}
/// AddGlobalDtor - Add a function to the list that will be called
/// when the module is unloaded.
void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority) {
// FIXME: Type coercion of void()* types.
GlobalDtors.push_back(Structor(Priority, Dtor, nullptr));
}
void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
// Ctor function type is void()*.
llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
// Get the type of a ctor entry, { i32, void ()*, i8* }.
llvm::StructType *CtorStructTy = llvm::StructType::get(
Int32Ty, llvm::PointerType::getUnqual(CtorFTy), VoidPtrTy, nullptr);
// Construct the constructor and destructor arrays.
SmallVector<llvm::Constant *, 8> Ctors;
for (const auto &I : Fns) {
llvm::Constant *S[] = {
llvm::ConstantInt::get(Int32Ty, I.Priority, false),
llvm::ConstantExpr::getBitCast(I.Initializer, CtorPFTy),
(I.AssociatedData
? llvm::ConstantExpr::getBitCast(I.AssociatedData, VoidPtrTy)
: llvm::Constant::getNullValue(VoidPtrTy))};
Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S));
}
if (!Ctors.empty()) {
llvm::ArrayType *AT = llvm::ArrayType::get(CtorStructTy, Ctors.size());
new llvm::GlobalVariable(TheModule, AT, false,
llvm::GlobalValue::AppendingLinkage,
llvm::ConstantArray::get(AT, Ctors),
GlobalName);
}
}
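// The emitted array has the following shape (illustrative IR; @init stands
// for a hypothetical initializer function):
//   @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
//     [{ i32 65535, void ()* @init, i8* null }]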
llvm::GlobalValue::LinkageTypes
CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
const auto *D = cast<FunctionDecl>(GD.getDecl());
GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
if (isa<CXXDestructorDecl>(D) &&
getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
GD.getDtorType())) {
// Destructor variants in the Microsoft C++ ABI are always internal or
// linkonce_odr thunks emitted on an as-needed basis.
return Linkage == GVA_Internal ? llvm::GlobalValue::InternalLinkage
: llvm::GlobalValue::LinkOnceODRLinkage;
}
return getLLVMLinkageForDeclarator(D, Linkage, /*isConstantVariable=*/false);
}
void CodeGenModule::setFunctionDLLStorageClass(GlobalDecl GD, llvm::Function *F) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(FD)) {
if (getCXXABI().useThunkForDtorVariant(Dtor, GD.getDtorType())) {
// Don't dllexport/import destructor thunks.
F->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
return;
}
}
if (FD->hasAttr<DLLImportAttr>())
F->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
else if (FD->hasAttr<DLLExportAttr>())
F->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
else
F->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
}
void CodeGenModule::setFunctionDefinitionAttributes(const FunctionDecl *D,
llvm::Function *F) {
setNonAliasAttributes(D, F);
}
void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
const CGFunctionInfo &Info,
llvm::Function *F) {
unsigned CallingConv;
AttributeListType AttributeList;
ConstructAttributeList(Info, D, AttributeList, CallingConv, false);
F->setAttributes(llvm::AttributeSet::get(getLLVMContext(), AttributeList));
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
// HLSL Change Begins
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
getHLSLRuntime().AddHLSLFunctionInfo(F, FD);
// HLSL Change Ends
}
/// Determines whether the language options require us to model
/// unwind exceptions. We treat -fexceptions as mandating this
/// except under the fragile ObjC ABI with only ObjC exceptions
/// enabled. This means, for example, that C with -fexceptions
/// enables this.
static bool hasUnwindExceptions(const LangOptions &LangOpts) {
// If exceptions are completely disabled, obviously this is false.
if (!LangOpts.Exceptions) return false;
// If C++ exceptions are enabled, this is true.
if (LangOpts.CXXExceptions) return true;
// If ObjC exceptions are enabled, this depends on the ABI.
if (LangOpts.ObjCExceptions) {
return LangOpts.ObjCRuntime.hasUnwindExceptions();
}
return true;
}
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
llvm::Function *F) {
llvm::AttrBuilder B;
if (CodeGenOpts.UnwindTables)
B.addAttribute(llvm::Attribute::UWTable);
if (!hasUnwindExceptions(LangOpts))
B.addAttribute(llvm::Attribute::NoUnwind);
if (D->hasAttr<NakedAttr>()) {
// Naked implies noinline: we should not be inlining such functions.
B.addAttribute(llvm::Attribute::Naked);
B.addAttribute(llvm::Attribute::NoInline);
} else if (D->hasAttr<NoDuplicateAttr>()) {
B.addAttribute(llvm::Attribute::NoDuplicate);
} else if (D->hasAttr<NoInlineAttr>()) {
B.addAttribute(llvm::Attribute::NoInline);
} else if (D->hasAttr<AlwaysInlineAttr>() &&
!F->getAttributes().hasAttribute(llvm::AttributeSet::FunctionIndex,
llvm::Attribute::NoInline)) {
// (noinline wins over always_inline, and we can't specify both in IR)
B.addAttribute(llvm::Attribute::AlwaysInline);
}
if (D->hasAttr<ColdAttr>()) {
if (!D->hasAttr<OptimizeNoneAttr>())
B.addAttribute(llvm::Attribute::OptimizeForSize);
B.addAttribute(llvm::Attribute::Cold);
}
if (D->hasAttr<MinSizeAttr>())
B.addAttribute(llvm::Attribute::MinSize);
if (LangOpts.getStackProtector() == LangOptions::SSPOn)
B.addAttribute(llvm::Attribute::StackProtect);
else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
B.addAttribute(llvm::Attribute::StackProtectStrong);
else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
B.addAttribute(llvm::Attribute::StackProtectReq);
F->addAttributes(llvm::AttributeSet::FunctionIndex,
llvm::AttributeSet::get(
F->getContext(), llvm::AttributeSet::FunctionIndex, B));
if (D->hasAttr<OptimizeNoneAttr>()) {
// OptimizeNone implies noinline; we should not be inlining such functions.
F->addFnAttr(llvm::Attribute::OptimizeNone);
F->addFnAttr(llvm::Attribute::NoInline);
// OptimizeNone wins over OptimizeForSize, MinSize, AlwaysInline.
assert(!F->hasFnAttribute(llvm::Attribute::OptimizeForSize) &&
"OptimizeNone and OptimizeForSize on same function!");
assert(!F->hasFnAttribute(llvm::Attribute::MinSize) &&
"OptimizeNone and MinSize on same function!");
assert(!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
"OptimizeNone and AlwaysInline on same function!");
// Attribute 'inlinehint' has no effect on 'optnone' functions.
// Explicitly remove it from the set of function attributes.
F->removeFnAttr(llvm::Attribute::InlineHint);
}
if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
F->setUnnamedAddr(true);
else if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
if (MD->isVirtual())
F->setUnnamedAddr(true);
unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
if (alignment)
F->setAlignment(alignment);
// C++ ABI requires 2-byte alignment for member functions.
if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
F->setAlignment(2);
}
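// For example (sketch), a function carrying __attribute__((cold)) and no
// 'optnone' ends up with both 'cold' and 'optsize' in its attribute set:
//   define void @f() #0 { ... }
//   attributes #0 = { cold optsize ... }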
void CodeGenModule::SetCommonAttributes(const Decl *D,
llvm::GlobalValue *GV) {
if (const auto *ND = dyn_cast<NamedDecl>(D))
setGlobalVisibility(GV, ND);
else
GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
if (D->hasAttr<UsedAttr>())
addUsedGlobal(GV);
}
void CodeGenModule::setAliasAttributes(const Decl *D,
llvm::GlobalValue *GV) {
SetCommonAttributes(D, GV);
// Process the dllexport attribute based on whether the original definition
// (not necessarily the aliasee) was exported.
if (D->hasAttr<DLLExportAttr>())
GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
}
void CodeGenModule::setNonAliasAttributes(const Decl *D,
llvm::GlobalObject *GO) {
SetCommonAttributes(D, GO);
if (const SectionAttr *SA = D->getAttr<SectionAttr>())
GO->setSection(SA->getName());
getTargetCodeGenInfo().setTargetAttributes(D, GO, *this);
}
void CodeGenModule::SetInternalFunctionAttributes(const Decl *D,
llvm::Function *F,
const CGFunctionInfo &FI) {
SetLLVMFunctionAttributes(D, FI, F);
SetLLVMFunctionAttributesForDefinition(D, F);
F->setLinkage(llvm::Function::InternalLinkage);
setNonAliasAttributes(D, F);
}
static void setLinkageAndVisibilityForGV(llvm::GlobalValue *GV,
const NamedDecl *ND) {
// Set linkage and visibility in case we never see a definition.
LinkageInfo LV = ND->getLinkageAndVisibility();
if (LV.getLinkage() != ExternalLinkage) {
// Don't set internal linkage on declarations.
} else {
if (ND->hasAttr<DLLImportAttr>()) {
GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
} else if (ND->hasAttr<DLLExportAttr>()) {
GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
} else if (ND->hasAttr<WeakAttr>() || ND->isWeakImported()) {
// "extern_weak" is overloaded in LLVM; we probably should have
// separate linkage types for this.
GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
}
// Set visibility on a declaration only if it's explicit.
if (LV.isVisibilityExplicit())
GV->setVisibility(CodeGenModule::GetLLVMVisibility(LV.getVisibility()));
}
}
void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
bool IsIncompleteFunction,
bool IsThunk) {
if (llvm::Intrinsic::ID IID = F->getIntrinsicID()) {
// If this is an intrinsic function, set the function's attributes
// to the intrinsic's attributes.
F->setAttributes(llvm::Intrinsic::getAttributes(getLLVMContext(), IID));
return;
}
const auto *FD = cast<FunctionDecl>(GD.getDecl());
if (!IsIncompleteFunction)
SetLLVMFunctionAttributes(FD, getTypes().arrangeGlobalDeclaration(GD), F);
// Add the Returned attribute for "this", except for iOS 5 and earlier
// where substantial code, including the libstdc++ dylib, was compiled with
// GCC and does not actually return "this".
if (!IsThunk && getCXXABI().HasThisReturn(GD) &&
!(getTarget().getTriple().isiOS() &&
getTarget().getTriple().isOSVersionLT(6))) {
assert(!F->arg_empty() &&
F->arg_begin()->getType()
->canLosslesslyBitCastTo(F->getReturnType()) &&
"unexpected this return");
F->addAttribute(1, llvm::Attribute::Returned);
}
// Only a few attributes are set on declarations; these may later be
// overridden by a definition.
setLinkageAndVisibilityForGV(F, FD);
if (const SectionAttr *SA = FD->getAttr<SectionAttr>())
F->setSection(SA->getName());
// A replaceable global allocation function does not act like a builtin by
// default, only if it is invoked by a new-expression or delete-expression.
if (FD->isReplaceableGlobalAllocationFunction())
F->addAttribute(llvm::AttributeSet::FunctionIndex,
llvm::Attribute::NoBuiltin);
}
void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
assert(!GV->isDeclaration() &&
"Only globals with definition can force usage.");
LLVMUsed.emplace_back(GV);
}
void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) {
assert(!GV->isDeclaration() &&
"Only globals with definition can force usage.");
LLVMCompilerUsed.emplace_back(GV);
}
static void emitUsed(CodeGenModule &CGM, StringRef Name,
std::vector<llvm::WeakTrackingVH> &List) {
// Don't create llvm.used if there is no need.
if (List.empty())
return;
// Convert List to what ConstantArray needs.
SmallVector<llvm::Constant*, 8> UsedArray;
UsedArray.resize(List.size());
for (unsigned i = 0, e = List.size(); i != e; ++i) {
UsedArray[i] =
llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
cast<llvm::Constant>(&*List[i]), CGM.Int8PtrTy);
}
if (UsedArray.empty())
return;
llvm::ArrayType *ATy = llvm::ArrayType::get(CGM.Int8PtrTy, UsedArray.size());
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage,
llvm::ConstantArray::get(ATy, UsedArray), Name);
GV->setSection("llvm.metadata");
}
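// Illustrative result for a single entry:
//   @llvm.used = appending global [1 x i8*]
//     [i8* bitcast (void ()* @f to i8*)], section "llvm.metadata"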
void CodeGenModule::emitLLVMUsed() {
emitUsed(*this, "llvm.used", LLVMUsed);
emitUsed(*this, "llvm.compiler.used", LLVMCompilerUsed);
}
void CodeGenModule::AppendLinkerOptions(StringRef Opts) {
auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opts);
LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}
void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
llvm::SmallString<32> Opt;
getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt);
auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}
void CodeGenModule::AddDependentLib(StringRef Lib) {
llvm::SmallString<24> Opt;
getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
}
/// \brief Add link options implied by the given module, including modules
/// it depends on, using a postorder walk.
static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
SmallVectorImpl<llvm::Metadata *> &Metadata,
llvm::SmallPtrSet<Module *, 16> &Visited) {
// Import this module's parent.
if (Mod->Parent && Visited.insert(Mod->Parent).second) {
addLinkOptionsPostorder(CGM, Mod->Parent, Metadata, Visited);
}
// Import this module's dependencies.
for (unsigned I = Mod->Imports.size(); I > 0; --I) {
if (Visited.insert(Mod->Imports[I - 1]).second)
addLinkOptionsPostorder(CGM, Mod->Imports[I-1], Metadata, Visited);
}
// Add linker options to link against the libraries/frameworks
// described by this module.
llvm::LLVMContext &Context = CGM.getLLVMContext();
for (unsigned I = Mod->LinkLibraries.size(); I > 0; --I) {
// Link against a framework. Frameworks are currently Darwin-only, so we
// don't need to ask TargetCodeGenInfo for the spelling of the linker option.
if (Mod->LinkLibraries[I-1].IsFramework) {
llvm::Metadata *Args[2] = {
llvm::MDString::get(Context, "-framework"),
llvm::MDString::get(Context, Mod->LinkLibraries[I - 1].Library)};
Metadata.push_back(llvm::MDNode::get(Context, Args));
continue;
}
// Link against a library.
llvm::SmallString<24> Opt;
CGM.getTargetCodeGenInfo().getDependentLibraryOption(
Mod->LinkLibraries[I-1].Library, Opt);
auto *OptString = llvm::MDString::get(Context, Opt);
Metadata.push_back(llvm::MDNode::get(Context, OptString));
}
}
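// For a module that links against the "Foundation" framework this appends
// metadata like !{!"-framework", !"Foundation"}; a plain library "z" goes
// through getDependentLibraryOption and becomes, e.g., !{!"-lz"} on Darwin
// or !{!"/DEFAULTLIB:z.lib"} with MSVC-style linkers (spellings are
// target-dependent).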
void CodeGenModule::EmitModuleLinkOptions() {
// Collect the set of all of the modules we want to visit to emit link
// options, which is essentially the imported modules and all of their
// non-explicit child modules.
llvm::SetVector<clang::Module *> LinkModules;
llvm::SmallPtrSet<clang::Module *, 16> Visited;
SmallVector<clang::Module *, 16> Stack;
// Seed the stack with imported modules.
for (Module *M : ImportedModules)
if (Visited.insert(M).second)
Stack.push_back(M);
// Find all of the modules to import, making a little effort to prune
// non-leaf modules.
while (!Stack.empty()) {
clang::Module *Mod = Stack.pop_back_val();
bool AnyChildren = false;
// Visit the submodules of this module.
for (clang::Module::submodule_iterator Sub = Mod->submodule_begin(),
SubEnd = Mod->submodule_end();
Sub != SubEnd; ++Sub) {
// Skip explicit children; they need to be explicitly imported to be
// linked against.
if ((*Sub)->IsExplicit)
continue;
if (Visited.insert(*Sub).second) {
Stack.push_back(*Sub);
AnyChildren = true;
}
}
// We didn't find any children, so add this module to the list of
// modules to link against.
if (!AnyChildren) {
LinkModules.insert(Mod);
}
}
// Add link options for all of the imported modules in reverse topological
// order. We don't do anything to try to order import link flags with respect
// to linker options inserted by things like #pragma comment().
SmallVector<llvm::Metadata *, 16> MetadataArgs;
Visited.clear();
for (Module *M : LinkModules)
if (Visited.insert(M).second)
addLinkOptionsPostorder(*this, M, MetadataArgs, Visited);
std::reverse(MetadataArgs.begin(), MetadataArgs.end());
LinkerOptionsMetadata.append(MetadataArgs.begin(), MetadataArgs.end());
// Add the linker options metadata flag.
getModule().addModuleFlag(llvm::Module::AppendUnique, "Linker Options",
llvm::MDNode::get(getLLVMContext(),
LinkerOptionsMetadata));
}
void CodeGenModule::EmitDeferred() {
// Emit code for any potentially referenced deferred decls. Since a
// previously unused static decl may become used during the generation of code
// for a static function, iterate until no changes are made.
if (!DeferredVTables.empty()) {
EmitDeferredVTables();
// Emitting a v-table doesn't directly cause more v-tables to
// become deferred, although it can cause functions to be
// emitted that then need those v-tables.
assert(DeferredVTables.empty());
}
// Stop if we're out of both deferred v-tables and deferred declarations.
if (DeferredDeclsToEmit.empty())
return;
// Grab the list of decls to emit. If EmitGlobalDefinition schedules more
// work, it will not interfere with this.
std::vector<DeferredGlobal> CurDeclsToEmit;
CurDeclsToEmit.swap(DeferredDeclsToEmit);
for (DeferredGlobal &G : CurDeclsToEmit) {
GlobalDecl D = G.GD;
llvm::GlobalValue *GV = G.GV;
G.GV = nullptr;
assert(!GV || GV == GetGlobalValue(getMangledName(D)));
if (!GV)
GV = GetGlobalValue(getMangledName(D));
// Check to see if we've already emitted this. This is necessary
// for a couple of reasons: first, decls can end up in the
// deferred-decls queue multiple times, and second, decls can end
// up with definitions in unusual ways (e.g. by an extern inline
// function acquiring a strong function redefinition). Just
// ignore these cases.
if (GV && !GV->isDeclaration())
continue;
// Otherwise, emit the definition and move on to the next one.
EmitGlobalDefinition(D, GV);
// If we found out that we need to emit more decls, do that recursively.
// This has the advantage that the decls are emitted in a DFS and related
// ones are close together, which is convenient for testing.
if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) {
EmitDeferred();
assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty());
}
}
}
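/// Gather every annotation recorded by AddGlobalAnnotations into the
/// llvm.global.annotations appending-linkage array.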
void CodeGenModule::EmitGlobalAnnotations() {
if (Annotations.empty())
return;
// Create a new global variable for the ConstantStruct in the Module.
llvm::Constant *Array = llvm::ConstantArray::get(llvm::ArrayType::get(
Annotations[0]->getType(), Annotations.size()), Annotations);
auto *gv = new llvm::GlobalVariable(getModule(), Array->getType(), false,
llvm::GlobalValue::AppendingLinkage,
Array, "llvm.global.annotations");
gv->setSection(AnnotationSection);
}
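/// Return a private, unnamed_addr constant global holding the given
/// annotation string, reusing a previously emitted global for the same string.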
llvm::Constant *CodeGenModule::EmitAnnotationString(StringRef Str) {
llvm::Constant *&AStr = AnnotationStrings[Str];
if (AStr)
return AStr;
// Not found yet, create a new global.
llvm::Constant *s = llvm::ConstantDataArray::getString(getLLVMContext(), Str);
auto *gv =
new llvm::GlobalVariable(getModule(), s->getType(), true,
llvm::GlobalValue::PrivateLinkage, s, ".str");
gv->setSection(AnnotationSection);
gv->setUnnamedAddr(true);
AStr = gv;
return gv;
}
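/// Emit the name of the translation unit (the presumed file of the given
/// location) as an annotation string.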
llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) {
SourceManager &SM = getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
if (PLoc.isValid())
return EmitAnnotationString(PLoc.getFilename());
return EmitAnnotationString(SM.getBufferName(Loc));
}
llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) {
SourceManager &SM = getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(L);
unsigned LineNo = PLoc.isValid() ? PLoc.getLine() :
SM.getExpansionLineNumber(L);
return llvm::ConstantInt::get(Int32Ty, LineNo);
}
llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
const AnnotateAttr *AA,
SourceLocation L) {
// Get the globals for file name, annotation, and the line number.
llvm::Constant *AnnoGV = EmitAnnotationString(AA->getAnnotation()),
*UnitGV = EmitAnnotationUnit(L),
*LineNoCst = EmitAnnotationLineNo(L);
// Create the ConstantStruct for the global annotation.
llvm::Constant *Fields[4] = {
llvm::ConstantExpr::getBitCast(GV, Int8PtrTy),
llvm::ConstantExpr::getBitCast(AnnoGV, Int8PtrTy),
llvm::ConstantExpr::getBitCast(UnitGV, Int8PtrTy),
LineNoCst
};
return llvm::ConstantStruct::getAnon(Fields);
}
void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
llvm::GlobalValue *GV) {
assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
// Get the struct elements for these annotations.
for (const auto *I : D->specific_attrs<AnnotateAttr>())
Annotations.push_back(EmitAnnotateAttr(GV, I, D->getLocation()));
}
bool CodeGenModule::isInSanitizerBlacklist(llvm::Function *Fn,
SourceLocation Loc) const {
const auto &SanitizerBL = getContext().getSanitizerBlacklist();
// Blacklist by function name.
if (SanitizerBL.isBlacklistedFunction(Fn->getName()))
return true;
// Blacklist by location.
if (!Loc.isInvalid())
return SanitizerBL.isBlacklistedLocation(Loc);
// If location is unknown, this may be a compiler-generated function. Assume
// it's located in the main file.
auto &SM = Context.getSourceManager();
if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
return SanitizerBL.isBlacklistedFile(MainFile->getName());
}
return false;
}
bool CodeGenModule::isInSanitizerBlacklist(llvm::GlobalVariable *GV,
SourceLocation Loc, QualType Ty,
StringRef Category) const {
// For now globals can be blacklisted only in ASan and KASan.
if (!LangOpts.Sanitize.hasOneOf(
SanitizerKind::Address | SanitizerKind::KernelAddress))
return false;
const auto &SanitizerBL = getContext().getSanitizerBlacklist();
if (SanitizerBL.isBlacklistedGlobal(GV->getName(), Category))
return true;
if (SanitizerBL.isBlacklistedLocation(Loc, Category))
return true;
// Check global type.
if (!Ty.isNull()) {
// Drill down the array types: if a global variable of a fixed type is
// blacklisted, we also don't instrument arrays of it.
while (auto AT = dyn_cast<ArrayType>(Ty.getTypePtr()))
Ty = AT->getElementType();
Ty = Ty.getCanonicalType().getUnqualifiedType();
// Only record types (classes, structs, etc.) can be blacklisted.
if (Ty->isRecordType()) {
std::string TypeStr = Ty.getAsString(getContext().getPrintingPolicy());
if (SanitizerBL.isBlacklistedType(TypeStr, Category))
return true;
}
}
return false;
}
bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
// Never defer when EmitAllDecls is specified.
if (LangOpts.EmitAllDecls)
return true;
return getContext().DeclMustBeEmitted(Global);
}
bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
if (const auto *FD = dyn_cast<FunctionDecl>(Global))
if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
// Implicit template instantiations may change linkage if they are later
// explicitly instantiated, so they should not be emitted eagerly.
return false;
// OACR error 6287
#pragma prefast(disable: __WARNING_ZEROLOGICALANDLOSINGSIDEEFFECTS, "language options are constants, by design")
// If OpenMP is enabled and threadprivates must be generated like TLS, delay
// codegen for global variables, because they may be marked as threadprivate.
if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS &&
getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Global))
return false;
return true;
}
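/// Return the descriptor global for a __uuidof() expression, creating a
/// linkonce_odr constant named after the GUID on first use.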
llvm::Constant *CodeGenModule::GetAddrOfUuidDescriptor(
const CXXUuidofExpr* E) {
// Sema has verified that IIDSource has a __declspec(uuid()), and that it's
// well-formed.
StringRef Uuid = E->getUuidAsStringRef(Context);
std::string Name = "_GUID_" + Uuid.lower();
std::replace(Name.begin(), Name.end(), '-', '_');
// Look for an existing global.
if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
return GV;
llvm::Constant *Init = EmitUuidofInitializer(Uuid);
assert(Init && "failed to initialize as constant");
auto *GV = new llvm::GlobalVariable(
getModule(), Init->getType(),
/*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
if (supportsCOMDAT())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
return GV;
}
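/// Return a reference to the aliasee of a weakref declaration, creating an
/// extern_weak declaration for the target if none exists yet.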
llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
const AliasAttr *AA = VD->getAttr<AliasAttr>();
assert(AA && "No alias?");
llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
// See if there is already something with the target's name in the module.
llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
if (Entry) {
unsigned AS = getContext().getTargetAddressSpace(VD->getType());
return llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
}
llvm::Constant *Aliasee;
if (isa<llvm::FunctionType>(DeclTy))
Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy,
GlobalDecl(cast<FunctionDecl>(VD)),
/*ForVTable=*/false);
else
Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
llvm::PointerType::getUnqual(DeclTy),
nullptr);
auto *F = cast<llvm::GlobalValue>(Aliasee);
F->setLinkage(llvm::Function::ExternalWeakLinkage);
WeakRefReferences.insert(F);
return Aliasee;
}
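/// Decide how to emit a global: immediately, deferred to first use, or not
/// at all (e.g. weak references, or the wrong side of a CUDA compilation).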
void CodeGenModule::EmitGlobal(GlobalDecl GD) {
const auto *Global = cast<ValueDecl>(GD.getDecl());
// Weak references don't produce any output by themselves.
if (Global->hasAttr<WeakRefAttr>())
return;
// If this is an alias definition (which otherwise looks like a declaration)
// emit it now.
if (Global->hasAttr<AliasAttr>())
return EmitAliasDefinition(GD);
// If this is CUDA, be selective about which declarations we emit.
if (LangOpts.CUDA) {
if (LangOpts.CUDAIsDevice) {
if (!Global->hasAttr<CUDADeviceAttr>() &&
!Global->hasAttr<CUDAGlobalAttr>() &&
!Global->hasAttr<CUDAConstantAttr>() &&
!Global->hasAttr<CUDASharedAttr>())
return;
} else {
if (!Global->hasAttr<CUDAHostAttr>() && (
Global->hasAttr<CUDADeviceAttr>() ||
Global->hasAttr<CUDAConstantAttr>() ||
Global->hasAttr<CUDASharedAttr>()))
return;
}
}
// Ignore declarations, they will be emitted on their first use.
if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
// Forward declarations are emitted lazily on first use.
if (!FD->doesThisDeclarationHaveABody()) {
if (!FD->doesDeclarationForceExternallyVisibleDefinition())
return;
StringRef MangledName = getMangledName(GD);
// Compute the function info and LLVM type.
const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
llvm::Type *Ty = getTypes().GetFunctionType(FI);
GetOrCreateLLVMFunction(MangledName, Ty, GD, /*ForVTable=*/false,
/*DontDefer=*/false);
return;
}
} else {
const auto *VD = cast<VarDecl>(Global);
assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
if (VD->isThisDeclarationADefinition() != VarDecl::Definition &&
!Context.isMSStaticDataMemberInlineDefinition(VD))
return;
}
// Defer code generation to first use when possible, e.g. if this is an inline
// function. If the global must always be emitted, do it eagerly if possible
// to benefit from cache locality.
if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) {
// Emit the definition if it can't be deferred.
EmitGlobalDefinition(GD);
return;
}
// If we're deferring emission of a C++ variable with an
// initializer, remember the order in which it appeared in the file.
if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
cast<VarDecl>(Global)->hasInit()) {
DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
CXXGlobalInits.push_back(nullptr);
}
StringRef MangledName = getMangledName(GD);
if (llvm::GlobalValue *GV = GetGlobalValue(MangledName)) {
// The value has already been used and should therefore be emitted.
addDeferredDeclToEmit(GV, GD);
} else if (MustBeEmitted(Global)) {
// The value must be emitted, but cannot be emitted eagerly.
assert(!MayBeEmittedEagerly(Global));
addDeferredDeclToEmit(/*GV=*/nullptr, GD);
} else {
// Otherwise, remember that we saw a deferred decl with this name. The
// first use of the mangled name will cause it to move into
// DeferredDeclsToEmit.
DeferredDecls[MangledName] = GD;
}
}
namespace {
struct FunctionIsDirectlyRecursive :
public RecursiveASTVisitor<FunctionIsDirectlyRecursive> {
const StringRef Name;
const Builtin::Context &BI;
bool Result;
FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C) :
Name(N), BI(C), Result(false) {
}
typedef RecursiveASTVisitor<FunctionIsDirectlyRecursive> Base;
bool TraverseCallExpr(CallExpr *E) {
const FunctionDecl *FD = E->getDirectCallee();
if (!FD)
return true;
AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
if (Attr && Name == Attr->getLabel()) {
Result = true;
return false;
}
unsigned BuiltinID = FD->getBuiltinID();
if (!BuiltinID || !BI.isLibFunction(BuiltinID))
return true;
StringRef BuiltinName = BI.GetName(BuiltinID);
if (BuiltinName.startswith("__builtin_") &&
Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
Result = true;
return false;
}
return true;
}
};
}
// isTriviallyRecursive - Check if this function calls another
// decl that, because of the asm attribute or the other decl being a builtin,
// ends up pointing to itself.
bool
CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
StringRef Name;
if (getCXXABI().getMangleContext().shouldMangleDeclName(FD)) {
// asm labels are a special kind of mangling we have to support.
AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
if (!Attr)
return false;
Name = Attr->getLabel();
} else {
Name = FD->getName();
}
FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
Walker.TraverseFunctionDecl(const_cast<FunctionDecl*>(FD));
return Walker.Result;
}
bool
CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage)
return true;
const auto *F = cast<FunctionDecl>(GD.getDecl());
if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>())
return false;
// PR9614. Avoid cases where the source code is lying to us. An available
// externally function should have an equivalent function somewhere else,
// but a function that calls itself is clearly not equivalent to the real
// implementation.
// This happens in glibc's btowc and in some configure checks.
return !isTriviallyRecursive(F);
}
/// If the type for the method's class was generated by
/// CGDebugInfo::createContextChain(), the cache contains only a
/// limited DIType without any declarations. Since EmitFunctionStart()
/// needs to find the canonical declaration for each method, we need
/// to construct the complete type prior to emitting the method.
void CodeGenModule::CompleteDIClassType(const CXXMethodDecl* D) {
if (!D->isInstance())
return;
if (CGDebugInfo *DI = getModuleDebugInfo())
if (getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) {
// HLSL Change Begin - This is a reference.
QualType ThisType = D->getThisObjectType(getContext());
DI->getOrCreateRecordType(ThisType, D->getLocation());
// HLSL Change End - This is a reference.
}
}
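/// Emit the definition of a function or variable, routing C++ methods
/// through the structor and thunk emission paths where needed.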
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
const auto *D = cast<ValueDecl>(GD.getDecl());
PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
Context.getSourceManager(),
"Generating code for declaration");
if (isa<FunctionDecl>(D)) {
// At -O0, don't generate IR for functions with available_externally
// linkage.
if (!shouldEmitFunction(GD))
return;
// HLSL Change Begin - Support hierarchical time tracing.
const auto *FD = dyn_cast<FunctionDecl>(D);
llvm::TimeTraceScope TimeScope(
"CodeGen Function", [FD]() { return FD->getQualifiedNameAsString(); });
// HLSL Change End - Support hierarchical time tracing.
if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
CompleteDIClassType(Method);
// Make sure to emit the definition(s) before we emit the thunks.
// This is necessary for the generation of certain thunks.
if (const auto *CD = dyn_cast<CXXConstructorDecl>(Method))
ABI->emitCXXStructor(CD, getFromCtorType(GD.getCtorType()));
else if (const auto *DD = dyn_cast<CXXDestructorDecl>(Method))
ABI->emitCXXStructor(DD, getFromDtorType(GD.getDtorType()));
else
EmitGlobalFunctionDefinition(GD, GV);
if (Method->isVirtual())
getVTables().EmitThunks(GD);
return;
}
return EmitGlobalFunctionDefinition(GD, GV);
}
if (const auto *VD = dyn_cast<VarDecl>(D))
return EmitGlobalVarDefinition(VD);
llvm_unreachable("Invalid argument to EmitGlobalDefinition()");
}
/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
/// module, create and return an llvm Function with the specified type. If there
/// is something in the module with the specified name, return it potentially
/// bitcasted to the right type.
///
/// If D is non-null, it specifies a decl that corresponds to this. This is used
/// to set the attributes on the function when it is first created.
llvm::Constant *
CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
llvm::Type *Ty,
GlobalDecl GD, bool ForVTable,
bool DontDefer, bool IsThunk,
llvm::AttributeSet ExtraAttrs) {
const Decl *D = GD.getDecl();
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
if (WeakRefReferences.erase(Entry)) {
const FunctionDecl *FD = cast_or_null<FunctionDecl>(D);
if (FD && !FD->hasAttr<WeakAttr>())
Entry->setLinkage(llvm::Function::ExternalLinkage);
}
// Handle dropped DLL attributes.
if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
if (Entry->getType()->getElementType() == Ty)
return Entry;
// Make sure the result is of the correct type.
return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo());
}
// This function doesn't have a complete type (for example, the return
// type is an incomplete struct). Use a fake type instead, and make
// sure not to try to set attributes.
bool IsIncompleteFunction = false;
llvm::FunctionType *FTy;
if (isa<llvm::FunctionType>(Ty)) {
FTy = cast<llvm::FunctionType>(Ty);
} else {
FTy = llvm::FunctionType::get(VoidTy, false);
IsIncompleteFunction = true;
}
// HLSL Change: unique_ptr for F
llvm::Function *F = llvm::Function::Create(FTy,
llvm::Function::ExternalLinkage,
MangledName, &getModule());
assert(F->getName() == MangledName && "name was uniqued!");
if (D)
SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
if (ExtraAttrs.hasAttributes(llvm::AttributeSet::FunctionIndex)) {
llvm::AttrBuilder B(ExtraAttrs, llvm::AttributeSet::FunctionIndex);
F->addAttributes(llvm::AttributeSet::FunctionIndex,
llvm::AttributeSet::get(VMContext,
llvm::AttributeSet::FunctionIndex,
B));
}
if (!DontDefer) {
// All MSVC dtors other than the base dtor are linkonce_odr and delegate to
// each other bottoming out with the base dtor. Therefore we emit non-base
// dtors on usage, even if there is no dtor definition in the TU.
if (D && isa<CXXDestructorDecl>(D) &&
getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
GD.getDtorType()))
addDeferredDeclToEmit(F, GD);
// This is the first use or definition of a mangled name. If there is a
// deferred decl with this name, remember that we need to emit it at the end
// of the file.
auto DDI = DeferredDecls.find(MangledName);
if (DDI != DeferredDecls.end()) {
// Move the potentially referenced deferred decl to the
// DeferredDeclsToEmit list, and remove it from DeferredDecls (since we
// don't need it anymore).
addDeferredDeclToEmit(F, DDI->second);
DeferredDecls.erase(DDI);
// Otherwise, there are cases we have to worry about where we're
// using a declaration for which we must emit a definition but where
// we might not find a top-level definition:
// - member functions defined inline in their classes
// - friend functions defined inline in some class
// - special member functions with implicit definitions
// If we ever change our AST traversal to walk into class methods,
// this will be unnecessary.
//
// We also don't emit a definition for a function if it's going to be an
// entry in a vtable, unless it's already marked as used.
} else if (getLangOpts().CPlusPlus && D) {
// Look for a declaration that's lexically in a record.
for (const auto *FD = cast<FunctionDecl>(D)->getMostRecentDecl(); FD;
FD = FD->getPreviousDecl()) {
if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
if (FD->doesThisDeclarationHaveABody()) {
addDeferredDeclToEmit(F, GD.getWithDecl(FD));
break;
}
}
}
}
}
// Make sure the result is of the requested type.
if (!IsIncompleteFunction) {
assert(F->getType()->getElementType() == Ty);
return F;
}
llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
return llvm::ConstantExpr::getBitCast(F, PTy);
}
/// GetAddrOfFunction - Return the address of the given function. If Ty is
/// non-null, then this function will use the specified type if it has to
/// create it (this occurs when we see a definition of the function).
llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
llvm::Type *Ty,
bool ForVTable,
bool DontDefer) {
// If there was no specific requested type, just convert it now.
if (!Ty)
Ty = getTypes().ConvertType(cast<ValueDecl>(GD.getDecl())->getType());
StringRef MangledName = getMangledName(GD);
return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer);
}
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
llvm::Constant *
CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy,
StringRef Name,
llvm::AttributeSet ExtraAttrs) {
llvm::Constant *C =
GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
/*DontDefer=*/false, /*IsThunk=*/false, ExtraAttrs);
if (auto *F = dyn_cast<llvm::Function>(C))
if (F->empty())
F->setCallingConv(getRuntimeCC());
return C;
}
/// CreateBuiltinFunction - Create a new builtin function with the specified
/// type and name.
llvm::Constant *
CodeGenModule::CreateBuiltinFunction(llvm::FunctionType *FTy,
StringRef Name,
llvm::AttributeSet ExtraAttrs) {
llvm::Constant *C =
GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
/*DontDefer=*/false, /*IsThunk=*/false, ExtraAttrs);
if (auto *F = dyn_cast<llvm::Function>(C))
if (F->empty())
F->setCallingConv(getBuiltinCC());
return C;
}
/// isTypeConstant - Determine whether an object of this type can be emitted
/// as a constant.
///
/// If ExcludeCtor is true, the duration when the object's constructor runs
/// will not be considered. The caller will need to verify that the object is
/// not written to during its construction.
bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
if (!Ty.isConstant(Context) && !Ty->isReferenceType())
return false;
if (Context.getLangOpts().CPlusPlus) {
if (const CXXRecordDecl *Record
= Context.getBaseElementType(Ty)->getAsCXXRecordDecl())
return ExcludeCtor && !Record->hasMutableFields() &&
Record->hasTrivialDestructor();
}
return true;
}
/// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
/// create and return an llvm GlobalVariable with the specified type. If there
/// is something in the module with the specified name, return it potentially
/// bitcasted to the right type.
///
/// If D is non-null, it specifies a decl that corresponds to this. This is used
/// to set the attributes on the global when it is first created.
llvm::Constant *
CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
llvm::PointerType *Ty,
const VarDecl *D) {
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
if (WeakRefReferences.erase(Entry)) {
if (D && !D->hasAttr<WeakAttr>())
Entry->setLinkage(llvm::Function::ExternalLinkage);
}
// Handle dropped DLL attributes.
if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
if (Entry->getType() == Ty)
return Entry;
// Make sure the result is of the correct type.
if (Entry->getType()->getAddressSpace() != Ty->getAddressSpace()) {
return llvm::ConstantExpr::getAddrSpaceCast(Entry, Ty);
}
return llvm::ConstantExpr::getBitCast(Entry, Ty);
}
unsigned AddrSpace = GetGlobalVarAddressSpace(D, Ty->getAddressSpace());
auto *GV = new llvm::GlobalVariable(
getModule(), Ty->getElementType(), false,
llvm::GlobalValue::ExternalLinkage, nullptr, MangledName, nullptr,
llvm::GlobalVariable::NotThreadLocal, AddrSpace);
// This is the first use or definition of a mangled name. If there is a
// deferred decl with this name, remember that we need to emit it at the end
// of the file.
auto DDI = DeferredDecls.find(MangledName);
if (DDI != DeferredDecls.end()) {
// Move the potentially referenced deferred decl to the DeferredDeclsToEmit
// list, and remove it from DeferredDecls (since we don't need it anymore).
addDeferredDeclToEmit(GV, DDI->second);
DeferredDecls.erase(DDI);
}
// Handle things which are present even on external declarations.
if (D) {
// FIXME: This code is overly simple and should be merged with other global
// handling.
GV->setConstant(isTypeConstant(D->getType(), false));
GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
setLinkageAndVisibilityForGV(GV, D);
if (D->getTLSKind()) {
if (D->getTLSKind() == VarDecl::TLS_Dynamic)
CXXThreadLocals.push_back(std::make_pair(D, GV));
setTLSMode(GV, *D);
}
// If required by the ABI, treat declarations of static data members with
// inline initializers as definitions.
if (getContext().isMSStaticDataMemberInlineDefinition(D)) {
EmitGlobalVarDefinition(D);
}
// Handle XCore specific ABI requirements.
if (getTarget().getTriple().getArch() == llvm::Triple::xcore &&
D->getLanguageLinkage() == CLanguageLinkage &&
D->getType().isConstant(Context) &&
isExternallyVisible(D->getLinkageAndVisibility().getLinkage()))
GV->setSection(".cp.rodata");
}
if (AddrSpace != Ty->getAddressSpace())
return llvm::ConstantExpr::getAddrSpaceCast(GV, Ty);
return GV;
}
llvm::GlobalVariable *
CodeGenModule::CreateOrReplaceCXXRuntimeVariable(StringRef Name,
llvm::Type *Ty,
llvm::GlobalValue::LinkageTypes Linkage) {
llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
llvm::GlobalVariable *OldGV = nullptr;
if (GV) {
// Check if the variable has the right type.
if (GV->getType()->getElementType() == Ty)
return GV;
// Because of C++ name mangling, the only way we can end up with an already
// existing global with the same name is if it has been declared extern "C".
assert(GV->isDeclaration() && "Declaration has wrong type!");
OldGV = GV;
}
// Create a new variable.
GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true,
Linkage, nullptr, Name);
if (OldGV) {
// Replace occurrences of the old variable if needed.
GV->takeName(OldGV);
if (!OldGV->use_empty()) {
llvm::Constant *NewPtrForOldDecl =
llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
OldGV->replaceAllUsesWith(NewPtrForOldDecl);
}
OldGV->eraseFromParent();
}
if (supportsCOMDAT() && GV->isWeakForLinker() &&
!GV->hasAvailableExternallyLinkage())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
return GV;
}
/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
/// given global variable. If Ty is non-null and if the global doesn't exist,
/// then it will be created with the specified type instead of whatever the
/// normal requested type would be.
llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
llvm::Type *Ty) {
assert(D->hasGlobalStorage() && "Not a global variable");
QualType ASTTy = D->getType();
if (!Ty)
Ty = getTypes().ConvertTypeForMem(ASTTy);
llvm::PointerType *PTy =
llvm::PointerType::get(Ty, getContext().getTargetAddressSpace(ASTTy));
StringRef MangledName = getMangledName(D);
return GetOrCreateLLVMGlobal(MangledName, PTy, D);
}
/// CreateRuntimeVariable - Create a new runtime global variable with the
/// specified type and name.
llvm::Constant *
CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
StringRef Name) {
return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), nullptr);
}
void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
assert(!D->getInit() && "Cannot emit definite definitions here!");
if (!MustBeEmitted(D)) {
// If we have not seen a reference to this variable yet, place it
// into the deferred declarations table to be emitted if needed
// later.
StringRef MangledName = getMangledName(D);
if (!GetGlobalValue(MangledName)) {
DeferredDecls[MangledName] = D;
return;
}
}
// The tentative definition is the only definition.
EmitGlobalVarDefinition(D);
}
CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
return Context.toCharUnitsFromBits(
TheDataLayout.getTypeStoreSizeInBits(Ty));
}
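/// Compute the LLVM address space for a global variable, honoring CUDA
/// attributes and the HLSL groupshared address space.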
unsigned CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D,
unsigned AddrSpace) {
if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
if (D->hasAttr<CUDAConstantAttr>())
AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_constant);
else if (D->hasAttr<CUDASharedAttr>())
AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_shared);
else
AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_device);
}
// HLSL Change Begins
if (LangOpts.HLSL) {
if (D->hasAttr<HLSLGroupSharedAttr>())
AddrSpace = getContext().getTargetAddressSpace(hlsl::DXIL::kTGSMAddrSpace);
}
// HLSL Change Ends
return AddrSpace;
}
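/// Record an internal-linkage, 'used' entity declared in an extern "C"
/// context (e.g. extern "C" { static int x __attribute__((used)); }) so it
/// can later receive the unmangled name if nothing else claims it.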
template<typename SomeDecl>
void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D,
llvm::GlobalValue *GV) {
if (!getLangOpts().CPlusPlus)
return;
// Must have 'used' attribute, or else inline assembly can't rely on
// the name existing.
if (!D->template hasAttr<UsedAttr>())
return;
// Must have internal linkage and an ordinary name.
if (!D->getIdentifier() || D->getFormalLinkage() != InternalLinkage)
return;
// Must be in an extern "C" context. Entities declared directly within
// a record are not extern "C" even if the record is in such a context.
const SomeDecl *First = D->getFirstDecl();
if (First->getDeclContext()->isRecord() || !First->isInExternCContext())
return;
// OK, this is an internal linkage entity inside an extern "C" linkage
// specification. Make a note of that so we can give it the "expected"
// mangled name if nothing else is using that name.
std::pair<StaticExternCMap::iterator, bool> R =
StaticExternCValues.insert(std::make_pair(D->getIdentifier(), GV));
// If we have multiple internal linkage entities with the same name
// in extern "C" regions, none of them gets that name.
if (!R.second)
R.first->second = nullptr;
}
static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
if (!CGM.supportsCOMDAT())
return false;
if (D.hasAttr<SelectAnyAttr>())
return true;
GVALinkage Linkage;
if (auto *VD = dyn_cast<VarDecl>(&D))
Linkage = CGM.getContext().GetGVALinkageForVariable(VD);
else
Linkage = CGM.getContext().GetGVALinkageForFunction(cast<FunctionDecl>(&D));
switch (Linkage) {
case GVA_Internal:
case GVA_AvailableExternally:
case GVA_StrongExternal:
return false;
case GVA_DiscardableODR:
case GVA_StrongODR:
return true;
}
llvm_unreachable("No such linkage");
}
void CodeGenModule::maybeSetTrivialComdat(const Decl &D,
llvm::GlobalObject &GO) {
if (!shouldBeInCOMDAT(*this, D))
return;
GO.setComdat(TheModule.getOrInsertComdat(GO.getName()));
}
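/// Emit the definition of a global variable: compute its initializer,
/// recreate the global if an earlier declaration had the wrong type, and set
/// linkage, alignment, TLS mode and related attributes.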
void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
llvm::Constant *Init = nullptr;
QualType ASTTy = D->getType();
CXXRecordDecl *RD = ASTTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
bool NeedsGlobalCtor = false;
bool NeedsGlobalDtor = RD && !RD->hasTrivialDestructor();
const VarDecl *InitDecl;
const Expr *InitExpr = D->getAnyInitializer(InitDecl);
if (!InitExpr) {
// This is a tentative definition; tentative definitions are
// implicitly initialized with { 0 }.
//
// Note that tentative definitions are only emitted at the end of
// a translation unit, so they should never have incomplete
// type. In addition, EmitTentativeDefinition makes sure that we
// never attempt to emit a tentative definition if a real one
// exists. A use may still exist, however, so we may still need
// to do a RAUW.
// HLSL Change Starts
// Allow incomplete type.
if (getLangOpts().HLSL && ASTTy->isIncompleteType()) {
if (hlsl::IsIncompleteHLSLResourceArrayType(getContext(), ASTTy)) {
llvm::Type *Ty = getTypes().ConvertTypeForMem(D->getType());
llvm::Constant *Entry = GetAddrOfGlobalVar(D, Ty);
// Entry is now either a Function or GlobalVariable.
auto *GV = dyn_cast<llvm::GlobalVariable>(Entry);
// Emit global variable debug information.
if (CGDebugInfo *DI = getModuleDebugInfo())
if (getCodeGenOpts().getDebugInfo() >=
CodeGenOptions::LimitedDebugInfo)
DI->EmitGlobalVariable(GV, D);
return;
}
}
// HLSL Change Ends
assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
Init = EmitNullConstant(D->getType());
} else {
initializedGlobalDecl = GlobalDecl(D);
Init = EmitConstantInit(*InitDecl);
if (!Init) {
QualType T = InitExpr->getType();
if (D->getType()->isReferenceType())
T = D->getType();
if (getLangOpts().CPlusPlus) {
Init = EmitNullConstant(T);
NeedsGlobalCtor = true;
// HLSL Change Begins.
if (getLangOpts().HLSL && D->isExternallyVisible()) {
// For an external global constant with an initializer, the initializer
// will be ignored. Warn about it here.
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Warning,
"Initializer of external global will be ignored");
Diags.Report(D->getLocation(), DiagID);
// Don't create global ctor for it.
NeedsGlobalCtor = false;
}
// HLSL Change Ends.
} else {
ErrorUnsupported(D, "static initializer");
Init = llvm::UndefValue::get(getTypes().ConvertType(T));
}
} else {
// We don't need an initializer, so remove the entry for the delayed
// initializer position (just in case this entry was delayed) if we
// also don't need to register a destructor.
if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
DelayedCXXInitPosition.erase(D);
// HLSL Change Begins.
if (getLangOpts().HLSL && D->isExternallyVisible() && !D->isStaticDataMember()) {
// For an external global constant with an initializer, the initializer
// will be ignored.
Init = EmitNullConstant(D->getType());
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Warning,
"Initializer of external global will be ignored");
Diags.Report(D->getLocation(), DiagID);
}
// HLSL Change Ends.
}
}
llvm::Type* InitType = Init->getType();
llvm::Constant *Entry = GetAddrOfGlobalVar(D, InitType);
// Strip off a bitcast if we got one back.
if (auto *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
assert(CE->getOpcode() == llvm::Instruction::BitCast ||
CE->getOpcode() == llvm::Instruction::AddrSpaceCast ||
// All zero index gep.
CE->getOpcode() == llvm::Instruction::GetElementPtr);
Entry = CE->getOperand(0);
}
// Entry is now either a Function or GlobalVariable.
auto *GV = dyn_cast<llvm::GlobalVariable>(Entry);
// We have a definition after a declaration with the wrong type.
// We must make a new GlobalVariable* and update everything that used OldGV
// (a declaration or tentative definition) with the new GlobalVariable*
// (which will be a definition).
//
// This happens if there is a prototype for a global (e.g.
// "extern int x[];") and then a definition of a different type (e.g.
// "int x[10];"). This also happens when an initializer has a different type
// from the type of the global (this happens with unions).
if (!GV ||
GV->getType()->getElementType() != InitType ||
GV->getType()->getAddressSpace() !=
GetGlobalVarAddressSpace(D, getContext().getTargetAddressSpace(ASTTy))) {
// Move the old entry aside so that we'll create a new one.
Entry->setName(StringRef());
// Make a new global with the correct type, this is now guaranteed to work.
GV = cast<llvm::GlobalVariable>(GetAddrOfGlobalVar(D, InitType));
// Replace all uses of the old global with the new global
llvm::Constant *NewPtrForOldDecl =
llvm::ConstantExpr::getBitCast(GV, Entry->getType());
Entry->replaceAllUsesWith(NewPtrForOldDecl);
// Erase the old global, since it is no longer used.
cast<llvm::GlobalValue>(Entry)->eraseFromParent();
}
MaybeHandleStaticInExternC(D, GV);
if (D->hasAttr<AnnotateAttr>())
AddGlobalAnnotations(D, GV);
// HLSL Change Begins.
if (!getLangOpts().HLSL || !D->isExternallyVisible())
GV->setInitializer(Init); // Resources and $Globals are not initialized
// HLSL Change Ends.
// If it is safe to mark the global 'constant', do so now.
GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
isTypeConstant(D->getType(), true));
// If it is in a read-only section, mark it 'constant'.
if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
const ASTContext::SectionInfo &SI = Context.SectionInfos[SA->getName()];
if ((SI.SectionFlags & ASTContext::PSF_Write) == 0)
GV->setConstant(true);
}
GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
// Set the llvm linkage type as appropriate.
llvm::GlobalValue::LinkageTypes Linkage =
getLLVMLinkageVarDefinition(D, GV->isConstant());
// On Darwin, the backing variable for a C++11 thread_local variable always
// has internal linkage; all accesses should just be calls to the
// Itanium-specified entry point, which has the normal linkage of the
// variable.
if (!D->isStaticLocal() && D->getTLSKind() == VarDecl::TLS_Dynamic &&
Context.getTargetInfo().getTriple().isMacOSX())
Linkage = llvm::GlobalValue::InternalLinkage;
// HLSL Change Begins.
if (getLangOpts().HLSL && D->isExternallyVisible())
Linkage = llvm::GlobalValue::ExternalLinkage; // Resources and $Globals have no definition
// HLSL Change Ends.
GV->setLinkage(Linkage);
if (D->hasAttr<DLLImportAttr>())
GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
else if (D->hasAttr<DLLExportAttr>())
GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
else
GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
if (Linkage == llvm::GlobalVariable::CommonLinkage)
// common vars aren't constant even if declared const.
GV->setConstant(false);
setNonAliasAttributes(D, GV);
if (D->getTLSKind() && !GV->isThreadLocal()) {
if (D->getTLSKind() == VarDecl::TLS_Dynamic)
CXXThreadLocals.push_back(std::make_pair(D, GV));
setTLSMode(GV, *D);
}
maybeSetTrivialComdat(*D, *GV);
// Emit the initializer function if necessary.
if (NeedsGlobalCtor || NeedsGlobalDtor)
EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
// HLSL Change Begin.
if (NeedsGlobalCtor)
GV->setInitializer(llvm::UndefValue::get(InitType));
// HLSL Change End.
SanitizerMD->reportGlobalToASan(GV, *D, NeedsGlobalCtor);
// Emit global variable debug information.
if (CGDebugInfo *DI = getModuleDebugInfo())
if (getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
DI->EmitGlobalVariable(GV, D);
}
static bool isVarDeclStrongDefinition(const ASTContext &Context,
CodeGenModule &CGM, const VarDecl *D,
bool NoCommon) {
// Don't give variables common linkage if -fno-common was specified unless it
// was overridden by a NoCommon attribute.
if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>())
return true;
// C11 6.9.2/2:
// A declaration of an identifier for an object that has file scope without
// an initializer, and without a storage-class specifier or with the
// storage-class specifier static, constitutes a tentative definition.
if (D->getInit() || D->hasExternalStorage())
return true;
// A variable cannot be both common and exist in a section.
if (D->hasAttr<SectionAttr>())
return true;
// Thread local vars aren't considered common linkage.
if (D->getTLSKind())
return true;
// Tentative definitions marked with WeakImportAttr are true definitions.
if (D->hasAttr<WeakImportAttr>())
return true;
// A variable cannot be both common and exist in a comdat.
if (shouldBeInCOMDAT(CGM, *D))
return true;
// Declarations with a required alignment do not have common linkage in MSVC
// mode.
if (Context.getLangOpts().MSVCCompat) {
if (D->hasAttr<AlignedAttr>())
return true;
QualType VarType = D->getType();
if (Context.isAlignmentRequired(VarType))
return true;
if (const auto *RT = VarType->getAs<RecordType>()) {
const RecordDecl *RD = RT->getDecl();
for (const FieldDecl *FD : RD->fields()) {
if (FD->isBitField())
continue;
if (FD->hasAttr<AlignedAttr>())
return true;
if (Context.isAlignmentRequired(FD->getType()))
return true;
}
}
}
return false;
}
llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) {
if (Linkage == GVA_Internal)
return llvm::Function::InternalLinkage;
if (D->hasAttr<WeakAttr>()) {
if (IsConstantVariable)
return llvm::GlobalVariable::WeakODRLinkage;
else
return llvm::GlobalVariable::WeakAnyLinkage;
}
// We are guaranteed to have a strong definition somewhere else,
// so we can use available_externally linkage.
if (Linkage == GVA_AvailableExternally)
return llvm::Function::AvailableExternallyLinkage;
// Note that Apple's kernel linker doesn't support symbol
// coalescing, so we need to avoid linkonce and weak linkages there.
// Normally, this means we just map to internal, but for explicit
// instantiations we'll map to external.
// In C++, the compiler has to emit a definition in every translation unit
// that references the function. We should use linkonce_odr because
// a) if all references in this translation unit are optimized away, we
// don't need to codegen it. b) if the function persists, it needs to be
// merged with other definitions. c) C++ has the ODR, so we know the
// definition is dependable.
if (Linkage == GVA_DiscardableODR)
return !Context.getLangOpts().AppleKext ? llvm::Function::LinkOnceODRLinkage
: llvm::Function::InternalLinkage;
// An explicit instantiation of a template has weak linkage, since
// explicit instantiations can occur in multiple translation units
// and must all be equivalent. However, we are not allowed to
// throw away these explicit instantiations.
if (Linkage == GVA_StrongODR)
return !Context.getLangOpts().AppleKext ? llvm::Function::WeakODRLinkage
: llvm::Function::ExternalLinkage;
// C++ doesn't have tentative definitions and thus cannot have common
// linkage.
if (!getLangOpts().CPlusPlus && isa<VarDecl>(D) &&
!isVarDeclStrongDefinition(Context, *this, cast<VarDecl>(D),
CodeGenOpts.NoCommon))
return llvm::GlobalVariable::CommonLinkage;
// selectany symbols are externally visible, so use weak instead of
// linkonce. MSVC optimizes away references to const selectany globals, so
// all definitions should be the same and ODR linkage should be used.
// http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx
if (D->hasAttr<SelectAnyAttr>())
return llvm::GlobalVariable::WeakODRLinkage;
// Otherwise, we have strong external linkage.
assert(Linkage == GVA_StrongExternal);
return llvm::GlobalVariable::ExternalLinkage;
}
llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageVarDefinition(
const VarDecl *VD, bool IsConstant) {
GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD);
return getLLVMLinkageForDeclarator(VD, Linkage, IsConstant);
}
/// Replace the uses of a function that was declared with a non-proto type.
/// We want to silently drop extra arguments from call sites.
static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
llvm::Function *newFn) {
// Fast path.
if (old->use_empty()) return;
llvm::Type *newRetTy = newFn->getReturnType();
SmallVector<llvm::Value*, 4> newArgs;
for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
ui != ue; ) {
llvm::Value::use_iterator use = ui++; // Increment before the use is erased.
llvm::User *user = use->getUser();
// Recognize and replace uses of bitcasts. Most calls to
// unprototyped functions will use bitcasts.
if (auto *bitcast = dyn_cast<llvm::ConstantExpr>(user)) {
if (bitcast->getOpcode() == llvm::Instruction::BitCast)
replaceUsesOfNonProtoConstant(bitcast, newFn);
continue;
}
// Recognize calls to the function.
llvm::CallSite callSite(user);
if (!callSite) continue;
if (!callSite.isCallee(&*use)) continue;
// If the return types don't match exactly, then we can't
// transform this call unless it's dead.
if (callSite->getType() != newRetTy && !callSite->use_empty())
continue;
// Get the call site's attribute list.
SmallVector<llvm::AttributeSet, 8> newAttrs;
llvm::AttributeSet oldAttrs = callSite.getAttributes();
// Collect any return attributes from the call.
if (oldAttrs.hasAttributes(llvm::AttributeSet::ReturnIndex))
newAttrs.push_back(
llvm::AttributeSet::get(newFn->getContext(),
oldAttrs.getRetAttributes()));
// If the function was passed too few arguments, don't transform.
unsigned newNumArgs = newFn->arg_size();
if (callSite.arg_size() < newNumArgs) continue;
// If extra arguments were passed, we silently drop them.
// If any of the types mismatch, we don't transform.
unsigned argNo = 0;
bool dontTransform = false;
for (llvm::Function::arg_iterator ai = newFn->arg_begin(),
ae = newFn->arg_end(); ai != ae; ++ai, ++argNo) {
if (callSite.getArgument(argNo)->getType() != ai->getType()) {
dontTransform = true;
break;
}
// Add any parameter attributes.
if (oldAttrs.hasAttributes(argNo + 1))
newAttrs.push_back(
llvm::AttributeSet::get(newFn->getContext(),
oldAttrs.getParamAttributes(argNo + 1)));
}
if (dontTransform)
continue;
if (oldAttrs.hasAttributes(llvm::AttributeSet::FunctionIndex))
newAttrs.push_back(llvm::AttributeSet::get(newFn->getContext(),
oldAttrs.getFnAttributes()));
// Okay, we can transform this. Create the new call instruction and copy
// over the required information.
newArgs.append(callSite.arg_begin(), callSite.arg_begin() + argNo);
llvm::CallSite newCall;
if (callSite.isCall()) {
newCall = llvm::CallInst::Create(newFn, newArgs, "",
callSite.getInstruction());
} else {
auto *oldInvoke = cast<llvm::InvokeInst>(callSite.getInstruction());
newCall = llvm::InvokeInst::Create(newFn,
oldInvoke->getNormalDest(),
oldInvoke->getUnwindDest(),
newArgs, "",
callSite.getInstruction());
}
newArgs.clear(); // for the next iteration
if (!newCall->getType()->isVoidTy())
newCall->takeName(callSite.getInstruction());
newCall.setAttributes(
llvm::AttributeSet::get(newFn->getContext(), newAttrs));
newCall.setCallingConv(callSite.getCallingConv());
// Finally, remove the old call, replacing any uses with the new one.
if (!callSite->use_empty())
callSite->replaceAllUsesWith(newCall.getInstruction());
// Copy debug location attached to CI.
if (callSite->getDebugLoc())
newCall->setDebugLoc(callSite->getDebugLoc());
callSite->eraseFromParent();
}
}
/// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
/// implement a function with no prototype, e.g. "int foo() {}". If there are
/// existing call uses of the old function in the module, this adjusts them to
/// call the new function directly.
///
/// This is not just a cleanup: the always_inline pass requires direct calls to
/// functions to be able to inline them. If there is a bitcast in the way, it
/// won't inline them. Instcombine normally deletes these calls, but it isn't
/// run at -O0.
static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
llvm::Function *NewFn) {
// If we're redefining a global as a function, don't transform it.
if (!isa<llvm::Function>(Old)) return;
replaceUsesOfNonProtoConstant(Old, NewFn);
}
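/// Handle the instantiation of a C++ static data member; for explicit
/// instantiation definitions, referencing the global here ensures it gets
/// emitted at the end of the translation unit.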
void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
// If we have a definition, this might be a deferred decl. If the
// instantiation is explicit, make sure we emit it at the end.
if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition)
GetAddrOfGlobalVar(VD);
EmitTopLevelDecl(VD);
}
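/// Emit the body of a function definition, first replacing any prior
/// declaration whose type doesn't match (e.g. a non-prototyped declaration).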
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
llvm::GlobalValue *GV) {
const auto *D = cast<FunctionDecl>(GD.getDecl());
// Compute the function info and LLVM type.
const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
// HLSL Change Begins.
// Out parameter is not ByVal.
auto I = const_cast<CGFunctionInfo&>(FI).arg_begin();
for (const ParmVarDecl *Param : D->params()) {
if (Param->isModifierOut() && I->info.isIndirect()) {
I->info.setIndirectByVal(false);
}
I++;
}
// HLSL Change Ends.
llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
// Get or create the prototype for the function.
if (!GV) {
llvm::Constant *C =
GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer*/ true);
// Strip off a bitcast if we got one back.
if (auto *CE = dyn_cast<llvm::ConstantExpr>(C)) {
assert(CE->getOpcode() == llvm::Instruction::BitCast);
GV = cast<llvm::GlobalValue>(CE->getOperand(0));
} else {
GV = cast<llvm::GlobalValue>(C);
}
}
if (!GV->isDeclaration()) {
getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name);
GlobalDecl OldGD = Manglings.lookup(GV->getName());
if (auto *Prev = OldGD.getDecl())
getDiags().Report(Prev->getLocation(), diag::note_previous_definition);
return;
}
if (GV->getType()->getElementType() != Ty) {
// If the types mismatch then we have to rewrite the definition.
assert(GV->isDeclaration() && "Shouldn't replace non-declaration");
// F is the Function* for the one with the wrong type, we must make a new
// Function* and update everything that used F (a declaration) with the new
// Function* (which will be a definition).
//
// This happens if there is a prototype for a function
// (e.g. "int f()") and then a definition of a different type
// (e.g. "int f(int x)"). Move the old function aside so that it
// doesn't interfere with GetAddrOfFunction.
GV->setName(StringRef());
auto *NewFn = cast<llvm::Function>(GetAddrOfFunction(GD, Ty));
// This might be an implementation of a function without a
// prototype, in which case, try to do special replacement of
// calls which match the new prototype. The key thing here
// is that we also potentially drop arguments from the call site
// so as to make a direct call, which makes the inliner happier
// and suppresses a number of optimizer warnings (!) about
// dropping arguments.
if (!GV->use_empty()) {
ReplaceUsesOfNonProtoTypeWithRealFunction(GV, NewFn);
GV->removeDeadConstantUsers();
}
// Replace uses of F with the Function we will endow with a body.
if (!GV->use_empty()) {
llvm::Constant *NewPtrForOldDecl =
llvm::ConstantExpr::getBitCast(NewFn, GV->getType());
GV->replaceAllUsesWith(NewPtrForOldDecl);
}
// Ok, delete the old function now, which is dead.
GV->eraseFromParent();
GV = NewFn;
}
// We need to set linkage and visibility on the function before
// generating code for it because various parts of IR generation
// want to propagate this information down (e.g. to local static
// declarations).
auto *Fn = cast<llvm::Function>(GV);
setFunctionLinkage(GD, Fn);
setFunctionDLLStorageClass(GD, Fn);
// FIXME: this is redundant with part of setFunctionDefinitionAttributes
setGlobalVisibility(Fn, D);
MaybeHandleStaticInExternC(D, Fn);
maybeSetTrivialComdat(*D, *Fn);
CodeGenFunction(*this).GenerateCode(D, Fn, FI);
setFunctionDefinitionAttributes(D, Fn);
SetLLVMFunctionAttributesForDefinition(D, Fn);
if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
AddGlobalCtor(Fn, CA->getPriority());
if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
AddGlobalDtor(Fn, DA->getPriority());
if (D->hasAttr<AnnotateAttr>())
AddGlobalAnnotations(D, Fn);
}
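/// Emit a GlobalAlias for a declaration carrying the alias attribute,
/// replacing any earlier plain declaration with the same mangled name.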
void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
const auto *D = cast<ValueDecl>(GD.getDecl());
const AliasAttr *AA = D->getAttr<AliasAttr>();
assert(AA && "Not an alias?");
StringRef MangledName = getMangledName(GD);
// If there is a definition in the module, then it wins over the alias.
// This is dubious, but allow it to be safe. Just ignore the alias.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry && !Entry->isDeclaration())
return;
Aliases.push_back(GD);
llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
// Create a reference to the named value. This ensures that it is emitted
// if a deferred decl.
llvm::Constant *Aliasee;
if (isa<llvm::FunctionType>(DeclTy))
Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GD,
/*ForVTable=*/false);
else
Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
llvm::PointerType::getUnqual(DeclTy),
/*D=*/nullptr);
// Create the new alias itself, but don't set a name yet.
auto *GA = llvm::GlobalAlias::create(
cast<llvm::PointerType>(Aliasee->getType()),
llvm::Function::ExternalLinkage, "", Aliasee, &getModule());
if (Entry) {
if (GA->getAliasee() == Entry) {
Diags.Report(AA->getLocation(), diag::err_cyclic_alias);
return;
}
assert(Entry->isDeclaration());
// If there is a declaration in the module, then we had an extern followed
// by the alias, as in:
// extern int test6();
// ...
// int test6() __attribute__((alias("test7")));
//
// Remove it and replace uses of it with the alias.
GA->takeName(Entry);
Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
Entry->getType()));
Entry->eraseFromParent();
} else {
GA->setName(MangledName);
}
// Set attributes which are particular to an alias; this is a
// specialization of the attributes which may be set on a global
// variable/function.
if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() ||
D->isWeakImported()) {
GA->setLinkage(llvm::Function::WeakAnyLinkage);
}
if (const auto *VD = dyn_cast<VarDecl>(D))
if (VD->getTLSKind())
setTLSMode(GA, *VD);
setAliasAttributes(D, GA);
}
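/// Return the declaration of the LLVM intrinsic with the given ID,
/// specialized to the given overload types.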
llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
ArrayRef<llvm::Type*> Tys) {
return llvm::Intrinsic::getDeclaration(&getModule(), (llvm::Intrinsic::ID)IID,
Tys);
}
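/// Find or create the map entry for a CFString literal, converting it to
/// UTF-16 (and returning the code-unit count in StringLength) when it
/// contains non-ASCII or embedded nul characters.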
static llvm::StringMapEntry<llvm::GlobalVariable *> &
GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
const StringLiteral *Literal, bool TargetIsLSB,
bool &IsUTF16, unsigned &StringLength) {
StringRef String = Literal->getString();
unsigned NumBytes = String.size();
// Check for simple case.
if (!Literal->containsNonAsciiOrNull()) {
StringLength = NumBytes;
return *Map.insert(std::make_pair(String, nullptr)).first;
}
// Otherwise, convert the UTF8 literals into a string of shorts.
IsUTF16 = true;
SmallVector<UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
const UTF8 *FromPtr = (const UTF8 *)String.data();
UTF16 *ToPtr = &ToBuf[0];
(void)ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
&ToPtr, ToPtr + NumBytes,
strictConversion);
// ConvertUTF8toUTF16 returns the length in ToPtr.
StringLength = ToPtr - &ToBuf[0];
// Add an explicit null.
*ToPtr = 0;
return *Map.insert(std::make_pair(
StringRef(reinterpret_cast<const char *>(ToBuf.data()),
(StringLength + 1) * 2),
nullptr)).first;
}
static llvm::StringMapEntry<llvm::GlobalVariable *> &
GetConstantStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
const StringLiteral *Literal, unsigned &StringLength) {
StringRef String = Literal->getString();
StringLength = String.size();
return *Map.insert(std::make_pair(String, nullptr)).first;
}
llvm::Constant *
CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
unsigned StringLength = 0;
bool isUTF16 = false;
llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
GetConstantCFStringEntry(CFConstantStringMap, Literal,
getDataLayout().isLittleEndian(), isUTF16,
StringLength);
if (auto *C = Entry.second)
return C;
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
llvm::Value *V;
// If we don't already have it, get __CFConstantStringClassReference.
if (!CFConstantStringClassRef) {
llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
Ty = llvm::ArrayType::get(Ty, 0);
llvm::Constant *GV = CreateRuntimeVariable(Ty,
"__CFConstantStringClassReference");
// Decay array -> ptr
V = llvm::ConstantExpr::getGetElementPtr(Ty, GV, Zeros);
CFConstantStringClassRef = V;
}
else
V = CFConstantStringClassRef;
QualType CFTy = getContext().getCFConstantStringType();
auto *STy = cast<llvm::StructType>(getTypes().ConvertType(CFTy));
llvm::Constant *Fields[4];
// Class pointer.
Fields[0] = cast<llvm::ConstantExpr>(V);
// Flags.
llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
Fields[1] = isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0) :
llvm::ConstantInt::get(Ty, 0x07C8);
// String pointer.
llvm::Constant *C = nullptr;
if (isUTF16) {
ArrayRef<uint16_t> Arr = llvm::makeArrayRef<uint16_t>(
reinterpret_cast<uint16_t *>(const_cast<char *>(Entry.first().data())),
Entry.first().size() / 2);
C = llvm::ConstantDataArray::get(VMContext, Arr);
} else {
C = llvm::ConstantDataArray::getString(VMContext, Entry.first());
}
// Note: -fwritable-strings doesn't make the backing store strings of
// CFStrings writable. (See <rdar://problem/10657500>)
auto *GV =
new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
llvm::GlobalValue::PrivateLinkage, C, ".str");
GV->setUnnamedAddr(true);
// Don't enforce the target's minimum global alignment, since the only use
// of the string is via this class initializer.
// FIXME: We set the section explicitly to avoid a bug in ld64 224.1. Without
// it LLVM can merge the string with a non unnamed_addr one during LTO. Doing
// that changes the section it ends in, which surprises ld64.
if (isUTF16) {
CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
GV->setAlignment(Align.getQuantity());
GV->setSection("__TEXT,__ustring");
} else {
CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
GV->setAlignment(Align.getQuantity());
GV->setSection("__TEXT,__cstring,cstring_literals");
}
// String.
Fields[2] =
llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
if (isUTF16)
// Cast the UTF16 string to the correct type.
Fields[2] = llvm::ConstantExpr::getBitCast(Fields[2], Int8PtrTy);
// String length.
Ty = getTypes().ConvertType(getContext().LongTy);
Fields[3] = llvm::ConstantInt::get(Ty, StringLength);
// The struct.
C = llvm::ConstantStruct::get(STy, Fields);
GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
llvm::GlobalVariable::PrivateLinkage, C,
"_unnamed_cfstring_");
GV->setSection("__DATA,__cfstring");
Entry.second = GV;
return GV;
}
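/// Return a constant NSString object for the given string literal, caching
/// the result so that identical literals share one global.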
llvm::GlobalVariable *
CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
unsigned StringLength = 0;
llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
GetConstantStringEntry(CFConstantStringMap, Literal, StringLength);
if (auto *C = Entry.second)
return C;
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
llvm::Value *V;
// If we don't already have it, get _NSConstantStringClassReference.
if (!ConstantStringClassRef) {
std::string StringClass(getLangOpts().ObjCConstantStringClass);
llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
llvm::Constant *GV;
if (LangOpts.ObjCRuntime.isNonFragile()) {
std::string str =
StringClass.empty() ? "OBJC_CLASS_$_NSConstantString"
: "OBJC_CLASS_$_" + StringClass;
GV = getObjCRuntime().GetClassGlobal(str);
// Make sure the result is of the correct type.
llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
V = llvm::ConstantExpr::getBitCast(GV, PTy);
ConstantStringClassRef = V;
} else {
std::string str =
StringClass.empty() ? "_NSConstantStringClassReference"
: "_" + StringClass + "ClassReference";
llvm::Type *PTy = llvm::ArrayType::get(Ty, 0);
GV = CreateRuntimeVariable(PTy, str);
// Decay array -> ptr
V = llvm::ConstantExpr::getGetElementPtr(PTy, GV, Zeros);
ConstantStringClassRef = V;
}
} else
V = ConstantStringClassRef;
if (!NSConstantStringType) {
// Construct the type for a constant NSString.
RecordDecl *D = Context.buildImplicitRecord("__builtin_NSString");
D->startDefinition();
QualType FieldTypes[3];
// const int *isa;
FieldTypes[0] = Context.getPointerType(Context.IntTy.withConst());
// const char *str;
FieldTypes[1] = Context.getPointerType(Context.CharTy.withConst());
// unsigned int length;
FieldTypes[2] = Context.UnsignedIntTy;
// Create fields
for (unsigned i = 0; i < 3; ++i) {
FieldDecl *Field = FieldDecl::Create(Context, D,
SourceLocation(),
SourceLocation(), nullptr,
FieldTypes[i], /*TInfo=*/nullptr,
/*BitWidth=*/nullptr,
/*Mutable=*/false,
ICIS_NoInit);
Field->setAccess(AS_public);
D->addDecl(Field);
}
D->completeDefinition();
QualType NSTy = Context.getTagDeclType(D);
NSConstantStringType = cast<llvm::StructType>(getTypes().ConvertType(NSTy));
}
llvm::Constant *Fields[3];
// Class pointer.
Fields[0] = cast<llvm::ConstantExpr>(V);
// String pointer.
llvm::Constant *C =
llvm::ConstantDataArray::getString(VMContext, Entry.first());
llvm::GlobalValue::LinkageTypes Linkage;
bool isConstant;
Linkage = llvm::GlobalValue::PrivateLinkage;
isConstant = !LangOpts.WritableStrings;
auto *GV = new llvm::GlobalVariable(getModule(), C->getType(), isConstant,
Linkage, C, ".str");
GV->setUnnamedAddr(true);
// Don't enforce the target's minimum global alignment, since the only use
// of the string is via this class initializer.
CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
GV->setAlignment(Align.getQuantity());
Fields[1] =
llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
// String length.
llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
Fields[2] = llvm::ConstantInt::get(Ty, StringLength);
// The struct.
C = llvm::ConstantStruct::get(NSConstantStringType, Fields);
GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
llvm::GlobalVariable::PrivateLinkage, C,
"_unnamed_nsstring_");
const char *NSStringSection = "__OBJC,__cstring_object,regular,no_dead_strip";
const char *NSStringNonFragileABISection =
"__DATA,__objc_stringobj,regular,no_dead_strip";
// FIXME. Fix section.
GV->setSection(LangOpts.ObjCRuntime.isNonFragile()
? NSStringNonFragileABISection
: NSStringSection);
Entry.second = GV;
return GV;
}
QualType CodeGenModule::getObjCFastEnumerationStateType() {
if (ObjCFastEnumerationStateType.isNull()) {
RecordDecl *D = Context.buildImplicitRecord("__objcFastEnumerationState");
D->startDefinition();
QualType FieldTypes[] = {
Context.UnsignedLongTy,
Context.getPointerType(Context.getObjCIdType()),
Context.getPointerType(Context.UnsignedLongTy),
Context.getConstantArrayType(Context.UnsignedLongTy,
llvm::APInt(32, 5), ArrayType::Normal, 0)
};
for (size_t i = 0; i < 4; ++i) {
FieldDecl *Field = FieldDecl::Create(Context,
D,
SourceLocation(),
SourceLocation(), nullptr,
FieldTypes[i], /*TInfo=*/nullptr,
/*BitWidth=*/nullptr,
/*Mutable=*/false,
ICIS_NoInit);
Field->setAccess(AS_public);
D->addDecl(Field);
}
D->completeDefinition();
ObjCFastEnumerationStateType = Context.getTagDeclType(D);
}
return ObjCFastEnumerationStateType;
}
llvm::Constant *
CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
assert(!E->getType()->isPointerType() && "Strings are always arrays");
// Don't emit it as the address of the string, emit the string data itself
// as an inline array.
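// For example (illustrative, not from the original source): the literal "hi"
// has type char[3], so this returns the inline constant c"hi\00" rather than
// a pointer to a separately emitted global.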
if (E->getCharByteWidth() == 1) {
SmallString<64> Str(E->getString());
// Resize the string to the right size, which is indicated by its type.
const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
Str.resize(CAT->getSize().getZExtValue());
return llvm::ConstantDataArray::getString(VMContext, Str, false);
}
auto *AType = cast<llvm::ArrayType>(getTypes().ConvertType(E->getType()));
llvm::Type *ElemTy = AType->getElementType();
unsigned NumElements = AType->getNumElements();
// Wide strings have either 2-byte or 4-byte elements.
if (ElemTy->getPrimitiveSizeInBits() == 16) {
SmallVector<uint16_t, 32> Elements;
Elements.reserve(NumElements);
for(unsigned i = 0, e = E->getLength(); i != e; ++i)
Elements.push_back(E->getCodeUnit(i));
Elements.resize(NumElements);
return llvm::ConstantDataArray::get(VMContext, Elements);
}
assert(ElemTy->getPrimitiveSizeInBits() == 32);
SmallVector<uint32_t, 32> Elements;
Elements.reserve(NumElements);
for(unsigned i = 0, e = E->getLength(); i != e; ++i)
Elements.push_back(E->getCodeUnit(i));
Elements.resize(NumElements);
return llvm::ConstantDataArray::get(VMContext, Elements);
}
static llvm::GlobalVariable *
GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
CodeGenModule &CGM, StringRef GlobalName,
unsigned Alignment) {
// OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
unsigned AddrSpace = 0;
if (CGM.getLangOpts().OpenCL)
AddrSpace = CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant);
llvm::Module &M = CGM.getModule();
// Create a global variable for this string
auto *GV = new llvm::GlobalVariable(
M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName,
nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace);
GV->setAlignment(Alignment);
GV->setUnnamedAddr(true);
if (GV->isWeakForLinker()) {
assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals");
GV->setComdat(M.getOrInsertComdat(GV->getName()));
}
return GV;
}
/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
/// constant array for the given string literal.
llvm::GlobalVariable *
CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
StringRef Name) {
auto Alignment =
getContext().getAlignOfGlobalVarInChars(S->getType()).getQuantity();
llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
llvm::GlobalVariable **Entry = nullptr;
if (!LangOpts.WritableStrings) {
Entry = &ConstantStringMap[C];
if (auto GV = *Entry) {
if (Alignment > GV->getAlignment())
GV->setAlignment(Alignment);
return GV;
}
}
SmallString<256> MangledNameBuffer;
StringRef GlobalVariableName;
llvm::GlobalValue::LinkageTypes LT;
// Mangle the string literal if the ABI allows for it. However, we cannot
// do this if we are compiling with ASan or -fwritable-strings because they
// rely on strings having normal linkage.
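// For example (an illustrative sketch): under the Microsoft C++ ABI string
// literals mangle to "??_C@..." names, so identical literals from different
// translation units share a single linkonce_odr global.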
if (!LangOpts.WritableStrings &&
!LangOpts.Sanitize.has(SanitizerKind::Address) &&
getCXXABI().getMangleContext().shouldMangleStringLiteral(S)) {
llvm::raw_svector_ostream Out(MangledNameBuffer);
getCXXABI().getMangleContext().mangleStringLiteral(S, Out);
Out.flush();
LT = llvm::GlobalValue::LinkOnceODRLinkage;
GlobalVariableName = MangledNameBuffer;
} else {
LT = llvm::GlobalValue::PrivateLinkage;
GlobalVariableName = Name;
}
auto GV = GenerateStringLiteral(C, LT, *this, GlobalVariableName, Alignment);
if (Entry)
*Entry = GV;
SanitizerMD->reportGlobalToASan(GV, S->getStrTokenLoc(0), "<string literal>",
QualType());
return GV;
}
/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
/// array for the given ObjCEncodeExpr node.
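/// For example (illustrative): @encode(int) produces the string "i", which is
/// then emitted through GetAddrOfConstantCString below.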
llvm::GlobalVariable *
CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
std::string Str;
getContext().getObjCEncodingForType(E->getEncodedType(), Str);
return GetAddrOfConstantCString(Str);
}
/// GetAddrOfConstantCString - Returns a pointer to a character array containing
/// the literal and a terminating '\0' character.
/// The result has pointer to array type.
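/// For example (illustrative): GetAddrOfConstantCString("hi") yields a global
/// of type [3 x i8] initialized to c"hi\00".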
llvm::GlobalVariable *CodeGenModule::GetAddrOfConstantCString(
const std::string &Str, const char *GlobalName, unsigned Alignment) {
StringRef StrWithNull(Str.c_str(), Str.size() + 1);
if (Alignment == 0) {
Alignment = getContext()
.getAlignOfGlobalVarInChars(getContext().CharTy)
.getQuantity();
}
llvm::Constant *C =
llvm::ConstantDataArray::getString(getLLVMContext(), StrWithNull, false);
// Don't share any string literals if strings aren't constant.
llvm::GlobalVariable **Entry = nullptr;
if (!LangOpts.WritableStrings) {
Entry = &ConstantStringMap[C];
if (auto GV = *Entry) {
if (Alignment > GV->getAlignment())
GV->setAlignment(Alignment);
return GV;
}
}
// Get the default prefix if a name wasn't specified.
if (!GlobalName)
GlobalName = ".str";
// Create a global variable for this.
auto GV = GenerateStringLiteral(C, llvm::GlobalValue::PrivateLinkage, *this,
GlobalName, Alignment);
if (Entry)
*Entry = GV;
return GV;
}
llvm::Constant *CodeGenModule::GetAddrOfGlobalTemporary(
const MaterializeTemporaryExpr *E, const Expr *Init) {
assert((E->getStorageDuration() == SD_Static ||
E->getStorageDuration() == SD_Thread) && "not a global temporary");
const auto *VD = cast<VarDecl>(E->getExtendingDecl());
// If we're not materializing a subobject of the temporary, keep the
// cv-qualifiers from the type of the MaterializeTemporaryExpr.
QualType MaterializedType = Init->getType();
if (Init == E->GetTemporaryExpr())
MaterializedType = E->getType();
llvm::Constant *&Slot = MaterializedGlobalTemporaryMap[E];
if (Slot)
return Slot;
// FIXME: If an externally-visible declaration extends multiple temporaries,
// we need to give each temporary the same name in every translation unit (and
// we also need to make the temporaries externally-visible).
SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
getCXXABI().getMangleContext().mangleReferenceTemporary(
VD, E->getManglingNumber(), Out);
Out.flush();
APValue *Value = nullptr;
if (E->getStorageDuration() == SD_Static) {
// We might have a cached constant initializer for this temporary. Note
// that this might have a different value from the value computed by
// evaluating the initializer if the surrounding constant expression
// modifies the temporary.
Value = getContext().getMaterializedTemporaryValue(E, false);
if (Value && Value->isUninit())
Value = nullptr;
}
// Try evaluating it now, it might have a constant initializer.
Expr::EvalResult EvalResult;
if (!Value && Init->EvaluateAsRValue(EvalResult, getContext()) &&
!EvalResult.hasSideEffects())
Value = &EvalResult.Val;
llvm::Constant *InitialValue = nullptr;
bool Constant = false;
llvm::Type *Type;
if (Value) {
// The temporary has a constant initializer, use it.
InitialValue = EmitConstantValue(*Value, MaterializedType, nullptr);
Constant = isTypeConstant(MaterializedType, /*ExcludeCtor*/Value);
Type = InitialValue->getType();
} else {
// No initializer, the initialization will be provided when we
// initialize the declaration which performed lifetime extension.
Type = getTypes().ConvertTypeForMem(MaterializedType);
}
// Create a global variable for this lifetime-extended temporary.
llvm::GlobalValue::LinkageTypes Linkage =
getLLVMLinkageVarDefinition(VD, Constant);
if (Linkage == llvm::GlobalVariable::ExternalLinkage) {
const VarDecl *InitVD;
if (VD->isStaticDataMember() && VD->getAnyInitializer(InitVD) &&
isa<CXXRecordDecl>(InitVD->getLexicalDeclContext())) {
// Temporaries defined inside a class get linkonce_odr linkage because the
// class can be defined in multiple translation units.
Linkage = llvm::GlobalVariable::LinkOnceODRLinkage;
} else {
// There is no need for this temporary to have external linkage if the
// VarDecl has external linkage.
Linkage = llvm::GlobalVariable::InternalLinkage;
}
}
unsigned AddrSpace = GetGlobalVarAddressSpace(
VD, getContext().getTargetAddressSpace(MaterializedType));
auto *GV = new llvm::GlobalVariable(
getModule(), Type, Constant, Linkage, InitialValue, Name.c_str(),
/*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
AddrSpace);
setGlobalVisibility(GV, VD);
GV->setAlignment(
getContext().getTypeAlignInChars(MaterializedType).getQuantity());
if (supportsCOMDAT() && GV->isWeakForLinker())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
if (VD->getTLSKind())
setTLSMode(GV, *VD);
Slot = GV;
return GV;
}
/// EmitObjCPropertyImplementations - Emit information for synthesized
/// properties for an implementation.
void CodeGenModule::EmitObjCPropertyImplementations(const
ObjCImplementationDecl *D) {
#if 0 // HLSL Change - no ObjC support
for (const auto *PID : D->property_impls()) {
// Dynamic is just for type-checking.
if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
ObjCPropertyDecl *PD = PID->getPropertyDecl();
// Determine which methods need to be implemented, some may have
// been overridden. Note that ::isPropertyAccessor is not the method
// we want, that just indicates if the decl came from a
// property. What we want to know is if the method is defined in
// this implementation.
if (!D->getInstanceMethod(PD->getGetterName()))
CodeGenFunction(*this).GenerateObjCGetter(
const_cast<ObjCImplementationDecl *>(D), PID);
if (!PD->isReadOnly() &&
!D->getInstanceMethod(PD->getSetterName()))
CodeGenFunction(*this).GenerateObjCSetter(
const_cast<ObjCImplementationDecl *>(D), PID);
}
}
#endif // HLSL Change - no ObjC support
}
#if 0 // HLSL Change - no ObjC support
static bool needsDestructMethod(ObjCImplementationDecl *impl) {
const ObjCInterfaceDecl *iface = impl->getClassInterface();
for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
ivar; ivar = ivar->getNextIvar())
if (ivar->getType().isDestructedType())
return true;
return false;
}
static bool AllTrivialInitializers(CodeGenModule &CGM,
ObjCImplementationDecl *D) {
CodeGenFunction CGF(CGM);
for (ObjCImplementationDecl::init_iterator B = D->init_begin(),
E = D->init_end(); B != E; ++B) {
CXXCtorInitializer *CtorInitExp = *B;
Expr *Init = CtorInitExp->getInit();
if (!CGF.isTrivialInitializer(Init))
return false;
}
return true;
}
#endif
/// EmitObjCIvarInitializations - Emit information for ivar initialization
/// for an implementation.
void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
#if 0 // HLSL Change - no ObjC support
// We might need a .cxx_destruct even if we don't have any ivar initializers.
if (needsDestructMethod(D)) {
IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct");
Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
ObjCMethodDecl *DTORMethod =
ObjCMethodDecl::Create(getContext(), D->getLocation(), D->getLocation(),
cxxSelector, getContext().VoidTy, nullptr, D,
/*isInstance=*/true, /*isVariadic=*/false,
/*isPropertyAccessor=*/true, /*isImplicitlyDeclared=*/true,
/*isDefined=*/false, ObjCMethodDecl::Required);
D->addInstanceMethod(DTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
D->setHasDestructors(true);
}
// If the implementation doesn't have any ivar initializers, we don't need
// a .cxx_construct.
if (D->getNumIvarInitializers() == 0 ||
AllTrivialInitializers(*this, D))
return;
IdentifierInfo *II = &getContext().Idents.get(".cxx_construct");
Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
// The constructor returns 'self'.
ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(getContext(),
D->getLocation(),
D->getLocation(),
cxxSelector,
getContext().getObjCIdType(),
nullptr, D, /*isInstance=*/true,
/*isVariadic=*/false,
/*isPropertyAccessor=*/true,
/*isImplicitlyDeclared=*/true,
/*isDefined=*/false,
ObjCMethodDecl::Required);
D->addInstanceMethod(CTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
D->setHasNonZeroConstructors(true);
#endif // HLSL Change - no ObjC support
}
/// EmitNamespace - Emit all declarations in a namespace.
void CodeGenModule::EmitNamespace(const NamespaceDecl *ND) {
for (auto *I : ND->decls()) {
if (const auto *VD = dyn_cast<VarDecl>(I))
if (VD->getTemplateSpecializationKind() != TSK_ExplicitSpecialization &&
VD->getTemplateSpecializationKind() != TSK_Undeclared)
continue;
EmitTopLevelDecl(I);
}
}
// EmitLinkageSpec - Emit all declarations in a linkage spec.
void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
if (LSD->getLanguage() != LinkageSpecDecl::lang_c &&
LSD->getLanguage() != LinkageSpecDecl::lang_cxx) {
ErrorUnsupported(LSD, "linkage spec");
return;
}
for (auto *I : LSD->decls()) {
// Metadata for an ObjC class includes references to its implemented methods.
// Generate the class's method definitions first.
if (auto *OID = dyn_cast<ObjCImplDecl>(I)) {
for (auto *M : OID->methods())
EmitTopLevelDecl(M);
}
EmitTopLevelDecl(I);
}
}
/// EmitTopLevelDecl - Emit code for a single top level declaration.
void CodeGenModule::EmitTopLevelDecl(Decl *D) {
// Ignore dependent declarations.
if (D->getDeclContext() && D->getDeclContext()->isDependentContext())
return;
switch (D->getKind()) {
case Decl::CXXConversion:
case Decl::CXXMethod:
case Decl::Function:
// Skip function templates
if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate() ||
cast<FunctionDecl>(D)->isLateTemplateParsed())
return;
EmitGlobal(cast<FunctionDecl>(D));
// Always provide some coverage mapping
// even for the functions that aren't emitted.
AddDeferredUnusedCoverageMapping(D);
break;
case Decl::Var:
// Skip variable templates
if (cast<VarDecl>(D)->getDescribedVarTemplate())
return;
LLVM_FALLTHROUGH; // HLSL Change
case Decl::VarTemplateSpecialization:
EmitGlobal(cast<VarDecl>(D));
// HLSL Change Start - add resource or subobject for global variables
if (hlsl::IsHLSLSubobjectType(cast<VarDecl>(D)->getType())) {
getHLSLRuntime().addSubobject(D);
}
else {
getHLSLRuntime().addResource(D);
}
// HLSL Change End
break;
// Indirect fields from global anonymous structs and unions can be
// ignored; only the actual variable requires IR gen support.
case Decl::IndirectField:
break;
// C++ Decls
case Decl::Namespace:
EmitNamespace(cast<NamespaceDecl>(D));
break;
// No code generation needed.
case Decl::UsingShadow:
case Decl::ClassTemplate:
case Decl::VarTemplate:
case Decl::VarTemplatePartialSpecialization:
case Decl::FunctionTemplate:
case Decl::TypeAliasTemplate:
case Decl::Block:
case Decl::Empty:
break;
case Decl::Using: // using X; [C++]
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitUsingDecl(cast<UsingDecl>(*D));
return;
case Decl::NamespaceAlias:
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(*D));
return;
case Decl::UsingDirective: // using namespace X; [C++]
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitUsingDirective(cast<UsingDirectiveDecl>(*D));
return;
case Decl::CXXConstructor:
// Skip function templates
if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate() ||
cast<FunctionDecl>(D)->isLateTemplateParsed())
return;
getCXXABI().EmitCXXConstructors(cast<CXXConstructorDecl>(D));
break;
case Decl::CXXDestructor:
if (cast<FunctionDecl>(D)->isLateTemplateParsed())
return;
getCXXABI().EmitCXXDestructors(cast<CXXDestructorDecl>(D));
break;
case Decl::StaticAssert:
// Nothing to do.
break;
// Objective-C Decls
#if 0 // HLSL Change Starts - no ObjC support
// Forward declarations, no (immediate) code generation.
case Decl::ObjCInterface:
case Decl::ObjCCategory:
break;
case Decl::ObjCProtocol: {
auto *Proto = cast<ObjCProtocolDecl>(D);
if (Proto->isThisDeclarationADefinition())
ObjCRuntime->GenerateProtocol(Proto);
break;
}
case Decl::ObjCCategoryImpl:
// Categories have properties but don't support synthesize so we
// can ignore them here.
ObjCRuntime->GenerateCategory(cast<ObjCCategoryImplDecl>(D));
break;
case Decl::ObjCImplementation: {
auto *OMD = cast<ObjCImplementationDecl>(D);
EmitObjCPropertyImplementations(OMD);
EmitObjCIvarInitializations(OMD);
ObjCRuntime->GenerateClass(OMD);
// Emit global variable debug information.
if (CGDebugInfo *DI = getModuleDebugInfo())
if (getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
DI->getOrCreateInterfaceType(getContext().getObjCInterfaceType(
OMD->getClassInterface()), OMD->getLocation());
break;
}
case Decl::ObjCMethod: {
auto *OMD = cast<ObjCMethodDecl>(D);
// If this is not a prototype, emit the body.
if (OMD->getBody())
CodeGenFunction(*this).GenerateObjCMethod(OMD);
break;
}
case Decl::ObjCCompatibleAlias:
ObjCRuntime->RegisterAlias(cast<ObjCCompatibleAliasDecl>(D));
break;
#endif // HLSL Change Ends - no ObjC support
case Decl::LinkageSpec:
EmitLinkageSpec(cast<LinkageSpecDecl>(D));
break;
#if 0 // HLSL Change Starts - no asm, import or openmp support
case Decl::FileScopeAsm: {
// File-scope asm is ignored during device-side CUDA compilation.
if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
break;
auto *AD = cast<FileScopeAsmDecl>(D);
getModule().appendModuleInlineAsm(AD->getAsmString()->getString());
break;
}
case Decl::Import: {
auto *Import = cast<ImportDecl>(D);
// Ignore import declarations that come from imported modules.
if (clang::Module *Owner = Import->getImportedOwningModule()) {
if (getLangOpts().CurrentModule.empty() ||
Owner->getTopLevelModule()->Name == getLangOpts().CurrentModule)
break;
}
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitImportDecl(*Import);
ImportedModules.insert(Import->getImportedModule());
break;
}
case Decl::OMPThreadPrivate:
EmitOMPThreadPrivateDecl(cast<OMPThreadPrivateDecl>(D));
break;
#endif // HLSL Change Ends - no asm, import or openmp support
case Decl::ClassTemplateSpecialization: {
const auto *Spec = cast<ClassTemplateSpecializationDecl>(D);
if (DebugInfo &&
Spec->getSpecializationKind() == TSK_ExplicitInstantiationDefinition &&
Spec->hasDefinition())
DebugInfo->completeTemplateDefinition(*Spec);
break;
}
// HLSL Change Starts
case Decl::HLSLBuffer: {
// TODO: add resource to HLSLRuntime
HLSLBufferDecl *BD = cast<HLSLBufferDecl>(D);
getHLSLRuntime().addResource(BD);
break;
}
// HLSL Change Ends
default:
// Make sure we handled everything we should, every other kind is a
// non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
// function. Need to recode Decl::Kind to do that easily.
assert(isa<TypeDecl>(D) && "Unsupported decl kind");
break;
}
}
void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
// Do we need to generate coverage mapping?
if (!CodeGenOpts.CoverageMapping)
return;
switch (D->getKind()) {
case Decl::CXXConversion:
case Decl::CXXMethod:
case Decl::Function:
case Decl::ObjCMethod:
case Decl::CXXConstructor:
case Decl::CXXDestructor: {
if (!cast<FunctionDecl>(D)->hasBody())
return;
auto I = DeferredEmptyCoverageMappingDecls.find(D);
if (I == DeferredEmptyCoverageMappingDecls.end())
DeferredEmptyCoverageMappingDecls[D] = true;
break;
}
default:
break;
};
}
void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) {
// Do we need to generate coverage mapping?
if (!CodeGenOpts.CoverageMapping)
return;
if (const auto *Fn = dyn_cast<FunctionDecl>(D)) {
if (Fn->isTemplateInstantiation())
ClearUnusedCoverageMapping(Fn->getTemplateInstantiationPattern());
}
auto I = DeferredEmptyCoverageMappingDecls.find(D);
if (I == DeferredEmptyCoverageMappingDecls.end())
DeferredEmptyCoverageMappingDecls[D] = false;
else
I->second = false;
}
void CodeGenModule::EmitDeferredUnusedCoverageMappings() {
std::vector<const Decl *> DeferredDecls;
for (const auto &I : DeferredEmptyCoverageMappingDecls) {
if (!I.second)
continue;
DeferredDecls.push_back(I.first);
}
// Sort the declarations by their location to make sure that the tests get a
// predictable order for the coverage mapping for the unused declarations.
if (CodeGenOpts.DumpCoverageMapping)
std::sort(DeferredDecls.begin(), DeferredDecls.end(),
[] (const Decl *LHS, const Decl *RHS) {
return LHS->getLocStart() < RHS->getLocStart();
});
for (const auto *D : DeferredDecls) {
switch (D->getKind()) {
case Decl::CXXConversion:
case Decl::CXXMethod:
case Decl::Function:
case Decl::ObjCMethod: {
CodeGenPGO PGO(*this);
GlobalDecl GD(cast<FunctionDecl>(D));
PGO.emitEmptyCounterMapping(D, getMangledName(GD),
getFunctionLinkage(GD));
break;
}
case Decl::CXXConstructor: {
CodeGenPGO PGO(*this);
GlobalDecl GD(cast<CXXConstructorDecl>(D), Ctor_Base);
PGO.emitEmptyCounterMapping(D, getMangledName(GD),
getFunctionLinkage(GD));
break;
}
case Decl::CXXDestructor: {
CodeGenPGO PGO(*this);
GlobalDecl GD(cast<CXXDestructorDecl>(D), Dtor_Base);
PGO.emitEmptyCounterMapping(D, getMangledName(GD),
getFunctionLinkage(GD));
break;
}
default:
break;
};
}
}
/// Turns the given pointer into a constant.
static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
const void *Ptr) {
uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
return llvm::ConstantInt::get(i64, PtrInt);
}
static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
llvm::NamedMDNode *&GlobalMetadata,
GlobalDecl D,
llvm::GlobalValue *Addr) {
if (!GlobalMetadata)
GlobalMetadata =
CGM.getModule().getOrInsertNamedMetadata("clang.global.decl.ptrs");
// TODO: should we report variant information for ctors/dtors?
llvm::Metadata *Ops[] = {llvm::ConstantAsMetadata::get(Addr),
llvm::ConstantAsMetadata::get(GetPointerConstant(
CGM.getLLVMContext(), D.getDecl()))};
GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
}
/// For each function which is declared within an extern "C" region and marked
/// as 'used', but has internal linkage, create an alias from the unmangled
/// name to the mangled name if possible. People expect to be able to refer
/// to such functions with an unmangled name from inline assembly within the
/// same translation unit.
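/// For example (a hypothetical sketch, for illustration):
///   extern "C" { __attribute__((used)) static void foo() {} }
/// Inline asm such as asm("call foo") expects the unmangled name, so an alias
/// named "foo" is emitted when no other global already claims that name.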
void CodeGenModule::EmitStaticExternCAliases() {
for (auto &I : StaticExternCValues) {
IdentifierInfo *Name = I.first;
llvm::GlobalValue *Val = I.second;
if (Val && !getModule().getNamedValue(Name->getName()))
addUsedGlobal(llvm::GlobalAlias::create(Name->getName(), Val));
}
}
bool CodeGenModule::lookupRepresentativeDecl(StringRef MangledName,
GlobalDecl &Result) const {
auto Res = Manglings.find(MangledName);
if (Res == Manglings.end())
return false;
Result = Res->getValue();
return true;
}
/// Emits metadata nodes associating all the global values in the
/// current module with the Decls they came from. This is useful for
/// projects using IR gen as a subroutine.
///
/// Since there's currently no way to associate an MDNode directly
/// with an llvm::GlobalValue, we create a global named metadata
/// with the name 'clang.global.decl.ptrs'.
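/// The emitted metadata looks roughly like this (illustrative):
///   !clang.global.decl.ptrs = !{!0, ...}
///   !0 = !{void ()* @f, i64 140735312} ; global value, Decl* as an i64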
void CodeGenModule::EmitDeclMetadata() {
llvm::NamedMDNode *GlobalMetadata = nullptr;
// StaticLocalDeclMap
for (auto &I : MangledDeclNames) {
llvm::GlobalValue *Addr = getModule().getNamedValue(I.second);
EmitGlobalDeclMetadata(*this, GlobalMetadata, I.first, Addr);
}
}
/// Emits metadata nodes for all the local variables in the current
/// function.
void CodeGenFunction::EmitDeclMetadata() {
if (LocalDeclMap.empty()) return;
llvm::LLVMContext &Context = getLLVMContext();
// Find the unique metadata ID for this name.
unsigned DeclPtrKind = Context.getMDKindID("clang.decl.ptr");
llvm::NamedMDNode *GlobalMetadata = nullptr;
for (auto &I : LocalDeclMap) {
const Decl *D = I.first;
llvm::Value *Addr = I.second;
if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
Alloca->setMetadata(
DeclPtrKind, llvm::MDNode::get(
Context, llvm::ValueAsMetadata::getConstant(DAddr)));
} else if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr)) {
GlobalDecl GD = GlobalDecl(cast<VarDecl>(D));
EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV);
}
}
}
void CodeGenModule::EmitVersionIdentMetadata() {
llvm::NamedMDNode *IdentMetadata =
TheModule.getOrInsertNamedMetadata("llvm.ident");
std::string Version = getClangFullVersion();
llvm::LLVMContext &Ctx = TheModule.getContext();
llvm::Metadata *IdentNode[] = {llvm::MDString::get(Ctx, Version)};
IdentMetadata->addOperand(llvm::MDNode::get(Ctx, IdentNode));
}
void CodeGenModule::EmitTargetMetadata() {
// Warning, new MangledDeclNames may be appended within this loop.
// We rely on MapVector insertions adding new elements to the end
// of the container.
// FIXME: Move this loop into the one target that needs it, and only
// loop over those declarations for which we couldn't emit the target
// metadata when we emitted the declaration.
for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
auto Val = *(MangledDeclNames.begin() + I);
const Decl *D = Val.first.getDecl()->getMostRecentDecl();
llvm::GlobalValue *GV = GetGlobalValue(Val.second);
getTargetCodeGenInfo().emitTargetMD(D, GV, *this);
}
}
void CodeGenModule::EmitCoverageFile() {
if (!getCodeGenOpts().CoverageFile.empty()) {
if (llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu")) {
llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata("llvm.gcov");
llvm::LLVMContext &Ctx = TheModule.getContext();
llvm::MDString *CoverageFile =
llvm::MDString::get(Ctx, getCodeGenOpts().CoverageFile);
for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
llvm::MDNode *CU = CUNode->getOperand(i);
llvm::Metadata *Elts[] = {CoverageFile, CU};
GCov->addOperand(llvm::MDNode::get(Ctx, Elts));
}
}
}
}
llvm::Constant *CodeGenModule::EmitUuidofInitializer(StringRef Uuid) {
// Sema has checked that all uuid strings are of the form
// "12345678-1234-1234-1234-1234567890ab".
assert(Uuid.size() == 36);
for (unsigned i = 0; i < 36; ++i) {
if (i == 8 || i == 13 || i == 18 || i == 23) assert(Uuid[i] == '-');
else assert(isHexDigit(Uuid[i]));
}
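// For example (illustrative): "12345678-1234-5678-9abc-123456789abc" becomes
// { i32 0x12345678, i16 0x1234, i16 0x5678,
//   [8 x i8] [0x9a, 0xbc, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc] }.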
// The offsets at which each byte of Field3 starts in Uuid. Field 3 is
// "1234-1234567890ab".
const unsigned Field3ValueOffsets[8] = { 19, 21, 24, 26, 28, 30, 32, 34 };
llvm::Constant *Field3[8];
for (unsigned Idx = 0; Idx < 8; ++Idx)
Field3[Idx] = llvm::ConstantInt::get(
Int8Ty, Uuid.substr(Field3ValueOffsets[Idx], 2), 16);
llvm::Constant *Fields[4] = {
llvm::ConstantInt::get(Int32Ty, Uuid.substr(0, 8), 16),
llvm::ConstantInt::get(Int16Ty, Uuid.substr(9, 4), 16),
llvm::ConstantInt::get(Int16Ty, Uuid.substr(14, 4), 16),
llvm::ConstantArray::get(llvm::ArrayType::get(Int8Ty, 8), Field3)
};
return llvm::ConstantStruct::getAnon(Fields);
}
llvm::Constant *
CodeGenModule::getAddrOfCXXCatchHandlerType(QualType Ty,
QualType CatchHandlerType) {
return getCXXABI().getAddrOfCXXCatchHandlerType(Ty, CatchHandlerType);
}
llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
bool ForEH) {
// Return a bogus pointer if RTTI is disabled, unless it's for EH.
// FIXME: should we even be calling this method if RTTI is disabled
// and it's not for EH?
if (!ForEH && !getLangOpts().RTTI)
return llvm::Constant::getNullValue(Int8PtrTy);
if (ForEH && Ty->isObjCObjectPointerType() &&
LangOpts.ObjCRuntime.isGNUFamily())
return ObjCRuntime->GetEHType(Ty);
return getCXXABI().getAddrOfRTTIDescriptor(Ty);
}
void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
#if 0 // HLSL Change - no OpenMP support
for (auto RefExpr : D->varlists()) {
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(RefExpr)->getDecl());
bool PerformInit =
VD->getAnyInitializer() &&
!VD->getAnyInitializer()->isConstantInitializer(getContext(),
/*ForRef=*/false);
if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
VD, GetAddrOfGlobalVar(VD), RefExpr->getLocStart(), PerformInit))
CXXGlobalInits.push_back(InitFunction);
}
#endif
}
llvm::MDTuple *CodeGenModule::CreateVTableBitSetEntry(
llvm::GlobalVariable *VTable, CharUnits Offset, const CXXRecordDecl *RD) {
std::string OutName;
llvm::raw_string_ostream Out(OutName);
getCXXABI().getMangleContext().mangleCXXVTableBitSet(RD, Out);
llvm::Metadata *BitsetOps[] = {
llvm::MDString::get(getLLVMContext(), Out.str()),
llvm::ConstantAsMetadata::get(VTable),
llvm::ConstantAsMetadata::get(
llvm::ConstantInt::get(Int64Ty, Offset.getQuantity()))};
return llvm::MDTuple::get(getLLVMContext(), BitsetOps);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGCXX.cpp | //===--- CGCXX.cpp - Emit LLVM Code for declarations ----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation.
//
//===----------------------------------------------------------------------===//
#include "CodeGenModule.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace CodeGen;
/// Try to emit a base destructor as an alias to its primary
/// base-class destructor.
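/// For example (illustrative):
///   struct B { ~B(); };
///   struct D : B { }; // ~D() does nothing beyond ~B()
/// Here D's base-object destructor can be emitted as an alias to B's.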
bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
if (!getCodeGenOpts().CXXCtorDtorAliases)
return true;
// Producing an alias to a base class ctor/dtor can degrade debug quality
// as the debugger cannot tell them apart.
if (getCodeGenOpts().OptimizationLevel == 0)
return true;
// If the destructor doesn't have a trivial body, we have to emit it
// separately.
if (!D->hasTrivialBody())
return true;
const CXXRecordDecl *Class = D->getParent();
// We are going to instrument this destructor, so give up even if it is
// currently empty.
if (Class->mayInsertExtraPadding())
return true;
// If we need to manipulate a VTT parameter, give up.
if (Class->getNumVBases()) {
// Extra Credit: passing extra parameters is perfectly safe
// in many calling conventions, so only bail out if the ctor's
// calling convention is nonstandard.
return true;
}
// If any field has a non-trivial destructor, we have to emit the
// destructor separately.
for (const auto *I : Class->fields())
if (I->getType().isDestructedType())
return true;
// Try to find a unique base class with a non-trivial destructor.
const CXXRecordDecl *UniqueBase = nullptr;
for (const auto &I : Class->bases()) {
// We're in the base destructor, so skip virtual bases.
if (I.isVirtual()) continue;
// Skip base classes with trivial destructors.
const auto *Base =
cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
if (Base->hasTrivialDestructor()) continue;
// If we've already found a base class with a non-trivial
// destructor, give up.
if (UniqueBase) return true;
UniqueBase = Base;
}
// If we didn't find any bases with a non-trivial destructor, then
// the base destructor is actually effectively trivial, which can
// happen if it was needlessly user-defined or if there are virtual
// bases with non-trivial destructors.
if (!UniqueBase)
return true;
// If the base is at a non-zero offset, give up.
const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(Class);
if (!ClassLayout.getBaseClassOffset(UniqueBase).isZero())
return true;
// Give up if the calling conventions don't match. We could update the call,
// but it is probably not worth it.
const CXXDestructorDecl *BaseD = UniqueBase->getDestructor();
if (BaseD->getType()->getAs<FunctionType>()->getCallConv() !=
D->getType()->getAs<FunctionType>()->getCallConv())
return true;
return TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Base),
GlobalDecl(BaseD, Dtor_Base),
false);
}
/// Try to emit a definition as a global alias for another definition.
/// If \p InEveryTU is true, we know that an equivalent alias can be produced
/// in every translation unit.
bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
GlobalDecl TargetDecl,
bool InEveryTU) {
if (!getCodeGenOpts().CXXCtorDtorAliases)
return true;
// The alias will use the linkage of the referent. If we can't
// support aliases with that linkage, fail.
llvm::GlobalValue::LinkageTypes Linkage = getFunctionLinkage(AliasDecl);
// We can't use an alias if the linkage is not valid for one.
if (!llvm::GlobalAlias::isValidLinkage(Linkage))
return true;
// Don't create a weak alias for a dllexport'd symbol.
if (AliasDecl.getDecl()->hasAttr<DLLExportAttr>() &&
llvm::GlobalValue::isWeakForLinker(Linkage))
return true;
llvm::GlobalValue::LinkageTypes TargetLinkage =
getFunctionLinkage(TargetDecl);
// Check if we have it already.
StringRef MangledName = getMangledName(AliasDecl);
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry && !Entry->isDeclaration())
return false;
if (Replacements.count(MangledName))
return false;
// Derive the type for the alias.
llvm::PointerType *AliasType
= getTypes().GetFunctionType(AliasDecl)->getPointerTo();
// Find the referent. Some aliases might require a bitcast, in
// which case the caller is responsible for ensuring the soundness
// of these semantics.
auto *Ref = cast<llvm::GlobalValue>(GetAddrOfGlobal(TargetDecl));
llvm::Constant *Aliasee = Ref;
if (Ref->getType() != AliasType)
Aliasee = llvm::ConstantExpr::getBitCast(Ref, AliasType);
// Instead of creating an alias to a linkonce_odr, replace all of the uses
// of the aliasee.
if (llvm::GlobalValue::isDiscardableIfUnused(Linkage) &&
(TargetLinkage != llvm::GlobalValue::AvailableExternallyLinkage ||
!TargetDecl.getDecl()->hasAttr<AlwaysInlineAttr>())) {
// FIXME: An extern template instantiation will create functions with
// linkage "AvailableExternally". In libc++, some classes also define
// members with attribute "AlwaysInline" and expect no reference to
// be generated. It is desirable to reenable this optimisation after
// corresponding LLVM changes.
Replacements[MangledName] = Aliasee;
return false;
}
if (!InEveryTU) {
// If we don't have a definition for the destructor yet, don't
// emit. We can't emit aliases to declarations; that's just not
// how aliases work.
if (Ref->isDeclaration())
return true;
}
// Don't create an alias to a linker weak symbol. This avoids producing
// different COMDATs in different TUs. Another option would be to
// output the alias both for weak_odr and linkonce_odr, but that
// requires explicit comdat support in the IR.
if (llvm::GlobalValue::isWeakForLinker(TargetLinkage))
return true;
// Create the alias with no name.
auto *Alias =
llvm::GlobalAlias::create(AliasType, Linkage, "", Aliasee, &getModule());
// Switch any previous uses to the alias.
if (Entry) {
assert(Entry->getType() == AliasType &&
"declaration exists with different type");
Alias->takeName(Entry);
Entry->replaceAllUsesWith(Alias);
Entry->eraseFromParent();
} else {
Alias->setName(MangledName);
}
// Finally, set up the alias with its proper name and attributes.
setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
return false;
}
llvm::Function *CodeGenModule::codegenCXXStructor(const CXXMethodDecl *MD,
StructorType Type) {
const CGFunctionInfo &FnInfo =
getTypes().arrangeCXXStructorDeclaration(MD, Type);
auto *Fn = cast<llvm::Function>(
getAddrOfCXXStructor(MD, Type, &FnInfo, nullptr, true));
GlobalDecl GD;
if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
GD = GlobalDecl(DD, toCXXDtorType(Type));
} else {
const auto *CD = cast<CXXConstructorDecl>(MD);
GD = GlobalDecl(CD, toCXXCtorType(Type));
}
setFunctionLinkage(GD, Fn);
setFunctionDLLStorageClass(GD, Fn);
CodeGenFunction(*this).GenerateCode(GD, Fn, FnInfo);
setFunctionDefinitionAttributes(MD, Fn);
SetLLVMFunctionAttributesForDefinition(MD, Fn);
return Fn;
}
llvm::GlobalValue *CodeGenModule::getAddrOfCXXStructor(
const CXXMethodDecl *MD, StructorType Type, const CGFunctionInfo *FnInfo,
llvm::FunctionType *FnType, bool DontDefer) {
GlobalDecl GD;
if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
GD = GlobalDecl(CD, toCXXCtorType(Type));
} else {
GD = GlobalDecl(cast<CXXDestructorDecl>(MD), toCXXDtorType(Type));
}
StringRef Name = getMangledName(GD);
if (llvm::GlobalValue *Existing = GetGlobalValue(Name))
return Existing;
if (!FnType) {
if (!FnInfo)
FnInfo = &getTypes().arrangeCXXStructorDeclaration(MD, Type);
FnType = getTypes().GetFunctionType(*FnInfo);
}
return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FnType, GD,
/*ForVTable=*/false,
DontDefer));
}
static llvm::Value *BuildAppleKextVirtualCall(CodeGenFunction &CGF,
GlobalDecl GD,
llvm::Type *Ty,
const CXXRecordDecl *RD) {
assert(!CGF.CGM.getTarget().getCXXABI().isMicrosoft() &&
"No kext in Microsoft ABI");
GD = GD.getCanonicalDecl();
CodeGenModule &CGM = CGF.CGM;
llvm::Value *VTable = CGM.getCXXABI().getAddrOfVTable(RD, CharUnits());
Ty = Ty->getPointerTo()->getPointerTo();
VTable = CGF.Builder.CreateBitCast(VTable, Ty);
assert(VTable && "BuildVirtualCall = kext vtbl pointer is null");
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
uint64_t AddressPoint =
CGM.getItaniumVTableContext().getVTableLayout(RD)
.getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
VTableIndex += AddressPoint;
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
return CGF.Builder.CreateLoad(VFuncPtr);
}
/// BuildAppleKextVirtualCall - This routine is to support gcc's kext ABI making
/// indirect call to virtual functions. It makes the call through indexing
/// into the vtable.
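/// For example (illustrative): for a qualified call Base::f() on a kext class,
/// the emitted code loads the vtable global directly and indexes the slot at
/// (address point + method index) instead of dispatching through the object's
/// vptr.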
llvm::Value *
CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
NestedNameSpecifier *Qual,
llvm::Type *Ty) {
assert((Qual->getKind() == NestedNameSpecifier::TypeSpec) &&
"BuildAppleKextVirtualCall - bad Qual kind");
const Type *QTy = Qual->getAsType();
QualType T = QualType(QTy, 0);
const RecordType *RT = T->getAs<RecordType>();
assert(RT && "BuildAppleKextVirtualCall - Qual type must be record");
const auto *RD = cast<CXXRecordDecl>(RT->getDecl());
if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD))
return BuildAppleKextVirtualDestructorCall(DD, Dtor_Complete, RD);
return ::BuildAppleKextVirtualCall(*this, MD, Ty, RD);
}
/// BuildAppleKextVirtualDestructorCall - This routine makes an indirect vtable
/// call for a call to a virtual destructor. It returns null if it could not do it.
llvm::Value *
CodeGenFunction::BuildAppleKextVirtualDestructorCall(
const CXXDestructorDecl *DD,
CXXDtorType Type,
const CXXRecordDecl *RD) {
const auto *MD = cast<CXXMethodDecl>(DD);
// FIXME: the Dtor_Base destructor is always called directly; it needs to be
// inline-expanded into the caller somehow. -O does that, but -O0 must be
// supported as well.
if (MD->isVirtual() && Type != Dtor_Base) {
// Compute the function type we're calling.
const CGFunctionInfo &FInfo = CGM.getTypes().arrangeCXXStructorDeclaration(
DD, StructorType::Complete);
llvm::Type *Ty = CGM.getTypes().GetFunctionType(FInfo);
return ::BuildAppleKextVirtualCall(*this, GlobalDecl(DD, Type), Ty, RD);
}
return nullptr;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/SanitizerMetadata.h | //===--- SanitizerMetadata.h - Metadata for sanitizers ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Class which emits metadata consumed by sanitizer instrumentation passes.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_SANITIZERMETADATA_H
#define LLVM_CLANG_LIB_CODEGEN_SANITIZERMETADATA_H
#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
namespace llvm {
class GlobalVariable;
class Instruction;
class MDNode;
}
namespace clang {
class VarDecl;
namespace CodeGen {
class CodeGenModule;
class SanitizerMetadata {
SanitizerMetadata(const SanitizerMetadata &) = delete;
void operator=(const SanitizerMetadata &) = delete;
CodeGenModule &CGM;
public:
SanitizerMetadata(CodeGenModule &CGM);
void reportGlobalToASan(llvm::GlobalVariable *GV, const VarDecl &D,
bool IsDynInit = false);
void reportGlobalToASan(llvm::GlobalVariable *GV, SourceLocation Loc,
StringRef Name, QualType Ty, bool IsDynInit = false,
bool IsBlacklisted = false);
void disableSanitizerForGlobal(llvm::GlobalVariable *GV);
void disableSanitizerForInstruction(llvm::Instruction *I);
private:
llvm::MDNode *getLocationMetadata(SourceLocation Loc);
};
} // end namespace CodeGen
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGCXXABI.cpp | //===----- CGCXXABI.cpp - Interface to C++ ABIs ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides an abstract class for C++ code generation. Concrete subclasses
// of this implement code generation for specific C++ ABIs.
//
//===----------------------------------------------------------------------===//
#include "CGCXXABI.h"
using namespace clang;
using namespace CodeGen;
CGCXXABI::~CGCXXABI() { }
void CGCXXABI::ErrorUnsupportedABI(CodeGenFunction &CGF, StringRef S) {
DiagnosticsEngine &Diags = CGF.CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot yet compile %0 in this ABI");
Diags.Report(CGF.getContext().getFullLoc(CGF.CurCodeDecl->getLocation()),
DiagID)
<< S;
}
bool CGCXXABI::canCopyArgument(const CXXRecordDecl *RD) const {
// If RD has a non-trivial move or copy constructor, we cannot copy the
// argument.
if (RD->hasNonTrivialCopyConstructor() || RD->hasNonTrivialMoveConstructor())
return false;
// If RD has a non-trivial destructor, we cannot copy the argument.
if (RD->hasNonTrivialDestructor())
return false;
// We can only copy the argument if there exists at least one trivial,
// non-deleted copy or move constructor.
// FIXME: This assumes that all lazily declared copy and move constructors are
// not deleted. This assumption might not be true in some corner cases.
bool CopyDeleted = false;
bool MoveDeleted = false;
for (const CXXConstructorDecl *CD : RD->ctors()) {
if (CD->isCopyConstructor() || CD->isMoveConstructor()) {
assert(CD->isTrivial());
// We had at least one undeleted trivial copy or move ctor. Return
// directly.
if (!CD->isDeleted())
return true;
if (CD->isCopyConstructor())
CopyDeleted = true;
else
MoveDeleted = true;
}
}
// If all trivial copy and move constructors are deleted, we cannot copy the
// argument.
return !(CopyDeleted && MoveDeleted);
}
llvm::Constant *CGCXXABI::GetBogusMemberPointer(QualType T) {
return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T));
}
llvm::Type *
CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
}
llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(
CodeGenFunction &CGF, const Expr *E, llvm::Value *&This,
llvm::Value *MemPtr, const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "calls through member pointers");
const FunctionProtoType *FPT =
MPT->getPointeeType()->getAs<FunctionProtoType>();
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
CGM.getTypes().arrangeCXXMethodType(RD, FPT));
return llvm::Constant::getNullValue(FTy->getPointerTo());
}
llvm::Value *
CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
llvm::Value *Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "loads of member pointers");
llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
return llvm::Constant::getNullValue(Ty);
}
llvm::Value *CGCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
const CastExpr *E,
llvm::Value *Src) {
ErrorUnsupportedABI(CGF, "member function pointer conversions");
return GetBogusMemberPointer(E->getType());
}
llvm::Constant *CGCXXABI::EmitMemberPointerConversion(const CastExpr *E,
llvm::Constant *Src) {
return GetBogusMemberPointer(E->getType());
}
llvm::Value *
CGCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
llvm::Value *L,
llvm::Value *R,
const MemberPointerType *MPT,
bool Inequality) {
ErrorUnsupportedABI(CGF, "member function pointer comparison");
return CGF.Builder.getFalse();
}
llvm::Value *
CGCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
llvm::Value *MemPtr,
const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "member function pointer null testing");
return CGF.Builder.getFalse();
}
llvm::Constant *
CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
return GetBogusMemberPointer(QualType(MPT, 0));
}
llvm::Constant *CGCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
return GetBogusMemberPointer(CGM.getContext().getMemberPointerType(
MD->getType(), MD->getParent()->getTypeForDecl()));
}
llvm::Constant *CGCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
CharUnits offset) {
return GetBogusMemberPointer(QualType(MPT, 0));
}
llvm::Constant *CGCXXABI::EmitMemberPointer(const APValue &MP, QualType MPT) {
return GetBogusMemberPointer(MPT);
}
bool CGCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
// Fake answer.
return true;
}
void CGCXXABI::buildThisParam(CodeGenFunction &CGF, FunctionArgList ¶ms) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
// FIXME: I'm not entirely sure I like using a fake decl just for code
// generation. Maybe we can come up with a better way?
ImplicitParamDecl *ThisDecl
= ImplicitParamDecl::Create(CGM.getContext(), nullptr, MD->getLocation(),
&CGM.getContext().Idents.get("this"),
MD->getThisType(CGM.getContext()));
params.push_back(ThisDecl);
getThisDecl(CGF) = ThisDecl;
}
void CGCXXABI::EmitThisParam(CodeGenFunction &CGF) {
/// Initialize the 'this' slot.
assert(getThisDecl(CGF) && "no 'this' variable for function");
// HLSL Change Starts:
if (CGF.CurFn) {
getThisValue(CGF) = CGF.CurFn->args().begin();
} else
// HLSL Change Ends
getThisValue(CGF) = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getThisDecl(CGF)),
"this");
}
void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
RValue RV, QualType ResultType) {
CGF.EmitReturnOfRValue(RV, ResultType);
}
CharUnits CGCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
if (!requiresArrayCookie(expr))
return CharUnits::Zero();
return getArrayCookieSizeImpl(expr->getAllocatedType());
}
CharUnits CGCXXABI::getArrayCookieSizeImpl(QualType elementType) {
// BOGUS
return CharUnits::Zero();
}
llvm::Value *CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
llvm::Value *NewPtr,
llvm::Value *NumElements,
const CXXNewExpr *expr,
QualType ElementType) {
// Should never be called.
ErrorUnsupportedABI(CGF, "array cookie initialization");
return nullptr;
}
bool CGCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
QualType elementType) {
// If the class's usual deallocation function takes two arguments,
// it needs a cookie.
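// For example (illustrative): a class declaring
//   void operator delete[](void *, size_t);
// needs the element count stored in a cookie so the size argument can be
// recomputed when delete[] is called.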
if (expr->doesUsualArrayDeleteWantSize())
return true;
return elementType.isDestructedType();
}
bool CGCXXABI::requiresArrayCookie(const CXXNewExpr *expr) {
// If the class's usual deallocation function takes two arguments,
// it needs a cookie.
if (expr->doesUsualArrayDeleteWantSize())
return true;
return expr->getAllocatedType().isDestructedType();
}
void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *ptr,
const CXXDeleteExpr *expr, QualType eltTy,
llvm::Value *&numElements,
llvm::Value *&allocPtr, CharUnits &cookieSize) {
// Derive a char* in the same address space as the pointer.
unsigned AS = ptr->getType()->getPointerAddressSpace();
llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS);
ptr = CGF.Builder.CreateBitCast(ptr, charPtrTy);
// If we don't need an array cookie, bail out early.
if (!requiresArrayCookie(expr, eltTy)) {
allocPtr = ptr;
numElements = nullptr;
cookieSize = CharUnits::Zero();
return;
}
cookieSize = getArrayCookieSizeImpl(eltTy);
allocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ptr,
-cookieSize.getQuantity());
numElements = readArrayCookieImpl(CGF, allocPtr, cookieSize);
}
llvm::Value *CGCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
llvm::Value *ptr,
CharUnits cookieSize) {
ErrorUnsupportedABI(CGF, "reading a new[] cookie");
return llvm::ConstantInt::get(CGF.SizeTy, 0);
}
/// Returns the adjustment, in bytes, required for the given
/// member-pointer operation. Returns null if no adjustment is
/// required.
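/// For example (illustrative):
///   struct A { int a; }; struct B { int b; };
///   struct C : A, B { };
///   int B::*pb = &B::b;
///   int C::*pc = pb;  // CK_BaseToDerivedMemberPointer
/// Here the adjustment is the byte offset of the B subobject within C
/// (sizeof(A) under a typical layout).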
llvm::Constant *CGCXXABI::getMemberPointerAdjustment(const CastExpr *E) {
assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
E->getCastKind() == CK_BaseToDerivedMemberPointer);
QualType derivedType;
if (E->getCastKind() == CK_DerivedToBaseMemberPointer)
derivedType = E->getSubExpr()->getType();
else
derivedType = E->getType();
const CXXRecordDecl *derivedClass =
derivedType->castAs<MemberPointerType>()->getClass()->getAsCXXRecordDecl();
return CGM.GetNonVirtualBaseClassOffset(derivedClass,
E->path_begin(),
E->path_end());
}
CharUnits CGCXXABI::getMemberPointerPathAdjustment(const APValue &MP) {
// TODO: Store base specifiers in APValue member pointer paths so we can
// easily reuse CGM.GetNonVirtualBaseClassOffset().
const ValueDecl *MPD = MP.getMemberPointerDecl();
CharUnits ThisAdjustment = CharUnits::Zero();
ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
bool DerivedMember = MP.isMemberPointerToDerivedMember();
const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
for (unsigned I = 0, N = Path.size(); I != N; ++I) {
const CXXRecordDecl *Base = RD;
const CXXRecordDecl *Derived = Path[I];
if (DerivedMember)
std::swap(Base, Derived);
ThisAdjustment +=
getContext().getASTRecordLayout(Derived).getBaseClassOffset(Base);
RD = Path[I];
}
if (DerivedMember)
ThisAdjustment = -ThisAdjustment;
return ThisAdjustment;
}
llvm::BasicBlock *
CGCXXABI::EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
const CXXRecordDecl *RD) {
if (CGM.getTarget().getCXXABI().hasConstructorVariants())
llvm_unreachable("shouldn't be called in this ABI");
ErrorUnsupportedABI(CGF, "complete object detection in ctor");
return nullptr;
}
bool CGCXXABI::NeedsVTTParameter(GlobalDecl GD) {
return false;
}
llvm::CallInst *
CGCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
llvm::Value *Exn) {
// Just call std::terminate and ignore the violating exception.
return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGObjC.cpp | //===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Objective-C code as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
using namespace clang;
using namespace CodeGen;
// HLSL Change Starts
// No ObjC codegen support, so simply skip all of this compilation.
// Here are enough stubs to link the current targets.
#if 1
llvm::Value *
CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(
const Expr *E, const ObjCMethodDecl *MethodWithObjects) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *
CodeGenFunction::EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
/// Emit a selector.
llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return) {
llvm_unreachable("HLSL does not support ObjC constructs");
return RValue();
}
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::generateObjCGetterBody(
const ObjCImplementationDecl *classImpl,
const ObjCPropertyImplDecl *propImpl,
const ObjCMethodDecl *GetterMethodDecl, llvm::Constant *AtomicHelperFn) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::generateObjCSetterBody(
const ObjCImplementationDecl *classImpl,
const ObjCPropertyImplDecl *propImpl, llvm::Constant *AtomicHelperFn) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
ObjCMethodDecl *MD,
bool ctor) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
bool CodeGenFunction::IndirectObjCSetterArg(const CGFunctionInfo &FI) {
llvm_unreachable("HLSL does not support ObjC constructs");
return false;
}
bool CodeGenFunction::IvarTypeWithAggrGCObjects(QualType Ty) {
llvm_unreachable("HLSL does not support ObjC constructs");
return false;
}
llvm::Value *CodeGenFunction::LoadObjCSelf() {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
QualType CodeGenFunction::TypeOfSelfObject() {
llvm_unreachable("HLSL does not support ObjC constructs");
return QualType();
}
void CodeGenFunction::EmitObjCForCollectionStmt(
const ObjCForCollectionStmt &S) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::EmitObjCAtSynchronizedStmt(
const ObjCAtSynchronizedStmt &S) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
/// Produce the code for a CK_ARCProduceObject. Just does a
/// primitive retain.
llvm::Value *CodeGenFunction::EmitObjCProduceObject(QualType type,
llvm::Value *value) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
llvm::Value *object) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
llvm::Value *value) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value *> values) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
  llvm_unreachable("HLSL does not support ObjC constructs");
  return nullptr;
}
llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
  llvm_unreachable("HLSL does not support ObjC constructs");
  return nullptr;
}
llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
                                                 bool mandatory) {
  llvm_unreachable("HLSL does not support ObjC constructs");
  return nullptr;
}
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
  llvm_unreachable("HLSL does not support ObjC constructs");
  return nullptr;
}
void CodeGenFunction::EmitARCRelease(llvm::Value *value,
ARCPreciseLifetime_t precise) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::EmitARCDestroyStrong(llvm::Value *addr,
ARCPreciseLifetime_t precise) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
llvm::Value *value,
bool ignored) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
llvm::Value *newValue,
bool ignored) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *
CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
llvm::Value *value) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *CodeGenFunction::EmitARCLoadWeak(llvm::Value *addr) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(llvm::Value *addr) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
llvm::Value *value,
bool ignored) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
  llvm_unreachable("HLSL does not support ObjC constructs");
  return nullptr;
}
void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
  llvm_unreachable("HLSL does not support ObjC constructs");
}
llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
  llvm_unreachable("HLSL does not support ObjC constructs");
  return nullptr;
}
void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
llvm::Value *addr,
QualType type) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
llvm::Value *addr,
QualType type) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF, llvm::Value *addr,
QualType type) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
std::pair<LValue, llvm::Value *>
CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e, bool ignored) {
llvm_unreachable("HLSL does not support ObjC constructs");
return std::pair<LValue, llvm::Value *>();
}
std::pair<LValue, llvm::Value *>
CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
llvm_unreachable("HLSL does not support ObjC constructs");
return std::pair<LValue, llvm::Value *>();
}
void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
const ObjCAutoreleasePoolStmt &ARPS) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
llvm_unreachable("HLSL does not support ObjC constructs");
}
llvm::Constant *CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
const ObjCPropertyImplDecl *PID) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Constant *CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
const ObjCPropertyImplDecl *PID) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
llvm::Value *CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block,
QualType Ty) {
llvm_unreachable("HLSL does not support ObjC constructs");
return nullptr;
}
#else
// HLSL Change Ends
typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
QualType ET,
RValue Result);
/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
static llvm::Constant *getNullForVariable(llvm::Value *addr) {
llvm::Type *type =
cast<llvm::PointerType>(addr->getType())->getElementType();
return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
}
/// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
llvm::Constant *C =
CGM.getObjCRuntime().GenerateConstantString(E->getString());
// FIXME: This bitcast should just be made an invariant on the Runtime.
return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}
/// EmitObjCBoxedExpr - This routine generates code to call
/// the appropriate expression boxing method. This will either be
/// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:],
/// or [NSValue valueWithBytes:objCType:].
///
llvm::Value *
CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
// Generate the correct selector for this literal's concrete type.
// Get the method.
const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
const Expr *SubExpr = E->getSubExpr();
assert(BoxingMethod && "BoxingMethod is null");
assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
Selector Sel = BoxingMethod->getSelector();
// Generate a reference to the class pointer, which will be the receiver.
// Assumes that the method was introduced in the class that should be
// messaged (avoids pulling it out of the result type).
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);
CallArgList Args;
const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin();
QualType ArgQT = ArgDecl->getType().getUnqualifiedType();
// ObjCBoxedExpr supports boxing of structs and unions
// via [NSValue valueWithBytes:objCType:]
const QualType ValueType(SubExpr->getType().getCanonicalType());
if (ValueType->isObjCBoxableRecordType()) {
// Emit CodeGen for first parameter
// and cast value to correct type
llvm::Value *Temporary = CreateMemTemp(SubExpr->getType());
EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
llvm::Value *BitCast = Builder.CreateBitCast(Temporary,
ConvertType(ArgQT));
Args.add(RValue::get(BitCast), ArgQT);
// Create char array to store type encoding
std::string Str;
getContext().getObjCEncodingForType(ValueType, Str);
llvm::GlobalVariable *GV = CGM.GetAddrOfConstantCString(Str);
// Cast type encoding to correct type
const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType();
llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT));
Args.add(RValue::get(Cast), EncodingQT);
} else {
Args.add(EmitAnyExpr(SubExpr), ArgQT);
}
RValue result = Runtime.GenerateMessageSend(
*this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
Args, ClassDecl, BoxingMethod);
return Builder.CreateBitCast(result.getScalarVal(),
ConvertType(E->getType()));
}
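// Illustrative lowering: @(x) for an 'int' x becomes, in effect,
//   [NSNumber numberWithInt:x]
// while a boxable struct value becomes
//   [NSValue valueWithBytes:&tmp objCType:encoding]
// where 'tmp' is the memory temporary created above and 'encoding' is the
// @encode string emitted as a constant C string.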
llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
const ObjCMethodDecl *MethodWithObjects) {
ASTContext &Context = CGM.getContext();
const ObjCDictionaryLiteral *DLE = nullptr;
const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
if (!ALE)
DLE = cast<ObjCDictionaryLiteral>(E);
// Compute the type of the array we're initializing.
uint64_t NumElements =
ALE ? ALE->getNumElements() : DLE->getNumElements();
llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
NumElements);
QualType ElementType = Context.getObjCIdType().withConst();
QualType ElementArrayType
= Context.getConstantArrayType(ElementType, APNumElements,
ArrayType::Normal, /*IndexTypeQuals=*/0);
// Allocate the temporary array(s).
llvm::AllocaInst *Objects = CreateMemTemp(ElementArrayType, "objects");
llvm::AllocaInst *Keys = nullptr;
if (DLE)
Keys = CreateMemTemp(ElementArrayType, "keys");
// In ARC, we may need to do extra work to keep all the keys and
// values alive until after the call.
SmallVector<llvm::Value *, 16> NeededObjects;
bool TrackNeededObjects =
(getLangOpts().ObjCAutoRefCount &&
CGM.getCodeGenOpts().OptimizationLevel != 0);
  // Perform the actual initialization of the array(s).
for (uint64_t i = 0; i < NumElements; i++) {
if (ALE) {
// Emit the element and store it to the appropriate array slot.
const Expr *Rhs = ALE->getElement(i);
LValue LV = LValue::MakeAddr(
Builder.CreateStructGEP(Objects->getAllocatedType(), Objects, i),
ElementType, Context.getTypeAlignInChars(Rhs->getType()), Context);
llvm::Value *value = EmitScalarExpr(Rhs);
EmitStoreThroughLValue(RValue::get(value), LV, true);
if (TrackNeededObjects) {
NeededObjects.push_back(value);
}
} else {
// Emit the key and store it to the appropriate array slot.
const Expr *Key = DLE->getKeyValueElement(i).Key;
LValue KeyLV = LValue::MakeAddr(
Builder.CreateStructGEP(Keys->getAllocatedType(), Keys, i),
ElementType, Context.getTypeAlignInChars(Key->getType()), Context);
llvm::Value *keyValue = EmitScalarExpr(Key);
EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);
// Emit the value and store it to the appropriate array slot.
const Expr *Value = DLE->getKeyValueElement(i).Value;
LValue ValueLV = LValue::MakeAddr(
Builder.CreateStructGEP(Objects->getAllocatedType(), Objects, i),
ElementType, Context.getTypeAlignInChars(Value->getType()), Context);
llvm::Value *valueValue = EmitScalarExpr(Value);
EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
if (TrackNeededObjects) {
NeededObjects.push_back(keyValue);
NeededObjects.push_back(valueValue);
}
}
}
// Generate the argument list.
CallArgList Args;
ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
const ParmVarDecl *argDecl = *PI++;
QualType ArgQT = argDecl->getType().getUnqualifiedType();
Args.add(RValue::get(Objects), ArgQT);
if (DLE) {
argDecl = *PI++;
ArgQT = argDecl->getType().getUnqualifiedType();
Args.add(RValue::get(Keys), ArgQT);
}
argDecl = *PI;
ArgQT = argDecl->getType().getUnqualifiedType();
llvm::Value *Count =
llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
Args.add(RValue::get(Count), ArgQT);
// Generate a reference to the class pointer, which will be the receiver.
Selector Sel = MethodWithObjects->getSelector();
QualType ResultType = E->getType();
const ObjCObjectPointerType *InterfacePointerType
= ResultType->getAsObjCInterfacePointerType();
ObjCInterfaceDecl *Class
= InterfacePointerType->getObjectType()->getInterface();
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
llvm::Value *Receiver = Runtime.GetClass(*this, Class);
// Generate the message send.
RValue result = Runtime.GenerateMessageSend(
*this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel,
Receiver, Args, Class, MethodWithObjects);
// The above message send needs these objects, but in ARC they are
// passed in a buffer that is essentially __unsafe_unretained.
// Therefore we must prevent the optimizer from releasing them until
// after the call.
if (TrackNeededObjects) {
EmitARCIntrinsicUse(NeededObjects);
}
return Builder.CreateBitCast(result.getScalarVal(),
ConvertType(E->getType()));
}
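// By way of example, @[a, b] fills a two-slot 'const id' stack buffer and
// sends +[NSArray arrayWithObjects:count:], while @{k : v} fills parallel
// key/object buffers for +[NSDictionary dictionaryWithObjects:forKeys:count:].
// In optimized ARC builds the buffered values are kept alive across the send
// by the intrinsic-use call above.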
llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
}
llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
const ObjCDictionaryLiteral *E) {
return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
}
/// Emit a selector.
llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
// Untyped selector.
// Note that this implementation allows for non-constant strings to be passed
// as arguments to @selector(). Currently, the only thing preventing this
// behaviour is the type checking in the front end.
return CGM.getObjCRuntime().GetSelector(*this, E->getSelector());
}
llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
// FIXME: This should pass the Decl not the name.
return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol());
}
/// \brief Adjust the type of an Objective-C object that doesn't match up due
/// to type erasure at various points, e.g., related result types or the use
/// of parameterized classes.
static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
RValue Result) {
if (!ExpT->isObjCRetainableType())
return Result;
// If the converted types are the same, we're done.
llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT);
if (ExpLLVMTy == Result.getScalarVal()->getType())
return Result;
// We have applied a substitution. Cast the rvalue appropriately.
return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
ExpLLVMTy));
}
/// Decide whether to extend the lifetime of the receiver of a
/// returns-inner-pointer message.
static bool
shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
switch (message->getReceiverKind()) {
// For a normal instance message, we should extend unless the
// receiver is loaded from a variable with precise lifetime.
case ObjCMessageExpr::Instance: {
const Expr *receiver = message->getInstanceReceiver();
const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
receiver = ice->getSubExpr()->IgnoreParens();
// Only __strong variables.
if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
return true;
// All ivars and fields have precise lifetime.
if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
return false;
// Otherwise, check for variables.
const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
if (!declRef) return true;
const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
if (!var) return true;
// All variables have precise lifetime except local variables with
// automatic storage duration that aren't specially marked.
return (var->hasLocalStorage() &&
!var->hasAttr<ObjCPreciseLifetimeAttr>());
}
case ObjCMessageExpr::Class:
case ObjCMessageExpr::SuperClass:
// It's never necessary for class objects.
return false;
case ObjCMessageExpr::SuperInstance:
// We generally assume that 'self' lives throughout a method call.
return false;
}
llvm_unreachable("invalid receiver kind");
}
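// Concretely: in
//   const char *bytes = [someLocalString UTF8String];
// where 'someLocalString' is an ordinary __strong local without the
// precise-lifetime attribute, the receiver is retained+autoreleased so the
// object backing the inner pointer stays alive past the call.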
RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
ReturnValueSlot Return) {
// Only the lookup mechanism and first two arguments of the method
// implementation vary between runtimes. We can get the receiver and
// arguments in generic code.
bool isDelegateInit = E->isDelegateInitCall();
const ObjCMethodDecl *method = E->getMethodDecl();
// We don't retain the receiver in delegate init calls, and this is
// safe because the receiver value is always loaded from 'self',
// which we zero out. We don't want to Block_copy block receivers,
// though.
bool retainSelf =
(!isDelegateInit &&
CGM.getLangOpts().ObjCAutoRefCount &&
method &&
method->hasAttr<NSConsumesSelfAttr>());
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
bool isSuperMessage = false;
bool isClassMessage = false;
ObjCInterfaceDecl *OID = nullptr;
// Find the receiver
QualType ReceiverType;
llvm::Value *Receiver = nullptr;
switch (E->getReceiverKind()) {
case ObjCMessageExpr::Instance:
ReceiverType = E->getInstanceReceiver()->getType();
if (retainSelf) {
TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
E->getInstanceReceiver());
Receiver = ter.getPointer();
if (ter.getInt()) retainSelf = false;
} else
Receiver = EmitScalarExpr(E->getInstanceReceiver());
break;
case ObjCMessageExpr::Class: {
ReceiverType = E->getClassReceiver();
const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
assert(ObjTy && "Invalid Objective-C class message send");
OID = ObjTy->getInterface();
assert(OID && "Invalid Objective-C class message send");
Receiver = Runtime.GetClass(*this, OID);
isClassMessage = true;
break;
}
case ObjCMessageExpr::SuperInstance:
ReceiverType = E->getSuperType();
Receiver = LoadObjCSelf();
isSuperMessage = true;
break;
case ObjCMessageExpr::SuperClass:
ReceiverType = E->getSuperType();
Receiver = LoadObjCSelf();
isSuperMessage = true;
isClassMessage = true;
break;
}
if (retainSelf)
Receiver = EmitARCRetainNonBlock(Receiver);
// In ARC, we sometimes want to "extend the lifetime"
// (i.e. retain+autorelease) of receivers of returns-inner-pointer
// messages.
if (getLangOpts().ObjCAutoRefCount && method &&
method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
shouldExtendReceiverForInnerPointerMessage(E))
Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);
QualType ResultType = method ? method->getReturnType() : E->getType();
CallArgList Args;
EmitCallArgs(Args, method, E->arg_begin(), E->arg_end());
// For delegate init calls in ARC, do an unsafe store of null into
// self. This represents the call taking direct ownership of that
// value. We have to do this after emitting the other call
// arguments because they might also reference self, but we don't
// have to worry about any of them modifying self because that would
// be an undefined read and write of an object in unordered
// expressions.
if (isDelegateInit) {
assert(getLangOpts().ObjCAutoRefCount &&
"delegate init calls should only be marked in ARC");
// Do an unsafe store of null into self.
llvm::Value *selfAddr =
LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
assert(selfAddr && "no self entry for a delegate init call?");
Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
}
RValue result;
if (isSuperMessage) {
// super is only valid in an Objective-C method
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
E->getSelector(),
OMD->getClassInterface(),
isCategoryImpl,
Receiver,
isClassMessage,
Args,
method);
} else {
result = Runtime.GenerateMessageSend(*this, Return, ResultType,
E->getSelector(),
Receiver, Args, OID,
method);
}
// For delegate init calls in ARC, implicitly store the result of
// the call back into self. This takes ownership of the value.
if (isDelegateInit) {
llvm::Value *selfAddr =
LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
llvm::Value *newSelf = result.getScalarVal();
// The delegate return type isn't necessarily a matching type; in
// fact, it's quite likely to be 'id'.
llvm::Type *selfTy =
cast<llvm::PointerType>(selfAddr->getType())->getElementType();
newSelf = Builder.CreateBitCast(newSelf, selfTy);
Builder.CreateStore(newSelf, selfAddr);
}
return AdjustObjCObjectType(*this, E->getType(), result);
}
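// For a delegate init call such as
//   self = [super init];
// the null store before the send models the callee consuming the old 'self',
// and the store of the result back into 'self' transfers ownership of the
// returned object.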
namespace {
struct FinishARCDealloc : EHScopeStack::Cleanup {
void Emit(CodeGenFunction &CGF, Flags flags) override {
const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);
const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
const ObjCInterfaceDecl *iface = impl->getClassInterface();
if (!iface->getSuperClass()) return;
bool isCategory = isa<ObjCCategoryImplDecl>(impl);
// Call [super dealloc] if we have a superclass.
llvm::Value *self = CGF.LoadObjCSelf();
CallArgList args;
CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
CGF.getContext().VoidTy,
method->getSelector(),
iface,
isCategory,
self,
/*is class msg*/ false,
args,
method);
}
};
}
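// The net effect: an ARC 'dealloc' method behaves as if it ended with the
// explicit
//   [super dealloc];
// call that manual-retain-release code must write by hand.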
/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD) {
SourceLocation StartLoc = OMD->getLocStart();
FunctionArgList args;
// Check if we should generate debug info for this method.
if (OMD->hasAttr<NoDebugAttr>())
DebugInfo = nullptr; // disable debug info indefinitely for this function
llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
args.push_back(OMD->getSelfDecl());
args.push_back(OMD->getCmdDecl());
args.append(OMD->param_begin(), OMD->param_end());
CurGD = OMD;
CurEHLocation = OMD->getLocEnd();
StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
OMD->getLocation(), StartLoc);
// In ARC, certain methods get an extra cleanup.
if (CGM.getLangOpts().ObjCAutoRefCount &&
OMD->isInstanceMethod() &&
OMD->getSelector().isUnarySelector()) {
const IdentifierInfo *ident =
OMD->getSelector().getIdentifierInfoForSlot(0);
if (ident->isStr("dealloc"))
EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
}
}
static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
LValue lvalue, QualType type);
/// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
StartObjCMethod(OMD, OMD->getClassInterface());
PGO.assignRegionCounters(OMD, CurFn);
assert(isa<CompoundStmt>(OMD->getBody()));
incrementProfileCounter(OMD->getBody());
EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
FinishFunction(OMD->getBodyRBrace());
}
/// emitStructGetterCall - Call the runtime function to load a property
/// into the return value slot.
static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
bool isAtomic, bool hasStrong) {
ASTContext &Context = CGF.getContext();
llvm::Value *src =
CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(),
ivar, 0).getAddress();
// objc_copyStruct (ReturnValue, &structIvar,
// sizeof (Type of Ivar), isAtomic, false);
CallArgList args;
llvm::Value *dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
args.add(RValue::get(dest), Context.VoidPtrTy);
src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
args.add(RValue::get(src), Context.VoidPtrTy);
CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
llvm::Value *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(Context.VoidTy, args,
FunctionType::ExtInfo(),
RequiredArgs::All),
fn, ReturnValueSlot(), args);
}
/// Determine whether the given architecture supports unaligned atomic
/// accesses. They don't have to be fast, just faster than a function
/// call and a mutex.
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
// FIXME: Allow unaligned atomic load/store on x86. (It is not
// currently supported by the backend.)
  return false;
}
/// Return the maximum size that permits atomic accesses for the given
/// architecture.
static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
llvm::Triple::ArchType arch) {
// ARM has 8-byte atomic accesses, but it's not clear whether we
// want to rely on them here.
// In the default case, just assume that any size up to a pointer is
// fine given adequate alignment.
return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
}
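// e.g. on a 64-bit target this permits atomic accesses of up to 8 bytes, so a
// 16-byte struct ivar cannot use native loads/stores and will fall back to
// objc_copyStruct (see PropertyImplStrategy below).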
namespace {
class PropertyImplStrategy {
public:
enum StrategyKind {
/// The 'native' strategy is to use the architecture's provided
/// reads and writes.
Native,
/// Use objc_setProperty and objc_getProperty.
GetSetProperty,
/// Use objc_setProperty for the setter, but use expression
/// evaluation for the getter.
SetPropertyAndExpressionGet,
/// Use objc_copyStruct.
CopyStruct,
/// The 'expression' strategy is to emit normal assignment or
/// lvalue-to-rvalue expressions.
Expression
};
StrategyKind getKind() const { return StrategyKind(Kind); }
bool hasStrongMember() const { return HasStrong; }
bool isAtomic() const { return IsAtomic; }
bool isCopy() const { return IsCopy; }
CharUnits getIvarSize() const { return IvarSize; }
CharUnits getIvarAlignment() const { return IvarAlignment; }
PropertyImplStrategy(CodeGenModule &CGM,
const ObjCPropertyImplDecl *propImpl);
private:
unsigned Kind : 8;
unsigned IsAtomic : 1;
unsigned IsCopy : 1;
unsigned HasStrong : 1;
CharUnits IvarSize;
CharUnits IvarAlignment;
};
}
/// Pick an implementation strategy for the given property synthesis.
PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
const ObjCPropertyImplDecl *propImpl) {
const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();
IsCopy = (setterKind == ObjCPropertyDecl::Copy);
IsAtomic = prop->isAtomic();
HasStrong = false; // doesn't matter here.
// Evaluate the ivar's size and alignment.
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
QualType ivarType = ivar->getType();
std::tie(IvarSize, IvarAlignment) =
CGM.getContext().getTypeInfoInChars(ivarType);
// If we have a copy property, we always have to use getProperty/setProperty.
// TODO: we could actually use setProperty and an expression for non-atomics.
if (IsCopy) {
Kind = GetSetProperty;
return;
}
// Handle retain.
if (setterKind == ObjCPropertyDecl::Retain) {
// In GC-only, there's nothing special that needs to be done.
if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
// fallthrough
// In ARC, if the property is non-atomic, use expression emission,
// which translates to objc_storeStrong. This isn't required, but
// it's slightly nicer.
} else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
// Using standard expression emission for the setter is only
// acceptable if the ivar is __strong, which won't be true if
// the property is annotated with __attribute__((NSObject)).
// TODO: falling all the way back to objc_setProperty here is
// just laziness, though; we could still use objc_storeStrong
// if we hacked it right.
if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
Kind = Expression;
else
Kind = SetPropertyAndExpressionGet;
return;
// Otherwise, we need to at least use setProperty. However, if
// the property isn't atomic, we can use normal expression
// emission for the getter.
} else if (!IsAtomic) {
Kind = SetPropertyAndExpressionGet;
return;
// Otherwise, we have to use both setProperty and getProperty.
} else {
Kind = GetSetProperty;
return;
}
}
// If we're not atomic, just use expression accesses.
if (!IsAtomic) {
Kind = Expression;
return;
}
// Properties on bitfield ivars need to be emitted using expression
// accesses even if they're nominally atomic.
if (ivar->isBitField()) {
Kind = Expression;
return;
}
// GC-qualified or ARC-qualified ivars need to be emitted as
// expressions. This actually works out to being atomic anyway,
// except for ARC __strong, but that should trigger the above code.
if (ivarType.hasNonTrivialObjCLifetime() ||
(CGM.getLangOpts().getGC() &&
CGM.getContext().getObjCGCAttrKind(ivarType))) {
Kind = Expression;
return;
}
// Compute whether the ivar has strong members.
if (CGM.getLangOpts().getGC())
if (const RecordType *recordType = ivarType->getAs<RecordType>())
HasStrong = recordType->getDecl()->hasObjectMember();
// We can never access structs with object members with a native
// access, because we need to use write barriers. This is what
// objc_copyStruct is for.
if (HasStrong) {
Kind = CopyStruct;
return;
}
// Otherwise, this is target-dependent and based on the size and
// alignment of the ivar.
// If the size of the ivar is not a power of two, give up. We don't
// want to get into the business of doing compare-and-swaps.
if (!IvarSize.isPowerOfTwo()) {
Kind = CopyStruct;
return;
}
llvm::Triple::ArchType arch =
CGM.getTarget().getTriple().getArch();
// Most architectures require memory to fit within a single cache
// line, so the alignment has to be at least the size of the access.
// Otherwise we have to grab a lock.
if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
Kind = CopyStruct;
return;
}
// If the ivar's size exceeds the architecture's maximum atomic
// access size, we have to use CopyStruct.
if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
Kind = CopyStruct;
return;
}
// Otherwise, we can use native loads and stores.
Kind = Native;
}
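// A few worked examples of the selection above (non-GC mode assumed;
// 'BigStruct' is a placeholder for any type larger than the maximum atomic
// access size):
//   @property (atomic, retain) id x;        // pointer ivar     -> GetSetProperty
//   @property (nonatomic, retain) id x;     // ARC __strong ivar -> Expression
//   @property (copy) NSString *x;           //                   -> GetSetProperty
//   @property (atomic, assign) int x;       // aligned scalar    -> Native
//   @property (atomic, assign) BigStruct x; // oversized ivar    -> CopyStruct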
/// \brief Generate an Objective-C property getter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
llvm::Constant *AtomicHelperFn =
CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
assert(OMD && "Invalid call to generate getter (empty method)");
StartObjCMethod(OMD, IMP->getClassInterface());
generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);
FinishFunction();
}
static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
const Expr *getter = propImpl->getGetterCXXConstructor();
if (!getter) return true;
  // Sema only makes one of these when the ivar has a C++ class type,
// so the form is pretty constrained.
// If the property has a reference type, we might just be binding a
// reference, in which case the result will be a gl-value. We should
// treat this as a non-trivial operation.
if (getter->isGLValue())
return false;
// If we selected a trivial copy-constructor, we're okay.
if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
return (construct->getConstructor()->isTrivial());
// The constructor might require cleanups (in which case it's never
// trivial).
assert(isa<ExprWithCleanups>(getter));
return false;
}
/// emitCPPObjectAtomicGetterCall - Call the runtime function to
/// copy the ivar into the return slot.
static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
llvm::Value *returnAddr,
ObjCIvarDecl *ivar,
llvm::Constant *AtomicHelperFn) {
// objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
// AtomicHelperFn);
CallArgList args;
// The 1st argument is the return Slot.
args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);
// The 2nd argument is the address of the ivar.
llvm::Value *ivarAddr =
CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
CGF.LoadObjCSelf(), ivar, 0).getAddress();
ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
// Third argument is the helper function.
args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
llvm::Value *copyCppAtomicObjectFn =
CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(CGF.getContext().VoidTy,
args,
FunctionType::ExtInfo(),
RequiredArgs::All),
copyCppAtomicObjectFn, ReturnValueSlot(), args);
}
void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
const ObjCPropertyImplDecl *propImpl,
const ObjCMethodDecl *GetterMethodDecl,
llvm::Constant *AtomicHelperFn) {
// If there's a non-trivial 'get' expression, we just have to emit that.
if (!hasTrivialGetExpr(propImpl)) {
if (!AtomicHelperFn) {
ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
/*nrvo*/ nullptr);
EmitReturnStmt(ret);
}
else {
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
emitCPPObjectAtomicGetterCall(*this, ReturnValue,
ivar, AtomicHelperFn);
}
return;
}
const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
QualType propType = prop->getType();
ObjCMethodDecl *getterMethod = prop->getGetterMethodDecl();
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
// Pick an implementation strategy.
PropertyImplStrategy strategy(CGM, propImpl);
switch (strategy.getKind()) {
case PropertyImplStrategy::Native: {
// We don't need to do anything for a zero-size struct.
if (strategy.getIvarSize().isZero())
return;
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
// Currently, all atomic accesses have to be through integer
// types, so there's no point in trying to pick a prettier type.
llvm::Type *bitcastType =
llvm::Type::getIntNTy(getLLVMContext(),
getContext().toBits(strategy.getIvarSize()));
bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
// Perform an atomic load. This does not impose ordering constraints.
llvm::Value *ivarAddr = LV.getAddress();
ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
load->setAlignment(strategy.getIvarAlignment().getQuantity());
load->setAtomic(llvm::Unordered);
// Store that value into the return address. Doing this with a
// bitcast is likely to produce some pretty ugly IR, but it's not
// the *most* terrible thing in the world.
Builder.CreateStore(load, Builder.CreateBitCast(ReturnValue, bitcastType));
// Make sure we don't do an autorelease.
AutoreleaseResult = false;
return;
}
case PropertyImplStrategy::GetSetProperty: {
llvm::Value *getPropertyFn =
CGM.getObjCRuntime().GetPropertyGetFunction();
if (!getPropertyFn) {
CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
return;
}
// Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
// FIXME: Can't this be simpler? This might even be worse than the
// corresponding gcc code.
llvm::Value *cmd =
Builder.CreateLoad(LocalDeclMap[getterMethod->getCmdDecl()], "cmd");
llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
llvm::Value *ivarOffset =
EmitIvarOffset(classImpl->getClassInterface(), ivar);
CallArgList args;
args.add(RValue::get(self), getContext().getObjCIdType());
args.add(RValue::get(cmd), getContext().getObjCSelType());
args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
getContext().BoolTy);
// FIXME: We shouldn't need to get the function info here, the
// runtime already should have computed it to build the function.
llvm::Instruction *CallInstruction;
RValue RV = EmitCall(getTypes().arrangeFreeFunctionCall(propType, args,
FunctionType::ExtInfo(),
RequiredArgs::All),
getPropertyFn, ReturnValueSlot(), args, nullptr,
&CallInstruction);
if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
call->setTailCall();
// We need to fix the type here. Ivars with copy & retain are
// always objects so we don't need to worry about complex or
// aggregates.
RV = RValue::get(Builder.CreateBitCast(
RV.getScalarVal(),
getTypes().ConvertType(getterMethod->getReturnType())));
EmitReturnOfRValue(RV, propType);
// objc_getProperty does an autorelease, so we should suppress ours.
AutoreleaseResult = false;
return;
}
case PropertyImplStrategy::CopyStruct:
emitStructGetterCall(*this, ivar, strategy.isAtomic(),
strategy.hasStrongMember());
return;
case PropertyImplStrategy::Expression:
case PropertyImplStrategy::SetPropertyAndExpressionGet: {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
QualType ivarType = ivar->getType();
switch (getEvaluationKind(ivarType)) {
case TEK_Complex: {
ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
EmitStoreOfComplex(pair,
MakeNaturalAlignAddrLValue(ReturnValue, ivarType),
/*init*/ true);
return;
}
case TEK_Aggregate:
// The return value slot is guaranteed to not be aliased, but
// that's not necessarily the same as "on the stack", so
// we still potentially need objc_memmove_collectable.
EmitAggregateCopy(ReturnValue, LV.getAddress(), ivarType);
return;
case TEK_Scalar: {
llvm::Value *value;
if (propType->isReferenceType()) {
value = LV.getAddress();
} else {
// We want to load and autoreleaseReturnValue ARC __weak ivars.
if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
// Otherwise we want to do a simple load, suppressing the
// final autorelease.
} else {
value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal();
AutoreleaseResult = false;
}
value = Builder.CreateBitCast(value, ConvertType(propType));
value = Builder.CreateBitCast(
value, ConvertType(GetterMethodDecl->getReturnType()));
}
EmitReturnOfRValue(RValue::get(value), propType);
return;
}
}
llvm_unreachable("bad evaluation kind");
}
}
llvm_unreachable("bad @property implementation strategy!");
}
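// In outline, a Native-strategy getter emitted above performs:
//   %addr = bitcast <ivar address> to iN*   ; N = ivar size in bits
//   %val  = load atomic iN, iN* %addr unordered, align <ivar alignment>
//   store iN %val, <bitcast return slot>
// with the final autorelease suppressed.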
/// emitStructSetterCall - Call the runtime function to store the value
/// from the first formal parameter into the given ivar.
static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
ObjCIvarDecl *ivar) {
// objc_copyStruct (&structIvar, &Arg,
// sizeof (struct something), true, false);
CallArgList args;
// The first argument is the address of the ivar.
llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
CGF.LoadObjCSelf(), ivar, 0)
.getAddress();
ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
// The second argument is the address of the parameter variable.
ParmVarDecl *argVar = *OMD->param_begin();
DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
VK_LValue, SourceLocation());
llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
// The third argument is the sizeof the type.
llvm::Value *size =
CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
args.add(RValue::get(size), CGF.getContext().getSizeType());
// The fourth argument is the 'isAtomic' flag.
args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);
// The fifth argument is the 'hasStrong' flag.
// FIXME: should this really always be false?
args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
llvm::Value *copyStructFn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(CGF.getContext().VoidTy,
args,
FunctionType::ExtInfo(),
RequiredArgs::All),
copyStructFn, ReturnValueSlot(), args);
}
/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
/// the value from the first formal parameter into the given ivar, using
/// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
ObjCMethodDecl *OMD,
ObjCIvarDecl *ivar,
llvm::Constant *AtomicHelperFn) {
// objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
// AtomicHelperFn);
CallArgList args;
// The first argument is the address of the ivar.
llvm::Value *ivarAddr =
CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
CGF.LoadObjCSelf(), ivar, 0).getAddress();
ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
// The second argument is the address of the parameter variable.
ParmVarDecl *argVar = *OMD->param_begin();
DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
VK_LValue, SourceLocation());
llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
// Third argument is the helper function.
args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
llvm::Value *copyCppAtomicObjectFn =
CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(CGF.getContext().VoidTy,
args,
FunctionType::ExtInfo(),
RequiredArgs::All),
copyCppAtomicObjectFn, ReturnValueSlot(), args);
}
static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
Expr *setter = PID->getSetterCXXAssignment();
if (!setter) return true;
  // Sema only makes one of these when the ivar has a C++ class type,
// so the form is pretty constrained.
// An operator call is trivial if the function it calls is trivial.
// This also implies that there's nothing non-trivial going on with
// the arguments, because operator= can only be trivial if it's a
// synthesized assignment operator and therefore both parameters are
// references.
if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
if (const FunctionDecl *callee
= dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
if (callee->isTrivial())
return true;
return false;
}
assert(isa<ExprWithCleanups>(setter));
return false;
}
static bool UseOptimizedSetter(CodeGenModule &CGM) {
if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
return false;
return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
}
void
CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
const ObjCPropertyImplDecl *propImpl,
llvm::Constant *AtomicHelperFn) {
const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();
// Just use the setter expression if Sema gave us one and it's
// non-trivial.
if (!hasTrivialSetExpr(propImpl)) {
if (!AtomicHelperFn)
// If non-atomic, assignment is called directly.
EmitStmt(propImpl->getSetterCXXAssignment());
else
// If atomic, assignment is called via a locking api.
emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
AtomicHelperFn);
return;
}
PropertyImplStrategy strategy(CGM, propImpl);
switch (strategy.getKind()) {
case PropertyImplStrategy::Native: {
// We don't need to do anything for a zero-size struct.
if (strategy.getIvarSize().isZero())
return;
llvm::Value *argAddr = LocalDeclMap[*setterMethod->param_begin()];
LValue ivarLValue =
EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
llvm::Value *ivarAddr = ivarLValue.getAddress();
// Currently, all atomic accesses have to be through integer
// types, so there's no point in trying to pick a prettier type.
llvm::Type *bitcastType =
llvm::Type::getIntNTy(getLLVMContext(),
getContext().toBits(strategy.getIvarSize()));
bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
// Cast both arguments to the chosen operation type.
argAddr = Builder.CreateBitCast(argAddr, bitcastType);
ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
// This bitcast load is likely to cause some nasty IR.
llvm::Value *load = Builder.CreateLoad(argAddr);
// Perform an atomic store. There are no memory ordering requirements.
llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
store->setAlignment(strategy.getIvarAlignment().getQuantity());
store->setAtomic(llvm::Unordered);
return;
}
case PropertyImplStrategy::GetSetProperty:
case PropertyImplStrategy::SetPropertyAndExpressionGet: {
llvm::Value *setOptimizedPropertyFn = nullptr;
llvm::Value *setPropertyFn = nullptr;
if (UseOptimizedSetter(CGM)) {
      // The runtime provides the optimized setter entry points
      // (OS X 10.8 / iOS 6.0 and later) and GC is off.
setOptimizedPropertyFn =
CGM.getObjCRuntime()
.GetOptimizedPropertySetFunction(strategy.isAtomic(),
strategy.isCopy());
if (!setOptimizedPropertyFn) {
CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
return;
}
}
else {
setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
if (!setPropertyFn) {
CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
return;
}
}
// Emit objc_setProperty((id) self, _cmd, offset, arg,
// <is-atomic>, <is-copy>).
llvm::Value *cmd =
Builder.CreateLoad(LocalDeclMap[setterMethod->getCmdDecl()]);
llvm::Value *self =
Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
llvm::Value *ivarOffset =
EmitIvarOffset(classImpl->getClassInterface(), ivar);
llvm::Value *arg = LocalDeclMap[*setterMethod->param_begin()];
arg = Builder.CreateBitCast(Builder.CreateLoad(arg, "arg"), VoidPtrTy);
CallArgList args;
args.add(RValue::get(self), getContext().getObjCIdType());
args.add(RValue::get(cmd), getContext().getObjCSelType());
if (setOptimizedPropertyFn) {
args.add(RValue::get(arg), getContext().getObjCIdType());
args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
EmitCall(getTypes().arrangeFreeFunctionCall(getContext().VoidTy, args,
FunctionType::ExtInfo(),
RequiredArgs::All),
setOptimizedPropertyFn, ReturnValueSlot(), args);
} else {
args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
args.add(RValue::get(arg), getContext().getObjCIdType());
args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
getContext().BoolTy);
args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
getContext().BoolTy);
// FIXME: We shouldn't need to get the function info here, the runtime
// already should have computed it to build the function.
EmitCall(getTypes().arrangeFreeFunctionCall(getContext().VoidTy, args,
FunctionType::ExtInfo(),
RequiredArgs::All),
setPropertyFn, ReturnValueSlot(), args);
}
return;
}
case PropertyImplStrategy::CopyStruct:
emitStructSetterCall(*this, setterMethod, ivar);
return;
case PropertyImplStrategy::Expression:
break;
}
// Otherwise, fake up some ASTs and emit a normal assignment.
ValueDecl *selfDecl = setterMethod->getSelfDecl();
DeclRefExpr self(selfDecl, false, selfDecl->getType(),
VK_LValue, SourceLocation());
ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
selfDecl->getType(), CK_LValueToRValue, &self,
VK_RValue);
ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
SourceLocation(), SourceLocation(),
&selfLoad, true, true);
ParmVarDecl *argDecl = *setterMethod->param_begin();
QualType argType = argDecl->getType().getNonReferenceType();
DeclRefExpr arg(argDecl, false, argType, VK_LValue, SourceLocation());
ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
argType.getUnqualifiedType(), CK_LValueToRValue,
&arg, VK_RValue);
  // The property type can differ from the ivar type in some situations with
  // Objective-C pointer types; we can always bitcast the RHS in these cases.
// The following absurdity is just to ensure well-formed IR.
CastKind argCK = CK_NoOp;
if (ivarRef.getType()->isObjCObjectPointerType()) {
if (argLoad.getType()->isObjCObjectPointerType())
argCK = CK_BitCast;
else if (argLoad.getType()->isBlockPointerType())
argCK = CK_BlockPointerToObjCPointerCast;
else
argCK = CK_CPointerToObjCPointerCast;
} else if (ivarRef.getType()->isBlockPointerType()) {
if (argLoad.getType()->isBlockPointerType())
argCK = CK_BitCast;
else
argCK = CK_AnyPointerToBlockPointerCast;
} else if (ivarRef.getType()->isPointerType()) {
argCK = CK_BitCast;
}
ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
ivarRef.getType(), argCK, &argLoad,
VK_RValue);
Expr *finalArg = &argLoad;
if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
argLoad.getType()))
finalArg = &argCast;
BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
ivarRef.getType(), VK_RValue, OK_Ordinary,
SourceLocation(), false);
EmitStmt(&assign);
}
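// The synthesized assignment amounts to
//   self->ivar = (cast)arg;
// where the cast kind chosen above (e.g. CK_BitCast when an 'id' argument is
// stored into a concretely typed object-pointer ivar) keeps the IR well typed
// even when the property and ivar types differ.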
/// \brief Generate an Objective-C property setter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
llvm::Constant *AtomicHelperFn =
CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID);
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
assert(OMD && "Invalid call to generate setter (empty method)");
StartObjCMethod(OMD, IMP->getClassInterface());
generateObjCSetterBody(IMP, PID, AtomicHelperFn);
FinishFunction();
}
namespace {
struct DestroyIvar : EHScopeStack::Cleanup {
private:
llvm::Value *addr;
const ObjCIvarDecl *ivar;
CodeGenFunction::Destroyer *destroyer;
bool useEHCleanupForArray;
public:
DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
CodeGenFunction::Destroyer *destroyer,
bool useEHCleanupForArray)
: addr(addr), ivar(ivar), destroyer(destroyer),
useEHCleanupForArray(useEHCleanupForArray) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
LValue lvalue
= CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
flags.isForNormalCleanup() && useEHCleanupForArray);
}
};
}
/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
static void destroyARCStrongWithStore(CodeGenFunction &CGF,
llvm::Value *addr,
QualType type) {
llvm::Value *null = getNullForVariable(addr);
CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
}
static void emitCXXDestructMethod(CodeGenFunction &CGF,
ObjCImplementationDecl *impl) {
CodeGenFunction::RunCleanupsScope scope(CGF);
llvm::Value *self = CGF.LoadObjCSelf();
const ObjCInterfaceDecl *iface = impl->getClassInterface();
for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
ivar; ivar = ivar->getNextIvar()) {
QualType type = ivar->getType();
// Check whether the ivar is a destructible type.
QualType::DestructionKind dtorKind = type.isDestructedType();
if (!dtorKind) continue;
CodeGenFunction::Destroyer *destroyer = nullptr;
// Use a call to objc_storeStrong to destroy strong ivars, for the
// general benefit of the tools.
if (dtorKind == QualType::DK_objc_strong_lifetime) {
destroyer = destroyARCStrongWithStore;
// Otherwise use the default for the destruction kind.
} else {
destroyer = CGF.getDestroyer(dtorKind);
}
CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);
CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
cleanupKind & EHCleanup);
}
assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
}
void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
ObjCMethodDecl *MD,
bool ctor) {
MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
StartObjCMethod(MD, IMP->getClassInterface());
// Emit .cxx_construct.
if (ctor) {
// Suppress the final autorelease in ARC.
AutoreleaseResult = false;
for (const auto *IvarInit : IMP->inits()) {
FieldDecl *Field = IvarInit->getAnyMember();
ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
LoadObjCSelf(), Ivar, 0);
EmitAggExpr(IvarInit->getInit(),
AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
}
// constructor returns 'self'.
CodeGenTypes &Types = CGM.getTypes();
QualType IdTy(CGM.getContext().getObjCIdType());
llvm::Value *SelfAsId =
Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);
// Emit .cxx_destruct.
} else {
emitCXXDestructMethod(*this, IMP);
}
FinishFunction();
}
bool CodeGenFunction::IndirectObjCSetterArg(const CGFunctionInfo &FI) {
CGFunctionInfo::const_arg_iterator it = FI.arg_begin();
  // Skip the implicit 'self' and '_cmd' arguments to reach the setter's
  // value argument.
  ++it; ++it;
const ABIArgInfo &AI = it->info;
// FIXME. Is this sufficient check?
return (AI.getKind() == ABIArgInfo::Indirect);
}
bool CodeGenFunction::IvarTypeWithAggrGCObjects(QualType Ty) {
if (CGM.getLangOpts().getGC() == LangOptions::NonGC)
return false;
if (const RecordType *FDTTy = Ty.getTypePtr()->getAs<RecordType>())
return FDTTy->getDecl()->hasObjectMember();
return false;
}
llvm::Value *CodeGenFunction::LoadObjCSelf() {
VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
DeclRefExpr DRE(Self, /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
Self->getType(), VK_LValue, SourceLocation());
return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation());
}
QualType CodeGenFunction::TypeOfSelfObject() {
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
getContext().getCanonicalType(selfDecl->getType()));
return PTy->getPointeeType();
}
void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
llvm::Constant *EnumerationMutationFn =
CGM.getObjCRuntime().EnumerationMutationFunction();
if (!EnumerationMutationFn) {
CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
return;
}
CGDebugInfo *DI = getDebugInfo();
if (DI)
DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
// The local variable comes into scope immediately.
AutoVarEmission variable = AutoVarEmission::invalid();
if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));
JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");
// Fast enumeration state.
QualType StateTy = CGM.getObjCFastEnumerationStateType();
llvm::AllocaInst *StatePtr = CreateMemTemp(StateTy, "state.ptr");
EmitNullInitialization(StatePtr, StateTy);
// Number of elements in the items array.
static const unsigned NumItems = 16;
// Fetch the countByEnumeratingWithState:objects:count: selector.
IdentifierInfo *II[] = {
&CGM.getContext().Idents.get("countByEnumeratingWithState"),
&CGM.getContext().Idents.get("objects"),
&CGM.getContext().Idents.get("count")
};
Selector FastEnumSel =
CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);
QualType ItemsTy =
getContext().getConstantArrayType(getContext().getObjCIdType(),
llvm::APInt(32, NumItems),
ArrayType::Normal, 0);
llvm::Value *ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
// Emit the collection pointer. In ARC, we do a retain.
llvm::Value *Collection;
if (getLangOpts().ObjCAutoRefCount) {
Collection = EmitARCRetainScalarExpr(S.getCollection());
// Enter a cleanup to do the release.
EmitObjCConsumeObject(S.getCollection()->getType(), Collection);
} else {
Collection = EmitScalarExpr(S.getCollection());
}
// The 'continue' label needs to appear within the cleanup for the
// collection object.
JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");
// Send it our message:
CallArgList Args;
// The first argument is a temporary of the enumeration-state type.
Args.add(RValue::get(StatePtr), getContext().getPointerType(StateTy));
// The second argument is a temporary array with space for NumItems
// pointers. We'll actually be loading elements from the array
// pointer written into the control state; this buffer is so that
// collections that *aren't* backed by arrays can still queue up
// batches of elements.
Args.add(RValue::get(ItemsPtr), getContext().getPointerType(ItemsTy));
// The third argument is the capacity of that temporary array.
llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
Args.add(RValue::get(Count), getContext().UnsignedLongTy);
// Start the enumeration.
RValue CountRV =
CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
getContext().UnsignedLongTy,
FastEnumSel,
Collection, Args);
// The initial number of objects that were returned in the buffer.
llvm::Value *initialBufferLimit = CountRV.getScalarVal();
llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty");
llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit");
llvm::Value *zero = llvm::Constant::getNullValue(UnsignedLongLTy);
// If the limit pointer was zero to begin with, the collection is
// empty; skip all this. Set the branch weight assuming this has the same
// probability of exiting the loop as any other loop exit.
uint64_t EntryCount = getCurrentProfileCount();
Builder.CreateCondBr(
Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB,
LoopInitBB,
createProfileWeights(EntryCount, getProfileCount(S.getBody())));
// Otherwise, initialize the loop.
EmitBlock(LoopInitBB);
// Save the initial mutations value. This is the value at an
// address that was written into the state object by
// countByEnumeratingWithState:objects:count:.
llvm::Value *StateMutationsPtrPtr = Builder.CreateStructGEP(
StatePtr->getAllocatedType(), StatePtr, 2, "mutationsptr.ptr");
llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
"mutationsptr");
llvm::Value *initialMutations =
Builder.CreateLoad(StateMutationsPtr, "forcoll.initial-mutations");
// Start looping. This is the point we return to whenever we have a
// fresh, non-empty batch of objects.
llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody");
EmitBlock(LoopBodyBB);
// The current index into the buffer.
llvm::PHINode *index = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.index");
index->addIncoming(zero, LoopInitBB);
// The current buffer size.
llvm::PHINode *count = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.count");
count->addIncoming(initialBufferLimit, LoopInitBB);
incrementProfileCounter(&S);
// Check whether the mutations value has changed from where it was
// at start. StateMutationsPtr should actually be invariant between
// refreshes.
StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
llvm::Value *currentMutations
= Builder.CreateLoad(StateMutationsPtr, "statemutations");
llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");
Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations),
WasNotMutatedBB, WasMutatedBB);
// If so, call the enumeration-mutation function.
EmitBlock(WasMutatedBB);
llvm::Value *V =
Builder.CreateBitCast(Collection,
ConvertType(getContext().getObjCIdType()));
CallArgList Args2;
Args2.add(RValue::get(V), getContext().getObjCIdType());
// FIXME: We shouldn't need to get the function info here, the runtime already
// should have computed it to build the function.
EmitCall(CGM.getTypes().arrangeFreeFunctionCall(getContext().VoidTy, Args2,
FunctionType::ExtInfo(),
RequiredArgs::All),
EnumerationMutationFn, ReturnValueSlot(), Args2);
// Otherwise, or if the mutation function returns, just continue.
EmitBlock(WasNotMutatedBB);
// Initialize the element variable.
RunCleanupsScope elementVariableScope(*this);
bool elementIsVariable;
LValue elementLValue;
QualType elementType;
if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
// Initialize the variable, in case it's a __block variable or something.
EmitAutoVarInit(variable);
const VarDecl* D = cast<VarDecl>(SD->getSingleDecl());
DeclRefExpr tempDRE(const_cast<VarDecl*>(D), false, D->getType(),
VK_LValue, SourceLocation());
elementLValue = EmitLValue(&tempDRE);
elementType = D->getType();
elementIsVariable = true;
if (D->isARCPseudoStrong())
elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone);
} else {
elementLValue = LValue(); // suppress warning
elementType = cast<Expr>(S.getElement())->getType();
elementIsVariable = false;
}
llvm::Type *convertedElementType = ConvertType(elementType);
// Fetch the buffer out of the enumeration state.
// TODO: this pointer should actually be invariant between
// refreshes, which would help us do certain loop optimizations.
llvm::Value *StateItemsPtr = Builder.CreateStructGEP(
StatePtr->getAllocatedType(), StatePtr, 1, "stateitems.ptr");
llvm::Value *EnumStateItems =
Builder.CreateLoad(StateItemsPtr, "stateitems");
// Fetch the value at the current index from the buffer.
llvm::Value *CurrentItemPtr =
Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr);
// Cast that value to the right type.
CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
"currentitem");
// Make sure we have an l-value. Yes, this gets evaluated every
// time through the loop.
if (!elementIsVariable) {
elementLValue = EmitLValue(cast<Expr>(S.getElement()));
EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
} else {
EmitScalarInit(CurrentItem, elementLValue);
}
// If we do have an element variable, this assignment is the end of
// its initialization.
if (elementIsVariable)
EmitAutoVarCleanups(variable);
// Perform the loop body, setting up break and continue labels.
BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
{
RunCleanupsScope Scope(*this);
EmitStmt(S.getBody());
}
BreakContinueStack.pop_back();
// Destroy the element variable now.
elementVariableScope.ForceCleanup();
// Check whether there are more elements.
EmitBlock(AfterBody.getBlock());
llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch");
// First we check in the local buffer.
llvm::Value *indexPlusOne
= Builder.CreateAdd(index, llvm::ConstantInt::get(UnsignedLongLTy, 1));
// If we haven't overrun the buffer yet, we can continue.
// Set the branch weights based on the simplifying assumption that this is
// like a while-loop, i.e., ignoring that the false branch fetches more
// elements and then returns to the loop.
Builder.CreateCondBr(
Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB,
createProfileWeights(getProfileCount(S.getBody()), EntryCount));
index->addIncoming(indexPlusOne, AfterBody.getBlock());
count->addIncoming(count, AfterBody.getBlock());
// Otherwise, we have to fetch more elements.
EmitBlock(FetchMoreBB);
CountRV =
CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
getContext().UnsignedLongTy,
FastEnumSel,
Collection, Args);
// If we got a zero count, we're done.
llvm::Value *refetchCount = CountRV.getScalarVal();
// (note that the message send might split FetchMoreBB)
index->addIncoming(zero, Builder.GetInsertBlock());
count->addIncoming(refetchCount, Builder.GetInsertBlock());
Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero),
EmptyBB, LoopBodyBB);
// No more elements.
EmitBlock(EmptyBB);
if (!elementIsVariable) {
// If the element was not a declaration, set it to be null.
llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
elementLValue = EmitLValue(cast<Expr>(S.getElement()));
EmitStoreThroughLValue(RValue::get(null), elementLValue);
}
if (DI)
DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
// Leave the cleanup we entered in ARC.
if (getLangOpts().ObjCAutoRefCount)
PopCleanupBlock();
EmitBlock(LoopEnd.getBlock());
}
void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
CGM.getObjCRuntime().EmitTryStmt(*this, S);
}
void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
CGM.getObjCRuntime().EmitThrowStmt(*this, S);
}
void CodeGenFunction::EmitObjCAtSynchronizedStmt(
const ObjCAtSynchronizedStmt &S) {
CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
}
/// Produce the code for a CK_ARCProduceObject. Just does a
/// primitive retain.
llvm::Value *CodeGenFunction::EmitObjCProduceObject(QualType type,
llvm::Value *value) {
return EmitARCRetain(type, value);
}
namespace {
struct CallObjCRelease : EHScopeStack::Cleanup {
CallObjCRelease(llvm::Value *object) : object(object) {}
llvm::Value *object;
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Releases at the end of the full-expression are imprecise.
CGF.EmitARCRelease(object, ARCImpreciseLifetime);
}
};
}
/// Produce the code for a CK_ARCConsumeObject. Does a primitive
/// release at the end of the full-expression.
llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
llvm::Value *object) {
// If we're in a conditional branch, we need to make the cleanup
// conditional.
pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object);
return object;
}
llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
llvm::Value *value) {
return EmitARCRetainAutorelease(type, value);
}
/// Given a number of pointers, inform the optimizer that they're
/// being intrinsically used up until this point in the program.
void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
llvm::Constant *&fn = CGM.getARCEntrypoints().clang_arc_use;
if (!fn) {
llvm::FunctionType *fnType =
llvm::FunctionType::get(CGM.VoidTy, None, true);
fn = CGM.CreateRuntimeFunction(fnType, "clang.arc.use");
}
// This isn't really a "runtime" function, but as an intrinsic it
// doesn't really matter as long as we align things up.
EmitNounwindRuntimeCall(fn, values);
}
static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
llvm::FunctionType *type,
StringRef fnName) {
llvm::Constant *fn = CGM.CreateRuntimeFunction(type, fnName);
if (llvm::Function *f = dyn_cast<llvm::Function>(fn)) {
// If the target runtime doesn't naturally support ARC, emit weak
// references to the runtime support library. We don't really
// permit this to fail, but we need a particular relocation style.
if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
f->setLinkage(llvm::Function::ExternalWeakLinkage);
} else if (fnName == "objc_retain" || fnName == "objc_release") {
// If we have Native ARC, set nonlazybind attribute for these APIs for
// performance.
f->addFnAttr(llvm::Attribute::NonLazyBind);
}
}
return fn;
}
/// Perform an operation having the signature
/// i8* (i8*)
/// where a null input causes a no-op and returns null.
static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
llvm::Value *value,
llvm::Constant *&fn,
StringRef fnName,
bool isTailCall = false) {
if (isa<llvm::ConstantPointerNull>(value)) return value;
if (!fn) {
llvm::FunctionType *fnType =
llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false);
fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
}
// Cast the argument to 'id'.
llvm::Type *origType = value->getType();
value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
// Call the function.
llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value);
if (isTailCall)
call->setTailCall();
// Cast the result back to the original type.
return CGF.Builder.CreateBitCast(call, origType);
}
/// Perform an operation having the following signature:
/// i8* (i8**)
static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
llvm::Value *addr,
llvm::Constant *&fn,
StringRef fnName) {
if (!fn) {
llvm::FunctionType *fnType =
llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrPtrTy, false);
fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
}
// Cast the argument to 'id*'.
llvm::Type *origType = addr->getType();
addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
// Call the function.
llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr);
// Cast the result back to a dereference of the original type.
if (origType != CGF.Int8PtrPtrTy)
result = CGF.Builder.CreateBitCast(result,
cast<llvm::PointerType>(origType)->getElementType());
return result;
}
/// Perform an operation having the following signature:
/// i8* (i8**, i8*)
static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
llvm::Value *addr,
llvm::Value *value,
llvm::Constant *&fn,
StringRef fnName,
bool ignored) {
assert(cast<llvm::PointerType>(addr->getType())->getElementType()
== value->getType());
if (!fn) {
llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy };
llvm::FunctionType *fnType
= llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
}
llvm::Type *origType = value->getType();
llvm::Value *args[] = {
CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy),
CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)
};
llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args);
if (ignored) return nullptr;
return CGF.Builder.CreateBitCast(result, origType);
}
/// Perform an operation having the following signature:
/// void (i8**, i8**)
static void emitARCCopyOperation(CodeGenFunction &CGF,
llvm::Value *dst,
llvm::Value *src,
llvm::Constant *&fn,
StringRef fnName) {
assert(dst->getType() == src->getType());
if (!fn) {
llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrPtrTy };
llvm::FunctionType *fnType
= llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false);
fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
}
llvm::Value *args[] = {
CGF.Builder.CreateBitCast(dst, CGF.Int8PtrPtrTy),
CGF.Builder.CreateBitCast(src, CGF.Int8PtrPtrTy)
};
CGF.EmitNounwindRuntimeCall(fn, args);
}
/// Produce the code to do a retain. Based on the type, calls one of:
/// call i8* \@objc_retain(i8* %value)
/// call i8* \@objc_retainBlock(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
if (type->isBlockPointerType())
return EmitARCRetainBlock(value, /*mandatory*/ false);
else
return EmitARCRetainNonBlock(value);
}
/// Retain the given object, with normal retain semantics.
/// call i8* \@objc_retain(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
return emitARCValueOperation(*this, value,
CGM.getARCEntrypoints().objc_retain,
"objc_retain");
}
/// Retain the given block, with _Block_copy semantics.
/// call i8* \@objc_retainBlock(i8* %value)
///
/// \param mandatory - If false, emit the call with metadata
/// indicating that it's okay for the optimizer to eliminate this call
/// if it can prove that the block never escapes except down the stack.
llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
bool mandatory) {
llvm::Value *result
= emitARCValueOperation(*this, value,
CGM.getARCEntrypoints().objc_retainBlock,
"objc_retainBlock");
// If the copy isn't mandatory, add !clang.arc.copy_on_escape to
// tell the optimizer that it doesn't need to do this copy if the
// block doesn't escape, where being passed as an argument doesn't
// count as escaping.
if (!mandatory && isa<llvm::Instruction>(result)) {
llvm::CallInst *call
= cast<llvm::CallInst>(result->stripPointerCasts());
assert(call->getCalledValue() == CGM.getARCEntrypoints().objc_retainBlock);
call->setMetadata("clang.arc.copy_on_escape",
llvm::MDNode::get(Builder.getContext(), None));
}
return result;
}
/// Retain the given object which is the result of a function call.
/// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
///
/// Yes, this function name is one character away from a different
/// call with completely different semantics.
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
// Fetch the void(void) inline asm which marks that we're going to
// retain the autoreleased return value.
llvm::InlineAsm *&marker
= CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker;
if (!marker) {
StringRef assembly
= CGM.getTargetCodeGenInfo()
.getARCRetainAutoreleasedReturnValueMarker();
// If we have an empty assembly string, there's nothing to do.
if (assembly.empty()) {
// Otherwise, at -O0, build an inline asm that we're going to call
// in a moment.
} else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
llvm::FunctionType *type =
llvm::FunctionType::get(VoidTy, /*variadic*/false);
marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);
// If we're at -O1 and above, we don't want to litter the code
// with this marker yet, so leave a breadcrumb for the ARC
// optimizer to pick up.
} else {
llvm::NamedMDNode *metadata =
CGM.getModule().getOrInsertNamedMetadata(
"clang.arc.retainAutoreleasedReturnValueMarker");
assert(metadata->getNumOperands() <= 1);
if (metadata->getNumOperands() == 0) {
metadata->addOperand(llvm::MDNode::get(
getLLVMContext(), llvm::MDString::get(getLLVMContext(), assembly)));
}
}
}
// Call the marker asm if we made one, which we do only at -O0.
if (marker)
Builder.CreateCall(marker);
return emitARCValueOperation(*this, value,
CGM.getARCEntrypoints().objc_retainAutoreleasedReturnValue,
"objc_retainAutoreleasedReturnValue");
}
/// Release the given object.
/// call void \@objc_release(i8* %value)
void CodeGenFunction::EmitARCRelease(llvm::Value *value,
ARCPreciseLifetime_t precise) {
if (isa<llvm::ConstantPointerNull>(value)) return;
llvm::Constant *&fn = CGM.getARCEntrypoints().objc_release;
if (!fn) {
llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
fn = createARCRuntimeFunction(CGM, fnType, "objc_release");
}
// Cast the argument to 'id'.
value = Builder.CreateBitCast(value, Int8PtrTy);
// Call objc_release.
llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value);
if (precise == ARCImpreciseLifetime) {
call->setMetadata("clang.imprecise_release",
llvm::MDNode::get(Builder.getContext(), None));
}
}
/// Destroy a __strong variable.
///
/// At -O0, emit a call to store 'null' into the address;
/// instrumenting tools prefer this because the address is exposed,
/// but it's relatively cumbersome to optimize.
///
/// At -O1 and above, just load and call objc_release.
///
/// call void \@objc_storeStrong(i8** %addr, i8* null)
void CodeGenFunction::EmitARCDestroyStrong(llvm::Value *addr,
ARCPreciseLifetime_t precise) {
if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
llvm::PointerType *addrTy = cast<llvm::PointerType>(addr->getType());
llvm::Value *null = llvm::ConstantPointerNull::get(
cast<llvm::PointerType>(addrTy->getElementType()));
EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
return;
}
llvm::Value *value = Builder.CreateLoad(addr);
EmitARCRelease(value, precise);
}
/// Store into a strong object. Always calls this:
/// call void \@objc_storeStrong(i8** %addr, i8* %value)
llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
llvm::Value *value,
bool ignored) {
assert(cast<llvm::PointerType>(addr->getType())->getElementType()
== value->getType());
llvm::Constant *&fn = CGM.getARCEntrypoints().objc_storeStrong;
if (!fn) {
llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy };
llvm::FunctionType *fnType
= llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false);
fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong");
}
llvm::Value *args[] = {
Builder.CreateBitCast(addr, Int8PtrPtrTy),
Builder.CreateBitCast(value, Int8PtrTy)
};
EmitNounwindRuntimeCall(fn, args);
if (ignored) return nullptr;
return value;
}
/// Store into a strong object. Sometimes calls this:
/// call void \@objc_storeStrong(i8** %addr, i8* %value)
/// Other times, breaks it down into components.
llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
llvm::Value *newValue,
bool ignored) {
QualType type = dst.getType();
bool isBlock = type->isBlockPointerType();
// Use a store barrier at -O0 unless this is a block type or the
// lvalue is inadequately aligned.
if (shouldUseFusedARCCalls() &&
!isBlock &&
(dst.getAlignment().isZero() ||
dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
}
// Otherwise, split it out.
// Retain the new value.
newValue = EmitARCRetain(type, newValue);
// Read the old value.
llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation());
// Store. We do this before the release so that any deallocs won't
// see the old value.
EmitStoreOfScalar(newValue, dst);
// Finally, release the old value.
EmitARCRelease(oldValue, dst.isARCPreciseLifetime());
return newValue;
}
/// Autorelease the given object.
/// call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
return emitARCValueOperation(*this, value,
CGM.getARCEntrypoints().objc_autorelease,
"objc_autorelease");
}
/// Autorelease the given object.
/// call i8* \@objc_autoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
return emitARCValueOperation(*this, value,
CGM.getARCEntrypoints().objc_autoreleaseReturnValue,
"objc_autoreleaseReturnValue",
/*isTailCall*/ true);
}
/// Do a fused retain/autorelease of the given object.
/// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
return emitARCValueOperation(*this, value,
CGM.getARCEntrypoints().objc_retainAutoreleaseReturnValue,
"objc_retainAutoreleaseReturnValue",
/*isTailCall*/ true);
}
/// Do a fused retain/autorelease of the given object.
/// call i8* \@objc_retainAutorelease(i8* %value)
/// or
/// %retain = call i8* \@objc_retainBlock(i8* %value)
/// call i8* \@objc_autorelease(i8* %retain)
llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
llvm::Value *value) {
if (!type->isBlockPointerType())
return EmitARCRetainAutoreleaseNonBlock(value);
if (isa<llvm::ConstantPointerNull>(value)) return value;
llvm::Type *origType = value->getType();
value = Builder.CreateBitCast(value, Int8PtrTy);
value = EmitARCRetainBlock(value, /*mandatory*/ true);
value = EmitARCAutorelease(value);
return Builder.CreateBitCast(value, origType);
}
/// Do a fused retain/autorelease of the given object.
/// call i8* \@objc_retainAutorelease(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
return emitARCValueOperation(*this, value,
CGM.getARCEntrypoints().objc_retainAutorelease,
"objc_retainAutorelease");
}
/// i8* \@objc_loadWeak(i8** %addr)
/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
llvm::Value *CodeGenFunction::EmitARCLoadWeak(llvm::Value *addr) {
return emitARCLoadOperation(*this, addr,
CGM.getARCEntrypoints().objc_loadWeak,
"objc_loadWeak");
}
/// i8* \@objc_loadWeakRetained(i8** %addr)
llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(llvm::Value *addr) {
return emitARCLoadOperation(*this, addr,
CGM.getARCEntrypoints().objc_loadWeakRetained,
"objc_loadWeakRetained");
}
/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
/// Returns %value.
llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
llvm::Value *value,
bool ignored) {
return emitARCStoreOperation(*this, addr, value,
CGM.getARCEntrypoints().objc_storeWeak,
"objc_storeWeak", ignored);
}
/// i8* \@objc_initWeak(i8** %addr, i8* %value)
/// Returns %value. %addr is known to not have a current weak entry.
/// Essentially equivalent to:
/// *addr = nil; objc_storeWeak(addr, value);
void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) {
// If we're initializing to null, just write null to memory; no need
// to get the runtime involved. But don't do this if optimization
// is enabled, because accounting for this would make the optimizer
// much more complicated.
if (isa<llvm::ConstantPointerNull>(value) &&
CGM.getCodeGenOpts().OptimizationLevel == 0) {
Builder.CreateStore(value, addr);
return;
}
emitARCStoreOperation(*this, addr, value,
CGM.getARCEntrypoints().objc_initWeak,
"objc_initWeak", /*ignored*/ true);
}
/// void \@objc_destroyWeak(i8** %addr)
/// Essentially objc_storeWeak(addr, nil).
void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
llvm::Constant *&fn = CGM.getARCEntrypoints().objc_destroyWeak;
if (!fn) {
llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrPtrTy, false);
fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak");
}
// Cast the argument to 'id*'.
addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
EmitNounwindRuntimeCall(fn, addr);
}
/// void \@objc_moveWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Leaves %src pointing to nothing.
/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
emitARCCopyOperation(*this, dst, src,
CGM.getARCEntrypoints().objc_moveWeak,
"objc_moveWeak");
}
/// void \@objc_copyWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Essentially
/// objc_release(objc_initWeak(dest, objc_loadWeakRetained(src)))
void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
emitARCCopyOperation(*this, dst, src,
CGM.getARCEntrypoints().objc_copyWeak,
"objc_copyWeak");
}
/// Produce the code to do an objc_autoreleasepool_push.
/// call i8* \@objc_autoreleasePoolPush(void)
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPush;
if (!fn) {
llvm::FunctionType *fnType =
llvm::FunctionType::get(Int8PtrTy, false);
fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
}
return EmitNounwindRuntimeCall(fn);
}
/// Produce the code to pop an autorelease pool.
/// call void \@objc_autoreleasePoolPop(i8* %ptr)
void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
assert(value->getType() == Int8PtrTy);
llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPop;
if (!fn) {
llvm::FunctionType *fnType =
llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
// We don't want to use a weak import here; instead we should not
// fall into this path.
fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop");
}
// objc_autoreleasePoolPop can throw.
EmitRuntimeCallOrInvoke(fn, value);
}
/// Produce the code to do the MRR version of objc_autoreleasepool_push,
/// which is: [[NSAutoreleasePool alloc] init];
/// where 'alloc' is declared as '+ (id)alloc;' in the NSAutoreleasePool
/// class and 'init' is declared as '- (id)init;' in its NSObject superclass.
///
llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this);
// [NSAutoreleasePool alloc]
IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
Selector AllocSel = getContext().Selectors.getSelector(0, &II);
CallArgList Args;
RValue AllocRV =
Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
getContext().getObjCIdType(),
AllocSel, Receiver, Args);
// [Receiver init]
Receiver = AllocRV.getScalarVal();
II = &CGM.getContext().Idents.get("init");
Selector InitSel = getContext().Selectors.getSelector(0, &II);
RValue InitRV =
Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
getContext().getObjCIdType(),
InitSel, Receiver, Args);
return InitRV.getScalarVal();
}
/// Produce the MRR code to pop an autorelease pool:
/// [tmp drain];
void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
Selector DrainSel = getContext().Selectors.getSelector(0, &II);
CallArgList Args;
CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
getContext().VoidTy, DrainSel, Arg, Args);
}
void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
llvm::Value *addr,
QualType type) {
CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime);
}
void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
llvm::Value *addr,
QualType type) {
CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime);
}
void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
llvm::Value *addr,
QualType type) {
CGF.EmitARCDestroyWeak(addr);
}
namespace {
struct CallObjCAutoreleasePoolObject : EHScopeStack::Cleanup {
llvm::Value *Token;
CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.EmitObjCAutoreleasePoolPop(Token);
}
};
struct CallObjCMRRAutoreleasePoolObject : EHScopeStack::Cleanup {
llvm::Value *Token;
CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.EmitObjCMRRAutoreleasePoolPop(Token);
}
};
}
void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
if (CGM.getLangOpts().ObjCAutoRefCount)
EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
else
EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
}
static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
LValue lvalue,
QualType type) {
switch (type.getObjCLifetime()) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
case Qualifiers::OCL_Strong:
case Qualifiers::OCL_Autoreleasing:
return TryEmitResult(CGF.EmitLoadOfLValue(lvalue,
SourceLocation()).getScalarVal(),
false);
case Qualifiers::OCL_Weak:
return TryEmitResult(CGF.EmitARCLoadWeakRetained(lvalue.getAddress()),
true);
}
llvm_unreachable("impossible lifetime!");
}
static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
const Expr *e) {
e = e->IgnoreParens();
QualType type = e->getType();
// If we're loading retained from a __strong xvalue, we can avoid
// an extra retain/release pair by zeroing out the source of this
// "move" operation.
if (e->isXValue() &&
!type.isConstQualified() &&
type.getObjCLifetime() == Qualifiers::OCL_Strong) {
// Emit the lvalue.
LValue lv = CGF.EmitLValue(e);
// Load the object pointer.
llvm::Value *result = CGF.EmitLoadOfLValue(lv,
SourceLocation()).getScalarVal();
// Set the source pointer to NULL.
CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);
return TryEmitResult(result, true);
}
// As a very special optimization, in ARC++, if the l-value is the
// result of a non-volatile assignment, do a simple retain of the
// result of the call to objc_storeWeak instead of reloading.
if (CGF.getLangOpts().CPlusPlus &&
!type.isVolatileQualified() &&
type.getObjCLifetime() == Qualifiers::OCL_Weak &&
isa<BinaryOperator>(e) &&
cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
return TryEmitResult(CGF.EmitScalarExpr(e), false);
return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
}
static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
llvm::Value *value);
/// Given that the given expression is some sort of call (which does
/// not return retained), emit a retain following it.
static llvm::Value *emitARCRetainCall(CodeGenFunction &CGF, const Expr *e) {
llvm::Value *value = CGF.EmitScalarExpr(e);
return emitARCRetainAfterCall(CGF, value);
}
static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
llvm::Value *value) {
if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
// Place the retain immediately following the call.
CGF.Builder.SetInsertPoint(call->getParent(),
++llvm::BasicBlock::iterator(call));
value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
CGF.Builder.restoreIP(ip);
return value;
} else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
// Place the retain at the beginning of the normal destination block.
llvm::BasicBlock *BB = invoke->getNormalDest();
CGF.Builder.SetInsertPoint(BB, BB->begin());
value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
CGF.Builder.restoreIP(ip);
return value;
// Bitcasts can arise because of related-result returns. Rewrite
// the operand.
} else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
llvm::Value *operand = bitcast->getOperand(0);
operand = emitARCRetainAfterCall(CGF, operand);
bitcast->setOperand(0, operand);
return bitcast;
// Generic fall-back case.
} else {
// Retain using the non-block variant: we never need to do a copy
// of a block that's been returned to us.
return CGF.EmitARCRetainNonBlock(value);
}
}
/// Determine whether it might be important to emit a separate
/// objc_retain_block on the result of the given expression, or
/// whether it's okay to just emit it in a +1 context.
static bool shouldEmitSeparateBlockRetain(const Expr *e) {
assert(e->getType()->isBlockPointerType());
e = e->IgnoreParens();
// For future goodness, emit block expressions directly in +1
// contexts if we can.
if (isa<BlockExpr>(e))
return false;
if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
switch (cast->getCastKind()) {
// Emitting these operations in +1 contexts is goodness.
case CK_LValueToRValue:
case CK_ARCReclaimReturnedObject:
case CK_ARCConsumeObject:
case CK_ARCProduceObject:
return false;
// These operations preserve a block type.
case CK_NoOp:
case CK_BitCast:
return shouldEmitSeparateBlockRetain(cast->getSubExpr());
// These operations are known to be bad (or haven't been considered).
case CK_AnyPointerToBlockPointerCast:
default:
return true;
}
}
return true;
}
/// Try to emit a PseudoObjectExpr at +1.
///
/// This massively duplicates emitPseudoObjectRValue.
static TryEmitResult tryEmitARCRetainPseudoObject(CodeGenFunction &CGF,
const PseudoObjectExpr *E) {
SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
// Find the result expression.
const Expr *resultExpr = E->getResultExpr();
assert(resultExpr);
TryEmitResult result;
for (PseudoObjectExpr::const_semantics_iterator
i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
const Expr *semantic = *i;
// If this semantic expression is an opaque value, bind it
// to the result of its source expression.
if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
typedef CodeGenFunction::OpaqueValueMappingData OVMA;
OVMA opaqueData;
// If this semantic is the result of the pseudo-object
// expression, try to evaluate the source as +1.
if (ov == resultExpr) {
assert(!OVMA::shouldBindAsLValue(ov));
result = tryEmitARCRetainScalarExpr(CGF, ov->getSourceExpr());
opaqueData = OVMA::bind(CGF, ov, RValue::get(result.getPointer()));
// Otherwise, just bind it.
} else {
opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
}
opaques.push_back(opaqueData);
// Otherwise, if the expression is the result, evaluate it
// and remember the result.
} else if (semantic == resultExpr) {
result = tryEmitARCRetainScalarExpr(CGF, semantic);
// Otherwise, evaluate the expression in an ignored context.
} else {
CGF.EmitIgnoredExpr(semantic);
}
}
// Unbind all the opaques now.
for (unsigned i = 0, e = opaques.size(); i != e; ++i)
opaques[i].unbind(CGF);
return result;
}
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
// We should *never* see a nested full-expression here, because if
// we fail to emit at +1, our caller must not retain after we close
// out the full-expression.
assert(!isa<ExprWithCleanups>(e));
// The desired result type, if it differs from the type of the
// ultimate opaque expression.
llvm::Type *resultType = nullptr;
while (true) {
e = e->IgnoreParens();
// There's a break at the end of this if-chain; anything
// that wants to keep looping has to explicitly continue.
if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
switch (ce->getCastKind()) {
// No-op casts don't change the type, so we just ignore them.
case CK_NoOp:
e = ce->getSubExpr();
continue;
case CK_LValueToRValue: {
TryEmitResult loadResult
= tryEmitARCRetainLoadOfScalar(CGF, ce->getSubExpr());
if (resultType) {
llvm::Value *value = loadResult.getPointer();
value = CGF.Builder.CreateBitCast(value, resultType);
loadResult.setPointer(value);
}
return loadResult;
}
// These casts can change the type, so remember that and
// soldier on. We only need to remember the outermost such
// cast, though.
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
case CK_BitCast:
if (!resultType)
resultType = CGF.ConvertType(ce->getType());
e = ce->getSubExpr();
assert(e->getType()->hasPointerRepresentation());
continue;
// For consumptions, just emit the subexpression and thus elide
// the retain/release pair.
case CK_ARCConsumeObject: {
llvm::Value *result = CGF.EmitScalarExpr(ce->getSubExpr());
if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
return TryEmitResult(result, true);
}
// Block extends are net +0. Naively, we could just recurse on
// the subexpression, but actually we need to ensure that the
// value is copied as a block, so there's a little filter here.
case CK_ARCExtendBlockObject: {
llvm::Value *result; // will be a +0 value
// If we can't safely assume the sub-expression will produce a
// block-copied value, emit the sub-expression at +0.
if (shouldEmitSeparateBlockRetain(ce->getSubExpr())) {
result = CGF.EmitScalarExpr(ce->getSubExpr());
// Otherwise, try to emit the sub-expression at +1 recursively.
} else {
TryEmitResult subresult
= tryEmitARCRetainScalarExpr(CGF, ce->getSubExpr());
result = subresult.getPointer();
// If that produced a retained value, just use that,
// possibly casting down.
if (subresult.getInt()) {
if (resultType)
result = CGF.Builder.CreateBitCast(result, resultType);
return TryEmitResult(result, true);
}
// Otherwise it's +0.
}
// Retain the object as a block, then cast down.
result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
return TryEmitResult(result, true);
}
// For reclaims, emit the subexpression as a retained call and
// skip the consumption.
case CK_ARCReclaimReturnedObject: {
llvm::Value *result = emitARCRetainCall(CGF, ce->getSubExpr());
if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
return TryEmitResult(result, true);
}
default:
break;
}
// Skip __extension__.
} else if (const UnaryOperator *op = dyn_cast<UnaryOperator>(e)) {
if (op->getOpcode() == UO_Extension) {
e = op->getSubExpr();
continue;
}
// For calls and message sends, use the retained-call logic.
// Delegate inits are a special case in that they're the only
// returns-retained expression that *isn't* surrounded by
// a consume.
} else if (isa<CallExpr>(e) ||
(isa<ObjCMessageExpr>(e) &&
!cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
llvm::Value *result = emitARCRetainCall(CGF, e);
if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
return TryEmitResult(result, true);
// Look through pseudo-object expressions.
} else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
TryEmitResult result
= tryEmitARCRetainPseudoObject(CGF, pseudo);
if (resultType) {
llvm::Value *value = result.getPointer();
value = CGF.Builder.CreateBitCast(value, resultType);
result.setPointer(value);
}
return result;
}
// Conservatively halt the search at any other expression kind.
break;
}
// We didn't find an obvious production, so emit what we've got and
// tell the caller that we didn't manage to retain.
llvm::Value *result = CGF.EmitScalarExpr(e);
if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
return TryEmitResult(result, false);
}
static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
LValue lvalue,
QualType type) {
TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
llvm::Value *value = result.getPointer();
if (!result.getInt())
value = CGF.EmitARCRetain(type, value);
return value;
}
/// EmitARCRetainScalarExpr - Semantically equivalent to
/// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a
/// best-effort attempt to peephole expressions that naturally produce
/// retained objects.
llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
// The retain needs to happen within the full-expression.
if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
enterFullExpression(cleanups);
RunCleanupsScope scope(*this);
return EmitARCRetainScalarExpr(cleanups->getSubExpr());
}
TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
llvm::Value *value = result.getPointer();
if (!result.getInt())
value = EmitARCRetain(e->getType(), value);
return value;
}
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
// The retain needs to happen within the full-expression.
if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
enterFullExpression(cleanups);
RunCleanupsScope scope(*this);
return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr());
}
TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
llvm::Value *value = result.getPointer();
if (result.getInt())
value = EmitARCAutorelease(value);
else
value = EmitARCRetainAutorelease(e->getType(), value);
return value;
}
llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
llvm::Value *result;
bool doRetain;
if (shouldEmitSeparateBlockRetain(e)) {
result = EmitScalarExpr(e);
doRetain = true;
} else {
TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
result = subresult.getPointer();
doRetain = !subresult.getInt();
}
if (doRetain)
result = EmitARCRetainBlock(result, /*mandatory*/ true);
return EmitObjCConsumeObject(e->getType(), result);
}
llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
// In ARC, retain and autorelease the expression.
if (getLangOpts().ObjCAutoRefCount) {
// Do so before running any cleanups for the full-expression.
// EmitARCRetainAutoreleaseScalarExpr does this for us.
return EmitARCRetainAutoreleaseScalarExpr(expr);
}
// Otherwise, use the normal scalar-expression emission. The
// exception machinery doesn't do anything special with the
// exception like retaining it, so there's no safety associated with
// only running cleanups after the throw has started, and when it
// matters it tends to be substantially inferior code.
return EmitScalarExpr(expr);
}
std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
bool ignored) {
// Evaluate the RHS first.
TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
llvm::Value *value = result.getPointer();
bool hasImmediateRetain = result.getInt();
// If we didn't emit a retained object, and the l-value is of block
// type, then we need to emit the block-retain immediately in case
// it invalidates the l-value.
if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
value = EmitARCRetainBlock(value, /*mandatory*/ false);
hasImmediateRetain = true;
}
LValue lvalue = EmitLValue(e->getLHS());
// If the RHS was emitted retained, expand this.
if (hasImmediateRetain) {
llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation());
EmitStoreOfScalar(value, lvalue);
EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
} else {
value = EmitARCStoreStrong(lvalue, value, ignored);
}
return std::pair<LValue,llvm::Value*>(lvalue, value);
}
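/// Emit an assignment to an __autoreleasing l-value: the RHS is emitted
/// retained+autoreleased and then stored with a simple store.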
std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
LValue lvalue = EmitLValue(e->getLHS());
EmitStoreOfScalar(value, lvalue);
return std::pair<LValue,llvm::Value*>(lvalue, value);
}
void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
const ObjCAutoreleasePoolStmt &ARPS) {
const Stmt *subStmt = ARPS.getSubStmt();
const CompoundStmt &S = cast<CompoundStmt>(*subStmt);
CGDebugInfo *DI = getDebugInfo();
if (DI)
DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());
// Keep track of the current cleanup stack depth.
RunCleanupsScope Scope(*this);
if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
llvm::Value *token = EmitObjCAutoreleasePoolPush();
EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
} else {
llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
}
for (const auto *I : S.body())
EmitStmt(I);
if (DI)
DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
}
/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
// We just use an empty inline asm with side effects that takes the object
// pointer as an input, forcing the object to remain live to this point.
llvm::FunctionType *extenderType
= llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
llvm::Value *extender
= llvm::InlineAsm::get(extenderType,
/* assembly */ "",
/* constraints */ "r",
/* side effects */ true);
object = Builder.CreateBitCast(object, VoidPtrTy);
EmitNounwindRuntimeCall(extender, object);
}
/// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with
/// a non-trivial copy-assignment operator, produce the following helper
/// function:
/// static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
///
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
const ObjCPropertyImplDecl *PID) {
if (!getLangOpts().CPlusPlus ||
!getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
return nullptr;
QualType Ty = PID->getPropertyIvarDecl()->getType();
if (!Ty->isRecordType())
return nullptr;
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
return nullptr;
llvm::Constant *HelperFn = nullptr;
if (hasTrivialSetExpr(PID))
return nullptr;
assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
return HelperFn;
ASTContext &C = getContext();
IdentifierInfo *II
= &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
FunctionDecl *FD = FunctionDecl::Create(C,
C.getTranslationUnitDecl(),
SourceLocation(),
SourceLocation(), II, C.VoidTy,
nullptr, SC_Static,
false,
false);
QualType DestTy = C.getPointerType(Ty);
QualType SrcTy = Ty;
SrcTy.addConst();
SrcTy = C.getPointerType(SrcTy);
FunctionArgList args;
ImplicitParamDecl dstDecl(getContext(), FD, SourceLocation(), nullptr,DestTy);
args.push_back(&dstDecl);
ImplicitParamDecl srcDecl(getContext(), FD, SourceLocation(), nullptr, SrcTy);
args.push_back(&srcDecl);
const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
C.VoidTy, args, FunctionType::ExtInfo(), RequiredArgs::All);
llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
"__assign_helper_atomic_property_",
&CGM.getModule());
StartFunction(FD, C.VoidTy, Fn, FI, args);
DeclRefExpr DstExpr(&dstDecl, false, DestTy,
VK_RValue, SourceLocation());
UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
VK_LValue, OK_Ordinary, SourceLocation());
DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
VK_RValue, SourceLocation());
UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
VK_LValue, OK_Ordinary, SourceLocation());
Expr *Args[2] = { &DST, &SRC };
CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
Args, DestTy->getPointeeType(),
VK_LValue, SourceLocation(), false);
EmitStmt(&TheCall);
FinishFunction();
HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
return HelperFn;
}
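/// GenerateObjCAtomicGetterCopyHelperFunction - Given a C++ object type with
/// a non-trivial copy constructor, produce the helper function used by an
/// atomic property getter, which copy-constructs '*source' into '*dest'.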
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
const ObjCPropertyImplDecl *PID) {
if (!getLangOpts().CPlusPlus ||
!getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
return nullptr;
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
QualType Ty = PD->getType();
if (!Ty->isRecordType())
return nullptr;
if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
return nullptr;
llvm::Constant *HelperFn = nullptr;
if (hasTrivialGetExpr(PID))
return nullptr;
assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
return HelperFn;
ASTContext &C = getContext();
IdentifierInfo *II
= &CGM.getContext().Idents.get("__copy_helper_atomic_property_");
FunctionDecl *FD = FunctionDecl::Create(C,
C.getTranslationUnitDecl(),
SourceLocation(),
SourceLocation(), II, C.VoidTy,
nullptr, SC_Static,
false,
false);
QualType DestTy = C.getPointerType(Ty);
QualType SrcTy = Ty;
SrcTy.addConst();
SrcTy = C.getPointerType(SrcTy);
FunctionArgList args;
ImplicitParamDecl dstDecl(getContext(), FD, SourceLocation(), nullptr,DestTy);
args.push_back(&dstDecl);
ImplicitParamDecl srcDecl(getContext(), FD, SourceLocation(), nullptr, SrcTy);
args.push_back(&srcDecl);
const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
C.VoidTy, args, FunctionType::ExtInfo(), RequiredArgs::All);
llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
"__copy_helper_atomic_property_", &CGM.getModule());
StartFunction(FD, C.VoidTy, Fn, FI, args);
DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
VK_RValue, SourceLocation());
UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
VK_LValue, OK_Ordinary, SourceLocation());
CXXConstructExpr *CXXConstExpr =
cast<CXXConstructExpr>(PID->getGetterCXXConstructor());
SmallVector<Expr*, 4> ConstructorArgs;
ConstructorArgs.push_back(&SRC);
ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()),
CXXConstExpr->arg_end());
CXXConstructExpr *TheCXXConstructExpr =
CXXConstructExpr::Create(C, Ty, SourceLocation(),
CXXConstExpr->getConstructor(),
CXXConstExpr->isElidable(),
ConstructorArgs,
CXXConstExpr->hadMultipleCandidates(),
CXXConstExpr->isListInitialization(),
CXXConstExpr->isStdInitListInitialization(),
CXXConstExpr->requiresZeroInitialization(),
CXXConstExpr->getConstructionKind(),
SourceRange());
DeclRefExpr DstExpr(&dstDecl, false, DestTy,
VK_RValue, SourceLocation());
RValue DV = EmitAnyExpr(&DstExpr);
CharUnits Alignment
= getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
EmitAggExpr(TheCXXConstructExpr,
AggValueSlot::forAddr(DV.getScalarVal(), Alignment, Qualifiers(),
AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
FinishFunction();
HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
return HelperFn;
}
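/// EmitBlockCopyAndAutorelease - Send -copy and then -autorelease to the
/// given block, returning the autoreleased copy.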
llvm::Value *
CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
// Get selectors for copy/autorelease.
IdentifierInfo *CopyID = &getContext().Idents.get("copy");
Selector CopySelector =
getContext().Selectors.getNullarySelector(CopyID);
IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
Selector AutoreleaseSelector =
getContext().Selectors.getNullarySelector(AutoreleaseID);
// Emit calls to copy/autorelease.
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
llvm::Value *Val = Block;
RValue Result;
Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
Ty, CopySelector,
Val, CallArgList(), nullptr, nullptr);
Val = Result.getScalarVal();
Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
Ty, AutoreleaseSelector,
Val, CallArgList(), nullptr, nullptr);
Val = Result.getScalarVal();
return Val;
}
CGObjCRuntime::~CGObjCRuntime() {}
#endif // HLSL Change
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CodeGenTBAA.h | //===--- CodeGenTBAA.h - TBAA information for LLVM CodeGen ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the code that manages TBAA information and defines the TBAA policy
// for the optimizer to use.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENTBAA_H
#define LLVM_CLANG_LIB_CODEGEN_CODEGENTBAA_H
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/MDBuilder.h"
namespace llvm {
class LLVMContext;
class MDNode;
}
namespace clang {
class ASTContext;
class CodeGenOptions;
class LangOptions;
class MangleContext;
class QualType;
class Type;
namespace CodeGen {
class CGRecordLayout;
struct TBAAPathTag {
TBAAPathTag(const Type *B, const llvm::MDNode *A, uint64_t O)
: BaseT(B), AccessN(A), Offset(O) {}
const Type *BaseT;
const llvm::MDNode *AccessN;
uint64_t Offset;
};
/// CodeGenTBAA - This class organizes the per-module state that is used
/// while generating TBAA information for loads and stores of AST types.
class CodeGenTBAA {
ASTContext &Context;
const CodeGenOptions &CodeGenOpts;
const LangOptions &Features;
MangleContext &MContext;
// MDHelper - Helper for creating metadata.
llvm::MDBuilder MDHelper;
/// MetadataCache - This maps clang::Types to scalar llvm::MDNodes describing
/// them.
llvm::DenseMap<const Type *, llvm::MDNode *> MetadataCache;
/// This maps clang::Types to a struct node in the type DAG.
llvm::DenseMap<const Type *, llvm::MDNode *> StructTypeMetadataCache;
/// This maps TBAAPathTags to a tag node.
llvm::DenseMap<TBAAPathTag, llvm::MDNode *> StructTagMetadataCache;
/// This maps a scalar type to a scalar tag node.
llvm::DenseMap<const llvm::MDNode *, llvm::MDNode *> ScalarTagMetadataCache;
/// StructMetadataCache - This maps clang::Types to llvm::MDNodes describing
/// them for struct assignments.
llvm::DenseMap<const Type *, llvm::MDNode *> StructMetadataCache;
llvm::MDNode *Root;
llvm::MDNode *Char;
/// getRoot - This is the mdnode for the root of the metadata type graph
/// for this translation unit.
llvm::MDNode *getRoot();
/// getChar - This is the mdnode for "char", which is special, and any types
/// considered to be equivalent to it.
llvm::MDNode *getChar();
/// CollectFields - Collect information about the fields of a type for
/// !tbaa.struct metadata formation. Return false for an unsupported type.
bool CollectFields(uint64_t BaseOffset,
QualType Ty,
SmallVectorImpl<llvm::MDBuilder::TBAAStructField> &Fields,
bool MayAlias);
/// A wrapper function to create a scalar type. For struct-path aware TBAA,
/// the scalar type has the same format as the struct type: name, offset,
/// pointer to another node in the type DAG.
llvm::MDNode *createTBAAScalarType(StringRef Name, llvm::MDNode *Parent);
public:
CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext &VMContext,
const CodeGenOptions &CGO,
const LangOptions &Features,
MangleContext &MContext);
~CodeGenTBAA();
/// getTBAAInfo - Get the TBAA MDNode to be used for a dereference
/// of the given type.
llvm::MDNode *getTBAAInfo(QualType QTy);
/// getTBAAInfoForVTablePtr - Get the TBAA MDNode to be used for a
/// dereference of a vtable pointer.
llvm::MDNode *getTBAAInfoForVTablePtr();
/// getTBAAStructInfo - Get the TBAAStruct MDNode to be used for a memcpy of
/// the given type.
llvm::MDNode *getTBAAStructInfo(QualType QTy);
/// Get the MDNode in the type DAG for given struct type QType.
llvm::MDNode *getTBAAStructTypeInfo(QualType QType);
/// Get the tag MDNode for a given base type, the actual scalar access MDNode
/// and offset into the base type.
llvm::MDNode *getTBAAStructTagInfo(QualType BaseQType,
llvm::MDNode *AccessNode, uint64_t Offset);
/// Get the scalar tag MDNode for a given scalar type.
llvm::MDNode *getTBAAScalarTagInfo(llvm::MDNode *AccessNode);
};
} // end namespace CodeGen
} // end namespace clang
namespace llvm {
template<> struct DenseMapInfo<clang::CodeGen::TBAAPathTag> {
static clang::CodeGen::TBAAPathTag getEmptyKey() {
return clang::CodeGen::TBAAPathTag(
DenseMapInfo<const clang::Type *>::getEmptyKey(),
DenseMapInfo<const MDNode *>::getEmptyKey(),
DenseMapInfo<uint64_t>::getEmptyKey());
}
static clang::CodeGen::TBAAPathTag getTombstoneKey() {
return clang::CodeGen::TBAAPathTag(
DenseMapInfo<const clang::Type *>::getTombstoneKey(),
DenseMapInfo<const MDNode *>::getTombstoneKey(),
DenseMapInfo<uint64_t>::getTombstoneKey());
}
static unsigned getHashValue(const clang::CodeGen::TBAAPathTag &Val) {
return DenseMapInfo<const clang::Type *>::getHashValue(Val.BaseT) ^
DenseMapInfo<const MDNode *>::getHashValue(Val.AccessN) ^
DenseMapInfo<uint64_t>::getHashValue(Val.Offset);
}
static bool isEqual(const clang::CodeGen::TBAAPathTag &LHS,
const clang::CodeGen::TBAAPathTag &RHS) {
return LHS.BaseT == RHS.BaseT &&
LHS.AccessN == RHS.AccessN &&
LHS.Offset == RHS.Offset;
}
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CodeGenABITypes.cpp | //==--- CodeGenABITypes.cpp - Convert Clang types to LLVM types for ABI ----==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// CodeGenABITypes is a simple interface for getting LLVM types for
// the parameters and the return value of a function given the Clang
// types.
//
// The class is implemented as a public wrapper around the private
// CodeGenTypes class in lib/CodeGen.
//
//===----------------------------------------------------------------------===//
#include "clang/CodeGen/CodeGenABITypes.h"
#include "CodeGenModule.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/PreprocessorOptions.h"
using namespace clang;
using namespace CodeGen;
CodeGenABITypes::CodeGenABITypes(ASTContext &C,
llvm::Module &M,
const llvm::DataLayout &TD,
CoverageSourceInfo *CoverageInfo)
: CGO(new CodeGenOptions),
HSO(new HeaderSearchOptions),
PPO(new PreprocessorOptions),
CGM(new CodeGen::CodeGenModule(C, *HSO, *PPO, *CGO,
M, TD, C.getDiagnostics(),
CoverageInfo)) {
}
CodeGenABITypes::~CodeGenABITypes()
{
delete CGO;
delete CGM;
}
const CGFunctionInfo &
CodeGenABITypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
QualType receiverType) {
return CGM->getTypes().arrangeObjCMessageSendSignature(MD, receiverType);
}
const CGFunctionInfo &
CodeGenABITypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> Ty) {
return CGM->getTypes().arrangeFreeFunctionType(Ty);
}
const CGFunctionInfo &
CodeGenABITypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> Ty) {
return CGM->getTypes().arrangeFreeFunctionType(Ty);
}
const CGFunctionInfo &
CodeGenABITypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
const FunctionProtoType *FTP) {
return CGM->getTypes().arrangeCXXMethodType(RD, FTP);
}
const CGFunctionInfo &
CodeGenABITypes::arrangeFreeFunctionCall(CanQualType returnType,
ArrayRef<CanQualType> argTypes,
FunctionType::ExtInfo info,
RequiredArgs args) {
return CGM->getTypes().arrangeLLVMFunctionInfo(
returnType, /*IsInstanceMethod=*/false, /*IsChainCall=*/false, argTypes,
info, args);
}
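// A usage sketch (editor's illustration of hypothetical client code; the
// helper below is not part of the original source). It shows how a consumer
// might query an ABI-lowered signature without a full frontend setup.
#if 0
static void exampleQueryABI(clang::ASTContext &Ctx, llvm::Module &M,
                            clang::CanQual<clang::FunctionProtoType> FnTy) {
  // Build the ABI query object from an existing AST context and module.
  clang::CodeGen::CodeGenABITypes ABITypes(Ctx, M, M.getDataLayout(),
                                           /*CoverageInfo=*/nullptr);
  // Ask how a free function of type FnTy is lowered for the target ABI.
  const clang::CodeGen::CGFunctionInfo &FI =
      ABITypes.arrangeFreeFunctionType(FnTy);
  (void)FI; // Inspect FI.getReturnInfo() / FI.arg_begin() as needed.
}
#endif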
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp | //===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//
#include "CGRecordLayout.h"
#include "CGCXXABI.h"
#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;
namespace {
/// The CGRecordLowering is responsible for lowering an ASTRecordLayout to an
/// llvm::Type. Some of the lowering is straightforward, some is not. We
/// detail some of the complexities and weirdnesses below.
/// * LLVM does not have unions - Unions can, in theory, be represented by any
/// llvm::Type with correct size. We choose a field via a specific heuristic
/// and add padding if necessary.
/// * LLVM does not have bitfields - Bitfields are collected into contiguous
/// runs and allocated as a single storage type for the run. ASTRecordLayout
/// contains enough information to determine where the runs break. Microsoft
/// and Itanium follow different rules and use different codepaths.
/// * It is desired that, when possible, bitfields use the appropriate iN type
/// when lowered to llvm types. For example, unsigned x : 24 gets lowered to
/// i24. This isn't always possible because i24 has a storage size of 32 bits,
/// and if that extra byte of padding can be used we must use [i8 x 3]
/// instead of i24. The function clipTailPadding does this.
/// C++ examples that require clipping:
/// struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
/// struct A { int a : 24; }; // a must be clipped because a struct like B
/// could exist: struct B : A { char b; }; // b goes at offset 3
/// * Clang ignores 0 sized bitfields and 0 sized bases but *not* zero sized
/// fields. The existing asserts suggest that LLVM assumes that *every* field
/// has an underlying storage type. Therefore empty structures containing
/// zero sized subobjects such as empty records or zero sized arrays still get
/// a zero sized (empty struct) storage type.
/// * Clang reads the complete type rather than the base type when generating
/// code to access fields. Bitfields in tail position with tail padding may
/// be clipped in the base class but not the complete class (we may discover
/// that the tail padding is not used in the complete class.) However,
/// because LLVM reads from the complete type it can generate incorrect code
/// if we do not clip the tail padding off of the bitfield in the complete
/// layout. This introduces a somewhat awkward extra unnecessary clip stage.
/// The location of the clip is stored internally as a sentinel of type
/// SCISSOR. If LLVM were updated to read base types (which it probably
/// should because locations of things such as VBases are bogus in the llvm
/// type anyway) then we could eliminate the SCISSOR.
/// * Itanium allows nearly empty primary virtual bases. These bases don't
/// get their own storage because they're laid out as part of another base
/// or at the beginning of the structure. Determining if a VBase actually
/// gets storage awkwardly involves a walk of all bases.
/// * VFPtrs and VBPtrs do *not* make a record NotZeroInitializable.
struct CGRecordLowering {
// MemberInfo is a helper structure that contains information about a record
// member. In addition to the standard member types, there exists a
// sentinel member type that ensures correct rounding.
struct MemberInfo {
CharUnits Offset;
enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
llvm::Type *Data;
union {
const FieldDecl *FD;
const CXXRecordDecl *RD;
};
MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
const FieldDecl *FD = nullptr)
: Offset(Offset), Kind(Kind), Data(Data), FD(FD) {}
MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
const CXXRecordDecl *RD)
: Offset(Offset), Kind(Kind), Data(Data), RD(RD) {}
// MemberInfos are sorted so we define a < operator.
bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
};
// The constructor.
CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
// Short helper routines.
/// \brief Constructs a MemberInfo instance from an offset and llvm::Type *.
MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
return MemberInfo(Offset, MemberInfo::Field, Data);
}
/// The Microsoft bitfield layout rule allocates discrete storage
/// units of the field's formal type and only combines adjacent
/// fields of the same formal type. We want to emit a layout with
/// these discrete storage units instead of combining them into a
/// contiguous run.
bool isDiscreteBitFieldABI() {
return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
D->isMsStruct(Context);
}
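// Editor's illustration (a sketch, not part of the original source): for
//   struct S { short a : 4; int b : 4; };
// the Microsoft rule above yields separate i16 and i32 storage units because
// the formal types differ, while the Itanium rule merges both bitfields into
// a single 8-bit run backed by one i8 storage unit.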
/// The Itanium base layout rule allows virtual bases to overlap
/// other bases, which complicates layout in specific ways.
///
/// Note specifically that the ms_struct attribute doesn't change this.
bool isOverlappingVBaseABI() {
return !Context.getTargetInfo().getCXXABI().isMicrosoft();
}
/// \brief Wraps llvm::Type::getIntNTy with some implicit arguments.
llvm::Type *getIntNType(uint64_t NumBits) {
return llvm::Type::getIntNTy(Types.getLLVMContext(),
(unsigned)llvm::RoundUpToAlignment(NumBits, 8));
}
/// \brief Gets an llvm type of size NumBytes and alignment 1.
llvm::Type *getByteArrayType(CharUnits NumBytes) {
assert(!NumBytes.isZero() && "Empty byte arrays aren't allowed.");
llvm::Type *Type = llvm::Type::getInt8Ty(Types.getLLVMContext());
return NumBytes == CharUnits::One() ? Type :
(llvm::Type *)llvm::ArrayType::get(Type, NumBytes.getQuantity());
}
/// \brief Gets the storage type for a field decl and handles storage
/// for Itanium bitfields that are smaller than their declared type.
llvm::Type *getStorageType(const FieldDecl *FD) {
llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
if (!FD->isBitField()) return Type;
if (isDiscreteBitFieldABI()) return Type;
return getIntNType(std::min(FD->getBitWidthValue(Context),
(unsigned)Context.toBits(getSize(Type))));
}
/// \brief Gets the llvm base subobject type from a CXXRecordDecl.
llvm::Type *getStorageType(const CXXRecordDecl *RD) {
return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
}
CharUnits bitsToCharUnits(uint64_t BitOffset) {
return Context.toCharUnitsFromBits(BitOffset);
}
CharUnits getSize(llvm::Type *Type) {
return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
}
CharUnits getAlignment(llvm::Type *Type) {
return CharUnits::fromQuantity(DataLayout.getABITypeAlignment(Type));
}
bool isZeroInitializable(const FieldDecl *FD) {
return Types.isZeroInitializable(FD->getType());
}
bool isZeroInitializable(const RecordDecl *RD) {
return Types.isZeroInitializable(RD);
}
void appendPaddingBytes(CharUnits Size) {
if (!Size.isZero())
FieldTypes.push_back(getByteArrayType(Size));
}
uint64_t getFieldBitOffset(const FieldDecl *FD) {
return Layout.getFieldOffset(FD->getFieldIndex());
}
// Layout routines.
void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
llvm::Type *StorageType);
/// \brief Lowers an ASTRecordLayout to a llvm type.
void lower(bool NonVirtualBaseType);
void lowerUnion();
void accumulateFields();
void accumulateBitFields(RecordDecl::field_iterator Field,
RecordDecl::field_iterator FieldEnd);
void accumulateBases();
void accumulateVPtrs();
void accumulateVBases();
/// \brief Recursively searches all of the bases to find out if a vbase is
/// not the primary vbase of some base class.
bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
void calculateZeroInit();
/// \brief Lowers bitfield storage types to I8 arrays for bitfields with tail
/// padding that is or can potentially be used.
void clipTailPadding();
/// \brief Determines if we need a packed llvm struct.
void determinePacked(bool NVBaseType);
/// \brief Inserts padding everywhere it's needed.
void insertPadding();
/// \brief Fills out the structures that are ultimately consumed.
void fillOutputFields();
// Input memoization fields.
CodeGenTypes &Types;
const ASTContext &Context;
const RecordDecl *D;
const CXXRecordDecl *RD;
const ASTRecordLayout &Layout;
const llvm::DataLayout &DataLayout;
// Helpful intermediate data-structures.
std::vector<MemberInfo> Members;
// Output fields, consumed by CodeGenTypes::ComputeRecordLayout.
SmallVector<llvm::Type *, 16> FieldTypes;
llvm::DenseMap<const FieldDecl *, unsigned> Fields;
llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
bool IsZeroInitializable : 1;
bool IsZeroInitializableAsBase : 1;
bool Packed : 1;
private:
CGRecordLowering(const CGRecordLowering &) = delete;
void operator =(const CGRecordLowering &) = delete;
};
} // namespace
CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed)
: Types(Types), Context(Types.getContext()), D(D),
RD(dyn_cast<CXXRecordDecl>(D)),
Layout(Types.getContext().getASTRecordLayout(D)),
DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
IsZeroInitializableAsBase(true), Packed(Packed) {}
void CGRecordLowering::setBitFieldInfo(
const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
CGBitFieldInfo &Info = BitFields[FD->getCanonicalDecl()];
Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
Info.Offset = (unsigned)(getFieldBitOffset(FD) - Context.toBits(StartOffset));
Info.Size = FD->getBitWidthValue(Context);
Info.StorageSize = (unsigned)DataLayout.getTypeAllocSizeInBits(StorageType);
Info.StorageOffset = StartOffset;
if (Info.Size > Info.StorageSize)
Info.Size = Info.StorageSize;
// Reverse the bit offsets for big endian machines. Because we represent
// a bitfield as a single large integer load, we can imagine the bits
// counting from the most-significant-bit instead of the
// least-significant-bit.
if (DataLayout.isBigEndian())
Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);
}
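// Editor's example (a sketch, not part of the original source): for
//   struct S { unsigned a : 3; unsigned b : 5; };
// with i8 storage at offset 0, setBitFieldInfo records
//   a: { Offset = 0, Size = 3, StorageSize = 8 }
//   b: { Offset = 3, Size = 5, StorageSize = 8 }
// on a little-endian target; on a big-endian target the offsets become
//   a: Offset = 8 - (0 + 3) = 5 and b: Offset = 8 - (3 + 5) = 0.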
void CGRecordLowering::lower(bool NVBaseType) {
// The lowering process implemented in this function takes a variety of
// carefully ordered phases.
// 1) Store all members (fields and bases) in a list and sort them by offset.
// 2) Add a 1-byte capstone member at the Size of the structure.
// 3) Clip bitfield storages members if their tail padding is or might be
// used by another field or base. The clipping process uses the capstone
// by treating it as another object that occurs after the record.
// 4) Determine if the llvm-struct requires packing. It's important that this
// phase occur after clipping, because clipping changes the llvm type.
// This phase reads the offset of the capstone when determining packedness
// and updates the alignment of the capstone to be equal to the alignment
// of the record after doing so.
// 5) Insert padding everywhere it is needed. This phase requires 'Packed' to
// have been computed and needs to know the alignment of the record in
// order to understand if explicit tail padding is needed.
// 6) Remove the capstone, we don't need it anymore.
// 7) Determine if this record can be zero-initialized. This phase could have
// been placed anywhere after phase 1.
// 8) Format the complete list of members in a way that can be consumed by
// CodeGenTypes::ComputeRecordLayout.
CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
if (D->isUnion())
return lowerUnion();
accumulateFields();
// RD implies C++.
if (RD) {
accumulateVPtrs();
accumulateBases();
if (Members.empty())
return appendPaddingBytes(Size);
if (!NVBaseType)
accumulateVBases();
}
std::stable_sort(Members.begin(), Members.end());
#if 0 // HLSL Change - No padding for structure. Array offset will be handled when load/store is called
Members.push_back(StorageInfo(Size, getIntNType(8)));
clipTailPadding();
determinePacked(NVBaseType);
insertPadding();
Members.pop_back();
calculateZeroInit();
#endif // HLSL Change End
fillOutputFields();
}
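// Editor's walkthrough (a sketch, not part of the original source): for
//   struct S { int a : 24; char b; };
// phase 1 gathers an i24 storage member at offset 0 (for a) plus b at offset
// 3. Upstream, clipTailPadding() then rewrites the i24 storage to [3 x i8]
// because b occupies the i24's fourth (padding) byte, giving
//   %struct.S = type { [3 x i8], i8 }
// The HLSL build skips phases 2-7 above, so no capstone, clipping, packing,
// or explicit padding is applied here.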
void CGRecordLowering::lowerUnion() {
CharUnits LayoutSize = Layout.getSize();
llvm::Type *StorageType = nullptr;
bool SeenNamedMember = false;
// Iterate through the fields setting bitFieldInfo and the Fields array. Also
// locate the "most appropriate" storage type. The heuristic for finding the
// storage type isn't strictly necessary: the first (non-zero-length-bitfield)
// field's type would work fine and be simpler, but it would differ from what
// we've been doing and cause lit tests to change.
for (const auto *Field : D->fields()) {
if (Field->isBitField()) {
// Skip 0 sized bitfields.
if (Field->getBitWidthValue(Context) == 0)
continue;
llvm::Type *FieldType = getStorageType(Field);
if (LayoutSize < getSize(FieldType))
FieldType = getByteArrayType(LayoutSize);
setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
}
Fields[Field->getCanonicalDecl()] = 0;
llvm::Type *FieldType = getStorageType(Field);
// Compute zero-initializable status.
// This union might not be zero initialized: it may contain a pointer to
// data member which might have some exotic initialization sequence.
// If this is the case, then we ought not to try to come up with a "better"
// type; it might not be very easy to come up with a Constant which
// correctly initializes it.
if (!SeenNamedMember) {
SeenNamedMember = Field->getIdentifier();
if (!SeenNamedMember)
if (const auto *FieldRD =
dyn_cast_or_null<RecordDecl>(Field->getType()->getAsTagDecl()))
SeenNamedMember = FieldRD->findFirstNamedDataMember();
if (SeenNamedMember && !isZeroInitializable(Field)) {
IsZeroInitializable = IsZeroInitializableAsBase = false;
StorageType = FieldType;
}
}
// Because our union isn't zero initializable, we won't be getting a better
// storage type.
if (!IsZeroInitializable)
continue;
// Conditionally update our storage type if we've got a new "better" one.
if (!StorageType ||
getAlignment(FieldType) > getAlignment(StorageType) ||
(getAlignment(FieldType) == getAlignment(StorageType) &&
getSize(FieldType) > getSize(StorageType)))
StorageType = FieldType;
}
// If we have no storage type just pad to the appropriate size and return.
if (!StorageType)
return appendPaddingBytes(LayoutSize);
// If our storage size was bigger than our required size (can happen in the
// case of packed bitfields on Itanium) then just use an I8 array.
if (LayoutSize < getSize(StorageType))
StorageType = getByteArrayType(LayoutSize);
FieldTypes.push_back(StorageType);
appendPaddingBytes(LayoutSize - getSize(StorageType));
// Set packed if we need it.
if (LayoutSize % getAlignment(StorageType))
Packed = true;
}
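// Editor's example (a sketch, not part of the original source): for
//   union U { int i; float f; char c[8]; };
// the loop above keeps i32 as the storage type (f ties on alignment and
// size; c has a larger size but weaker alignment), then pads out to the
// 8-byte layout size:
//   %union.U = type { i32, [4 x i8] }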
void CGRecordLowering::accumulateFields() {
for (RecordDecl::field_iterator Field = D->field_begin(),
FieldEnd = D->field_end();
Field != FieldEnd;)
if (Field->isBitField()) {
RecordDecl::field_iterator Start = Field;
// Iterate to gather the list of bitfields.
for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
accumulateBitFields(Start, Field);
} else {
Members.push_back(MemberInfo(
bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
getStorageType(*Field), *Field));
++Field;
}
}
void
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
RecordDecl::field_iterator FieldEnd) {
// Run stores the first element of the current run of bitfields. FieldEnd is
// used as a special value to note that we don't have a current run. A
// bitfield run is a contiguous collection of bitfields that can be stored in
// the same storage block. Zero-sized bitfields and bitfields that would
// cross an alignment boundary break a run and start a new one.
RecordDecl::field_iterator Run = FieldEnd;
// Tail is the offset of the first bit off the end of the current run. It's
// used to determine if the ASTRecordLayout is treating these two bitfields as
// contiguous. StartBitOffset is the offset of the beginning of the run.
uint64_t StartBitOffset, Tail = 0;
if (isDiscreteBitFieldABI()) {
for (; Field != FieldEnd; ++Field) {
uint64_t BitOffset = getFieldBitOffset(*Field);
// Zero-width bitfields end runs.
if (Field->getBitWidthValue(Context) == 0) {
Run = FieldEnd;
continue;
}
llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
// If we don't have a run yet, or don't live within the previous run's
// allocated storage then we allocate some storage and start a new run.
if (Run == FieldEnd || BitOffset >= Tail) {
Run = Field;
StartBitOffset = BitOffset;
Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
// Add the storage member to the record. This must be added to the
// record before the bitfield members so that it gets laid out before
// the bitfields it contains get laid out.
Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
}
// Bitfields get the offset of their storage but come afterward and remain
// there after a stable sort.
Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
MemberInfo::Field, nullptr, *Field));
}
return;
}
for (;;) {
// Check to see if we need to start a new run.
if (Run == FieldEnd) {
// If we're out of fields, return.
if (Field == FieldEnd)
break;
// Any non-zero-length bitfield can start a new run.
if (Field->getBitWidthValue(Context) != 0) {
Run = Field;
StartBitOffset = getFieldBitOffset(*Field);
Tail = StartBitOffset + Field->getBitWidthValue(Context);
}
++Field;
continue;
}
// Add bitfields to the run as long as they qualify.
if (Field != FieldEnd && Field->getBitWidthValue(Context) != 0 &&
Tail == getFieldBitOffset(*Field)) {
Tail += Field->getBitWidthValue(Context);
++Field;
continue;
}
// We've hit a break-point in the run and need to emit a storage field.
llvm::Type *Type = getIntNType(Tail - StartBitOffset);
// Add the storage member to the record and set the bitfield info for all of
// the bitfields in the run. Bitfields get the offset of their storage but
// come afterward and remain there after a stable sort.
Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
for (; Run != Field; ++Run)
Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
MemberInfo::Field, nullptr, *Run));
Run = FieldEnd;
}
}
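// Editor's example (a sketch, not part of the original source):
//   struct S { unsigned a : 3; unsigned b : 5; unsigned : 0; unsigned c : 2; };
// a and b are contiguous, so they share one run with i8 storage at offset 0.
// The zero-width bitfield ends that run, and since the ASTRecordLayout
// realigns c to the next int boundary, c begins a new run with its own i8
// storage at offset 4.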
void CGRecordLowering::accumulateBases() {
// If we've got a primary virtual base, we need to add it with the bases.
if (Layout.isPrimaryBaseVirtual()) {
const CXXRecordDecl *BaseDecl = Layout.getPrimaryBase();
Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::Base,
getStorageType(BaseDecl), BaseDecl));
}
// Accumulate the non-virtual bases.
for (const auto &Base : RD->bases()) {
if (Base.isVirtual())
continue;
// Bases can be zero-sized even if not technically empty if they
// contain only a trailing array member.
const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
if (!BaseDecl->isEmpty() &&
!Context.getASTRecordLayout(BaseDecl).getSize().isZero())
Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
}
}
void CGRecordLowering::accumulateVPtrs() {
if (Layout.hasOwnVFPtr())
Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
llvm::FunctionType::get(getIntNType(32), /*isVarArg=*/true)->
getPointerTo()->getPointerTo()));
if (Layout.hasOwnVBPtr())
Members.push_back(MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
llvm::Type::getInt32PtrTy(Types.getLLVMContext())));
}
void CGRecordLowering::accumulateVBases() {
CharUnits ScissorOffset = Layout.getNonVirtualSize();
// In the Itanium ABI, it's possible to place a vbase at a dsize that is
// smaller than the nvsize. Here we check to see if such a base is placed
// before the nvsize and set the scissor offset to that, instead of the
// nvsize.
if (isOverlappingVBaseABI())
for (const auto &Base : RD->vbases()) {
const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
if (BaseDecl->isEmpty())
continue;
// If the vbase is a primary virtual base of some base, then it doesn't
// get its own storage location but instead lives inside of that base.
if (Context.isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
continue;
ScissorOffset = std::min(ScissorOffset,
Layout.getVBaseClassOffset(BaseDecl));
}
Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor, nullptr,
RD));
for (const auto &Base : RD->vbases()) {
const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
if (BaseDecl->isEmpty())
continue;
CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
// If the vbase is a primary virtual base of some base, then it doesn't
// get its own storage location but instead lives inside of that base.
if (isOverlappingVBaseABI() &&
Context.isNearlyEmpty(BaseDecl) &&
!hasOwnStorage(RD, BaseDecl)) {
Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
BaseDecl));
continue;
}
// If we've got a vtordisp, add it as a storage type.
if (Layout.getVBaseOffsetsMap().find(BaseDecl)->second.hasVtorDisp())
Members.push_back(StorageInfo(Offset - CharUnits::fromQuantity(4),
getIntNType(32)));
Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
getStorageType(BaseDecl), BaseDecl));
}
}
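// Editor's note (an illustration, not part of the original source): in the
// Microsoft ABI a vtordisp is a 4-byte displacement that the layout places
// immediately before a virtual base, which is why the storage added above is
// an i32 at Offset - 4.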
bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
const CXXRecordDecl *Query) {
const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
return false;
for (const auto &Base : Decl->bases())
if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
return false;
return true;
}
// HLSL Change: Remove unused functions
#if 0
void CGRecordLowering::calculateZeroInit() {
for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
MemberEnd = Members.end();
IsZeroInitializableAsBase && Member != MemberEnd; ++Member) {
if (Member->Kind == MemberInfo::Field) {
if (!Member->FD || isZeroInitializable(Member->FD))
continue;
IsZeroInitializable = IsZeroInitializableAsBase = false;
} else if (Member->Kind == MemberInfo::Base ||
Member->Kind == MemberInfo::VBase) {
if (isZeroInitializable(Member->RD))
continue;
IsZeroInitializable = false;
if (Member->Kind == MemberInfo::Base)
IsZeroInitializableAsBase = false;
}
}
}
void CGRecordLowering::clipTailPadding() {
std::vector<MemberInfo>::iterator Prior = Members.begin();
CharUnits Tail = getSize(Prior->Data);
for (std::vector<MemberInfo>::iterator Member = Prior + 1,
MemberEnd = Members.end();
Member != MemberEnd; ++Member) {
// Only members with data and the scissor can cut into tail padding.
if (!Member->Data && Member->Kind != MemberInfo::Scissor)
continue;
if (Member->Offset < Tail) {
assert(Prior->Kind == MemberInfo::Field && !Prior->FD &&
"Only storage fields have tail padding!");
Prior->Data = getByteArrayType(bitsToCharUnits(llvm::RoundUpToAlignment(
cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
}
if (Member->Data)
Prior = Member;
Tail = Prior->Offset + getSize(Prior->Data);
}
}
void CGRecordLowering::determinePacked(bool NVBaseType) {
if (Packed)
return;
CharUnits Alignment = CharUnits::One();
CharUnits NVAlignment = CharUnits::One();
CharUnits NVSize =
!NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero();
for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
MemberEnd = Members.end();
Member != MemberEnd; ++Member) {
if (!Member->Data)
continue;
// If any member falls at an offset that is not a multiple of its alignment,
// then the entire record must be packed.
if (Member->Offset % getAlignment(Member->Data))
Packed = true;
if (Member->Offset < NVSize)
NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
Alignment = std::max(Alignment, getAlignment(Member->Data));
}
// If the size of the record (the capstone's offset) is not a multiple of the
// record's alignment, it must be packed.
if (Members.back().Offset % Alignment)
Packed = true;
// If the size of the non-virtual sub-object is not a multiple of the
// non-virtual sub-object's alignment, it must be packed. We cannot have a
// packed non-virtual sub-object and an unpacked complete object, or vice
// versa.
if (NVSize % NVAlignment)
Packed = true;
// Update the alignment of the sentinel.
if (!Packed)
Members.back().Data = getIntNType(Context.toBits(Alignment));
}
void CGRecordLowering::insertPadding() {
std::vector<std::pair<CharUnits, CharUnits> > Padding;
CharUnits Size = CharUnits::Zero();
for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
MemberEnd = Members.end();
Member != MemberEnd; ++Member) {
if (!Member->Data)
continue;
CharUnits Offset = Member->Offset;
assert(Offset >= Size);
// Insert padding if we need to.
if (Offset != Size.RoundUpToAlignment(Packed ? CharUnits::One() :
getAlignment(Member->Data)))
Padding.push_back(std::make_pair(Size, Offset - Size));
Size = Offset + getSize(Member->Data);
}
if (Padding.empty())
return;
// Add the padding to the Members list and sort it.
for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
Pad = Padding.begin(), PadEnd = Padding.end();
Pad != PadEnd; ++Pad)
Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
std::stable_sort(Members.begin(), Members.end());
}
#endif
void CGRecordLowering::fillOutputFields() {
for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
MemberEnd = Members.end();
Member != MemberEnd; ++Member) {
if (Member->Data)
FieldTypes.push_back(Member->Data);
if (Member->Kind == MemberInfo::Field) {
if (Member->FD)
Fields[Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
// A field without storage must be a bitfield.
if (!Member->Data)
setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
} else if (Member->Kind == MemberInfo::Base)
NonVirtualBases[Member->RD] = FieldTypes.size() - 1;
else if (Member->Kind == MemberInfo::VBase)
VirtualBases[Member->RD] = FieldTypes.size() - 1;
}
}
CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
const FieldDecl *FD,
uint64_t Offset, uint64_t Size,
uint64_t StorageSize,
CharUnits StorageOffset) {
// This function is vestigial from CGRecordLayoutBuilder days but is still
// used in GCObjCRuntime.cpp. That usage has a "fixme" attached to it that
// when addressed will allow for the removal of this function.
llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
CharUnits TypeSizeInBytes =
CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
if (Size > TypeSizeInBits) {
// We have a wide bit-field. The extra bits are only used for padding, so
// if we have a bitfield of type T, with size N:
//
// T t : N;
//
// We can just assume that it's:
//
// T t : sizeof(T);
//
Size = TypeSizeInBits;
}
// Reverse the bit offsets for big endian machines. Because we represent
// a bitfield as a single large integer load, we can imagine the bits
// counting from the most-significant-bit instead of the
// least-significant-bit.
if (Types.getDataLayout().isBigEndian()) {
Offset = StorageSize - (Offset + Size);
}
return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
}
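// Editor's example (a sketch, not part of the original source): given a wide
// bitfield such as
//   int t : 40;   // on a target with 32-bit int
// Size is clamped from 40 to TypeSizeInBits (32) above; the remaining 8 bits
// are pure padding and are never loaded or stored.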
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
llvm::StructType *Ty) {
CGRecordLowering Builder(*this, D, /*Packed=*/false);
Builder.lower(/*NonVirtualBaseType=*/false);
// If we're in C++, compute the base subobject type.
llvm::StructType *BaseTy = nullptr;
if (isa<CXXRecordDecl>(D) && !D->isUnion() && !D->hasAttr<FinalAttr>()) {
BaseTy = Ty;
if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
BaseBuilder.lower(/*NonVirtualBaseType=*/true);
BaseTy = llvm::StructType::create(
getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
addRecordTypeName(D, BaseTy, ".base");
// BaseTy and Ty must agree on their packedness for getLLVMFieldNo to work
// on both of them with the same index.
assert(Builder.Packed == BaseBuilder.Packed &&
"Non-virtual and complete types must agree on packedness");
}
}
// Fill in the struct *after* computing the base type. Filling in the body
// signifies that the type is no longer opaque and record layout is complete,
// but we may need to recursively lay out D while laying D out as a base type.
Ty->setBody(Builder.FieldTypes, Builder.Packed);
CGRecordLayout *RL =
new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
Builder.IsZeroInitializableAsBase);
RL->NonVirtualBases.swap(Builder.NonVirtualBases);
RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);
// Add all the field numbers.
RL->FieldInfo.swap(Builder.Fields);
// Add bitfield info.
RL->BitFields.swap(Builder.BitFields);
// Dump the layout, if requested.
if (getContext().getLangOpts().DumpRecordLayouts) {
llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
llvm::outs() << "Record: ";
D->dump(llvm::outs());
llvm::outs() << "\nLayout: ";
RL->print(llvm::outs());
}
#ifndef NDEBUG
// Verify that the computed LLVM struct size matches the AST layout size.
#if 0 // HLSL Change - No padding for structure. Disable validation check.
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);
uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
"Type size mismatch!");
if (BaseTy) {
CharUnits NonVirtualSize = Layout.getNonVirtualSize();
uint64_t AlignedNonVirtualTypeSizeInBits =
getContext().toBits(NonVirtualSize);
assert(AlignedNonVirtualTypeSizeInBits ==
getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
"Type size mismatch!");
}
// Verify that the LLVM and AST field offsets agree.
llvm::StructType *ST =
dyn_cast<llvm::StructType>(RL->getLLVMType());
const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);
const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
RecordDecl::field_iterator it = D->field_begin();
for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
const FieldDecl *FD = *it;
// For non-bit-fields, just check that the LLVM struct offset matches the
// AST offset.
if (!FD->isBitField()) {
unsigned FieldNo = RL->getLLVMFieldNo(FD);
assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
"Invalid field offset!");
continue;
}
// Ignore unnamed bit-fields.
if (!FD->getDeclName())
continue;
// Don't inspect zero-length bitfields.
if (FD->getBitWidthValue(getContext()) == 0)
continue;
const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));
// Unions have overlapping elements dictating their layout, but for
// non-unions we can verify that this section of the layout is the exact
// expected size.
if (D->isUnion()) {
// For unions we verify that the start is zero and the size
// is in-bounds. However, on BE systems, the offset may be non-zero, but
// the size + offset should match the storage size in that case as it
// "starts" at the back.
if (getDataLayout().isBigEndian())
assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
Info.StorageSize &&
"Big endian union bitfield does not end at the back");
else
assert(Info.Offset == 0 &&
"Little endian union bitfield with a non-zero offset");
assert(Info.StorageSize <= SL->getSizeInBits() &&
"Union not large enough for bitfield storage");
} else {
assert(Info.StorageSize ==
getDataLayout().getTypeAllocSizeInBits(ElementTy) &&
"Storage size does not match the element type size");
}
assert(Info.Size > 0 && "Empty bitfield!");
assert(static_cast<unsigned>(Info.Offset) + Info.Size <= Info.StorageSize &&
"Bitfield outside of its allocated storage");
}
#endif // HLSL Change End
#endif
return RL;
}
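// Editor's illustration (a sketch assuming a typical 64-bit Itanium target,
// not part of the original source): for
//   struct A { virtual ~A(); int i; char c; };
// the complete-object type %struct.A spans all 16 bytes, while the
// %struct.A.base subobject type stops at the 13-byte non-virtual size so
// that a derived class may place its own members in A's tail padding.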
void CGRecordLayout::print(raw_ostream &OS) const {
OS << "<CGRecordLayout\n";
OS << " LLVMType:" << *CompleteObjectType << "\n";
if (BaseSubobjectType)
OS << " NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
OS << " BitFields:[\n";
// Print bit-field infos in declaration order.
std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
it = BitFields.begin(), ie = BitFields.end();
it != ie; ++it) {
const RecordDecl *RD = it->first->getParent();
unsigned Index = 0;
for (RecordDecl::field_iterator
it2 = RD->field_begin(); *it2 != it->first; ++it2)
++Index;
BFIs.push_back(std::make_pair(Index, &it->second));
}
llvm::array_pod_sort(BFIs.begin(), BFIs.end());
for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
OS.indent(4);
BFIs[i].second->print(OS);
OS << "\n";
}
OS << "]>\n";
}
void CGRecordLayout::dump() const {
print(llvm::errs());
}
void CGBitFieldInfo::print(raw_ostream &OS) const {
OS << "<CGBitFieldInfo"
<< " Offset:" << Offset
<< " Size:" << Size
<< " IsSigned:" << IsSigned
<< " StorageSize:" << StorageSize
<< " StorageOffset:" << StorageOffset.getQuantity() << ">";
}
void CGBitFieldInfo::dump() const {
print(llvm::errs());
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGCUDARuntime.h | //===----- CGCUDARuntime.h - Interface to CUDA Runtimes ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides an abstract class for CUDA code generation. Concrete
// subclasses of this implement code generation for specific CUDA
// runtime libraries.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H
namespace llvm {
class Function;
}
namespace clang {
class CUDAKernelCallExpr;
namespace CodeGen {
class CodeGenFunction;
class CodeGenModule;
class FunctionArgList;
class ReturnValueSlot;
class RValue;
class CGCUDARuntime {
protected:
CodeGenModule &CGM;
public:
CGCUDARuntime(CodeGenModule &CGM) : CGM(CGM) {}
virtual ~CGCUDARuntime();
virtual RValue EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
const CUDAKernelCallExpr *E,
ReturnValueSlot ReturnValue);
/// Emits a kernel launch stub.
virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) = 0;
/// Constructs and returns a module initialization function or nullptr if it's
/// not needed. Must be called after all kernels have been emitted.
virtual llvm::Function *makeModuleCtorFunction() = 0;
/// Returns a module cleanup function or nullptr if it's not needed.
/// Must be called after ModuleCtorFunction.
virtual llvm::Function *makeModuleDtorFunction() = 0;
};
/// Creates an instance of a CUDA runtime class.
CGCUDARuntime *CreateNVCUDARuntime(CodeGenModule &CGM);
}
}
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGCUDARuntime.cpp | //===----- CGCUDARuntime.cpp - Interface to CUDA Runtimes -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides an abstract class for CUDA code generation. Concrete
// subclasses of this implement code generation for specific CUDA
// runtime libraries.
//
//===----------------------------------------------------------------------===//
#include "CGCUDARuntime.h"
#include "CGCall.h"
#include "CodeGenFunction.h"
#include "clang/AST/Decl.h"
#include "clang/AST/ExprCXX.h"
using namespace clang;
using namespace CodeGen;
CGCUDARuntime::~CGCUDARuntime() {}
RValue CGCUDARuntime::EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
const CUDAKernelCallExpr *E,
ReturnValueSlot ReturnValue) {
llvm::BasicBlock *ConfigOKBlock = CGF.createBasicBlock("kcall.configok");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("kcall.end");
CodeGenFunction::ConditionalEvaluation eval(CGF);
CGF.EmitBranchOnBoolExpr(E->getConfig(), ContBlock, ConfigOKBlock,
/*TrueCount=*/0);
eval.begin(CGF);
CGF.EmitBlock(ConfigOKBlock);
const Decl *TargetDecl = nullptr;
if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
TargetDecl = DRE->getDecl();
}
}
llvm::Value *Callee = CGF.EmitScalarExpr(E->getCallee());
CGF.EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue, TargetDecl);
CGF.EmitBranch(ContBlock);
CGF.EmitBlock(ContBlock);
eval.end(CGF);
return RValue::get(nullptr);
}
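// Editor's sketch of the generated control flow (not part of the original
// source): for a call like kernel<<<grid, block>>>(args), E->getConfig() is
// a call such as cudaConfigureCall(grid, block), and the branch above only
// reaches kcall.configok (and thus the device-stub call) when configuration
// succeeds by returning 0:
//
//   %conf = call i32 @cudaConfigureCall(...)
//   %fail = icmp ne i32 %conf, 0
//   br i1 %fail, label %kcall.end, label %kcall.configok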
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGOpenCLRuntime.cpp | //===----- CGOpenCLRuntime.cpp - Interface to OpenCL Runtimes -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides an abstract class for OpenCL code generation. Concrete
// subclasses of this implement code generation for specific OpenCL
// runtime libraries.
//
//===----------------------------------------------------------------------===//
#include "CGOpenCLRuntime.h"
#include "CodeGenFunction.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include <assert.h>
using namespace clang;
using namespace CodeGen;
// HLSL Change Starts
// No OpenCL codegen support, so simply skip all of this compilation.
// Here are enough stubs to link the current targets.
#if 0
// HLSL Change Ends
CGOpenCLRuntime::~CGOpenCLRuntime() {}
void CGOpenCLRuntime::EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF,
const VarDecl &D) {
return CGF.EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
}
llvm::Type *CGOpenCLRuntime::convertOpenCLSpecificType(const Type *T) {
assert(T->isOpenCLSpecificType() &&
"Not an OpenCL specific type!");
llvm::LLVMContext& Ctx = CGM.getLLVMContext();
uint32_t ImgAddrSpc =
CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);
switch (cast<BuiltinType>(T)->getKind()) {
default:
llvm_unreachable("Unexpected opencl builtin type!");
return nullptr;
case BuiltinType::OCLImage1d:
return llvm::PointerType::get(llvm::StructType::create(
Ctx, "opencl.image1d_t"), ImgAddrSpc);
case BuiltinType::OCLImage1dArray:
return llvm::PointerType::get(llvm::StructType::create(
Ctx, "opencl.image1d_array_t"), ImgAddrSpc);
case BuiltinType::OCLImage1dBuffer:
return llvm::PointerType::get(llvm::StructType::create(
Ctx, "opencl.image1d_buffer_t"), ImgAddrSpc);
case BuiltinType::OCLImage2d:
return llvm::PointerType::get(llvm::StructType::create(
Ctx, "opencl.image2d_t"), ImgAddrSpc);
case BuiltinType::OCLImage2dArray:
return llvm::PointerType::get(llvm::StructType::create(
Ctx, "opencl.image2d_array_t"), ImgAddrSpc);
case BuiltinType::OCLImage3d:
return llvm::PointerType::get(llvm::StructType::create(
Ctx, "opencl.image3d_t"), ImgAddrSpc);
case BuiltinType::OCLSampler:
return llvm::IntegerType::get(Ctx, 32);
case BuiltinType::OCLEvent:
return llvm::PointerType::get(llvm::StructType::create(
Ctx, "opencl.event_t"), 0);
}
}
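// Editor's example (a sketch, not part of the original source): with this
// mapping, a kernel parameter declared as image2d_t lowers to an opaque
// pointer in the global address space, e.g.
//   %opencl.image2d_t addrspace(1)*   ; when opencl_global maps to addrspace 1
// while sampler_t lowers to a plain i32.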
#endif // HLSL Change
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGHLSLMS.cpp | //===----- CGHLSLMS.cpp - Interface to HLSL Runtime ----------------===//
///////////////////////////////////////////////////////////////////////////////
// //
// CGHLSLMS.cpp //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
// This provides a class for HLSL code generation. //
// //
///////////////////////////////////////////////////////////////////////////////
#include "CGHLSLRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "dxc/DXIL/DxilOperations.h"
#include "dxc/DXIL/DxilTypeSystem.h"
#include "dxc/DXIL/DxilUtil.h"
#include "dxc/HLSL/HLMatrixType.h"
#include "dxc/HLSL/HLModule.h"
#include "dxc/HLSL/HLOperations.h"
#include "dxc/HlslIntrinsicOp.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/HlslTypes.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Lex/HLSLMacroExpander.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <memory>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include "dxc/DXIL/DxilCBuffer.h"
#include "dxc/DXIL/DxilResourceProperties.h"
#include "dxc/DxilRootSignature/DxilRootSignature.h"
#include "dxc/HLSL/DxilExportMap.h"
#include "dxc/HLSL/DxilGenerationPass.h" // support pause/resume passes
#include "dxc/HLSL/HLSLExtensionsCodegenHelper.h"
#include "dxc/Support/WinIncludes.h" // stream support
#include "dxc/dxcapi.h" // stream support
#include "clang/Parse/ParseHLSL.h" // root sig would be in Parser if part of lang
#include "CGHLSLMSHelper.h"
using namespace clang;
using namespace CodeGen;
using namespace hlsl;
using namespace llvm;
using std::unique_ptr;
using namespace CGHLSLMSHelper;
static const bool KeepUndefinedTrue =
true; // Keep interpolation mode undefined if not set explicitly.
namespace {
class CGMSHLSLRuntime : public CGHLSLRuntime {
private:
/// Convenience reference to LLVM Context
llvm::LLVMContext &Context;
/// Convenience reference to the current module
llvm::Module &TheModule;
HLModule *m_pHLModule;
llvm::Type *CBufferType;
uint32_t globalCBIndex;
// TODO: verify how minprec works.
llvm::DataLayout dataLayout;
// decl map to constant id for program
llvm::DenseMap<HLSLBufferDecl *, uint32_t> constantBufMap;
// Map from Constant to register bindings.
llvm::DenseMap<llvm::Constant *,
llvm::SmallVector<std::pair<DXIL::ResourceClass, unsigned>, 1>>
constantRegBindingMap;
// Adds a value to DxilObjectProperties if it's a resource or wave matrix.
// Returns true if added to one.
bool AddValToPropertyMap(Value *V, QualType Ty);
CGHLSLMSHelper::DxilObjectProperties objectProperties;
// Maps a value to its node properties.
llvm::MapVector<llvm::Argument *, hlsl::NodeInputRecordProps>
NodeInputRecordParams;
llvm::MapVector<llvm::Argument *, hlsl::NodeProps> NodeOutputParams;
bool m_bDebugInfo;
bool m_bIsLib;
// For library, m_ExportMap maps from internal name to zero or more renames
dxilutil::ExportMap m_ExportMap;
HLCBuffer &GetGlobalCBuffer() {
return *static_cast<HLCBuffer *>(&(m_pHLModule->GetCBuffer(globalCBIndex)));
}
void AddConstantToCB(GlobalVariable *CV, StringRef Name, QualType Ty,
unsigned LowerBound, HLCBuffer &CB);
void AddConstant(VarDecl *constDecl, HLCBuffer &CB);
uint32_t AddSampler(VarDecl *samplerDecl);
uint32_t AddUAVSRV(VarDecl *decl, hlsl::DxilResourceBase::Class resClass);
bool SetUAVSRV(SourceLocation loc, hlsl::DxilResourceBase::Class resClass,
DxilResource *hlslRes, QualType QualTy);
uint32_t AddCBuffer(HLSLBufferDecl *D);
void AddCBufferDecls(DeclContext *DC, HLCBuffer *CB);
uint32_t AddConstantBufferView(VarDecl *D);
hlsl::DxilResourceBase::Class TypeToClass(clang::QualType Ty);
void CreateSubobject(DXIL::SubobjectKind kind, const StringRef name,
clang::Expr **args, unsigned int argCount,
DXIL::HitGroupType hgType = (DXIL::HitGroupType)(-1));
bool GetAsConstantString(clang::Expr *expr, StringRef *value,
bool failWhenEmpty = false);
bool GetAsConstantUInt32(clang::Expr *expr, uint32_t *value);
std::vector<StringRef> ParseSubobjectExportsAssociations(StringRef exports);
EntryFunctionInfo Entry;
StringMap<PatchConstantInfo> patchConstantFunctionMap;
std::unordered_map<Function *, std::unique_ptr<DxilFunctionProps>>
patchConstantFunctionPropsMap;
std::unordered_map<Function *, const clang::HLSLPatchConstantFuncAttr *>
HSEntryPatchConstantFuncAttr;
// Map to save entry functions.
StringMap<EntryFunctionInfo> entryFunctionMap;
// Map to save static global init expressions.
std::unordered_map<Expr *, GlobalVariable *> staticConstGlobalInitMap;
std::unordered_map<GlobalVariable *, std::vector<Constant *>>
staticConstGlobalInitListMap;
std::unordered_map<GlobalVariable *, Function *> staticConstGlobalCtorMap;
// List for functions with clip plane.
std::vector<Function *> clipPlaneFuncList;
std::unordered_map<Value *, DebugLoc> debugInfoMap;
DxilRootSignatureVersion rootSigVer;
Value *EmitHLSLMatrixLoad(CGBuilderTy &Builder, Value *Ptr, QualType Ty);
void EmitHLSLMatrixStore(CGBuilderTy &Builder, Value *Val, Value *DestPtr,
QualType Ty);
// Flatten the val into scalar val and push into elts and eltTys.
void FlattenValToInitList(CodeGenFunction &CGF, SmallVector<Value *, 4> &elts,
SmallVector<QualType, 4> &eltTys, QualType Ty,
Value *val);
// Push every value on InitListExpr into EltValList and EltTyList.
void ScanInitList(CodeGenFunction &CGF, InitListExpr *E,
SmallVector<Value *, 4> &EltValList,
SmallVector<QualType, 4> &EltTyList);
void FlattenAggregatePtrToGepList(CodeGenFunction &CGF, Value *Ptr,
SmallVector<Value *, 4> &idxList,
clang::QualType Type, llvm::Type *Ty,
SmallVector<Value *, 4> &GepList,
SmallVector<QualType, 4> &EltTyList);
void LoadElements(CodeGenFunction &CGF, ArrayRef<Value *> Ptrs,
ArrayRef<QualType> QualTys, SmallVector<Value *, 4> &Vals);
void ConvertAndStoreElements(CodeGenFunction &CGF, ArrayRef<Value *> SrcVals,
ArrayRef<QualType> SrcQualTys,
ArrayRef<Value *> DstPtrs,
ArrayRef<QualType> DstQualTys);
void EmitHLSLAggregateCopy(CodeGenFunction &CGF, llvm::Value *SrcPtr,
llvm::Value *DestPtr,
SmallVector<Value *, 4> &idxList,
clang::QualType SrcType, clang::QualType DestType,
llvm::Type *Ty);
void EmitHLSLSplat(CodeGenFunction &CGF, Value *SrcVal, llvm::Value *DestPtr,
SmallVector<Value *, 4> &idxList, QualType Type,
QualType SrcType, llvm::Type *Ty);
void EmitHLSLRootSignature(HLSLRootSignatureAttr *RSA, Function *Fn,
DxilFunctionProps &props);
void CheckParameterAnnotation(SourceLocation SLoc,
const DxilParameterAnnotation ¶mInfo,
bool isPatchConstantFunction);
void CheckParameterAnnotation(SourceLocation SLoc,
DxilParamInputQual paramQual,
llvm::StringRef semFullName,
bool isPatchConstantFunction);
void RemapObsoleteSemantic(DxilParameterAnnotation ¶mInfo,
bool isPatchConstantFunction);
SourceLocation SetSemantic(const NamedDecl *decl,
DxilParameterAnnotation ¶mInfo);
hlsl::InterpolationMode GetInterpMode(const Decl *decl, CompType compType,
bool bKeepUndefined);
hlsl::CompType GetCompType(const BuiltinType *BT);
// save intrinsic opcode
std::vector<std::pair<Function *, unsigned>> m_IntrinsicMap;
void AddHLSLIntrinsicOpcodeToFunction(Function *, unsigned opcode);
// Type annotation related.
unsigned ConstructStructAnnotation(DxilStructAnnotation *annotation,
DxilPayloadAnnotation *payloadAnnotation,
const RecordDecl *RD,
DxilTypeSystem &dxilTypeSys);
unsigned AddTypeAnnotation(QualType Ty, DxilTypeSystem &dxilTypeSys,
unsigned &arrayEltSize);
DxilResourceProperties BuildResourceProperty(QualType resTy);
void ConstructFieldAttributedAnnotation(DxilFieldAnnotation &fieldAnnotation,
QualType fieldTy,
bool bDefaultRowMajor);
std::unordered_map<Constant *, DxilFieldAnnotation> m_ConstVarAnnotationMap;
StringSet<> m_PreciseOutputSet;
DenseSet<Value *> mismatchGLCArgSet;
DenseMap<Function *, ScopeInfo> m_ScopeMap;
ScopeInfo *GetScopeInfo(Function *F);
public:
CGMSHLSLRuntime(CodeGenModule &CGM);
/// Add resource to the program.
void addResource(Decl *D) override;
void addSubobject(Decl *D) override;
void FinishCodeGen() override;
bool IsTrivalInitListExpr(CodeGenFunction &CGF, InitListExpr *E) override;
Value *EmitHLSLInitListExpr(CodeGenFunction &CGF, InitListExpr *E,
Value *DestPtr) override;
Constant *EmitHLSLConstInitListExpr(CodeGenModule &CGM,
InitListExpr *E) override;
RValue EmitHLSLBuiltinCallExpr(CodeGenFunction &CGF, const FunctionDecl *FD,
const CallExpr *E,
ReturnValueSlot ReturnValue) override;
void EmitHLSLOutParamConversionInit(
CodeGenFunction &CGF, const FunctionDecl *FD, const CallExpr *E,
llvm::SmallVector<LValue, 8> &castArgList,
llvm::SmallVector<const Stmt *, 8> &argList,
llvm::SmallVector<LValue, 8> &lifetimeCleanupList,
const std::function<void(const VarDecl *, llvm::Value *)> &TmpArgMap)
override;
void EmitHLSLOutParamConversionCopyBack(
CodeGenFunction &CGF, llvm::SmallVector<LValue, 8> &castArgList,
llvm::SmallVector<LValue, 8> &lifetimeCleanupList) override;
Value *EmitHLSLMatrixOperationCall(CodeGenFunction &CGF, const clang::Expr *E,
llvm::Type *RetType,
ArrayRef<Value *> paramList) override;
void EmitHLSLDiscard(CodeGenFunction &CGF) override;
BranchInst *EmitHLSLCondBreak(CodeGenFunction &CGF, llvm::Function *F,
llvm::BasicBlock *DestBB,
llvm::BasicBlock *AltBB) override;
Value *EmitHLSLMatrixSubscript(CodeGenFunction &CGF, llvm::Type *RetType,
Value *Ptr, Value *Idx, QualType Ty) override;
Value *EmitHLSLMatrixElement(CodeGenFunction &CGF, llvm::Type *RetType,
ArrayRef<Value *> paramList,
QualType Ty) override;
Value *EmitHLSLMatrixLoad(CodeGenFunction &CGF, Value *Ptr,
QualType Ty) override;
void EmitHLSLMatrixStore(CodeGenFunction &CGF, Value *Val, Value *DestPtr,
QualType Ty) override;
void EmitHLSLAggregateCopy(CodeGenFunction &CGF, llvm::Value *SrcPtr,
llvm::Value *DestPtr, clang::QualType Ty) override;
void EmitHLSLFlatConversion(CodeGenFunction &CGF, Value *Val, Value *DestPtr,
QualType Ty, QualType SrcTy) override;
Value *EmitHLSLLiteralCast(CodeGenFunction &CGF, Value *Src, QualType SrcType,
QualType DstType) override;
void EmitHLSLFlatConversionAggregateCopy(CodeGenFunction &CGF,
llvm::Value *SrcPtr,
clang::QualType SrcTy,
llvm::Value *DestPtr,
clang::QualType DestTy) override;
void AddHLSLFunctionInfo(llvm::Function *, const FunctionDecl *FD) override;
void AddHLSLNodeRecordTypeInfo(const clang::ParmVarDecl *parmDecl,
hlsl::NodeIOProperties &node);
void EmitHLSLFunctionProlog(llvm::Function *,
const FunctionDecl *FD) override;
void AddControlFlowHint(CodeGenFunction &CGF, const Stmt &S,
llvm::TerminatorInst *TI,
ArrayRef<const Attr *> Attrs) override;
void MarkPotentialResourceTemp(CodeGenFunction &CGF, llvm::Value *V,
clang::QualType QaulTy) override;
void FinishAutoVar(CodeGenFunction &CGF, const VarDecl &D,
llvm::Value *V) override;
const clang::Expr *CheckReturnStmtGLCMismatch(
CodeGenFunction &CGF, const Expr *RV, const clang::ReturnStmt &S,
clang::QualType FnRetTy,
const std::function<void(const VarDecl *, llvm::Value *)> &TmpArgMap)
override;
void MarkIfStmt(CodeGenFunction &CGF, BasicBlock *endIfBB) override;
void MarkSwitchStmt(CodeGenFunction &CGF, SwitchInst *switchInst,
BasicBlock *endSwitch) override;
void MarkReturnStmt(CodeGenFunction &CGF, BasicBlock *bbWithRet) override;
void MarkCleanupBlock(CodeGenFunction &CGF,
llvm::BasicBlock *cleanupBB) override;
void MarkLoopStmt(CodeGenFunction &CGF, BasicBlock *loopContinue,
BasicBlock *loopExit) override;
CGHLSLMSHelper::Scope *MarkScopeEnd(CodeGenFunction &CGF) override;
bool NeedHLSLMartrixCastForStoreOp(
const clang::Decl *TD,
llvm::SmallVector<llvm::Value *, 16> &IRCallArgs) override;
void EmitHLSLMartrixCastForStoreOp(
CodeGenFunction &CGF, SmallVector<llvm::Value *, 16> &IRCallArgs,
llvm::SmallVector<clang::QualType, 16> &ArgTys) override;
/// Get or add constant to the program
HLCBuffer &GetOrCreateCBuffer(HLSLBufferDecl *D);
};
} // namespace
//------------------------------------------------------------------------------
//
// CGMSHLSLRuntime methods.
//
CGMSHLSLRuntime::CGMSHLSLRuntime(CodeGenModule &CGM)
: CGHLSLRuntime(CGM), Context(CGM.getLLVMContext()),
TheModule(CGM.getModule()),
// FIXME: Can we avoid the need for this fake CBufferType?
CBufferType(
llvm::StructType::create(TheModule.getContext(), "ConstantBuffer")),
dataLayout(CGM.getLangOpts().UseMinPrecision
? hlsl::DXIL::kLegacyLayoutString
: hlsl::DXIL::kNewLayoutString),
Entry() {
const hlsl::ShaderModel *SM =
hlsl::ShaderModel::GetByName(CGM.getCodeGenOpts().HLSLProfile.c_str());
// Only accept a valid shader model 6.x.
if (!SM->IsValid() || SM->GetMajor() != 6) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID =
Diags.getCustomDiagID(DiagnosticsEngine::Error, "invalid profile %0");
Diags.Report(DiagID) << CGM.getCodeGenOpts().HLSLProfile;
return;
}
if (CGM.getCodeGenOpts().HLSLValidatorMajorVer != 0) {
// Check validator version against minimum for target profile:
unsigned MinMajor, MinMinor;
SM->GetMinValidatorVersion(MinMajor, MinMinor);
if (DXIL::CompareVersions(CGM.getCodeGenOpts().HLSLValidatorMajorVer,
CGM.getCodeGenOpts().HLSLValidatorMinorVer,
MinMajor, MinMinor) < 0) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"validator version %0,%1 does not support target profile.");
Diags.Report(DiagID) << CGM.getCodeGenOpts().HLSLValidatorMajorVer
<< CGM.getCodeGenOpts().HLSLValidatorMinorVer;
return;
}
}
m_bIsLib = SM->IsLib();
// TODO: add AllResourceBound.
if (CGM.getCodeGenOpts().HLSLAvoidControlFlow &&
!CGM.getCodeGenOpts().HLSLAllResourcesBound) {
if (SM->IsSM51Plus()) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID =
Diags.getCustomDiagID(DiagnosticsEngine::Error,
"Gfa option cannot be used in SM_5_1+ unless "
"all_resources_bound flag is specified");
Diags.Report(DiagID);
}
}
// Create HLModule.
const bool skipInit = true;
m_pHLModule = &TheModule.GetOrCreateHLModule(skipInit);
// Precise Output.
for (auto &preciseOutput : CGM.getCodeGenOpts().HLSLPreciseOutputs) {
m_PreciseOutputSet.insert(StringRef(preciseOutput).lower());
}
// Set Option.
HLOptions opts;
opts.bIEEEStrict = CGM.getCodeGenOpts().UnsafeFPMath;
opts.bDisableOptimizations = CGM.getCodeGenOpts().DisableLLVMOpts;
opts.bAllResourcesBound = CGM.getCodeGenOpts().HLSLAllResourcesBound;
opts.bResMayAlias = CGM.getCodeGenOpts().HLSLResMayAlias;
opts.PackingStrategy = CGM.getCodeGenOpts().HLSLSignaturePackingStrategy;
opts.bLegacyResourceReservation =
CGM.getCodeGenOpts().HLSLLegacyResourceReservation;
opts.bForceZeroStoreLifetimes =
CGM.getCodeGenOpts().HLSLForceZeroStoreLifetimes;
opts.bDefaultRowMajor = CGM.getLangOpts().HLSLDefaultRowMajor;
opts.bUseMinPrecision = CGM.getLangOpts().UseMinPrecision;
opts.bDX9CompatMode = CGM.getLangOpts().EnableDX9CompatMode;
opts.bFXCCompatMode = CGM.getLangOpts().EnableFXCCompatMode;
m_pHLModule->SetHLOptions(opts);
m_pHLModule->GetOP()->InitWithMinPrecision(opts.bUseMinPrecision);
m_pHLModule->GetTypeSystem().SetMinPrecision(opts.bUseMinPrecision);
m_pHLModule->SetAutoBindingSpace(CGM.getCodeGenOpts().HLSLDefaultSpace);
m_pHLModule->SetValidatorVersion(CGM.getCodeGenOpts().HLSLValidatorMajorVer,
CGM.getCodeGenOpts().HLSLValidatorMinorVer);
m_bDebugInfo =
CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::FullDebugInfo;
// set profile
m_pHLModule->SetShaderModel(SM);
// set entry name
if (!SM->IsLib())
m_pHLModule->SetEntryFunctionName(CGM.getCodeGenOpts().HLSLEntryFunction);
// set root signature version.
if (CGM.getLangOpts().RootSigMinor == 0) {
rootSigVer = hlsl::DxilRootSignatureVersion::Version_1_0;
} else {
DXASSERT(CGM.getLangOpts().RootSigMinor == 1,
"else CGMSHLSLRuntime Constructor needs to be updated");
rootSigVer = hlsl::DxilRootSignatureVersion::Version_1_1;
}
DXASSERT(CGM.getLangOpts().RootSigMajor == 1,
"else CGMSHLSLRuntime Constructor needs to be updated");
// add globalCB
unique_ptr<HLCBuffer> CB = llvm::make_unique<HLCBuffer>(false, false);
std::string globalCBName = "$Globals";
CB->SetGlobalSymbol(nullptr);
CB->SetGlobalName(globalCBName);
globalCBIndex = m_pHLModule->GetCBuffers().size();
CB->SetID(globalCBIndex);
CB->SetRangeSize(1);
CB->SetLowerBound(UINT_MAX);
DXVERIFY_NOMSG(globalCBIndex == m_pHLModule->AddCBuffer(std::move(CB)));
// set Float Denorm Mode
m_pHLModule->SetFloat32DenormMode(CGM.getCodeGenOpts().HLSLFloat32DenormMode);
// set DefaultLinkage
m_pHLModule->SetDefaultLinkage(CGM.getCodeGenOpts().DefaultLinkage);
// Fill in m_ExportMap, which maps from internal name to zero or more renames
m_ExportMap.clear();
std::string errors;
llvm::raw_string_ostream os(errors);
if (!m_ExportMap.ParseExports(CGM.getCodeGenOpts().HLSLLibraryExports, os)) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "Error parsing -exports options: %0");
Diags.Report(DiagID) << os.str();
}
}
void CGMSHLSLRuntime::AddHLSLIntrinsicOpcodeToFunction(Function *F,
unsigned opcode) {
m_IntrinsicMap.emplace_back(F, opcode);
}
void CGMSHLSLRuntime::CheckParameterAnnotation(
SourceLocation SLoc, const DxilParameterAnnotation ¶mInfo,
bool isPatchConstantFunction) {
if (!paramInfo.HasSemanticString()) {
return;
}
llvm::StringRef semFullName = paramInfo.GetSemanticStringRef();
DxilParamInputQual paramQual = paramInfo.GetParamInputQual();
if (paramQual == DxilParamInputQual::Inout) {
CheckParameterAnnotation(SLoc, DxilParamInputQual::In, semFullName,
isPatchConstantFunction);
CheckParameterAnnotation(SLoc, DxilParamInputQual::Out, semFullName,
isPatchConstantFunction);
return;
}
CheckParameterAnnotation(SLoc, paramQual, semFullName,
isPatchConstantFunction);
}
void CGMSHLSLRuntime::CheckParameterAnnotation(SourceLocation SLoc,
DxilParamInputQual paramQual,
llvm::StringRef semFullName,
bool isPatchConstantFunction) {
const ShaderModel *SM = m_pHLModule->GetShaderModel();
DXIL::SigPointKind sigPoint =
SigPointFromInputQual(paramQual, SM->GetKind(), isPatchConstantFunction);
llvm::StringRef semName;
unsigned semIndex;
Semantic::DecomposeNameAndIndex(semFullName, &semName, &semIndex);
const Semantic *pSemantic =
Semantic::GetByName(semName, sigPoint, SM->GetMajor(), SM->GetMinor());
if (pSemantic->IsInvalid()) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "invalid semantic '%0' for %1 %2.%3");
Diags.Report(SLoc, DiagID)
<< semName << SM->GetKindName() << SM->GetMajor() << SM->GetMinor();
}
}
SourceLocation
CGMSHLSLRuntime::SetSemantic(const NamedDecl *decl,
DxilParameterAnnotation ¶mInfo) {
for (const hlsl::UnusualAnnotation *it : decl->getUnusualAnnotations()) {
if (it->getKind() == hlsl::UnusualAnnotation::UA_SemanticDecl) {
const hlsl::SemanticDecl *sd = cast<hlsl::SemanticDecl>(it);
paramInfo.SetSemanticString(sd->SemanticName);
if (m_PreciseOutputSet.count(StringRef(sd->SemanticName).lower()))
paramInfo.SetPrecise();
return it->Loc;
}
}
return SourceLocation();
}
static DXIL::TessellatorDomain StringToDomain(StringRef domain) {
return llvm::StringSwitch<DXIL::TessellatorDomain>(domain)
.Case("isoline", DXIL::TessellatorDomain::IsoLine)
.Case("tri", DXIL::TessellatorDomain::Tri)
.Case("quad", DXIL::TessellatorDomain::Quad)
.Default(DXIL::TessellatorDomain::Undefined);
}
static DXIL::TessellatorPartitioning StringToPartitioning(StringRef partition) {
return llvm::StringSwitch<DXIL::TessellatorPartitioning>(partition)
.Case("integer", DXIL::TessellatorPartitioning::Integer)
.Case("pow2", DXIL::TessellatorPartitioning::Pow2)
.Case("fractional_even", DXIL::TessellatorPartitioning::FractionalEven)
.Case("fractional_odd", DXIL::TessellatorPartitioning::FractionalOdd)
.Default(DXIL::TessellatorPartitioning::Undefined);
}
static DXIL::TessellatorOutputPrimitive
StringToTessOutputPrimitive(StringRef primitive) {
return llvm::StringSwitch<DXIL::TessellatorOutputPrimitive>(primitive)
.Case("point", DXIL::TessellatorOutputPrimitive::Point)
.Case("line", DXIL::TessellatorOutputPrimitive::Line)
.Case("triangle_cw", DXIL::TessellatorOutputPrimitive::TriangleCW)
.Case("triangle_ccw", DXIL::TessellatorOutputPrimitive::TriangleCCW)
.Default(DXIL::TessellatorOutputPrimitive::Undefined);
}
static DXIL::MeshOutputTopology StringToMeshOutputTopology(StringRef topology) {
return llvm::StringSwitch<DXIL::MeshOutputTopology>(topology)
.Case("line", DXIL::MeshOutputTopology::Line)
.Case("triangle", DXIL::MeshOutputTopology::Triangle)
.Default(DXIL::MeshOutputTopology::Undefined);
}
static DxilSampler::SamplerKind
StringToSamplerKind(llvm::StringRef samplerKind) {
return llvm::StringSwitch<DxilSampler::SamplerKind>(samplerKind)
.Case("SamplerState", DxilSampler::SamplerKind::Default)
.Case("SamplerComparisonState", DxilSampler::SamplerKind::Comparison)
.Default(DxilSampler::SamplerKind::Invalid);
}
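// Computes the size in bytes a matrix occupies in a constant buffer under
// legacy packing: each register row is 16 bytes, and only the last row
// (row-major) or column (column-major) is counted at its exact size.
// 64-bit rows/columns wider than 16 bytes double the stride to 32 bytes.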
static unsigned GetMatrixSizeInCB(QualType Ty, bool defaultRowMajor,
bool b64Bit) {
bool bRowMajor;
if (!hlsl::HasHLSLMatOrientation(Ty, &bRowMajor))
bRowMajor = defaultRowMajor;
unsigned row, col;
hlsl::GetHLSLMatRowColCount(Ty, row, col);
unsigned EltSize = b64Bit ? 8 : 4;
// Align to 4 * 4 bytes (one 16-byte register).
unsigned alignment = 4 * 4;
if (bRowMajor) {
unsigned rowSize = EltSize * col;
// 3x64bit or 4x64bit align to 32 bytes.
if (rowSize > alignment)
alignment <<= 1;
return alignment * (row - 1) + col * EltSize;
} else {
unsigned rowSize = EltSize * row;
// 3x64bit or 4x64bit align to 32 bytes.
if (rowSize > alignment)
alignment <<= 1;
return alignment * (col - 1) + row * EltSize;
}
}
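// Maps a clang builtin type to the corresponding DXIL component type,
// honoring snorm/unorm qualifiers on floating-point types. Returns Invalid
// for builtin types HLSL does not use.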
static CompType::Kind BuiltinTyToCompTy(const BuiltinType *BTy, bool bSNorm,
bool bUNorm) {
CompType::Kind kind = CompType::Kind::Invalid;
switch (BTy->getKind()) {
// HLSL Changes begin
case BuiltinType::Int8_4Packed:
kind = CompType::Kind::PackedS8x32;
break;
case BuiltinType::UInt8_4Packed:
kind = CompType::Kind::PackedU8x32;
break;
// HLSL Changes end
case BuiltinType::UInt:
kind = CompType::Kind::U32;
break;
case BuiltinType::Min16UInt: // HLSL Change
case BuiltinType::UShort:
kind = CompType::Kind::U16;
break;
case BuiltinType::ULongLong:
kind = CompType::Kind::U64;
break;
case BuiltinType::Int:
kind = CompType::Kind::I32;
break;
// HLSL Changes begin
case BuiltinType::Min12Int:
case BuiltinType::Min16Int:
// HLSL Changes end
case BuiltinType::Short:
kind = CompType::Kind::I16;
break;
case BuiltinType::LongLong:
kind = CompType::Kind::I64;
break;
// HLSL Changes begin
case BuiltinType::Min10Float:
case BuiltinType::Min16Float:
// HLSL Changes end
case BuiltinType::Half:
if (bSNorm)
kind = CompType::Kind::SNormF16;
else if (bUNorm)
kind = CompType::Kind::UNormF16;
else
kind = CompType::Kind::F16;
break;
case BuiltinType::HalfFloat: // HLSL Change
case BuiltinType::Float:
if (bSNorm)
kind = CompType::Kind::SNormF32;
else if (bUNorm)
kind = CompType::Kind::UNormF32;
else
kind = CompType::Kind::F32;
break;
case BuiltinType::Double:
if (bSNorm)
kind = CompType::Kind::SNormF64;
else if (bUNorm)
kind = CompType::Kind::UNormF64;
else
kind = CompType::Kind::F64;
break;
case BuiltinType::Bool:
kind = CompType::Kind::I1;
break;
default:
// Other types not used by HLSL.
break;
}
return kind;
}
namespace {
MatrixOrientation GetMatrixMajor(QualType Ty, bool bDefaultRowMajor) {
DXASSERT_NOMSG(hlsl::IsHLSLMatType(Ty));
bool bIsRowMajor = bDefaultRowMajor;
HasHLSLMatOrientation(Ty, &bIsRowMajor);
return bIsRowMajor ? MatrixOrientation::RowMajor
: MatrixOrientation::ColumnMajor;
}
QualType GetArrayEltType(ASTContext &Context, QualType Ty) {
while (const clang::ArrayType *ArrayTy = Context.getAsArrayType(Ty))
Ty = ArrayTy->getElementType();
return Ty;
}
bool IsTextureBufferViewName(StringRef keyword) {
return keyword == "TextureBuffer";
}
bool IsTextureBufferView(clang::QualType Ty, clang::ASTContext &context) {
Ty = Ty.getCanonicalType();
if (const clang::ArrayType *arrayType = context.getAsArrayType(Ty)) {
return IsTextureBufferView(arrayType->getElementType(), context);
} else if (const RecordType *RT = Ty->getAsStructureType()) {
return IsTextureBufferViewName(RT->getDecl()->getName());
} else if (const RecordType *RT = Ty->getAs<RecordType>()) {
if (const ClassTemplateSpecializationDecl *templateDecl =
dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl())) {
return IsTextureBufferViewName(templateDecl->getName());
}
}
return false;
}
} // namespace
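// Builds DxilResourceProperties for a resource-typed QualType by
// constructing a temporary resource object (UAV/SRV/Sampler/CBuffer) and
// extracting its properties. Returns default properties for non-resources.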
DxilResourceProperties CGMSHLSLRuntime::BuildResourceProperty(QualType resTy) {
resTy = GetArrayEltType(CGM.getContext(), resTy);
const RecordType *RT = resTy->getAs<RecordType>();
DxilResourceProperties RP;
if (!RT) {
return RP;
}
RecordDecl *RD = RT->getDecl();
SourceLocation loc = RD->getLocation();
hlsl::DxilResourceBase::Class resClass = TypeToClass(resTy);
if (resClass == DXIL::ResourceClass::Invalid)
return RP;
llvm::Type *Ty = CGM.getTypes().ConvertType(resTy);
switch (resClass) {
case DXIL::ResourceClass::UAV: {
DxilResource UAV;
// TODO: save globalcoherent to variable in EmitHLSLBuiltinCallExpr.
SetUAVSRV(loc, resClass, &UAV, resTy);
UAV.SetGlobalSymbol(UndefValue::get(Ty->getPointerTo()));
RP = resource_helper::loadPropsFromResourceBase(&UAV);
} break;
case DXIL::ResourceClass::SRV: {
DxilResource SRV;
SetUAVSRV(loc, resClass, &SRV, resTy);
SRV.SetGlobalSymbol(UndefValue::get(Ty->getPointerTo()));
RP = resource_helper::loadPropsFromResourceBase(&SRV);
} break;
case DXIL::ResourceClass::Sampler: {
DxilSampler::SamplerKind kind = StringToSamplerKind(RD->getName());
DxilSampler Sampler;
Sampler.SetSamplerKind(kind);
RP = resource_helper::loadPropsFromResourceBase(&Sampler);
} break;
case DXIL::ResourceClass::CBuffer: {
DxilCBuffer CB;
CB.SetGlobalSymbol(UndefValue::get(Ty->getPointerTo()));
if (IsTextureBufferView(resTy, CGM.getContext()))
CB.SetKind(DXIL::ResourceKind::TBuffer);
DxilTypeSystem &typeSys = m_pHLModule->GetTypeSystem();
unsigned arrayEltSize = 0;
QualType ResultTy = hlsl::GetHLSLResourceResultType(resTy);
unsigned Size = AddTypeAnnotation(ResultTy, typeSys, arrayEltSize);
CB.SetSize(Size);
RP = resource_helper::loadPropsFromResourceBase(&CB);
} break;
default:
break;
}
return RP;
}
bool CGMSHLSLRuntime::AddValToPropertyMap(Value *V, QualType Ty) {
return objectProperties.AddResource(V, BuildResourceProperty(Ty));
}
void CGMSHLSLRuntime::ConstructFieldAttributedAnnotation(
DxilFieldAnnotation &fieldAnnotation, QualType fieldTy,
bool bDefaultRowMajor) {
QualType Ty = fieldTy;
if (Ty->isReferenceType())
Ty = Ty.getNonReferenceType();
// Get element type.
Ty = GetArrayEltType(CGM.getContext(), Ty);
QualType EltTy = Ty;
if (hlsl::IsHLSLMatType(Ty)) {
DxilMatrixAnnotation Matrix;
Matrix.Orientation = GetMatrixMajor(Ty, bDefaultRowMajor);
hlsl::GetHLSLMatRowColCount(Ty, Matrix.Rows, Matrix.Cols);
fieldAnnotation.SetMatrixAnnotation(Matrix);
EltTy = hlsl::GetHLSLMatElementType(Ty);
}
if (hlsl::IsHLSLVecType(Ty)) {
unsigned rows, cols;
hlsl::GetRowsAndColsForAny(Ty, rows, cols);
fieldAnnotation.SetVectorSize(cols);
EltTy = hlsl::GetHLSLVecElementType(Ty);
}
if (IsHLSLResourceType(Ty)) {
fieldAnnotation.SetResourceProperties(BuildResourceProperty(Ty));
}
bool bSNorm = false;
bool bUNorm = false;
if (HasHLSLUNormSNorm(Ty, &bSNorm) && !bSNorm)
bUNorm = true;
if (EltTy->isBuiltinType()) {
const BuiltinType *BTy = EltTy->getAs<BuiltinType>();
CompType::Kind kind = BuiltinTyToCompTy(BTy, bSNorm, bUNorm);
fieldAnnotation.SetCompType(kind);
} else if (EltTy->isEnumeralType()) {
const EnumType *ETy = EltTy->getAs<EnumType>();
QualType type = ETy->getDecl()->getIntegerType();
if (const BuiltinType *BTy =
dyn_cast<BuiltinType>(type->getCanonicalTypeInternal()))
fieldAnnotation.SetCompType(BuiltinTyToCompTy(BTy, bSNorm, bUNorm));
} else {
DXASSERT(!bSNorm && !bUNorm,
"snorm/unorm on invalid type, validate at handleHLSLTypeAttr");
}
}
static void ConstructFieldInterpolation(DxilFieldAnnotation &fieldAnnotation,
FieldDecl *fieldDecl) {
// Keep undefined for interpMode here.
InterpolationMode InterpMode = {fieldDecl->hasAttr<HLSLNoInterpolationAttr>(),
fieldDecl->hasAttr<HLSLLinearAttr>(),
fieldDecl->hasAttr<HLSLNoPerspectiveAttr>(),
fieldDecl->hasAttr<HLSLCentroidAttr>(),
fieldDecl->hasAttr<HLSLSampleAttr>()};
if (InterpMode.GetKind() != InterpolationMode::Kind::Undefined)
fieldAnnotation.SetInterpolationMode(InterpMode);
}
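// Aligns a cbuffer offset following legacy packing rules: arrays, structs,
// and matrices spanning more than one register row must start at a 16-byte
// boundary, while scalars and vectors only need to avoid straddling a
// register boundary.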
static unsigned AlignBaseOffset(unsigned baseOffset, unsigned size, QualType Ty,
bool bDefaultRowMajor) {
// Do not align resource or node types, since they don't actually occupy
// cbuffer space.
if (IsHLSLResourceType(Ty) || IsHLSLNodeType(Ty))
return baseOffset;
bool needNewAlign = Ty->isArrayType();
if (IsHLSLMatType(Ty)) {
bool bRowMajor = false;
if (!hlsl::HasHLSLMatOrientation(Ty, &bRowMajor))
bRowMajor = bDefaultRowMajor;
unsigned row, col;
hlsl::GetHLSLMatRowColCount(Ty, row, col);
needNewAlign |= !bRowMajor && col > 1;
needNewAlign |= bRowMajor && row > 1;
} else if (Ty->isStructureOrClassType() && !hlsl::IsHLSLVecType(Ty)) {
needNewAlign = true;
}
unsigned scalarSizeInBytes = 4;
const clang::BuiltinType *BT = Ty->getAs<clang::BuiltinType>();
if (hlsl::IsHLSLVecMatType(Ty)) {
BT = hlsl::GetElementTypeOrType(Ty)->getAs<clang::BuiltinType>();
}
if (BT) {
if (BT->getKind() == clang::BuiltinType::Kind::Double ||
BT->getKind() == clang::BuiltinType::Kind::LongLong ||
BT->getKind() == clang::BuiltinType::Kind::ULongLong)
scalarSizeInBytes = 8;
else if (BT->getKind() == clang::BuiltinType::Kind::Half ||
BT->getKind() == clang::BuiltinType::Kind::Short ||
BT->getKind() == clang::BuiltinType::Kind::UShort)
scalarSizeInBytes = 2;
}
return AlignBufferOffsetInLegacy(baseOffset, size, scalarSizeInBytes,
needNewAlign);
}
static unsigned AlignBaseOffset(QualType Ty, unsigned baseOffset,
bool bDefaultRowMajor,
CodeGen::CodeGenModule &CGM,
llvm::DataLayout &layout) {
QualType paramTy = Ty.getCanonicalType();
if (const ReferenceType *RefType = dyn_cast<ReferenceType>(paramTy))
paramTy = RefType->getPointeeType();
// Get size.
llvm::Type *Type = CGM.getTypes().ConvertType(paramTy);
unsigned size = layout.getTypeAllocSize(Type);
return AlignBaseOffset(baseOffset, size, paramTy, bDefaultRowMajor);
}
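// Populates a DxilStructAnnotation (and optional payload annotation) for a
// record: base classes are added as leading fields, then each field gets a
// cbuffer offset, component type, interpolation mode, and semantic, with
// bitfields accumulated into runs on their containing field. Returns the
// struct's total cbuffer size.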
unsigned CGMSHLSLRuntime::ConstructStructAnnotation(
DxilStructAnnotation *annotation, DxilPayloadAnnotation *payloadAnnotation,
const RecordDecl *RD, DxilTypeSystem &dxilTypeSys) {
unsigned fieldIdx = 0;
unsigned CBufferOffset = 0;
bool bDefaultRowMajor = m_pHLModule->GetHLOptions().bDefaultRowMajor;
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
// If template, save template args
if (const ClassTemplateSpecializationDecl *templateSpecializationDecl =
dyn_cast<ClassTemplateSpecializationDecl>(CXXRD)) {
const clang::TemplateArgumentList &args =
templateSpecializationDecl->getTemplateInstantiationArgs();
for (unsigned i = 0; i < args.size(); ++i) {
DxilTemplateArgAnnotation &argAnnotation =
annotation->GetTemplateArgAnnotation(i);
const clang::TemplateArgument &arg = args[i];
switch (arg.getKind()) {
case clang::TemplateArgument::ArgKind::Type:
argAnnotation.SetType(CGM.getTypes().ConvertType(arg.getAsType()));
break;
case clang::TemplateArgument::ArgKind::Integral:
argAnnotation.SetIntegral(arg.getAsIntegral().getExtValue());
break;
default:
break;
}
}
}
if (CXXRD->getNumBases()) {
// Add base as field.
for (const auto &I : CXXRD->bases()) {
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
QualType parentTy = QualType(BaseDecl->getTypeForDecl(), 0);
// Process the base type to make sure its size is computed.
unsigned arrayEltSize = 0;
unsigned size = AddTypeAnnotation(parentTy, dxilTypeSys, arrayEltSize);
// Align offset.
if (size)
CBufferOffset = AlignBaseOffset(parentTy, CBufferOffset,
bDefaultRowMajor, CGM, dataLayout);
llvm::StructType *baseType =
cast<llvm::StructType>(CGM.getTypes().ConvertType(parentTy));
DxilStructAnnotation *baseAnnotation =
dxilTypeSys.GetStructAnnotation(baseType);
if (size || (baseAnnotation && !baseAnnotation->IsEmptyStruct())) {
DxilFieldAnnotation &fieldAnnotation =
annotation->GetFieldAnnotation(fieldIdx++);
fieldAnnotation.SetCBufferOffset(CBufferOffset);
fieldAnnotation.SetFieldName(BaseDecl->getNameAsString());
}
// Update offset.
CBufferOffset += size;
}
}
}
unsigned CBufferSize = CBufferOffset;
for (RecordDecl::field_iterator Field = RD->field_begin(),
FieldEnd = RD->field_end();
Field != FieldEnd;) {
if (Field->isBitField()) {
// TODO(?): Consider refactoring, as this branch duplicates much
// of the logic of CGRecordLowering::accumulateBitFields().
DXASSERT(CGM.getLangOpts().HLSLVersion > hlsl::LangStd::v2015,
"We should have already ensured we have no bitfields.");
CodeGenTypes &Types = CGM.getTypes();
ASTContext &Context = Types.getContext();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
const llvm::DataLayout &DataLayout = Types.getDataLayout();
RecordDecl::field_iterator End = Field;
for (++End; End != FieldEnd && End->isBitField(); ++End)
;
std::vector<DxilFieldAnnotation> BitFields;
RecordDecl::field_iterator Run = End;
uint64_t StartBitOffset = Layout.getFieldOffset(Field->getFieldIndex());
uint64_t Tail = 0;
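// Run marks the first bitfield of the current storage unit; Tail is the
// bit offset just past the end of that unit's allocated storage.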
for (; Field != End; ++Field) {
uint64_t BitOffset = Layout.getFieldOffset(Field->getFieldIndex());
// Zero-width bitfields end runs.
if (Field->getBitWidthValue(Context) == 0) {
Run = End;
continue;
}
llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
// If we don't have a run yet, or don't live within the previous run's
// allocated storage then we allocate some storage and start a new run.
if (Run == End || BitOffset >= Tail) {
// Flush accumulated bitfields into the previous field's annotation.
if (BitOffset >= Tail && BitOffset > 0) {
DxilFieldAnnotation &curFieldAnnotation =
annotation->GetFieldAnnotation(fieldIdx - 1);
curFieldAnnotation.SetBitFields(BitFields);
BitFields.clear();
}
Run = Field;
StartBitOffset = BitOffset;
Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
QualType fieldTy = Field->getType();
// Align offset.
CBufferOffset = AlignBaseOffset(fieldTy, CBufferOffset,
bDefaultRowMajor, CGM, dataLayout);
DxilFieldAnnotation &fieldAnnotation =
annotation->GetFieldAnnotation(fieldIdx++);
ConstructFieldAttributedAnnotation(fieldAnnotation, fieldTy,
bDefaultRowMajor);
fieldAnnotation.SetCBufferOffset(CBufferOffset);
unsigned arrayEltSize = 0;
// Process the field type so its size is computed.
unsigned size = AddTypeAnnotation(fieldTy, dxilTypeSys, arrayEltSize);
// Update offset.
CBufferOffset += size;
}
DxilFieldAnnotation bitfieldAnnotation;
bitfieldAnnotation.SetBitFieldWidth(Field->getBitWidthValue(Context));
QualType FieldTy = Field->getType().getCanonicalType();
const BuiltinType *BTy = FieldTy->getAs<BuiltinType>();
if (!BTy) {
// Should be enum type.
EnumDecl *Decl = FieldTy->getAs<EnumType>()->getDecl();
BTy = Decl->getPromotionType()->getAs<BuiltinType>();
}
CompType::Kind kind =
BuiltinTyToCompTy(BTy, /*bSNorm*/ false, /*bUNorm*/ false);
bitfieldAnnotation.SetCompType(kind);
bitfieldAnnotation.SetFieldName(Field->getName());
bitfieldAnnotation.SetCBufferOffset(
(unsigned)(BitOffset - StartBitOffset));
BitFields.emplace_back(bitfieldAnnotation);
}
if (!BitFields.empty()) {
DxilFieldAnnotation &curFieldAnnotation =
annotation->GetFieldAnnotation(fieldIdx - 1);
curFieldAnnotation.SetBitFields(BitFields);
BitFields.clear();
}
CBufferSize = CBufferOffset;
continue; // Field has already been advanced past bitfields
}
FieldDecl *fieldDecl = *Field;
std::string fieldSemName = "";
QualType fieldTy = fieldDecl->getType();
// Align offset.
CBufferOffset = AlignBaseOffset(fieldTy, CBufferOffset, bDefaultRowMajor,
CGM, dataLayout);
DxilFieldAnnotation &fieldAnnotation =
annotation->GetFieldAnnotation(fieldIdx++);
ConstructFieldAttributedAnnotation(fieldAnnotation, fieldTy,
bDefaultRowMajor);
// Try to get info from fieldDecl.
const hlsl::ConstantPacking *packOffset = nullptr;
for (const hlsl::UnusualAnnotation *it :
fieldDecl->getUnusualAnnotations()) {
switch (it->getKind()) {
case hlsl::UnusualAnnotation::UA_SemanticDecl: {
const hlsl::SemanticDecl *sd = cast<hlsl::SemanticDecl>(it);
fieldSemName = sd->SemanticName;
} break;
case hlsl::UnusualAnnotation::UA_ConstantPacking: {
packOffset = cast<hlsl::ConstantPacking>(it);
CBufferOffset = packOffset->Subcomponent << 2;
CBufferOffset += packOffset->ComponentOffset;
// Change to byte.
CBufferOffset <<= 2;
} break;
case hlsl::UnusualAnnotation::UA_RegisterAssignment: {
// Register assignment only works on global constants.
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"location semantics cannot be specified on members.");
Diags.Report(it->Loc, DiagID);
return 0;
} break;
case hlsl::UnusualAnnotation::UA_PayloadAccessQualifier: {
// Forward payload access qualifiers to fieldAnnotation.
if (payloadAnnotation) {
const hlsl::PayloadAccessAnnotation *annotation =
cast<hlsl::PayloadAccessAnnotation>(it);
DxilPayloadFieldAnnotation &payloadFieldAnnotation =
payloadAnnotation->GetFieldAnnotation(fieldIdx - 1);
payloadFieldAnnotation.SetCompType(
fieldAnnotation.GetCompType().GetKind());
for (auto stage : annotation->ShaderStages) {
payloadFieldAnnotation.AddPayloadFieldQualifier(
stage, annotation->qualifier);
}
}
} break;
default:
llvm_unreachable("only semantic for input/output");
break;
}
}
// Process the field type so its size is computed.
unsigned arrayEltSize = 0;
unsigned size =
AddTypeAnnotation(fieldDecl->getType(), dxilTypeSys, arrayEltSize);
// Align offset.
if (size) {
unsigned offset = AlignBaseOffset(fieldTy, CBufferOffset,
bDefaultRowMajor, CGM, dataLayout);
if (packOffset && CBufferOffset != offset) {
// The custom packoffset is misaligned for this type, or this alignment
// logic is wrong.
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"custom offset mis-aligned.");
Diags.Report(packOffset->Loc, DiagID);
return 0;
}
CBufferOffset = offset;
}
ConstructFieldInterpolation(fieldAnnotation, fieldDecl);
if (fieldDecl->hasAttr<HLSLPreciseAttr>())
fieldAnnotation.SetPrecise();
fieldAnnotation.SetCBufferOffset(CBufferOffset);
fieldAnnotation.SetFieldName(fieldDecl->getName());
if (!fieldSemName.empty()) {
fieldAnnotation.SetSemanticString(fieldSemName);
if (m_PreciseOutputSet.count(StringRef(fieldSemName).lower()))
fieldAnnotation.SetPrecise();
}
// Update offset.
CBufferSize = std::max(CBufferSize, CBufferOffset + size);
CBufferOffset = CBufferSize;
++Field;
}
annotation->SetCBufferSize(CBufferSize);
dxilTypeSys.FinishStructAnnotation(*annotation);
return CBufferSize;
}
static bool IsElementInputOutputType(QualType Ty) {
return Ty->isBuiltinType() || hlsl::IsHLSLVecMatType(Ty) ||
Ty->isEnumeralType();
}
static unsigned GetNumTemplateArgsForRecordDecl(const RecordDecl *RD) {
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
if (const ClassTemplateSpecializationDecl *templateSpecializationDecl =
dyn_cast<ClassTemplateSpecializationDecl>(CXXRD)) {
const clang::TemplateArgumentList &args =
templateSpecializationDecl->getTemplateInstantiationArgs();
return args.size();
}
}
return 0;
}
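// Validates a [raypayload] record: when payload access qualifiers are
// enabled, every field must either carry a qualifier or itself be a payload
// struct. Returns true if a payload annotation should be emitted.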
static bool ValidatePayloadDecl(const RecordDecl *Decl,
const ShaderModel &Model,
DiagnosticsEngine &Diag,
const CodeGenOptions &Options) {
// Already checked in Sema, this is not a payload.
if (!Decl->hasAttr<HLSLRayPayloadAttr>())
return false;
// If payload access qualifiers are disabled, warn that they are dropped.
if (!Options.HLSLEnablePayloadAccessQualifiers) {
Diag.ReportOnce(Decl->getLocation(),
diag::warn_hlsl_payload_qualifer_dropped);
return false;
}
// Check that all fields have a payload access qualifier.
bool allFieldsQualified = true;
for (FieldDecl *field : Decl->fields()) {
bool fieldHasPayloadQualifier = false;
bool isPayloadStruct = false;
for (UnusualAnnotation *annotation : field->getUnusualAnnotations()) {
fieldHasPayloadQualifier |=
isa<hlsl::PayloadAccessAnnotation>(annotation);
}
// Check if this is a struct type.
// If it is a [raypayload] struct, its fields must carry
// PayloadAccessQualifiers, which are taken from the struct directly.
// If it is not a payload struct, check whether the field has qualifiers
// attached.
if (RecordDecl *recordTy = field->getType()->getAsCXXRecordDecl()) {
if (recordTy->hasAttr<HLSLRayPayloadAttr>())
isPayloadStruct = true;
}
if (fieldHasPayloadQualifier && isPayloadStruct) {
Diag.Report(field->getLocation(),
diag::err_payload_fields_is_payload_and_overqualified)
<< field->getName();
continue;
} else {
if (isPayloadStruct)
fieldHasPayloadQualifier = true;
}
if (!fieldHasPayloadQualifier) {
Diag.Report(field->getLocation(), diag::err_payload_fields_not_qualified)
<< field->getName();
}
allFieldsQualified &= fieldHasPayloadQualifier;
}
if (!allFieldsQualified) {
Diag.Report(Decl->getLocation(), diag::err_not_all_payload_fields_qualified)
<< Decl->getName();
return false;
}
return true;
}
// Returns the constant buffer size occupied by the given type.
unsigned CGMSHLSLRuntime::AddTypeAnnotation(QualType Ty,
DxilTypeSystem &dxilTypeSys,
unsigned &arrayEltSize) {
if (Ty.isNull())
return 0;
QualType paramTy = Ty.getCanonicalType();
if (const ReferenceType *RefType = dyn_cast<ReferenceType>(paramTy))
paramTy = RefType->getPointeeType();
// Get size.
llvm::Type *Type = CGM.getTypes().ConvertType(paramTy);
unsigned size = dataLayout.getTypeAllocSize(Type);
if (IsHLSLMatType(Ty)) {
llvm::Type *EltTy = HLMatrixType::cast(Type).getElementTypeForReg();
bool b64Bit = dataLayout.getTypeAllocSize(EltTy) == 8;
size = GetMatrixSizeInCB(Ty, m_pHLModule->GetHLOptions().bDefaultRowMajor,
b64Bit);
}
// Skip element types.
if (IsElementInputOutputType(paramTy))
return size;
else if (IsHLSLStreamOutputType(Ty)) {
return AddTypeAnnotation(GetHLSLOutputPatchElementType(Ty), dxilTypeSys,
arrayEltSize);
} else if (IsHLSLInputPatchType(Ty))
return AddTypeAnnotation(GetHLSLInputPatchElementType(Ty), dxilTypeSys,
arrayEltSize);
else if (IsHLSLOutputPatchType(Ty))
return AddTypeAnnotation(GetHLSLOutputPatchElementType(Ty), dxilTypeSys,
arrayEltSize);
else if (!IsHLSLStructuredBufferType(Ty) && IsHLSLResourceType(Ty)) {
// Save result type info.
AddTypeAnnotation(GetHLSLResourceResultType(Ty), dxilTypeSys, arrayEltSize);
// Resources don't count towards cbuffer size.
return 0;
} else if (const RecordType *RT = paramTy->getAs<RecordType>()) {
// For this pointer.
RecordDecl *RD = RT->getDecl();
llvm::StructType *ST = CGM.getTypes().ConvertRecordDeclType(RD);
// Skip if already created.
if (DxilStructAnnotation *annotation =
dxilTypeSys.GetStructAnnotation(ST)) {
unsigned structSize = annotation->GetCBufferSize();
return structSize;
}
DxilStructAnnotation *annotation = dxilTypeSys.AddStructAnnotation(
ST, GetNumTemplateArgsForRecordDecl(RT->getDecl()));
DxilPayloadAnnotation *payloadAnnotation = nullptr;
if (ValidatePayloadDecl(RT->getDecl(), *m_pHLModule->GetShaderModel(),
CGM.getDiags(), CGM.getCodeGenOpts()))
payloadAnnotation = dxilTypeSys.AddPayloadAnnotation(ST);
unsigned size = ConstructStructAnnotation(annotation, payloadAnnotation, RD,
dxilTypeSys);
// Resources don't count towards cbuffer size.
return IsHLSLResourceType(Ty) ? 0 : size;
} else if (IsStringType(Ty)) {
// string won't be included in cbuffer
return 0;
} else {
unsigned arraySize = 0;
QualType arrayElementTy = Ty;
if (Ty->isConstantArrayType()) {
const ConstantArrayType *arrayTy =
CGM.getContext().getAsConstantArrayType(Ty);
DXASSERT(arrayTy != nullptr, "Must be array type here");
arraySize = arrayTy->getSize().getLimitedValue();
arrayElementTy = arrayTy->getElementType();
} else if (Ty->isIncompleteArrayType()) {
const IncompleteArrayType *arrayTy =
CGM.getContext().getAsIncompleteArrayType(Ty);
arrayElementTy = arrayTy->getElementType();
} else {
DXASSERT(0, "Must be array type here");
}
unsigned elementSize =
AddTypeAnnotation(arrayElementTy, dxilTypeSys, arrayEltSize);
// Only set arrayEltSize once.
if (arrayEltSize == 0)
arrayEltSize = elementSize;
// Align to 4 * 4 bytes (one 16-byte register).
unsigned alignedSize = (elementSize + 15) & 0xfffffff0;
return alignedSize * (arraySize - 1) + elementSize;
}
}
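// Maps an HLSL resource type keyword to its DXIL resource kind.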
static DxilResource::Kind KeywordToKind(StringRef keyword) {
// TODO: refactor for faster search (switch by 1/2/3 first letters, then
// compare)
if (keyword == "Texture1D" || keyword == "RWTexture1D" ||
keyword == "RasterizerOrderedTexture1D")
return DxilResource::Kind::Texture1D;
if (keyword == "Texture2D" || keyword == "RWTexture2D" ||
keyword == "RasterizerOrderedTexture2D")
return DxilResource::Kind::Texture2D;
if (keyword == "Texture2DMS" || keyword == "RWTexture2DMS")
return DxilResource::Kind::Texture2DMS;
if (keyword == "FeedbackTexture2D")
return DxilResource::Kind::FeedbackTexture2D;
if (keyword == "Texture3D" || keyword == "RWTexture3D" ||
keyword == "RasterizerOrderedTexture3D")
return DxilResource::Kind::Texture3D;
if (keyword == "TextureCube" || keyword == "RWTextureCube")
return DxilResource::Kind::TextureCube;
if (keyword == "Texture1DArray" || keyword == "RWTexture1DArray" ||
keyword == "RasterizerOrderedTexture1DArray")
return DxilResource::Kind::Texture1DArray;
if (keyword == "Texture2DArray" || keyword == "RWTexture2DArray" ||
keyword == "RasterizerOrderedTexture2DArray")
return DxilResource::Kind::Texture2DArray;
if (keyword == "FeedbackTexture2DArray")
return DxilResource::Kind::FeedbackTexture2DArray;
if (keyword == "Texture2DMSArray" || keyword == "RWTexture2DMSArray")
return DxilResource::Kind::Texture2DMSArray;
if (keyword == "TextureCubeArray" || keyword == "RWTextureCubeArray")
return DxilResource::Kind::TextureCubeArray;
if (keyword == "ByteAddressBuffer" || keyword == "RWByteAddressBuffer" ||
keyword == "RasterizerOrderedByteAddressBuffer")
return DxilResource::Kind::RawBuffer;
if (keyword == "StructuredBuffer" || keyword == "RWStructuredBuffer" ||
keyword == "RasterizerOrderedStructuredBuffer")
return DxilResource::Kind::StructuredBuffer;
if (keyword == "AppendStructuredBuffer" ||
keyword == "ConsumeStructuredBuffer")
return DxilResource::Kind::StructuredBuffer;
// TODO: this is not efficient.
bool isBuffer = keyword == "Buffer";
isBuffer |= keyword == "RWBuffer";
isBuffer |= keyword == "RasterizerOrderedBuffer";
if (isBuffer)
return DxilResource::Kind::TypedBuffer;
if (keyword == "RaytracingAccelerationStructure")
return DxilResource::Kind::RTAccelerationStructure;
return DxilResource::Kind::Invalid;
}
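// Records per-function HLSL information: intrinsic opcodes, denorm mode,
// shader kind (from attributes and/or the target profile), stage-specific
// properties, and parameter/return annotations with semantics and
// interpolation modes.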
void CGMSHLSLRuntime::AddHLSLFunctionInfo(Function *F, const FunctionDecl *FD) {
// Add hlsl intrinsic attr
unsigned intrinsicOpcode;
StringRef intrinsicGroup;
if (hlsl::GetIntrinsicOp(FD, intrinsicOpcode, intrinsicGroup)) {
AddHLSLIntrinsicOpcodeToFunction(F, intrinsicOpcode);
F->addFnAttr(hlsl::HLPrefix, intrinsicGroup);
StringRef lower;
if (hlsl::GetIntrinsicLowering(FD, lower))
hlsl::SetHLLowerStrategy(F, lower);
if (FD->hasAttr<HLSLWaveSensitiveAttr>())
hlsl::SetHLWaveSensitive(F);
// Don't need to add FunctionQual for intrinsic function.
return;
}
if (m_pHLModule->GetFloat32DenormMode() == DXIL::Float32DenormMode::FTZ) {
F->addFnAttr(DXIL::kFP32DenormKindString, DXIL::kFP32DenormValueFtzString);
} else if (m_pHLModule->GetFloat32DenormMode() ==
DXIL::Float32DenormMode::Preserve) {
F->addFnAttr(DXIL::kFP32DenormKindString,
DXIL::kFP32DenormValuePreserveString);
} else if (m_pHLModule->GetFloat32DenormMode() ==
DXIL::Float32DenormMode::Any) {
F->addFnAttr(DXIL::kFP32DenormKindString, DXIL::kFP32DenormValueAnyString);
}
// Set entry function
const ShaderModel *SM = m_pHLModule->GetShaderModel();
const std::string &entryName = m_pHLModule->GetEntryFunctionName();
bool isEntry =
!SM->IsLib() &&
FD->getDeclContext()->getDeclKind() == Decl::Kind::TranslationUnit &&
FD->getNameAsString() == entryName;
if (isEntry) {
Entry.Func = F;
Entry.SL = FD->getLocation();
}
DiagnosticsEngine &Diags = CGM.getDiags();
std::unique_ptr<DxilFunctionProps> funcProps =
llvm::make_unique<DxilFunctionProps>();
funcProps->shaderKind = DXIL::ShaderKind::Invalid;
funcProps->Node.LaunchType = DXIL::NodeLaunchType::Invalid;
bool isCS = false;
bool isGS = false;
bool isHS = false;
bool isDS = false;
bool isVS = false;
bool isPS = false;
bool isRay = false;
bool isMS = false;
bool isAS = false;
bool isNode = false;
// SetStageFlag returns true if the shader kind is valid as a function
// attribute.
auto SetStageFlag = [&](DXIL::ShaderKind shaderKind) -> bool {
switch (shaderKind) {
case DXIL::ShaderKind::Pixel:
isPS = true;
break;
case DXIL::ShaderKind::Vertex:
isVS = true;
break;
case DXIL::ShaderKind::Geometry:
isGS = true;
break;
case DXIL::ShaderKind::Hull:
isHS = true;
break;
case DXIL::ShaderKind::Domain:
isDS = true;
break;
case DXIL::ShaderKind::Compute:
isCS = true;
break;
case DXIL::ShaderKind::Mesh:
isMS = true;
break;
case DXIL::ShaderKind::Amplification:
isAS = true;
break;
case DXIL::ShaderKind::Node:
isNode = true;
break;
case DXIL::ShaderKind::ClosestHit:
case DXIL::ShaderKind::Callable:
case DXIL::ShaderKind::RayGeneration:
case DXIL::ShaderKind::Intersection:
case DXIL::ShaderKind::AnyHit:
case DXIL::ShaderKind::Miss:
isRay = true;
break;
case DXIL::ShaderKind::Library:
default:
return false;
}
return true;
};
clang::SourceLocation priorShaderAttrLoc;
enum class ShaderStageSource : unsigned {
Attribute,
Profile,
};
// Diagnostic assumptions for shader attributes:
// - duplicate attributes of the same kind are ok
// - all attributes are parsed before the kind is set from insertion or the
//   target shader model
auto DiagShaderStage = [&priorShaderAttrLoc,
&Diags](clang::SourceLocation diagLoc,
llvm::StringRef shaderStage,
ShaderStageSource source) {
bool bFromProfile = source == ShaderStageSource::Profile;
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "Invalid shader %select{profile|attribute}0");
Diags.Report(diagLoc, DiagID) << bFromProfile;
if (priorShaderAttrLoc.isValid()) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Note, "See conflicting shader attribute");
Diags.Report(priorShaderAttrLoc, DiagID);
}
};
auto SetShaderKind =
[&](clang::SourceLocation diagLoc, DXIL::ShaderKind shaderKind,
llvm::StringRef shaderStage, ShaderStageSource source) {
if (!SetStageFlag(shaderKind)) {
DiagShaderStage(diagLoc, shaderStage, source);
}
if (isEntry && isRay) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"Ray function cannot be used as a global entry point");
Diags.Report(diagLoc, DiagID);
}
// Update shaderKind, unless we would be overriding one with node, so
// when node+compute, kind = compute. Other conflicts are diagnosed
// above.
if (funcProps->shaderKind == DXIL::ShaderKind::Invalid ||
shaderKind != DXIL::ShaderKind::Node)
funcProps->shaderKind = shaderKind;
};
// Used when a function attribute implies a particular stage.
// This will emit an error if the stage it implies conflicts with a stage set
// from some other source.
auto CheckImpliedShaderStageAttr =
[&SetShaderKind](clang::SourceLocation diagLoc,
DXIL::ShaderKind shaderKind) {
SetShaderKind(diagLoc, shaderKind, "", ShaderStageSource::Attribute);
};
auto ParseShaderStage = [&SetShaderKind](clang::SourceLocation diagLoc,
llvm::StringRef shaderStage,
ShaderStageSource source) {
if (!shaderStage.empty()) {
DXIL::ShaderKind shaderKind = ShaderModel::KindFromFullName(shaderStage);
SetShaderKind(diagLoc, shaderKind, shaderStage, source);
}
};
// Parse all shader attributes and report conflicts.
for (auto *Attr : FD->specific_attrs<HLSLShaderAttr>()) {
ParseShaderStage(Attr->getLocation(), Attr->getStage(),
ShaderStageSource::Attribute);
priorShaderAttrLoc = Attr->getLocation();
}
if (isEntry) {
// Set shaderKind from the shader target profile
SetShaderKind(FD->getLocation(), SM->GetKind(), "",
ShaderStageSource::Profile);
}
// Save patch constant function to patchConstantFunctionMap.
bool isPatchConstantFunction = false;
if (!isEntry && CGM.getContext().IsPatchConstantFunctionDecl(FD)) {
isPatchConstantFunction = true;
auto &PCI = patchConstantFunctionMap[FD->getName()];
PCI.SL = FD->getLocation();
PCI.Func = F;
++PCI.NumOverloads;
for (ParmVarDecl *parmDecl : FD->parameters()) {
QualType Ty = parmDecl->getType();
if (IsHLSLOutputPatchType(Ty)) {
funcProps->ShaderProps.HS.outputControlPoints =
GetHLSLOutputPatchCount(parmDecl->getType());
} else if (IsHLSLInputPatchType(Ty)) {
funcProps->ShaderProps.HS.inputControlPoints =
GetHLSLInputPatchCount(parmDecl->getType());
}
}
// Mark patch constant functions that cannot be linked as exports
// InternalLinkage. Patch constant functions that are actually used
// will be set back to ExternalLinkage in FinishCodeGen.
if (funcProps->ShaderProps.HS.outputControlPoints ||
funcProps->ShaderProps.HS.inputControlPoints) {
PCI.Func->setLinkage(GlobalValue::InternalLinkage);
}
funcProps->shaderKind = DXIL::ShaderKind::Hull;
}
if (FD->hasAttr<HLSLWaveOpsIncludeHelperLanesAttr>()) {
if (SM->IsSM67Plus() &&
(funcProps->shaderKind == DXIL::ShaderKind::Pixel ||
(isEntry && SM->GetKind() == DXIL::ShaderKind::Pixel)))
F->addFnAttr(DXIL::kWaveOpsIncludeHelperLanesString);
}
// Geometry shader.
if (const HLSLMaxVertexCountAttr *Attr =
FD->getAttr<HLSLMaxVertexCountAttr>()) {
CheckImpliedShaderStageAttr(Attr->getLocation(),
DXIL::ShaderKind::Geometry);
funcProps->ShaderProps.GS.maxVertexCount = Attr->getCount();
funcProps->ShaderProps.GS.inputPrimitive = DXIL::InputPrimitive::Undefined;
if (isEntry && !SM->IsGS()) {
unsigned DiagID =
Diags.getCustomDiagID(DiagnosticsEngine::Error,
"attribute maxvertexcount only valid for GS.");
Diags.Report(Attr->getLocation(), DiagID);
return;
}
}
if (const HLSLInstanceAttr *Attr = FD->getAttr<HLSLInstanceAttr>()) {
CheckImpliedShaderStageAttr(Attr->getLocation(),
DXIL::ShaderKind::Geometry);
unsigned instanceCount = Attr->getCount();
funcProps->ShaderProps.GS.instanceCount = instanceCount;
if (isEntry && !SM->IsGS()) {
unsigned DiagID =
Diags.getCustomDiagID(DiagnosticsEngine::Error,
"attribute maxvertexcount only valid for GS.");
Diags.Report(Attr->getLocation(), DiagID);
return;
}
} else {
// Set default instance count.
if (isGS)
funcProps->ShaderProps.GS.instanceCount = 1;
}
// Populate numThreads
if (const HLSLNumThreadsAttr *Attr = FD->getAttr<HLSLNumThreadsAttr>()) {
funcProps->numThreads[0] = Attr->getX();
funcProps->numThreads[1] = Attr->getY();
funcProps->numThreads[2] = Attr->getZ();
if (isEntry && !SM->IsCS() && !SM->IsMS() && !SM->IsAS()) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"attribute numthreads only valid for CS/MS/AS.");
Diags.Report(Attr->getLocation(), DiagID);
return;
}
}
// Hull shader.
if (const HLSLPatchConstantFuncAttr *Attr =
FD->getAttr<HLSLPatchConstantFuncAttr>()) {
if (isEntry && !SM->IsHS()) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"attribute patchconstantfunc only valid for HS.");
Diags.Report(Attr->getLocation(), DiagID);
return;
}
CheckImpliedShaderStageAttr(Attr->getLocation(), DXIL::ShaderKind::Hull);
HSEntryPatchConstantFuncAttr[F] = Attr;
} else {
// TODO: This is a duplicate check. We also have this check in
// hlsl::DiagnoseTranslationUnit(clang::Sema*).
if (isEntry && SM->IsHS()) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"HS entry point must have a valid patchconstantfunc attribute");
Diags.Report(FD->getLocation(), DiagID);
return;
}
}
if (const HLSLOutputControlPointsAttr *Attr =
FD->getAttr<HLSLOutputControlPointsAttr>()) {
if (isHS) {
funcProps->ShaderProps.HS.outputControlPoints = Attr->getCount();
} else if (isEntry && !SM->IsHS()) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"attribute outputcontrolpoints only valid for HS.");
Diags.Report(Attr->getLocation(), DiagID);
return;
}
}
if (const HLSLPartitioningAttr *Attr = FD->getAttr<HLSLPartitioningAttr>()) {
if (isHS) {
DXIL::TessellatorPartitioning partition =
StringToPartitioning(Attr->getScheme());
funcProps->ShaderProps.HS.partition = partition;
} else if (isEntry && !SM->IsHS()) {
unsigned DiagID =
Diags.getCustomDiagID(DiagnosticsEngine::Warning,
"attribute partitioning only valid for HS.");
Diags.Report(Attr->getLocation(), DiagID);
}
}
if (const HLSLOutputTopologyAttr *Attr =
FD->getAttr<HLSLOutputTopologyAttr>()) {
if (isHS) {
DXIL::TessellatorOutputPrimitive primitive =
StringToTessOutputPrimitive(Attr->getTopology());
funcProps->ShaderProps.HS.outputPrimitive = primitive;
} else if (isMS) {
DXIL::MeshOutputTopology topology =
StringToMeshOutputTopology(Attr->getTopology());
funcProps->ShaderProps.MS.outputTopology = topology;
} else if (isEntry && !SM->IsHS() && !SM->IsMS()) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Warning,
"attribute outputtopology only valid for HS and MS.");
Diags.Report(Attr->getLocation(), DiagID);
}
}
if (isHS) {
funcProps->ShaderProps.HS.maxTessFactor = DXIL::kHSMaxTessFactorUpperBound;
funcProps->ShaderProps.HS.inputControlPoints =
DXIL::kHSDefaultInputControlPointCount;
}
if (const HLSLMaxTessFactorAttr *Attr =
FD->getAttr<HLSLMaxTessFactorAttr>()) {
if (isHS) {
// TODO: change getFactor to return float.
llvm::APInt intV(32, Attr->getFactor());
funcProps->ShaderProps.HS.maxTessFactor = intV.bitsToFloat();
} else if (isEntry && !SM->IsHS()) {
unsigned DiagID =
Diags.getCustomDiagID(DiagnosticsEngine::Error,
"attribute maxtessfactor only valid for HS.");
Diags.Report(Attr->getLocation(), DiagID);
return;
}
}
// Hull or domain shader.
if (const HLSLDomainAttr *Attr = FD->getAttr<HLSLDomainAttr>()) {
if (isEntry && !SM->IsHS() && !SM->IsDS()) {
unsigned DiagID =
Diags.getCustomDiagID(DiagnosticsEngine::Error,
"attribute domain only valid for HS or DS.");
Diags.Report(Attr->getLocation(), DiagID);
return;
}
if (!isHS)
CheckImpliedShaderStageAttr(Attr->getLocation(),
DXIL::ShaderKind::Domain);
DXIL::TessellatorDomain domain = StringToDomain(Attr->getDomainType());
if (isHS)
funcProps->ShaderProps.HS.domain = domain;
else
funcProps->ShaderProps.DS.domain = domain;
}
// Vertex shader.
if (const HLSLClipPlanesAttr *Attr = FD->getAttr<HLSLClipPlanesAttr>()) {
if (isEntry && !SM->IsVS()) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "attribute clipplane only valid for VS.");
Diags.Report(Attr->getLocation(), DiagID);
return;
}
// The real job is done at EmitHLSLFunctionProlog where debug info is
// available. Only set shader kind here.
CheckImpliedShaderStageAttr(Attr->getLocation(), DXIL::ShaderKind::Vertex);
}
// Pixel shader.
if (const HLSLEarlyDepthStencilAttr *Attr =
FD->getAttr<HLSLEarlyDepthStencilAttr>()) {
if (isEntry && !SM->IsPS()) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"attribute earlydepthstencil only valid for PS.");
Diags.Report(Attr->getLocation(), DiagID);
return;
}
CheckImpliedShaderStageAttr(Attr->getLocation(), DXIL::ShaderKind::Pixel);
funcProps->ShaderProps.PS.EarlyDepthStencil = true;
}
if (const HLSLWaveSizeAttr *Attr = FD->getAttr<HLSLWaveSizeAttr>()) {
funcProps->WaveSize = DxilWaveSize::Translate(
Attr->getMin(), Attr->getMax(), Attr->getPreferred());
}
// Node shader
if (isNode) {
// Default launch type is defined to be Broadcasting.
funcProps->Node.LaunchType = DXIL::NodeLaunchType::Broadcasting;
// Assign function properties for all "node" attributes.
if (const auto *pAttr = FD->getAttr<HLSLNodeLaunchAttr>()) {
funcProps->Node.LaunchType =
ShaderModel::NodeLaunchTypeFromName(pAttr->getLaunchType());
}
if (const auto *pAttr = FD->getAttr<HLSLNodeIsProgramEntryAttr>()) {
funcProps->Node.IsProgramEntry = true;
}
if (const auto *pAttr = FD->getAttr<HLSLNodeIdAttr>()) {
funcProps->NodeShaderID.Name = pAttr->getName().str();
funcProps->NodeShaderID.Index = pAttr->getArrayIndex();
} else {
funcProps->NodeShaderID.Name = FD->getName().str();
funcProps->NodeShaderID.Index = 0;
}
if (const auto *pAttr =
FD->getAttr<HLSLNodeLocalRootArgumentsTableIndexAttr>()) {
funcProps->Node.LocalRootArgumentsTableIndex = pAttr->getIndex();
}
if (const auto *pAttr = FD->getAttr<HLSLNodeShareInputOfAttr>()) {
funcProps->NodeShaderSharedInput.Name = pAttr->getName().str();
funcProps->NodeShaderSharedInput.Index = pAttr->getArrayIndex();
}
if (const auto *pAttr = FD->getAttr<HLSLNodeDispatchGridAttr>()) {
funcProps->Node.DispatchGrid[0] = pAttr->getX();
funcProps->Node.DispatchGrid[1] = pAttr->getY();
funcProps->Node.DispatchGrid[2] = pAttr->getZ();
}
if (const auto *pAttr = FD->getAttr<HLSLNodeMaxDispatchGridAttr>()) {
funcProps->Node.MaxDispatchGrid[0] = pAttr->getX();
funcProps->Node.MaxDispatchGrid[1] = pAttr->getY();
funcProps->Node.MaxDispatchGrid[2] = pAttr->getZ();
}
if (const auto *pAttr = FD->getAttr<HLSLNodeMaxRecursionDepthAttr>()) {
funcProps->Node.MaxRecursionDepth = pAttr->getCount();
}
if (!FD->getAttr<HLSLNumThreadsAttr>()) {
// NumThreads wasn't specified.
// For a Thread launch node the default is (1,1,1), which we set here.
// Other node launch types require NumThreads and an error will have
// been generated earlier.
funcProps->numThreads[0] = 1;
funcProps->numThreads[1] = 1;
funcProps->numThreads[2] = 1;
}
}
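// Count how many distinct shader-stage kinds were implied by attributes.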
const unsigned profileAttributes =
isCS + isHS + isDS + isGS + isVS + isPS + isRay + isMS + isAS + isNode;
if (isEntry) {
switch (funcProps->shaderKind) {
case ShaderModel::Kind::Compute:
case ShaderModel::Kind::Hull:
case ShaderModel::Kind::Domain:
case ShaderModel::Kind::Geometry:
case ShaderModel::Kind::Vertex:
case ShaderModel::Kind::Pixel:
case ShaderModel::Kind::Mesh:
case ShaderModel::Kind::Amplification:
DXASSERT(funcProps->shaderKind == SM->GetKind(),
"attribute profile not match entry function profile");
break;
case ShaderModel::Kind::Library:
case ShaderModel::Kind::Invalid:
// Non-shader stage shadermodels don't have entry points.
break;
}
}
DxilFunctionAnnotation *FuncAnnotation =
m_pHLModule->AddFunctionAnnotation(F);
bool bDefaultRowMajor = m_pHLModule->GetHLOptions().bDefaultRowMajor;
// Param Info
unsigned streamIndex = 0;
unsigned inputPatchCount = 0;
unsigned outputPatchCount = 0;
unsigned ArgNo = 0;
unsigned ParmIdx = 0;
auto ArgIt = F->arg_begin();
if (const CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(FD)) {
if (MethodDecl->isInstance()) {
QualType ThisTy = MethodDecl->getThisType(FD->getASTContext());
DxilParameterAnnotation ¶mAnnotation =
FuncAnnotation->GetParameterAnnotation(ArgNo++);
++ArgIt;
// Construct annotation for the 'this' pointer.
ConstructFieldAttributedAnnotation(paramAnnotation, ThisTy,
bDefaultRowMajor);
if (MethodDecl->isConst()) {
paramAnnotation.SetParamInputQual(DxilParamInputQual::In);
} else {
paramAnnotation.SetParamInputQual(DxilParamInputQual::Inout);
}
}
}
// Ret Info
QualType retTy = FD->getReturnType();
DxilParameterAnnotation *pRetTyAnnotation = nullptr;
if (F->getReturnType()->isVoidTy() && !retTy->isVoidType()) {
// SRet.
pRetTyAnnotation = &FuncAnnotation->GetParameterAnnotation(ArgNo++);
// Save resource properties for the sret return slot.
AddValToPropertyMap(ArgIt, retTy);
++ArgIt;
} else {
pRetTyAnnotation = &FuncAnnotation->GetRetTypeAnnotation();
}
DxilParameterAnnotation &retTyAnnotation = *pRetTyAnnotation;
// Keep Undefined here; we cannot decide the interpolation mode for a struct.
retTyAnnotation.SetInterpolationMode(
GetInterpMode(FD, CompType::Kind::Invalid, /*bKeepUndefined*/ true)
.GetKind());
SourceLocation retTySemanticLoc = SetSemantic(FD, retTyAnnotation);
retTyAnnotation.SetParamInputQual(DxilParamInputQual::Out);
if (isEntry) {
if (CGM.getLangOpts().EnableDX9CompatMode &&
retTyAnnotation.HasSemanticString()) {
RemapObsoleteSemantic(retTyAnnotation, /*isPatchConstantFunction*/ false);
}
CheckParameterAnnotation(retTySemanticLoc, retTyAnnotation,
/*isPatchConstantFunction*/ false);
}
ConstructFieldAttributedAnnotation(retTyAnnotation, retTy, bDefaultRowMajor);
if (FD->hasAttr<HLSLPreciseAttr>())
retTyAnnotation.SetPrecise();
if (isRay) {
funcProps->ShaderProps.Ray.payloadSizeInBytes = 0;
funcProps->ShaderProps.Ray.attributeSizeInBytes = 0;
}
bool hasOutIndices = false;
bool hasOutVertices = false;
bool hasOutPrimitives = false;
bool hasInPayload = false;
bool rayShaderHaveErrors = false;
unsigned int NodeInputParamIdx = 0;
unsigned int NodeOutputParamIdx = 0;
SmallMapVector<StringRef, const ParmVarDecl *, 8> outputDecls;
for (; ArgNo < F->arg_size(); ++ArgNo, ++ParmIdx, ++ArgIt) {
DxilParameterAnnotation ¶mAnnotation =
FuncAnnotation->GetParameterAnnotation(ArgNo);
const ParmVarDecl *parmDecl = FD->getParamDecl(ParmIdx);
QualType fieldTy = parmDecl->getType();
// Save object properties for parameters.
AddValToPropertyMap(ArgIt, fieldTy);
// if parameter type is a typedef, try to desugar it first.
if (isa<TypedefType>(fieldTy.getTypePtr()))
fieldTy = fieldTy.getDesugaredType(FD->getASTContext());
ConstructFieldAttributedAnnotation(paramAnnotation, fieldTy,
bDefaultRowMajor);
if (parmDecl->hasAttr<HLSLPreciseAttr>())
paramAnnotation.SetPrecise();
// Keep Undefined here; we cannot decide the interpolation mode for a struct.
InterpolationMode paramIM =
GetInterpMode(parmDecl, CompType::Kind::Invalid, KeepUndefinedTrue);
paramAnnotation.SetInterpolationMode(paramIM);
SourceLocation paramSemanticLoc = SetSemantic(parmDecl, paramAnnotation);
DxilParamInputQual dxilInputQ = DxilParamInputQual::In;
if (parmDecl->hasAttr<HLSLInOutAttr>())
dxilInputQ = DxilParamInputQual::Inout;
else if (parmDecl->hasAttr<HLSLOutAttr>())
dxilInputQ = DxilParamInputQual::Out;
if (parmDecl->hasAttr<HLSLOutAttr>() && parmDecl->hasAttr<HLSLInAttr>())
dxilInputQ = DxilParamInputQual::Inout;
if (parmDecl->hasAttr<HLSLOutAttr>() &&
parmDecl->hasAttr<HLSLIndicesAttr>()) {
if (hasOutIndices) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"multiple out indices parameters not allowed");
Diags.Report(parmDecl->getLocation(), DiagID);
continue;
}
const ConstantArrayType *CAT =
dyn_cast<ConstantArrayType>(fieldTy.getCanonicalType());
if (CAT == nullptr) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"indices output is not an constant-length array");
Diags.Report(parmDecl->getLocation(), DiagID);
continue;
}
unsigned count = CAT->getSize().getZExtValue();
if (count > DXIL::kMaxMSOutputPrimitiveCount) {
unsigned DiagID =
Diags.getCustomDiagID(DiagnosticsEngine::Error,
"max primitive count should not exceed %0");
Diags.Report(parmDecl->getLocation(), DiagID)
<< DXIL::kMaxMSOutputPrimitiveCount;
continue;
}
if (funcProps->ShaderProps.MS.maxPrimitiveCount != 0 &&
funcProps->ShaderProps.MS.maxPrimitiveCount != count) {
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"max primitive count mismatch");
Diags.Report(parmDecl->getLocation(), DiagID);
continue;
}
// Get element type.
QualType arrayEleTy = CAT->getElementType();
if (hlsl::IsHLSLVecType(arrayEleTy)) {
QualType vecEltTy = hlsl::GetHLSLVecElementType(arrayEleTy);
if (!vecEltTy->isUnsignedIntegerType() ||
CGM.getContext().getTypeSize(vecEltTy) != 32) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"the element of out_indices array must be uint2 for line output "
"or uint3 for triangle output");
Diags.Report(parmDecl->getLocation(), DiagID);
continue;
}
unsigned vecEltCount = hlsl::GetHLSLVecSize(arrayEleTy);
if (funcProps->ShaderProps.MS.outputTopology ==
DXIL::MeshOutputTopology::Line &&
vecEltCount != 2) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"the element of out_indices array in a mesh shader whose output "
"topology is line must be uint2");
Diags.Report(parmDecl->getLocation(), DiagID);
continue;
}
if (funcProps->ShaderProps.MS.outputTopology ==
DXIL::MeshOutputTopology::Triangle &&
vecEltCount != 3) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"the element of out_indices array in a mesh shader whose output "
"topology is triangle must be uint3");
Diags.Report(parmDecl->getLocation(), DiagID);
continue;
}
} else {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"the element of out_indices array must be uint2 for line output or "
"uint3 for triangle output");
Diags.Report(parmDecl->getLocation(), DiagID);
continue;
}
dxilInputQ = DxilParamInputQual::OutIndices;
funcProps->ShaderProps.MS.maxPrimitiveCount = count;
hasOutIndices = true;
}
if (parmDecl->hasAttr<HLSLOutAttr>() &&
parmDecl->hasAttr<HLSLVerticesAttr>()) {
if (hasOutVertices) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"multiple out vertices parameters not allowed");
Diags.Report(parmDecl->getLocation(), DiagID);
continue;
}
const ConstantArrayType *CAT =
dyn_cast<ConstantArrayType>(fieldTy.getCanonicalType());
if (CAT == nullptr) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"vertices output is not an constant-length array");
Diags.Report(parmDecl->getLocation(), DiagID);
continue;
}
unsigned count = CAT->getSize().getZExtValue();
if (count > DXIL::kMaxMSOutputVertexCount) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "max vertex count should not exceed %0");
Diags.Report(parmDecl->getLocation(), DiagID)
<< DXIL::kMaxMSOutputVertexCount;
continue;
}
dxilInputQ = DxilParamInputQual::OutVertices;
funcProps->ShaderProps.MS.maxVertexCount = count;
hasOutVertices = true;
}
if (parmDecl->hasAttr<HLSLOutAttr>() &&
parmDecl->hasAttr<HLSLPrimitivesAttr>()) {
if (hasOutPrimitives) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"multiple out primitives parameters not allowed");
Diags.Report(parmDecl->getLocation(), DiagID);
continue;
}
const ConstantArrayType *CAT =
dyn_cast<ConstantArrayType>(fieldTy.getCanonicalType());
if (CAT == nullptr) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"primitives output is not an constant-length array");
Diags.Report(parmDecl->getLocation(), DiagID);
continue;
}
unsigned count = CAT->getSize().getZExtValue();
if (count > DXIL::kMaxMSOutputPrimitiveCount) {
unsigned DiagID =
Diags.getCustomDiagID(DiagnosticsEngine::Error,
"max primitive count should not exceed %0");
Diags.Report(parmDecl->getLocation(), DiagID)
<< DXIL::kMaxMSOutputPrimitiveCount;
continue;
}
if (funcProps->ShaderProps.MS.maxPrimitiveCount != 0 &&
funcProps->ShaderProps.MS.maxPrimitiveCount != count) {
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"max primitive count mismatch");
Diags.Report(parmDecl->getLocation(), DiagID);
continue;
}
dxilInputQ = DxilParamInputQual::OutPrimitives;
funcProps->ShaderProps.MS.maxPrimitiveCount = count;
hasOutPrimitives = true;
}
if (parmDecl->hasAttr<HLSLInAttr>() &&
parmDecl->hasAttr<HLSLPayloadAttr>()) {
if (hasInPayload) {
unsigned DiagID =
Diags.getCustomDiagID(DiagnosticsEngine::Error,
"multiple in payload parameters not allowed");
Diags.Report(parmDecl->getLocation(), DiagID);
continue;
}
dxilInputQ = DxilParamInputQual::InPayload;
DataLayout DL(&this->TheModule);
funcProps->ShaderProps.MS.payloadSizeInBytes =
DL.getTypeAllocSize(F->getFunctionType()
->getFunctionParamType(ArgNo)
->getPointerElementType());
hasInPayload = true;
}
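    // Patch, stream, and GS primitive parameter types below determine the
    // remaining input qualifiers for HS/DS/GS entry points.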
DXIL::InputPrimitive inputPrimitive = DXIL::InputPrimitive::Undefined;
if (IsHLSLOutputPatchType(parmDecl->getType())) {
outputPatchCount++;
if (dxilInputQ != DxilParamInputQual::In) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"OutputPatch should not be out/inout parameter");
Diags.Report(parmDecl->getLocation(), DiagID);
continue;
}
dxilInputQ = DxilParamInputQual::OutputPatch;
if (isDS)
funcProps->ShaderProps.DS.inputControlPoints =
GetHLSLOutputPatchCount(parmDecl->getType());
} else if (IsHLSLInputPatchType(parmDecl->getType())) {
inputPatchCount++;
if (dxilInputQ != DxilParamInputQual::In) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"InputPatch should not be out/inout parameter");
Diags.Report(parmDecl->getLocation(), DiagID);
continue;
}
dxilInputQ = DxilParamInputQual::InputPatch;
if (isHS) {
funcProps->ShaderProps.HS.inputControlPoints =
GetHLSLInputPatchCount(parmDecl->getType());
} else if (isGS) {
inputPrimitive = (DXIL::InputPrimitive)(
(unsigned)DXIL::InputPrimitive::ControlPointPatch1 +
GetHLSLInputPatchCount(parmDecl->getType()) - 1);
}
} else if (IsHLSLStreamOutputType(parmDecl->getType())) {
      // TODO: validate this at ASTContext::getFunctionType in
      // AST/ASTContext.cpp
DXASSERT(dxilInputQ == DxilParamInputQual::Inout,
"stream output parameter must be inout");
switch (streamIndex) {
case 0:
dxilInputQ = DxilParamInputQual::OutStream0;
break;
case 1:
dxilInputQ = DxilParamInputQual::OutStream1;
break;
case 2:
dxilInputQ = DxilParamInputQual::OutStream2;
break;
case 3:
default:
        // TODO: validate this at ASTContext::getFunctionType in
        // AST/ASTContext.cpp
        DXASSERT(streamIndex == 3, "stream number out of bounds");
dxilInputQ = DxilParamInputQual::OutStream3;
break;
}
DXIL::PrimitiveTopology &streamTopology =
funcProps->ShaderProps.GS.streamPrimitiveTopologies[streamIndex];
if (IsHLSLPointStreamType(parmDecl->getType()))
streamTopology = DXIL::PrimitiveTopology::PointList;
else if (IsHLSLLineStreamType(parmDecl->getType()))
streamTopology = DXIL::PrimitiveTopology::LineStrip;
else {
DXASSERT(IsHLSLTriangleStreamType(parmDecl->getType()),
"invalid StreamType");
streamTopology = DXIL::PrimitiveTopology::TriangleStrip;
}
if (streamIndex > 0) {
bool bAllPoint =
streamTopology == DXIL::PrimitiveTopology::PointList &&
funcProps->ShaderProps.GS.streamPrimitiveTopologies[0] ==
DXIL::PrimitiveTopology::PointList;
if (!bAllPoint) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "when multiple GS output streams are "
"used they must be pointlists.");
Diags.Report(FD->getLocation(), DiagID);
}
}
streamIndex++;
}
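    // Map a GS input primitive attribute to the input array dimension it
    // requires on the parameter type.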
unsigned GsInputArrayDim = 0;
if (parmDecl->hasAttr<HLSLTriangleAttr>()) {
inputPrimitive = DXIL::InputPrimitive::Triangle;
GsInputArrayDim = 3;
} else if (parmDecl->hasAttr<HLSLTriangleAdjAttr>()) {
inputPrimitive = DXIL::InputPrimitive::TriangleWithAdjacency;
GsInputArrayDim = 6;
} else if (parmDecl->hasAttr<HLSLPointAttr>()) {
inputPrimitive = DXIL::InputPrimitive::Point;
GsInputArrayDim = 1;
} else if (parmDecl->hasAttr<HLSLLineAdjAttr>()) {
inputPrimitive = DXIL::InputPrimitive::LineWithAdjacency;
GsInputArrayDim = 4;
} else if (parmDecl->hasAttr<HLSLLineAttr>()) {
inputPrimitive = DXIL::InputPrimitive::Line;
GsInputArrayDim = 2;
}
if (inputPrimitive != DXIL::InputPrimitive::Undefined) {
// Set to InputPrimitive for GS.
dxilInputQ = DxilParamInputQual::InputPrimitive;
if (funcProps->ShaderProps.GS.inputPrimitive ==
DXIL::InputPrimitive::Undefined) {
funcProps->ShaderProps.GS.inputPrimitive = inputPrimitive;
} else if (funcProps->ShaderProps.GS.inputPrimitive != inputPrimitive) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "input parameter conflicts with geometry "
"specifier of previous input parameters");
Diags.Report(parmDecl->getLocation(), DiagID);
}
}
if (GsInputArrayDim != 0) {
QualType Ty = parmDecl->getType();
if (!Ty->isConstantArrayType()) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"input types for geometry shader must be constant size arrays");
Diags.Report(parmDecl->getLocation(), DiagID);
} else {
const ConstantArrayType *CAT = cast<ConstantArrayType>(Ty);
if (CAT->getSize().getLimitedValue() != GsInputArrayDim) {
        StringRef primitiveNames[] = {
            "invalid",     // 0
            "point",       // 1
            "line",        // 2
            "triangle",    // 3
            "lineadj",     // 4
            "invalid",     // 5
            "triangleadj", // 6
        };
        DXASSERT(GsInputArrayDim < llvm::array_lengthof(primitiveNames),
                 "Invalid array dim");
        unsigned DiagID = Diags.getCustomDiagID(
            DiagnosticsEngine::Error, "array dimension for %0 must be %1");
        Diags.Report(parmDecl->getLocation(), DiagID)
            << primitiveNames[GsInputArrayDim] << GsInputArrayDim;
}
}
}
// Validate Ray Tracing function parameter (some validation may be pushed
// into front end)
if (isRay) {
switch (funcProps->shaderKind) {
case DXIL::ShaderKind::RayGeneration:
case DXIL::ShaderKind::Intersection:
break;
case DXIL::ShaderKind::AnyHit:
case DXIL::ShaderKind::ClosestHit: {
DataLayout DL(&this->TheModule);
unsigned size = DL.getTypeAllocSize(F->getFunctionType()
->getFunctionParamType(ArgNo)
->getPointerElementType());
if (0 == ArgNo)
funcProps->ShaderProps.Ray.payloadSizeInBytes = size;
else
funcProps->ShaderProps.Ray.attributeSizeInBytes = size;
break;
}
case DXIL::ShaderKind::Miss: {
DataLayout DL(&this->TheModule);
unsigned size = DL.getTypeAllocSize(F->getFunctionType()
->getFunctionParamType(ArgNo)
->getPointerElementType());
funcProps->ShaderProps.Ray.payloadSizeInBytes = size;
break;
}
case DXIL::ShaderKind::Callable: {
DataLayout DL(&this->TheModule);
unsigned size = DL.getTypeAllocSize(F->getFunctionType()
->getFunctionParamType(ArgNo)
->getPointerElementType());
funcProps->ShaderProps.Ray.paramSizeInBytes = size;
break;
}
}
}
// Parse the function arguments and fill out the node i/o properties
if (isNode) {
hlsl::NodeFlags nodeFlags;
if (GetHLSLNodeIORecordType(parmDecl, nodeFlags)) {
hlsl::NodeIOProperties node(nodeFlags);
dxilInputQ = DxilParamInputQual::NodeIO;
// Add Node Record Type
AddHLSLNodeRecordTypeInfo(parmDecl, node);
if (nodeFlags.IsInputRecord()) {
// Add Node Shader parameter to a ValToProp map
// This will be used later to lower the Node parameters
// to handles
        // Note: there may be at most one input record
NodeInputRecordParams[ArgIt].MetadataIdx = NodeInputParamIdx++;
if (parmDecl->hasAttr<HLSLMaxRecordsAttr>()) {
node.MaxRecords =
parmDecl->getAttr<HLSLMaxRecordsAttr>()->getMaxCount();
}
if (parmDecl->hasAttr<HLSLGloballyCoherentAttr>())
node.Flags.SetGloballyCoherent();
NodeInputRecordParams[ArgIt].RecordInfo = node.GetNodeRecordInfo();
funcProps->InputNodes.push_back(node);
} else {
DXASSERT(node.Flags.IsOutputNode(), "Invalid NodeIO Kind");
// Add Node Shader parameter to a ValToProp map
// This will be used later to lower the Node parameters
// to handles
NodeOutputParams[ArgIt].MetadataIdx = NodeOutputParamIdx++;
if (parmDecl->hasAttr<HLSLAllowSparseNodesAttr>())
node.AllowSparseNodes = true;
// OutputArraySize from NodeArraySize attribute
if (parmDecl->hasAttr<HLSLNodeArraySizeAttr>()) {
node.OutputArraySize =
parmDecl->getAttr<HLSLNodeArraySizeAttr>()->getCount();
} else {
node.OutputArraySize = 0;
}
if (parmDecl->hasAttr<HLSLUnboundedSparseNodesAttr>()) {
node.AllowSparseNodes = true;
node.OutputArraySize = UINT_MAX;
}
// OutputID from attribute
if (const auto *Attr = parmDecl->getAttr<HLSLNodeIdAttr>()) {
node.OutputID.Name = Attr->getName().str();
node.OutputID.Index = Attr->getArrayIndex();
} else {
node.OutputID.Name = parmDecl->getName().str();
node.OutputID.Index = 0;
}
// Insert output decls for cross referencing once all info is
// available
outputDecls.insert(std::make_pair(parmDecl->getName(), parmDecl));
NodeOutputParams[ArgIt].Info = node.GetNodeInfo();
funcProps->OutputNodes.push_back(node);
}
}
}
paramAnnotation.SetParamInputQual(dxilInputQ);
if (isEntry) {
if (CGM.getLangOpts().EnableDX9CompatMode &&
paramAnnotation.HasSemanticString()) {
RemapObsoleteSemantic(paramAnnotation,
/*isPatchConstantFunction*/ false);
}
CheckParameterAnnotation(paramSemanticLoc, paramAnnotation,
/*isPatchConstantFunction*/ false);
}
}
  // All output decls and parameter names are now available, so errors can be
  // reported and the parameter output array indices that correspond to
  // parameter names can be added to the properties.
auto outIt = outputDecls.begin();
for (unsigned outputNo = 0; outputNo < funcProps->OutputNodes.size();
outputNo++) {
const ParmVarDecl *parmDecl = outIt->second;
outIt++;
hlsl::NodeIOProperties &node = funcProps->OutputNodes[outputNo];
if (const auto *Attr = parmDecl->getAttr<HLSLMaxRecordsSharedWithAttr>()) {
// Find matching argument name if present
StringRef sharedName = Attr->getName()->getName();
auto snIt = outputDecls.find(sharedName);
int ix = snIt - outputDecls.begin();
if (snIt == outputDecls.end()) {
Diags.Report(
parmDecl->getLocation(),
Diags.getCustomDiagID(DiagnosticsEngine::Error,
"MaxRecordsSharedWith must reference a valid "
"ouput parameter name."));
} else if (ix == (int)outputNo) {
Diags.Report(
parmDecl->getLocation(),
Diags.getCustomDiagID(DiagnosticsEngine::Error,
"MaxRecordsSharedWith must not reference the "
"same parameter it is applied to."));
}
node.MaxRecordsSharedWith = ix;
}
if (const auto *Attr = parmDecl->getAttr<HLSLMaxRecordsAttr>())
node.MaxRecords = Attr->getMaxCount();
}
if (inputPatchCount > 1) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "may only have one InputPatch parameter");
Diags.Report(FD->getLocation(), DiagID);
}
if (outputPatchCount > 1) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "may only have one OutputPatch parameter");
Diags.Report(FD->getLocation(), DiagID);
}
// If Shader is a ray shader that requires parameters, make sure size is
// non-zero
if (isRay) {
bool bNeedsAttributes = false;
bool bNeedsPayload = false;
switch (funcProps->shaderKind) {
case DXIL::ShaderKind::AnyHit:
case DXIL::ShaderKind::ClosestHit:
bNeedsAttributes = true;
LLVM_FALLTHROUGH;
case DXIL::ShaderKind::Miss:
bNeedsPayload = true;
LLVM_FALLTHROUGH;
case DXIL::ShaderKind::Callable:
if (0 == funcProps->ShaderProps.Ray.payloadSizeInBytes) {
unsigned DiagID =
bNeedsPayload
? Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"shader must include inout payload structure parameter.")
: Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"shader must include inout parameter structure.");
Diags.Report(FD->getLocation(), DiagID);
rayShaderHaveErrors = true;
}
}
if (bNeedsAttributes &&
0 == funcProps->ShaderProps.Ray.attributeSizeInBytes) {
Diags.Report(FD->getLocation(),
Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"shader must include attributes structure parameter."));
rayShaderHaveErrors = true;
}
}
  // If we encountered an error during verification of ray tracing shader
  // signatures, stop here. Otherwise we risk triggering unhandled behavior,
  // e.g., DXC crashes when the payload is declared as a matrix<float...>
  // type.
if (rayShaderHaveErrors)
return;
// Type annotation for parameters and return type.
{
DxilTypeSystem &dxilTypeSys = m_pHLModule->GetTypeSystem();
unsigned arrayEltSize = 0;
AddTypeAnnotation(FD->getReturnType(), dxilTypeSys, arrayEltSize);
// Type annotation for this pointer.
if (const CXXMethodDecl *MFD = dyn_cast<CXXMethodDecl>(FD)) {
const CXXRecordDecl *RD = MFD->getParent();
QualType Ty = CGM.getContext().getTypeDeclType(RD);
AddTypeAnnotation(Ty, dxilTypeSys, arrayEltSize);
}
for (const ValueDecl *param : FD->params()) {
QualType Ty = param->getType();
AddTypeAnnotation(Ty, dxilTypeSys, arrayEltSize);
}
dxilTypeSys.FinishFunctionAnnotation(*FuncAnnotation);
}
  // Clear isExportedEntry if the entry is not actually exported.
bool isExportedEntry = SM->IsLib() && profileAttributes != 0;
if (isExportedEntry) {
// use unmangled or mangled name depending on which is used for final entry
// function
StringRef name = isRay ? F->getName() : FD->getName();
if (!m_ExportMap.IsExported(name)) {
isExportedEntry = false;
}
}
// Only parse root signature for entry function.
if (HLSLRootSignatureAttr *RSA = FD->getAttr<HLSLRootSignatureAttr>()) {
if (isExportedEntry || isEntry)
EmitHLSLRootSignature(RSA, F, *funcProps);
}
// Only add functionProps when exist.
if (isExportedEntry || isEntry)
m_pHLModule->AddDxilFunctionProps(F, funcProps);
if (isPatchConstantFunction)
patchConstantFunctionPropsMap[F] = std::move(funcProps);
// Save F to entry map.
if (isExportedEntry) {
if (entryFunctionMap.count(FD->getName())) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID =
Diags.getCustomDiagID(DiagnosticsEngine::Error, "redefinition of %0");
Diags.Report(FD->getLocStart(), DiagID) << FD->getName();
}
auto &Entry = entryFunctionMap[FD->getNameAsString()];
Entry.SL = FD->getLocation();
Entry.Func = F;
}
// Add target-dependent experimental function attributes
for (const HLSLExperimentalAttr *Attr :
FD->specific_attrs<HLSLExperimentalAttr>()) {
F->addFnAttr(Twine("exp-", Attr->getName()).str(), Attr->getValue());
}
m_ScopeMap[F] = ScopeInfo(F, FD->getLocation());
}
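// Fill in node record type info for a node I/O parameter: record size and
// alignment, the TrackRWInputSharing flag, and the SV_DispatchGrid field's
// byte offset, component count, and component type.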
void CGMSHLSLRuntime::AddHLSLNodeRecordTypeInfo(
const clang::ParmVarDecl *parmDecl, hlsl::NodeIOProperties &node) {
clang::QualType paramTy = parmDecl->getType().getCanonicalType();
if (auto arrayType = dyn_cast<ConstantArrayType>(paramTy)) {
paramTy = arrayType->getElementType();
}
if (const RecordType *RT = dyn_cast<RecordType>(paramTy)) {
    // Node I/O records are template types
if (const ClassTemplateSpecializationDecl *templateDecl =
dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl())) {
auto &TemplateArgs = templateDecl->getTemplateArgs();
if (!node.Flags.IsEmpty()) {
DiagnosticsEngine &Diags = CGM.getDiags();
auto &Rec = TemplateArgs.get(0);
clang::QualType RecType = Rec.getAsType();
llvm::Type *Type = CGM.getTypes().ConvertType(RecType);
CXXRecordDecl *RD = RecType->getAsCXXRecordDecl();
// Get the TrackRWInputSharing flag from the record attribute
if (RD->hasAttr<HLSLNodeTrackRWInputSharingAttr>()) {
if (node.Flags.IsInputRecord() &&
node.Flags.GetNodeIOKind() !=
hlsl::DXIL::NodeIOKind::RWDispatchNodeInputRecord) {
Diags.Report(
parmDecl->getLocation(),
Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"NodeTrackRWInputSharing attribute cannot be applied to "
"Input Records that are not RWDispatchNodeInputRecord"));
}
node.Flags.SetTrackRWInputSharing();
}
// Ex: For DispatchNodeInputRecord<MY_RECORD>, set size =
// size(MY_RECORD), alignment = alignof(MY_RECORD)
node.RecordType.size = CGM.getDataLayout().getTypeAllocSize(Type);
node.RecordType.alignment =
CGM.getDataLayout().getABITypeAlignment(Type);
// Iterate over fields of the MY_RECORD(example) struct
for (auto fieldDecl : RD->fields()) {
// Check if any of the fields have a semantic annotation =
// SV_DispatchGrid
for (const hlsl::UnusualAnnotation *it :
fieldDecl->getUnusualAnnotations()) {
if (it->getKind() == hlsl::UnusualAnnotation::UA_SemanticDecl) {
const hlsl::SemanticDecl *sd = cast<hlsl::SemanticDecl>(it);
            // If we find a field with SV_DispatchGrid, fill out the
            // SV_DispatchGrid member with the byte offset of the field,
            // NumComponents (3 for uint3, etc.), and the component type
            // (U32 or U16, the only types allowed).
if (sd->SemanticName.equals("SV_DispatchGrid")) {
clang::QualType FT = fieldDecl->getType();
auto &DL = CGM.getDataLayout();
auto &SDGRec = node.RecordType.SV_DispatchGrid;
DXASSERT_NOMSG(SDGRec.NumComponents == 0);
unsigned fieldIdx = fieldDecl->getFieldIndex();
if (StructType *ST = dyn_cast<StructType>(Type)) {
SDGRec.ByteOffset =
DL.getStructLayout(ST)->getElementOffset(fieldIdx);
}
const llvm::Type *lTy = CGM.getTypes().ConvertType(FT);
if (const llvm::VectorType *VT =
dyn_cast<llvm::VectorType>(lTy)) {
DXASSERT(VT->getElementType()->isIntegerTy(), "invalid type");
SDGRec.NumComponents = VT->getNumElements();
SDGRec.ComponentType =
(VT->getElementType()->getIntegerBitWidth() == 16)
? DXIL::ComponentType::U16
: DXIL::ComponentType::U32;
} else if (const llvm::ArrayType *AT =
dyn_cast<llvm::ArrayType>(lTy)) {
DXASSERT(AT->getElementType()->isIntegerTy(), "invalid type");
DXASSERT_NOMSG(AT->getNumElements() <= 3);
SDGRec.NumComponents = AT->getNumElements();
SDGRec.ComponentType =
(AT->getElementType()->getIntegerBitWidth() == 16)
? DXIL::ComponentType::U16
: DXIL::ComponentType::U32;
} else {
// Scalar U16 or U32
DXASSERT(lTy->isIntegerTy(), "invalid type");
SDGRec.NumComponents = 1;
SDGRec.ComponentType = (lTy->getIntegerBitWidth() == 16)
? DXIL::ComponentType::U16
: DXIL::ComponentType::U32;
}
}
}
}
}
}
}
}
}
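// Remap obsolete (DX9-style) semantics based on the signature point implied
// by the parameter's input qualifier; used only in DX9 back-compat mode.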
void CGMSHLSLRuntime::RemapObsoleteSemantic(DxilParameterAnnotation ¶mInfo,
bool isPatchConstantFunction) {
DXASSERT(CGM.getLangOpts().EnableDX9CompatMode,
"should be used only in back-compat mode");
const ShaderModel *SM = m_pHLModule->GetShaderModel();
DXIL::SigPointKind sigPointKind = SigPointFromInputQual(
paramInfo.GetParamInputQual(), SM->GetKind(), isPatchConstantFunction);
hlsl::RemapObsoleteSemantic(paramInfo, sigPointKind, CGM.getLLVMContext());
}
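// Prolog work that needs debug info or final linkage decisions: record
// clip-plane globals, and demote non-exported external functions to internal
// linkage per the DefaultLinkage option.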
void CGMSHLSLRuntime::EmitHLSLFunctionProlog(Function *F,
const FunctionDecl *FD) {
  // Supporting clip planes needs debug info, which is not available when the
  // function attribute is created.
if (const HLSLClipPlanesAttr *Attr = FD->getAttr<HLSLClipPlanesAttr>()) {
DxilFunctionProps &funcProps = m_pHLModule->GetDxilFunctionProps(F);
// Initialize to null.
memset(funcProps.ShaderProps.VS.clipPlanes, 0,
sizeof(funcProps.ShaderProps.VS.clipPlanes));
// Create global for each clip plane, and use the clip plane val as init
// val.
auto AddClipPlane = [&](Expr *clipPlane, unsigned idx) {
if (DeclRefExpr *decl = dyn_cast<DeclRefExpr>(clipPlane)) {
const VarDecl *VD = cast<VarDecl>(decl->getDecl());
Constant *clipPlaneVal = CGM.GetAddrOfGlobalVar(VD);
funcProps.ShaderProps.VS.clipPlanes[idx] = clipPlaneVal;
if (m_bDebugInfo) {
CodeGenFunction CGF(CGM);
ApplyDebugLocation applyDebugLoc(CGF, clipPlane);
debugInfoMap[clipPlaneVal] = CGF.Builder.getCurrentDebugLocation();
}
} else {
// Must be a MemberExpr.
const MemberExpr *ME = cast<MemberExpr>(clipPlane);
CodeGenFunction CGF(CGM);
CodeGen::LValue LV = CGF.EmitMemberExpr(ME);
Value *addr = LV.getAddress();
funcProps.ShaderProps.VS.clipPlanes[idx] = cast<Constant>(addr);
if (m_bDebugInfo) {
CodeGenFunction CGF(CGM);
ApplyDebugLocation applyDebugLoc(CGF, clipPlane);
debugInfoMap[addr] = CGF.Builder.getCurrentDebugLocation();
}
}
};
if (Expr *clipPlane = Attr->getClipPlane1())
AddClipPlane(clipPlane, 0);
if (Expr *clipPlane = Attr->getClipPlane2())
AddClipPlane(clipPlane, 1);
if (Expr *clipPlane = Attr->getClipPlane3())
AddClipPlane(clipPlane, 2);
if (Expr *clipPlane = Attr->getClipPlane4())
AddClipPlane(clipPlane, 3);
if (Expr *clipPlane = Attr->getClipPlane5())
AddClipPlane(clipPlane, 4);
if (Expr *clipPlane = Attr->getClipPlane6())
AddClipPlane(clipPlane, 5);
clipPlaneFuncList.emplace_back(F);
}
// Update function linkage based on DefaultLinkage
// We will take care of patch constant functions later, once identified for
// certain.
if (!m_pHLModule->HasDxilFunctionProps(F)) {
if (F->getLinkage() == GlobalValue::LinkageTypes::ExternalLinkage) {
if (!FD->hasAttr<HLSLExportAttr>()) {
switch (CGM.getCodeGenOpts().DefaultLinkage) {
case DXIL::DefaultLinkage::Default:
if (m_pHLModule->GetShaderModel()->GetMinor() !=
ShaderModel::kOfflineMinor)
F->setLinkage(GlobalValue::LinkageTypes::InternalLinkage);
break;
case DXIL::DefaultLinkage::Internal:
F->setLinkage(GlobalValue::LinkageTypes::InternalLinkage);
break;
}
}
}
}
}
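// Attach control-flow hint metadata ([branch]/[flatten]/[forcecase]) to a
// terminator, falling back to the prefer/avoid-control-flow codegen options
// when no attribute is present, and diagnosing branch+flatten conflicts.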
void CGMSHLSLRuntime::AddControlFlowHint(CodeGenFunction &CGF, const Stmt &S,
llvm::TerminatorInst *TI,
ArrayRef<const Attr *> Attrs) {
// Build hints.
bool bNoBranchFlatten = true;
bool bBranch = false;
bool bFlatten = false;
std::vector<DXIL::ControlFlowHint> hints;
for (const auto *Attr : Attrs) {
if (isa<HLSLBranchAttr>(Attr)) {
hints.emplace_back(DXIL::ControlFlowHint::Branch);
bNoBranchFlatten = false;
bBranch = true;
} else if (isa<HLSLFlattenAttr>(Attr)) {
hints.emplace_back(DXIL::ControlFlowHint::Flatten);
bNoBranchFlatten = false;
bFlatten = true;
} else if (isa<HLSLForceCaseAttr>(Attr)) {
if (isa<SwitchStmt>(&S)) {
hints.emplace_back(DXIL::ControlFlowHint::ForceCase);
}
}
// Ignore fastopt, allow_uav_condition and call for now.
}
if (bNoBranchFlatten) {
    // Check the control-flow codegen options.
if (CGF.CGM.getCodeGenOpts().HLSLPreferControlFlow)
hints.emplace_back(DXIL::ControlFlowHint::Branch);
else if (CGF.CGM.getCodeGenOpts().HLSLAvoidControlFlow)
hints.emplace_back(DXIL::ControlFlowHint::Flatten);
}
if (bFlatten && bBranch) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"can't use branch and flatten attributes together");
Diags.Report(S.getLocStart(), DiagID);
}
if (hints.size()) {
// Add meta data to the instruction.
MDNode *hintsNode = DxilMDHelper::EmitControlFlowHints(Context, hints);
TI->setMetadata(DxilMDHelper::kDxilControlFlowHintMDName, hintsNode);
}
}
void CGMSHLSLRuntime::MarkPotentialResourceTemp(CodeGenFunction &CGF,
llvm::Value *V,
clang::QualType QualTy) {
// Save object properties for temp that may be created for
// call args, return value, or agg expr copy.
if (objectProperties.GetResource(V).isValid())
return;
AddValToPropertyMap(V, QualTy);
}
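// Returns true when exactly one side of an assignment is declared
// globallycoherent; flat-conversion casts (used for createHandleFromHeap) are
// exempt.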
static bool isGLCMismatch(QualType Ty0, QualType Ty1, const Expr *SrcExp,
clang::SourceLocation Loc, DiagnosticsEngine &Diags) {
if (HasHLSLGloballyCoherent(Ty0) == HasHLSLGloballyCoherent(Ty1))
return false;
if (const CastExpr *Cast = dyn_cast<CastExpr>(SrcExp)) {
// Skip flat conversion which is for createHandleFromHeap.
if (Cast->getCastKind() == CastKind::CK_FlatConversion)
return false;
}
return true;
}
void CGMSHLSLRuntime::FinishAutoVar(CodeGenFunction &CGF, const VarDecl &D,
llvm::Value *V) {
if (D.hasAttr<HLSLPreciseAttr>()) {
AllocaInst *AI = cast<AllocaInst>(V);
HLModule::MarkPreciseAttributeWithMetadata(AI);
}
// Add type annotation for local variable.
DxilTypeSystem &typeSys = m_pHLModule->GetTypeSystem();
unsigned arrayEltSize = 0;
AddTypeAnnotation(D.getType(), typeSys, arrayEltSize);
// Save object properties for local variables.
AddValToPropertyMap(V, D.getType());
if (D.hasInit()) {
if (isGLCMismatch(D.getType(), D.getInit()->getType(), D.getInit(),
D.getLocation(), CGM.getDiags())) {
objectProperties.updateGLC(V);
}
}
}
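// When a return value's globallycoherent annotation differs from the declared
// return type, materialize a temporary of the return type, copy the resource
// with annotated properties, and return a reference to the temporary.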
const clang::Expr *CGMSHLSLRuntime::CheckReturnStmtGLCMismatch(
CodeGenFunction &CGF, const Expr *RV, const clang::ReturnStmt &S,
clang::QualType FnRetTy,
const std::function<void(const VarDecl *, llvm::Value *)> &TmpArgMap) {
if (!isGLCMismatch(RV->getType(), FnRetTy, RV, S.getReturnLoc(),
CGM.getDiags())) {
return RV;
}
const FunctionDecl *FD = cast<FunctionDecl>(CGF.CurFuncDecl);
// create temp Var
VarDecl *tmpArg =
VarDecl::Create(CGF.getContext(), const_cast<FunctionDecl *>(FD),
SourceLocation(), SourceLocation(),
/*IdentifierInfo*/ nullptr, FnRetTy,
CGF.getContext().getTrivialTypeSourceInfo(FnRetTy),
StorageClass::SC_Auto);
  // Aggregate types become indirect parameters converted to pointer types,
  // so don't update to a reference type; use an RValue for it.
const DeclRefExpr *tmpRef = DeclRefExpr::Create(
CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), tmpArg,
/*enclosing*/ false, tmpArg->getLocation(), FnRetTy, VK_RValue);
  // Create an alloca for the tmp arg.
Value *tmpArgAddr = nullptr;
BasicBlock *InsertBlock = CGF.Builder.GetInsertBlock();
Function *F = InsertBlock->getParent();
  // Make sure the alloca is in the entry block so inlining does not create a
  // stacksave.
IRBuilder<> AllocaBuilder(dxilutil::FindAllocaInsertionPt(F));
tmpArgAddr = AllocaBuilder.CreateAlloca(CGF.ConvertTypeForMem(FnRetTy));
// add it to local decl map
TmpArgMap(tmpArg, tmpArgAddr);
LValue argLV = CGF.EmitLValue(RV);
Value *argAddr = argLV.getAddress();
// Annotate return value when mismatch with function return type.
DxilResourceProperties RP = BuildResourceProperty(RV->getType());
CopyAndAnnotateResourceArgument(argAddr, tmpArgAddr, RP, *m_pHLModule, CGF);
return tmpRef;
}
hlsl::InterpolationMode CGMSHLSLRuntime::GetInterpMode(const Decl *decl,
CompType compType,
bool bKeepUndefined) {
InterpolationMode Interp(
decl->hasAttr<HLSLNoInterpolationAttr>(), decl->hasAttr<HLSLLinearAttr>(),
decl->hasAttr<HLSLNoPerspectiveAttr>(), decl->hasAttr<HLSLCentroidAttr>(),
decl->hasAttr<HLSLSampleAttr>());
DXASSERT(Interp.IsValid(), "otherwise front-end missing validation");
if (Interp.IsUndefined() && !bKeepUndefined) {
// Type-based default: linear for floats, constant for others.
if (compType.IsFloatTy())
Interp = InterpolationMode::Kind::Linear;
else
Interp = InterpolationMode::Kind::Constant;
}
return Interp;
}
/// Add resource to the program
void CGMSHLSLRuntime::addResource(Decl *D) {
if (HLSLBufferDecl *BD = dyn_cast<HLSLBufferDecl>(D))
GetOrCreateCBuffer(BD);
else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
hlsl::DxilResourceBase::Class resClass = TypeToClass(VD->getType());
// Save resource properties for global variables.
if (resClass != DXIL::ResourceClass::Invalid) {
GlobalVariable *GV = cast<GlobalVariable>(CGM.GetAddrOfGlobalVar(VD));
AddValToPropertyMap(GV, VD->getType());
}
    // Skip resource decls that have an initializer.
if (VD->hasInit() && resClass != DXIL::ResourceClass::Invalid) {
if (resClass == DXIL::ResourceClass::UAV) {
if (isGLCMismatch(VD->getType(), VD->getInit()->getType(),
VD->getInit(), D->getLocation(), CGM.getDiags())) {
GlobalVariable *GV = cast<GlobalVariable>(CGM.GetAddrOfGlobalVar(VD));
objectProperties.updateGLC(GV);
}
}
return;
}
// skip static global.
if (!VD->hasExternalFormalLinkage()) {
if (VD->hasInit() && VD->getType().isConstQualified()) {
Expr *InitExp = VD->getInit();
GlobalVariable *GV = cast<GlobalVariable>(CGM.GetAddrOfGlobalVar(VD));
// Only save const static global of struct type.
if (GV->getType()->getElementType()->isStructTy()) {
staticConstGlobalInitMap[InitExp] = GV;
}
}
// Add type annotation for static global variable.
DxilTypeSystem &typeSys = m_pHLModule->GetTypeSystem();
unsigned arrayEltSize = 0;
AddTypeAnnotation(VD->getType(), typeSys, arrayEltSize);
return;
}
if (D->hasAttr<HLSLGroupSharedAttr>()) {
GlobalVariable *GV = cast<GlobalVariable>(CGM.GetAddrOfGlobalVar(VD));
DxilTypeSystem &dxilTypeSys = m_pHLModule->GetTypeSystem();
unsigned arraySize = 0;
AddTypeAnnotation(VD->getType(), dxilTypeSys, arraySize);
m_pHLModule->AddGroupSharedVariable(GV);
return;
}
switch (resClass) {
case hlsl::DxilResourceBase::Class::Sampler:
AddSampler(VD);
break;
case hlsl::DxilResourceBase::Class::UAV:
case hlsl::DxilResourceBase::Class::SRV:
AddUAVSRV(VD, resClass);
break;
case hlsl::DxilResourceBase::Class::Invalid: {
// normal global constant, add to global CB
HLCBuffer &globalCB = GetGlobalCBuffer();
AddConstant(VD, globalCB);
break;
}
case DXIL::ResourceClass::CBuffer:
AddConstantBufferView(VD);
break;
}
}
}
/// Add subobject to the module
void CGMSHLSLRuntime::addSubobject(Decl *D) {
VarDecl *VD = dyn_cast<VarDecl>(D);
DXASSERT(VD != nullptr, "must be a global variable");
DXIL::SubobjectKind subobjKind;
DXIL::HitGroupType hgType;
if (!hlsl::GetHLSLSubobjectKind(VD->getType(), subobjKind, hgType)) {
DXASSERT(false, "not a valid subobject declaration");
return;
}
Expr *initExpr = const_cast<Expr *>(VD->getAnyInitializer());
if (!initExpr) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "subobject needs to be initialized");
Diags.Report(D->getLocStart(), DiagID);
return;
}
if (InitListExpr *initListExpr = dyn_cast<InitListExpr>(initExpr)) {
try {
CreateSubobject(subobjKind, VD->getName(), initListExpr->getInits(),
initListExpr->getNumInits(), hgType);
} catch (hlsl::Exception &) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "internal error creating subobject");
Diags.Report(initExpr->getLocStart(), DiagID);
return;
}
} else {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"expected initialization list");
Diags.Report(initExpr->getLocStart(), DiagID);
return;
}
}
// TODO: collect such helper utility functions in one place.
static DxilResourceBase::Class KeywordToClass(const std::string &keyword) {
// TODO: refactor for faster search (switch by 1/2/3 first letters, then
// compare)
if (keyword == "SamplerState")
return DxilResourceBase::Class::Sampler;
if (keyword == "SamplerComparisonState")
return DxilResourceBase::Class::Sampler;
if (keyword == "ConstantBuffer")
return DxilResourceBase::Class::CBuffer;
if (keyword == "TextureBuffer")
return DxilResourceBase::Class::CBuffer;
bool isSRV = keyword == "Buffer";
isSRV |= keyword == "ByteAddressBuffer";
isSRV |= keyword == "RaytracingAccelerationStructure";
isSRV |= keyword == "StructuredBuffer";
isSRV |= keyword == "Texture1D";
isSRV |= keyword == "Texture1DArray";
isSRV |= keyword == "Texture2D";
isSRV |= keyword == "Texture2DArray";
isSRV |= keyword == "Texture3D";
isSRV |= keyword == "TextureCube";
isSRV |= keyword == "TextureCubeArray";
isSRV |= keyword == "Texture2DMS";
isSRV |= keyword == "Texture2DMSArray";
if (isSRV)
return DxilResourceBase::Class::SRV;
bool isUAV = keyword == "RWBuffer";
isUAV |= keyword == "RWByteAddressBuffer";
isUAV |= keyword == "RWStructuredBuffer";
isUAV |= keyword == "RWTexture1D";
isUAV |= keyword == "RWTexture1DArray";
isUAV |= keyword == "RWTexture2D";
isUAV |= keyword == "RWTexture2DArray";
isUAV |= keyword == "RWTexture3D";
isUAV |= keyword == "RWTextureCube";
isUAV |= keyword == "RWTextureCubeArray";
isUAV |= keyword == "RWTexture2DMS";
isUAV |= keyword == "RWTexture2DMSArray";
isUAV |= keyword == "AppendStructuredBuffer";
isUAV |= keyword == "ConsumeStructuredBuffer";
isUAV |= keyword == "RasterizerOrderedBuffer";
isUAV |= keyword == "RasterizerOrderedByteAddressBuffer";
isUAV |= keyword == "RasterizerOrderedStructuredBuffer";
isUAV |= keyword == "RasterizerOrderedTexture1D";
isUAV |= keyword == "RasterizerOrderedTexture1DArray";
isUAV |= keyword == "RasterizerOrderedTexture2D";
isUAV |= keyword == "RasterizerOrderedTexture2DArray";
isUAV |= keyword == "RasterizerOrderedTexture3D";
isUAV |= keyword == "FeedbackTexture2D";
isUAV |= keyword == "FeedbackTexture2DArray";
if (isUAV)
return DxilResourceBase::Class::UAV;
return DxilResourceBase::Class::Invalid;
}
// This should probably be refactored to ASTContextHLSL, and follow types
// rather than do string comparisons.
DXIL::ResourceClass
hlsl::GetResourceClassForType(const clang::ASTContext &context,
clang::QualType Ty) {
Ty = Ty.getCanonicalType();
if (const clang::ArrayType *arrayType = context.getAsArrayType(Ty)) {
return GetResourceClassForType(context, arrayType->getElementType());
} else if (const RecordType *RT = Ty->getAsStructureType()) {
return KeywordToClass(RT->getDecl()->getName());
} else if (const RecordType *RT = Ty->getAs<RecordType>()) {
if (const ClassTemplateSpecializationDecl *templateDecl =
dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl())) {
return KeywordToClass(templateDecl->getName());
}
}
return hlsl::DxilResourceBase::Class::Invalid;
}
hlsl::DxilResourceBase::Class CGMSHLSLRuntime::TypeToClass(clang::QualType Ty) {
return hlsl::GetResourceClassForType(CGM.getContext(), Ty);
}
namespace {
void GetResourceDeclElemTypeAndRangeSize(CodeGenModule &CGM, HLModule &HL,
VarDecl &VD, QualType &ElemType,
unsigned &rangeSize) {
// We can't canonicalize nor desugar the type without losing the 'snorm' in
// Buffer<snorm float>
ElemType = VD.getType();
rangeSize = 1;
while (const clang::ArrayType *arrayType =
CGM.getContext().getAsArrayType(ElemType)) {
if (rangeSize != UINT_MAX) {
if (arrayType->isConstantArrayType()) {
rangeSize *=
cast<ConstantArrayType>(arrayType)->getSize().getLimitedValue();
} else {
if (HL.GetHLOptions().bLegacyResourceReservation) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "unbounded resources are not supported "
"with -flegacy-resource-reservation");
Diags.Report(VD.getLocation(), DiagID);
}
rangeSize = UINT_MAX;
}
}
ElemType = arrayType->getElementType();
}
}
} // namespace
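// Apply register() bindings from a declaration's unusual annotations to a
// resource. Semantics are ignored here; packoffset and payload qualifiers
// should have been rejected by the front end.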
static void InitFromUnusualAnnotations(DxilResourceBase &Resource,
NamedDecl &Decl) {
for (hlsl::UnusualAnnotation *It : Decl.getUnusualAnnotations()) {
switch (It->getKind()) {
case hlsl::UnusualAnnotation::UA_RegisterAssignment: {
hlsl::RegisterAssignment *RegAssign = cast<hlsl::RegisterAssignment>(It);
if (RegAssign->RegisterType) {
Resource.SetLowerBound(RegAssign->RegisterNumber);
// For backcompat, don't auto-assign the register space if there's an
// explicit register type.
Resource.SetSpaceID(RegAssign->RegisterSpace.getValueOr(0));
} else {
Resource.SetSpaceID(RegAssign->RegisterSpace.getValueOr(UINT_MAX));
}
break;
}
case hlsl::UnusualAnnotation::UA_SemanticDecl:
// Ignore Semantics
break;
case hlsl::UnusualAnnotation::UA_ConstantPacking:
// Should be handled by front-end
llvm_unreachable("packoffset on resource");
break;
case hlsl::UnusualAnnotation::UA_PayloadAccessQualifier:
// Should be handled by front-end
llvm_unreachable("payload qualifier on resource");
break;
default:
llvm_unreachable("unknown UnusualAnnotation on resource");
break;
}
}
}
uint32_t CGMSHLSLRuntime::AddSampler(VarDecl *samplerDecl) {
llvm::GlobalVariable *val =
cast<llvm::GlobalVariable>(CGM.GetAddrOfGlobalVar(samplerDecl));
unique_ptr<DxilSampler> hlslRes(new DxilSampler);
hlslRes->SetLowerBound(UINT_MAX);
hlslRes->SetSpaceID(UINT_MAX);
hlslRes->SetGlobalSymbol(val);
hlslRes->SetGlobalName(samplerDecl->getName());
QualType VarTy;
unsigned rangeSize;
GetResourceDeclElemTypeAndRangeSize(CGM, *m_pHLModule, *samplerDecl, VarTy,
rangeSize);
hlslRes->SetRangeSize(rangeSize);
const RecordType *RT = VarTy->getAs<RecordType>();
DxilSampler::SamplerKind kind = StringToSamplerKind(RT->getDecl()->getName());
hlslRes->SetSamplerKind(kind);
InitFromUnusualAnnotations(*hlslRes, *samplerDecl);
hlslRes->SetID(m_pHLModule->GetSamplers().size());
return m_pHLModule->AddSampler(std::move(hlslRes));
}
bool CGMSHLSLRuntime::GetAsConstantUInt32(clang::Expr *expr, uint32_t *value) {
APSInt result;
if (!expr->EvaluateAsInt(result, CGM.getContext())) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "cannot convert to constant unsigned int");
Diags.Report(expr->getLocStart(), DiagID);
return false;
}
*value = result.getLimitedValue(UINT32_MAX);
return true;
}
bool CGMSHLSLRuntime::GetAsConstantString(clang::Expr *expr, StringRef *value,
bool failWhenEmpty /*=false*/) {
Expr::EvalResult result;
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = 0;
if (expr->EvaluateAsRValue(result, CGM.getContext())) {
if (result.Val.isLValue()) {
DXASSERT_NOMSG(result.Val.getLValueOffset().isZero());
DXASSERT_NOMSG(result.Val.getLValueCallIndex() == 0);
const Expr *evExpr = result.Val.getLValueBase().get<const Expr *>();
if (const StringLiteral *strLit = dyn_cast<const StringLiteral>(evExpr)) {
*value = strLit->getBytes();
if (!failWhenEmpty || !(*value).empty()) {
return true;
}
DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"empty string not expected here");
}
}
}
if (!DiagID)
DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"cannot convert to constant string");
Diags.Report(expr->getLocStart(), DiagID);
return false;
}
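// Split a semicolon-separated export list into individual names; empty
// segments are dropped.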
std::vector<StringRef>
CGMSHLSLRuntime::ParseSubobjectExportsAssociations(StringRef exports) {
std::vector<StringRef> parsedExports;
const char *pData = exports.data();
const char *pEnd = pData + exports.size();
const char *pLast = pData;
while (pData < pEnd) {
if (*pData == ';') {
if (pLast < pData) {
parsedExports.emplace_back(StringRef(pLast, pData - pLast));
}
pLast = pData + 1;
}
pData++;
}
if (pLast < pData) {
parsedExports.emplace_back(StringRef(pLast, pData - pLast));
}
return parsedExports;
}
void CGMSHLSLRuntime::CreateSubobject(
DXIL::SubobjectKind kind, const StringRef name, clang::Expr **args,
unsigned int argCount,
DXIL::HitGroupType hgType /*= (DXIL::HitGroupType)(-1)*/) {
DxilSubobjects *subobjects = m_pHLModule->GetSubobjects();
if (!subobjects) {
subobjects = new DxilSubobjects();
m_pHLModule->ResetSubobjects(subobjects);
}
DxilRootSignatureCompilationFlags flags =
DxilRootSignatureCompilationFlags::GlobalRootSignature;
switch (kind) {
case DXIL::SubobjectKind::StateObjectConfig: {
uint32_t flags;
DXASSERT_NOMSG(argCount == 1);
if (GetAsConstantUInt32(args[0], &flags)) {
subobjects->CreateStateObjectConfig(name, flags);
}
break;
}
case DXIL::SubobjectKind::LocalRootSignature:
flags = DxilRootSignatureCompilationFlags::LocalRootSignature;
LLVM_FALLTHROUGH;
case DXIL::SubobjectKind::GlobalRootSignature: {
DXASSERT_NOMSG(argCount == 1);
StringRef signature;
if (!GetAsConstantString(args[0], &signature, true))
return;
RootSignatureHandle RootSigHandle;
CompileRootSignature(signature, CGM.getDiags(), args[0]->getLocStart(),
rootSigVer, flags, &RootSigHandle);
if (!RootSigHandle.IsEmpty()) {
RootSigHandle.EnsureSerializedAvailable();
subobjects->CreateRootSignature(
name, kind == DXIL::SubobjectKind::LocalRootSignature,
RootSigHandle.GetSerializedBytes(), RootSigHandle.GetSerializedSize(),
&signature);
}
break;
}
case DXIL::SubobjectKind::SubobjectToExportsAssociation: {
DXASSERT_NOMSG(argCount == 2);
StringRef subObjName, exports;
if (!GetAsConstantString(args[0], &subObjName, true) ||
!GetAsConstantString(args[1], &exports, false))
return;
std::vector<StringRef> exportList =
ParseSubobjectExportsAssociations(exports);
subobjects->CreateSubobjectToExportsAssociation(
name, subObjName, exportList.data(), exportList.size());
break;
}
case DXIL::SubobjectKind::RaytracingShaderConfig: {
DXASSERT_NOMSG(argCount == 2);
uint32_t maxPayloadSize;
uint32_t MaxAttributeSize;
if (!GetAsConstantUInt32(args[0], &maxPayloadSize) ||
!GetAsConstantUInt32(args[1], &MaxAttributeSize))
return;
subobjects->CreateRaytracingShaderConfig(name, maxPayloadSize,
MaxAttributeSize);
break;
}
case DXIL::SubobjectKind::RaytracingPipelineConfig: {
DXASSERT_NOMSG(argCount == 1);
uint32_t maxTraceRecursionDepth;
if (!GetAsConstantUInt32(args[0], &maxTraceRecursionDepth))
return;
subobjects->CreateRaytracingPipelineConfig(name, maxTraceRecursionDepth);
break;
}
case DXIL::SubobjectKind::HitGroup: {
switch (hgType) {
case DXIL::HitGroupType::Triangle: {
DXASSERT_NOMSG(argCount == 2);
StringRef anyhit, closesthit;
if (!GetAsConstantString(args[0], &anyhit) ||
!GetAsConstantString(args[1], &closesthit))
return;
subobjects->CreateHitGroup(name, DXIL::HitGroupType::Triangle, anyhit,
closesthit, llvm::StringRef(""));
break;
}
case DXIL::HitGroupType::ProceduralPrimitive: {
DXASSERT_NOMSG(argCount == 3);
StringRef anyhit, closesthit, intersection;
if (!GetAsConstantString(args[0], &anyhit) ||
!GetAsConstantString(args[1], &closesthit) ||
!GetAsConstantString(args[2], &intersection, true))
return;
subobjects->CreateHitGroup(name, DXIL::HitGroupType::ProceduralPrimitive,
anyhit, closesthit, intersection);
break;
}
default:
llvm_unreachable("unknown HitGroupType");
}
break;
}
case DXIL::SubobjectKind::RaytracingPipelineConfig1: {
DXASSERT_NOMSG(argCount == 2);
uint32_t maxTraceRecursionDepth;
uint32_t raytracingPipelineFlags;
if (!GetAsConstantUInt32(args[0], &maxTraceRecursionDepth))
return;
if (!GetAsConstantUInt32(args[1], &raytracingPipelineFlags))
return;
subobjects->CreateRaytracingPipelineConfig1(name, maxTraceRecursionDepth,
raytracingPipelineFlags);
break;
}
default:
llvm_unreachable("unknown SubobjectKind");
break;
}
}
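// Recursively flatten a type into its scalar components: matrices contribute
// row*col elements, vectors their element count, structs their fields, and
// arrays one copy per element.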
static void CollectScalarTypes(std::vector<QualType> &ScalarTys, QualType Ty) {
if (Ty->isRecordType()) {
if (hlsl::IsHLSLMatType(Ty)) {
QualType EltTy = hlsl::GetHLSLMatElementType(Ty);
unsigned row = 0;
unsigned col = 0;
hlsl::GetRowsAndCols(Ty, row, col);
unsigned size = col * row;
for (unsigned i = 0; i < size; i++) {
CollectScalarTypes(ScalarTys, EltTy);
}
} else if (hlsl::IsHLSLVecType(Ty)) {
QualType EltTy = hlsl::GetHLSLVecElementType(Ty);
unsigned row = 0;
unsigned col = 0;
hlsl::GetRowsAndColsForAny(Ty, row, col);
unsigned size = col;
for (unsigned i = 0; i < size; i++) {
CollectScalarTypes(ScalarTys, EltTy);
}
} else {
const RecordType *RT = Ty->getAs<RecordType>();
RecordDecl *RD = RT->getDecl();
for (FieldDecl *field : RD->fields())
CollectScalarTypes(ScalarTys, field->getType());
}
} else if (Ty->isArrayType()) {
const clang::ArrayType *AT = Ty->getAsArrayTypeUnsafe();
QualType EltTy = AT->getElementType();
    // Use 5 for unsized arrays, which is enough to exceed the caller's
    // four-element limit.
unsigned size = 5;
if (AT->isConstantArrayType()) {
size = cast<ConstantArrayType>(AT)->getSize().getLimitedValue();
}
for (unsigned i = 0; i < size; i++) {
CollectScalarTypes(ScalarTys, EltTy);
}
} else {
ScalarTys.emplace_back(Ty);
}
}
bool CGMSHLSLRuntime::SetUAVSRV(SourceLocation loc,
hlsl::DxilResourceBase::Class resClass,
DxilResource *hlslRes, QualType QualTy) {
RecordDecl *RD = QualTy->getAs<RecordType>()->getDecl();
hlsl::DxilResource::Kind kind = KeywordToKind(RD->getName());
DXASSERT_NOMSG(kind != hlsl::DxilResource::Kind::Invalid);
hlslRes->SetKind(kind);
// Type annotation for result type of resource.
DxilTypeSystem &dxilTypeSys = m_pHLModule->GetTypeSystem();
unsigned arrayEltSize = 0;
AddTypeAnnotation(QualType(RD->getTypeForDecl(), 0), dxilTypeSys,
arrayEltSize);
if (kind == hlsl::DxilResource::Kind::Texture2DMS ||
kind == hlsl::DxilResource::Kind::Texture2DMSArray) {
const ClassTemplateSpecializationDecl *templateDecl =
cast<ClassTemplateSpecializationDecl>(RD);
const clang::TemplateArgument &sampleCountArg =
templateDecl->getTemplateArgs()[1];
uint32_t sampleCount = sampleCountArg.getAsIntegral().getLimitedValue();
hlslRes->SetSampleCount(sampleCount);
}
if (hlsl::DxilResource::IsAnyTexture(kind)) {
const ClassTemplateSpecializationDecl *templateDecl =
cast<ClassTemplateSpecializationDecl>(RD);
const clang::TemplateArgument &texelTyArg =
templateDecl->getTemplateArgs()[0];
llvm::Type *texelTy = CGM.getTypes().ConvertType(texelTyArg.getAsType());
if (!texelTy->isFloatingPointTy() && !texelTy->isIntegerTy() &&
!hlsl::IsHLSLVecType(texelTyArg.getAsType())) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"texture resource texel type must be scalar or vector");
Diags.Report(loc, DiagID);
return false;
}
}
QualType resultTy = hlsl::GetHLSLResourceResultType(QualTy);
if (kind != hlsl::DxilResource::Kind::StructuredBuffer &&
!resultTy.isNull()) {
QualType Ty = resultTy;
QualType EltTy = Ty;
if (hlsl::IsHLSLVecType(Ty)) {
EltTy = hlsl::GetHLSLVecElementType(Ty);
} else if (hlsl::IsHLSLMatType(Ty)) {
EltTy = hlsl::GetHLSLMatElementType(Ty);
} else if (hlsl::IsHLSLAggregateType(resultTy)) {
      // Struct or array in a non-structured-buffer resource.
std::vector<QualType> ScalarTys;
CollectScalarTypes(ScalarTys, resultTy);
unsigned size = ScalarTys.size();
if (size == 0) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"object's templated type must have at least one element");
Diags.Report(loc, DiagID);
return false;
}
if (size > 4) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "elements of typed buffers and textures "
"must fit in four 32-bit quantities");
Diags.Report(loc, DiagID);
return false;
}
EltTy = ScalarTys[0];
for (QualType ScalarTy : ScalarTys) {
if (ScalarTy != EltTy) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"all template type components must have the same type");
Diags.Report(loc, DiagID);
return false;
}
}
}
bool bSNorm = false;
bool bHasNormAttribute = hlsl::HasHLSLUNormSNorm(Ty, &bSNorm);
if (const BuiltinType *BTy = EltTy->getAs<BuiltinType>()) {
CompType::Kind kind = BuiltinTyToCompTy(BTy, bHasNormAttribute && bSNorm,
bHasNormAttribute && !bSNorm);
      // 64-bit types are implemented with u32.
if (kind == CompType::Kind::U64 || kind == CompType::Kind::I64 ||
kind == CompType::Kind::SNormF64 ||
kind == CompType::Kind::UNormF64 || kind == CompType::Kind::F64) {
kind = CompType::Kind::U32;
}
hlslRes->SetCompType(kind);
} else {
DXASSERT(!bHasNormAttribute, "snorm/unorm on invalid type");
}
}
if (hlslRes->IsFeedbackTexture()) {
hlslRes->SetSamplerFeedbackType(static_cast<DXIL::SamplerFeedbackType>(
hlsl::GetHLSLResourceTemplateUInt(QualTy)));
}
hlslRes->SetROV(RD->getName().startswith("RasterizerOrdered"));
if (kind == hlsl::DxilResource::Kind::TypedBuffer ||
kind == hlsl::DxilResource::Kind::StructuredBuffer) {
const ClassTemplateSpecializationDecl *templateDecl =
cast<ClassTemplateSpecializationDecl>(RD);
const clang::TemplateArgument &retTyArg =
templateDecl->getTemplateArgs()[0];
llvm::Type *retTy = CGM.getTypes().ConvertType(retTyArg.getAsType());
uint32_t strideInBytes = dataLayout.getTypeAllocSize(retTy);
hlslRes->SetElementStride(strideInBytes);
if (kind == hlsl::DxilResource::Kind::StructuredBuffer) {
if (StructType *ST = dyn_cast<StructType>(retTy)) {
const StructLayout *SL = dataLayout.getStructLayout(ST);
hlslRes->SetBaseAlignLog2(Log2_32(SL->getAlignment()));
}
}
}
if (HasHLSLGloballyCoherent(QualTy)) {
hlslRes->SetGloballyCoherent(true);
}
if (resClass == hlsl::DxilResourceBase::Class::SRV) {
hlslRes->SetRW(false);
hlslRes->SetID(m_pHLModule->GetSRVs().size());
} else {
hlslRes->SetRW(true);
hlslRes->SetID(m_pHLModule->GetUAVs().size());
}
return true;
}
uint32_t CGMSHLSLRuntime::AddUAVSRV(VarDecl *decl,
hlsl::DxilResourceBase::Class resClass) {
llvm::GlobalVariable *val =
cast<llvm::GlobalVariable>(CGM.GetAddrOfGlobalVar(decl));
unique_ptr<HLResource> hlslRes(new HLResource);
hlslRes->SetLowerBound(UINT_MAX);
hlslRes->SetSpaceID(UINT_MAX);
hlslRes->SetGlobalSymbol(val);
hlslRes->SetGlobalName(decl->getName());
QualType VarTy;
unsigned rangeSize;
GetResourceDeclElemTypeAndRangeSize(CGM, *m_pHLModule, *decl, VarTy,
rangeSize);
hlslRes->SetRangeSize(rangeSize);
InitFromUnusualAnnotations(*hlslRes, *decl);
if (decl->hasAttr<HLSLGloballyCoherentAttr>()) {
hlslRes->SetGloballyCoherent(true);
}
if (!SetUAVSRV(decl->getLocation(), resClass, hlslRes.get(), VarTy))
return 0;
if (resClass == hlsl::DxilResourceBase::Class::SRV) {
return m_pHLModule->AddSRV(std::move(hlslRes));
} else {
return m_pHLModule->AddUAV(std::move(hlslRes));
}
}
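// Returns true if the type, or any nested field or array element, is a
// resource type.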
static bool IsResourceInType(const clang::ASTContext &context,
clang::QualType Ty) {
Ty = Ty.getCanonicalType();
if (const clang::ArrayType *arrayType = context.getAsArrayType(Ty)) {
return IsResourceInType(context, arrayType->getElementType());
} else if (const RecordType *RT = Ty->getAsStructureType()) {
if (KeywordToClass(RT->getDecl()->getName()) !=
DxilResourceBase::Class::Invalid)
return true;
const CXXRecordDecl *typeRecordDecl = RT->getAsCXXRecordDecl();
if (typeRecordDecl && !typeRecordDecl->isImplicit()) {
for (auto field : typeRecordDecl->fields()) {
if (IsResourceInType(context, field->getType()))
return true;
}
}
} else if (const RecordType *RT = Ty->getAs<RecordType>()) {
if (const ClassTemplateSpecializationDecl *templateDecl =
dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl())) {
if (KeywordToClass(templateDecl->getName()) !=
DxilResourceBase::Class::Invalid)
return true;
}
}
return false; // no resources found
}
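// Wrap a constant in a DxilResourceBase (resource class Invalid) and append
// it to the cbuffer; the range size comes from the type annotation.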
void CGMSHLSLRuntime::AddConstantToCB(GlobalVariable *CV, StringRef Name,
QualType Ty, unsigned LowerBound,
HLCBuffer &CB) {
std::unique_ptr<DxilResourceBase> pHlslConst =
llvm::make_unique<DxilResourceBase>(DXIL::ResourceClass::Invalid);
pHlslConst->SetLowerBound(LowerBound);
pHlslConst->SetSpaceID(0);
pHlslConst->SetGlobalSymbol(CV);
pHlslConst->SetGlobalName(Name);
DxilTypeSystem &dxilTypeSys = m_pHLModule->GetTypeSystem();
unsigned arrayEltSize = 0;
unsigned size = AddTypeAnnotation(Ty, dxilTypeSys, arrayEltSize);
pHlslConst->SetRangeSize(size);
CB.AddConst(pHlslConst);
}
void CGMSHLSLRuntime::AddConstant(VarDecl *constDecl, HLCBuffer &CB) {
if (constDecl->getStorageClass() == SC_Static) {
// For static inside cbuffer, take as global static.
// Don't add to cbuffer.
CGM.EmitGlobal(constDecl);
// Add type annotation for static global types.
// May need it when cast from cbuf.
DxilTypeSystem &dxilTypeSys = m_pHLModule->GetTypeSystem();
unsigned arraySize = 0;
AddTypeAnnotation(constDecl->getType(), dxilTypeSys, arraySize);
return;
}
llvm::Constant *constVal = CGM.GetAddrOfGlobalVar(constDecl);
// Add debug info for constVal.
if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
if (CGM.getCodeGenOpts().getDebugInfo() >=
CodeGenOptions::LimitedDebugInfo) {
DI->EmitGlobalVariable(cast<GlobalVariable>(constVal), constDecl);
}
auto ®Bindings = constantRegBindingMap[constVal];
// Save resource properties for cbuffer variables.
AddValToPropertyMap(constVal, constDecl->getType());
bool isGlobalCB = CB.GetID() == globalCBIndex;
uint32_t offset = 0;
bool userOffset = false;
for (hlsl::UnusualAnnotation *it : constDecl->getUnusualAnnotations()) {
switch (it->getKind()) {
case hlsl::UnusualAnnotation::UA_ConstantPacking: {
if (!isGlobalCB) {
        // TODO: check that packoffset and non-packoffset elements are not
        // mixed in a cbuffer.
hlsl::ConstantPacking *cp = cast<hlsl::ConstantPacking>(it);
offset = cp->Subcomponent << 2;
offset += cp->ComponentOffset;
// Change to byte.
offset <<= 2;
userOffset = true;
} else {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"packoffset is only allowed in a constant buffer.");
Diags.Report(it->Loc, DiagID);
}
break;
}
case hlsl::UnusualAnnotation::UA_RegisterAssignment: {
RegisterAssignment *ra = cast<RegisterAssignment>(it);
if (isGlobalCB) {
if (ra->RegisterSpace.hasValue()) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"register space cannot be specified on global constants.");
Diags.Report(it->Loc, DiagID);
}
offset = ra->RegisterNumber << 2;
// Change to byte.
offset <<= 2;
userOffset = true;
}
switch (ra->RegisterType) {
default:
break;
case 't':
regBindings.emplace_back(
std::make_pair(DXIL::ResourceClass::SRV, ra->RegisterNumber));
break;
case 'u':
regBindings.emplace_back(
std::make_pair(DXIL::ResourceClass::UAV, ra->RegisterNumber));
break;
case 's':
regBindings.emplace_back(
std::make_pair(DXIL::ResourceClass::Sampler, ra->RegisterNumber));
break;
}
break;
}
case hlsl::UnusualAnnotation::UA_SemanticDecl:
// skip semantic on constant
break;
case hlsl::UnusualAnnotation::UA_PayloadAccessQualifier:
      // skip payload qualifiers on constant
break;
}
}
unsigned LowerBound = userOffset ? offset : UINT_MAX;
AddConstantToCB(cast<llvm::GlobalVariable>(constVal),
constDecl->getQualifiedNameAsString(), constDecl->getType(),
LowerBound, CB);
// Save fieldAnnotation for the const var.
DxilFieldAnnotation fieldAnnotation;
if (userOffset)
fieldAnnotation.SetCBufferOffset(offset);
QualType Ty = constDecl->getType();
// Get the nested element type.
if (Ty->isArrayType()) {
while (const ConstantArrayType *arrayTy =
CGM.getContext().getAsConstantArrayType(Ty)) {
Ty = arrayTy->getElementType();
}
}
bool bDefaultRowMajor = m_pHLModule->GetHLOptions().bDefaultRowMajor;
ConstructFieldAttributedAnnotation(fieldAnnotation, Ty, bDefaultRowMajor);
m_ConstVarAnnotationMap[constVal] = fieldAnnotation;
}
namespace {
unique_ptr<HLCBuffer> CreateHLCBuf(NamedDecl *D, bool bIsView, bool bIsTBuf) {
unique_ptr<HLCBuffer> CB = llvm::make_unique<HLCBuffer>(bIsView, bIsTBuf);
// setup the CB
CB->SetGlobalSymbol(nullptr);
CB->SetGlobalName(D->getNameAsString());
CB->SetSpaceID(UINT_MAX);
CB->SetLowerBound(UINT_MAX);
if (bIsTBuf)
CB->SetKind(DXIL::ResourceKind::TBuffer);
InitFromUnusualAnnotations(*CB, *D);
return CB;
}
} // namespace
void CGMSHLSLRuntime::AddCBufferDecls(DeclContext *DC, HLCBuffer *CB) {
for (Decl *it : DC->decls()) {
if (VarDecl *constDecl = dyn_cast<VarDecl>(it)) {
AddConstant(constDecl, *CB);
} else if (isa<EmptyDecl>(*it)) {
// Nothing to do for this declaration.
} else if (isa<CXXRecordDecl>(it)) {
// Nothing to do for this declaration.
} else if (isa<FunctionDecl>(it)) {
      // A function within a cbuffer is effectively a top-level function,
      // as it only refers to globally scoped declarations.
CGM.EmitTopLevelDecl(it);
} else if (NamespaceDecl *ND = dyn_cast<NamespaceDecl>(it)) {
AddCBufferDecls(ND, CB);
} else {
HLSLBufferDecl *inner = dyn_cast<HLSLBufferDecl>(it);
if (!inner) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"invalid decl inside cbuffer");
Diags.Report(it->getLocation(), DiagID);
return;
}
GetOrCreateCBuffer(inner);
}
}
}
uint32_t CGMSHLSLRuntime::AddCBuffer(HLSLBufferDecl *D) {
unique_ptr<HLCBuffer> CB = CreateHLCBuf(D, false, !D->isCBuffer());
  // Add the constants declared in the buffer.
CB->SetRangeSize(1);
AddCBufferDecls(D, CB.get());
CB->SetID(m_pHLModule->GetCBuffers().size());
return m_pHLModule->AddCBuffer(std::move(CB));
}
uint32_t CGMSHLSLRuntime::AddConstantBufferView(VarDecl *D) {
QualType Ty = D->getType();
unique_ptr<HLCBuffer> CB =
CreateHLCBuf(D, true, IsTextureBufferView(Ty, CGM.getContext()));
CB->SetRangeSize(1);
if (Ty->isArrayType()) {
unsigned incompleteSize = 0;
    // The outermost array dimension may be unbounded.
if (Ty->isIncompleteArrayType()) {
Ty = QualType(Ty->getArrayElementTypeNoTypeQual(), 0);
incompleteSize = UINT_MAX;
}
DXASSERT(!Ty->isIncompleteArrayType(),
"Unbound array found after first axis");
unsigned arraySize = 1;
while (Ty->isArrayType()) {
Ty = Ty->getCanonicalTypeUnqualified();
const ConstantArrayType *AT = cast<ConstantArrayType>(Ty);
arraySize *= AT->getSize().getLimitedValue();
Ty = AT->getElementType();
}
CB->SetRangeSize(std::max(arraySize, incompleteSize));
CB->SetIsArray();
}
QualType ResultTy = hlsl::GetHLSLResourceResultType(Ty);
  // Search the defined structure for resource objects and fail if any are
  // found.
if (CB->GetRangeSize() > 1 && IsResourceInType(CGM.getContext(), ResultTy)) {
DiagnosticsEngine &Diags = CGM.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error,
"object types not supported in cbuffer/tbuffer view arrays.");
Diags.Report(D->getLocation(), DiagID);
return UINT_MAX;
}
  // Offsets are not allowed for CBVs.
unsigned LowerBound = 0;
GlobalVariable *GV = cast<GlobalVariable>(CGM.GetAddrOfGlobalVar(D));
AddConstantToCB(GV, D->getName(), ResultTy, LowerBound, *CB.get());
CB->SetResultType(CGM.getTypes().ConvertType(ResultTy));
CB->SetID(m_pHLModule->GetCBuffers().size());
return m_pHLModule->AddCBuffer(std::move(CB));
}
HLCBuffer &CGMSHLSLRuntime::GetOrCreateCBuffer(HLSLBufferDecl *D) {
if (constantBufMap.count(D) != 0) {
uint32_t cbIndex = constantBufMap[D];
return *static_cast<HLCBuffer *>(&(m_pHLModule->GetCBuffer(cbIndex)));
}
uint32_t cbID = AddCBuffer(D);
constantBufMap[D] = cbID;
return *static_cast<HLCBuffer *>(&(m_pHLModule->GetCBuffer(cbID)));
}
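// Finalize module codegen: resolve intrinsics and entries, allocate constant
// buffers, lower node and RayQuery constructs, wire up global ctors per
// entry, and fix up linkage before the high-level DXIL module is recorded.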
void CGMSHLSLRuntime::FinishCodeGen() {
HLModule &HLM = *m_pHLModule;
llvm::Module &M = TheModule;
  // Do this before CloneShaderEntry and TranslateRayQueryConstructor to avoid
  // updating valToResPropertiesMap for cloned instructions.
FinishIntrinsics(HLM, m_IntrinsicMap, objectProperties);
bool bWaveEnabledStage = m_pHLModule->GetShaderModel()->IsPS() ||
m_pHLModule->GetShaderModel()->IsCS() ||
m_pHLModule->GetShaderModel()->IsLib();
// Handle lang extensions if provided.
if (CGM.getCodeGenOpts().HLSLExtensionsCodegen) {
ExtensionCodeGen(HLM, CGM);
}
StructurizeMultiRet(M, CGM, m_ScopeMap, bWaveEnabledStage, m_DxBreaks);
FinishEntries(HLM, Entry, CGM, entryFunctionMap, HSEntryPatchConstantFuncAttr,
patchConstantFunctionMap, patchConstantFunctionPropsMap);
ReplaceConstStaticGlobals(staticConstGlobalInitListMap,
staticConstGlobalCtorMap);
// Create copy for clip plane.
if (!clipPlaneFuncList.empty()) {
FinishClipPlane(HLM, clipPlaneFuncList, debugInfoMap, CGM);
}
// Add Reg bindings for resource in cb.
AddRegBindingsForResourceInConstantBuffer(HLM, constantRegBindingMap);
// Allocate constant buffers.
// Create Global variable and type annotation for each CBuffer.
FinishCBuffer(HLM, CBufferType, m_ConstVarAnnotationMap);
// Translate calls to RayQuery constructor into hl Allocate calls
TranslateRayQueryConstructor(HLM);
// Lower Node Input and Output Parameters to Node Handles
TranslateInputNodeRecordArgToHandle(HLM, NodeInputRecordParams);
TranslateNodeOutputParamToHandle(HLM, NodeOutputParams);
bool bIsLib = HLM.GetShaderModel()->IsLib();
StringRef GlobalCtorName = "llvm.global_ctors";
llvm::SmallVector<llvm::Function *, 2> Ctors;
CollectCtorFunctions(M, GlobalCtorName, Ctors, CGM);
if (!Ctors.empty()) {
if (!bIsLib) {
// TODO: Is this also needed for "llvm.global_dtors"?
Function *patchConstantFn = nullptr;
if (HLM.GetShaderModel()->IsHS()) {
patchConstantFn = HLM.GetPatchConstantFunction();
}
ProcessCtorFunctions(M, Ctors, Entry.Func, patchConstantFn);
// remove the GV
if (GlobalVariable *GV = M.getGlobalVariable(GlobalCtorName))
GV->eraseFromParent();
} else {
// Call ctors for each entry.
DenseSet<Function *> processedPatchConstantFnSet;
for (auto &Entry : entryFunctionMap) {
Function *F = Entry.second.Func;
Function *patchConstFunc = nullptr;
auto AttrIter = HSEntryPatchConstantFuncAttr.find(F);
if (AttrIter != HSEntryPatchConstantFuncAttr.end()) {
StringRef funcName = AttrIter->second->getFunctionName();
auto PatchEntry = patchConstantFunctionMap.find(funcName);
if (PatchEntry != patchConstantFunctionMap.end() &&
PatchEntry->second.NumOverloads == 1) {
patchConstFunc = PatchEntry->second.Func;
// Each patchConstFunc should only be processed once.
if (patchConstFunc &&
processedPatchConstantFnSet.count(patchConstFunc) == 0)
processedPatchConstantFnSet.insert(patchConstFunc);
else
patchConstFunc = nullptr;
}
}
ProcessCtorFunctions(M, Ctors, F, patchConstFunc);
}
}
}
UpdateLinkage(HLM, CGM, m_ExportMap, entryFunctionMap,
patchConstantFunctionMap);
// Do simple transforms to make later lowering passes easier.
SimpleTransformForHLDXIR(&M);
// Add dx.break function and make appropriate breaks conditional on it.
AddDxBreak(M, m_DxBreaks);
// At this point, we have a high-level DXIL module - record this.
SetPauseResumePasses(*m_pHLModule->GetModule(), "hlsl-hlemit",
"hlsl-hlensure");
}
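// Emit a call for an HLSL builtin. If the result is an HL intrinsic call whose
// operands are all immediate constants (half excluded), try to fold it now.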
RValue CGMSHLSLRuntime::EmitHLSLBuiltinCallExpr(CodeGenFunction &CGF,
const FunctionDecl *FD,
const CallExpr *E,
ReturnValueSlot ReturnValue) {
const Decl *TargetDecl = E->getCalleeDecl();
llvm::Value *Callee = CGF.EmitScalarExpr(E->getCallee());
RValue RV = CGF.EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
TargetDecl);
if (RV.isScalar() && RV.getScalarVal() != nullptr) {
if (CallInst *CI = dyn_cast<CallInst>(RV.getScalarVal())) {
Function *F = CI->getCalledFunction();
HLOpcodeGroup group = hlsl::GetHLOpcodeGroup(F);
if (group == HLOpcodeGroup::HLIntrinsic) {
bool allOperandImm = true;
for (auto &operand : CI->arg_operands()) {
bool isImm = isa<ConstantInt>(operand) || isa<ConstantFP>(operand) ||
isa<ConstantAggregateZero>(operand) ||
isa<ConstantDataVector>(operand);
if (!isImm) {
allOperandImm = false;
break;
} else if (operand->getType()->isHalfTy()) {
// Half evaluation is not supported yet.
allOperandImm = false;
break;
}
}
if (allOperandImm) {
unsigned intrinsicOpcode;
StringRef intrinsicGroup;
hlsl::GetIntrinsicOp(FD, intrinsicOpcode, intrinsicGroup);
IntrinsicOp opcode = static_cast<IntrinsicOp>(intrinsicOpcode);
if (Value *Result =
TryEvalIntrinsic(CI, opcode, CGM.getLangOpts().HLSLVersion)) {
RV = RValue::get(Result);
}
}
}
}
}
return RV;
}
static HLOpcodeGroup GetHLOpcodeGroup(const clang::Stmt::StmtClass stmtClass) {
switch (stmtClass) {
case Stmt::CStyleCastExprClass:
case Stmt::ImplicitCastExprClass:
case Stmt::CXXFunctionalCastExprClass:
return HLOpcodeGroup::HLCast;
case Stmt::InitListExprClass:
return HLOpcodeGroup::HLInit;
case Stmt::BinaryOperatorClass:
case Stmt::CompoundAssignOperatorClass:
return HLOpcodeGroup::HLBinOp;
case Stmt::UnaryOperatorClass:
return HLOpcodeGroup::HLUnOp;
case Stmt::ExtMatrixElementExprClass:
return HLOpcodeGroup::HLSubscript;
case Stmt::CallExprClass:
return HLOpcodeGroup::HLIntrinsic;
case Stmt::ConditionalOperatorClass:
return HLOpcodeGroup::HLSelect;
default:
llvm_unreachable("unsupported operation");
}
}
// NOTE: This table must match BinaryOperator::Opcode
static const HLBinaryOpcode BinaryOperatorKindMap[] = {
HLBinaryOpcode::Invalid, // PtrMemD
HLBinaryOpcode::Invalid, // PtrMemI
HLBinaryOpcode::Mul, HLBinaryOpcode::Div, HLBinaryOpcode::Rem,
HLBinaryOpcode::Add, HLBinaryOpcode::Sub, HLBinaryOpcode::Shl,
HLBinaryOpcode::Shr, HLBinaryOpcode::LT, HLBinaryOpcode::GT,
HLBinaryOpcode::LE, HLBinaryOpcode::GE, HLBinaryOpcode::EQ,
HLBinaryOpcode::NE, HLBinaryOpcode::And, HLBinaryOpcode::Xor,
HLBinaryOpcode::Or, HLBinaryOpcode::LAnd, HLBinaryOpcode::LOr,
HLBinaryOpcode::Invalid, // Assign,
// The assign part is done by matrix store
HLBinaryOpcode::Mul, // MulAssign
HLBinaryOpcode::Div, // DivAssign
HLBinaryOpcode::Rem, // RemAssign
HLBinaryOpcode::Add, // AddAssign
HLBinaryOpcode::Sub, // SubAssign
HLBinaryOpcode::Shl, // ShlAssign
HLBinaryOpcode::Shr, // ShrAssign
HLBinaryOpcode::And, // AndAssign
HLBinaryOpcode::Xor, // XorAssign
HLBinaryOpcode::Or, // OrAssign
HLBinaryOpcode::Invalid, // Comma
};
// NOTE: This table must match UnaryOperator::Opcode
static const HLUnaryOpcode UnaryOperatorKindMap[] = {
HLUnaryOpcode::PostInc, HLUnaryOpcode::PostDec,
HLUnaryOpcode::PreInc, HLUnaryOpcode::PreDec,
HLUnaryOpcode::Invalid, // AddrOf,
HLUnaryOpcode::Invalid, // Deref,
HLUnaryOpcode::Plus, HLUnaryOpcode::Minus,
HLUnaryOpcode::Not, HLUnaryOpcode::LNot,
HLUnaryOpcode::Invalid, // Real,
HLUnaryOpcode::Invalid, // Imag,
HLUnaryOpcode::Invalid, // Extension
};
static unsigned GetHLOpcode(const Expr *E) {
switch (E->getStmtClass()) {
case Stmt::CompoundAssignOperatorClass:
case Stmt::BinaryOperatorClass: {
const clang::BinaryOperator *binOp = cast<clang::BinaryOperator>(E);
HLBinaryOpcode binOpcode = BinaryOperatorKindMap[binOp->getOpcode()];
if (HasUnsignedOpcode(binOpcode)) {
if (hlsl::IsHLSLUnsigned(binOp->getLHS()->getType())) {
binOpcode = GetUnsignedOpcode(binOpcode);
}
}
return static_cast<unsigned>(binOpcode);
}
case Stmt::UnaryOperatorClass: {
const UnaryOperator *unOp = cast<clang::UnaryOperator>(E);
HLUnaryOpcode unOpcode = UnaryOperatorKindMap[unOp->getOpcode()];
return static_cast<unsigned>(unOpcode);
}
case Stmt::ImplicitCastExprClass:
case Stmt::CStyleCastExprClass: {
const CastExpr *CE = cast<CastExpr>(E);
bool toUnsigned = hlsl::IsHLSLUnsigned(E->getType());
bool fromUnsigned = hlsl::IsHLSLUnsigned(CE->getSubExpr()->getType());
if (toUnsigned && fromUnsigned)
return static_cast<unsigned>(HLCastOpcode::UnsignedUnsignedCast);
else if (toUnsigned)
return static_cast<unsigned>(HLCastOpcode::ToUnsignedCast);
else if (fromUnsigned)
return static_cast<unsigned>(HLCastOpcode::FromUnsignedCast);
else
return static_cast<unsigned>(HLCastOpcode::DefaultCast);
}
default:
return 0;
}
}
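// Get or create the HL function for (group, opcode) and emit a call to it,
// prepending the opcode constant as the first argument.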
static Value *
EmitHLSLMatrixOperationCallImp(CGBuilderTy &Builder, HLOpcodeGroup group,
unsigned opcode, llvm::Type *RetType,
ArrayRef<Value *> paramList, llvm::Module &M) {
SmallVector<llvm::Type *, 4> paramTyList;
// Add the opcode param
llvm::Type *opcodeTy = llvm::Type::getInt32Ty(M.getContext());
paramTyList.emplace_back(opcodeTy);
for (Value *param : paramList) {
paramTyList.emplace_back(param->getType());
}
llvm::FunctionType *funcTy =
llvm::FunctionType::get(RetType, paramTyList, false);
Function *opFunc = GetOrCreateHLFunction(M, funcTy, group, opcode);
SmallVector<Value *, 4> opcodeParamList;
Value *opcodeConst = Constant::getIntegerValue(opcodeTy, APInt(32, opcode));
opcodeParamList.emplace_back(opcodeConst);
opcodeParamList.append(paramList.begin(), paramList.end());
return Builder.CreateCall(opFunc, opcodeParamList);
}
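// Emit an init: a non-void RetType means a matrix init. For arrays whose
// element count and types match, store the elements directly; otherwise fall
// back to an HL call that is lowered in a later pass.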
static Value *EmitHLSLArrayInit(CGBuilderTy &Builder, HLOpcodeGroup group,
unsigned opcode, llvm::Type *RetType,
ArrayRef<Value *> paramList, llvm::Module &M) {
// It's a matrix init.
if (!RetType->isVoidTy())
return EmitHLSLMatrixOperationCallImp(Builder, group, opcode, RetType,
paramList, M);
Value *arrayPtr = paramList[0];
llvm::ArrayType *AT =
cast<llvm::ArrayType>(arrayPtr->getType()->getPointerElementType());
// Exclude the arrayPtr operand.
unsigned paramSize = paramList.size() - 1;
// Support simple case here.
if (paramSize == AT->getArrayNumElements()) {
bool typeMatch = true;
llvm::Type *EltTy = AT->getArrayElementType();
if (EltTy->isAggregateType()) {
// Aggregate types are passed as pointers in the init list.
EltTy = llvm::PointerType::get(EltTy, 0);
}
for (unsigned i = 1; i < paramList.size(); i++) {
if (paramList[i]->getType() != EltTy) {
typeMatch = false;
break;
}
}
// Both size and type match.
if (typeMatch) {
bool isPtr = EltTy->isPointerTy();
llvm::Type *i32Ty = llvm::Type::getInt32Ty(EltTy->getContext());
Constant *zero = ConstantInt::get(i32Ty, 0);
for (unsigned i = 1; i < paramList.size(); i++) {
Constant *idx = ConstantInt::get(i32Ty, i - 1);
Value *GEP = Builder.CreateInBoundsGEP(arrayPtr, {zero, idx});
Value *Elt = paramList[i];
if (isPtr) {
Elt = Builder.CreateLoad(Elt);
}
Builder.CreateStore(Elt, GEP);
}
// The return value will not be used.
return nullptr;
}
}
// Other cases will be lowered in a later pass.
return EmitHLSLMatrixOperationCallImp(Builder, group, opcode, RetType,
paramList, M);
}
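// Recursively flatten val (a pointer or loaded value) of QualType Ty into
// scalar elements and their QualTypes, in declaration order.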
void CGMSHLSLRuntime::FlattenValToInitList(CodeGenFunction &CGF,
SmallVector<Value *, 4> &elts,
SmallVector<QualType, 4> &eltTys,
QualType Ty, Value *val) {
CGBuilderTy &Builder = CGF.Builder;
llvm::Type *valTy = val->getType();
if (valTy->isPointerTy()) {
llvm::Type *valEltTy = valTy->getPointerElementType();
if (valEltTy->isVectorTy() || valEltTy->isSingleValueType()) {
Value *ldVal = Builder.CreateLoad(val);
FlattenValToInitList(CGF, elts, eltTys, Ty, ldVal);
} else if (HLMatrixType::isa(valEltTy)) {
Value *ldVal = EmitHLSLMatrixLoad(Builder, val, Ty);
FlattenValToInitList(CGF, elts, eltTys, Ty, ldVal);
} else {
llvm::Type *i32Ty = llvm::Type::getInt32Ty(valTy->getContext());
Value *zero = ConstantInt::get(i32Ty, 0);
if (llvm::ArrayType *AT = dyn_cast<llvm::ArrayType>(valEltTy)) {
QualType EltTy = Ty->getAsArrayTypeUnsafe()->getElementType();
for (unsigned i = 0; i < AT->getArrayNumElements(); i++) {
Value *gepIdx = ConstantInt::get(i32Ty, i);
Value *EltPtr = Builder.CreateInBoundsGEP(val, {zero, gepIdx});
FlattenValToInitList(CGF, elts, eltTys, EltTy, EltPtr);
}
} else {
// Struct.
StructType *ST = cast<StructType>(valEltTy);
if (dxilutil::IsHLSLObjectType(ST)) {
// Save objects directly, like basic types.
elts.emplace_back(Builder.CreateLoad(val));
eltTys.emplace_back(Ty);
} else {
const RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
const CGRecordLayout &RL = CGF.getTypes().getCGRecordLayout(RD);
// Take care of base classes.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
if (CXXRD->getNumBases()) {
for (const auto &I : CXXRD->bases()) {
const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(
I.getType()->castAs<RecordType>()->getDecl());
if (BaseDecl->field_empty())
continue;
QualType parentTy = QualType(BaseDecl->getTypeForDecl(), 0);
unsigned i = RL.getNonVirtualBaseLLVMFieldNo(BaseDecl);
Value *gepIdx = ConstantInt::get(i32Ty, i);
Value *EltPtr = Builder.CreateInBoundsGEP(val, {zero, gepIdx});
FlattenValToInitList(CGF, elts, eltTys, parentTy, EltPtr);
}
}
}
for (auto fieldIter = RD->field_begin(), fieldEnd = RD->field_end();
fieldIter != fieldEnd; ++fieldIter) {
unsigned i = RL.getLLVMFieldNo(*fieldIter);
Value *gepIdx = ConstantInt::get(i32Ty, i);
Value *EltPtr = Builder.CreateInBoundsGEP(val, {zero, gepIdx});
FlattenValToInitList(CGF, elts, eltTys, fieldIter->getType(),
EltPtr);
}
}
}
}
} else {
if (HLMatrixType MatTy = HLMatrixType::dyn_cast(valTy)) {
llvm::Type *EltTy = MatTy.getElementTypeForReg();
// All matrix values should be row major.
// The init list is row major in scalars,
// so the order matches here; just cast to vector.
unsigned matSize = MatTy.getNumElements();
bool isRowMajor = hlsl::IsHLSLMatRowMajor(
Ty, m_pHLModule->GetHLOptions().bDefaultRowMajor);
HLCastOpcode opcode = isRowMajor ? HLCastOpcode::RowMatrixToVecCast
: HLCastOpcode::ColMatrixToVecCast;
// Cast to vector.
val = EmitHLSLMatrixOperationCallImp(
Builder, HLOpcodeGroup::HLCast, static_cast<unsigned>(opcode),
llvm::VectorType::get(EltTy, matSize), {val}, TheModule);
valTy = val->getType();
}
if (valTy->isVectorTy()) {
QualType EltTy = hlsl::GetElementTypeOrType(Ty);
unsigned vecSize = valTy->getVectorNumElements();
for (unsigned i = 0; i < vecSize; i++) {
Value *Elt = Builder.CreateExtractElement(val, i);
elts.emplace_back(Elt);
eltTys.emplace_back(EltTy);
}
} else {
DXASSERT(valTy->isSingleValueType(), "must be single value type here");
elts.emplace_back(val);
eltTys.emplace_back(Ty);
}
}
}
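// Convert a scalar or vector between numeric types. Conversions to bool
// become comparisons against zero; everything else uses a numeric cast.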
static Value *ConvertScalarOrVector(CGBuilderTy &Builder, CodeGenTypes &Types,
Value *Val, QualType SrcQualTy,
QualType DstQualTy) {
llvm::Type *SrcTy = Val->getType();
llvm::Type *DstTy = Types.ConvertType(DstQualTy);
DXASSERT(Val->getType() == Types.ConvertType(SrcQualTy) ||
Val->getType() == Types.ConvertTypeForMem(SrcQualTy),
"QualType/Value mismatch!");
DXASSERT(
(SrcTy->isIntOrIntVectorTy() || SrcTy->isFPOrFPVectorTy()) &&
(DstTy->isIntOrIntVectorTy() || DstTy->isFPOrFPVectorTy()),
"EmitNumericConversion can only be used with int/float scalars/vectors.");
if (SrcTy == DstTy)
return Val; // Valid no-op, including uint to int / int to uint
DXASSERT(SrcTy->isVectorTy()
? (DstTy->isVectorTy() && SrcTy->getVectorNumElements() ==
DstTy->getVectorNumElements())
: !DstTy->isVectorTy(),
"EmitNumericConversion can only cast between scalars or vectors of "
"matching sizes");
// Conversions to bools are comparisons
if (DstTy->getScalarSizeInBits() == 1) {
// fcmp une is what regular clang uses in C++ for (bool)f;
return SrcTy->isIntOrIntVectorTy()
? Builder.CreateICmpNE(Val, llvm::Constant::getNullValue(SrcTy),
"tobool")
: Builder.CreateFCmpUNE(Val, llvm::Constant::getNullValue(SrcTy),
"tobool");
}
// Cast necessary
auto CastOp = static_cast<Instruction::CastOps>(
HLModule::GetNumericCastOp(SrcTy, hlsl::IsHLSLUnsigned(SrcQualTy), DstTy,
hlsl::IsHLSLUnsigned(DstQualTy)));
return Builder.CreateCast(CastOp, Val, DstTy);
}
static Value *ConvertScalarOrVector(CodeGenFunction &CGF, Value *Val,
QualType SrcQualTy, QualType DstQualTy) {
return ConvertScalarOrVector(CGF.Builder, CGF.getTypes(), Val, SrcQualTy,
DstQualTy);
}
// Cast elements in the init list if they do not match the target type.
// idx is the current element index in the init list, Ty is the target type.
// TODO: Stop handling missing cast here. Handle the casting of non-scalar
// values to their destination type in init list expressions at AST level.
static void AddMissingCastOpsInInitList(SmallVector<Value *, 4> &elts,
SmallVector<QualType, 4> &eltTys,
unsigned &idx, QualType Ty,
CodeGenFunction &CGF) {
if (Ty->isArrayType()) {
const clang::ArrayType *AT = Ty->getAsArrayTypeUnsafe();
// Must be ConstantArrayType here.
unsigned arraySize =
cast<ConstantArrayType>(AT)->getSize().getLimitedValue();
QualType EltTy = AT->getElementType();
for (unsigned i = 0; i < arraySize; i++)
AddMissingCastOpsInInitList(elts, eltTys, idx, EltTy, CGF);
} else if (IsHLSLVecType(Ty)) {
QualType EltTy = GetHLSLVecElementType(Ty);
unsigned vecSize = GetHLSLVecSize(Ty);
for (unsigned i = 0; i < vecSize; i++)
AddMissingCastOpsInInitList(elts, eltTys, idx, EltTy, CGF);
} else if (IsHLSLMatType(Ty)) {
QualType EltTy = GetHLSLMatElementType(Ty);
unsigned row, col;
GetHLSLMatRowColCount(Ty, row, col);
unsigned matSize = row * col;
for (unsigned i = 0; i < matSize; i++)
AddMissingCastOpsInInitList(elts, eltTys, idx, EltTy, CGF);
} else if (Ty->isRecordType()) {
if (dxilutil::IsHLSLObjectType(CGF.ConvertType(Ty))) {
// Skip hlsl object.
idx++;
} else {
const RecordType *RT = Ty->getAs<RecordType>();
RecordDecl *RD = RT->getDecl();
// Take care of base classes.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
if (CXXRD->getNumBases()) {
for (const auto &I : CXXRD->bases()) {
const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(
I.getType()->castAs<RecordType>()->getDecl());
if (BaseDecl->field_empty())
continue;
QualType parentTy = QualType(BaseDecl->getTypeForDecl(), 0);
AddMissingCastOpsInInitList(elts, eltTys, idx, parentTy, CGF);
}
}
}
for (FieldDecl *field : RD->fields())
AddMissingCastOpsInInitList(elts, eltTys, idx, field->getType(), CGF);
}
} else {
// Basic type.
elts[idx] = ConvertScalarOrVector(CGF, elts[idx], eltTys[idx], Ty);
idx++;
}
}
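// Recursively store the flattened init-list elements starting at elts[idx]
// into DestPtr, honoring record layout and matrix orientation.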
static void StoreInitListToDestPtr(Value *DestPtr,
SmallVector<Value *, 4> &elts, unsigned &idx,
QualType Type, bool bDefaultRowMajor,
CodeGenFunction &CGF, llvm::Module &M) {
CodeGenTypes &Types = CGF.getTypes();
CGBuilderTy &Builder = CGF.Builder;
llvm::Type *Ty = DestPtr->getType()->getPointerElementType();
if (Ty->isVectorTy()) {
llvm::Type *RegTy = CGF.ConvertType(Type);
Value *Result = UndefValue::get(RegTy);
for (unsigned i = 0; i < RegTy->getVectorNumElements(); i++)
Result = Builder.CreateInsertElement(Result, elts[idx + i], i);
Result = CGF.EmitToMemory(Result, Type);
Builder.CreateStore(Result, DestPtr);
idx += Ty->getVectorNumElements();
} else if (HLMatrixType MatTy = HLMatrixType::dyn_cast(Ty)) {
bool isRowMajor = hlsl::IsHLSLMatRowMajor(Type, bDefaultRowMajor);
std::vector<Value *> matInitList(MatTy.getNumElements());
for (unsigned c = 0; c < MatTy.getNumColumns(); c++) {
for (unsigned r = 0; r < MatTy.getNumRows(); r++) {
unsigned matIdx = c * MatTy.getNumRows() + r;
matInitList[matIdx] = elts[idx + matIdx];
}
}
idx += MatTy.getNumElements();
Value *matVal =
EmitHLSLMatrixOperationCallImp(Builder, HLOpcodeGroup::HLInit,
/*opcode*/ 0, Ty, matInitList, M);
// matVal returned from HLInit is row major.
// If DestPtr is row major, just store it directly.
if (!isRowMajor) {
// ColMatStore needs a col major value.
// Cast row major matrix into col major.
// Then store it.
Value *colMatVal = EmitHLSLMatrixOperationCallImp(
Builder, HLOpcodeGroup::HLCast,
static_cast<unsigned>(HLCastOpcode::RowMatrixToColMatrix), Ty,
{matVal}, M);
EmitHLSLMatrixOperationCallImp(
Builder, HLOpcodeGroup::HLMatLoadStore,
static_cast<unsigned>(HLMatLoadStoreOpcode::ColMatStore), Ty,
{DestPtr, colMatVal}, M);
} else {
EmitHLSLMatrixOperationCallImp(
Builder, HLOpcodeGroup::HLMatLoadStore,
static_cast<unsigned>(HLMatLoadStoreOpcode::RowMatStore), Ty,
{DestPtr, matVal}, M);
}
} else if (Ty->isStructTy()) {
if (dxilutil::IsHLSLObjectType(Ty)) {
Builder.CreateStore(elts[idx], DestPtr);
idx++;
} else {
Constant *zero = Builder.getInt32(0);
const RecordType *RT = Type->getAs<RecordType>();
RecordDecl *RD = RT->getDecl();
const CGRecordLayout &RL = Types.getCGRecordLayout(RD);
// Take care of base classes.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
if (CXXRD->getNumBases()) {
for (const auto &I : CXXRD->bases()) {
const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(
I.getType()->castAs<RecordType>()->getDecl());
if (BaseDecl->field_empty())
continue;
QualType parentTy = QualType(BaseDecl->getTypeForDecl(), 0);
unsigned i = RL.getNonVirtualBaseLLVMFieldNo(BaseDecl);
Constant *gepIdx = Builder.getInt32(i);
Value *GEP = Builder.CreateInBoundsGEP(DestPtr, {zero, gepIdx});
StoreInitListToDestPtr(GEP, elts, idx, parentTy, bDefaultRowMajor,
CGF, M);
}
}
}
for (FieldDecl *field : RD->fields()) {
unsigned i = RL.getLLVMFieldNo(field);
Constant *gepIdx = Builder.getInt32(i);
Value *GEP = Builder.CreateInBoundsGEP(DestPtr, {zero, gepIdx});
StoreInitListToDestPtr(GEP, elts, idx, field->getType(),
bDefaultRowMajor, CGF, M);
}
}
} else if (Ty->isArrayTy()) {
Constant *zero = Builder.getInt32(0);
QualType EltType = Type->getAsArrayTypeUnsafe()->getElementType();
for (unsigned i = 0; i < Ty->getArrayNumElements(); i++) {
Constant *gepIdx = Builder.getInt32(i);
Value *GEP = Builder.CreateInBoundsGEP(DestPtr, {zero, gepIdx});
StoreInitListToDestPtr(GEP, elts, idx, EltType, bDefaultRowMajor, CGF, M);
}
} else {
DXASSERT(Ty->isSingleValueType(), "invalid type");
llvm::Type *i1Ty = Builder.getInt1Ty();
Value *V = elts[idx];
if (V->getType() == i1Ty &&
DestPtr->getType()->getPointerElementType() != i1Ty) {
V = Builder.CreateZExt(V, DestPtr->getType()->getPointerElementType());
}
Builder.CreateStore(V, DestPtr);
idx++;
}
}
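// Flatten every initializer of an init list into scalar values and their
// QualTypes, recursing into nested init lists.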
void CGMSHLSLRuntime::ScanInitList(CodeGenFunction &CGF, InitListExpr *E,
SmallVector<Value *, 4> &EltValList,
SmallVector<QualType, 4> &EltTyList) {
unsigned NumInitElements = E->getNumInits();
for (unsigned i = 0; i != NumInitElements; ++i) {
Expr *init = E->getInit(i);
QualType iType = init->getType();
if (InitListExpr *initList = dyn_cast<InitListExpr>(init)) {
ScanInitList(CGF, initList, EltValList, EltTyList);
} else if (CodeGenFunction::hasScalarEvaluationKind(iType)) {
llvm::Value *initVal = CGF.EmitScalarExpr(init);
FlattenValToInitList(CGF, EltValList, EltTyList, iType, initVal);
} else {
AggValueSlot Slot =
CGF.CreateAggTemp(init->getType(), "Agg.InitList.tmp");
CGF.EmitAggExpr(init, Slot);
llvm::Value *aggPtr = Slot.getAddr();
FlattenValToInitList(CGF, EltValList, EltTyList, iType, aggPtr);
}
}
}
// Does the type of E match Ty?
static bool ExpTypeMatch(Expr *E, QualType Ty, ASTContext &Ctx,
CodeGenTypes &Types) {
if (InitListExpr *initList = dyn_cast<InitListExpr>(E)) {
unsigned NumInitElements = initList->getNumInits();
// Skip vector and matrix types.
if (Ty->isVectorType())
return false;
if (hlsl::IsHLSLVecMatType(Ty))
return false;
if (Ty->isStructureOrClassType()) {
RecordDecl *record = Ty->castAs<RecordType>()->getDecl();
bool bMatch = true;
unsigned i = 0;
for (auto it = record->field_begin(), end = record->field_end();
it != end; it++) {
if (i == NumInitElements) {
bMatch = false;
break;
}
Expr *init = initList->getInit(i++);
QualType EltTy = it->getType();
bMatch &= ExpTypeMatch(init, EltTy, Ctx, Types);
if (!bMatch)
break;
}
bMatch &= i == NumInitElements;
if (bMatch && initList->getType()->isVoidType()) {
initList->setType(Ty);
}
return bMatch;
} else if (Ty->isArrayType() && !Ty->isIncompleteArrayType()) {
const ConstantArrayType *AT = Ctx.getAsConstantArrayType(Ty);
QualType EltTy = AT->getElementType();
unsigned size = AT->getSize().getZExtValue();
if (size != NumInitElements)
return false;
bool bMatch = true;
for (unsigned i = 0; i != NumInitElements; ++i) {
Expr *init = initList->getInit(i);
bMatch &= ExpTypeMatch(init, EltTy, Ctx, Types);
if (!bMatch)
break;
}
if (bMatch && initList->getType()->isVoidType()) {
initList->setType(Ty);
}
return bMatch;
} else {
return false;
}
} else {
llvm::Type *ExpTy = Types.ConvertType(E->getType());
llvm::Type *TargetTy = Types.ConvertType(Ty);
return ExpTy == TargetTy;
}
}
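// An init list is trivial when its shape matches the target type exactly.
// For static const globals, this also collects the constant source pointers
// used to build the initializer later.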
bool CGMSHLSLRuntime::IsTrivalInitListExpr(CodeGenFunction &CGF,
InitListExpr *E) {
QualType Ty = E->getType();
bool result = ExpTypeMatch(E, Ty, CGF.getContext(), CGF.getTypes());
if (result) {
auto iter = staticConstGlobalInitMap.find(E);
if (iter != staticConstGlobalInitMap.end()) {
GlobalVariable *GV = iter->second;
auto &InitConstants = staticConstGlobalInitListMap[GV];
// Add Constant to InitList.
for (unsigned i = 0; i < E->getNumInits(); i++) {
Expr *Expr = E->getInit(i);
if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Expr)) {
if (Cast->getCastKind() == CK_LValueToRValue) {
Expr = Cast->getSubExpr();
}
}
// Only do this on lvalues; if it is not an lvalue, it will not be constant
// anyway.
if (Expr->isLValue()) {
LValue LV = CGF.EmitLValue(Expr);
if (LV.isSimple()) {
Constant *SrcPtr = dyn_cast<Constant>(LV.getAddress());
if (SrcPtr && !isa<UndefValue>(SrcPtr)) {
InitConstants.emplace_back(SrcPtr);
continue;
}
}
}
// Only the simple LValue and constant pointer case is supported.
// Other cases just take the normal path.
InitConstants.clear();
break;
}
if (InitConstants.empty())
staticConstGlobalInitListMap.erase(GV);
else
staticConstGlobalCtorMap[GV] = CGF.CurFn;
}
}
return result;
}
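// Emit an init list: flatten the initializers, add any missing casts, then
// either store into DestPtr or build a vector/matrix value.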
Value *
CGMSHLSLRuntime::EmitHLSLInitListExpr(CodeGenFunction &CGF, InitListExpr *E,
// The DestPtr when emitting aggregate
// init; for the normal case it will be null.
Value *DestPtr) {
if (DestPtr && E->getNumInits() == 1) {
llvm::Type *ExpTy = CGF.ConvertType(E->getType());
llvm::Type *TargetTy = CGF.ConvertType(E->getInit(0)->getType());
if (ExpTy == TargetTy) {
Expr *Expr = E->getInit(0);
LValue LV = CGF.EmitLValue(Expr);
if (LV.isSimple()) {
Value *SrcPtr = LV.getAddress();
SmallVector<Value *, 4> idxList;
EmitHLSLAggregateCopy(CGF, SrcPtr, DestPtr, idxList, Expr->getType(),
E->getType(), SrcPtr->getType());
return nullptr;
}
}
}
SmallVector<Value *, 4> EltValList;
SmallVector<QualType, 4> EltTyList;
ScanInitList(CGF, E, EltValList, EltTyList);
QualType ResultTy = E->getType();
unsigned idx = 0;
// Create casts if needed.
AddMissingCastOpsInInitList(EltValList, EltTyList, idx, ResultTy, CGF);
DXASSERT(idx == EltValList.size(), "size must match");
llvm::Type *RetTy = CGF.ConvertType(ResultTy);
if (DestPtr) {
SmallVector<Value *, 4> ParamList;
DXASSERT_NOMSG(RetTy->isAggregateType());
ParamList.emplace_back(DestPtr);
ParamList.append(EltValList.begin(), EltValList.end());
idx = 0;
bool bDefaultRowMajor = m_pHLModule->GetHLOptions().bDefaultRowMajor;
StoreInitListToDestPtr(DestPtr, EltValList, idx, ResultTy, bDefaultRowMajor,
CGF, TheModule);
return nullptr;
}
if (IsHLSLVecType(ResultTy)) {
Value *Result = UndefValue::get(RetTy);
for (unsigned i = 0; i < RetTy->getVectorNumElements(); i++)
Result = CGF.Builder.CreateInsertElement(Result, EltValList[i], i);
return Result;
} else {
// Must be matrix here.
DXASSERT(IsHLSLMatType(ResultTy), "must be matrix type here.");
return EmitHLSLMatrixOperationCallImp(CGF.Builder, HLOpcodeGroup::HLInit,
/*opcode*/ 0, RetTy, EltValList,
TheModule);
}
}
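// Recursively flatten a constant of type QualTy into scalar constants and
// their QualTypes, respecting matrix orientation and record layout.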
static void FlatConstToList(CodeGenTypes &Types, bool bDefaultRowMajor,
Constant *C, QualType QualTy,
SmallVectorImpl<Constant *> &EltVals,
SmallVectorImpl<QualType> &EltQualTys) {
llvm::Type *Ty = C->getType();
DXASSERT(Types.ConvertTypeForMem(QualTy) == Ty, "QualType/Type mismatch!");
if (llvm::VectorType *VecTy = dyn_cast<llvm::VectorType>(Ty)) {
DXASSERT(hlsl::IsHLSLVecType(QualTy), "QualType/Type mismatch!");
QualType VecElemQualTy = hlsl::GetHLSLVecElementType(QualTy);
for (unsigned i = 0; i < VecTy->getNumElements(); i++) {
EltVals.emplace_back(C->getAggregateElement(i));
EltQualTys.emplace_back(VecElemQualTy);
}
} else if (HLMatrixType::isa(Ty)) {
DXASSERT(hlsl::IsHLSLMatType(QualTy), "QualType/Type mismatch!");
// matrix type is struct { [rowcount x <colcount x T>] };
// Strip the struct level here.
Constant *RowArrayVal = C->getAggregateElement((unsigned)0);
QualType MatEltQualTy = hlsl::GetHLSLMatElementType(QualTy);
unsigned RowCount, ColCount;
hlsl::GetHLSLMatRowColCount(QualTy, RowCount, ColCount);
// Get all the elements from the array of row vectors.
// Matrices are never in memory representation so convert as needed.
SmallVector<Constant *, 16> MatElts;
for (unsigned r = 0; r < RowCount; ++r) {
Constant *RowVec = RowArrayVal->getAggregateElement(r);
for (unsigned c = 0; c < ColCount; ++c) {
Constant *MatElt = RowVec->getAggregateElement(c);
if (MatEltQualTy->isBooleanType()) {
DXASSERT(
MatElt->getType()->isIntegerTy(1),
"Matrix elements should be in their register representation.");
MatElt = llvm::ConstantExpr::getZExt(
MatElt, Types.ConvertTypeForMem(MatEltQualTy));
}
MatElts.emplace_back(MatElt);
}
}
// Return the elements in the order respecting the orientation.
// Constant initializers are used as the initial value for static variables,
// which live in memory. This is why they have to respect memory packing
// order.
bool IsRowMajor = hlsl::IsHLSLMatRowMajor(QualTy, bDefaultRowMajor);
for (unsigned r = 0; r < RowCount; ++r) {
for (unsigned c = 0; c < ColCount; ++c) {
unsigned Idx = IsRowMajor ? (r * ColCount + c) : (c * RowCount + r);
EltVals.emplace_back(MatElts[Idx]);
EltQualTys.emplace_back(MatEltQualTy);
}
}
} else if (const clang::ConstantArrayType *ClangArrayTy =
Types.getContext().getAsConstantArrayType(QualTy)) {
QualType ArrayEltQualTy = ClangArrayTy->getElementType();
uint64_t ArraySize = ClangArrayTy->getSize().getLimitedValue();
DXASSERT(cast<llvm::ArrayType>(Ty)->getArrayNumElements() == ArraySize,
"QualType/Type mismatch!");
for (unsigned i = 0; i < ArraySize; i++) {
FlatConstToList(Types, bDefaultRowMajor, C->getAggregateElement(i),
ArrayEltQualTy, EltVals, EltQualTys);
}
} else if (const clang::RecordType *RecordTy =
QualTy->getAs<clang::RecordType>()) {
DXASSERT(dyn_cast<llvm::StructType>(Ty) != nullptr,
"QualType/Type mismatch!");
RecordDecl *RecordDecl = RecordTy->getDecl();
const CGRecordLayout &RL = Types.getCGRecordLayout(RecordDecl);
// Take care of base classes.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RecordDecl)) {
if (CXXRD->getNumBases()) {
for (const auto &I : CXXRD->bases()) {
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
if (BaseDecl->field_empty())
continue;
QualType BaseQualTy = QualType(BaseDecl->getTypeForDecl(), 0);
unsigned BaseFieldIdx = RL.getNonVirtualBaseLLVMFieldNo(BaseDecl);
FlatConstToList(Types, bDefaultRowMajor,
C->getAggregateElement(BaseFieldIdx), BaseQualTy,
EltVals, EltQualTys);
}
}
}
for (auto FieldIt = RecordDecl->field_begin(),
fieldEnd = RecordDecl->field_end();
FieldIt != fieldEnd; ++FieldIt) {
unsigned FieldIndex = RL.getLLVMFieldNo(*FieldIt);
FlatConstToList(Types, bDefaultRowMajor,
C->getAggregateElement(FieldIndex), FieldIt->getType(),
EltVals, EltQualTys);
}
} else {
// At this point, we should have scalars in their memory representation
DXASSERT_NOMSG(QualTy->isBuiltinType());
EltVals.emplace_back(C);
EltQualTys.emplace_back(QualTy);
}
}
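// Try to flatten an init list into scalar constants. Returns false when any
// initializer cannot be evaluated as a constant.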
static bool ScanConstInitList(CodeGenModule &CGM, bool bDefaultRowMajor,
InitListExpr *InitList,
SmallVectorImpl<Constant *> &EltVals,
SmallVectorImpl<QualType> &EltQualTys) {
unsigned NumInitElements = InitList->getNumInits();
for (unsigned i = 0; i != NumInitElements; ++i) {
Expr *InitExpr = InitList->getInit(i);
QualType InitQualTy = InitExpr->getType();
if (InitListExpr *SubInitList = dyn_cast<InitListExpr>(InitExpr)) {
if (!ScanConstInitList(CGM, bDefaultRowMajor, SubInitList, EltVals,
EltQualTys))
return false;
} else if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(InitExpr)) {
if (VarDecl *Var = dyn_cast<VarDecl>(DeclRef->getDecl())) {
if (!Var->hasInit())
return false;
if (Constant *InitVal = CGM.EmitConstantInit(*Var)) {
FlatConstToList(CGM.getTypes(), bDefaultRowMajor, InitVal, InitQualTy,
EltVals, EltQualTys);
} else {
return false;
}
} else {
return false;
}
} else if (hlsl::IsHLSLMatType(InitQualTy)) {
return false;
} else if (CodeGenFunction::hasScalarEvaluationKind(InitQualTy)) {
if (Constant *InitVal = CGM.EmitConstantExpr(InitExpr, InitQualTy)) {
FlatConstToList(CGM.getTypes(), bDefaultRowMajor, InitVal, InitQualTy,
EltVals, EltQualTys);
} else {
return false;
}
} else {
return false;
}
}
return true;
}
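// The BuildConst* helpers below consume the flattened constants produced by
// ScanConstInitList and rebuild a typed constant initializer, recursing
// through vectors, arrays, matrices, and structs.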
static Constant *BuildConstInitializer(CodeGenTypes &Types,
bool bDefaultRowMajor, QualType QualTy,
bool MemRepr,
SmallVectorImpl<Constant *> &EltVals,
SmallVectorImpl<QualType> &EltQualTys,
unsigned &EltIdx);
static Constant *BuildConstMatrix(CodeGenTypes &Types, bool bDefaultRowMajor,
QualType QualTy,
SmallVectorImpl<Constant *> &EltVals,
SmallVectorImpl<QualType> &EltQualTys,
unsigned &EltIdx) {
QualType MatEltTy = hlsl::GetHLSLMatElementType(QualTy);
unsigned RowCount, ColCount;
hlsl::GetHLSLMatRowColCount(QualTy, RowCount, ColCount);
bool IsRowMajor = hlsl::IsHLSLMatRowMajor(QualTy, bDefaultRowMajor);
// Save initializer elements first.
// Matrix initializer is row major.
SmallVector<Constant *, 16> RowMajorMatElts;
for (unsigned i = 0; i < RowCount * ColCount; i++) {
// Matrix elements are never in their memory representation,
// to preserve type information for later lowering.
bool MemRepr = false;
RowMajorMatElts.emplace_back(
BuildConstInitializer(Types, bDefaultRowMajor, MatEltTy, MemRepr,
EltVals, EltQualTys, EltIdx));
}
SmallVector<Constant *, 16> FinalMatElts;
if (IsRowMajor) {
FinalMatElts = RowMajorMatElts;
} else {
// Cast row major to col major.
for (unsigned c = 0; c < ColCount; c++) {
for (unsigned r = 0; r < RowCount; r++) {
FinalMatElts.emplace_back(RowMajorMatElts[r * ColCount + c]);
}
}
}
// The type is vector<element, col>[row].
SmallVector<Constant *, 4> Rows;
unsigned idx = 0;
for (unsigned r = 0; r < RowCount; r++) {
SmallVector<Constant *, 4> RowElts;
for (unsigned c = 0; c < ColCount; c++) {
RowElts.emplace_back(FinalMatElts[idx++]);
}
Rows.emplace_back(llvm::ConstantVector::get(RowElts));
}
Constant *RowArray = llvm::ConstantArray::get(
llvm::ArrayType::get(Rows[0]->getType(), Rows.size()), Rows);
return llvm::ConstantStruct::get(
cast<llvm::StructType>(Types.ConvertType(QualTy)), RowArray);
}
static Constant *BuildConstStruct(CodeGenTypes &Types, bool bDefaultRowMajor,
QualType QualTy,
SmallVectorImpl<Constant *> &EltVals,
SmallVectorImpl<QualType> &EltQualTys,
unsigned &EltIdx) {
const RecordDecl *Record = QualTy->castAs<RecordType>()->getDecl();
bool MemRepr = true; // Structs are always in their memory representation
SmallVector<Constant *, 4> FieldVals;
if (const CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(Record)) {
if (CXXRecord->getNumBases()) {
// Add base as field.
for (const auto &BaseSpec : CXXRecord->bases()) {
const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(
BaseSpec.getType()->castAs<RecordType>()->getDecl());
// Skip empty struct.
if (BaseDecl->field_empty())
continue;
// Add the base as a whole constant, not as separate elements.
FieldVals.emplace_back(
BuildConstInitializer(Types, bDefaultRowMajor, BaseSpec.getType(),
MemRepr, EltVals, EltQualTys, EltIdx));
}
}
}
for (auto FieldIt = Record->field_begin(), FieldEnd = Record->field_end();
FieldIt != FieldEnd; ++FieldIt) {
FieldVals.emplace_back(BuildConstInitializer(Types, bDefaultRowMajor,
FieldIt->getType(), MemRepr,
EltVals, EltQualTys, EltIdx));
}
return llvm::ConstantStruct::get(
cast<llvm::StructType>(Types.ConvertTypeForMem(QualTy)), FieldVals);
}
static Constant *BuildConstInitializer(CodeGenTypes &Types,
bool bDefaultRowMajor, QualType QualTy,
bool MemRepr,
SmallVectorImpl<Constant *> &EltVals,
SmallVectorImpl<QualType> &EltQualTys,
unsigned &EltIdx) {
if (hlsl::IsHLSLVecType(QualTy)) {
QualType VecEltQualTy = hlsl::GetHLSLVecElementType(QualTy);
unsigned VecSize = hlsl::GetHLSLVecSize(QualTy);
SmallVector<Constant *, 4> VecElts;
for (unsigned i = 0; i < VecSize; i++) {
VecElts.emplace_back(BuildConstInitializer(Types, bDefaultRowMajor,
VecEltQualTy, MemRepr, EltVals,
EltQualTys, EltIdx));
}
return llvm::ConstantVector::get(VecElts);
} else if (const clang::ConstantArrayType *ArrayTy =
Types.getContext().getAsConstantArrayType(QualTy)) {
QualType ArrayEltQualTy =
QualType(ArrayTy->getArrayElementTypeNoTypeQual(), 0);
uint64_t ArraySize = ArrayTy->getSize().getLimitedValue();
SmallVector<Constant *, 4> ArrayElts;
for (unsigned i = 0; i < ArraySize; i++) {
ArrayElts.emplace_back(BuildConstInitializer(
Types, bDefaultRowMajor, ArrayEltQualTy,
true, // Array elements must be in their memory representation
EltVals, EltQualTys, EltIdx));
}
return llvm::ConstantArray::get(
cast<llvm::ArrayType>(Types.ConvertTypeForMem(QualTy)), ArrayElts);
} else if (hlsl::IsHLSLMatType(QualTy)) {
return BuildConstMatrix(Types, bDefaultRowMajor, QualTy, EltVals,
EltQualTys, EltIdx);
} else if (QualTy->getAs<clang::RecordType>() != nullptr) {
return BuildConstStruct(Types, bDefaultRowMajor, QualTy, EltVals,
EltQualTys, EltIdx);
} else {
DXASSERT_NOMSG(QualTy->isBuiltinType());
Constant *EltVal = EltVals[EltIdx];
QualType EltQualTy = EltQualTys[EltIdx];
EltIdx++;
// Initializer constants are in their memory representation.
if (EltQualTy == QualTy && MemRepr)
return EltVal;
CGBuilderTy Builder(EltVal->getContext());
if (EltQualTy->isBooleanType()) {
// Convert to register representation
// We don't have access to CodeGenFunction::EmitFromMemory here
DXASSERT_NOMSG(!EltVal->getType()->isIntegerTy(1));
EltVal = cast<Constant>(Builder.CreateICmpNE(
EltVal, Constant::getNullValue(EltVal->getType())));
}
Constant *Result = cast<Constant>(
ConvertScalarOrVector(Builder, Types, EltVal, EltQualTy, QualTy));
if (QualTy->isBooleanType() && MemRepr) {
// Convert back to the memory representation
// We don't have access to CodeGenFunction::EmitToMemory here
DXASSERT_NOMSG(Result->getType()->isIntegerTy(1));
Result = cast<Constant>(
Builder.CreateZExt(Result, Types.ConvertTypeForMem(QualTy)));
}
return Result;
}
}
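// Build a constant initializer for an init list, or return nullptr when it
// is not fully constant.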
Constant *CGMSHLSLRuntime::EmitHLSLConstInitListExpr(CodeGenModule &CGM,
InitListExpr *E) {
bool bDefaultRowMajor = m_pHLModule->GetHLOptions().bDefaultRowMajor;
SmallVector<Constant *, 4> EltVals;
SmallVector<QualType, 4> EltQualTys;
if (!ScanConstInitList(CGM, bDefaultRowMajor, E, EltVals, EltQualTys))
return nullptr;
QualType QualTy = E->getType();
unsigned EltIdx = 0;
bool MemRepr = true;
return BuildConstInitializer(CGM.getTypes(), bDefaultRowMajor, QualTy,
MemRepr, EltVals, EltQualTys, EltIdx);
}
Value *CGMSHLSLRuntime::EmitHLSLMatrixOperationCall(
CodeGenFunction &CGF, const clang::Expr *E, llvm::Type *RetType,
ArrayRef<Value *> paramList) {
HLOpcodeGroup group = GetHLOpcodeGroup(E->getStmtClass());
unsigned opcode = GetHLOpcode(E);
if (group == HLOpcodeGroup::HLInit)
return EmitHLSLArrayInit(CGF.Builder, group, opcode, RetType, paramList,
TheModule);
else
return EmitHLSLMatrixOperationCallImp(CGF.Builder, group, opcode, RetType,
paramList, TheModule);
}
void CGMSHLSLRuntime::EmitHLSLDiscard(CodeGenFunction &CGF) {
EmitHLSLMatrixOperationCallImp(
CGF.Builder, HLOpcodeGroup::HLIntrinsic,
static_cast<unsigned>(IntrinsicOp::IOP_clip),
llvm::Type::getVoidTy(CGF.getLLVMContext()),
{ConstantFP::get(llvm::Type::getFloatTy(CGF.getLLVMContext()), -1.0f)},
TheModule);
}
// Emit an artificially conditionalized branch for a break operation when in a
// potentially wave-enabled stage. This allows the block containing what would
// have been an unconditional break to be included in the loop. If the block
// uses values that are wave-sensitive, it needs to stay in the loop to prevent
// optimizations that might produce incorrect results by ignoring the volatile
// aspect of wave operation results.
BranchInst *CGMSHLSLRuntime::EmitHLSLCondBreak(CodeGenFunction &CGF,
Function *F, BasicBlock *DestBB,
BasicBlock *AltBB) {
// Skip if unreachable
if (!CGF.HaveInsertPoint())
return nullptr;
// If not a wave-enabled stage, we can keep everything unconditional as before
if (!m_pHLModule->GetShaderModel()->IsPS() &&
!m_pHLModule->GetShaderModel()->IsCS() &&
!m_pHLModule->GetShaderModel()->IsLib()) {
return CGF.Builder.CreateBr(DestBB);
}
// Create a branch that is temporarily conditional on a constant.
// FinalizeCodeGen will turn this into a function; DxilFinalize will turn it
// into a global var.
llvm::Type *boolTy = llvm::Type::getInt1Ty(Context);
BranchInst *BI = CGF.Builder.CreateCondBr(llvm::ConstantInt::get(boolTy, 1),
DestBB, AltBB);
m_DxBreaks.emplace_back(BI);
return BI;
}
static llvm::Type *MergeIntType(llvm::IntegerType *T0, llvm::IntegerType *T1) {
if (T0->getBitWidth() > T1->getBitWidth())
return T0;
else
return T1;
}
static Value *CreateExt(CGBuilderTy &Builder, Value *Src, llvm::Type *DstTy,
bool bSigned) {
if (bSigned)
return Builder.CreateSExt(Src, DstTy);
else
return Builder.CreateZExt(Src, DstTy);
}
// For integer literal, try to get lowest precision.
static Value *CalcHLSLLiteralToLowestPrecision(CGBuilderTy &Builder, Value *Src,
bool bSigned) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Src)) {
APInt v = CI->getValue();
switch (v.getActiveWords()) {
case 4:
return Builder.getInt32(v.getLimitedValue());
case 8:
return Builder.getInt64(v.getLimitedValue());
case 2:
// TODO: use a low-precision type when dxil supports it.
// return Builder.getInt16(v.getLimitedValue());
return Builder.getInt32(v.getLimitedValue());
case 1:
// TODO: use a low-precision type when dxil supports it.
// return Builder.getInt8(v.getLimitedValue());
return Builder.getInt32(v.getLimitedValue());
default:
return nullptr;
}
} else if (SelectInst *SI = dyn_cast<SelectInst>(Src)) {
if (SI->getType()->isIntegerTy()) {
Value *T = SI->getTrueValue();
Value *F = SI->getFalseValue();
Value *lowT = CalcHLSLLiteralToLowestPrecision(Builder, T, bSigned);
Value *lowF = CalcHLSLLiteralToLowestPrecision(Builder, F, bSigned);
if (lowT && lowF && lowT != T && lowF != F) {
llvm::IntegerType *TTy = cast<llvm::IntegerType>(lowT->getType());
llvm::IntegerType *FTy = cast<llvm::IntegerType>(lowF->getType());
llvm::Type *Ty = MergeIntType(TTy, FTy);
if (TTy != Ty) {
lowT = CreateExt(Builder, lowT, Ty, bSigned);
}
if (FTy != Ty) {
lowF = CreateExt(Builder, lowF, Ty, bSigned);
}
Value *Cond = SI->getCondition();
return Builder.CreateSelect(Cond, lowT, lowF);
}
}
} else if (llvm::BinaryOperator *BO = dyn_cast<llvm::BinaryOperator>(Src)) {
Value *Src0 = BO->getOperand(0);
Value *Src1 = BO->getOperand(1);
Value *CastSrc0 = CalcHLSLLiteralToLowestPrecision(Builder, Src0, bSigned);
Value *CastSrc1 = CalcHLSLLiteralToLowestPrecision(Builder, Src1, bSigned);
if (Src0 != CastSrc0 && Src1 != CastSrc1 && CastSrc0 && CastSrc1 &&
CastSrc0->getType() == CastSrc1->getType()) {
llvm::IntegerType *Ty0 = cast<llvm::IntegerType>(CastSrc0->getType());
llvm::IntegerType *Ty1 = cast<llvm::IntegerType>(CastSrc1->getType());
llvm::Type *Ty = MergeIntType(Ty0, Ty1);
if (Ty0 != Ty) {
CastSrc0 = CreateExt(Builder, CastSrc0, Ty, bSigned);
}
if (Ty1 != Ty) {
CastSrc1 = CreateExt(Builder, CastSrc1, Ty, bSigned);
}
return Builder.CreateBinOp(BO->getOpcode(), CastSrc0, CastSrc1);
}
}
return nullptr;
}
Value *CGMSHLSLRuntime::EmitHLSLLiteralCast(CodeGenFunction &CGF, Value *Src,
QualType SrcType,
QualType DstType) {
auto &Builder = CGF.Builder;
llvm::Type *DstTy = CGF.ConvertType(DstType);
bool bSrcSigned = SrcType->isSignedIntegerType();
if (ConstantInt *CI = dyn_cast<ConstantInt>(Src)) {
APInt v = CI->getValue();
if (llvm::IntegerType *IT = dyn_cast<llvm::IntegerType>(DstTy)) {
v = v.trunc(IT->getBitWidth());
switch (IT->getBitWidth()) {
case 32:
return Builder.getInt32(v.getLimitedValue());
case 64:
return Builder.getInt64(v.getLimitedValue());
case 16:
return Builder.getInt16(v.getLimitedValue());
case 8:
return Builder.getInt8(v.getLimitedValue());
default:
return nullptr;
}
} else {
DXASSERT_NOMSG(DstTy->isFloatingPointTy());
int64_t val = v.getLimitedValue();
if (v.isNegative())
val = 0 - v.abs().getLimitedValue();
if (DstTy->isDoubleTy())
return ConstantFP::get(DstTy, (double)val);
else if (DstTy->isFloatTy())
return ConstantFP::get(DstTy, (float)val);
else {
if (bSrcSigned)
return Builder.CreateSIToFP(Src, DstTy);
else
return Builder.CreateUIToFP(Src, DstTy);
}
}
} else if (ConstantFP *CF = dyn_cast<ConstantFP>(Src)) {
APFloat v = CF->getValueAPF();
if (llvm::IntegerType *IT = dyn_cast<llvm::IntegerType>(DstTy)) {
APSInt iv(IT->getBitWidth(), DstType->hasUnsignedIntegerRepresentation());
bool isExact;
v.convertToInteger(iv, APFloat::roundingMode::rmTowardZero, &isExact);
switch (IT->getBitWidth()) {
case 32:
return Builder.getInt32(iv.getExtValue());
case 64:
return Builder.getInt64(iv.getExtValue());
case 16:
return Builder.getInt16(iv.getExtValue());
case 8:
return Builder.getInt8(iv.getExtValue());
default:
return nullptr;
}
} else {
if (DstTy->isFloatTy()) {
float fv = v.convertToDouble();
return ConstantFP::get(DstTy->getContext(), APFloat(fv));
} else {
return Builder.CreateFPTrunc(Src, DstTy);
}
}
} else if (dyn_cast<UndefValue>(Src)) {
return UndefValue::get(DstTy);
} else {
Instruction *I = cast<Instruction>(Src);
if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
Value *T = SI->getTrueValue();
Value *F = SI->getFalseValue();
Value *Cond = SI->getCondition();
if (isa<llvm::ConstantInt>(T) && isa<llvm::ConstantInt>(F)) {
llvm::APInt lhs = cast<llvm::ConstantInt>(T)->getValue();
llvm::APInt rhs = cast<llvm::ConstantInt>(F)->getValue();
if (DstTy == Builder.getInt32Ty()) {
T = Builder.getInt32(lhs.getLimitedValue());
F = Builder.getInt32(rhs.getLimitedValue());
Value *Sel = Builder.CreateSelect(Cond, T, F, "cond");
return Sel;
} else if (DstTy->isFloatingPointTy()) {
T = ConstantFP::get(DstTy, int64_t(lhs.getLimitedValue()));
F = ConstantFP::get(DstTy, int64_t(rhs.getLimitedValue()));
Value *Sel = Builder.CreateSelect(Cond, T, F, "cond");
return Sel;
}
} else if (isa<llvm::ConstantFP>(T) && isa<llvm::ConstantFP>(F)) {
llvm::APFloat lhs = cast<llvm::ConstantFP>(T)->getValueAPF();
llvm::APFloat rhs = cast<llvm::ConstantFP>(F)->getValueAPF();
double ld = lhs.convertToDouble();
double rd = rhs.convertToDouble();
if (DstTy->isFloatTy()) {
float lf = ld;
float rf = rd;
T = ConstantFP::get(DstTy->getContext(), APFloat(lf));
F = ConstantFP::get(DstTy->getContext(), APFloat(rf));
Value *Sel = Builder.CreateSelect(Cond, T, F, "cond");
return Sel;
} else if (DstTy == Builder.getInt32Ty()) {
T = Builder.getInt32(ld);
F = Builder.getInt32(rd);
Value *Sel = Builder.CreateSelect(Cond, T, F, "cond");
return Sel;
} else if (DstTy == Builder.getInt64Ty()) {
T = Builder.getInt64(ld);
F = Builder.getInt64(rd);
Value *Sel = Builder.CreateSelect(Cond, T, F, "cond");
return Sel;
}
}
} else if (llvm::BinaryOperator *BO = dyn_cast<llvm::BinaryOperator>(I)) {
// For integer binary operators, do the calculation at the lowest precision,
// then cast to DstTy.
if (I->getType()->isIntegerTy()) {
bool bSigned = DstType->isSignedIntegerType();
Value *CastResult =
CalcHLSLLiteralToLowestPrecision(Builder, BO, bSigned);
if (!CastResult)
return nullptr;
if (dyn_cast<llvm::IntegerType>(DstTy)) {
if (DstTy == CastResult->getType()) {
return CastResult;
} else {
if (bSigned)
return Builder.CreateSExtOrTrunc(CastResult, DstTy);
else
return Builder.CreateZExtOrTrunc(CastResult, DstTy);
}
} else {
if (bSrcSigned)
return Builder.CreateSIToFP(CastResult, DstTy);
else
return Builder.CreateUIToFP(CastResult, DstTy);
}
}
}
// TODO: support other opcodes if needed.
return nullptr;
}
}
// For cases like ((float3x3)mat4x4)._m21 or ((float3x3)mat4x4)[1], just
// treat them like mat4x4._m21 or mat4x4[1].
static Value *GetOriginMatrixOperandAndUpdateMatSize(Value *Ptr, unsigned &row,
unsigned &col) {
if (CallInst *Mat = dyn_cast<CallInst>(Ptr)) {
HLOpcodeGroup OpcodeGroup =
GetHLOpcodeGroupByName(Mat->getCalledFunction());
if (OpcodeGroup == HLOpcodeGroup::HLCast) {
HLCastOpcode castOpcode = static_cast<HLCastOpcode>(GetHLOpcode(Mat));
if (castOpcode == HLCastOpcode::DefaultCast) {
Ptr = Mat->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx);
// Remove the cast which is useless now.
Mat->eraseFromParent();
// Update row and col.
HLMatrixType matTy =
HLMatrixType::cast(Ptr->getType()->getPointerElementType());
row = matTy.getNumRows();
col = matTy.getNumColumns();
// Don't update RetTy; the DxilGeneration pass will do the right thing.
return Ptr;
}
}
}
return nullptr;
}
Value *CGMSHLSLRuntime::EmitHLSLMatrixSubscript(CodeGenFunction &CGF,
llvm::Type *RetType,
llvm::Value *Ptr,
llvm::Value *Idx,
clang::QualType Ty) {
bool isRowMajor =
hlsl::IsHLSLMatRowMajor(Ty, m_pHLModule->GetHLOptions().bDefaultRowMajor);
unsigned opcode =
isRowMajor ? static_cast<unsigned>(HLSubscriptOpcode::RowMatSubscript)
: static_cast<unsigned>(HLSubscriptOpcode::ColMatSubscript);
Value *matBase = Ptr;
DXASSERT(matBase->getType()->isPointerTy(),
"matrix subscript should return pointer");
RetType =
llvm::PointerType::get(RetType->getPointerElementType(),
matBase->getType()->getPointerAddressSpace());
unsigned row, col;
hlsl::GetHLSLMatRowColCount(Ty, row, col);
unsigned resultCol = col;
if (Value *OriginPtr =
GetOriginMatrixOperandAndUpdateMatSize(Ptr, row, col)) {
Ptr = OriginPtr;
// Update col to result col to get correct result size.
col = resultCol;
}
// Lower mat[Idx] into real idx.
SmallVector<Value *, 8> args;
args.emplace_back(Ptr);
if (isRowMajor) {
Value *cCol = ConstantInt::get(Idx->getType(), col);
Value *Base = CGF.Builder.CreateMul(cCol, Idx);
for (unsigned i = 0; i < col; i++) {
Value *c = ConstantInt::get(Idx->getType(), i);
// r * col + c
Value *matIdx = CGF.Builder.CreateAdd(Base, c);
args.emplace_back(matIdx);
}
} else {
for (unsigned i = 0; i < col; i++) {
Value *cMulRow = ConstantInt::get(Idx->getType(), i * row);
// c * row + r
Value *matIdx = CGF.Builder.CreateAdd(cMulRow, Idx);
args.emplace_back(matIdx);
}
}
Value *matSub =
EmitHLSLMatrixOperationCallImp(CGF.Builder, HLOpcodeGroup::HLSubscript,
opcode, RetType, args, TheModule);
return matSub;
}
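// Emit an HLSubscript for a matrix element access such as mat._m00,
// translating row/col index pairs into flat element indices.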
Value *CGMSHLSLRuntime::EmitHLSLMatrixElement(CodeGenFunction &CGF,
llvm::Type *RetType,
ArrayRef<Value *> paramList,
QualType Ty) {
bool isRowMajor =
hlsl::IsHLSLMatRowMajor(Ty, m_pHLModule->GetHLOptions().bDefaultRowMajor);
unsigned opcode =
isRowMajor ? static_cast<unsigned>(HLSubscriptOpcode::RowMatElement)
: static_cast<unsigned>(HLSubscriptOpcode::ColMatElement);
Value *matBase = paramList[0];
DXASSERT(matBase->getType()->isPointerTy(),
"matrix element should return pointer");
RetType =
llvm::PointerType::get(RetType->getPointerElementType(),
matBase->getType()->getPointerAddressSpace());
Value *idx = paramList[HLOperandIndex::kMatSubscriptSubOpIdx - 1];
// Lower _m00 into a real idx.
// -1 to skip the opcode param, which is added in EmitHLSLMatrixOperationCallImp.
Value *args[] = {paramList[HLOperandIndex::kMatSubscriptMatOpIdx - 1],
paramList[HLOperandIndex::kMatSubscriptSubOpIdx - 1]};
unsigned row, col;
hlsl::GetHLSLMatRowColCount(Ty, row, col);
Value *Ptr = paramList[0];
if (Value *OriginPtr =
GetOriginMatrixOperandAndUpdateMatSize(Ptr, row, col)) {
args[0] = OriginPtr;
}
// For an all-zero idx, the result is still an all-zero idx.
if (ConstantAggregateZero *zeros = dyn_cast<ConstantAggregateZero>(idx)) {
Constant *zero = zeros->getAggregateElement((unsigned)0);
std::vector<Constant *> elts(zeros->getNumElements() >> 1, zero);
args[HLOperandIndex::kMatSubscriptSubOpIdx - 1] = ConstantVector::get(elts);
} else {
ConstantDataSequential *elts = cast<ConstantDataSequential>(idx);
unsigned count = elts->getNumElements();
std::vector<Constant *> idxs(count >> 1);
for (unsigned i = 0; i < count; i += 2) {
unsigned rowIdx = elts->getElementAsInteger(i);
unsigned colIdx = elts->getElementAsInteger(i + 1);
unsigned matIdx = 0;
if (isRowMajor) {
matIdx = rowIdx * col + colIdx;
} else {
matIdx = colIdx * row + rowIdx;
}
idxs[i >> 1] = CGF.Builder.getInt32(matIdx);
}
args[HLOperandIndex::kMatSubscriptSubOpIdx - 1] = ConstantVector::get(idxs);
}
return EmitHLSLMatrixOperationCallImp(CGF.Builder, HLOpcodeGroup::HLSubscript,
opcode, RetType, args, TheModule);
}
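// Load a matrix through an HLMatLoadStore call. Col major loads are cast back
// to the canonical row major register representation.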
Value *CGMSHLSLRuntime::EmitHLSLMatrixLoad(CGBuilderTy &Builder, Value *Ptr,
QualType Ty) {
bool isRowMajor =
hlsl::IsHLSLMatRowMajor(Ty, m_pHLModule->GetHLOptions().bDefaultRowMajor);
unsigned opcode =
isRowMajor ? static_cast<unsigned>(HLMatLoadStoreOpcode::RowMatLoad)
: static_cast<unsigned>(HLMatLoadStoreOpcode::ColMatLoad);
Value *matVal = EmitHLSLMatrixOperationCallImp(
Builder, HLOpcodeGroup::HLMatLoadStore, opcode,
Ptr->getType()->getPointerElementType(), {Ptr}, TheModule);
if (!isRowMajor) {
// ColMatLoad will return a col major matrix.
// All matrix values should be row major,
// so cast it to row major.
matVal = EmitHLSLMatrixOperationCallImp(
Builder, HLOpcodeGroup::HLCast,
static_cast<unsigned>(HLCastOpcode::ColMatrixToRowMatrix),
matVal->getType(), {matVal}, TheModule);
}
return matVal;
}
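// Store a matrix through an HLMatLoadStore call, casting the row major value
// to col major first when the destination is col major.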
void CGMSHLSLRuntime::EmitHLSLMatrixStore(CGBuilderTy &Builder, Value *Val,
Value *DestPtr, QualType Ty) {
bool isRowMajor =
hlsl::IsHLSLMatRowMajor(Ty, m_pHLModule->GetHLOptions().bDefaultRowMajor);
unsigned opcode =
isRowMajor ? static_cast<unsigned>(HLMatLoadStoreOpcode::RowMatStore)
: static_cast<unsigned>(HLMatLoadStoreOpcode::ColMatStore);
if (!isRowMajor) {
Value *ColVal = nullptr;
// If Val was cast from col major, just use the original col major value.
if (CallInst *CI = dyn_cast<CallInst>(Val)) {
hlsl::HLOpcodeGroup group =
hlsl::GetHLOpcodeGroupByName(CI->getCalledFunction());
if (group == HLOpcodeGroup::HLCast) {
HLCastOpcode castOp = static_cast<HLCastOpcode>(hlsl::GetHLOpcode(CI));
if (castOp == HLCastOpcode::ColMatrixToRowMatrix) {
ColVal = CI->getArgOperand(HLOperandIndex::kUnaryOpSrc0Idx);
}
}
}
if (ColVal) {
Val = ColVal;
} else {
// All matrix values should be row major.
// ColMatStore needs a col major value,
// so cast the row major value to col major.
Val = EmitHLSLMatrixOperationCallImp(
Builder, HLOpcodeGroup::HLCast,
static_cast<unsigned>(HLCastOpcode::RowMatrixToColMatrix),
Val->getType(), {Val}, TheModule);
}
}
EmitHLSLMatrixOperationCallImp(Builder, HLOpcodeGroup::HLMatLoadStore, opcode,
Val->getType(), {DestPtr, Val}, TheModule);
}
bool CGMSHLSLRuntime::NeedHLSLMartrixCastForStoreOp(
const clang::Decl *TD, llvm::SmallVector<llvm::Value *, 16> &IRCallArgs) {
const clang::FunctionDecl *FD = dyn_cast<clang::FunctionDecl>(TD);
unsigned opcode = 0;
StringRef group;
if (!hlsl::GetIntrinsicOp(FD, opcode, group))
return false;
if (opcode != (unsigned)hlsl::IntrinsicOp::MOP_Store)
return false;
// Note that the store op is not yet an HL op; it is just a call to the
// mangled rwbab store function, so adjust the store value position.
const unsigned storeValOpIdx = HLOperandIndex::kStoreValOpIdx - 1;
if (storeValOpIdx >= IRCallArgs.size()) {
return false;
}
return HLMatrixType::isa(IRCallArgs[storeValOpIdx]->getType());
}
void CGMSHLSLRuntime::EmitHLSLMartrixCastForStoreOp(
CodeGenFunction &CGF, SmallVector<llvm::Value *, 16> &IRCallArgs,
llvm::SmallVector<clang::QualType, 16> &ArgTys) {
// Note that the store op is not yet an HL op; it is just a call to the
// mangled rwbab store function, so adjust the store value position.
const unsigned storeValOpIdx = HLOperandIndex::kStoreValOpIdx - 1;
if (storeValOpIdx >= IRCallArgs.size() || storeValOpIdx >= ArgTys.size()) {
return;
}
if (!hlsl::IsHLSLMatType(ArgTys[storeValOpIdx]))
return;
bool isRowMajor = hlsl::IsHLSLMatRowMajor(
ArgTys[storeValOpIdx], m_pHLModule->GetHLOptions().bDefaultRowMajor);
if (!isRowMajor) {
IRCallArgs[storeValOpIdx] = EmitHLSLMatrixOperationCallImp(
CGF.Builder, HLOpcodeGroup::HLCast,
static_cast<unsigned>(HLCastOpcode::RowMatrixToColMatrix),
IRCallArgs[storeValOpIdx]->getType(), {IRCallArgs[storeValOpIdx]},
TheModule);
}
}
Value *CGMSHLSLRuntime::EmitHLSLMatrixLoad(CodeGenFunction &CGF, Value *Ptr,
QualType Ty) {
return EmitHLSLMatrixLoad(CGF.Builder, Ptr, Ty);
}
void CGMSHLSLRuntime::EmitHLSLMatrixStore(CodeGenFunction &CGF, Value *Val,
Value *DestPtr, QualType Ty) {
EmitHLSLMatrixStore(CGF.Builder, Val, DestPtr, Ty);
}
// Copy data from srcPtr to destPtr.
static void SimplePtrCopy(Value *DestPtr, Value *SrcPtr,
ArrayRef<Value *> idxList, CGBuilderTy &Builder) {
if (idxList.size() > 1) {
DestPtr = Builder.CreateInBoundsGEP(DestPtr, idxList);
SrcPtr = Builder.CreateInBoundsGEP(SrcPtr, idxList);
}
llvm::LoadInst *ld = Builder.CreateLoad(SrcPtr);
Builder.CreateStore(ld, DestPtr);
}
// Get the element value from SrcVal with extractvalue.
static Value *GetEltVal(Value *SrcVal, ArrayRef<Value *> idxList,
CGBuilderTy &Builder) {
Value *Val = SrcVal;
// Skip the leading pointer type.
for (unsigned i = 1; i < idxList.size(); i++) {
ConstantInt *idx = cast<ConstantInt>(idxList[i]);
llvm::Type *Ty = Val->getType();
if (Ty->isAggregateType()) {
Val = Builder.CreateExtractValue(Val, idx->getLimitedValue());
}
}
return Val;
}
// Copy srcVal to destPtr.
static void SimpleValCopy(Value *DestPtr, Value *SrcVal,
ArrayRef<Value *> idxList, CGBuilderTy &Builder) {
Value *DestGEP = Builder.CreateInBoundsGEP(DestPtr, idxList);
Value *Val = GetEltVal(SrcVal, idxList, Builder);
Builder.CreateStore(Val, DestGEP);
}
static void SimpleCopy(Value *Dest, Value *Src, ArrayRef<Value *> idxList,
CGBuilderTy &Builder) {
if (Src->getType()->isPointerTy())
SimplePtrCopy(Dest, Src, idxList, Builder);
else
SimpleValCopy(Dest, Src, idxList, Builder);
}
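// Recursively flatten an aggregate pointer into GEPs (or matrix element
// subscripts) for each scalar element, paired with their QualTypes.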
void CGMSHLSLRuntime::FlattenAggregatePtrToGepList(
CodeGenFunction &CGF, Value *Ptr, SmallVector<Value *, 4> &idxList,
clang::QualType Type, llvm::Type *Ty, SmallVector<Value *, 4> &GepList,
SmallVector<QualType, 4> &EltTyList) {
if (llvm::PointerType *PT = dyn_cast<llvm::PointerType>(Ty)) {
Constant *idx = Constant::getIntegerValue(
IntegerType::get(Ty->getContext(), 32), APInt(32, 0));
idxList.emplace_back(idx);
FlattenAggregatePtrToGepList(CGF, Ptr, idxList, Type, PT->getElementType(),
GepList, EltTyList);
idxList.pop_back();
} else if (HLMatrixType MatTy = HLMatrixType::dyn_cast(Ty)) {
// Use matLd/St for matrix.
llvm::Type *EltTy = MatTy.getElementTypeForReg();
llvm::PointerType *EltPtrTy =
llvm::PointerType::get(EltTy, Ptr->getType()->getPointerAddressSpace());
QualType EltQualTy = hlsl::GetHLSLMatElementType(Type);
Value *matPtr = CGF.Builder.CreateInBoundsGEP(Ptr, idxList);
// Flatten matrix to elements.
for (unsigned r = 0; r < MatTy.getNumRows(); r++) {
for (unsigned c = 0; c < MatTy.getNumColumns(); c++) {
ConstantInt *cRow = CGF.Builder.getInt32(r);
ConstantInt *cCol = CGF.Builder.getInt32(c);
Constant *CV = llvm::ConstantVector::get({cRow, cCol});
GepList.push_back(
EmitHLSLMatrixElement(CGF, EltPtrTy, {matPtr, CV}, Type));
EltTyList.push_back(EltQualTy);
}
}
} else if (StructType *ST = dyn_cast<StructType>(Ty)) {
if (dxilutil::IsHLSLObjectType(ST)) {
// Avoid splitting HLSL objects.
Value *GEP = CGF.Builder.CreateInBoundsGEP(Ptr, idxList);
GepList.push_back(GEP);
EltTyList.push_back(Type);
return;
}
const clang::RecordType *RT = Type->getAs<RecordType>();
RecordDecl *RD = RT->getDecl();
const CGRecordLayout &RL = CGF.getTypes().getCGRecordLayout(RD);
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
if (CXXRD->getNumBases()) {
// Add base as field.
for (const auto &I : CXXRD->bases()) {
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
// Skip empty struct.
if (BaseDecl->field_empty())
continue;
QualType parentTy = QualType(BaseDecl->getTypeForDecl(), 0);
llvm::Type *parentType = CGF.ConvertType(parentTy);
unsigned i = RL.getNonVirtualBaseLLVMFieldNo(BaseDecl);
Constant *idx = llvm::Constant::getIntegerValue(
IntegerType::get(Ty->getContext(), 32), APInt(32, i));
idxList.emplace_back(idx);
FlattenAggregatePtrToGepList(CGF, Ptr, idxList, parentTy, parentType,
GepList, EltTyList);
idxList.pop_back();
}
}
}
for (auto fieldIter = RD->field_begin(), fieldEnd = RD->field_end();
fieldIter != fieldEnd; ++fieldIter) {
unsigned i = RL.getLLVMFieldNo(*fieldIter);
llvm::Type *ET = ST->getElementType(i);
Constant *idx = llvm::Constant::getIntegerValue(
IntegerType::get(Ty->getContext(), 32), APInt(32, i));
idxList.emplace_back(idx);
FlattenAggregatePtrToGepList(CGF, Ptr, idxList, fieldIter->getType(), ET,
GepList, EltTyList);
idxList.pop_back();
}
} else if (llvm::ArrayType *AT = dyn_cast<llvm::ArrayType>(Ty)) {
llvm::Type *ET = AT->getElementType();
QualType EltType = CGF.getContext().getBaseElementType(Type);
for (uint32_t i = 0; i < AT->getNumElements(); i++) {
Constant *idx = Constant::getIntegerValue(
IntegerType::get(Ty->getContext(), 32), APInt(32, i));
idxList.emplace_back(idx);
FlattenAggregatePtrToGepList(CGF, Ptr, idxList, EltType, ET, GepList,
EltTyList);
idxList.pop_back();
}
} else if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(Ty)) {
// Flatten vector too.
QualType EltTy = hlsl::GetHLSLVecElementType(Type);
for (uint32_t i = 0; i < VT->getNumElements(); i++) {
Constant *idx = CGF.Builder.getInt32(i);
idxList.emplace_back(idx);
Value *GEP = CGF.Builder.CreateInBoundsGEP(Ptr, idxList);
GepList.push_back(GEP);
EltTyList.push_back(EltTy);
idxList.pop_back();
}
} else {
Value *GEP = CGF.Builder.CreateInBoundsGEP(Ptr, idxList);
GepList.push_back(GEP);
EltTyList.push_back(Type);
}
}
void CGMSHLSLRuntime::LoadElements(CodeGenFunction &CGF, ArrayRef<Value *> Ptrs,
ArrayRef<QualType> QualTys,
SmallVector<Value *, 4> &Vals) {
for (size_t i = 0, e = Ptrs.size(); i < e; i++) {
Value *Ptr = Ptrs[i];
llvm::Type *Ty = Ptr->getType()->getPointerElementType();
DXASSERT_LOCALVAR(Ty, Ty->isIntegerTy() || Ty->isFloatingPointTy(),
"Expected only element types.");
Value *Val = CGF.Builder.CreateLoad(Ptr);
Val = CGF.EmitFromMemory(Val, QualTys[i]);
Vals.push_back(Val);
}
}
void CGMSHLSLRuntime::ConvertAndStoreElements(CodeGenFunction &CGF,
ArrayRef<Value *> SrcVals,
ArrayRef<QualType> SrcQualTys,
ArrayRef<Value *> DstPtrs,
ArrayRef<QualType> DstQualTys) {
for (size_t i = 0, e = DstPtrs.size(); i < e; i++) {
Value *DstPtr = DstPtrs[i];
QualType DstQualTy = DstQualTys[i];
Value *SrcVal = SrcVals[i];
QualType SrcQualTy = SrcQualTys[i];
DXASSERT(SrcVal->getType()->isIntegerTy() ||
SrcVal->getType()->isFloatingPointTy(),
"Expected only element types.");
llvm::Value *Result =
ConvertScalarOrVector(CGF, SrcVal, SrcQualTy, DstQualTy);
Result = CGF.EmitToMemory(Result, DstQualTy);
CGF.Builder.CreateStore(Result, DstPtr);
}
}
static bool AreMatrixArrayOrientationMatching(ASTContext &Context,
HLModule &Module, QualType LhsTy,
QualType RhsTy) {
while (const clang::ArrayType *LhsArrayTy = Context.getAsArrayType(LhsTy)) {
LhsTy = LhsArrayTy->getElementType();
RhsTy = Context.getAsArrayType(RhsTy)->getElementType();
}
bool LhsRowMajor, RhsRowMajor;
LhsRowMajor = RhsRowMajor = Module.GetHLOptions().bDefaultRowMajor;
HasHLSLMatOrientation(LhsTy, &LhsRowMajor);
HasHLSLMatOrientation(RhsTy, &RhsRowMajor);
return LhsRowMajor == RhsRowMajor;
}
static llvm::Value *CreateInBoundsGEPIfNeeded(llvm::Value *Ptr,
ArrayRef<Value *> IdxList,
CGBuilderTy &Builder) {
DXASSERT(IdxList.size() > 0, "Invalid empty GEP index list");
// If the GEP list is a single zero, it's a no-op, so save us the trouble.
if (IdxList.size() == 1) {
if (ConstantInt *FirstIdx = dyn_cast<ConstantInt>(IdxList[0])) {
if (FirstIdx->isZero())
return Ptr;
}
}
return Builder.CreateInBoundsGEP(Ptr, IdxList);
}
// Copy data from SrcPtr to DestPtr.
// For matrices, use MatLoad/MatStore.
// For matrix arrays with mismatched orientation, recurse into each element.
// For structs and other arrays, use memcpy.
// Otherwise, use a plain load/store.
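// e.g. (illustrative) copying row_major float4x4 a[2] into column_major
// float4x4 b[2] takes the elementwise matrix path, while float3 a[2] into
// float3 b[2] becomes a single memcpy.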
void CGMSHLSLRuntime::EmitHLSLAggregateCopy(
CodeGenFunction &CGF, llvm::Value *SrcPtr, llvm::Value *DestPtr,
SmallVector<Value *, 4> &idxList, clang::QualType SrcType,
clang::QualType DestType, llvm::Type *Ty) {
if (llvm::PointerType *PT = dyn_cast<llvm::PointerType>(Ty)) {
Constant *idx = Constant::getIntegerValue(
IntegerType::get(Ty->getContext(), 32), APInt(32, 0));
idxList.emplace_back(idx);
EmitHLSLAggregateCopy(CGF, SrcPtr, DestPtr, idxList, SrcType, DestType,
PT->getElementType());
idxList.pop_back();
} else if (HLMatrixType::isa(Ty)) {
// Use matLd/St for matrix.
Value *SrcMatPtr = CreateInBoundsGEPIfNeeded(SrcPtr, idxList, CGF.Builder);
Value *DestMatPtr =
CreateInBoundsGEPIfNeeded(DestPtr, idxList, CGF.Builder);
Value *ldMat = EmitHLSLMatrixLoad(CGF, SrcMatPtr, SrcType);
EmitHLSLMatrixStore(CGF, ldMat, DestMatPtr, DestType);
} else if (StructType *ST = dyn_cast<StructType>(Ty)) {
if (dxilutil::IsHLSLObjectType(ST)) {
// Avoid splitting HLSL objects.
SimpleCopy(DestPtr, SrcPtr, idxList, CGF.Builder);
return;
}
Value *SrcStructPtr =
CreateInBoundsGEPIfNeeded(SrcPtr, idxList, CGF.Builder);
Value *DestStructPtr =
CreateInBoundsGEPIfNeeded(DestPtr, idxList, CGF.Builder);
unsigned size = this->TheModule.getDataLayout().getTypeAllocSize(ST);
// Memcpy struct.
CGF.Builder.CreateMemCpy(DestStructPtr, SrcStructPtr, size, 1);
} else if (llvm::ArrayType *AT = dyn_cast<llvm::ArrayType>(Ty)) {
if (!HLMatrixType::isMatrixArray(Ty) ||
AreMatrixArrayOrientationMatching(CGF.getContext(), *m_pHLModule,
SrcType, DestType)) {
Value *SrcArrayPtr =
CreateInBoundsGEPIfNeeded(SrcPtr, idxList, CGF.Builder);
Value *DestArrayPtr =
CreateInBoundsGEPIfNeeded(DestPtr, idxList, CGF.Builder);
unsigned size = this->TheModule.getDataLayout().getTypeAllocSize(AT);
// Memcpy non-matrix array.
CGF.Builder.CreateMemCpy(DestArrayPtr, SrcArrayPtr, size, 1);
} else {
// Copy matrix arrays elementwise if orientation changes are needed.
llvm::Type *ET = AT->getElementType();
QualType EltDestType = CGF.getContext().getBaseElementType(DestType);
QualType EltSrcType = CGF.getContext().getBaseElementType(SrcType);
for (uint32_t i = 0; i < AT->getNumElements(); i++) {
Constant *idx = Constant::getIntegerValue(
IntegerType::get(Ty->getContext(), 32), APInt(32, i));
idxList.emplace_back(idx);
EmitHLSLAggregateCopy(CGF, SrcPtr, DestPtr, idxList, EltSrcType,
EltDestType, ET);
idxList.pop_back();
}
}
} else {
SimpleCopy(DestPtr, SrcPtr, idxList, CGF.Builder);
}
}
void CGMSHLSLRuntime::EmitHLSLAggregateCopy(CodeGenFunction &CGF,
llvm::Value *SrcPtr,
llvm::Value *DestPtr,
clang::QualType Ty) {
SmallVector<Value *, 4> idxList;
EmitHLSLAggregateCopy(CGF, SrcPtr, DestPtr, idxList, Ty, Ty,
SrcPtr->getType());
}
// Check that every leaf element type of the struct is the given type.
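// e.g. (illustrative) struct { float4 a; float4 b[2]; } matches <4 x float>,
// while struct { float4 a; float3 b; } does not.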
static bool IsStructWithSameElementType(llvm::StructType *ST, llvm::Type *Ty) {
for (llvm::Type *EltTy : ST->elements()) {
if (StructType *EltSt = dyn_cast<StructType>(EltTy)) {
if (!IsStructWithSameElementType(EltSt, Ty))
return false;
} else if (llvm::ArrayType *AT = dyn_cast<llvm::ArrayType>(EltTy)) {
llvm::Type *ArrayEltTy = dxilutil::GetArrayEltTy(AT);
if (ArrayEltTy == Ty) {
continue;
} else if (StructType *EltSt = dyn_cast<StructType>(ArrayEltTy)) {
if (!IsStructWithSameElementType(EltSt, Ty))
return false;
} else {
return false;
}
} else if (EltTy != Ty)
return false;
}
return true;
}
// To memcpy, the element types must match.
// For struct types, the layout must match the cbuffer layout:
// struct { float2 x; float3 y; } will not match struct { float3 x; float2 y; },
// nor will it match an array of float.
static bool IsTypeMatchForMemcpy(llvm::Type *SrcTy, llvm::Type *DestTy) {
llvm::Type *SrcEltTy = dxilutil::GetArrayEltTy(SrcTy);
llvm::Type *DestEltTy = dxilutil::GetArrayEltTy(DestTy);
if (SrcEltTy == DestEltTy)
return true;
llvm::StructType *SrcST = dyn_cast<llvm::StructType>(SrcEltTy);
llvm::StructType *DestST = dyn_cast<llvm::StructType>(DestEltTy);
if (SrcST && DestST) {
// Only allow structs with identical layout.
return SrcST->isLayoutIdentical(DestST);
} else if (!SrcST && !DestST) {
// Neither side is a struct. Since the equal-type case was handled above, the
// element types differ, so the copy is not memcpy-safe whether the types are
// scalars or arrays.
return false;
} else {
// One side is a struct, the other a basic type.
// Require the basic type to be a 4-element vector and every struct element
// to match it.
llvm::StructType *ST = SrcST ? SrcST : DestST;
llvm::Type *Ty = SrcST ? DestEltTy : SrcEltTy;
if (!Ty->isVectorTy())
return false;
if (Ty->getVectorNumElements() != 4)
return false;
return IsStructWithSameElementType(ST, Ty);
}
}
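// Recognize a memcpy-safe copy from an array of 128-bit vectors to an array
// of matching scalars, e.g. (illustrative) float4 src[4] -> float dst[16].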
static bool IsVec4ArrayToScalarArrayForMemcpy(llvm::Type *SrcTy,
llvm::Type *DestTy,
const DataLayout &DL) {
if (!SrcTy->isArrayTy())
return false;
llvm::Type *SrcEltTy = dxilutil::GetArrayEltTy(SrcTy);
llvm::Type *DestEltTy = dxilutil::GetArrayEltTy(DestTy);
if (SrcEltTy == DestEltTy)
return true;
llvm::VectorType *VT = dyn_cast<llvm::VectorType>(SrcEltTy);
if (!VT)
return false;
if (DL.getTypeSizeInBits(VT) != 128)
return false;
if (DL.getTypeSizeInBits(DestEltTy) < 32)
return false;
return VT->getElementType() == DestEltTy;
}
void CGMSHLSLRuntime::EmitHLSLFlatConversionAggregateCopy(
CodeGenFunction &CGF, llvm::Value *SrcPtr, clang::QualType SrcTy,
llvm::Value *DestPtr, clang::QualType DestTy) {
llvm::Type *SrcPtrTy = SrcPtr->getType()->getPointerElementType();
llvm::Type *DestPtrTy = DestPtr->getType()->getPointerElementType();
const DataLayout &DL = TheModule.getDataLayout();
bool bDefaultRowMajor = m_pHLModule->GetHLOptions().bDefaultRowMajor;
if (SrcPtrTy == DestPtrTy) {
bool bMatArrayRotate = false;
if (HLMatrixType::isMatrixArrayPtr(SrcPtr->getType())) {
QualType SrcEltTy = GetArrayEltType(CGM.getContext(), SrcTy);
QualType DestEltTy = GetArrayEltType(CGM.getContext(), DestTy);
if (GetMatrixMajor(SrcEltTy, bDefaultRowMajor) !=
GetMatrixMajor(DestEltTy, bDefaultRowMajor)) {
bMatArrayRotate = true;
}
}
if (!bMatArrayRotate) {
// Memcpy when the types match.
unsigned size = DL.getTypeAllocSize(SrcPtrTy);
CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, size, 1);
return;
}
} else if (dxilutil::IsHLSLResourceDescType(SrcPtrTy) &&
(dxilutil::IsHLSLResourceType(DestPtrTy) ||
GetResourceClassForType(CGM.getContext(), DestTy) ==
DXIL::ResourceClass::CBuffer)) {
// Cast the resource descriptor to a resource. Disable constant folding so an
// instruction is generated to help lowering.
bool originAllowFolding = CGF.Builder.AllowFolding;
CGF.Builder.AllowFolding = false;
Value *CastPtr = CGF.Builder.CreatePointerCast(SrcPtr, DestPtr->getType());
CGF.Builder.AllowFolding = originAllowFolding;
// Load resource.
Value *V = CGF.Builder.CreateLoad(CastPtr);
// Store to resource ptr.
CGF.Builder.CreateStore(V, DestPtr);
return;
} else if (GetResourceClassForType(CGM.getContext(), SrcTy) ==
DXIL::ResourceClass::CBuffer) {
llvm::Type *ResultTy =
CGM.getTypes().ConvertType(hlsl::GetHLSLResourceResultType(SrcTy));
if (ResultTy == DestPtrTy) {
// Cast ConstantBuffer to result type then copy.
Value *Cast = CGF.Builder.CreateBitCast(
SrcPtr,
ResultTy->getPointerTo(DestPtr->getType()->getPointerAddressSpace()));
unsigned size = DL.getTypeAllocSize(DestPtrTy);
CGF.Builder.CreateMemCpy(DestPtr, Cast, size, 1);
return;
}
} else if (dxilutil::IsHLSLObjectType(dxilutil::GetArrayEltTy(SrcPtrTy)) &&
dxilutil::IsHLSLObjectType(dxilutil::GetArrayEltTy(DestPtrTy))) {
unsigned sizeSrc = DL.getTypeAllocSize(SrcPtrTy);
unsigned sizeDest = DL.getTypeAllocSize(DestPtrTy);
CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, std::max(sizeSrc, sizeDest), 1);
return;
} else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(DestPtr)) {
if (GV->isInternalLinkage(GV->getLinkage()) &&
IsTypeMatchForMemcpy(SrcPtrTy, DestPtrTy)) {
unsigned sizeSrc = DL.getTypeAllocSize(SrcPtrTy);
unsigned sizeDest = DL.getTypeAllocSize(DestPtrTy);
CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, std::min(sizeSrc, sizeDest), 1);
return;
} else if (GlobalVariable *SrcGV = dyn_cast<GlobalVariable>(SrcPtr)) {
if (GV->isInternalLinkage(GV->getLinkage()) &&
m_ConstVarAnnotationMap.count(SrcGV) &&
IsVec4ArrayToScalarArrayForMemcpy(SrcPtrTy, DestPtrTy, DL)) {
unsigned sizeSrc = DL.getTypeAllocSize(SrcPtrTy);
unsigned sizeDest = DL.getTypeAllocSize(DestPtrTy);
if (sizeSrc == sizeDest) {
CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, sizeSrc, 1);
return;
}
}
}
}
// EmitHLSLAggregateCopy could be implemented the same way, but splitting the
// value into scalars generates many instructions when the source type is the
// same as the dest type.
SmallVector<Value *, 4> GEPIdxStack;
SmallVector<Value *, 4> SrcPtrs;
SmallVector<QualType, 4> SrcQualTys;
FlattenAggregatePtrToGepList(CGF, SrcPtr, GEPIdxStack, SrcTy,
SrcPtr->getType(), SrcPtrs, SrcQualTys);
SmallVector<Value *, 4> SrcVals;
LoadElements(CGF, SrcPtrs, SrcQualTys, SrcVals);
GEPIdxStack.clear();
SmallVector<Value *, 4> DstPtrs;
SmallVector<QualType, 4> DstQualTys;
FlattenAggregatePtrToGepList(CGF, DestPtr, GEPIdxStack, DestTy,
DestPtr->getType(), DstPtrs, DstQualTys);
ConvertAndStoreElements(CGF, SrcVals, SrcQualTys, DstPtrs, DstQualTys);
}
// Copies a scalar to a scalar destination, or splats a scalar into a vector
// destination.
static void SimpleFlatValCopy(CodeGenFunction &CGF, Value *SrcVal,
QualType SrcQualTy, Value *DstPtr,
QualType DstQualTy) {
DXASSERT(SrcVal->getType() == CGF.ConvertType(SrcQualTy),
"QualType/Type mismatch!");
llvm::Type *DstTy = DstPtr->getType()->getPointerElementType();
DXASSERT(DstTy == CGF.ConvertTypeForMem(DstQualTy),
"QualType/Type mismatch!");
llvm::VectorType *DstVecTy = dyn_cast<llvm::VectorType>(DstTy);
QualType DstScalarQualTy = DstQualTy;
if (DstVecTy) {
DstScalarQualTy = hlsl::GetHLSLVecElementType(DstQualTy);
}
Value *ResultScalar =
ConvertScalarOrVector(CGF, SrcVal, SrcQualTy, DstScalarQualTy);
ResultScalar = CGF.EmitToMemory(ResultScalar, DstScalarQualTy);
if (DstVecTy) {
llvm::VectorType *DstScalarVecTy =
llvm::VectorType::get(ResultScalar->getType(), 1);
Value *ResultScalarVec = CGF.Builder.CreateInsertElement(
UndefValue::get(DstScalarVecTy), ResultScalar, (uint64_t)0);
std::vector<int> ShufIdx(DstVecTy->getNumElements(), 0);
Value *ResultVec = CGF.Builder.CreateShuffleVector(
ResultScalarVec, ResultScalarVec, ShufIdx);
CGF.Builder.CreateStore(ResultVec, DstPtr);
} else
CGF.Builder.CreateStore(ResultScalar, DstPtr);
}
void CGMSHLSLRuntime::EmitHLSLSplat(CodeGenFunction &CGF, Value *SrcVal,
llvm::Value *DestPtr,
SmallVector<Value *, 4> &idxList,
QualType Type, QualType SrcType,
llvm::Type *Ty) {
if (llvm::PointerType *PT = dyn_cast<llvm::PointerType>(Ty)) {
idxList.emplace_back(CGF.Builder.getInt32(0));
EmitHLSLSplat(CGF, SrcVal, DestPtr, idxList, Type, SrcType,
PT->getElementType());
idxList.pop_back();
} else if (HLMatrixType MatTy = HLMatrixType::dyn_cast(Ty)) {
// Use matLd/St for matrix.
Value *dstGEP = CGF.Builder.CreateInBoundsGEP(DestPtr, idxList);
llvm::Type *EltTy = MatTy.getElementTypeForReg();
llvm::VectorType *VT1 = llvm::VectorType::get(EltTy, 1);
SrcVal = ConvertScalarOrVector(CGF, SrcVal, SrcType,
hlsl::GetHLSLMatElementType(Type));
// Splat the value
Value *V1 = CGF.Builder.CreateInsertElement(UndefValue::get(VT1), SrcVal,
(uint64_t)0);
std::vector<int> shufIdx(MatTy.getNumElements(), 0);
Value *VecMat = CGF.Builder.CreateShuffleVector(V1, V1, shufIdx);
Value *MatInit = EmitHLSLMatrixOperationCallImp(
CGF.Builder, HLOpcodeGroup::HLInit, 0, Ty, {VecMat}, TheModule);
EmitHLSLMatrixStore(CGF, MatInit, dstGEP, Type);
} else if (StructType *ST = dyn_cast<StructType>(Ty)) {
DXASSERT(!dxilutil::IsHLSLObjectType(ST),
"cannot cast to hlsl object, Sema should reject");
const clang::RecordType *RT = Type->getAs<RecordType>();
RecordDecl *RD = RT->getDecl();
const CGRecordLayout &RL = CGF.getTypes().getCGRecordLayout(RD);
// Handle base classes.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
if (CXXRD->getNumBases()) {
for (const auto &I : CXXRD->bases()) {
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
if (BaseDecl->field_empty())
continue;
QualType parentTy = QualType(BaseDecl->getTypeForDecl(), 0);
unsigned i = RL.getNonVirtualBaseLLVMFieldNo(BaseDecl);
llvm::Type *ET = ST->getElementType(i);
Constant *idx = llvm::Constant::getIntegerValue(
IntegerType::get(Ty->getContext(), 32), APInt(32, i));
idxList.emplace_back(idx);
EmitHLSLSplat(CGF, SrcVal, DestPtr, idxList, parentTy, SrcType, ET);
idxList.pop_back();
}
}
}
for (auto fieldIter = RD->field_begin(), fieldEnd = RD->field_end();
fieldIter != fieldEnd; ++fieldIter) {
unsigned i = RL.getLLVMFieldNo(*fieldIter);
llvm::Type *ET = ST->getElementType(i);
Constant *idx = llvm::Constant::getIntegerValue(
IntegerType::get(Ty->getContext(), 32), APInt(32, i));
idxList.emplace_back(idx);
EmitHLSLSplat(CGF, SrcVal, DestPtr, idxList, fieldIter->getType(),
SrcType, ET);
idxList.pop_back();
}
} else if (llvm::ArrayType *AT = dyn_cast<llvm::ArrayType>(Ty)) {
llvm::Type *ET = AT->getElementType();
QualType EltType = CGF.getContext().getBaseElementType(Type);
for (uint32_t i = 0; i < AT->getNumElements(); i++) {
Constant *idx = Constant::getIntegerValue(
IntegerType::get(Ty->getContext(), 32), APInt(32, i));
idxList.emplace_back(idx);
EmitHLSLSplat(CGF, SrcVal, DestPtr, idxList, EltType, SrcType, ET);
idxList.pop_back();
}
} else {
DestPtr = CGF.Builder.CreateInBoundsGEP(DestPtr, idxList);
SimpleFlatValCopy(CGF, SrcVal, SrcType, DestPtr, Type);
}
}
void CGMSHLSLRuntime::EmitHLSLFlatConversion(CodeGenFunction &CGF, Value *Val,
Value *DestPtr, QualType Ty,
QualType SrcTy) {
SmallVector<Value *, 4> SrcVals;
SmallVector<QualType, 4> SrcQualTys;
FlattenValToInitList(CGF, SrcVals, SrcQualTys, SrcTy, Val);
if (SrcVals.size() == 1) {
// Perform a splat
SmallVector<Value *, 4> GEPIdxStack;
GEPIdxStack.emplace_back(
CGF.Builder.getInt32(0)); // Add first 0 for DestPtr.
EmitHLSLSplat(CGF, SrcVals[0], DestPtr, GEPIdxStack, Ty, SrcQualTys[0],
DestPtr->getType()->getPointerElementType());
} else {
SmallVector<Value *, 4> GEPIdxStack;
SmallVector<Value *, 4> DstPtrs;
SmallVector<QualType, 4> DstQualTys;
FlattenAggregatePtrToGepList(CGF, DestPtr, GEPIdxStack, Ty,
DestPtr->getType(), DstPtrs, DstQualTys);
ConvertAndStoreElements(CGF, SrcVals, SrcQualTys, DstPtrs, DstQualTys);
}
}
void CGMSHLSLRuntime::EmitHLSLRootSignature(HLSLRootSignatureAttr *RSA,
Function *Fn,
DxilFunctionProps &props) {
StringRef StrRef = RSA->getSignatureName();
DiagnosticsEngine &Diags = CGM.getDiags();
SourceLocation SLoc = RSA->getLocation();
RootSignatureHandle RootSigHandle;
clang::CompileRootSignature(
StrRef, Diags, SLoc, rootSigVer,
DxilRootSignatureCompilationFlags::GlobalRootSignature, &RootSigHandle);
if (!RootSigHandle.IsEmpty()) {
RootSigHandle.EnsureSerializedAvailable();
if (!m_bIsLib) {
m_pHLModule->SetSerializedRootSignature(
RootSigHandle.GetSerializedBytes(),
RootSigHandle.GetSerializedSize());
} else {
if (!props.IsRay()) {
props.SetSerializedRootSignature(RootSigHandle.GetSerializedBytes(),
RootSigHandle.GetSerializedSize());
} else {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "root signature attribute not supported "
"for raytracing entry functions");
Diags.Report(RSA->getLocation(), DiagID);
}
}
}
}
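// Set up copy-in semantics for arguments that need conversion or lowering.
// e.g. (illustrative) for
//   void foo(inout float2 v);  ...  float4 f; foo(f.xy);
// f.xy is copied into a temporary alloca before the call; the copy back
// happens in EmitHLSLOutParamConversionCopyBack below.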
void CGMSHLSLRuntime::EmitHLSLOutParamConversionInit(
CodeGenFunction &CGF, const FunctionDecl *FD, const CallExpr *E,
llvm::SmallVector<LValue, 8> &castArgList,
llvm::SmallVector<const Stmt *, 8> &argList,
llvm::SmallVector<LValue, 8> &lifetimeCleanupList,
const std::function<void(const VarDecl *, llvm::Value *)> &TmpArgMap) {
// Special case: skip first argument of CXXOperatorCall (it is "this").
unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(E) ? 1 : 0;
llvm::SmallSet<llvm::Value *, 8> ArgVals;
for (uint32_t i = 0; i < FD->getNumParams(); i++) {
const ParmVarDecl *Param = FD->getParamDecl(i);
uint32_t ArgIdx = i + ArgsToSkip;
const Expr *Arg = E->getArg(ArgIdx);
QualType ParamTy = Param->getType().getNonReferenceType();
bool isObject = dxilutil::IsHLSLObjectType(CGF.ConvertTypeForMem(ParamTy));
bool bAnnotResource = false;
if (isObject) {
if (isGLCMismatch(Param->getType(), Arg->getType(), Arg,
Arg->getExprLoc(), CGM.getDiags())) {
// NOTE: if the function is noinline, a resource parameter is not
// allowed. Here we assume the function will always be inlined.
// This only handles a resource passed directly as a parameter; when
// the parameter is a struct with a resource member, glc cannot
// mismatch because the struct types must match exactly.
// Add an annotate handle here.
bAnnotResource = true;
}
}
bool isVector = hlsl::IsHLSLVecType(ParamTy);
bool isArray = ParamTy->isArrayType();
// Check for array of matrix
QualType ParamElTy = ParamTy;
while (ParamElTy->isArrayType())
ParamElTy = ParamElTy->getAsArrayTypeUnsafe()->getElementType();
bool isMatrix = hlsl::IsHLSLMatType(ParamElTy);
bool isAggregateType =
!isObject &&
(isArray || (ParamTy->isRecordType() && !(isMatrix || isVector)));
bool EmitRValueAgg = false;
bool RValOnRef = false;
if (!Param->isModifierOut()) {
if (!isAggregateType && !isObject) {
if (Arg->isRValue() && Param->getType()->isReferenceType()) {
// RValue on a reference type.
if (const CStyleCastExpr *cCast = dyn_cast<CStyleCastExpr>(Arg)) {
// TODO: evolve this to warn, then fail, in future language
// versions. Allow special cases like casting uint to uint for
// back-compat.
if (cCast->getCastKind() == CastKind::CK_NoOp) {
if (const ImplicitCastExpr *cast =
dyn_cast<ImplicitCastExpr>(cCast->getSubExpr())) {
if (cast->getCastKind() == CastKind::CK_LValueToRValue) {
// update the arg
argList[ArgIdx] = cast->getSubExpr();
continue;
}
}
}
}
// EmitLValue would report an error.
// Mark RValOnRef to create a tmpArg for it.
RValOnRef = true;
} else {
continue;
}
} else if (isAggregateType) {
// aggregate in-only - emit RValue, unless LValueToRValue cast
EmitRValueAgg = true;
if (const ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(Arg)) {
if (cast->getCastKind() == CastKind::CK_LValueToRValue) {
EmitRValueAgg = false;
}
}
} else {
// Must be object
DXASSERT(isObject,
"otherwise, flow condition changed, breaking assumption");
// in-only objects should be skipped to preserve previous behavior.
if (!bAnnotResource)
continue;
}
}
// Skip unbounded array, since we cannot preserve copy-in copy-out
// semantics for these.
if (ParamTy->isIncompleteArrayType()) {
continue;
}
if (!Param->isModifierOut() && !RValOnRef) {
// No need to copy arg to in-only param for hlsl intrinsic.
if (const FunctionDecl *Callee = E->getDirectCallee()) {
if (Callee->hasAttr<HLSLIntrinsicAttr>())
continue;
}
}
// get original arg
// FIXME: This will not emit in correct argument order with the other
// arguments. This should be integrated into
// CodeGenFunction::EmitCallArg if possible.
RValue argRV; // emit this if aggregate arg on in-only param
LValue argLV; // otherwise, we may emit this
llvm::Value *argAddr = nullptr;
QualType argType = Arg->getType();
CharUnits argAlignment;
if (EmitRValueAgg) {
argRV = CGF.EmitAnyExprToTemp(Arg);
argAddr = argRV.getAggregateAddr(); // must be alloca
argAlignment =
CharUnits::fromQuantity(cast<AllocaInst>(argAddr)->getAlignment());
argLV =
LValue::MakeAddr(argAddr, ParamTy, argAlignment, CGF.getContext());
} else {
argLV = CGF.EmitLValue(Arg);
if (argLV.isSimple())
argAddr = argLV.getAddress();
bool mustCopy = bAnnotResource;
// If matrix orientation changes, we must copy here
// TODO: A high level intrinsic for matrix array copy with orientation
// change would be much easier to optimize/eliminate at high level
// after inline.
if (!mustCopy && isMatrix) {
mustCopy = !AreMatrixArrayOrientationMatching(
CGF.getContext(), *m_pHLModule, argType, ParamTy);
}
if (!mustCopy) {
// When an argument needs lowering (e.g. a buffer/cbuffer load), copy
// it so the lowering does not happen on the argument itself when the
// callee is noinline or an extern function. This is done in
// HLLegalizeParameter, after extern functions are known but before
// inlining.
Value *Ptr = argAddr;
while (GEPOperator *GEP = dyn_cast_or_null<GEPOperator>(Ptr)) {
Ptr = GEP->getPointerOperand();
}
// Skip copy-in copy-out when safe.
// The unsafe case is a global variable aliasing the parameter: if the
// global variable is updated in the function, the parameter is updated
// silently. For non-global variables and constant global variables it
// is safe.
bool SafeToSkip = false;
if (GlobalVariable *GV = dyn_cast_or_null<GlobalVariable>(Ptr)) {
SafeToSkip =
ParamTy.isConstQualified() &&
(m_ConstVarAnnotationMap.count(GV) > 0 || GV->isConstant());
}
if (Ptr) {
if (isa<AllocaInst>(Ptr) && 0 == ArgVals.count(Ptr))
SafeToSkip = true;
// Safe to skip if groupshared ptr passed to groupshared parameter.
else if (Ptr->getType()->getPointerAddressSpace() ==
DXIL::kTGSMAddrSpace &&
ParamTy.getAddressSpace() == DXIL::kTGSMAddrSpace)
SafeToSkip = true;
else if (const auto *A = dyn_cast<Argument>(Ptr))
SafeToSkip = A->hasNoAliasAttr() && 0 == ArgVals.count(Ptr);
}
if (argAddr && SafeToSkip) {
ArgVals.insert(Ptr);
llvm::Type *ToTy = CGF.ConvertType(ParamTy.getNonReferenceType());
if (argAddr->getType()->getPointerElementType() == ToTy &&
// Check clang Type for case like int cast to unsigned.
ParamTy.getNonReferenceType().getCanonicalType().getTypePtr() ==
Arg->getType().getCanonicalType().getTypePtr())
continue;
}
}
argType =
argLV.getType(); // TBD: Can this be different than Arg->getType()?
argAlignment = argLV.getAlignment();
}
// After emitting Arg we must update argList[i], otherwise the
// expression gets emitted twice.
// Create the temp var.
VarDecl *tmpArg =
VarDecl::Create(CGF.getContext(), const_cast<FunctionDecl *>(FD),
SourceLocation(), SourceLocation(),
/*IdentifierInfo*/ nullptr, ParamTy,
CGF.getContext().getTrivialTypeSourceInfo(ParamTy),
StorageClass::SC_Auto);
// Aggregate types become indirect params converted to pointer types,
// so don't update to ReferenceType; use an RValue for them.
const DeclRefExpr *tmpRef = DeclRefExpr::Create(
CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), tmpArg,
/*enclosing*/ false, tmpArg->getLocation(), ParamTy,
(isAggregateType || isObject) ? VK_RValue : VK_LValue);
// Must update the arg, since we already emitted Arg; otherwise it is emitted
// twice.
argList[ArgIdx] = tmpRef;
// create alloc for the tmp arg
Value *tmpArgAddr = nullptr;
BasicBlock *InsertBlock = CGF.Builder.GetInsertBlock();
Function *F = InsertBlock->getParent();
// Make sure the alloca is in the entry block so inlining does not create a
// stacksave.
IRBuilder<> AllocaBuilder(dxilutil::FindAllocaInsertionPt(F));
tmpArgAddr = AllocaBuilder.CreateAlloca(CGF.ConvertTypeForMem(ParamTy));
if (CGM.getCodeGenOpts().HLSLEnableLifetimeMarkers) {
const uint64_t AllocaSize =
CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(ParamTy));
CGF.EmitLifetimeStart(AllocaSize, tmpArgAddr);
}
// add it to local decl map
TmpArgMap(tmpArg, tmpArgAddr);
LValue tmpLV =
LValue::MakeAddr(tmpArgAddr, ParamTy, argAlignment, CGF.getContext());
// save for cast after call
if (Param->isModifierOut()) {
castArgList.emplace_back(tmpLV);
castArgList.emplace_back(argLV);
if (isVector && !hlsl::IsHLSLVecType(argType)) {
// This assumes only implicit casts because explicit casts can only
// produce RValues currently and out parameters are LValues.
DiagnosticsEngine &Diags = CGM.getDiags();
Diags.Report(Param->getLocation(),
diag::warn_hlsl_implicit_vector_truncation);
}
}
// save to generate lifetime end after call
if (CGM.getCodeGenOpts().HLSLEnableLifetimeMarkers)
lifetimeCleanupList.emplace_back(tmpLV);
// cast before the call
if (Param->isModifierIn() &&
// Don't copy object
!isObject) {
QualType ArgTy = Arg->getType();
Value *outVal = nullptr;
if (!isAggregateType) {
if (!IsHLSLMatType(ParamTy)) {
RValue outRVal = CGF.EmitLoadOfLValue(argLV, SourceLocation());
outVal = outRVal.getScalarVal();
} else {
DXASSERT(argAddr, "should be RV or simple LV");
outVal = EmitHLSLMatrixLoad(CGF, argAddr, ArgTy);
}
llvm::Type *ToTy = tmpArgAddr->getType()->getPointerElementType();
if (HLMatrixType::isa(ToTy)) {
Value *castVal = CGF.Builder.CreateBitCast(outVal, ToTy);
EmitHLSLMatrixStore(CGF, castVal, tmpArgAddr, ParamTy);
} else {
if (outVal->getType()->isVectorTy()) {
Value *castVal =
ConvertScalarOrVector(CGF, outVal, argType, ParamTy);
castVal = CGF.EmitToMemory(castVal, ParamTy);
CGF.Builder.CreateStore(castVal, tmpArgAddr);
} else {
// This allows for splatting, unlike the above.
SimpleFlatValCopy(CGF, outVal, argType, tmpArgAddr, ParamTy);
}
}
} else {
DXASSERT(argAddr, "should be RV or simple LV");
SmallVector<Value *, 4> idxList;
EmitHLSLAggregateCopy(CGF, argAddr, tmpArgAddr, idxList, ArgTy, ParamTy,
argAddr->getType());
}
} else if (bAnnotResource) {
DxilResourceProperties RP = BuildResourceProperty(Arg->getType());
CopyAndAnnotateResourceArgument(argAddr, tmpArgAddr, RP, *m_pHLModule,
CGF);
mismatchGLCArgSet.insert(tmpArgAddr);
}
}
}
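// Copy each out-param temporary back to the original argument lvalue after
// the call, converting as needed; the counterpart of the copy-in above
// (e.g. the temporary float2 for foo(f.xy) is stored back through the
// f.xy lvalue).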
void CGMSHLSLRuntime::EmitHLSLOutParamConversionCopyBack(
CodeGenFunction &CGF, llvm::SmallVector<LValue, 8> &castArgList,
llvm::SmallVector<LValue, 8> &lifetimeCleanupList) {
for (uint32_t i = 0; i < castArgList.size(); i += 2) {
// cast after the call
LValue tmpLV = castArgList[i];
LValue argLV = castArgList[i + 1];
QualType ArgTy = argLV.getType().getNonReferenceType();
QualType ParamTy = tmpLV.getType().getNonReferenceType();
Value *tmpArgAddr = tmpLV.getAddress();
Value *outVal = nullptr;
bool isAggregateTy = hlsl::IsHLSLAggregateType(ArgTy);
bool isObject = dxilutil::IsHLSLObjectType(
tmpArgAddr->getType()->getPointerElementType());
if (!isObject) {
if (!isAggregateTy) {
if (!IsHLSLMatType(ParamTy))
outVal = CGF.Builder.CreateLoad(tmpArgAddr);
else
outVal = EmitHLSLMatrixLoad(CGF, tmpArgAddr, ParamTy);
outVal = CGF.EmitFromMemory(outVal, ParamTy);
llvm::Type *ToTy = CGF.ConvertType(ArgTy);
llvm::Type *FromTy = outVal->getType();
Value *castVal = outVal;
if (ToTy == FromTy) {
// Don't need cast.
} else if (ToTy->getScalarType() == FromTy->getScalarType()) {
if (ToTy->getScalarType() == ToTy) {
DXASSERT(FromTy->isVectorTy(), "must be vector");
castVal = CGF.Builder.CreateExtractElement(outVal, (uint64_t)0);
} else {
DXASSERT(!FromTy->isVectorTy(), "must be scalar type");
DXASSERT(ToTy->isVectorTy() && ToTy->getVectorNumElements() == 1,
"must be vector of 1 element");
castVal = UndefValue::get(ToTy);
castVal =
CGF.Builder.CreateInsertElement(castVal, outVal, (uint64_t)0);
}
} else {
castVal = ConvertScalarOrVector(CGF, outVal, tmpLV.getType(),
argLV.getType());
}
if (!HLMatrixType::isa(ToTy))
CGF.EmitStoreThroughLValue(RValue::get(castVal), argLV);
else {
Value *destPtr = argLV.getAddress();
EmitHLSLMatrixStore(CGF, castVal, destPtr, ArgTy);
}
} else {
SmallVector<Value *, 4> idxList;
EmitHLSLAggregateCopy(CGF, tmpLV.getAddress(), argLV.getAddress(),
idxList, ParamTy, ArgTy,
argLV.getAddress()->getType());
}
} else if (mismatchGLCArgSet.find(tmpArgAddr) == mismatchGLCArgSet.end()) {
tmpArgAddr->replaceAllUsesWith(argLV.getAddress());
}
}
for (LValue &tmpLV : lifetimeCleanupList) {
QualType ParamTy = tmpLV.getType().getNonReferenceType();
Value *tmpArgAddr = tmpLV.getAddress();
const uint64_t AllocaSize =
CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(ParamTy));
CGF.EmitLifetimeEnd(CGF.Builder.getInt64(AllocaSize), tmpArgAddr);
}
}
ScopeInfo *CGMSHLSLRuntime::GetScopeInfo(Function *F) {
auto it = m_ScopeMap.find(F);
if (it == m_ScopeMap.end())
return nullptr;
return &it->second;
}
void CGMSHLSLRuntime::MarkIfStmt(CodeGenFunction &CGF, BasicBlock *endIfBB) {
if (ScopeInfo *Scope = GetScopeInfo(CGF.CurFn))
Scope->AddIf(endIfBB);
}
void CGMSHLSLRuntime::MarkCleanupBlock(CodeGenFunction &CGF,
llvm::BasicBlock *cleanupBB) {
if (ScopeInfo *Scope = GetScopeInfo(CGF.CurFn))
Scope->AddCleanupBB(cleanupBB);
}
void CGMSHLSLRuntime::MarkSwitchStmt(CodeGenFunction &CGF,
SwitchInst *switchInst,
BasicBlock *endSwitch) {
if (ScopeInfo *Scope = GetScopeInfo(CGF.CurFn))
Scope->AddSwitch(endSwitch);
}
void CGMSHLSLRuntime::MarkReturnStmt(CodeGenFunction &CGF,
BasicBlock *bbWithRet) {
if (ScopeInfo *Scope = GetScopeInfo(CGF.CurFn))
Scope->AddRet(bbWithRet);
}
void CGMSHLSLRuntime::MarkLoopStmt(CodeGenFunction &CGF,
BasicBlock *loopContinue,
BasicBlock *loopExit) {
if (ScopeInfo *Scope = GetScopeInfo(CGF.CurFn))
Scope->AddLoop(loopContinue, loopExit);
}
Scope *CGMSHLSLRuntime::MarkScopeEnd(CodeGenFunction &CGF) {
if (ScopeInfo *Scope = GetScopeInfo(CGF.CurFn)) {
llvm::BasicBlock *CurBB = CGF.Builder.GetInsertBlock();
bool bScopeFinishedWithRet = !CurBB || CurBB->getTerminator();
return &Scope->EndScope(bScopeFinishedWithRet);
}
return nullptr;
}
CGHLSLRuntime *CodeGen::CreateMSHLSLRuntime(CodeGenModule &CGM) {
return new CGMSHLSLRuntime(CGM);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/CMakeLists.txt | add_subdirectory(Core)
add_subdirectory(Checkers)
add_subdirectory(Frontend)
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/README.txt | //===----------------------------------------------------------------------===//
// Clang Static Analyzer
//===----------------------------------------------------------------------===//
= Library Structure =
The analyzer library has two layers: a (low-level) static analysis
engine (GRExprEngine.cpp and friends), and some static checkers
(*Checker.cpp). The latter are built on top of the former via the
Checker and CheckerVisitor interfaces (Checker.h and
CheckerVisitor.h). The Checker interface is designed to be minimal
and simple for checker writers, and attempts to isolate them from much
of the gore of the internal analysis engine.
= How It Works =
The analyzer is inspired by several foundational research papers ([1],
[2]). (FIXME: kremenek to add more links)
In a nutshell, the analyzer is basically a source code simulator that
traces out possible paths of execution. The state of the program
(values of variables and expressions) is encapsulated by the state
(ProgramState). A location in the program is called a program point
(ProgramPoint), and the combination of state and program point is a
node in an exploded graph (ExplodedGraph). The term "exploded" comes
from exploding the control-flow edges in the control-flow graph (CFG).
Conceptually the analyzer does a reachability analysis through the
ExplodedGraph. We start at a root node, which has the entry program
point and initial state, and then simulate transitions by analyzing
individual expressions. The analysis of an expression can cause the
state to change, resulting in a new node in the ExplodedGraph with an
updated program point and an updated state. A bug is found by hitting
a node that satisfies some "bug condition" (basically a violation of a
checking invariant).
The analyzer traces out multiple paths by reasoning about branches and
then bifurcating the state: on the true branch the conditions of the
branch are assumed to be true and on the false branch the conditions
of the branch are assumed to be false. Such "assumptions" create
constraints on the values of the program, and those constraints are
recorded in the ProgramState object (and are manipulated by the
ConstraintManager). If assuming the conditions of a branch would
cause the constraints to be unsatisfiable, the branch is considered
infeasible and that path is not taken. This is how we get
path-sensitivity. We reduce exponential blow-up by caching nodes. If
a new node with the same state and program point as an existing node
would get generated, the path "caches out" and we simply reuse the
existing node. Thus the ExplodedGraph is not a DAG; it can contain
cycles as paths loop back onto each other and cache out.
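For example, in the following fragment the analyzer explores both branches of
each 'if', but the constraints recorded along each path keep the two
conditions consistent, so the dereference is only reached on the path where
'p' was set (an illustrative sketch, not analyzer output):
  void f(int x) {
    int *p = 0;
    if (x > 0)
      p = &x;
    if (x > 0)
      *p = 1; // feasible only on the path where x > 0 also held above
  }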
ProgramState and ExplodedNodes are basically immutable once created: to get a
new program state, one creates a new ProgramState rather than mutating an
existing one. This immutability is key since the ExplodedGraph represents
the behavior of the analyzed program from the entry point. To
represent these efficiently, we use functional data structures (e.g.,
ImmutableMaps) which share data between instances.
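A minimal sketch of this functional-map idiom, using llvm::ImmutableMap
directly (illustrative only):
  llvm::ImmutableMap<int, int>::Factory F;
  llvm::ImmutableMap<int, int> Empty = F.getEmptyMap();
  // add() returns a new map that shares structure with the old one;
  // Empty itself is unchanged.
  llvm::ImmutableMap<int, int> M = F.add(Empty, /*Key=*/1, /*Data=*/42);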
Finally, individual Checkers work by also manipulating the analysis
state. The analyzer engine talks to them via a visitor interface.
For example, the PreVisitCallExpr() method is called by GRExprEngine
to tell the Checker that we are about to analyze a CallExpr, and the
checker is asked to check for any preconditions that might not be
satisfied. The checker can do nothing, or it can generate a new
ProgramState and ExplodedNode which contains updated checker state. If it
finds a bug, it can tell the BugReporter object about the bug,
providing it an ExplodedNode which is the last node in the path that
triggered the problem.
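A minimal checker sketch along these lines (shown with the newer Checker<>
template interface rather than CheckerVisitor; the checker name and body are
illustrative):
  class NullArgChecker : public Checker<check::PreCall> {
  public:
    void checkPreCall(const CallEvent &Call, CheckerContext &C) const {
      // Check preconditions of the call here. On a violation, generate an
      // error node and hand a bug report to the BugReporter.
    }
  };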
= Notes about C++ =
Since constructors are now seen in the CFG before the variable being
constructed, we create a temporary object as the destination region that is
constructed into. See ExprEngine::VisitCXXConstructExpr().
In ExprEngine::processCallExit(), we always bind the object region to the
evaluated CXXConstructExpr. Then in VisitDeclStmt(), we compute the
corresponding lazy compound value if the variable is not a reference, and
bind the variable region to the lazy compound value. If the variable
is a reference, just use the object region as the initializer value.
Before entering a C++ method (or ctor/dtor), the 'this' region is bound
to the object region. In ctors, we synthesize 'this' region with
CXXRecordDecl*, which means we do not use type qualifiers. In methods, we
synthesize 'this' region with CXXMethodDecl*, which has getThisType()
taking type qualifiers into account. It does not matter that we use a
qualified 'this' region in one method and an unqualified 'this' region in
another, because we only need to ensure that the 'this' region is consistent
when we synthesize it and create it directly from the CXXThisExpr in a single
method call.
= Working on the Analyzer =
If you are interested in bringing up support for C++ expressions, the
best place to look is the visitation logic in GRExprEngine, which
handles the simulation of individual expressions. There are plenty of
examples there of how other expressions are handled.
If you are interested in writing checkers, look at the Checker and
CheckerVisitor interfaces (Checker.h and CheckerVisitor.h). Also look
at the files named *Checker.cpp for examples on how you can implement
these interfaces.
= Debugging the Analyzer =
There are some useful command-line options for debugging. For example:
$ clang -cc1 -help | grep analyze
-analyze-function <value>
-analyzer-display-progress
-analyzer-viz-egraph-graphviz
...
The first allows you to specify only analyzing a specific function.
The second prints to the console what function is being analyzed. The
third generates a graphviz dot file of the ExplodedGraph. This is
extremely useful when debugging the analyzer and viewing the
simulation results.
Of course, viewing the CFG (Control-Flow Graph) is also useful:
$ clang -cc1 -help | grep cfg
-cfg-add-implicit-dtors Add C++ implicit destructors to CFGs for all analyses
-cfg-add-initializers Add C++ initializers to CFGs for all analyses
-cfg-dump Display Control-Flow Graphs
-cfg-view View Control-Flow Graphs using GraphViz
-unoptimized-cfg Generate unoptimized CFGs for all analyses
-cfg-dump dumps a textual representation of the CFG to the console,
and -cfg-view creates a GraphViz representation.
= References =
[1] Precise interprocedural dataflow analysis via graph reachability,
T Reps, S Horwitz, and M Sagiv, POPL '95,
http://portal.acm.org/citation.cfm?id=199462
[2] A memory model for static analysis of C programs, Z Xu, T
Kremenek, and J Zhang, http://lcs.ios.ac.cn/~xzx/memmodel.pdf
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Frontend/ModelInjector.cpp | //===-- ModelInjector.cpp ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "ModelInjector.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendAction.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/FileSystem.h"
#include <string>
#include <utility>
using namespace clang;
using namespace ento;
ModelInjector::ModelInjector(CompilerInstance &CI) : CI(CI) {}
Stmt *ModelInjector::getBody(const FunctionDecl *D) {
onBodySynthesis(D);
return Bodies[D->getName()];
}
Stmt *ModelInjector::getBody(const ObjCMethodDecl *D) {
onBodySynthesis(D);
return Bodies[D->getName()];
}
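// Look up and parse a ".model" file that provides a body for D. For example
// (illustrative invocation), with
//   clang --analyze -Xclang -analyzer-config -Xclang model-path=/models a.c
// a call to fopen() is modeled by parsing /models/fopen.model.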
void ModelInjector::onBodySynthesis(const NamedDecl *D) {
// FIXME: what about overloads? Declarations can be used as keys but what
// about file name index? Mangled names may not be suitable for that either.
if (Bodies.count(D->getName()) != 0)
return;
SourceManager &SM = CI.getSourceManager();
FileID mainFileID = SM.getMainFileID();
AnalyzerOptionsRef analyzerOpts = CI.getAnalyzerOpts();
llvm::StringRef modelPath = analyzerOpts->Config["model-path"];
llvm::SmallString<128> fileName;
if (!modelPath.empty())
fileName =
llvm::StringRef(modelPath.str() + "/" + D->getName().str() + ".model");
else
fileName = llvm::StringRef(D->getName().str() + ".model");
if (!llvm::sys::fs::exists(fileName.str())) {
Bodies[D->getName()] = nullptr;
return;
}
IntrusiveRefCntPtr<CompilerInvocation> Invocation(
new CompilerInvocation(CI.getInvocation()));
FrontendOptions &FrontendOpts = Invocation->getFrontendOpts();
InputKind IK = IK_CXX; // FIXME
FrontendOpts.Inputs.clear();
FrontendOpts.Inputs.emplace_back(fileName, IK);
FrontendOpts.DisableFree = true;
Invocation->getDiagnosticOpts().VerifyDiagnostics = 0;
// Modules are parsed by a separate CompilerInstance, so this code mimics that
// behavior for models.
CompilerInstance Instance(CI.getPCHContainerOperations());
Instance.setInvocation(&*Invocation);
Instance.createDiagnostics(
new ForwardingDiagnosticConsumer(CI.getDiagnosticClient()),
/*ShouldOwnClient=*/true);
Instance.getDiagnostics().setSourceManager(&SM);
Instance.setVirtualFileSystem(&CI.getVirtualFileSystem());
// The instance wants to take ownership; however, the DisableFree frontend
// option is set to true to avoid double-free issues.
Instance.setFileManager(&CI.getFileManager());
Instance.setSourceManager(&SM);
Instance.setPreprocessor(&CI.getPreprocessor());
Instance.setASTContext(&CI.getASTContext());
Instance.getPreprocessor().InitializeForModelFile();
ParseModelFileAction parseModelFile(Bodies);
const unsigned ThreadStackSize = 8 << 20;
llvm::CrashRecoveryContext CRC;
CRC.RunSafelyOnThread([&]() { Instance.ExecuteAction(parseModelFile); },
ThreadStackSize);
Instance.getPreprocessor().FinalizeForModelFile();
Instance.resetAndLeakSourceManager();
Instance.resetAndLeakFileManager();
Instance.resetAndLeakPreprocessor();
// The preprocessor enters the main file id when parsing starts, so the main
// file id is changed to the model file during parsing and needs to be reset
// to the former main file id after parsing of the model file is done.
SM.setMainFileID(mainFileID);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp | //===--- CheckerRegistration.cpp - Registration for the Analyzer Checkers -===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Defines the registration function for the analyzer checkers.
//
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/StaticAnalyzer/Checkers/ClangCheckers.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/CheckerOptInfo.h"
#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
using namespace clang;
using namespace ento;
using llvm::sys::DynamicLibrary;
namespace {
class ClangCheckerRegistry : public CheckerRegistry {
typedef void (*RegisterCheckersFn)(CheckerRegistry &);
static bool isCompatibleAPIVersion(const char *versionString);
static void warnIncompatible(DiagnosticsEngine *diags, StringRef pluginPath,
const char *pluginAPIVersion);
public:
ClangCheckerRegistry(ArrayRef<std::string> plugins,
DiagnosticsEngine *diags = nullptr);
};
} // end anonymous namespace
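// An analyzer plugin is expected to export the two symbols looked up below.
// A minimal sketch (illustrative; MyChecker is hypothetical):
//   extern "C" const char clang_analyzerAPIVersionString[] =
//       CLANG_ANALYZER_API_VERSION_STRING;
//   extern "C" void clang_registerCheckers(CheckerRegistry &registry) {
//     registry.addChecker<MyChecker>("example.MyChecker", "What it checks");
//   }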
ClangCheckerRegistry::ClangCheckerRegistry(ArrayRef<std::string> plugins,
DiagnosticsEngine *diags) {
registerBuiltinCheckers(*this);
for (ArrayRef<std::string>::iterator i = plugins.begin(), e = plugins.end();
i != e; ++i) {
// Get access to the plugin.
DynamicLibrary lib = DynamicLibrary::getPermanentLibrary(i->c_str());
// See if it's compatible with this build of clang.
const char *pluginAPIVersion =
(const char *) lib.getAddressOfSymbol("clang_analyzerAPIVersionString");
if (!isCompatibleAPIVersion(pluginAPIVersion)) {
warnIncompatible(diags, *i, pluginAPIVersion);
continue;
}
// Register its checkers.
RegisterCheckersFn registerPluginCheckers =
(RegisterCheckersFn) (intptr_t) lib.getAddressOfSymbol(
"clang_registerCheckers");
if (registerPluginCheckers)
registerPluginCheckers(*this);
}
}
bool ClangCheckerRegistry::isCompatibleAPIVersion(const char *versionString) {
// If the version string is null, it's not an analyzer plugin.
if (!versionString)
return false;
// For now, none of the static analyzer API is considered stable.
// Versions must match exactly.
if (strcmp(versionString, CLANG_ANALYZER_API_VERSION_STRING) == 0)
return true;
return false;
}
void ClangCheckerRegistry::warnIncompatible(DiagnosticsEngine *diags,
StringRef pluginPath,
const char *pluginAPIVersion) {
if (!diags)
return;
if (!pluginAPIVersion)
return;
diags->Report(diag::warn_incompatible_analyzer_plugin_api)
<< llvm::sys::path::filename(pluginPath);
diags->Report(diag::note_incompatible_analyzer_plugin_api)
<< CLANG_ANALYZER_API_VERSION_STRING
<< pluginAPIVersion;
}
std::unique_ptr<CheckerManager>
ento::createCheckerManager(AnalyzerOptions &opts, const LangOptions &langOpts,
ArrayRef<std::string> plugins,
DiagnosticsEngine &diags) {
std::unique_ptr<CheckerManager> checkerMgr(
new CheckerManager(langOpts, &opts));
SmallVector<CheckerOptInfo, 8> checkerOpts;
for (unsigned i = 0, e = opts.CheckersControlList.size(); i != e; ++i) {
const std::pair<std::string, bool> &opt = opts.CheckersControlList[i];
checkerOpts.push_back(CheckerOptInfo(opt.first.c_str(), opt.second));
}
ClangCheckerRegistry allCheckers(plugins, &diags);
allCheckers.initializeManager(*checkerMgr, checkerOpts);
allCheckers.validateCheckerOptions(opts, diags);
checkerMgr->finishedCheckerRegistration();
for (unsigned i = 0, e = checkerOpts.size(); i != e; ++i) {
if (checkerOpts[i].isUnclaimed()) {
diags.Report(diag::err_unknown_analyzer_checker)
<< checkerOpts[i].getName();
diags.Report(diag::note_suggest_disabling_all_checkers);
}
}
return checkerMgr;
}
void ento::printCheckerHelp(raw_ostream &out, ArrayRef<std::string> plugins) {
out << "OVERVIEW: Clang Static Analyzer Checkers List\n\n";
out << "USAGE: -analyzer-checker <CHECKER or PACKAGE,...>\n\n";
ClangCheckerRegistry(plugins).printHelp(out);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | //===--- AnalysisConsumer.cpp - ASTConsumer for running Analyses ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// "Meta" ASTConsumer for running different source analyses.
//
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Frontend/AnalysisConsumer.h"
#include "ModelInjector.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/DataRecursiveASTVisitor.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ParentMap.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CallGraph.h"
#include "clang/Analysis/CodeInjector.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
#include <queue>
using namespace clang;
using namespace ento;
using llvm::SmallPtrSet;
#define DEBUG_TYPE "AnalysisConsumer"
static std::unique_ptr<ExplodedNode::Auditor> CreateUbiViz();
STATISTIC(NumFunctionTopLevel, "The # of functions at top level.");
STATISTIC(NumFunctionsAnalyzed,
"The # of functions and blocks analyzed (as top level "
"with inlining turned on).");
STATISTIC(NumBlocksInAnalyzedFunctions,
"The # of basic blocks in the analyzed functions.");
STATISTIC(PercentReachableBlocks, "The % of reachable basic blocks.");
STATISTIC(MaxCFGSize, "The maximum number of basic blocks in a function.");
//===----------------------------------------------------------------------===//
// Special PathDiagnosticConsumers.
//===----------------------------------------------------------------------===//
void ento::createPlistHTMLDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
PathDiagnosticConsumers &C,
const std::string &prefix,
const Preprocessor &PP) {
createHTMLDiagnosticConsumer(AnalyzerOpts, C,
llvm::sys::path::parent_path(prefix), PP);
createPlistDiagnosticConsumer(AnalyzerOpts, C, prefix, PP);
}
void ento::createTextPathDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
PathDiagnosticConsumers &C,
const std::string &Prefix,
const clang::Preprocessor &PP) {
llvm_unreachable("'text' consumer should be enabled on ClangDiags");
}
namespace {
class ClangDiagPathDiagConsumer : public PathDiagnosticConsumer {
DiagnosticsEngine &Diag;
bool IncludePath;
public:
ClangDiagPathDiagConsumer(DiagnosticsEngine &Diag)
: Diag(Diag), IncludePath(false) {}
~ClangDiagPathDiagConsumer() override {}
StringRef getName() const override { return "ClangDiags"; }
bool supportsLogicalOpControlFlow() const override { return true; }
bool supportsCrossFileDiagnostics() const override { return true; }
PathGenerationScheme getGenerationScheme() const override {
return IncludePath ? Minimal : None;
}
void enablePaths() {
IncludePath = true;
}
void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
FilesMade *filesMade) override {
unsigned WarnID = Diag.getCustomDiagID(DiagnosticsEngine::Warning, "%0");
unsigned NoteID = Diag.getCustomDiagID(DiagnosticsEngine::Note, "%0");
for (std::vector<const PathDiagnostic*>::iterator I = Diags.begin(),
E = Diags.end(); I != E; ++I) {
const PathDiagnostic *PD = *I;
SourceLocation WarnLoc = PD->getLocation().asLocation();
Diag.Report(WarnLoc, WarnID) << PD->getShortDescription()
<< PD->path.back()->getRanges();
if (!IncludePath)
continue;
PathPieces FlatPath = PD->path.flatten(/*ShouldFlattenMacros=*/true);
for (PathPieces::const_iterator PI = FlatPath.begin(),
PE = FlatPath.end();
PI != PE; ++PI) {
SourceLocation NoteLoc = (*PI)->getLocation().asLocation();
Diag.Report(NoteLoc, NoteID) << (*PI)->getString()
<< (*PI)->getRanges();
}
}
}
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// AnalysisConsumer declaration.
//===----------------------------------------------------------------------===//
namespace {
class AnalysisConsumer : public AnalysisASTConsumer,
public DataRecursiveASTVisitor<AnalysisConsumer> {
enum {
AM_None = 0,
AM_Syntax = 0x1,
AM_Path = 0x2
};
typedef unsigned AnalysisMode;
/// Mode of the analyses while recursively visiting Decls.
AnalysisMode RecVisitorMode;
/// Bug Reporter to use while recursively visiting Decls.
BugReporter *RecVisitorBR;
public:
ASTContext *Ctx;
const Preprocessor &PP;
const std::string OutDir;
AnalyzerOptionsRef Opts;
ArrayRef<std::string> Plugins;
CodeInjector *Injector;
/// \brief Stores the declarations from the local translation unit.
/// Note, we pre-compute the local declarations at parse time as an
/// optimization to make sure we do not deserialize everything from disk.
/// The local declaration to all declarations ratio might be very small when
/// working with a PCH file.
SetOfDecls LocalTUDecls;
// Set of PathDiagnosticConsumers. Owned by AnalysisManager.
PathDiagnosticConsumers PathConsumers;
StoreManagerCreator CreateStoreMgr;
ConstraintManagerCreator CreateConstraintMgr;
std::unique_ptr<CheckerManager> checkerMgr;
std::unique_ptr<AnalysisManager> Mgr;
/// Times the analysis of each translation unit.
static llvm::Timer* TUTotalTimer;
/// The information about analyzed functions shared throughout the
/// translation unit.
FunctionSummariesTy FunctionSummaries;
AnalysisConsumer(const Preprocessor& pp,
const std::string& outdir,
AnalyzerOptionsRef opts,
ArrayRef<std::string> plugins,
CodeInjector *injector)
: RecVisitorMode(0), RecVisitorBR(nullptr), Ctx(nullptr), PP(pp),
OutDir(outdir), Opts(opts), Plugins(plugins), Injector(injector) {
DigestAnalyzerOptions();
if (Opts->PrintStats) {
llvm::EnableStatistics();
TUTotalTimer = new llvm::Timer("Analyzer Total Time");
}
}
~AnalysisConsumer() override {
if (Opts->PrintStats)
delete TUTotalTimer;
}
void DigestAnalyzerOptions() {
if (Opts->AnalysisDiagOpt != PD_NONE) {
// Create the PathDiagnosticConsumer.
ClangDiagPathDiagConsumer *clangDiags =
new ClangDiagPathDiagConsumer(PP.getDiagnostics());
PathConsumers.push_back(clangDiags);
if (Opts->AnalysisDiagOpt == PD_TEXT) {
clangDiags->enablePaths();
} else if (!OutDir.empty()) {
switch (Opts->AnalysisDiagOpt) {
default:
#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN) \
case PD_##NAME: \
CREATEFN(*Opts.get(), PathConsumers, OutDir, PP); \
break;
#include "clang/StaticAnalyzer/Core/Analyses.def"
}
}
}
// Create the analyzer component creators.
switch (Opts->AnalysisStoreOpt) {
default:
llvm_unreachable("Unknown store manager.");
#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATEFN) \
case NAME##Model: CreateStoreMgr = CREATEFN; break;
#include "clang/StaticAnalyzer/Core/Analyses.def"
}
switch (Opts->AnalysisConstraintsOpt) {
default:
llvm_unreachable("Unknown constraint manager.");
#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATEFN) \
case NAME##Model: CreateConstraintMgr = CREATEFN; break;
#include "clang/StaticAnalyzer/Core/Analyses.def"
}
}
void DisplayFunction(const Decl *D, AnalysisMode Mode,
ExprEngine::InliningModes IMode) {
if (!Opts->AnalyzerDisplayProgress)
return;
SourceManager &SM = Mgr->getASTContext().getSourceManager();
PresumedLoc Loc = SM.getPresumedLoc(D->getLocation());
if (Loc.isValid()) {
llvm::errs() << "ANALYZE";
if (Mode == AM_Syntax)
llvm::errs() << " (Syntax)";
else if (Mode == AM_Path) {
llvm::errs() << " (Path, ";
switch (IMode) {
case ExprEngine::Inline_Minimal:
llvm::errs() << " Inline_Minimal";
break;
case ExprEngine::Inline_Regular:
llvm::errs() << " Inline_Regular";
break;
}
llvm::errs() << ")";
}
else
assert(Mode == (AM_Syntax | AM_Path) && "Unexpected mode!");
llvm::errs() << ": " << Loc.getFilename();
if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
const NamedDecl *ND = cast<NamedDecl>(D);
llvm::errs() << ' ' << *ND << '\n';
}
else if (isa<BlockDecl>(D)) {
llvm::errs() << ' ' << "block(line:" << Loc.getLine() << ",col:"
<< Loc.getColumn() << '\n';
}
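      // Note: this ObjCMethodDecl branch appears to be unreachable, since
      // ObjCMethodDecl is already matched by the first branch above.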
else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
Selector S = MD->getSelector();
llvm::errs() << ' ' << S.getAsString();
}
}
}
void Initialize(ASTContext &Context) override {
Ctx = &Context;
checkerMgr = createCheckerManager(*Opts, PP.getLangOpts(), Plugins,
PP.getDiagnostics());
Mgr = llvm::make_unique<AnalysisManager>(
*Ctx, PP.getDiagnostics(), PP.getLangOpts(), PathConsumers,
CreateStoreMgr, CreateConstraintMgr, checkerMgr.get(), *Opts, Injector);
}
/// \brief Store the top level decls in the set to be processed later on.
/// (Doing this pre-processing avoids deserialization of data from PCH.)
bool HandleTopLevelDecl(DeclGroupRef D) override;
void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override;
void HandleTranslationUnit(ASTContext &C) override;
/// \brief Determine which inlining mode should be used when this function is
/// analyzed. This allows us to redefine the default inlining policies when
/// analyzing a given function.
ExprEngine::InliningModes
getInliningModeForFunction(const Decl *D, const SetOfConstDecls &Visited);
/// \brief Build the call graph for all the top level decls of this TU and
/// use it to define the order in which the functions should be visited.
void HandleDeclsCallGraph(const unsigned LocalTUDeclsSize);
/// \brief Run analyses (syntax or path-sensitive) on the given function.
/// \param Mode - determines if we are requesting syntax only or path
/// sensitive only analysis.
/// \param VisitedCallees - The output parameter, which is populated with the
/// set of functions which should be considered analyzed after analyzing the
/// given root function.
void HandleCode(Decl *D, AnalysisMode Mode,
ExprEngine::InliningModes IMode = ExprEngine::Inline_Minimal,
SetOfConstDecls *VisitedCallees = nullptr);
void RunPathSensitiveChecks(Decl *D,
ExprEngine::InliningModes IMode,
SetOfConstDecls *VisitedCallees);
void ActionExprEngine(Decl *D, bool ObjCGCEnabled,
ExprEngine::InliningModes IMode,
SetOfConstDecls *VisitedCallees);
/// Visitors for the RecursiveASTVisitor.
bool shouldWalkTypesOfTypeLocs() const { return false; }
/// Handle callbacks for arbitrary Decls.
bool VisitDecl(Decl *D) {
AnalysisMode Mode = getModeForDecl(D, RecVisitorMode);
if (Mode & AM_Syntax)
checkerMgr->runCheckersOnASTDecl(D, *Mgr, *RecVisitorBR);
return true;
}
bool VisitFunctionDecl(FunctionDecl *FD) {
IdentifierInfo *II = FD->getIdentifier();
if (II && II->getName().startswith("__inline"))
return true;
// We skip function template definitions, as their semantics is
// only determined when they are instantiated.
if (FD->isThisDeclarationADefinition() &&
!FD->isDependentContext()) {
assert(RecVisitorMode == AM_Syntax || Mgr->shouldInlineCall() == false);
HandleCode(FD, RecVisitorMode);
}
return true;
}
bool VisitObjCMethodDecl(ObjCMethodDecl *MD) {
if (MD->isThisDeclarationADefinition()) {
assert(RecVisitorMode == AM_Syntax || Mgr->shouldInlineCall() == false);
HandleCode(MD, RecVisitorMode);
}
return true;
}
bool VisitBlockDecl(BlockDecl *BD) {
if (BD->hasBody()) {
assert(RecVisitorMode == AM_Syntax || Mgr->shouldInlineCall() == false);
HandleCode(BD, RecVisitorMode);
}
return true;
}
void AddDiagnosticConsumer(PathDiagnosticConsumer *Consumer) override {
PathConsumers.push_back(Consumer);
}
private:
void storeTopLevelDecls(DeclGroupRef DG);
/// \brief Check if we should skip (not analyze) the given function.
AnalysisMode getModeForDecl(Decl *D, AnalysisMode Mode);
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// AnalysisConsumer implementation.
//===----------------------------------------------------------------------===//
llvm::Timer* AnalysisConsumer::TUTotalTimer = nullptr;
bool AnalysisConsumer::HandleTopLevelDecl(DeclGroupRef DG) {
storeTopLevelDecls(DG);
return true;
}
void AnalysisConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef DG) {
storeTopLevelDecls(DG);
}
void AnalysisConsumer::storeTopLevelDecls(DeclGroupRef DG) {
for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I) {
// Skip ObjCMethodDecl, wait for the objc container to avoid
// analyzing twice.
if (isa<ObjCMethodDecl>(*I))
continue;
LocalTUDecls.push_back(*I);
}
}
static bool shouldSkipFunction(const Decl *D,
const SetOfConstDecls &Visited,
const SetOfConstDecls &VisitedAsTopLevel) {
if (VisitedAsTopLevel.count(D))
return true;
// We want to re-analyze the functions as top level in the following cases:
// - The 'init' methods should be reanalyzed because
// ObjCNonNilReturnValueChecker assumes that '[super init]' never returns
// 'nil' and unless we analyze the 'init' functions as top level, we will
// not catch errors within defensive code.
// - We want to reanalyze all ObjC methods as top level to report Retain
// Count naming convention errors more aggressively.
if (isa<ObjCMethodDecl>(D))
return false;
// Otherwise, if we visited the function before, do not reanalyze it.
return Visited.count(D);
}
ExprEngine::InliningModes
AnalysisConsumer::getInliningModeForFunction(const Decl *D,
const SetOfConstDecls &Visited) {
// We want to reanalyze all ObjC methods as top level to report Retain
// Count naming convention errors more aggressively. But we should tune down
// inlining when reanalyzing an already inlined function.
if (Visited.count(D)) {
assert(isa<ObjCMethodDecl>(D) &&
"We are only reanalyzing ObjCMethods.");
const ObjCMethodDecl *ObjCM = cast<ObjCMethodDecl>(D);
if (ObjCM->getMethodFamily() != OMF_init)
return ExprEngine::Inline_Minimal;
}
return ExprEngine::Inline_Regular;
}
void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
// Build the Call Graph by adding all the top level declarations to the graph.
// Note: CallGraph can trigger deserialization of more items from a pch
// (through HandleInterestingDecl), triggering additions to LocalTUDecls.
// We rely on random access to add the initially processed Decls to CG.
CallGraph CG;
for (unsigned i = 0 ; i < LocalTUDeclsSize ; ++i) {
CG.addToCallGraph(LocalTUDecls[i]);
}
// Walk over all of the call graph nodes in topological order, so that we
// analyze parents before the children. Skip the functions inlined into
// the previously processed functions. Use external Visited set to identify
// inlined functions. The topological order allows the "do not reanalyze
// previously inlined function" performance heuristic to be triggered more
// often.
SetOfConstDecls Visited;
SetOfConstDecls VisitedAsTopLevel;
llvm::ReversePostOrderTraversal<clang::CallGraph*> RPOT(&CG);
for (llvm::ReversePostOrderTraversal<clang::CallGraph*>::rpo_iterator
I = RPOT.begin(), E = RPOT.end(); I != E; ++I) {
NumFunctionTopLevel++;
CallGraphNode *N = *I;
Decl *D = N->getDecl();
// Skip the abstract root node.
if (!D)
continue;
// Skip the functions which have been processed already or previously
// inlined.
if (shouldSkipFunction(D, Visited, VisitedAsTopLevel))
continue;
// Analyze the function.
SetOfConstDecls VisitedCallees;
HandleCode(D, AM_Path, getInliningModeForFunction(D, Visited),
(Mgr->options.InliningMode == All ? nullptr : &VisitedCallees));
// Add the visited callees to the global visited set.
for (SetOfConstDecls::iterator I = VisitedCallees.begin(),
E = VisitedCallees.end(); I != E; ++I) {
Visited.insert(*I);
}
VisitedAsTopLevel.insert(D);
}
}
void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
// Don't run the actions if an error has occurred with parsing the file.
DiagnosticsEngine &Diags = PP.getDiagnostics();
if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred())
return;
// Don't analyze if the user explicitly asked for no checks to be performed
// on this file.
if (Opts->DisableAllChecks)
return;
{
if (TUTotalTimer) TUTotalTimer->startTimer();
// Introduce a scope to destroy BR before Mgr.
BugReporter BR(*Mgr);
TranslationUnitDecl *TU = C.getTranslationUnitDecl();
checkerMgr->runCheckersOnASTDecl(TU, *Mgr, BR);
// Run the AST-only checks using the order in which functions are defined.
// If inlining is not turned on, use the simplest function order for
// path-sensitive analyses as well.
RecVisitorMode = AM_Syntax;
if (!Mgr->shouldInlineCall())
RecVisitorMode |= AM_Path;
RecVisitorBR = &BR;
// Process all the top level declarations.
//
// Note: TraverseDecl may modify LocalTUDecls, but only by appending more
// entries. Thus we don't use an iterator, but rely on LocalTUDecls
// random access. By doing so, we automatically compensate for iterators
// possibly being invalidated, although this is a bit slower.
const unsigned LocalTUDeclsSize = LocalTUDecls.size();
for (unsigned i = 0 ; i < LocalTUDeclsSize ; ++i) {
TraverseDecl(LocalTUDecls[i]);
}
if (Mgr->shouldInlineCall())
HandleDeclsCallGraph(LocalTUDeclsSize);
// After all decls handled, run checkers on the entire TranslationUnit.
checkerMgr->runCheckersOnEndOfTranslationUnit(TU, *Mgr, BR);
RecVisitorBR = nullptr;
}
// Explicitly destroy the PathDiagnosticConsumer. This will flush its output.
// FIXME: This should be replaced with something that doesn't rely on
// side-effects in PathDiagnosticConsumer's destructor. This is required when
// used with option -disable-free.
Mgr.reset();
if (TUTotalTimer) TUTotalTimer->stopTimer();
// Count how many basic blocks we have not covered.
NumBlocksInAnalyzedFunctions = FunctionSummaries.getTotalNumBasicBlocks();
if (NumBlocksInAnalyzedFunctions > 0)
PercentReachableBlocks =
(FunctionSummaries.getTotalNumVisitedBasicBlocks() * 100) /
NumBlocksInAnalyzedFunctions;
}
static std::string getFunctionName(const Decl *D) {
if (const ObjCMethodDecl *ID = dyn_cast<ObjCMethodDecl>(D)) {
return ID->getSelector().getAsString();
}
if (const FunctionDecl *ND = dyn_cast<FunctionDecl>(D)) {
IdentifierInfo *II = ND->getIdentifier();
if (II)
return II->getName();
}
return "";
}
AnalysisConsumer::AnalysisMode
AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) {
if (!Opts->AnalyzeSpecificFunction.empty() &&
getFunctionName(D) != Opts->AnalyzeSpecificFunction)
return AM_None;
// Unless -analyze-all is specified, treat decls differently depending on
// where they came from:
// - Main source file: run both path-sensitive and non-path-sensitive checks.
// - Header files: run non-path-sensitive checks only.
// - System headers: don't run any checks.
SourceManager &SM = Ctx->getSourceManager();
SourceLocation SL = D->hasBody() ? D->getBody()->getLocStart()
: D->getLocation();
SL = SM.getExpansionLoc(SL);
if (!Opts->AnalyzeAll && !SM.isWrittenInMainFile(SL)) {
if (SL.isInvalid() || SM.isInSystemHeader(SL))
return AM_None;
return Mode & ~AM_Path;
}
return Mode;
}
void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
ExprEngine::InliningModes IMode,
SetOfConstDecls *VisitedCallees) {
if (!D->hasBody())
return;
Mode = getModeForDecl(D, Mode);
if (Mode == AM_None)
return;
DisplayFunction(D, Mode, IMode);
CFG *DeclCFG = Mgr->getCFG(D);
if (DeclCFG) {
unsigned CFGSize = DeclCFG->size();
MaxCFGSize = MaxCFGSize < CFGSize ? CFGSize : MaxCFGSize;
}
// Clear the AnalysisManager of old AnalysisDeclContexts.
Mgr->ClearContexts();
BugReporter BR(*Mgr);
if (Mode & AM_Syntax)
checkerMgr->runCheckersOnASTBody(D, *Mgr, BR);
if ((Mode & AM_Path) && checkerMgr->hasPathSensitiveCheckers()) {
RunPathSensitiveChecks(D, IMode, VisitedCallees);
if (IMode != ExprEngine::Inline_Minimal)
NumFunctionsAnalyzed++;
}
}
//===----------------------------------------------------------------------===//
// Path-sensitive checking.
//===----------------------------------------------------------------------===//
void AnalysisConsumer::ActionExprEngine(Decl *D, bool ObjCGCEnabled,
ExprEngine::InliningModes IMode,
SetOfConstDecls *VisitedCallees) {
// Construct the analysis engine. First check if the CFG is valid.
// FIXME: Inter-procedural analysis will need to handle invalid CFGs.
if (!Mgr->getCFG(D))
return;
// See if the LiveVariables analysis scales.
if (!Mgr->getAnalysisDeclContext(D)->getAnalysis<RelaxedLiveVariables>())
return;
ExprEngine Eng(*Mgr, ObjCGCEnabled, VisitedCallees, &FunctionSummaries,IMode);
// Set the graph auditor.
std::unique_ptr<ExplodedNode::Auditor> Auditor;
if (Mgr->options.visualizeExplodedGraphWithUbiGraph) {
Auditor = CreateUbiViz();
ExplodedNode::SetAuditor(Auditor.get());
}
// Execute the worklist algorithm.
Eng.ExecuteWorkList(Mgr->getAnalysisDeclContextManager().getStackFrame(D),
Mgr->options.getMaxNodesPerTopLevelFunction());
// Release the auditor (if any) so that it doesn't monitor the graph
// created by BugReporter.
ExplodedNode::SetAuditor(nullptr);
// Visualize the exploded graph.
if (Mgr->options.visualizeExplodedGraphWithGraphViz)
Eng.ViewGraph(Mgr->options.TrimGraph);
// Display warnings.
Eng.getBugReporter().FlushReports();
}
void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
ExprEngine::InliningModes IMode,
SetOfConstDecls *Visited) {
switch (Mgr->getLangOpts().getGC()) {
case LangOptions::NonGC:
ActionExprEngine(D, false, IMode, Visited);
break;
case LangOptions::GCOnly:
ActionExprEngine(D, true, IMode, Visited);
break;
case LangOptions::HybridGC:
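    // Hybrid GC code must be correct both with and without GC, so analyze
    // both variants.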
ActionExprEngine(D, false, IMode, Visited);
ActionExprEngine(D, true, IMode, Visited);
break;
}
}
//===----------------------------------------------------------------------===//
// AnalysisConsumer creation.
//===----------------------------------------------------------------------===//
std::unique_ptr<AnalysisASTConsumer>
ento::CreateAnalysisConsumer(CompilerInstance &CI) {
// Disable the effects of '-Werror' when using the AnalysisConsumer.
CI.getPreprocessor().getDiagnostics().setWarningsAsErrors(false);
AnalyzerOptionsRef analyzerOpts = CI.getAnalyzerOpts();
bool hasModelPath = analyzerOpts->Config.count("model-path") > 0;
return llvm::make_unique<AnalysisConsumer>(
CI.getPreprocessor(), CI.getFrontendOpts().OutputFile, analyzerOpts,
CI.getFrontendOpts().Plugins,
hasModelPath ? new ModelInjector(CI) : nullptr);
}
//===----------------------------------------------------------------------===//
// Ubigraph Visualization. FIXME: Move to separate file.
//===----------------------------------------------------------------------===//
namespace {
class UbigraphViz : public ExplodedNode::Auditor {
std::unique_ptr<raw_ostream> Out;
std::string Filename;
unsigned Cntr;
typedef llvm::DenseMap<void*,unsigned> VMap;
VMap M;
public:
UbigraphViz(std::unique_ptr<raw_ostream> Out, StringRef Filename);
~UbigraphViz() override;
void AddEdge(ExplodedNode *Src, ExplodedNode *Dst) override;
};
} // end anonymous namespace
static std::unique_ptr<ExplodedNode::Auditor> CreateUbiViz() {
SmallString<128> P;
int FD;
llvm::sys::fs::createTemporaryFile("llvm_ubi", "", FD, P);
llvm::errs() << "Writing '" << P << "'.\n";
auto Stream = llvm::make_unique<llvm::raw_fd_ostream>(FD, true);
return llvm::make_unique<UbigraphViz>(std::move(Stream), P);
}
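// AddEdge emits Ubigraph API tuples such as ('vertex', id, attrs) and
// ('edge', src, dst, attrs); the external 'ubiviz' tool replays them (see
// ~UbigraphViz below).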
void UbigraphViz::AddEdge(ExplodedNode *Src, ExplodedNode *Dst) {
  assert(Src != Dst && "Self-edges are not allowed.");
// Lookup the Src. If it is a new node, it's a root.
  VMap::iterator SrcI = M.find(Src);
unsigned SrcID;
if (SrcI == M.end()) {
M[Src] = SrcID = Cntr++;
*Out << "('vertex', " << SrcID << ", ('color','#00ff00'))\n";
}
else
SrcID = SrcI->second;
// Lookup the Dst.
  VMap::iterator DstI = M.find(Dst);
unsigned DstID;
if (DstI == M.end()) {
M[Dst] = DstID = Cntr++;
*Out << "('vertex', " << DstID << ")\n";
}
else {
// We have hit DstID before. Change its style to reflect a cache hit.
DstID = DstI->second;
*Out << "('change_vertex_style', " << DstID << ", 1)\n";
}
// Add the edge.
*Out << "('edge', " << SrcID << ", " << DstID
<< ", ('arrow','true'), ('oriented', 'true'))\n";
}
UbigraphViz::UbigraphViz(std::unique_ptr<raw_ostream> Out, StringRef Filename)
: Out(std::move(Out)), Filename(Filename), Cntr(0) {
*Out << "('vertex_style_attribute', 0, ('shape', 'icosahedron'))\n";
*Out << "('vertex_style', 1, 0, ('shape', 'sphere'), ('color', '#ffcc66'),"
" ('size', '1.5'))\n";
}
UbigraphViz::~UbigraphViz() {
Out.reset();
llvm::errs() << "Running 'ubiviz' program... ";
#ifdef MSFT_SUPPORTS_CHILD_PROCESSES
std::string ErrMsg;
std::string Ubiviz;
if (auto Path = llvm::sys::findProgramByName("ubiviz"))
Ubiviz = *Path;
std::vector<const char*> args;
args.push_back(Ubiviz.c_str());
args.push_back(Filename.c_str());
args.push_back(nullptr);
if (llvm::sys::ExecuteAndWait(Ubiviz, &args[0], nullptr, nullptr, 0, 0,
&ErrMsg)) {
llvm::errs() << "Error viewing graph: " << ErrMsg << "\n";
}
// Delete the file.
llvm::sys::fs::remove(Filename);
#endif // MSFT_SUPPORTS_CHILD_PROCESSES
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Frontend/CMakeLists.txt | include_directories( ${CMAKE_CURRENT_BINARY_DIR}/../Checkers )
set(LLVM_LINK_COMPONENTS
Support
)
add_clang_library(clangStaticAnalyzerFrontend
AnalysisConsumer.cpp
CheckerRegistration.cpp
ModelConsumer.cpp
FrontendActions.cpp
ModelInjector.cpp
LINK_LIBS
clangAST
clangAnalysis
clangBasic
clangFrontend
clangLex
clangStaticAnalyzerCheckers
clangStaticAnalyzerCore
)
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp | //===--- ModelConsumer.cpp - ASTConsumer for consuming model files --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file implements an ASTConsumer for consuming model files.
///
/// This ASTConsumer handles the AST of a parsed model file. All top level
/// function definitions will be collected from that model file for later
/// retrieval during the static analysis. The body of these functions will not
/// be injected into the ASTUnit of the analyzed translation unit. It will be
/// available through the BodyFarm which is utilized by the AnalysisDeclContext
/// class.
///
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Frontend/ModelConsumer.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclGroup.h"
using namespace clang;
using namespace ento;
ModelConsumer::ModelConsumer(llvm::StringMap<Stmt *> &Bodies)
: Bodies(Bodies) {}
bool ModelConsumer::HandleTopLevelDecl(DeclGroupRef D) {
for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
// Only interested in definitions.
const FunctionDecl *func = llvm::dyn_cast<FunctionDecl>(*I);
if (func && func->hasBody()) {
Bodies.insert(std::make_pair(func->getName(), func->getBody()));
}
}
return true;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Frontend/FrontendActions.cpp | //===--- FrontendActions.cpp ----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
#include "clang/StaticAnalyzer/Frontend/AnalysisConsumer.h"
#include "clang/StaticAnalyzer/Frontend/ModelConsumer.h"
using namespace clang;
using namespace ento;
std::unique_ptr<ASTConsumer>
AnalysisAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
return CreateAnalysisConsumer(CI);
}
ParseModelFileAction::ParseModelFileAction(llvm::StringMap<Stmt *> &Bodies)
: Bodies(Bodies) {}
std::unique_ptr<ASTConsumer>
ParseModelFileAction::CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) {
return llvm::make_unique<ModelConsumer>(Bodies);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h | //===-- ModelInjector.h -----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file defines the clang::ento::ModelInjector class which implements the
/// clang::CodeInjector interface. This class is responsible for injecting
/// function definitions that were synthesized from model files.
///
/// Model files allow definitions of functions to be lazily constituted for functions
/// which lack bodies in the original source code. This allows the analyzer
/// to more precisely analyze code that calls such functions, analyzing the
/// artificial definitions (which typically approximate the semantics of the
/// called function) when called by client code. These definitions are
/// reconstituted lazily, on-demand, by the static analyzer engine.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SA_FRONTEND_MODELINJECTOR_H
#define LLVM_CLANG_SA_FRONTEND_MODELINJECTOR_H
#include "clang/Analysis/CodeInjector.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/StringMap.h"
#include <map>
#include <memory>
#include <vector>
namespace clang {
class CompilerInstance;
class ASTUnit;
class ASTReader;
class NamedDecl;
class Module;
namespace ento {
class ModelInjector : public CodeInjector {
public:
ModelInjector(CompilerInstance &CI);
Stmt *getBody(const FunctionDecl *D) override;
Stmt *getBody(const ObjCMethodDecl *D) override;
private:
/// \brief Synthesize a body for a declaration
///
/// This method first looks up the appropriate model file based on the
/// model-path configuration option and the name of the declaration that is
/// looked up. If no model has been synthesized yet for a function with that
/// name, it will create a new compiler instance to parse the model file,
/// using the ASTContext, Preprocessor, and SourceManager of the original
/// compiler instance. These resources are shared between the two compiler
/// instances, so the newly created instance has to "leak" them, since they
/// are owned by the original instance.
///
/// The model-path should be either an absolute path or relative to the
/// working directory of the compiler.
void onBodySynthesis(const NamedDecl *D);
CompilerInstance &CI;
// FIXME: double memoization is redundant, with memoization both here and in
// BodyFarm.
llvm::StringMap<Stmt *> Bodies;
};
}
}
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp | //== BasicObjCFoundationChecks.cpp - Simple Apple-Foundation checks -*- C++ -*--
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines BasicObjCFoundationChecks, a class that encapsulates
// a set of simple checks to run on Objective-C code using Apple's Foundation
// classes.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "SelectorExtras.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
namespace {
class APIMisuse : public BugType {
public:
APIMisuse(const CheckerBase *checker, const char *name)
: BugType(checker, name, "API Misuse (Apple)") {}
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Utility functions.
//===----------------------------------------------------------------------===//
static StringRef GetReceiverInterfaceName(const ObjCMethodCall &msg) {
if (const ObjCInterfaceDecl *ID = msg.getReceiverInterface())
return ID->getIdentifier()->getName();
return StringRef();
}
enum FoundationClass {
FC_None,
FC_NSArray,
FC_NSDictionary,
FC_NSEnumerator,
FC_NSNull,
FC_NSOrderedSet,
FC_NSSet,
FC_NSString
};
static FoundationClass findKnownClass(const ObjCInterfaceDecl *ID,
bool IncludeSuperclasses = true) {
static llvm::StringMap<FoundationClass> Classes;
if (Classes.empty()) {
Classes["NSArray"] = FC_NSArray;
Classes["NSDictionary"] = FC_NSDictionary;
Classes["NSEnumerator"] = FC_NSEnumerator;
Classes["NSNull"] = FC_NSNull;
Classes["NSOrderedSet"] = FC_NSOrderedSet;
Classes["NSSet"] = FC_NSSet;
Classes["NSString"] = FC_NSString;
}
// FIXME: Should we cache this at all?
FoundationClass result = Classes.lookup(ID->getIdentifier()->getName());
if (result == FC_None && IncludeSuperclasses)
if (const ObjCInterfaceDecl *Super = ID->getSuperClass())
return findKnownClass(Super);
return result;
}
//===----------------------------------------------------------------------===//
// NilArgChecker - Check for prohibited nil arguments to ObjC method calls.
//===----------------------------------------------------------------------===//
namespace {
class NilArgChecker : public Checker<check::PreObjCMessage,
check::PostStmt<ObjCDictionaryLiteral>,
check::PostStmt<ObjCArrayLiteral> > {
mutable std::unique_ptr<APIMisuse> BT;
mutable llvm::SmallDenseMap<Selector, unsigned, 16> StringSelectors;
mutable Selector ArrayWithObjectSel;
mutable Selector AddObjectSel;
mutable Selector InsertObjectAtIndexSel;
mutable Selector ReplaceObjectAtIndexWithObjectSel;
mutable Selector SetObjectAtIndexedSubscriptSel;
mutable Selector ArrayByAddingObjectSel;
mutable Selector DictionaryWithObjectForKeySel;
mutable Selector SetObjectForKeySel;
mutable Selector SetObjectForKeyedSubscriptSel;
mutable Selector RemoveObjectForKeySel;
void warnIfNilExpr(const Expr *E,
const char *Msg,
CheckerContext &C) const;
void warnIfNilArg(CheckerContext &C,
const ObjCMethodCall &msg, unsigned Arg,
FoundationClass Class,
bool CanBeSubscript = false) const;
void generateBugReport(ExplodedNode *N,
StringRef Msg,
SourceRange Range,
const Expr *Expr,
CheckerContext &C) const;
public:
void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
void checkPostStmt(const ObjCDictionaryLiteral *DL,
CheckerContext &C) const;
void checkPostStmt(const ObjCArrayLiteral *AL,
CheckerContext &C) const;
};
}
void NilArgChecker::warnIfNilExpr(const Expr *E,
const char *Msg,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
if (State->isNull(C.getSVal(E)).isConstrainedTrue()) {
if (ExplodedNode *N = C.generateSink()) {
generateBugReport(N, Msg, E->getSourceRange(), E, C);
}
}
}
void NilArgChecker::warnIfNilArg(CheckerContext &C,
const ObjCMethodCall &msg,
unsigned int Arg,
FoundationClass Class,
bool CanBeSubscript) const {
// Check if the argument is nil.
ProgramStateRef State = C.getState();
if (!State->isNull(msg.getArgSVal(Arg)).isConstrainedTrue())
return;
if (ExplodedNode *N = C.generateSink()) {
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
if (CanBeSubscript && msg.getMessageKind() == OCM_Subscript) {
if (Class == FC_NSArray) {
os << "Array element cannot be nil";
} else if (Class == FC_NSDictionary) {
if (Arg == 0) {
os << "Value stored into '";
os << GetReceiverInterfaceName(msg) << "' cannot be nil";
} else {
assert(Arg == 1);
os << "'"<< GetReceiverInterfaceName(msg) << "' key cannot be nil";
}
} else
llvm_unreachable("Missing foundation class for the subscript expr");
} else {
if (Class == FC_NSDictionary) {
if (Arg == 0)
os << "Value argument ";
else {
assert(Arg == 1);
os << "Key argument ";
}
os << "to '";
msg.getSelector().print(os);
os << "' cannot be nil";
} else {
os << "Argument to '" << GetReceiverInterfaceName(msg) << "' method '";
msg.getSelector().print(os);
os << "' cannot be nil";
}
}
generateBugReport(N, os.str(), msg.getArgSourceRange(Arg),
msg.getArgExpr(Arg), C);
}
}
void NilArgChecker::generateBugReport(ExplodedNode *N,
StringRef Msg,
SourceRange Range,
const Expr *E,
CheckerContext &C) const {
if (!BT)
BT.reset(new APIMisuse(this, "nil argument"));
auto R = llvm::make_unique<BugReport>(*BT, Msg, N);
R->addRange(Range);
bugreporter::trackNullOrUndefValue(N, E, *R);
C.emitReport(std::move(R));
}
void NilArgChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
CheckerContext &C) const {
const ObjCInterfaceDecl *ID = msg.getReceiverInterface();
if (!ID)
return;
FoundationClass Class = findKnownClass(ID);
static const unsigned InvalidArgIndex = UINT_MAX;
unsigned Arg = InvalidArgIndex;
bool CanBeSubscript = false;
if (Class == FC_NSString) {
Selector S = msg.getSelector();
if (S.isUnarySelector())
return;
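    // Lazily build the map from known NSString selectors to the index of
    // the argument that must not be nil (index 0 for all of them).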
if (StringSelectors.empty()) {
ASTContext &Ctx = C.getASTContext();
Selector Sels[] = {
getKeywordSelector(Ctx, "caseInsensitiveCompare", nullptr),
getKeywordSelector(Ctx, "compare", nullptr),
getKeywordSelector(Ctx, "compare", "options", nullptr),
getKeywordSelector(Ctx, "compare", "options", "range", nullptr),
getKeywordSelector(Ctx, "compare", "options", "range", "locale",
nullptr),
getKeywordSelector(Ctx, "componentsSeparatedByCharactersInSet",
nullptr),
getKeywordSelector(Ctx, "initWithFormat",
nullptr),
getKeywordSelector(Ctx, "localizedCaseInsensitiveCompare", nullptr),
getKeywordSelector(Ctx, "localizedCompare", nullptr),
getKeywordSelector(Ctx, "localizedStandardCompare", nullptr),
};
for (Selector KnownSel : Sels)
StringSelectors[KnownSel] = 0;
}
auto I = StringSelectors.find(S);
if (I == StringSelectors.end())
return;
Arg = I->second;
} else if (Class == FC_NSArray) {
Selector S = msg.getSelector();
if (S.isUnarySelector())
return;
if (ArrayWithObjectSel.isNull()) {
ASTContext &Ctx = C.getASTContext();
ArrayWithObjectSel = getKeywordSelector(Ctx, "arrayWithObject", nullptr);
AddObjectSel = getKeywordSelector(Ctx, "addObject", nullptr);
InsertObjectAtIndexSel =
getKeywordSelector(Ctx, "insertObject", "atIndex", nullptr);
ReplaceObjectAtIndexWithObjectSel =
getKeywordSelector(Ctx, "replaceObjectAtIndex", "withObject", nullptr);
SetObjectAtIndexedSubscriptSel =
getKeywordSelector(Ctx, "setObject", "atIndexedSubscript", nullptr);
ArrayByAddingObjectSel =
getKeywordSelector(Ctx, "arrayByAddingObject", nullptr);
}
if (S == ArrayWithObjectSel || S == AddObjectSel ||
S == InsertObjectAtIndexSel || S == ArrayByAddingObjectSel) {
Arg = 0;
} else if (S == SetObjectAtIndexedSubscriptSel) {
Arg = 0;
CanBeSubscript = true;
} else if (S == ReplaceObjectAtIndexWithObjectSel) {
Arg = 1;
}
} else if (Class == FC_NSDictionary) {
Selector S = msg.getSelector();
if (S.isUnarySelector())
return;
if (DictionaryWithObjectForKeySel.isNull()) {
ASTContext &Ctx = C.getASTContext();
DictionaryWithObjectForKeySel =
getKeywordSelector(Ctx, "dictionaryWithObject", "forKey", nullptr);
SetObjectForKeySel =
getKeywordSelector(Ctx, "setObject", "forKey", nullptr);
SetObjectForKeyedSubscriptSel =
getKeywordSelector(Ctx, "setObject", "forKeyedSubscript", nullptr);
RemoveObjectForKeySel =
getKeywordSelector(Ctx, "removeObjectForKey", nullptr);
}
if (S == DictionaryWithObjectForKeySel || S == SetObjectForKeySel) {
Arg = 0;
warnIfNilArg(C, msg, /* Arg */1, Class);
} else if (S == SetObjectForKeyedSubscriptSel) {
CanBeSubscript = true;
Arg = 0;
warnIfNilArg(C, msg, /* Arg */1, Class, CanBeSubscript);
} else if (S == RemoveObjectForKeySel) {
Arg = 0;
}
}
  // If we identified an argument that must not be nil, emit the warning.
  if (Arg != InvalidArgIndex)
warnIfNilArg(C, msg, Arg, Class, CanBeSubscript);
}
void NilArgChecker::checkPostStmt(const ObjCArrayLiteral *AL,
CheckerContext &C) const {
unsigned NumOfElements = AL->getNumElements();
for (unsigned i = 0; i < NumOfElements; ++i) {
warnIfNilExpr(AL->getElement(i), "Array element cannot be nil", C);
}
}
void NilArgChecker::checkPostStmt(const ObjCDictionaryLiteral *DL,
CheckerContext &C) const {
unsigned NumOfElements = DL->getNumElements();
for (unsigned i = 0; i < NumOfElements; ++i) {
ObjCDictionaryElement Element = DL->getKeyValueElement(i);
warnIfNilExpr(Element.Key, "Dictionary key cannot be nil", C);
warnIfNilExpr(Element.Value, "Dictionary value cannot be nil", C);
}
}
//===----------------------------------------------------------------------===//
// Error reporting.
//===----------------------------------------------------------------------===//
namespace {
class CFNumberCreateChecker : public Checker< check::PreStmt<CallExpr> > {
mutable std::unique_ptr<APIMisuse> BT;
mutable IdentifierInfo* II;
public:
CFNumberCreateChecker() : II(nullptr) {}
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
private:
void EmitError(const TypedRegion* R, const Expr *Ex,
uint64_t SourceSize, uint64_t TargetSize, uint64_t NumberKind);
};
} // end anonymous namespace
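// These values mirror the CFNumberType constants from CoreFoundation's
// CFNumber.h.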
enum CFNumberType {
kCFNumberSInt8Type = 1,
kCFNumberSInt16Type = 2,
kCFNumberSInt32Type = 3,
kCFNumberSInt64Type = 4,
kCFNumberFloat32Type = 5,
kCFNumberFloat64Type = 6,
kCFNumberCharType = 7,
kCFNumberShortType = 8,
kCFNumberIntType = 9,
kCFNumberLongType = 10,
kCFNumberLongLongType = 11,
kCFNumberFloatType = 12,
kCFNumberDoubleType = 13,
kCFNumberCFIndexType = 14,
kCFNumberNSIntegerType = 15,
kCFNumberCGFloatType = 16
};
static Optional<uint64_t> GetCFNumberSize(ASTContext &Ctx, uint64_t i) {
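  // The first six kinds (SInt8 through Float64) have fixed bit widths; the
  // enum starts at 1, so index with i-1.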
static const unsigned char FixedSize[] = { 8, 16, 32, 64, 32, 64 };
if (i < kCFNumberCharType)
return FixedSize[i-1];
QualType T;
switch (i) {
case kCFNumberCharType: T = Ctx.CharTy; break;
case kCFNumberShortType: T = Ctx.ShortTy; break;
case kCFNumberIntType: T = Ctx.IntTy; break;
case kCFNumberLongType: T = Ctx.LongTy; break;
case kCFNumberLongLongType: T = Ctx.LongLongTy; break;
case kCFNumberFloatType: T = Ctx.FloatTy; break;
case kCFNumberDoubleType: T = Ctx.DoubleTy; break;
case kCFNumberCFIndexType:
case kCFNumberNSIntegerType:
case kCFNumberCGFloatType:
// FIXME: We need a way to map from names to Type*.
default:
return None;
}
return Ctx.getTypeSize(T);
}
#if 0
static const char* GetCFNumberTypeStr(uint64_t i) {
static const char* Names[] = {
"kCFNumberSInt8Type",
"kCFNumberSInt16Type",
"kCFNumberSInt32Type",
"kCFNumberSInt64Type",
"kCFNumberFloat32Type",
"kCFNumberFloat64Type",
"kCFNumberCharType",
"kCFNumberShortType",
"kCFNumberIntType",
"kCFNumberLongType",
"kCFNumberLongLongType",
"kCFNumberFloatType",
"kCFNumberDoubleType",
"kCFNumberCFIndexType",
"kCFNumberNSIntegerType",
"kCFNumberCGFloatType"
};
return i <= kCFNumberCGFloatType ? Names[i-1] : "Invalid CFNumberType";
}
#endif
void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
ProgramStateRef state = C.getState();
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return;
ASTContext &Ctx = C.getASTContext();
if (!II)
II = &Ctx.Idents.get("CFNumberCreate");
if (FD->getIdentifier() != II || CE->getNumArgs() != 3)
return;
// Get the value of the "theType" argument.
const LocationContext *LCtx = C.getLocationContext();
SVal TheTypeVal = state->getSVal(CE->getArg(1), LCtx);
// FIXME: We really should allow ranges of valid theType values, and
// bifurcate the state appropriately.
Optional<nonloc::ConcreteInt> V = TheTypeVal.getAs<nonloc::ConcreteInt>();
if (!V)
return;
uint64_t NumberKind = V->getValue().getLimitedValue();
Optional<uint64_t> OptTargetSize = GetCFNumberSize(Ctx, NumberKind);
// FIXME: In some cases we can emit an error.
if (!OptTargetSize)
return;
uint64_t TargetSize = *OptTargetSize;
// Look at the value of the integer being passed by reference. Essentially
// we want to catch cases where the value passed in is not equal to the
// size of the type being created.
SVal TheValueExpr = state->getSVal(CE->getArg(2), LCtx);
// FIXME: Eventually we should handle arbitrary locations. We can do this
// by having an enhanced memory model that does low-level typing.
Optional<loc::MemRegionVal> LV = TheValueExpr.getAs<loc::MemRegionVal>();
if (!LV)
return;
const TypedValueRegion* R = dyn_cast<TypedValueRegion>(LV->stripCasts());
if (!R)
return;
QualType T = Ctx.getCanonicalType(R->getValueType());
// FIXME: If the pointee isn't an integer type, should we flag a warning?
// People can do weird stuff with pointers.
if (!T->isIntegralOrEnumerationType())
return;
uint64_t SourceSize = Ctx.getTypeSize(T);
// CHECK: is SourceSize == TargetSize
if (SourceSize == TargetSize)
return;
// Generate an error. Only generate a sink if 'SourceSize < TargetSize';
// otherwise generate a regular node.
//
// FIXME: We can actually create an abstract "CFNumber" object that has
// the bits initialized to the provided values.
//
if (ExplodedNode *N = SourceSize < TargetSize ? C.generateSink()
: C.addTransition()) {
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
os << (SourceSize == 8 ? "An " : "A ")
<< SourceSize << " bit integer is used to initialize a CFNumber "
"object that represents "
<< (TargetSize == 8 ? "an " : "a ")
<< TargetSize << " bit integer. ";
if (SourceSize < TargetSize)
os << (TargetSize - SourceSize)
<< " bits of the CFNumber value will be garbage." ;
else
os << (SourceSize - TargetSize)
<< " bits of the input integer will be lost.";
if (!BT)
BT.reset(new APIMisuse(this, "Bad use of CFNumberCreate"));
auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
report->addRange(CE->getArg(2)->getSourceRange());
C.emitReport(std::move(report));
}
}
//===----------------------------------------------------------------------===//
// CFRetain/CFRelease/CFMakeCollectable/CFAutorelease checking for null arguments.
//===----------------------------------------------------------------------===//
namespace {
class CFRetainReleaseChecker : public Checker< check::PreStmt<CallExpr> > {
mutable std::unique_ptr<APIMisuse> BT;
mutable IdentifierInfo *Retain, *Release, *MakeCollectable, *Autorelease;
public:
CFRetainReleaseChecker()
: Retain(nullptr), Release(nullptr), MakeCollectable(nullptr),
Autorelease(nullptr) {}
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
};
} // end anonymous namespace
void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
  // If the CallExpr doesn't have exactly 1 argument, just give up checking.
if (CE->getNumArgs() != 1)
return;
ProgramStateRef state = C.getState();
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return;
if (!BT) {
ASTContext &Ctx = C.getASTContext();
Retain = &Ctx.Idents.get("CFRetain");
Release = &Ctx.Idents.get("CFRelease");
MakeCollectable = &Ctx.Idents.get("CFMakeCollectable");
Autorelease = &Ctx.Idents.get("CFAutorelease");
BT.reset(new APIMisuse(
this, "null passed to CF memory management function"));
}
// Check if we called CFRetain/CFRelease/CFMakeCollectable/CFAutorelease.
const IdentifierInfo *FuncII = FD->getIdentifier();
if (!(FuncII == Retain || FuncII == Release || FuncII == MakeCollectable ||
FuncII == Autorelease))
return;
// FIXME: The rest of this just checks that the argument is non-null.
// It should probably be refactored and combined with NonNullParamChecker.
// Get the argument's value.
const Expr *Arg = CE->getArg(0);
SVal ArgVal = state->getSVal(Arg, C.getLocationContext());
Optional<DefinedSVal> DefArgVal = ArgVal.getAs<DefinedSVal>();
if (!DefArgVal)
return;
// Get a NULL value.
SValBuilder &svalBuilder = C.getSValBuilder();
DefinedSVal zero =
svalBuilder.makeZeroVal(Arg->getType()).castAs<DefinedSVal>();
// Make an expression asserting that they're equal.
DefinedOrUnknownSVal ArgIsNull = svalBuilder.evalEQ(state, zero, *DefArgVal);
// Are they equal?
ProgramStateRef stateTrue, stateFalse;
std::tie(stateTrue, stateFalse) = state->assume(ArgIsNull);
if (stateTrue && !stateFalse) {
ExplodedNode *N = C.generateSink(stateTrue);
if (!N)
return;
const char *description;
if (FuncII == Retain)
description = "Null pointer argument in call to CFRetain";
else if (FuncII == Release)
description = "Null pointer argument in call to CFRelease";
else if (FuncII == MakeCollectable)
description = "Null pointer argument in call to CFMakeCollectable";
else if (FuncII == Autorelease)
description = "Null pointer argument in call to CFAutorelease";
else
llvm_unreachable("impossible case");
auto report = llvm::make_unique<BugReport>(*BT, description, N);
report->addRange(Arg->getSourceRange());
bugreporter::trackNullOrUndefValue(N, Arg, *report);
C.emitReport(std::move(report));
return;
}
// From here on, we know the argument is non-null.
C.addTransition(stateFalse);
}
//===----------------------------------------------------------------------===//
// Check for sending 'retain', 'release', or 'autorelease' directly to a Class.
//===----------------------------------------------------------------------===//
namespace {
class ClassReleaseChecker : public Checker<check::PreObjCMessage> {
mutable Selector releaseS;
mutable Selector retainS;
mutable Selector autoreleaseS;
mutable Selector drainS;
mutable std::unique_ptr<BugType> BT;
public:
void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
};
}
void ClassReleaseChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
CheckerContext &C) const {
if (!BT) {
BT.reset(new APIMisuse(
this, "message incorrectly sent to class instead of class instance"));
ASTContext &Ctx = C.getASTContext();
releaseS = GetNullarySelector("release", Ctx);
retainS = GetNullarySelector("retain", Ctx);
autoreleaseS = GetNullarySelector("autorelease", Ctx);
drainS = GetNullarySelector("drain", Ctx);
}
if (msg.isInstanceMessage())
return;
const ObjCInterfaceDecl *Class = msg.getReceiverInterface();
assert(Class);
Selector S = msg.getSelector();
if (!(S == releaseS || S == retainS || S == autoreleaseS || S == drainS))
return;
if (ExplodedNode *N = C.addTransition()) {
SmallString<200> buf;
llvm::raw_svector_ostream os(buf);
os << "The '";
S.print(os);
os << "' message should be sent to instances "
"of class '" << Class->getName()
<< "' and not the class directly";
auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
report->addRange(msg.getSourceRange());
C.emitReport(std::move(report));
}
}
//===----------------------------------------------------------------------===//
// Check for passing non-Objective-C types to variadic methods that expect
// only Objective-C types.
//===----------------------------------------------------------------------===//
namespace {
class VariadicMethodTypeChecker : public Checker<check::PreObjCMessage> {
mutable Selector arrayWithObjectsS;
mutable Selector dictionaryWithObjectsAndKeysS;
mutable Selector setWithObjectsS;
mutable Selector orderedSetWithObjectsS;
mutable Selector initWithObjectsS;
mutable Selector initWithObjectsAndKeysS;
mutable std::unique_ptr<BugType> BT;
bool isVariadicMessage(const ObjCMethodCall &msg) const;
public:
void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
};
}
/// isVariadicMessage - Returns whether the given message is a variadic message,
/// where all arguments must be Objective-C types.
bool
VariadicMethodTypeChecker::isVariadicMessage(const ObjCMethodCall &msg) const {
const ObjCMethodDecl *MD = msg.getDecl();
if (!MD || !MD->isVariadic() || isa<ObjCProtocolDecl>(MD->getDeclContext()))
return false;
Selector S = msg.getSelector();
if (msg.isInstanceMessage()) {
// FIXME: Ideally we'd look at the receiver interface here, but that's not
// useful for init, because alloc returns 'id'. In theory, this could lead
// to false positives, for example if there existed a class that had an
// initWithObjects: implementation that does accept non-Objective-C pointer
// types, but the chance of that happening is pretty small compared to the
// gains that this analysis gives.
const ObjCInterfaceDecl *Class = MD->getClassInterface();
switch (findKnownClass(Class)) {
case FC_NSArray:
case FC_NSOrderedSet:
case FC_NSSet:
return S == initWithObjectsS;
case FC_NSDictionary:
return S == initWithObjectsAndKeysS;
default:
return false;
}
} else {
const ObjCInterfaceDecl *Class = msg.getReceiverInterface();
switch (findKnownClass(Class)) {
case FC_NSArray:
return S == arrayWithObjectsS;
case FC_NSOrderedSet:
return S == orderedSetWithObjectsS;
case FC_NSSet:
return S == setWithObjectsS;
case FC_NSDictionary:
return S == dictionaryWithObjectsAndKeysS;
default:
return false;
}
}
}
void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
CheckerContext &C) const {
if (!BT) {
BT.reset(new APIMisuse(this,
"Arguments passed to variadic method aren't all "
"Objective-C pointer types"));
ASTContext &Ctx = C.getASTContext();
arrayWithObjectsS = GetUnarySelector("arrayWithObjects", Ctx);
dictionaryWithObjectsAndKeysS =
GetUnarySelector("dictionaryWithObjectsAndKeys", Ctx);
setWithObjectsS = GetUnarySelector("setWithObjects", Ctx);
orderedSetWithObjectsS = GetUnarySelector("orderedSetWithObjects", Ctx);
initWithObjectsS = GetUnarySelector("initWithObjects", Ctx);
initWithObjectsAndKeysS = GetUnarySelector("initWithObjectsAndKeys", Ctx);
}
if (!isVariadicMessage(msg))
return;
// We are not interested in the selector arguments since they have
// well-defined types, so the compiler will issue a warning for them.
unsigned variadicArgsBegin = msg.getSelector().getNumArgs();
// We're not interested in the last argument since it has to be nil or the
// compiler would have issued a warning for it elsewhere.
unsigned variadicArgsEnd = msg.getNumArgs() - 1;
if (variadicArgsEnd <= variadicArgsBegin)
return;
// Verify that all arguments have Objective-C types.
Optional<ExplodedNode*> errorNode;
for (unsigned I = variadicArgsBegin; I != variadicArgsEnd; ++I) {
QualType ArgTy = msg.getArgExpr(I)->getType();
if (ArgTy->isObjCObjectPointerType())
continue;
    // Block pointers are treated as Objective-C pointers.
if (ArgTy->isBlockPointerType())
continue;
// Ignore pointer constants.
if (msg.getArgSVal(I).getAs<loc::ConcreteInt>())
continue;
// Ignore pointer types annotated with 'NSObject' attribute.
if (C.getASTContext().isObjCNSObjectType(ArgTy))
continue;
// Ignore CF references, which can be toll-free bridged.
if (coreFoundation::isCFObjectRef(ArgTy))
continue;
// Generate only one error node to use for all bug reports.
if (!errorNode.hasValue())
errorNode = C.addTransition();
if (!errorNode.getValue())
continue;
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
StringRef TypeName = GetReceiverInterfaceName(msg);
if (!TypeName.empty())
os << "Argument to '" << TypeName << "' method '";
else
os << "Argument to method '";
msg.getSelector().print(os);
os << "' should be an Objective-C pointer type, not '";
ArgTy.print(os, C.getLangOpts());
os << "'";
auto R = llvm::make_unique<BugReport>(*BT, os.str(), errorNode.getValue());
R->addRange(msg.getArgSourceRange(I));
C.emitReport(std::move(R));
}
}
//===----------------------------------------------------------------------===//
// Improves the modeling of loops over Cocoa collections.
//===----------------------------------------------------------------------===//
// The map from container symbol to the container count symbol.
// We currently remember the last container count symbol encountered.
REGISTER_MAP_WITH_PROGRAMSTATE(ContainerCountMap, SymbolRef, SymbolRef)
REGISTER_MAP_WITH_PROGRAMSTATE(ContainerNonEmptyMap, SymbolRef, bool)
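// ContainerNonEmptyMap records an emptiness assumption made before any
// 'count' symbol has been seen for the container; the entry is consumed and
// re-applied once a count symbol is recorded (see checkPostObjCMessage).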
namespace {
class ObjCLoopChecker
: public Checker<check::PostStmt<ObjCForCollectionStmt>,
check::PostObjCMessage,
check::DeadSymbols,
check::PointerEscape > {
mutable IdentifierInfo *CountSelectorII;
bool isCollectionCountMethod(const ObjCMethodCall &M,
CheckerContext &C) const;
public:
ObjCLoopChecker() : CountSelectorII(nullptr) {}
void checkPostStmt(const ObjCForCollectionStmt *FCS, CheckerContext &C) const;
void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
ProgramStateRef checkPointerEscape(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
PointerEscapeKind Kind) const;
};
}
static bool isKnownNonNilCollectionType(QualType T) {
const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>();
if (!PT)
return false;
const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
if (!ID)
return false;
switch (findKnownClass(ID)) {
case FC_NSArray:
case FC_NSDictionary:
case FC_NSEnumerator:
case FC_NSOrderedSet:
case FC_NSSet:
return true;
default:
return false;
}
}
/// Assumes that the collection is non-nil.
///
/// If the collection is known to be nil, returns NULL to indicate an infeasible
/// path.
static ProgramStateRef checkCollectionNonNil(CheckerContext &C,
ProgramStateRef State,
const ObjCForCollectionStmt *FCS) {
if (!State)
return nullptr;
SVal CollectionVal = C.getSVal(FCS->getCollection());
Optional<DefinedSVal> KnownCollection = CollectionVal.getAs<DefinedSVal>();
if (!KnownCollection)
return State;
ProgramStateRef StNonNil, StNil;
std::tie(StNonNil, StNil) = State->assume(*KnownCollection);
if (StNil && !StNonNil) {
// The collection is nil. This path is infeasible.
return nullptr;
}
return StNonNil;
}
/// Assumes that the collection elements are non-nil.
///
/// This only applies if the collection is one of those known not to contain
/// nil values.
static ProgramStateRef checkElementNonNil(CheckerContext &C,
ProgramStateRef State,
const ObjCForCollectionStmt *FCS) {
if (!State)
return nullptr;
// See if the collection is one where we /know/ the elements are non-nil.
if (!isKnownNonNilCollectionType(FCS->getCollection()->getType()))
return State;
const LocationContext *LCtx = C.getLocationContext();
const Stmt *Element = FCS->getElement();
// FIXME: Copied from ExprEngineObjC.
Optional<Loc> ElementLoc;
if (const DeclStmt *DS = dyn_cast<DeclStmt>(Element)) {
const VarDecl *ElemDecl = cast<VarDecl>(DS->getSingleDecl());
assert(ElemDecl->getInit() == nullptr);
ElementLoc = State->getLValue(ElemDecl, LCtx);
} else {
ElementLoc = State->getSVal(Element, LCtx).getAs<Loc>();
}
if (!ElementLoc)
return State;
// Go ahead and assume the value is non-nil.
SVal Val = State->getSVal(*ElementLoc);
return State->assume(Val.castAs<DefinedOrUnknownSVal>(), true);
}
/// Returns a NULL state if the assumption is infeasible: when Assumption is
/// true and the collection is known to be empty, or when Assumption is false
/// and the collection is known to contain elements.
static ProgramStateRef
assumeCollectionNonEmpty(CheckerContext &C, ProgramStateRef State,
SymbolRef CollectionS, bool Assumption) {
if (!State || !CollectionS)
return State;
const SymbolRef *CountS = State->get<ContainerCountMap>(CollectionS);
if (!CountS) {
const bool *KnownNonEmpty = State->get<ContainerNonEmptyMap>(CollectionS);
if (!KnownNonEmpty)
return State->set<ContainerNonEmptyMap>(CollectionS, Assumption);
return (Assumption == *KnownNonEmpty) ? State : nullptr;
}
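  // Build the symbolic condition 'count > 0' and assume it holds (or does
  // not hold, depending on Assumption).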
SValBuilder &SvalBuilder = C.getSValBuilder();
SVal CountGreaterThanZeroVal =
SvalBuilder.evalBinOp(State, BO_GT,
nonloc::SymbolVal(*CountS),
SvalBuilder.makeIntVal(0, (*CountS)->getType()),
SvalBuilder.getConditionType());
Optional<DefinedSVal> CountGreaterThanZero =
CountGreaterThanZeroVal.getAs<DefinedSVal>();
if (!CountGreaterThanZero) {
// The SValBuilder cannot construct a valid SVal for this condition.
// This means we cannot properly reason about it.
return State;
}
return State->assume(*CountGreaterThanZero, Assumption);
}
static ProgramStateRef
assumeCollectionNonEmpty(CheckerContext &C, ProgramStateRef State,
const ObjCForCollectionStmt *FCS,
bool Assumption) {
if (!State)
return nullptr;
SymbolRef CollectionS =
State->getSVal(FCS->getCollection(), C.getLocationContext()).getAsSymbol();
return assumeCollectionNonEmpty(C, State, CollectionS, Assumption);
}
/// If the first block edge is a back edge, we are reentering the loop.
static bool alreadyExecutedAtLeastOneLoopIteration(const ExplodedNode *N,
const ObjCForCollectionStmt *FCS) {
if (!N)
return false;
ProgramPoint P = N->getLocation();
if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
if (BE->getSrc()->getLoopTarget() == FCS)
return true;
return false;
}
// Keep looking for a block edge.
for (ExplodedNode::const_pred_iterator I = N->pred_begin(),
E = N->pred_end(); I != E; ++I) {
if (alreadyExecutedAtLeastOneLoopIteration(*I, FCS))
return true;
}
return false;
}
void ObjCLoopChecker::checkPostStmt(const ObjCForCollectionStmt *FCS,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
// Check if this is the branch for the end of the loop.
SVal CollectionSentinel = C.getSVal(FCS);
if (CollectionSentinel.isZeroConstant()) {
if (!alreadyExecutedAtLeastOneLoopIteration(C.getPredecessor(), FCS))
State = assumeCollectionNonEmpty(C, State, FCS, /*Assumption*/false);
// Otherwise, this is a branch that goes through the loop body.
} else {
State = checkCollectionNonNil(C, State, FCS);
State = checkElementNonNil(C, State, FCS);
State = assumeCollectionNonEmpty(C, State, FCS, /*Assumption*/true);
}
if (!State)
C.generateSink();
else if (State != C.getState())
C.addTransition(State);
}
bool ObjCLoopChecker::isCollectionCountMethod(const ObjCMethodCall &M,
CheckerContext &C) const {
Selector S = M.getSelector();
// Initialize the identifiers on first use.
if (!CountSelectorII)
CountSelectorII = &C.getASTContext().Idents.get("count");
// If the method returns collection count, record the value.
if (S.isUnarySelector() &&
(S.getIdentifierInfoForSlot(0) == CountSelectorII))
return true;
return false;
}
void ObjCLoopChecker::checkPostObjCMessage(const ObjCMethodCall &M,
CheckerContext &C) const {
if (!M.isInstanceMessage())
return;
const ObjCInterfaceDecl *ClassID = M.getReceiverInterface();
if (!ClassID)
return;
FoundationClass Class = findKnownClass(ClassID);
if (Class != FC_NSDictionary &&
Class != FC_NSArray &&
Class != FC_NSSet &&
Class != FC_NSOrderedSet)
return;
SymbolRef ContainerS = M.getReceiverSVal().getAsSymbol();
if (!ContainerS)
return;
// If we are processing a call to "count", get the symbolic value returned by
// a call to "count" and add it to the map.
if (!isCollectionCountMethod(M, C))
return;
const Expr *MsgExpr = M.getOriginExpr();
SymbolRef CountS = C.getSVal(MsgExpr).getAsSymbol();
if (CountS) {
ProgramStateRef State = C.getState();
C.getSymbolManager().addSymbolDependency(ContainerS, CountS);
State = State->set<ContainerCountMap>(ContainerS, CountS);
if (const bool *NonEmpty = State->get<ContainerNonEmptyMap>(ContainerS)) {
State = State->remove<ContainerNonEmptyMap>(ContainerS);
State = assumeCollectionNonEmpty(C, State, ContainerS, *NonEmpty);
}
C.addTransition(State);
}
return;
}
static SymbolRef getMethodReceiverIfKnownImmutable(const CallEvent *Call) {
const ObjCMethodCall *Message = dyn_cast_or_null<ObjCMethodCall>(Call);
if (!Message)
return nullptr;
const ObjCMethodDecl *MD = Message->getDecl();
if (!MD)
return nullptr;
const ObjCInterfaceDecl *StaticClass;
if (isa<ObjCProtocolDecl>(MD->getDeclContext())) {
// We can't find out where the method was declared without doing more work.
// Instead, see if the receiver is statically typed as a known immutable
// collection.
StaticClass = Message->getOriginExpr()->getReceiverInterface();
} else {
StaticClass = MD->getClassInterface();
}
if (!StaticClass)
return nullptr;
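  // Every class recognized below is a known-immutable Foundation type, so
  // fall through and return the receiver's symbol.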
switch (findKnownClass(StaticClass, /*IncludeSuper=*/false)) {
case FC_None:
return nullptr;
case FC_NSArray:
case FC_NSDictionary:
case FC_NSEnumerator:
case FC_NSNull:
case FC_NSOrderedSet:
case FC_NSSet:
case FC_NSString:
break;
}
return Message->getReceiverSVal().getAsSymbol();
}
ProgramStateRef
ObjCLoopChecker::checkPointerEscape(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
PointerEscapeKind Kind) const {
SymbolRef ImmutableReceiver = getMethodReceiverIfKnownImmutable(Call);
  // Remove the invalidated symbols from the collection count map.
for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
E = Escaped.end();
I != E; ++I) {
SymbolRef Sym = *I;
// Don't invalidate this symbol's count if we know the method being called
// is declared on an immutable class. This isn't completely correct if the
// receiver is also passed as an argument, but in most uses of NSArray,
// NSDictionary, etc. this isn't likely to happen in a dangerous way.
if (Sym == ImmutableReceiver)
continue;
// The symbol escaped. Pessimistically, assume that the count could have
// changed.
State = State->remove<ContainerCountMap>(Sym);
State = State->remove<ContainerNonEmptyMap>(Sym);
}
return State;
}
void ObjCLoopChecker::checkDeadSymbols(SymbolReaper &SymReaper,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
// Remove the dead symbols from the collection count map.
ContainerCountMapTy Tracked = State->get<ContainerCountMap>();
for (ContainerCountMapTy::iterator I = Tracked.begin(),
E = Tracked.end(); I != E; ++I) {
SymbolRef Sym = I->first;
if (SymReaper.isDead(Sym)) {
State = State->remove<ContainerCountMap>(Sym);
State = State->remove<ContainerNonEmptyMap>(Sym);
}
}
C.addTransition(State);
}
namespace {
/// \class ObjCNonNilReturnValueChecker
/// \brief The checker restricts the return values of APIs known to
/// never (or almost never) return 'nil'.
class ObjCNonNilReturnValueChecker
: public Checker<check::PostObjCMessage,
check::PostStmt<ObjCArrayLiteral>,
check::PostStmt<ObjCDictionaryLiteral>,
check::PostStmt<ObjCBoxedExpr> > {
mutable bool Initialized;
mutable Selector ObjectAtIndex;
mutable Selector ObjectAtIndexedSubscript;
mutable Selector NullSelector;
public:
ObjCNonNilReturnValueChecker() : Initialized(false) {}
ProgramStateRef assumeExprIsNonNull(const Expr *NonNullExpr,
ProgramStateRef State,
CheckerContext &C) const;
void assumeExprIsNonNull(const Expr *E, CheckerContext &C) const {
C.addTransition(assumeExprIsNonNull(E, C.getState(), C));
}
void checkPostStmt(const ObjCArrayLiteral *E, CheckerContext &C) const {
assumeExprIsNonNull(E, C);
}
void checkPostStmt(const ObjCDictionaryLiteral *E, CheckerContext &C) const {
assumeExprIsNonNull(E, C);
}
void checkPostStmt(const ObjCBoxedExpr *E, CheckerContext &C) const {
assumeExprIsNonNull(E, C);
}
void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
};
}
ProgramStateRef
ObjCNonNilReturnValueChecker::assumeExprIsNonNull(const Expr *NonNullExpr,
ProgramStateRef State,
CheckerContext &C) const {
SVal Val = State->getSVal(NonNullExpr, C.getLocationContext());
if (Optional<DefinedOrUnknownSVal> DV = Val.getAs<DefinedOrUnknownSVal>())
return State->assume(*DV, true);
return State;
}
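// This encodes the guarantee that Objective-C container and boxed literals,
// e.g. @[ @1 ], @{ @"key" : value } and @(42), never evaluate to nil.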
void ObjCNonNilReturnValueChecker::checkPostObjCMessage(const ObjCMethodCall &M,
CheckerContext &C)
const {
ProgramStateRef State = C.getState();
if (!Initialized) {
ASTContext &Ctx = C.getASTContext();
ObjectAtIndex = GetUnarySelector("objectAtIndex", Ctx);
ObjectAtIndexedSubscript = GetUnarySelector("objectAtIndexedSubscript", Ctx);
NullSelector = GetNullarySelector("null", Ctx);
Initialized = true;
}
// Check the receiver type.
if (const ObjCInterfaceDecl *Interface = M.getReceiverInterface()) {
// Assume that object returned from '[self init]' or '[super init]' is not
// 'nil' if we are processing an inlined function/method.
//
// A defensive callee will (and should) check if the object returned by
// '[super init]' is 'nil' before doing its own initialization. However,
// since 'nil' is rarely returned in practice, we should not warn when the
// caller to the defensive constructor uses the object in contexts where
// 'nil' is not accepted.
if (!C.inTopFrame() && M.getDecl() &&
M.getDecl()->getMethodFamily() == OMF_init &&
M.isReceiverSelfOrSuper()) {
State = assumeExprIsNonNull(M.getOriginExpr(), State, C);
}
FoundationClass Cl = findKnownClass(Interface);
// Objects returned from
// [NSArray|NSOrderedSet]::[ObjectAtIndex|ObjectAtIndexedSubscript]
// are never 'nil'.
if (Cl == FC_NSArray || Cl == FC_NSOrderedSet) {
Selector Sel = M.getSelector();
if (Sel == ObjectAtIndex || Sel == ObjectAtIndexedSubscript) {
// Go ahead and assume the value is non-nil.
State = assumeExprIsNonNull(M.getOriginExpr(), State, C);
}
}
// Objects returned from [NSNull null] are not nil.
if (Cl == FC_NSNull) {
if (M.getSelector() == NullSelector) {
// Go ahead and assume the value is non-nil.
State = assumeExprIsNonNull(M.getOriginExpr(), State, C);
}
}
}
C.addTransition(State);
}
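// Typical pattern this supports without false positives (illustrative):
//   self = [super init];
//   if (self) { ... }  // defensive check in the callee
//   return self;       // callers may still treat the result as non-nil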
//===----------------------------------------------------------------------===//
// Check registration.
//===----------------------------------------------------------------------===//
void ento::registerNilArgChecker(CheckerManager &mgr) {
mgr.registerChecker<NilArgChecker>();
}
void ento::registerCFNumberCreateChecker(CheckerManager &mgr) {
mgr.registerChecker<CFNumberCreateChecker>();
}
void ento::registerCFRetainReleaseChecker(CheckerManager &mgr) {
mgr.registerChecker<CFRetainReleaseChecker>();
}
void ento::registerClassReleaseChecker(CheckerManager &mgr) {
mgr.registerChecker<ClassReleaseChecker>();
}
void ento::registerVariadicMethodTypeChecker(CheckerManager &mgr) {
mgr.registerChecker<VariadicMethodTypeChecker>();
}
void ento::registerObjCLoopChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCLoopChecker>();
}
void
ento::registerObjCNonNilReturnValueChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCNonNilReturnValueChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp | //==--AnalyzerStatsChecker.cpp - Analyzer visitation statistics --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file reports various statistics about analyzer visitation.
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/SourceManager.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
#define DEBUG_TYPE "StatsChecker"
STATISTIC(NumBlocks,
"The # of blocks in top level functions");
STATISTIC(NumBlocksUnreachable,
"The # of unreachable blocks in analyzing top level functions");
namespace {
class AnalyzerStatsChecker : public Checker<check::EndAnalysis> {
public:
void checkEndAnalysis(ExplodedGraph &G, BugReporter &B, ExprEngine &Eng) const;
};
}
void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
BugReporter &B,
ExprEngine &Eng) const {
const CFG *C = nullptr;
const SourceManager &SM = B.getSourceManager();
llvm::SmallPtrSet<const CFGBlock*, 256> reachable;
// Root node should have the location context of the topmost function.
const ExplodedNode *GraphRoot = *G.roots_begin();
const LocationContext *LC = GraphRoot->getLocation().getLocationContext();
const Decl *D = LC->getDecl();
// Iterate over the exploded graph.
for (ExplodedGraph::node_iterator I = G.nodes_begin();
I != G.nodes_end(); ++I) {
const ProgramPoint &P = I->getLocation();
// Only check the coverage in the top level function (optimization).
if (D != P.getLocationContext()->getDecl())
continue;
if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
const CFGBlock *CB = BE->getBlock();
reachable.insert(CB);
}
}
// Get the CFG and the Decl of this block.
C = LC->getCFG();
unsigned total = 0, unreachable = 0;
// Find CFGBlocks that were not covered by any node
for (CFG::const_iterator I = C->begin(); I != C->end(); ++I) {
const CFGBlock *CB = *I;
++total;
// Check if the block is unreachable
if (!reachable.count(CB)) {
++unreachable;
}
}
// We never 'reach' the entry block, so correct the unreachable count
unreachable--;
// Similarly, there is no BlockEntrance corresponding to the exit block, so
// count it as reached as well.
unreachable--;
// Generate the warning string
SmallString<128> buf;
llvm::raw_svector_ostream output(buf);
PresumedLoc Loc = SM.getPresumedLoc(D->getLocation());
if (!Loc.isValid())
return;
if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
const NamedDecl *ND = cast<NamedDecl>(D);
output << *ND;
}
else if (isa<BlockDecl>(D)) {
output << "block(line:" << Loc.getLine() << ":col:" << Loc.getColumn()
<< ")";
}
NumBlocksUnreachable += unreachable;
NumBlocks += total;
std::string NameOfRootFunction = output.str();
output << " -> Total CFGBlocks: " << total << " | Unreachable CFGBlocks: "
<< unreachable << " | Exhausted Block: "
<< (Eng.wasBlocksExhausted() ? "yes" : "no")
<< " | Empty WorkList: "
<< (Eng.hasEmptyWorkList() ? "yes" : "no");
B.EmitBasicReport(D, this, "Analyzer Statistics", "Internal Statistics",
output.str(), PathDiagnosticLocation(D, SM));
// Emit warning for each block we bailed out on.
typedef CoreEngine::BlocksExhausted::const_iterator ExhaustedIterator;
const CoreEngine &CE = Eng.getCoreEngine();
for (ExhaustedIterator I = CE.blocks_exhausted_begin(),
E = CE.blocks_exhausted_end(); I != E; ++I) {
const BlockEdge &BE = I->first;
const CFGBlock *Exit = BE.getDst();
const CFGElement &Front = Exit->front();
if (Optional<CFGStmt> CS = Front.getAs<CFGStmt>()) {
SmallString<128> bufI;
llvm::raw_svector_ostream outputI(bufI);
outputI << "(" << NameOfRootFunction << ")" <<
": The analyzer generated a sink at this point";
B.EmitBasicReport(
D, this, "Sink Point", "Internal Statistics", outputI.str(),
PathDiagnosticLocation::createBegin(CS->getStmt(), SM, LC));
}
}
}
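// The summary report emitted above has the form (illustrative):
//   foo -> Total CFGBlocks: 12 | Unreachable CFGBlocks: 2 |
//   Exhausted Block: no | Empty WorkList: yes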
void ento::registerAnalyzerStatsChecker(CheckerManager &mgr) {
mgr.registerChecker<AnalyzerStatsChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp | //==- CheckObjCDealloc.cpp - Check ObjC -dealloc implementation --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a CheckObjCDealloc, a checker that
// analyzes an Objective-C class's implementation to determine if it
// correctly implements -dealloc.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Basic/LangOptions.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
static bool scan_ivar_release(Stmt *S, ObjCIvarDecl *ID,
const ObjCPropertyDecl *PD,
Selector Release,
IdentifierInfo* SelfII,
ASTContext &Ctx) {
// [mMyIvar release]
if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S))
if (ME->getSelector() == Release)
if (ME->getInstanceReceiver())
if (Expr *Receiver = ME->getInstanceReceiver()->IgnoreParenCasts())
if (ObjCIvarRefExpr *E = dyn_cast<ObjCIvarRefExpr>(Receiver))
if (E->getDecl() == ID)
return true;
// [self setMyIvar:nil];
if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S))
if (ME->getInstanceReceiver())
if (Expr *Receiver = ME->getInstanceReceiver()->IgnoreParenCasts())
if (DeclRefExpr *E = dyn_cast<DeclRefExpr>(Receiver))
if (E->getDecl()->getIdentifier() == SelfII)
if (ME->getMethodDecl() == PD->getSetterMethodDecl() &&
ME->getNumArgs() == 1 &&
ME->getArg(0)->isNullPointerConstant(Ctx,
Expr::NPC_ValueDependentIsNull))
return true;
// self.myIvar = nil;
if (BinaryOperator* BO = dyn_cast<BinaryOperator>(S))
if (BO->isAssignmentOp())
if (ObjCPropertyRefExpr *PRE =
dyn_cast<ObjCPropertyRefExpr>(BO->getLHS()->IgnoreParenCasts()))
if (PRE->isExplicitProperty() && PRE->getExplicitProperty() == PD)
if (BO->getRHS()->isNullPointerConstant(Ctx,
Expr::NPC_ValueDependentIsNull)) {
// This is only a 'release' if the property kind is not
// 'assign'.
return PD->getSetterKind() != ObjCPropertyDecl::Assign;
}
// Recurse to children.
for (Stmt *SubStmt : S->children())
if (SubStmt && scan_ivar_release(SubStmt, ID, PD, Release, SelfII, Ctx))
return true;
return false;
}
static void checkObjCDealloc(const CheckerBase *Checker,
const ObjCImplementationDecl *D,
const LangOptions &LOpts, BugReporter &BR) {
assert(LOpts.getGC() != LangOptions::GCOnly);
ASTContext &Ctx = BR.getContext();
const ObjCInterfaceDecl *ID = D->getClassInterface();
// Does the class contain any ivars that are pointers (or id<...>)?
// If not, skip the check entirely.
// NOTE: This is motivated by PR 2517:
// http://llvm.org/bugs/show_bug.cgi?id=2517
bool containsPointerIvar = false;
for (const auto *Ivar : ID->ivars()) {
QualType T = Ivar->getType();
if (!T->isObjCObjectPointerType() ||
Ivar->hasAttr<IBOutletAttr>() || // Skip IBOutlets.
Ivar->hasAttr<IBOutletCollectionAttr>()) // Skip IBOutletCollections.
continue;
containsPointerIvar = true;
break;
}
if (!containsPointerIvar)
return;
// Determine if the class subclasses NSObject.
IdentifierInfo* NSObjectII = &Ctx.Idents.get("NSObject");
IdentifierInfo* SenTestCaseII = &Ctx.Idents.get("SenTestCase");
for ( ; ID ; ID = ID->getSuperClass()) {
IdentifierInfo *II = ID->getIdentifier();
if (II == NSObjectII)
break;
// FIXME: For now, ignore classes that subclass SenTestCase, as these don't
// need to implement -dealloc. They implement tear down in another way,
// which we should try to catch later.
// http://llvm.org/bugs/show_bug.cgi?id=3187
if (II == SenTestCaseII)
return;
}
if (!ID)
return;
// Get the "dealloc" selector.
IdentifierInfo* II = &Ctx.Idents.get("dealloc");
Selector S = Ctx.Selectors.getSelector(0, &II);
const ObjCMethodDecl *MD = nullptr;
// Scan the instance methods for "dealloc".
for (const auto *I : D->instance_methods()) {
if (I->getSelector() == S) {
MD = I;
break;
}
}
PathDiagnosticLocation DLoc =
PathDiagnosticLocation::createBegin(D, BR.getSourceManager());
if (!MD) { // No dealloc found.
const char* name = LOpts.getGC() == LangOptions::NonGC
? "missing -dealloc"
: "missing -dealloc (Hybrid MM, non-GC)";
std::string buf;
llvm::raw_string_ostream os(buf);
os << "Objective-C class '" << *D << "' lacks a 'dealloc' instance method";
BR.EmitBasicReport(D, Checker, name, categories::CoreFoundationObjectiveC,
os.str(), DLoc);
return;
}
// Get the "release" selector.
IdentifierInfo* RII = &Ctx.Idents.get("release");
Selector RS = Ctx.Selectors.getSelector(0, &RII);
// Get the "self" identifier
IdentifierInfo* SelfII = &Ctx.Idents.get("self");
// Scan for missing and extra releases of ivars used by implementations
// of synthesized properties
for (const auto *I : D->property_impls()) {
// We can only check the synthesized properties
if (I->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
continue;
ObjCIvarDecl *ID = I->getPropertyIvarDecl();
if (!ID)
continue;
QualType T = ID->getType();
if (!T->isObjCObjectPointerType()) // Skip non-pointer ivars
continue;
const ObjCPropertyDecl *PD = I->getPropertyDecl();
if (!PD)
continue;
// ivars cannot be set via read-only properties, so we'll skip them
if (PD->isReadOnly())
continue;
// ivar must be released if and only if the kind of setter was not 'assign'
bool requiresRelease = PD->getSetterKind() != ObjCPropertyDecl::Assign;
if (scan_ivar_release(MD->getBody(), ID, PD, RS, SelfII, Ctx)
!= requiresRelease) {
const char *name = nullptr;
std::string buf;
llvm::raw_string_ostream os(buf);
if (requiresRelease) {
name = LOpts.getGC() == LangOptions::NonGC
? "missing ivar release (leak)"
: "missing ivar release (Hybrid MM, non-GC)";
os << "The '" << *ID
<< "' instance variable was retained by a synthesized property but "
"wasn't released in 'dealloc'";
} else {
name = LOpts.getGC() == LangOptions::NonGC
? "extra ivar release (use-after-release)"
: "extra ivar release (Hybrid MM, non-GC)";
os << "The '" << *ID
<< "' instance variable was not retained by a synthesized property "
"but was released in 'dealloc'";
}
PathDiagnosticLocation SDLoc =
PathDiagnosticLocation::createBegin(I, BR.getSourceManager());
BR.EmitBasicReport(MD, Checker, name,
categories::CoreFoundationObjectiveC, os.str(), SDLoc);
}
}
}
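// Taken together, for a synthesized 'retain' property the checker expects a
// -dealloc along these lines (illustrative):
//   - (void)dealloc {
//     [_name release];  // or: self.name = nil;
//     [super dealloc];
//   }
// A missing release is reported as a leak; releasing an 'assign' ivar is
// reported as an extra release (use-after-release).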
//===----------------------------------------------------------------------===//
// ObjCDeallocChecker
//===----------------------------------------------------------------------===//
namespace {
class ObjCDeallocChecker : public Checker<
check::ASTDecl<ObjCImplementationDecl> > {
public:
void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
BugReporter &BR) const {
if (mgr.getLangOpts().getGC() == LangOptions::GCOnly)
return;
checkObjCDealloc(this, cast<ObjCImplementationDecl>(D), mgr.getLangOpts(),
BR);
}
};
}
void ento::registerObjCDeallocChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCDeallocChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/SelectorExtras.h | //=== SelectorExtras.h - Helpers for checkers using selectors -----*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_SELECTOREXTRAS_H
#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_SELECTOREXTRAS_H
#include "clang/AST/ASTContext.h"
#include <cstdarg>
namespace clang {
namespace ento {
static inline Selector getKeywordSelectorImpl(ASTContext &Ctx,
const char *First,
va_list argp) {
SmallVector<IdentifierInfo*, 10> II;
II.push_back(&Ctx.Idents.get(First));
while (const char *s = va_arg(argp, const char *))
II.push_back(&Ctx.Idents.get(s));
return Ctx.Selectors.getSelector(II.size(), &II[0]);
}
static inline Selector getKeywordSelector(ASTContext &Ctx, va_list argp) {
const char *First = va_arg(argp, const char *);
assert(First && "keyword selectors must have at least one argument");
return getKeywordSelectorImpl(Ctx, First, argp);
}
LLVM_END_WITH_NULL
static inline Selector getKeywordSelector(ASTContext &Ctx,
const char *First, ...) {
va_list argp;
va_start(argp, First);
Selector result = getKeywordSelectorImpl(Ctx, First, argp);
va_end(argp);
return result;
}
LLVM_END_WITH_NULL
static inline void lazyInitKeywordSelector(Selector &Sel, ASTContext &Ctx,
const char *First, ...) {
if (!Sel.isNull())
return;
va_list argp;
va_start(argp, First);
Sel = getKeywordSelectorImpl(Ctx, First, argp);
va_end(argp);
}
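// Example use (hypothetical selector); the argument list must end with the
// null sentinel enforced by LLVM_END_WITH_NULL:
//   lazyInitKeywordSelector(SetObjectForKeySel, Ctx,
//                           "setObject", "forKey", nullptr);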
static inline void lazyInitNullarySelector(Selector &Sel, ASTContext &Ctx,
const char *Name) {
if (!Sel.isNull())
return;
Sel = GetNullarySelector(Name, Ctx);
}
} // end namespace ento
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp | //=== CastSizeChecker.cpp ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// CastSizeChecker checks when casting a malloc'ed symbolic region to type T,
// whether the size of the symbolic region is a multiple of the size of T.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/AST/CharUnits.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
using namespace clang;
using namespace ento;
namespace {
class CastSizeChecker : public Checker< check::PreStmt<CastExpr> > {
mutable std::unique_ptr<BuiltinBug> BT;
public:
void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
};
}
/// Check if we are casting to a struct with a flexible array at the end.
/// \code
/// struct foo {
/// size_t len;
/// struct bar data[];
/// };
/// \endcode
/// or
/// \code
/// struct foo {
/// size_t len;
/// struct bar data[0];
/// };
/// \endcode
/// In these cases it is also valid to allocate size of struct foo + a multiple
/// of struct bar.
static bool evenFlexibleArraySize(ASTContext &Ctx, CharUnits RegionSize,
CharUnits TypeSize, QualType ToPointeeTy) {
const RecordType *RT = ToPointeeTy->getAs<RecordType>();
if (!RT)
return false;
const RecordDecl *RD = RT->getDecl();
RecordDecl::field_iterator Iter(RD->field_begin());
RecordDecl::field_iterator End(RD->field_end());
const FieldDecl *Last = nullptr;
for (; Iter != End; ++Iter)
Last = *Iter;
assert(Last && "empty structs should already be handled");
const Type *ElemType = Last->getType()->getArrayElementTypeNoTypeQual();
CharUnits FlexSize;
if (const ConstantArrayType *ArrayTy =
Ctx.getAsConstantArrayType(Last->getType())) {
FlexSize = Ctx.getTypeSizeInChars(ElemType);
if (ArrayTy->getSize() == 1 && TypeSize > FlexSize)
TypeSize -= FlexSize;
else if (ArrayTy->getSize() != 0)
return false;
} else if (RD->hasFlexibleArrayMember()) {
FlexSize = Ctx.getTypeSizeInChars(ElemType);
} else {
return false;
}
if (FlexSize.isZero())
return false;
CharUnits Left = RegionSize - TypeSize;
if (Left.isNegative())
return false;
if (Left % FlexSize == 0)
return true;
return false;
}
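// E.g. for 'struct foo { size_t len; struct bar data[]; };', a region of
// sizeof(struct foo) + 3 * sizeof(struct bar) bytes leaves
// Left == 3 * sizeof(struct bar), which divides evenly by the flexible
// element size, so the cast is accepted.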
void CastSizeChecker::checkPreStmt(const CastExpr *CE, CheckerContext &C) const {
const Expr *E = CE->getSubExpr();
ASTContext &Ctx = C.getASTContext();
QualType ToTy = Ctx.getCanonicalType(CE->getType());
const PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
if (!ToPTy)
return;
QualType ToPointeeTy = ToPTy->getPointeeType();
// Only perform the check if 'ToPointeeTy' is a complete type.
if (ToPointeeTy->isIncompleteType())
return;
ProgramStateRef state = C.getState();
const MemRegion *R = state->getSVal(E, C.getLocationContext()).getAsRegion();
if (!R)
return;
const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R);
if (!SR)
return;
SValBuilder &svalBuilder = C.getSValBuilder();
SVal extent = SR->getExtent(svalBuilder);
const llvm::APSInt *extentInt = svalBuilder.getKnownValue(state, extent);
if (!extentInt)
return;
CharUnits regionSize = CharUnits::fromQuantity(extentInt->getSExtValue());
CharUnits typeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);
// Ignore void, and a few other un-sizeable types.
if (typeSize.isZero())
return;
if (regionSize % typeSize == 0)
return;
if (evenFlexibleArraySize(Ctx, regionSize, typeSize, ToPointeeTy))
return;
if (ExplodedNode *errorNode = C.generateSink()) {
if (!BT)
BT.reset(new BuiltinBug(this, "Cast region with wrong size.",
"Cast a region whose size is not a multiple"
" of the destination type size."));
auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), errorNode);
R->addRange(CE->getSourceRange());
C.emitReport(std::move(R));
}
}
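// Illustrative case that triggers the warning (assuming 4-byte int):
//   struct S { int a; int b; };            // sizeof(struct S) == 8
//   struct S *p = (struct S *)malloc(10);  // 10 % 8 != 0 -> report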
void ento::registerCastSizeChecker(CheckerManager &mgr) {
mgr.registerChecker<CastSizeChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp | //=== MallocChecker.cpp - A malloc/free checker -------------------*- C++ -*--//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines malloc/free checker, which checks for potential memory
// leaks, double free, and use-after-free problems.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "InterCheckerAPI.h"
#include "clang/AST/Attr.h"
#include "clang/AST/ParentMap.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include <climits>
using namespace clang;
using namespace ento;
namespace {
// Used to check correspondence between allocators and deallocators.
enum AllocationFamily {
AF_None,
AF_Malloc,
AF_CXXNew,
AF_CXXNewArray,
AF_IfNameIndex,
AF_Alloca
};
class RefState {
enum Kind { // Reference to allocated memory.
Allocated,
// Reference to zero-allocated memory.
AllocatedOfSizeZero,
// Reference to released/freed memory.
Released,
// The responsibility for freeing resources has transferred from
// this reference. A relinquished symbol should not be freed.
Relinquished,
// We are no longer guaranteed to have observed all manipulations
// of this pointer/memory. For example, it could have been
// passed as a parameter to an opaque function.
Escaped
};
const Stmt *S;
unsigned K : 3; // Kind enum, but stored as a bitfield.
unsigned Family : 29; // Rest of 32-bit word, currently just an allocation
// family.
RefState(Kind k, const Stmt *s, unsigned family)
: S(s), K(k), Family(family) {
assert(family != AF_None);
}
public:
bool isAllocated() const { return K == Allocated; }
bool isAllocatedOfSizeZero() const { return K == AllocatedOfSizeZero; }
bool isReleased() const { return K == Released; }
bool isRelinquished() const { return K == Relinquished; }
bool isEscaped() const { return K == Escaped; }
AllocationFamily getAllocationFamily() const {
return (AllocationFamily)Family;
}
const Stmt *getStmt() const { return S; }
bool operator==(const RefState &X) const {
return K == X.K && S == X.S && Family == X.Family;
}
static RefState getAllocated(unsigned family, const Stmt *s) {
return RefState(Allocated, s, family);
}
static RefState getAllocatedOfSizeZero(const RefState *RS) {
return RefState(AllocatedOfSizeZero, RS->getStmt(),
RS->getAllocationFamily());
}
static RefState getReleased(unsigned family, const Stmt *s) {
return RefState(Released, s, family);
}
static RefState getRelinquished(unsigned family, const Stmt *s) {
return RefState(Relinquished, s, family);
}
static RefState getEscaped(const RefState *RS) {
return RefState(Escaped, RS->getStmt(), RS->getAllocationFamily());
}
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddInteger(K);
ID.AddPointer(S);
ID.AddInteger(Family);
}
void dump(raw_ostream &OS) const {
switch (static_cast<Kind>(K)) {
#define CASE(ID) case ID: OS << #ID; break;
CASE(Allocated)
CASE(AllocatedOfSizeZero)
CASE(Released)
CASE(Relinquished)
CASE(Escaped)
}
}
LLVM_DUMP_METHOD void dump() const { dump(llvm::errs()); }
};
enum ReallocPairKind {
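// The symbol still needs to be freed by the caller if reallocation fails
// (plain 'realloc' semantics).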
RPToBeFreedAfterFailure,
// The symbol has been freed when reallocation failed.
RPIsFreeOnFailure,
// The symbol does not need to be freed after reallocation fails.
RPDoNotTrackAfterFailure
};
/// \class ReallocPair
/// \brief Stores information about the symbol being reallocated by a call to
/// 'realloc' to allow modeling failed reallocation later in the path.
struct ReallocPair {
// \brief The symbol which realloc reallocated.
SymbolRef ReallocatedSym;
ReallocPairKind Kind;
ReallocPair(SymbolRef S, ReallocPairKind K) :
ReallocatedSym(S), Kind(K) {}
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddInteger(Kind);
ID.AddPointer(ReallocatedSym);
}
bool operator==(const ReallocPair &X) const {
return ReallocatedSym == X.ReallocatedSym &&
Kind == X.Kind;
}
};
typedef std::pair<const ExplodedNode*, const MemRegion*> LeakInfo;
class MallocChecker : public Checker<check::DeadSymbols,
check::PointerEscape,
check::ConstPointerEscape,
check::PreStmt<ReturnStmt>,
check::PreCall,
check::PostStmt<CallExpr>,
check::PostStmt<CXXNewExpr>,
check::PreStmt<CXXDeleteExpr>,
check::PostStmt<BlockExpr>,
check::PostObjCMessage,
check::Location,
eval::Assume>
{
public:
MallocChecker()
: II_alloca(nullptr), II_malloc(nullptr), II_free(nullptr),
II_realloc(nullptr), II_calloc(nullptr), II_valloc(nullptr),
II_reallocf(nullptr), II_strndup(nullptr), II_strdup(nullptr),
II_kmalloc(nullptr), II_if_nameindex(nullptr),
II_if_freenameindex(nullptr) {}
/// In pessimistic mode, the checker assumes that it does not know which
/// functions might free the memory.
enum CheckKind {
CK_MallocChecker,
CK_NewDeleteChecker,
CK_NewDeleteLeaksChecker,
CK_MismatchedDeallocatorChecker,
CK_NumCheckKinds
};
enum class MemoryOperationKind {
MOK_Allocate,
MOK_Free,
MOK_Any
};
DefaultBool IsOptimistic;
DefaultBool ChecksEnabled[CK_NumCheckKinds];
CheckName CheckNames[CK_NumCheckKinds];
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
void checkPostStmt(const CXXNewExpr *NE, CheckerContext &C) const;
void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
void checkPostObjCMessage(const ObjCMethodCall &Call, CheckerContext &C) const;
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
bool Assumption) const;
void checkLocation(SVal l, bool isLoad, const Stmt *S,
CheckerContext &C) const;
ProgramStateRef checkPointerEscape(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
PointerEscapeKind Kind) const;
ProgramStateRef checkConstPointerEscape(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
PointerEscapeKind Kind) const;
void printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const override;
private:
mutable std::unique_ptr<BugType> BT_DoubleFree[CK_NumCheckKinds];
mutable std::unique_ptr<BugType> BT_DoubleDelete;
mutable std::unique_ptr<BugType> BT_Leak[CK_NumCheckKinds];
mutable std::unique_ptr<BugType> BT_UseFree[CK_NumCheckKinds];
mutable std::unique_ptr<BugType> BT_BadFree[CK_NumCheckKinds];
mutable std::unique_ptr<BugType> BT_FreeAlloca[CK_NumCheckKinds];
mutable std::unique_ptr<BugType> BT_MismatchedDealloc;
mutable std::unique_ptr<BugType> BT_OffsetFree[CK_NumCheckKinds];
mutable std::unique_ptr<BugType> BT_UseZerroAllocated[CK_NumCheckKinds];
mutable IdentifierInfo *II_alloca, *II_malloc, *II_free, *II_realloc,
*II_calloc, *II_valloc, *II_reallocf, *II_strndup,
*II_strdup, *II_kmalloc, *II_if_nameindex,
*II_if_freenameindex;
mutable Optional<uint64_t> KernelZeroFlagVal;
void initIdentifierInfo(ASTContext &C) const;
/// \brief Determine family of a deallocation expression.
AllocationFamily getAllocationFamily(CheckerContext &C, const Stmt *S) const;
/// \brief Print names of allocators and deallocators.
///
/// \returns true on success.
bool printAllocDeallocName(raw_ostream &os, CheckerContext &C,
const Expr *E) const;
/// \brief Print expected name of an allocator based on the deallocator's
/// family derived from the DeallocExpr.
void printExpectedAllocName(raw_ostream &os, CheckerContext &C,
const Expr *DeallocExpr) const;
/// \brief Print expected name of a deallocator based on the allocator's
/// family.
void printExpectedDeallocName(raw_ostream &os, AllocationFamily Family) const;
///@{
/// Check if this is one of the functions which can allocate/reallocate memory
/// pointed to by one of their arguments.
bool isMemFunction(const FunctionDecl *FD, ASTContext &C) const;
bool isCMemFunction(const FunctionDecl *FD,
ASTContext &C,
AllocationFamily Family,
MemoryOperationKind MemKind) const;
bool isStandardNewDelete(const FunctionDecl *FD, ASTContext &C) const;
///@}
/// \brief Perform a zero-allocation check.
ProgramStateRef ProcessZeroAllocation(CheckerContext &C, const Expr *E,
const unsigned AllocationSizeArg,
ProgramStateRef State) const;
ProgramStateRef MallocMemReturnsAttr(CheckerContext &C,
const CallExpr *CE,
const OwnershipAttr* Att,
ProgramStateRef State) const;
static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
const Expr *SizeEx, SVal Init,
ProgramStateRef State,
AllocationFamily Family = AF_Malloc);
static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
SVal SizeEx, SVal Init,
ProgramStateRef State,
AllocationFamily Family = AF_Malloc);
// Check this malloc() call for special flags. At present that means M_ZERO or
// __GFP_ZERO (in which case, treat it like calloc).
llvm::Optional<ProgramStateRef>
performKernelMalloc(const CallExpr *CE, CheckerContext &C,
const ProgramStateRef &State) const;
/// Update the RefState to reflect the new memory allocation.
static ProgramStateRef
MallocUpdateRefState(CheckerContext &C, const Expr *E, ProgramStateRef State,
AllocationFamily Family = AF_Malloc);
ProgramStateRef FreeMemAttr(CheckerContext &C, const CallExpr *CE,
const OwnershipAttr* Att,
ProgramStateRef State) const;
ProgramStateRef FreeMemAux(CheckerContext &C, const CallExpr *CE,
ProgramStateRef state, unsigned Num,
bool Hold,
bool &ReleasedAllocated,
bool ReturnsNullOnFailure = false) const;
ProgramStateRef FreeMemAux(CheckerContext &C, const Expr *Arg,
const Expr *ParentExpr,
ProgramStateRef State,
bool Hold,
bool &ReleasedAllocated,
bool ReturnsNullOnFailure = false) const;
ProgramStateRef ReallocMem(CheckerContext &C, const CallExpr *CE,
bool FreesMemOnFailure,
ProgramStateRef State) const;
static ProgramStateRef CallocMem(CheckerContext &C, const CallExpr *CE,
ProgramStateRef State);
///\brief Check if the memory associated with this symbol was released.
bool isReleased(SymbolRef Sym, CheckerContext &C) const;
bool checkUseAfterFree(SymbolRef Sym, CheckerContext &C, const Stmt *S) const;
void checkUseZeroAllocated(SymbolRef Sym, CheckerContext &C,
const Stmt *S) const;
bool checkDoubleDelete(SymbolRef Sym, CheckerContext &C) const;
/// Check if the function is known to free memory, or if it is
/// "interesting" and should be modeled explicitly.
///
/// \param [out] EscapingSymbol A function might not free memory in general,
/// but could be known to free a particular symbol. In this case, false is
/// returned and the single escaping symbol is returned through the out
/// parameter.
///
/// We assume that pointers do not escape through calls to system functions
/// not handled by this checker.
bool mayFreeAnyEscapedMemoryOrIsModeledExplicitly(const CallEvent *Call,
ProgramStateRef State,
SymbolRef &EscapingSymbol) const;
// Implementation of the checkPointerEscape callbacks.
ProgramStateRef checkPointerEscapeAux(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
PointerEscapeKind Kind,
bool(*CheckRefState)(const RefState*)) const;
///@{
/// Tells if a given family/call/symbol is tracked by the current checker.
/// Sets CheckKind to the kind of the checker responsible for this
/// family/call/symbol.
Optional<CheckKind> getCheckIfTracked(AllocationFamily Family,
bool IsALeakCheck = false) const;
Optional<CheckKind> getCheckIfTracked(CheckerContext &C,
const Stmt *AllocDeallocStmt,
bool IsALeakCheck = false) const;
Optional<CheckKind> getCheckIfTracked(CheckerContext &C, SymbolRef Sym,
bool IsALeakCheck = false) const;
///@}
static bool SummarizeValue(raw_ostream &os, SVal V);
static bool SummarizeRegion(raw_ostream &os, const MemRegion *MR);
void ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
const Expr *DeallocExpr) const;
void ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
SourceRange Range) const;
void ReportMismatchedDealloc(CheckerContext &C, SourceRange Range,
const Expr *DeallocExpr, const RefState *RS,
SymbolRef Sym, bool OwnershipTransferred) const;
void ReportOffsetFree(CheckerContext &C, SVal ArgVal, SourceRange Range,
const Expr *DeallocExpr,
const Expr *AllocExpr = nullptr) const;
void ReportUseAfterFree(CheckerContext &C, SourceRange Range,
SymbolRef Sym) const;
void ReportDoubleFree(CheckerContext &C, SourceRange Range, bool Released,
SymbolRef Sym, SymbolRef PrevSym) const;
void ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const;
void ReportUseZeroAllocated(CheckerContext &C, SourceRange Range,
SymbolRef Sym) const;
/// Find the location of the allocation for Sym on the path leading to the
/// exploded node N.
LeakInfo getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
CheckerContext &C) const;
void reportLeak(SymbolRef Sym, ExplodedNode *N, CheckerContext &C) const;
/// The bug visitor which allows us to print extra diagnostics along the
/// BugReport path. For example, showing the allocation site of the leaked
/// region.
class MallocBugVisitor : public BugReporterVisitorImpl<MallocBugVisitor> {
protected:
enum NotificationMode {
Normal,
ReallocationFailed
};
// The allocated region symbol tracked by the main analysis.
SymbolRef Sym;
// The mode we are in, i.e. what kind of diagnostics will be emitted.
NotificationMode Mode;
// The return symbol from the failed reallocation of the primary region.
SymbolRef FailedReallocSymbol;
bool IsLeak;
public:
MallocBugVisitor(SymbolRef S, bool isLeak = false)
: Sym(S), Mode(Normal), FailedReallocSymbol(nullptr), IsLeak(isLeak) {}
~MallocBugVisitor() override {}
void Profile(llvm::FoldingSetNodeID &ID) const override {
static int X = 0;
ID.AddPointer(&X);
ID.AddPointer(Sym);
}
inline bool isAllocated(const RefState *S, const RefState *SPrev,
const Stmt *Stmt) {
// Did not track -> allocated. Other state (released) -> allocated.
return (Stmt && (isa<CallExpr>(Stmt) || isa<CXXNewExpr>(Stmt)) &&
(S && (S->isAllocated() || S->isAllocatedOfSizeZero())) &&
(!SPrev || !(SPrev->isAllocated() ||
SPrev->isAllocatedOfSizeZero())));
}
inline bool isReleased(const RefState *S, const RefState *SPrev,
const Stmt *Stmt) {
// Did not track -> released. Other state (allocated) -> released.
return (Stmt && (isa<CallExpr>(Stmt) || isa<CXXDeleteExpr>(Stmt)) &&
(S && S->isReleased()) && (!SPrev || !SPrev->isReleased()));
}
inline bool isRelinquished(const RefState *S, const RefState *SPrev,
const Stmt *Stmt) {
// Did not track -> relinquished. Other state (allocated) -> relinquished.
return (Stmt && (isa<CallExpr>(Stmt) || isa<ObjCMessageExpr>(Stmt) ||
isa<ObjCPropertyRefExpr>(Stmt)) &&
(S && S->isRelinquished()) &&
(!SPrev || !SPrev->isRelinquished()));
}
inline bool isReallocFailedCheck(const RefState *S, const RefState *SPrev,
const Stmt *Stmt) {
// If the expression is not a call, and the state change is
// released -> allocated, it must be the realloc return value
// check. If we have to handle more cases here, it might be cleaner just
// to track this extra bit in the state itself.
return ((!Stmt || !isa<CallExpr>(Stmt)) &&
(S && (S->isAllocated() || S->isAllocatedOfSizeZero())) &&
(SPrev && !(SPrev->isAllocated() ||
SPrev->isAllocatedOfSizeZero())));
}
PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) override;
std::unique_ptr<PathDiagnosticPiece>
getEndPath(BugReporterContext &BRC, const ExplodedNode *EndPathNode,
BugReport &BR) override {
if (!IsLeak)
return nullptr;
PathDiagnosticLocation L =
PathDiagnosticLocation::createEndOfPath(EndPathNode,
BRC.getSourceManager());
// Do not add the statement itself as a range in case of leak.
return llvm::make_unique<PathDiagnosticEventPiece>(L, BR.getDescription(),
false);
}
private:
class StackHintGeneratorForReallocationFailed
: public StackHintGeneratorForSymbol {
public:
StackHintGeneratorForReallocationFailed(SymbolRef S, StringRef M)
: StackHintGeneratorForSymbol(S, M) {}
std::string getMessageForArg(const Expr *ArgE,
unsigned ArgIndex) override {
// Printed parameters start at 1, not 0.
++ArgIndex;
SmallString<200> buf;
llvm::raw_svector_ostream os(buf);
os << "Reallocation of " << ArgIndex << llvm::getOrdinalSuffix(ArgIndex)
<< " parameter failed";
return os.str();
}
std::string getMessageForReturn(const CallExpr *CallExpr) override {
return "Reallocation of returned value failed";
}
};
};
};
} // end anonymous namespace
REGISTER_MAP_WITH_PROGRAMSTATE(RegionState, SymbolRef, RefState)
REGISTER_MAP_WITH_PROGRAMSTATE(ReallocPairs, SymbolRef, ReallocPair)
// A map from the freed symbol to the symbol representing the return value of
// the free function.
REGISTER_MAP_WITH_PROGRAMSTATE(FreeReturnValue, SymbolRef, SymbolRef)
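// Sketch of how these program-state traits are used below: each one is an
// immutable map keyed by symbol, read and updated functionally, e.g.
//   if (const RefState *RS = State->get<RegionState>(Sym))
//     State = State->set<RegionState>(Sym, RefState::getEscaped(RS));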
namespace {
class StopTrackingCallback : public SymbolVisitor {
ProgramStateRef state;
public:
StopTrackingCallback(ProgramStateRef st) : state(st) {}
ProgramStateRef getState() const { return state; }
bool VisitSymbol(SymbolRef sym) override {
state = state->remove<RegionState>(sym);
return true;
}
};
} // end anonymous namespace
void MallocChecker::initIdentifierInfo(ASTContext &Ctx) const {
if (II_malloc)
return;
II_alloca = &Ctx.Idents.get("alloca");
II_malloc = &Ctx.Idents.get("malloc");
II_free = &Ctx.Idents.get("free");
II_realloc = &Ctx.Idents.get("realloc");
II_reallocf = &Ctx.Idents.get("reallocf");
II_calloc = &Ctx.Idents.get("calloc");
II_valloc = &Ctx.Idents.get("valloc");
II_strdup = &Ctx.Idents.get("strdup");
II_strndup = &Ctx.Idents.get("strndup");
II_kmalloc = &Ctx.Idents.get("kmalloc");
II_if_nameindex = &Ctx.Idents.get("if_nameindex");
II_if_freenameindex = &Ctx.Idents.get("if_freenameindex");
}
bool MallocChecker::isMemFunction(const FunctionDecl *FD, ASTContext &C) const {
if (isCMemFunction(FD, C, AF_Malloc, MemoryOperationKind::MOK_Any))
return true;
if (isCMemFunction(FD, C, AF_IfNameIndex, MemoryOperationKind::MOK_Any))
return true;
if (isCMemFunction(FD, C, AF_Alloca, MemoryOperationKind::MOK_Any))
return true;
if (isStandardNewDelete(FD, C))
return true;
return false;
}
bool MallocChecker::isCMemFunction(const FunctionDecl *FD,
ASTContext &C,
AllocationFamily Family,
MemoryOperationKind MemKind) const {
if (!FD)
return false;
bool CheckFree = (MemKind == MemoryOperationKind::MOK_Any ||
MemKind == MemoryOperationKind::MOK_Free);
bool CheckAlloc = (MemKind == MemoryOperationKind::MOK_Any ||
MemKind == MemoryOperationKind::MOK_Allocate);
if (FD->getKind() == Decl::Function) {
const IdentifierInfo *FunI = FD->getIdentifier();
initIdentifierInfo(C);
if (Family == AF_Malloc && CheckFree) {
if (FunI == II_free || FunI == II_realloc || FunI == II_reallocf)
return true;
}
if (Family == AF_Malloc && CheckAlloc) {
if (FunI == II_malloc || FunI == II_realloc || FunI == II_reallocf ||
FunI == II_calloc || FunI == II_valloc || FunI == II_strdup ||
FunI == II_strndup || FunI == II_kmalloc)
return true;
}
if (Family == AF_IfNameIndex && CheckFree) {
if (FunI == II_if_freenameindex)
return true;
}
if (Family == AF_IfNameIndex && CheckAlloc) {
if (FunI == II_if_nameindex)
return true;
}
if (Family == AF_Alloca && CheckAlloc) {
if (FunI == II_alloca)
return true;
}
}
if (Family != AF_Malloc)
return false;
if (IsOptimistic && FD->hasAttrs()) {
for (const auto *I : FD->specific_attrs<OwnershipAttr>()) {
OwnershipAttr::OwnershipKind OwnKind = I->getOwnKind();
if(OwnKind == OwnershipAttr::Takes || OwnKind == OwnershipAttr::Holds) {
if (CheckFree)
return true;
} else if (OwnKind == OwnershipAttr::Returns) {
if (CheckAlloc)
return true;
}
}
}
return false;
}
// Tells if the callee is one of the following:
// 1) A global non-placement new/delete operator function.
// 2) A global placement operator function with the single placement argument
// of type std::nothrow_t.
bool MallocChecker::isStandardNewDelete(const FunctionDecl *FD,
ASTContext &C) const {
if (!FD)
return false;
OverloadedOperatorKind Kind = FD->getOverloadedOperator();
if (Kind != OO_New && Kind != OO_Array_New &&
Kind != OO_Delete && Kind != OO_Array_Delete)
return false;
// Skip all operator new/delete methods.
if (isa<CXXMethodDecl>(FD))
return false;
// Return true if tested operator is a standard placement nothrow operator.
if (FD->getNumParams() == 2) {
QualType T = FD->getParamDecl(1)->getType();
if (const IdentifierInfo *II = T.getBaseTypeIdentifier())
return II->getName().equals("nothrow_t");
}
// Skip placement operators.
if (FD->getNumParams() != 1 || FD->isVariadic())
return false;
// One of the standard new/new[]/delete/delete[] non-placement operators.
return true;
}
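// For example, the nothrow placement form accepted above is
//   void *operator new(std::size_t, const std::nothrow_t &);
// while a user placement overload such as (hypothetical)
//   void *operator new(std::size_t, MemoryPool &);
// is rejected by the check on the second parameter's type.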
llvm::Optional<ProgramStateRef> MallocChecker::performKernelMalloc(
const CallExpr *CE, CheckerContext &C, const ProgramStateRef &State) const {
// 3-argument malloc(), as commonly used in {Free,Net,Open}BSD Kernels:
//
// void *malloc(unsigned long size, struct malloc_type *mtp, int flags);
//
// One of the possible flags is M_ZERO, which means 'give me back an
// allocation which is already zeroed', like calloc.
// 2-argument kmalloc(), as used in the Linux kernel:
//
// void *kmalloc(size_t size, gfp_t flags);
//
// Has the similar flag value __GFP_ZERO.
// This logic is largely cloned from O_CREAT in UnixAPIChecker, maybe some
// code could be shared.
ASTContext &Ctx = C.getASTContext();
llvm::Triple::OSType OS = Ctx.getTargetInfo().getTriple().getOS();
if (!KernelZeroFlagVal.hasValue()) {
if (OS == llvm::Triple::FreeBSD)
KernelZeroFlagVal = 0x0100;
else if (OS == llvm::Triple::NetBSD)
KernelZeroFlagVal = 0x0002;
else if (OS == llvm::Triple::OpenBSD)
KernelZeroFlagVal = 0x0008;
else if (OS == llvm::Triple::Linux)
// __GFP_ZERO
KernelZeroFlagVal = 0x8000;
else
// FIXME: We need a more general way of getting the M_ZERO value.
// See also: O_CREAT in UnixAPIChecker.cpp.
// Fall back to normal malloc behavior on platforms where we don't
// know M_ZERO.
return None;
}
// We treat the last argument as the flags argument, and callers fall back to
// normal malloc on a None return. This works for the FreeBSD kernel malloc
// as well as Linux kmalloc.
if (CE->getNumArgs() < 2)
return None;
const Expr *FlagsEx = CE->getArg(CE->getNumArgs() - 1);
const SVal V = State->getSVal(FlagsEx, C.getLocationContext());
if (!V.getAs<NonLoc>()) {
// The case where 'V' can be a location can only be due to a bad header,
// so in this case bail out.
return None;
}
NonLoc Flags = V.castAs<NonLoc>();
NonLoc ZeroFlag = C.getSValBuilder()
.makeIntVal(KernelZeroFlagVal.getValue(), FlagsEx->getType())
.castAs<NonLoc>();
SVal MaskedFlagsUC = C.getSValBuilder().evalBinOpNN(State, BO_And,
Flags, ZeroFlag,
FlagsEx->getType());
if (MaskedFlagsUC.isUnknownOrUndef())
return None;
DefinedSVal MaskedFlags = MaskedFlagsUC.castAs<DefinedSVal>();
// Check if maskedFlags is non-zero.
ProgramStateRef TrueState, FalseState;
std::tie(TrueState, FalseState) = State->assume(MaskedFlags);
// If M_ZERO is set, treat this like calloc (initialized).
if (TrueState && !FalseState) {
SVal ZeroVal = C.getSValBuilder().makeZeroVal(Ctx.CharTy);
return MallocMemAux(C, CE, CE->getArg(0), ZeroVal, TrueState);
}
return None;
}
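// For instance, on FreeBSD a kernel allocation such as
//   buf = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
// is modeled like calloc: the returned region is assumed zero-initialized.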
void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
if (C.wasInlined)
return;
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return;
ProgramStateRef State = C.getState();
bool ReleasedAllocatedMemory = false;
if (FD->getKind() == Decl::Function) {
initIdentifierInfo(C.getASTContext());
IdentifierInfo *FunI = FD->getIdentifier();
if (FunI == II_malloc) {
if (CE->getNumArgs() < 1)
return;
if (CE->getNumArgs() < 3) {
State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
if (CE->getNumArgs() == 1)
State = ProcessZeroAllocation(C, CE, 0, State);
} else if (CE->getNumArgs() == 3) {
llvm::Optional<ProgramStateRef> MaybeState =
performKernelMalloc(CE, C, State);
if (MaybeState.hasValue())
State = MaybeState.getValue();
else
State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
}
} else if (FunI == II_kmalloc) {
llvm::Optional<ProgramStateRef> MaybeState =
performKernelMalloc(CE, C, State);
if (MaybeState.hasValue())
State = MaybeState.getValue();
else
State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
} else if (FunI == II_valloc) {
if (CE->getNumArgs() < 1)
return;
State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
State = ProcessZeroAllocation(C, CE, 0, State);
} else if (FunI == II_realloc) {
State = ReallocMem(C, CE, false, State);
State = ProcessZeroAllocation(C, CE, 1, State);
} else if (FunI == II_reallocf) {
State = ReallocMem(C, CE, true, State);
State = ProcessZeroAllocation(C, CE, 1, State);
} else if (FunI == II_calloc) {
State = CallocMem(C, CE, State);
State = ProcessZeroAllocation(C, CE, 0, State);
State = ProcessZeroAllocation(C, CE, 1, State);
} else if (FunI == II_free) {
State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory);
} else if (FunI == II_strdup) {
State = MallocUpdateRefState(C, CE, State);
} else if (FunI == II_strndup) {
State = MallocUpdateRefState(C, CE, State);
} else if (FunI == II_alloca) {
State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
AF_Alloca);
State = ProcessZeroAllocation(C, CE, 0, State);
} else if (isStandardNewDelete(FD, C.getASTContext())) {
// Process direct calls to operator new/new[]/delete/delete[] functions
// as distinct from new/new[]/delete/delete[] expressions that are
// processed by the checkPostStmt callbacks for CXXNewExpr and
// CXXDeleteExpr.
OverloadedOperatorKind K = FD->getOverloadedOperator();
if (K == OO_New) {
State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
AF_CXXNew);
State = ProcessZeroAllocation(C, CE, 0, State);
}
else if (K == OO_Array_New) {
State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
AF_CXXNewArray);
State = ProcessZeroAllocation(C, CE, 0, State);
}
else if (K == OO_Delete || K == OO_Array_Delete)
State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory);
else
llvm_unreachable("not a new/delete operator");
} else if (FunI == II_if_nameindex) {
// Should we model this differently? We can allocate a fixed number of
// elements with zeros in the last one.
State = MallocMemAux(C, CE, UnknownVal(), UnknownVal(), State,
AF_IfNameIndex);
} else if (FunI == II_if_freenameindex) {
State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory);
}
}
if (IsOptimistic || ChecksEnabled[CK_MismatchedDeallocatorChecker]) {
// Check all the attributes, if there are any.
// There can be multiple of these attributes.
if (FD->hasAttrs())
for (const auto *I : FD->specific_attrs<OwnershipAttr>()) {
switch (I->getOwnKind()) {
case OwnershipAttr::Returns:
State = MallocMemReturnsAttr(C, CE, I, State);
break;
case OwnershipAttr::Takes:
case OwnershipAttr::Holds:
State = FreeMemAttr(C, CE, I, State);
break;
}
}
}
C.addTransition(State);
}
// Performs a zero-size allocation check.
ProgramStateRef MallocChecker::ProcessZeroAllocation(CheckerContext &C,
const Expr *E,
const unsigned AllocationSizeArg,
ProgramStateRef State) const {
if (!State)
return nullptr;
const Expr *Arg = nullptr;
if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
Arg = CE->getArg(AllocationSizeArg);
}
else if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(E)) {
if (NE->isArray())
Arg = NE->getArraySize();
else
return State;
}
else
llvm_unreachable("not a CallExpr or CXXNewExpr");
assert(Arg);
Optional<DefinedSVal> DefArgVal =
State->getSVal(Arg, C.getLocationContext()).getAs<DefinedSVal>();
if (!DefArgVal)
return State;
// Check if the allocation size is 0.
ProgramStateRef TrueState, FalseState;
SValBuilder &SvalBuilder = C.getSValBuilder();
DefinedSVal Zero =
SvalBuilder.makeZeroVal(Arg->getType()).castAs<DefinedSVal>();
std::tie(TrueState, FalseState) =
State->assume(SvalBuilder.evalEQ(State, *DefArgVal, Zero));
if (TrueState && !FalseState) {
SVal retVal = State->getSVal(E, C.getLocationContext());
SymbolRef Sym = retVal.getAsLocSymbol();
if (!Sym)
return State;
const RefState *RS = State->get<RegionState>(Sym);
if (!RS)
return State; // TODO: change to assert(RS) once realloc() is
// guaranteed to have a RegionState attached.
if (!RS->isAllocated())
return State;
return TrueState->set<RegionState>(Sym,
RefState::getAllocatedOfSizeZero(RS));
}
// Assume the value is non-zero going forward.
assert(FalseState);
return FalseState;
}
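// Illustrative: after 'char *p = malloc(0);' the allocation size is known to
// be zero, so p's symbol is marked AllocatedOfSizeZero and later uses of 'p'
// can be flagged; for a nonconstant size, the state going forward simply
// assumes the size is nonzero.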
static QualType getDeepPointeeType(QualType T) {
QualType Result = T, PointeeType = T->getPointeeType();
while (!PointeeType.isNull()) {
Result = PointeeType;
PointeeType = PointeeType->getPointeeType();
}
return Result;
}
static bool treatUnusedNewEscaped(const CXXNewExpr *NE) {
const CXXConstructExpr *ConstructE = NE->getConstructExpr();
if (!ConstructE)
return false;
if (!NE->getAllocatedType()->getAsCXXRecordDecl())
return false;
const CXXConstructorDecl *CtorD = ConstructE->getConstructor();
// Iterate over the constructor parameters.
for (const auto *CtorParam : CtorD->params()) {
QualType CtorParamPointeeT = CtorParam->getType()->getPointeeType();
if (CtorParamPointeeT.isNull())
continue;
CtorParamPointeeT = getDeepPointeeType(CtorParamPointeeT);
if (CtorParamPointeeT->getAsCXXRecordDecl())
return true;
}
return false;
}
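// For example (hypothetical types), an unconsumed 'new Observer(&registry)'
// is treated as escaped rather than leaked: the constructor takes a pointer
// to a record and may have registered the new object with it.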
void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
CheckerContext &C) const {
if (NE->getNumPlacementArgs())
for (CXXNewExpr::const_arg_iterator I = NE->placement_arg_begin(),
E = NE->placement_arg_end(); I != E; ++I)
if (SymbolRef Sym = C.getSVal(*I).getAsSymbol())
checkUseAfterFree(Sym, C, *I);
if (!isStandardNewDelete(NE->getOperatorNew(), C.getASTContext()))
return;
ParentMap &PM = C.getLocationContext()->getParentMap();
if (!PM.isConsumedExpr(NE) && treatUnusedNewEscaped(NE))
return;
ProgramStateRef State = C.getState();
// The return value from operator new is bound to a specified initialization
// value (if any) and we don't want to lose this value. So we call
// MallocUpdateRefState() instead of MallocMemAux() which breaks the
// existing binding.
State = MallocUpdateRefState(C, NE, State, NE->isArray() ? AF_CXXNewArray
: AF_CXXNew);
State = ProcessZeroAllocation(C, NE, 0, State);
C.addTransition(State);
}
void MallocChecker::checkPreStmt(const CXXDeleteExpr *DE,
CheckerContext &C) const {
if (!ChecksEnabled[CK_NewDeleteChecker])
if (SymbolRef Sym = C.getSVal(DE->getArgument()).getAsSymbol())
checkUseAfterFree(Sym, C, DE->getArgument());
if (!isStandardNewDelete(DE->getOperatorDelete(), C.getASTContext()))
return;
ProgramStateRef State = C.getState();
bool ReleasedAllocated;
State = FreeMemAux(C, DE->getArgument(), DE, State,
/*Hold*/false, ReleasedAllocated);
C.addTransition(State);
}
static bool isKnownDeallocObjCMethodName(const ObjCMethodCall &Call) {
// If the first selector piece is one of the names below, assume that the
// object takes ownership of the memory, promising to eventually deallocate it
// with free().
// Ex: [NSData dataWithBytesNoCopy:bytes length:10];
// (...unless a 'freeWhenDone' parameter is false, but that's checked later.)
StringRef FirstSlot = Call.getSelector().getNameForSlot(0);
if (FirstSlot == "dataWithBytesNoCopy" ||
FirstSlot == "initWithBytesNoCopy" ||
FirstSlot == "initWithCharactersNoCopy")
return true;
return false;
}
static Optional<bool> getFreeWhenDoneArg(const ObjCMethodCall &Call) {
Selector S = Call.getSelector();
// FIXME: We should not rely on fully-constrained symbols being folded.
for (unsigned i = 1; i < S.getNumArgs(); ++i)
if (S.getNameForSlot(i).equals("freeWhenDone"))
return !Call.getArgSVal(i).isZeroConstant();
return None;
}
void MallocChecker::checkPostObjCMessage(const ObjCMethodCall &Call,
CheckerContext &C) const {
if (C.wasInlined)
return;
if (!isKnownDeallocObjCMethodName(Call))
return;
if (Optional<bool> FreeWhenDone = getFreeWhenDoneArg(Call))
if (!*FreeWhenDone)
return;
bool ReleasedAllocatedMemory;
ProgramStateRef State = FreeMemAux(C, Call.getArgExpr(0),
Call.getOriginExpr(), C.getState(),
/*Hold=*/true, ReleasedAllocatedMemory,
/*RetNullOnFailure=*/true);
C.addTransition(State);
}
ProgramStateRef
MallocChecker::MallocMemReturnsAttr(CheckerContext &C, const CallExpr *CE,
const OwnershipAttr *Att,
ProgramStateRef State) const {
if (!State)
return nullptr;
if (Att->getModule() != II_malloc)
return nullptr;
OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end();
if (I != E) {
return MallocMemAux(C, CE, CE->getArg(*I), UndefinedVal(), State);
}
return MallocMemAux(C, CE, UnknownVal(), UndefinedVal(), State);
}
ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
const CallExpr *CE,
const Expr *SizeEx, SVal Init,
ProgramStateRef State,
AllocationFamily Family) {
if (!State)
return nullptr;
return MallocMemAux(C, CE, State->getSVal(SizeEx, C.getLocationContext()),
Init, State, Family);
}
ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
const CallExpr *CE,
SVal Size, SVal Init,
ProgramStateRef State,
AllocationFamily Family) {
if (!State)
return nullptr;
// We expect the malloc functions to return a pointer.
if (!Loc::isLocType(CE->getType()))
return nullptr;
// Bind the return value to the symbolic value from the heap region.
// TODO: We could rewrite post visit to eval call; 'malloc' does not have
// side effects other than what we model here.
unsigned Count = C.blockCount();
SValBuilder &svalBuilder = C.getSValBuilder();
const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
DefinedSVal RetVal = svalBuilder.getConjuredHeapSymbolVal(CE, LCtx, Count)
.castAs<DefinedSVal>();
State = State->BindExpr(CE, C.getLocationContext(), RetVal);
// Fill the region with the initialization value.
State = State->bindDefault(RetVal, Init);
// Set the region's extent equal to the Size parameter.
const SymbolicRegion *R =
dyn_cast_or_null<SymbolicRegion>(RetVal.getAsRegion());
if (!R)
return nullptr;
if (Optional<DefinedOrUnknownSVal> DefinedSize =
Size.getAs<DefinedOrUnknownSVal>()) {
DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
DefinedOrUnknownSVal extentMatchesSize =
svalBuilder.evalEQ(State, Extent, *DefinedSize);
State = State->assume(extentMatchesSize, true);
assert(State);
}
return MallocUpdateRefState(C, CE, State, Family);
}
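// A minimal sketch of the constraint added above, assuming a call such as
// 'char *p = (char *)malloc(n)': the conjured heap symbol bound to 'p' is
// constrained so that extent(p) == n, which lets later bounds reasoning
// treat an access like p[n] as out of range.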
ProgramStateRef MallocChecker::MallocUpdateRefState(CheckerContext &C,
const Expr *E,
ProgramStateRef State,
AllocationFamily Family) {
if (!State)
return nullptr;
// Get the return value.
SVal retVal = State->getSVal(E, C.getLocationContext());
// We expect the malloc functions to return a pointer.
if (!retVal.getAs<Loc>())
return nullptr;
SymbolRef Sym = retVal.getAsLocSymbol();
assert(Sym);
// Set the symbol's state to Allocated.
return State->set<RegionState>(Sym, RefState::getAllocated(Family, E));
}
ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C,
const CallExpr *CE,
const OwnershipAttr *Att,
ProgramStateRef State) const {
if (!State)
return nullptr;
if (Att->getModule() != II_malloc)
return nullptr;
bool ReleasedAllocated = false;
for (const auto &Arg : Att->args()) {
ProgramStateRef StateI = FreeMemAux(C, CE, State, Arg,
Att->getOwnKind() == OwnershipAttr::Holds,
ReleasedAllocated);
if (StateI)
State = StateI;
}
return State;
}
ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
const CallExpr *CE,
ProgramStateRef State,
unsigned Num,
bool Hold,
bool &ReleasedAllocated,
bool ReturnsNullOnFailure) const {
if (!State)
return nullptr;
if (CE->getNumArgs() < (Num + 1))
return nullptr;
return FreeMemAux(C, CE->getArg(Num), CE, State, Hold,
ReleasedAllocated, ReturnsNullOnFailure);
}
/// Checks whether the previous call to free on the given symbol failed.
/// Returns true if it did, and reports the corresponding return-value symbol
/// via \p RetStatusSymbol.
static bool didPreviousFreeFail(ProgramStateRef State,
SymbolRef Sym, SymbolRef &RetStatusSymbol) {
const SymbolRef *Ret = State->get<FreeReturnValue>(Sym);
if (Ret) {
assert(*Ret && "We should not store the null return symbol");
ConstraintManager &CMgr = State->getConstraintManager();
ConditionTruthVal FreeFailed = CMgr.isNull(State, *Ret);
RetStatusSymbol = *Ret;
return FreeFailed.isConstrainedTrue();
}
return false;
}
AllocationFamily MallocChecker::getAllocationFamily(CheckerContext &C,
const Stmt *S) const {
if (!S)
return AF_None;
if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
ASTContext &Ctx = C.getASTContext();
if (isCMemFunction(FD, Ctx, AF_Malloc, MemoryOperationKind::MOK_Any))
return AF_Malloc;
if (isStandardNewDelete(FD, Ctx)) {
OverloadedOperatorKind Kind = FD->getOverloadedOperator();
if (Kind == OO_New || Kind == OO_Delete)
return AF_CXXNew;
else if (Kind == OO_Array_New || Kind == OO_Array_Delete)
return AF_CXXNewArray;
}
if (isCMemFunction(FD, Ctx, AF_IfNameIndex, MemoryOperationKind::MOK_Any))
return AF_IfNameIndex;
if (isCMemFunction(FD, Ctx, AF_Alloca, MemoryOperationKind::MOK_Any))
return AF_Alloca;
return AF_None;
}
if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(S))
return NE->isArray() ? AF_CXXNewArray : AF_CXXNew;
if (const CXXDeleteExpr *DE = dyn_cast<CXXDeleteExpr>(S))
return DE->isArrayForm() ? AF_CXXNewArray : AF_CXXNew;
if (isa<ObjCMessageExpr>(S))
return AF_Malloc;
return AF_None;
}
bool MallocChecker::printAllocDeallocName(raw_ostream &os, CheckerContext &C,
const Expr *E) const {
if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
// FIXME: This doesn't handle indirect calls.
const FunctionDecl *FD = CE->getDirectCallee();
if (!FD)
return false;
os << *FD;
if (!FD->isOverloadedOperator())
os << "()";
return true;
}
if (const ObjCMessageExpr *Msg = dyn_cast<ObjCMessageExpr>(E)) {
if (Msg->isInstanceMessage())
os << "-";
else
os << "+";
Msg->getSelector().print(os);
return true;
}
if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(E)) {
os << "'"
<< getOperatorSpelling(NE->getOperatorNew()->getOverloadedOperator())
<< "'";
return true;
}
if (const CXXDeleteExpr *DE = dyn_cast<CXXDeleteExpr>(E)) {
os << "'"
<< getOperatorSpelling(DE->getOperatorDelete()->getOverloadedOperator())
<< "'";
return true;
}
return false;
}
void MallocChecker::printExpectedAllocName(raw_ostream &os, CheckerContext &C,
const Expr *E) const {
AllocationFamily Family = getAllocationFamily(C, E);
switch(Family) {
case AF_Malloc: os << "malloc()"; return;
case AF_CXXNew: os << "'new'"; return;
case AF_CXXNewArray: os << "'new[]'"; return;
case AF_IfNameIndex: os << "'if_nameindex()'"; return;
case AF_Alloca:
case AF_None: llvm_unreachable("not a deallocation expression");
}
}
void MallocChecker::printExpectedDeallocName(raw_ostream &os,
AllocationFamily Family) const {
switch(Family) {
case AF_Malloc: os << "free()"; return;
case AF_CXXNew: os << "'delete'"; return;
case AF_CXXNewArray: os << "'delete[]'"; return;
case AF_IfNameIndex: os << "'if_freenameindex()'"; return;
case AF_Alloca:
case AF_None: llvm_unreachable("suspicious argument");
}
}
ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
const Expr *ArgExpr,
const Expr *ParentExpr,
ProgramStateRef State,
bool Hold,
bool &ReleasedAllocated,
bool ReturnsNullOnFailure) const {
if (!State)
return nullptr;
SVal ArgVal = State->getSVal(ArgExpr, C.getLocationContext());
if (!ArgVal.getAs<DefinedOrUnknownSVal>())
return nullptr;
DefinedOrUnknownSVal location = ArgVal.castAs<DefinedOrUnknownSVal>();
// Check for null dereferences.
if (!location.getAs<Loc>())
return nullptr;
// The explicit NULL case, no operation is performed.
ProgramStateRef notNullState, nullState;
std::tie(notNullState, nullState) = State->assume(location);
if (nullState && !notNullState)
return nullptr;
  // Unknown values could easily be okay; undefined values are handled
  // elsewhere.
if (ArgVal.isUnknownOrUndef())
return nullptr;
const MemRegion *R = ArgVal.getAsRegion();
// Nonlocs can't be freed, of course.
// Non-region locations (labels and fixed addresses) also shouldn't be freed.
if (!R) {
ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
return nullptr;
}
R = R->StripCasts();
// Blocks might show up as heap data, but should not be free()d
if (isa<BlockDataRegion>(R)) {
ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
return nullptr;
}
const MemSpaceRegion *MS = R->getMemorySpace();
// Parameters, locals, statics, globals, and memory returned by
// __builtin_alloca() shouldn't be freed.
if (!(isa<UnknownSpaceRegion>(MS) || isa<HeapSpaceRegion>(MS))) {
// FIXME: at the time this code was written, malloc() regions were
// represented by conjured symbols, which are all in UnknownSpaceRegion.
// This means that there isn't actually anything from HeapSpaceRegion
// that should be freed, even though we allow it here.
// Of course, free() can work on memory allocated outside the current
// function, so UnknownSpaceRegion is always a possibility.
// False negatives are better than false positives.
if (isa<AllocaRegion>(R))
ReportFreeAlloca(C, ArgVal, ArgExpr->getSourceRange());
else
ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
return nullptr;
}
const SymbolicRegion *SrBase = dyn_cast<SymbolicRegion>(R->getBaseRegion());
// Various cases could lead to non-symbol values here.
// For now, ignore them.
if (!SrBase)
return nullptr;
SymbolRef SymBase = SrBase->getSymbol();
const RefState *RsBase = State->get<RegionState>(SymBase);
SymbolRef PreviousRetStatusSymbol = nullptr;
if (RsBase) {
// Memory returned by alloca() shouldn't be freed.
if (RsBase->getAllocationFamily() == AF_Alloca) {
ReportFreeAlloca(C, ArgVal, ArgExpr->getSourceRange());
return nullptr;
}
// Check for double free first.
if ((RsBase->isReleased() || RsBase->isRelinquished()) &&
!didPreviousFreeFail(State, SymBase, PreviousRetStatusSymbol)) {
ReportDoubleFree(C, ParentExpr->getSourceRange(), RsBase->isReleased(),
SymBase, PreviousRetStatusSymbol);
return nullptr;
// If the pointer is allocated or escaped, but we are now trying to free it,
// check that the call to free is proper.
} else if (RsBase->isAllocated() || RsBase->isAllocatedOfSizeZero() ||
RsBase->isEscaped()) {
// Check if an expected deallocation function matches the real one.
bool DeallocMatchesAlloc =
RsBase->getAllocationFamily() == getAllocationFamily(C, ParentExpr);
if (!DeallocMatchesAlloc) {
ReportMismatchedDealloc(C, ArgExpr->getSourceRange(),
ParentExpr, RsBase, SymBase, Hold);
return nullptr;
}
// Check if the memory location being freed is the actual location
// allocated, or an offset.
RegionOffset Offset = R->getAsOffset();
if (Offset.isValid() &&
!Offset.hasSymbolicOffset() &&
Offset.getOffset() != 0) {
const Expr *AllocExpr = cast<Expr>(RsBase->getStmt());
ReportOffsetFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr,
AllocExpr);
return nullptr;
}
}
}
ReleasedAllocated = (RsBase != nullptr) && (RsBase->isAllocated() ||
RsBase->isAllocatedOfSizeZero());
  // Clear out the stored return-value info from any previous call to free.
State = State->remove<FreeReturnValue>(SymBase);
// Keep track of the return value. If it is NULL, we will know that free
// failed.
if (ReturnsNullOnFailure) {
SVal RetVal = C.getSVal(ParentExpr);
SymbolRef RetStatusSymbol = RetVal.getAsSymbol();
if (RetStatusSymbol) {
C.getSymbolManager().addSymbolDependency(SymBase, RetStatusSymbol);
State = State->set<FreeReturnValue>(SymBase, RetStatusSymbol);
}
}
AllocationFamily Family = RsBase ? RsBase->getAllocationFamily()
: getAllocationFamily(C, ParentExpr);
// Normal free.
if (Hold)
return State->set<RegionState>(SymBase,
RefState::getRelinquished(Family,
ParentExpr));
return State->set<RegionState>(SymBase,
RefState::getReleased(Family, ParentExpr));
}
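// Two hypothetical inputs this routine rejects before touching RegionState:
//
//   char buf[4];
//   free(buf);          // non-heap region -> ReportBadFree
//
//   char *p = (char *)malloc(8);
//   free(p + 1);        // nonzero offset from the start -> ReportOffsetFree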
Optional<MallocChecker::CheckKind>
MallocChecker::getCheckIfTracked(AllocationFamily Family,
bool IsALeakCheck) const {
switch (Family) {
case AF_Malloc:
case AF_Alloca:
case AF_IfNameIndex: {
if (ChecksEnabled[CK_MallocChecker])
return CK_MallocChecker;
return Optional<MallocChecker::CheckKind>();
}
case AF_CXXNew:
case AF_CXXNewArray: {
if (IsALeakCheck) {
if (ChecksEnabled[CK_NewDeleteLeaksChecker])
return CK_NewDeleteLeaksChecker;
}
else {
if (ChecksEnabled[CK_NewDeleteChecker])
return CK_NewDeleteChecker;
}
return Optional<MallocChecker::CheckKind>();
}
case AF_None: {
llvm_unreachable("no family");
}
}
llvm_unreachable("unhandled family");
}
Optional<MallocChecker::CheckKind>
MallocChecker::getCheckIfTracked(CheckerContext &C,
const Stmt *AllocDeallocStmt,
bool IsALeakCheck) const {
return getCheckIfTracked(getAllocationFamily(C, AllocDeallocStmt),
IsALeakCheck);
}
Optional<MallocChecker::CheckKind>
MallocChecker::getCheckIfTracked(CheckerContext &C, SymbolRef Sym,
bool IsALeakCheck) const {
const RefState *RS = C.getState()->get<RegionState>(Sym);
assert(RS);
return getCheckIfTracked(RS->getAllocationFamily(), IsALeakCheck);
}
bool MallocChecker::SummarizeValue(raw_ostream &os, SVal V) {
if (Optional<nonloc::ConcreteInt> IntVal = V.getAs<nonloc::ConcreteInt>())
os << "an integer (" << IntVal->getValue() << ")";
else if (Optional<loc::ConcreteInt> ConstAddr = V.getAs<loc::ConcreteInt>())
os << "a constant address (" << ConstAddr->getValue() << ")";
else if (Optional<loc::GotoLabel> Label = V.getAs<loc::GotoLabel>())
os << "the address of the label '" << Label->getLabel()->getName() << "'";
else
return false;
return true;
}
bool MallocChecker::SummarizeRegion(raw_ostream &os,
const MemRegion *MR) {
switch (MR->getKind()) {
case MemRegion::FunctionTextRegionKind: {
const NamedDecl *FD = cast<FunctionTextRegion>(MR)->getDecl();
if (FD)
os << "the address of the function '" << *FD << '\'';
else
os << "the address of a function";
return true;
}
case MemRegion::BlockTextRegionKind:
os << "block text";
return true;
case MemRegion::BlockDataRegionKind:
// FIXME: where the block came from?
os << "a block";
return true;
default: {
const MemSpaceRegion *MS = MR->getMemorySpace();
    if (isa<StackLocalsSpaceRegion>(MS)) {
      const VarRegion *VR = dyn_cast<VarRegion>(MR);
      const VarDecl *VD = VR ? VR->getDecl() : nullptr;
      if (VD)
        os << "the address of the local variable '" << VD->getName() << "'";
      else
        os << "the address of a local stack variable";
      return true;
    }
    if (isa<StackArgumentsSpaceRegion>(MS)) {
      const VarRegion *VR = dyn_cast<VarRegion>(MR);
      const VarDecl *VD = VR ? VR->getDecl() : nullptr;
      if (VD)
        os << "the address of the parameter '" << VD->getName() << "'";
      else
        os << "the address of a parameter";
      return true;
    }
    if (isa<GlobalsSpaceRegion>(MS)) {
      const VarRegion *VR = dyn_cast<VarRegion>(MR);
      const VarDecl *VD = VR ? VR->getDecl() : nullptr;
      if (VD) {
        if (VD->isStaticLocal())
          os << "the address of the static variable '" << VD->getName() << "'";
        else
          os << "the address of the global variable '" << VD->getName() << "'";
      } else
        os << "the address of a global variable";
      return true;
    }
return false;
}
}
}
void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
SourceRange Range,
const Expr *DeallocExpr) const {
if (!ChecksEnabled[CK_MallocChecker] &&
!ChecksEnabled[CK_NewDeleteChecker])
return;
Optional<MallocChecker::CheckKind> CheckKind =
getCheckIfTracked(C, DeallocExpr);
if (!CheckKind.hasValue())
return;
if (ExplodedNode *N = C.generateSink()) {
if (!BT_BadFree[*CheckKind])
BT_BadFree[*CheckKind].reset(
new BugType(CheckNames[*CheckKind], "Bad free", "Memory Error"));
SmallString<100> buf;
llvm::raw_svector_ostream os(buf);
const MemRegion *MR = ArgVal.getAsRegion();
while (const ElementRegion *ER = dyn_cast_or_null<ElementRegion>(MR))
MR = ER->getSuperRegion();
os << "Argument to ";
if (!printAllocDeallocName(os, C, DeallocExpr))
os << "deallocator";
os << " is ";
bool Summarized = MR ? SummarizeRegion(os, MR)
: SummarizeValue(os, ArgVal);
if (Summarized)
os << ", which is not memory allocated by ";
else
os << "not memory allocated by ";
printExpectedAllocName(os, C, DeallocExpr);
auto R = llvm::make_unique<BugReport>(*BT_BadFree[*CheckKind], os.str(), N);
R->markInteresting(MR);
R->addRange(Range);
C.emitReport(std::move(R));
}
}
void MallocChecker::ReportFreeAlloca(CheckerContext &C, SVal ArgVal,
SourceRange Range) const {
Optional<MallocChecker::CheckKind> CheckKind;
if (ChecksEnabled[CK_MallocChecker])
CheckKind = CK_MallocChecker;
else if (ChecksEnabled[CK_MismatchedDeallocatorChecker])
CheckKind = CK_MismatchedDeallocatorChecker;
else
return;
if (ExplodedNode *N = C.generateSink()) {
if (!BT_FreeAlloca[*CheckKind])
BT_FreeAlloca[*CheckKind].reset(
new BugType(CheckNames[*CheckKind], "Free alloca()", "Memory Error"));
auto R = llvm::make_unique<BugReport>(
*BT_FreeAlloca[*CheckKind],
"Memory allocated by alloca() should not be deallocated", N);
R->markInteresting(ArgVal.getAsRegion());
R->addRange(Range);
C.emitReport(std::move(R));
}
}
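// Hypothetical trigger for the report above:
//
//   char *p = (char *)alloca(16);  // RegionState: Allocated(AF_Alloca)
//   free(p);                       // "Memory allocated by alloca() should
//                                  //  not be deallocated"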
void MallocChecker::ReportMismatchedDealloc(CheckerContext &C,
SourceRange Range,
const Expr *DeallocExpr,
const RefState *RS,
SymbolRef Sym,
bool OwnershipTransferred) const {
if (!ChecksEnabled[CK_MismatchedDeallocatorChecker])
return;
if (ExplodedNode *N = C.generateSink()) {
if (!BT_MismatchedDealloc)
BT_MismatchedDealloc.reset(
new BugType(CheckNames[CK_MismatchedDeallocatorChecker],
"Bad deallocator", "Memory Error"));
SmallString<100> buf;
llvm::raw_svector_ostream os(buf);
const Expr *AllocExpr = cast<Expr>(RS->getStmt());
SmallString<20> AllocBuf;
llvm::raw_svector_ostream AllocOs(AllocBuf);
SmallString<20> DeallocBuf;
llvm::raw_svector_ostream DeallocOs(DeallocBuf);
if (OwnershipTransferred) {
if (printAllocDeallocName(DeallocOs, C, DeallocExpr))
os << DeallocOs.str() << " cannot";
else
os << "Cannot";
os << " take ownership of memory";
if (printAllocDeallocName(AllocOs, C, AllocExpr))
os << " allocated by " << AllocOs.str();
} else {
os << "Memory";
if (printAllocDeallocName(AllocOs, C, AllocExpr))
os << " allocated by " << AllocOs.str();
os << " should be deallocated by ";
printExpectedDeallocName(os, RS->getAllocationFamily());
if (printAllocDeallocName(DeallocOs, C, DeallocExpr))
os << ", not " << DeallocOs.str();
}
auto R = llvm::make_unique<BugReport>(*BT_MismatchedDealloc, os.str(), N);
R->markInteresting(Sym);
R->addRange(Range);
R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
C.emitReport(std::move(R));
}
}
void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal,
SourceRange Range, const Expr *DeallocExpr,
const Expr *AllocExpr) const {
if (!ChecksEnabled[CK_MallocChecker] &&
!ChecksEnabled[CK_NewDeleteChecker])
return;
Optional<MallocChecker::CheckKind> CheckKind =
getCheckIfTracked(C, AllocExpr);
if (!CheckKind.hasValue())
return;
ExplodedNode *N = C.generateSink();
if (!N)
return;
if (!BT_OffsetFree[*CheckKind])
BT_OffsetFree[*CheckKind].reset(
new BugType(CheckNames[*CheckKind], "Offset free", "Memory Error"));
SmallString<100> buf;
llvm::raw_svector_ostream os(buf);
SmallString<20> AllocNameBuf;
llvm::raw_svector_ostream AllocNameOs(AllocNameBuf);
const MemRegion *MR = ArgVal.getAsRegion();
assert(MR && "Only MemRegion based symbols can have offset free errors");
RegionOffset Offset = MR->getAsOffset();
assert((Offset.isValid() &&
!Offset.hasSymbolicOffset() &&
Offset.getOffset() != 0) &&
"Only symbols with a valid offset can have offset free errors");
int offsetBytes = Offset.getOffset() / C.getASTContext().getCharWidth();
os << "Argument to ";
if (!printAllocDeallocName(os, C, DeallocExpr))
os << "deallocator";
os << " is offset by "
<< offsetBytes
<< " "
<< ((abs(offsetBytes) > 1) ? "bytes" : "byte")
<< " from the start of ";
if (AllocExpr && printAllocDeallocName(AllocNameOs, C, AllocExpr))
os << "memory allocated by " << AllocNameOs.str();
else
os << "allocated memory";
auto R = llvm::make_unique<BugReport>(*BT_OffsetFree[*CheckKind], os.str(), N);
R->markInteresting(MR->getBaseRegion());
R->addRange(Range);
C.emitReport(std::move(R));
}
void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
SymbolRef Sym) const {
if (!ChecksEnabled[CK_MallocChecker] &&
!ChecksEnabled[CK_NewDeleteChecker])
return;
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
if (!CheckKind.hasValue())
return;
if (ExplodedNode *N = C.generateSink()) {
if (!BT_UseFree[*CheckKind])
BT_UseFree[*CheckKind].reset(new BugType(
CheckNames[*CheckKind], "Use-after-free", "Memory Error"));
auto R = llvm::make_unique<BugReport>(*BT_UseFree[*CheckKind],
"Use of memory after it is freed", N);
R->markInteresting(Sym);
R->addRange(Range);
R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
C.emitReport(std::move(R));
}
}
void MallocChecker::ReportDoubleFree(CheckerContext &C, SourceRange Range,
bool Released, SymbolRef Sym,
SymbolRef PrevSym) const {
if (!ChecksEnabled[CK_MallocChecker] &&
!ChecksEnabled[CK_NewDeleteChecker])
return;
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
if (!CheckKind.hasValue())
return;
if (ExplodedNode *N = C.generateSink()) {
if (!BT_DoubleFree[*CheckKind])
BT_DoubleFree[*CheckKind].reset(
new BugType(CheckNames[*CheckKind], "Double free", "Memory Error"));
auto R = llvm::make_unique<BugReport>(
*BT_DoubleFree[*CheckKind],
(Released ? "Attempt to free released memory"
: "Attempt to free non-owned memory"),
N);
R->addRange(Range);
R->markInteresting(Sym);
if (PrevSym)
R->markInteresting(PrevSym);
R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
C.emitReport(std::move(R));
}
}
void MallocChecker::ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const {
if (!ChecksEnabled[CK_NewDeleteChecker])
return;
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
if (!CheckKind.hasValue())
return;
if (ExplodedNode *N = C.generateSink()) {
if (!BT_DoubleDelete)
BT_DoubleDelete.reset(new BugType(CheckNames[CK_NewDeleteChecker],
"Double delete", "Memory Error"));
auto R = llvm::make_unique<BugReport>(
*BT_DoubleDelete, "Attempt to delete released memory", N);
R->markInteresting(Sym);
R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
C.emitReport(std::move(R));
}
}
void MallocChecker::ReportUseZeroAllocated(CheckerContext &C,
SourceRange Range,
SymbolRef Sym) const {
if (!ChecksEnabled[CK_MallocChecker] &&
!ChecksEnabled[CK_NewDeleteChecker])
return;
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
if (!CheckKind.hasValue())
return;
if (ExplodedNode *N = C.generateSink()) {
if (!BT_UseZerroAllocated[*CheckKind])
BT_UseZerroAllocated[*CheckKind].reset(new BugType(
CheckNames[*CheckKind], "Use of zero allocated", "Memory Error"));
auto R = llvm::make_unique<BugReport>(*BT_UseZerroAllocated[*CheckKind],
"Use of zero-allocated memory", N);
R->addRange(Range);
if (Sym) {
R->markInteresting(Sym);
R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym));
}
C.emitReport(std::move(R));
}
}
ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
const CallExpr *CE,
bool FreesOnFail,
ProgramStateRef State) const {
if (!State)
return nullptr;
if (CE->getNumArgs() < 2)
return nullptr;
const Expr *arg0Expr = CE->getArg(0);
const LocationContext *LCtx = C.getLocationContext();
SVal Arg0Val = State->getSVal(arg0Expr, LCtx);
if (!Arg0Val.getAs<DefinedOrUnknownSVal>())
return nullptr;
DefinedOrUnknownSVal arg0Val = Arg0Val.castAs<DefinedOrUnknownSVal>();
SValBuilder &svalBuilder = C.getSValBuilder();
DefinedOrUnknownSVal PtrEQ =
svalBuilder.evalEQ(State, arg0Val, svalBuilder.makeNull());
// Get the size argument. If there is no size arg then give up.
const Expr *Arg1 = CE->getArg(1);
if (!Arg1)
return nullptr;
// Get the value of the size argument.
SVal Arg1ValG = State->getSVal(Arg1, LCtx);
if (!Arg1ValG.getAs<DefinedOrUnknownSVal>())
return nullptr;
DefinedOrUnknownSVal Arg1Val = Arg1ValG.castAs<DefinedOrUnknownSVal>();
// Compare the size argument to 0.
DefinedOrUnknownSVal SizeZero =
svalBuilder.evalEQ(State, Arg1Val,
svalBuilder.makeIntValWithPtrWidth(0, false));
ProgramStateRef StatePtrIsNull, StatePtrNotNull;
std::tie(StatePtrIsNull, StatePtrNotNull) = State->assume(PtrEQ);
ProgramStateRef StateSizeIsZero, StateSizeNotZero;
std::tie(StateSizeIsZero, StateSizeNotZero) = State->assume(SizeZero);
// We only assume exceptional states if they are definitely true; if the
// state is under-constrained, assume regular realloc behavior.
  bool PtrIsNull = StatePtrIsNull && !StatePtrNotNull;
bool SizeIsZero = StateSizeIsZero && !StateSizeNotZero;
// If the ptr is NULL and the size is not 0, the call is equivalent to
// malloc(size).
  if (PtrIsNull && !SizeIsZero) {
ProgramStateRef stateMalloc = MallocMemAux(C, CE, CE->getArg(1),
UndefinedVal(), StatePtrIsNull);
return stateMalloc;
}
  if (PtrIsNull && SizeIsZero)
return nullptr;
// Get the from and to pointer symbols as in toPtr = realloc(fromPtr, size).
  assert(!PtrIsNull);
SymbolRef FromPtr = arg0Val.getAsSymbol();
SVal RetVal = State->getSVal(CE, LCtx);
SymbolRef ToPtr = RetVal.getAsSymbol();
if (!FromPtr || !ToPtr)
return nullptr;
bool ReleasedAllocated = false;
// If the size is 0, free the memory.
if (SizeIsZero)
if (ProgramStateRef stateFree = FreeMemAux(C, CE, StateSizeIsZero, 0,
false, ReleasedAllocated)){
      // The semantics of the return value are:
      // If size was equal to 0, either NULL or a pointer suitable to be passed
      // to free() is returned. We just free the input pointer and do not add
      // any constraints on the output pointer.
return stateFree;
}
// Default behavior.
if (ProgramStateRef stateFree =
FreeMemAux(C, CE, State, 0, false, ReleasedAllocated)) {
ProgramStateRef stateRealloc = MallocMemAux(C, CE, CE->getArg(1),
UnknownVal(), stateFree);
if (!stateRealloc)
return nullptr;
ReallocPairKind Kind = RPToBeFreedAfterFailure;
if (FreesOnFail)
Kind = RPIsFreeOnFailure;
else if (!ReleasedAllocated)
Kind = RPDoNotTrackAfterFailure;
// Record the info about the reallocated symbol so that we could properly
// process failed reallocation.
stateRealloc = stateRealloc->set<ReallocPairs>(ToPtr,
ReallocPair(FromPtr, Kind));
// The reallocated symbol should stay alive for as long as the new symbol.
C.getSymbolManager().addSymbolDependency(ToPtr, FromPtr);
return stateRealloc;
}
return nullptr;
}
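// Sketch of the three realloc outcomes modeled above (hypothetical code):
//
//   p = realloc(NULL, n);  // treated like malloc(n)
//   p = realloc(q, 0);     // frees q; no constraint is placed on p
//   p = realloc(q, n);     // q freed plus a fresh allocation, recorded as
//                          // a ReallocPair so a NULL result can restore q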
ProgramStateRef MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE,
ProgramStateRef State) {
if (!State)
return nullptr;
if (CE->getNumArgs() < 2)
return nullptr;
SValBuilder &svalBuilder = C.getSValBuilder();
const LocationContext *LCtx = C.getLocationContext();
SVal count = State->getSVal(CE->getArg(0), LCtx);
SVal elementSize = State->getSVal(CE->getArg(1), LCtx);
SVal TotalSize = svalBuilder.evalBinOp(State, BO_Mul, count, elementSize,
svalBuilder.getContext().getSizeType());
SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy);
return MallocMemAux(C, CE, TotalSize, zeroVal, State);
}
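// Hedged example: for 'calloc(count, size)' the extent is modeled as
// count * size and the region is bound to zero, so a hypothetical
// 'int *p = calloc(4, sizeof(int));' yields a 16-byte zero-filled region
// (assuming 4-byte int; the multiplication itself is not checked for
// overflow here).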
LeakInfo
MallocChecker::getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
CheckerContext &C) const {
const LocationContext *LeakContext = N->getLocationContext();
// Walk the ExplodedGraph backwards and find the first node that referred to
// the tracked symbol.
const ExplodedNode *AllocNode = N;
const MemRegion *ReferenceRegion = nullptr;
while (N) {
ProgramStateRef State = N->getState();
if (!State->get<RegionState>(Sym))
break;
// Find the most recent expression bound to the symbol in the current
// context.
if (!ReferenceRegion) {
if (const MemRegion *MR = C.getLocationRegionIfPostStore(N)) {
SVal Val = State->getSVal(MR);
if (Val.getAsLocSymbol() == Sym) {
const VarRegion* VR = MR->getBaseRegion()->getAs<VarRegion>();
// Do not show local variables belonging to a function other than
// where the error is reported.
if (!VR ||
(VR->getStackFrame() == LeakContext->getCurrentStackFrame()))
ReferenceRegion = MR;
}
}
}
    // The allocation node is the last node in the current or parent context
    // in which the symbol was tracked.
const LocationContext *NContext = N->getLocationContext();
if (NContext == LeakContext ||
NContext->isParentOf(LeakContext))
AllocNode = N;
N = N->pred_empty() ? nullptr : *(N->pred_begin());
}
return LeakInfo(AllocNode, ReferenceRegion);
}
void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
CheckerContext &C) const {
if (!ChecksEnabled[CK_MallocChecker] &&
!ChecksEnabled[CK_NewDeleteLeaksChecker])
return;
const RefState *RS = C.getState()->get<RegionState>(Sym);
assert(RS && "cannot leak an untracked symbol");
AllocationFamily Family = RS->getAllocationFamily();
if (Family == AF_Alloca)
return;
Optional<MallocChecker::CheckKind>
CheckKind = getCheckIfTracked(Family, true);
if (!CheckKind.hasValue())
return;
assert(N);
if (!BT_Leak[*CheckKind]) {
BT_Leak[*CheckKind].reset(
new BugType(CheckNames[*CheckKind], "Memory leak", "Memory Error"));
// Leaks should not be reported if they are post-dominated by a sink:
// (1) Sinks are higher importance bugs.
// (2) NoReturnFunctionChecker uses sink nodes to represent paths ending
// with __noreturn functions such as assert() or exit(). We choose not
// to report leaks on such paths.
BT_Leak[*CheckKind]->setSuppressOnSink(true);
}
// Most bug reports are cached at the location where they occurred.
// With leaks, we want to unique them by the location where they were
// allocated, and only report a single path.
PathDiagnosticLocation LocUsedForUniqueing;
const ExplodedNode *AllocNode = nullptr;
const MemRegion *Region = nullptr;
std::tie(AllocNode, Region) = getAllocationSite(N, Sym, C);
ProgramPoint P = AllocNode->getLocation();
const Stmt *AllocationStmt = nullptr;
if (Optional<CallExitEnd> Exit = P.getAs<CallExitEnd>())
AllocationStmt = Exit->getCalleeContext()->getCallSite();
else if (Optional<StmtPoint> SP = P.getAs<StmtPoint>())
AllocationStmt = SP->getStmt();
if (AllocationStmt)
LocUsedForUniqueing = PathDiagnosticLocation::createBegin(AllocationStmt,
C.getSourceManager(),
AllocNode->getLocationContext());
SmallString<200> buf;
llvm::raw_svector_ostream os(buf);
if (Region && Region->canPrintPretty()) {
os << "Potential leak of memory pointed to by ";
Region->printPretty(os);
} else {
os << "Potential memory leak";
}
auto R = llvm::make_unique<BugReport>(
*BT_Leak[*CheckKind], os.str(), N, LocUsedForUniqueing,
AllocNode->getLocationContext()->getDecl());
R->markInteresting(Sym);
R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym, true));
C.emitReport(std::move(R));
}
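// Hypothetical leak this report describes, uniqued by the allocation site
// rather than by the point where the symbol dies:
//
//   void f() {
//     char *p = (char *)malloc(10);
//   }   // Sym(p) dies while still Allocated ->
//       // "Potential leak of memory pointed to by 'p'"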
void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
CheckerContext &C) const
{
if (!SymReaper.hasDeadSymbols())
return;
ProgramStateRef state = C.getState();
RegionStateTy RS = state->get<RegionState>();
RegionStateTy::Factory &F = state->get_context<RegionState>();
SmallVector<SymbolRef, 2> Errors;
for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
if (SymReaper.isDead(I->first)) {
if (I->second.isAllocated() || I->second.isAllocatedOfSizeZero())
Errors.push_back(I->first);
// Remove the dead symbol from the map.
RS = F.remove(RS, I->first);
}
}
// Cleanup the Realloc Pairs Map.
ReallocPairsTy RP = state->get<ReallocPairs>();
for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
if (SymReaper.isDead(I->first) ||
SymReaper.isDead(I->second.ReallocatedSym)) {
state = state->remove<ReallocPairs>(I->first);
}
}
// Cleanup the FreeReturnValue Map.
FreeReturnValueTy FR = state->get<FreeReturnValue>();
for (FreeReturnValueTy::iterator I = FR.begin(), E = FR.end(); I != E; ++I) {
if (SymReaper.isDead(I->first) ||
SymReaper.isDead(I->second)) {
state = state->remove<FreeReturnValue>(I->first);
}
}
// Generate leak node.
ExplodedNode *N = C.getPredecessor();
if (!Errors.empty()) {
static CheckerProgramPointTag Tag("MallocChecker", "DeadSymbolsLeak");
N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
for (SmallVectorImpl<SymbolRef>::iterator
I = Errors.begin(), E = Errors.end(); I != E; ++I) {
reportLeak(*I, N, C);
}
}
C.addTransition(state->set<RegionState>(RS), N);
}
void MallocChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
if (const CXXDestructorCall *DC = dyn_cast<CXXDestructorCall>(&Call)) {
SymbolRef Sym = DC->getCXXThisVal().getAsSymbol();
if (!Sym || checkDoubleDelete(Sym, C))
return;
}
// We will check for double free in the post visit.
if (const AnyFunctionCall *FC = dyn_cast<AnyFunctionCall>(&Call)) {
const FunctionDecl *FD = FC->getDecl();
if (!FD)
return;
ASTContext &Ctx = C.getASTContext();
if (ChecksEnabled[CK_MallocChecker] &&
(isCMemFunction(FD, Ctx, AF_Malloc, MemoryOperationKind::MOK_Free) ||
isCMemFunction(FD, Ctx, AF_IfNameIndex,
MemoryOperationKind::MOK_Free)))
return;
if (ChecksEnabled[CK_NewDeleteChecker] &&
isStandardNewDelete(FD, Ctx))
return;
}
// Check if the callee of a method is deleted.
if (const CXXInstanceCall *CC = dyn_cast<CXXInstanceCall>(&Call)) {
SymbolRef Sym = CC->getCXXThisVal().getAsSymbol();
if (!Sym || checkUseAfterFree(Sym, C, CC->getCXXThisExpr()))
return;
}
// Check arguments for being used after free.
for (unsigned I = 0, E = Call.getNumArgs(); I != E; ++I) {
SVal ArgSVal = Call.getArgSVal(I);
if (ArgSVal.getAs<Loc>()) {
SymbolRef Sym = ArgSVal.getAsSymbol();
if (!Sym)
continue;
if (checkUseAfterFree(Sym, C, Call.getArgExpr(I)))
return;
}
}
}
void MallocChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const {
const Expr *E = S->getRetValue();
if (!E)
return;
// Check if we are returning a symbol.
ProgramStateRef State = C.getState();
SVal RetVal = State->getSVal(E, C.getLocationContext());
SymbolRef Sym = RetVal.getAsSymbol();
if (!Sym)
// If we are returning a field of the allocated struct or an array element,
// the callee could still free the memory.
// TODO: This logic should be a part of generic symbol escape callback.
if (const MemRegion *MR = RetVal.getAsRegion())
if (isa<FieldRegion>(MR) || isa<ElementRegion>(MR))
if (const SymbolicRegion *BMR =
dyn_cast<SymbolicRegion>(MR->getBaseRegion()))
Sym = BMR->getSymbol();
// Check if we are returning freed memory.
if (Sym)
checkUseAfterFree(Sym, C, E);
}
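// Hedged examples for the return-value check above:
//
//   free(p); return p;         // direct symbol: use after free
//   free(s); return s->field;  // field of the freed struct: the base
//                              // region's symbol is recovered and checked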
// TODO: Blocks should be either inlined or should call invalidate regions
// upon invocation. After that's in place, special casing here will not be
// needed.
void MallocChecker::checkPostStmt(const BlockExpr *BE,
CheckerContext &C) const {
  // Scan the BlockDeclRefExprs for any object the retain count checker
  // may be tracking.
if (!BE->getBlockDecl()->hasCaptures())
return;
ProgramStateRef state = C.getState();
const BlockDataRegion *R =
cast<BlockDataRegion>(state->getSVal(BE,
C.getLocationContext()).getAsRegion());
BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
E = R->referenced_vars_end();
if (I == E)
return;
SmallVector<const MemRegion*, 10> Regions;
const LocationContext *LC = C.getLocationContext();
MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
for ( ; I != E; ++I) {
const VarRegion *VR = I.getCapturedRegion();
if (VR->getSuperRegion() == R) {
VR = MemMgr.getVarRegion(VR->getDecl(), LC);
}
Regions.push_back(VR);
}
state =
state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
Regions.data() + Regions.size()).getState();
C.addTransition(state);
}
bool MallocChecker::isReleased(SymbolRef Sym, CheckerContext &C) const {
assert(Sym);
const RefState *RS = C.getState()->get<RegionState>(Sym);
return (RS && RS->isReleased());
}
bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
const Stmt *S) const {
if (isReleased(Sym, C)) {
ReportUseAfterFree(C, S->getSourceRange(), Sym);
return true;
}
return false;
}
void MallocChecker::checkUseZeroAllocated(SymbolRef Sym, CheckerContext &C,
const Stmt *S) const {
assert(Sym);
const RefState *RS = C.getState()->get<RegionState>(Sym);
if (RS && RS->isAllocatedOfSizeZero())
ReportUseZeroAllocated(C, RS->getStmt()->getSourceRange(), Sym);
}
bool MallocChecker::checkDoubleDelete(SymbolRef Sym, CheckerContext &C) const {
if (isReleased(Sym, C)) {
ReportDoubleDelete(C, Sym);
return true;
}
return false;
}
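// Hypothetical double delete caught through this predicate from
// checkPreCall's destructor handling:
//
//   A *a = new A;
//   delete a;   // Sym(a) -> Released
//   delete a;   // isReleased() is true -> ReportDoubleDelete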
// Check if the location is a freed symbolic region.
void MallocChecker::checkLocation(SVal l, bool isLoad, const Stmt *S,
CheckerContext &C) const {
SymbolRef Sym = l.getLocSymbolInBase();
if (Sym) {
checkUseAfterFree(Sym, C, S);
checkUseZeroAllocated(Sym, C, S);
}
}
// If a symbolic region is assumed to be NULL (or another constant), stop
// tracking it - assuming that allocation failed on this path.
ProgramStateRef MallocChecker::evalAssume(ProgramStateRef state,
SVal Cond,
bool Assumption) const {
RegionStateTy RS = state->get<RegionState>();
for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
// If the symbol is assumed to be NULL, remove it from consideration.
ConstraintManager &CMgr = state->getConstraintManager();
ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
if (AllocFailed.isConstrainedTrue())
state = state->remove<RegionState>(I.getKey());
}
// Realloc returns 0 when reallocation fails, which means that we should
// restore the state of the pointer being reallocated.
ReallocPairsTy RP = state->get<ReallocPairs>();
for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
// If the symbol is assumed to be NULL, remove it from consideration.
ConstraintManager &CMgr = state->getConstraintManager();
ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
if (!AllocFailed.isConstrainedTrue())
continue;
SymbolRef ReallocSym = I.getData().ReallocatedSym;
if (const RefState *RS = state->get<RegionState>(ReallocSym)) {
if (RS->isReleased()) {
if (I.getData().Kind == RPToBeFreedAfterFailure)
state = state->set<RegionState>(ReallocSym,
RefState::getAllocated(RS->getAllocationFamily(), RS->getStmt()));
else if (I.getData().Kind == RPDoNotTrackAfterFailure)
state = state->remove<RegionState>(ReallocSym);
else
assert(I.getData().Kind == RPIsFreeOnFailure);
}
}
state = state->remove<ReallocPairs>(I.getKey());
}
return state;
}
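// Sketch of the failed-reallocation path handled above (hypothetical):
//
//   char *q = realloc(p, n);
//   if (!q) {
//     // Sym(q) is assumed NULL here, so the recorded ReallocPair restores
//     // Sym(p) to Allocated (RPToBeFreedAfterFailure): p is still live and
//     // must still be freed on this branch.
//   }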
bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
const CallEvent *Call,
ProgramStateRef State,
SymbolRef &EscapingSymbol) const {
assert(Call);
EscapingSymbol = nullptr;
// For now, assume that any C++ or block call can free memory.
// TODO: If we want to be more optimistic here, we'll need to make sure that
// regions escape to C++ containers. They seem to do that even now, but for
// mysterious reasons.
if (!(isa<SimpleFunctionCall>(Call) || isa<ObjCMethodCall>(Call)))
return true;
// Check Objective-C messages by selector name.
if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
// If it's not a framework call, or if it takes a callback, assume it
// can free memory.
if (!Call->isInSystemHeader() || Call->hasNonZeroCallbackArg())
return true;
// If it's a method we know about, handle it explicitly post-call.
// This should happen before the "freeWhenDone" check below.
if (isKnownDeallocObjCMethodName(*Msg))
return false;
// If there's a "freeWhenDone" parameter, but the method isn't one we know
// about, we can't be sure that the object will use free() to deallocate the
// memory, so we can't model it explicitly. The best we can do is use it to
// decide whether the pointer escapes.
if (Optional<bool> FreeWhenDone = getFreeWhenDoneArg(*Msg))
return *FreeWhenDone;
// If the first selector piece ends with "NoCopy", and there is no
// "freeWhenDone" parameter set to zero, we know ownership is being
// transferred. Again, though, we can't be sure that the object will use
// free() to deallocate the memory, so we can't model it explicitly.
StringRef FirstSlot = Msg->getSelector().getNameForSlot(0);
if (FirstSlot.endswith("NoCopy"))
return true;
// If the first selector starts with addPointer, insertPointer,
// or replacePointer, assume we are dealing with NSPointerArray or similar.
// This is similar to C++ containers (vector); we still might want to check
// that the pointers get freed by following the container itself.
if (FirstSlot.startswith("addPointer") ||
FirstSlot.startswith("insertPointer") ||
FirstSlot.startswith("replacePointer") ||
FirstSlot.equals("valueWithPointer")) {
return true;
}
    // We should escape the receiver on calls to 'init'. This is especially
    // relevant because the corresponding symbol is usually not referenced
    // after the call.
if (Msg->getMethodFamily() == OMF_init) {
EscapingSymbol = Msg->getReceiverSVal().getAsSymbol();
return true;
}
// Otherwise, assume that the method does not free memory.
// Most framework methods do not free memory.
return false;
}
// At this point the only thing left to handle is straight function calls.
const FunctionDecl *FD = cast<SimpleFunctionCall>(Call)->getDecl();
if (!FD)
return true;
ASTContext &ASTC = State->getStateManager().getContext();
// If it's one of the allocation functions we can reason about, we model
// its behavior explicitly.
if (isMemFunction(FD, ASTC))
return false;
// If it's not a system call, assume it frees memory.
if (!Call->isInSystemHeader())
return true;
// White list the system functions whose arguments escape.
const IdentifierInfo *II = FD->getIdentifier();
if (!II)
return true;
StringRef FName = II->getName();
  // White list the 'XXXNoCopy' CoreFoundation functions.
  // We check these explicitly, before the generic argument-escape handling
  // below, because ownership is not transferred when the deallocator
  // argument is 'kCFAllocatorNull'.
if (FName.endswith("NoCopy")) {
// Look for the deallocator argument. We know that the memory ownership
// is not transferred only if the deallocator argument is
// 'kCFAllocatorNull'.
for (unsigned i = 1; i < Call->getNumArgs(); ++i) {
const Expr *ArgE = Call->getArgExpr(i)->IgnoreParenCasts();
if (const DeclRefExpr *DE = dyn_cast<DeclRefExpr>(ArgE)) {
StringRef DeallocatorName = DE->getFoundDecl()->getName();
if (DeallocatorName == "kCFAllocatorNull")
return false;
}
}
return true;
}
// Associating streams with malloced buffers. The pointer can escape if
// 'closefn' is specified (and if that function does free memory),
// but it will not if closefn is not specified.
// Currently, we do not inspect the 'closefn' function (PR12101).
if (FName == "funopen")
    if (Call->getNumArgs() >= 5 && Call->getArgSVal(4).isConstant(0))
return false;
// Do not warn on pointers passed to 'setbuf' when used with std streams,
// these leaks might be intentional when setting the buffer for stdio.
// http://stackoverflow.com/questions/2671151/who-frees-setvbuf-buffer
if (FName == "setbuf" || FName =="setbuffer" ||
FName == "setlinebuf" || FName == "setvbuf") {
if (Call->getNumArgs() >= 1) {
const Expr *ArgE = Call->getArgExpr(0)->IgnoreParenCasts();
if (const DeclRefExpr *ArgDRE = dyn_cast<DeclRefExpr>(ArgE))
if (const VarDecl *D = dyn_cast<VarDecl>(ArgDRE->getDecl()))
if (D->getCanonicalDecl()->getName().find("std") != StringRef::npos)
return true;
}
}
// A bunch of other functions which either take ownership of a pointer or
// wrap the result up in a struct or object, meaning it can be freed later.
// (See RetainCountChecker.) Not all the parameters here are invalidated,
// but the Malloc checker cannot differentiate between them. The right way
// of doing this would be to implement a pointer escapes callback.
if (FName == "CGBitmapContextCreate" ||
FName == "CGBitmapContextCreateWithData" ||
FName == "CVPixelBufferCreateWithBytes" ||
FName == "CVPixelBufferCreateWithPlanarBytes" ||
FName == "OSAtomicEnqueue") {
return true;
}
// Handle cases where we know a buffer's /address/ can escape.
// Note that the above checks handle some special cases where we know that
// even though the address escapes, it's still our responsibility to free the
// buffer.
if (Call->argumentsMayEscape())
return true;
// Otherwise, assume that the function does not free memory.
// Most system calls do not free the memory.
return false;
}
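// Hedged examples of the whitelist decisions above (hypothetical calls):
//
//   CFStringCreateWithCStringNoCopy(0, buf, enc, kCFAllocatorNull);
//     // deallocator is kCFAllocatorNull -> ownership stays with the
//     // caller, so 'buf' remains tracked (returns false)
//   setvbuf(stdout, buf, _IOFBF, n);
//     // std stream -> 'buf' is treated as escaped and not reported
//     // as leaked (returns true)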
static bool retTrue(const RefState *RS) {
return true;
}
static bool checkIfNewOrNewArrayFamily(const RefState *RS) {
return (RS->getAllocationFamily() == AF_CXXNewArray ||
RS->getAllocationFamily() == AF_CXXNew);
}
ProgramStateRef MallocChecker::checkPointerEscape(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
PointerEscapeKind Kind) const {
return checkPointerEscapeAux(State, Escaped, Call, Kind, &retTrue);
}
ProgramStateRef MallocChecker::checkConstPointerEscape(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
PointerEscapeKind Kind) const {
return checkPointerEscapeAux(State, Escaped, Call, Kind,
&checkIfNewOrNewArrayFamily);
}
ProgramStateRef MallocChecker::checkPointerEscapeAux(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
PointerEscapeKind Kind,
bool(*CheckRefState)(const RefState*)) const {
// If we know that the call does not free memory, or we want to process the
// call later, keep tracking the top level arguments.
SymbolRef EscapingSymbol = nullptr;
if (Kind == PSK_DirectEscapeOnCall &&
!mayFreeAnyEscapedMemoryOrIsModeledExplicitly(Call, State,
EscapingSymbol) &&
!EscapingSymbol) {
return State;
}
for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
E = Escaped.end();
I != E; ++I) {
SymbolRef sym = *I;
if (EscapingSymbol && EscapingSymbol != sym)
continue;
if (const RefState *RS = State->get<RegionState>(sym)) {
if ((RS->isAllocated() || RS->isAllocatedOfSizeZero()) &&
CheckRefState(RS)) {
State = State->remove<RegionState>(sym);
State = State->set<RegionState>(sym, RefState::getEscaped(RS));
}
}
}
return State;
}
static SymbolRef findFailedReallocSymbol(ProgramStateRef currState,
ProgramStateRef prevState) {
ReallocPairsTy currMap = currState->get<ReallocPairs>();
ReallocPairsTy prevMap = prevState->get<ReallocPairs>();
for (ReallocPairsTy::iterator I = prevMap.begin(), E = prevMap.end();
I != E; ++I) {
SymbolRef sym = I.getKey();
if (!currMap.lookup(sym))
return sym;
}
return nullptr;
}
PathDiagnosticPiece *
MallocChecker::MallocBugVisitor::VisitNode(const ExplodedNode *N,
const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) {
ProgramStateRef state = N->getState();
ProgramStateRef statePrev = PrevN->getState();
const RefState *RS = state->get<RegionState>(Sym);
const RefState *RSPrev = statePrev->get<RegionState>(Sym);
if (!RS)
return nullptr;
const Stmt *S = nullptr;
const char *Msg = nullptr;
StackHintGeneratorForSymbol *StackHint = nullptr;
// Retrieve the associated statement.
ProgramPoint ProgLoc = N->getLocation();
if (Optional<StmtPoint> SP = ProgLoc.getAs<StmtPoint>()) {
S = SP->getStmt();
} else if (Optional<CallExitEnd> Exit = ProgLoc.getAs<CallExitEnd>()) {
S = Exit->getCalleeContext()->getCallSite();
} else if (Optional<BlockEdge> Edge = ProgLoc.getAs<BlockEdge>()) {
// If an assumption was made on a branch, it should be caught
// here by looking at the state transition.
S = Edge->getSrc()->getTerminator();
}
if (!S)
return nullptr;
// FIXME: We will eventually need to handle non-statement-based events
// (__attribute__((cleanup))).
// Find out if this is an interesting point and what is the kind.
if (Mode == Normal) {
if (isAllocated(RS, RSPrev, S)) {
Msg = "Memory is allocated";
StackHint = new StackHintGeneratorForSymbol(Sym,
"Returned allocated memory");
} else if (isReleased(RS, RSPrev, S)) {
Msg = "Memory is released";
StackHint = new StackHintGeneratorForSymbol(Sym,
"Returning; memory was released");
} else if (isRelinquished(RS, RSPrev, S)) {
Msg = "Memory ownership is transferred";
StackHint = new StackHintGeneratorForSymbol(Sym, "");
} else if (isReallocFailedCheck(RS, RSPrev, S)) {
Mode = ReallocationFailed;
Msg = "Reallocation failed";
StackHint = new StackHintGeneratorForReallocationFailed(Sym,
"Reallocation failed");
if (SymbolRef sym = findFailedReallocSymbol(state, statePrev)) {
// Is it possible to fail two reallocs WITHOUT testing in between?
assert((!FailedReallocSymbol || FailedReallocSymbol == sym) &&
"We only support one failed realloc at a time.");
BR.markInteresting(sym);
FailedReallocSymbol = sym;
}
}
// We are in a special mode if a reallocation failed later in the path.
} else if (Mode == ReallocationFailed) {
assert(FailedReallocSymbol && "No symbol to look for.");
    // Is this the first appearance of the reallocated symbol?
if (!statePrev->get<RegionState>(FailedReallocSymbol)) {
// We're at the reallocation point.
Msg = "Attempt to reallocate memory";
StackHint = new StackHintGeneratorForSymbol(Sym,
"Returned reallocated memory");
FailedReallocSymbol = nullptr;
Mode = Normal;
}
}
if (!Msg)
return nullptr;
assert(StackHint);
// Generate the extra diagnostic.
PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
N->getLocationContext());
return new PathDiagnosticEventPiece(Pos, Msg, true, StackHint);
}
void MallocChecker::printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const {
RegionStateTy RS = State->get<RegionState>();
if (!RS.isEmpty()) {
Out << Sep << "MallocChecker :" << NL;
for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
const RefState *RefS = State->get<RegionState>(I.getKey());
AllocationFamily Family = RefS->getAllocationFamily();
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
if (!CheckKind.hasValue())
CheckKind = getCheckIfTracked(Family, true);
I.getKey()->dumpToStream(Out);
Out << " : ";
I.getData().dump(Out);
if (CheckKind.hasValue())
Out << " (" << CheckNames[*CheckKind].getName() << ")";
Out << NL;
}
}
}
void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) {
registerCStringCheckerBasic(mgr);
MallocChecker *checker = mgr.registerChecker<MallocChecker>();
checker->IsOptimistic = mgr.getAnalyzerOptions().getBooleanOption(
"Optimistic", false, checker);
checker->ChecksEnabled[MallocChecker::CK_NewDeleteLeaksChecker] = true;
checker->CheckNames[MallocChecker::CK_NewDeleteLeaksChecker] =
mgr.getCurrentCheckName();
// We currently treat NewDeleteLeaks checker as a subchecker of NewDelete
// checker.
if (!checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker])
checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker] = true;
}
#define REGISTER_CHECKER(name) \
void ento::register##name(CheckerManager &mgr) { \
registerCStringCheckerBasic(mgr); \
MallocChecker *checker = mgr.registerChecker<MallocChecker>(); \
checker->IsOptimistic = mgr.getAnalyzerOptions().getBooleanOption( \
"Optimistic", false, checker); \
checker->ChecksEnabled[MallocChecker::CK_##name] = true; \
checker->CheckNames[MallocChecker::CK_##name] = mgr.getCurrentCheckName(); \
}
REGISTER_CHECKER(MallocChecker)
REGISTER_CHECKER(NewDeleteChecker)
REGISTER_CHECKER(MismatchedDeallocatorChecker)
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp | //===--- UndefinedAssignmentChecker.h ---------------------------*- C++ -*--==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This defines UndefinedAssignmentChecker, a builtin check in ExprEngine that
// checks for assigning undefined values.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
using namespace clang;
using namespace ento;
namespace {
class UndefinedAssignmentChecker
: public Checker<check::Bind> {
mutable std::unique_ptr<BugType> BT;
public:
void checkBind(SVal location, SVal val, const Stmt *S,
CheckerContext &C) const;
};
}
void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
const Stmt *StoreE,
CheckerContext &C) const {
if (!val.isUndef())
return;
  // Do not report assignments of uninitialized values inside swap functions.
  // This should allow swapping partially uninitialized structs
  // (radar://14129997)
if (const FunctionDecl *EnclosingFunctionDecl =
dyn_cast<FunctionDecl>(C.getStackFrame()->getDecl()))
if (C.getCalleeName(EnclosingFunctionDecl) == "swap")
return;
ExplodedNode *N = C.generateSink();
if (!N)
return;
const char *str = "Assigned value is garbage or undefined";
if (!BT)
BT.reset(new BuiltinBug(this, str));
// Generate a report for this bug.
const Expr *ex = nullptr;
while (StoreE) {
if (const BinaryOperator *B = dyn_cast<BinaryOperator>(StoreE)) {
if (B->isCompoundAssignmentOp()) {
ProgramStateRef state = C.getState();
if (state->getSVal(B->getLHS(), C.getLocationContext()).isUndef()) {
str = "The left expression of the compound assignment is an "
"uninitialized value. The computed value will also be garbage";
ex = B->getLHS();
break;
}
}
ex = B->getRHS();
break;
}
    if (const DeclStmt *DS = dyn_cast<DeclStmt>(StoreE)) {
      // getSingleDecl() may yield a non-VarDecl; guard the cast result
      // before dereferencing it.
      if (const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl()))
        ex = VD->getInit();
    }
break;
}
auto R = llvm::make_unique<BugReport>(*BT, str, N);
if (ex) {
R->addRange(ex->getSourceRange());
bugreporter::trackNullOrUndefValue(N, ex, *R);
}
C.emitReport(std::move(R));
}
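// Hypothetical triggers for the messages built above:
//
//   int x;
//   int y = x;   // "Assigned value is garbage or undefined"
//   x += 1;      // compound assignment whose LHS is uninitialized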
void ento::registerUndefinedAssignmentChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefinedAssignmentChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp | //= CStringChecker.cpp - Checks calls to C string functions --------*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This defines CStringChecker, which is an assortment of checks on calls
// to functions in <string.h>.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "InterCheckerAPI.h"
#include "clang/Basic/CharInfo.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
namespace {
class CStringChecker : public Checker< eval::Call,
check::PreStmt<DeclStmt>,
check::LiveSymbols,
check::DeadSymbols,
check::RegionChanges
> {
mutable std::unique_ptr<BugType> BT_Null, BT_Bounds, BT_Overlap,
BT_NotCString, BT_AdditionOverflow;
mutable const char *CurrentFunctionDescription;
public:
/// The filter is used to filter out the diagnostics which are not enabled by
/// the user.
struct CStringChecksFilter {
DefaultBool CheckCStringNullArg;
DefaultBool CheckCStringOutOfBounds;
DefaultBool CheckCStringBufferOverlap;
DefaultBool CheckCStringNotNullTerm;
CheckName CheckNameCStringNullArg;
CheckName CheckNameCStringOutOfBounds;
CheckName CheckNameCStringBufferOverlap;
CheckName CheckNameCStringNotNullTerm;
};
CStringChecksFilter Filter;
static void *getTag() { static int tag; return &tag; }
bool evalCall(const CallExpr *CE, CheckerContext &C) const;
void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
void checkLiveSymbols(ProgramStateRef state, SymbolReaper &SR) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
bool wantsRegionChangeUpdate(ProgramStateRef state) const;
ProgramStateRef
checkRegionChanges(ProgramStateRef state,
const InvalidatedSymbols *,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
const CallEvent *Call) const;
typedef void (CStringChecker::*FnCheck)(CheckerContext &,
const CallExpr *) const;
void evalMemcpy(CheckerContext &C, const CallExpr *CE) const;
void evalMempcpy(CheckerContext &C, const CallExpr *CE) const;
void evalMemmove(CheckerContext &C, const CallExpr *CE) const;
void evalBcopy(CheckerContext &C, const CallExpr *CE) const;
void evalCopyCommon(CheckerContext &C, const CallExpr *CE,
ProgramStateRef state,
const Expr *Size,
const Expr *Source,
const Expr *Dest,
bool Restricted = false,
bool IsMempcpy = false) const;
void evalMemcmp(CheckerContext &C, const CallExpr *CE) const;
void evalstrLength(CheckerContext &C, const CallExpr *CE) const;
void evalstrnLength(CheckerContext &C, const CallExpr *CE) const;
void evalstrLengthCommon(CheckerContext &C,
const CallExpr *CE,
bool IsStrnlen = false) const;
void evalStrcpy(CheckerContext &C, const CallExpr *CE) const;
void evalStrncpy(CheckerContext &C, const CallExpr *CE) const;
void evalStpcpy(CheckerContext &C, const CallExpr *CE) const;
void evalStrcpyCommon(CheckerContext &C,
const CallExpr *CE,
bool returnEnd,
bool isBounded,
bool isAppending) const;
void evalStrcat(CheckerContext &C, const CallExpr *CE) const;
void evalStrncat(CheckerContext &C, const CallExpr *CE) const;
void evalStrcmp(CheckerContext &C, const CallExpr *CE) const;
void evalStrncmp(CheckerContext &C, const CallExpr *CE) const;
void evalStrcasecmp(CheckerContext &C, const CallExpr *CE) const;
void evalStrncasecmp(CheckerContext &C, const CallExpr *CE) const;
void evalStrcmpCommon(CheckerContext &C,
const CallExpr *CE,
bool isBounded = false,
bool ignoreCase = false) const;
void evalStrsep(CheckerContext &C, const CallExpr *CE) const;
// Utility methods
std::pair<ProgramStateRef , ProgramStateRef >
static assumeZero(CheckerContext &C,
ProgramStateRef state, SVal V, QualType Ty);
static ProgramStateRef setCStringLength(ProgramStateRef state,
const MemRegion *MR,
SVal strLength);
static SVal getCStringLengthForRegion(CheckerContext &C,
ProgramStateRef &state,
const Expr *Ex,
const MemRegion *MR,
bool hypothetical);
SVal getCStringLength(CheckerContext &C,
ProgramStateRef &state,
const Expr *Ex,
SVal Buf,
bool hypothetical = false) const;
const StringLiteral *getCStringLiteral(CheckerContext &C,
ProgramStateRef &state,
const Expr *expr,
SVal val) const;
static ProgramStateRef InvalidateBuffer(CheckerContext &C,
ProgramStateRef state,
const Expr *Ex, SVal V,
bool IsSourceBuffer);
static bool SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
const MemRegion *MR);
// Re-usable checks
ProgramStateRef checkNonNull(CheckerContext &C,
ProgramStateRef state,
const Expr *S,
SVal l) const;
ProgramStateRef CheckLocation(CheckerContext &C,
ProgramStateRef state,
const Expr *S,
SVal l,
const char *message = nullptr) const;
ProgramStateRef CheckBufferAccess(CheckerContext &C,
ProgramStateRef state,
const Expr *Size,
const Expr *FirstBuf,
const Expr *SecondBuf,
const char *firstMessage = nullptr,
const char *secondMessage = nullptr,
bool WarnAboutSize = false) const;
ProgramStateRef CheckBufferAccess(CheckerContext &C,
ProgramStateRef state,
const Expr *Size,
const Expr *Buf,
const char *message = nullptr,
bool WarnAboutSize = false) const {
    // This is a convenience overload.
return CheckBufferAccess(C, state, Size, Buf, nullptr, message, nullptr,
WarnAboutSize);
}
ProgramStateRef CheckOverlap(CheckerContext &C,
ProgramStateRef state,
const Expr *Size,
const Expr *First,
const Expr *Second) const;
void emitOverlapBug(CheckerContext &C,
ProgramStateRef state,
const Stmt *First,
const Stmt *Second) const;
ProgramStateRef checkAdditionOverflow(CheckerContext &C,
ProgramStateRef state,
NonLoc left,
NonLoc right) const;
};
} //end anonymous namespace
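// Associates each memory region the checker reasons about with its (possibly
// symbolic) C string length. Entries are created lazily by
// getCStringLengthForRegion() and dropped when the underlying memory changes;
// see checkRegionChanges() and checkDeadSymbols() below.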
REGISTER_MAP_WITH_PROGRAMSTATE(CStringLength, const MemRegion *, SVal)
//===----------------------------------------------------------------------===//
// Individual checks and utility methods.
//===----------------------------------------------------------------------===//
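// Returns the pair of states obtained by assuming that V is zero and that V
// is non-zero, in that order. If V is not a DefinedSVal, neither assumption
// can be made, and the unchanged input state is returned for both halves.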
std::pair<ProgramStateRef, ProgramStateRef>
CStringChecker::assumeZero(CheckerContext &C, ProgramStateRef state, SVal V,
QualType Ty) {
Optional<DefinedSVal> val = V.getAs<DefinedSVal>();
if (!val)
    return std::pair<ProgramStateRef, ProgramStateRef>(state, state);
SValBuilder &svalBuilder = C.getSValBuilder();
DefinedOrUnknownSVal zero = svalBuilder.makeZeroVal(Ty);
return state->assume(svalBuilder.evalEQ(state, *val, zero));
}
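// Checks that the value 'l' of the argument expression 'S' is non-null. If
// 'l' is known to be null, a bug is reported and null is returned so that
// callers propagate the failure. An illustrative case this catches (assuming
// the CheckCStringNullArg filter is enabled):
//   char *p = NULL;
//   memcpy(dst, p, n);  // "Null pointer argument in call to ..."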
ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
ProgramStateRef state,
const Expr *S, SVal l) const {
// If a previous check has failed, propagate the failure.
if (!state)
return nullptr;
ProgramStateRef stateNull, stateNonNull;
std::tie(stateNull, stateNonNull) = assumeZero(C, state, l, S->getType());
if (stateNull && !stateNonNull) {
if (!Filter.CheckCStringNullArg)
return nullptr;
ExplodedNode *N = C.generateSink(stateNull);
if (!N)
return nullptr;
if (!BT_Null)
BT_Null.reset(new BuiltinBug(
Filter.CheckNameCStringNullArg, categories::UnixAPI,
"Null pointer argument in call to byte string function"));
SmallString<80> buf;
llvm::raw_svector_ostream os(buf);
assert(CurrentFunctionDescription);
os << "Null pointer argument in call to " << CurrentFunctionDescription;
// Generate a report for this bug.
BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Null.get());
auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
report->addRange(S->getSourceRange());
bugreporter::trackNullOrUndefValue(N, S, *report);
C.emitReport(std::move(report));
return nullptr;
}
// From here on, assume that the value is non-null.
assert(stateNonNull);
return stateNonNull;
}
// FIXME: This was originally copied from ArrayBoundChecker.cpp. Refactor?
ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
ProgramStateRef state,
const Expr *S, SVal l,
const char *warningMsg) const {
// If a previous check has failed, propagate the failure.
if (!state)
return nullptr;
// Check for out of bound array element access.
const MemRegion *R = l.getAsRegion();
if (!R)
return state;
const ElementRegion *ER = dyn_cast<ElementRegion>(R);
if (!ER)
return state;
assert(ER->getValueType() == C.getASTContext().CharTy &&
"CheckLocation should only be called with char* ElementRegions");
// Get the size of the array.
const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
SValBuilder &svalBuilder = C.getSValBuilder();
SVal Extent =
svalBuilder.convertToArrayIndex(superReg->getExtent(svalBuilder));
DefinedOrUnknownSVal Size = Extent.castAs<DefinedOrUnknownSVal>();
// Get the index of the accessed element.
DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
ProgramStateRef StInBound = state->assumeInBound(Idx, Size, true);
ProgramStateRef StOutBound = state->assumeInBound(Idx, Size, false);
if (StOutBound && !StInBound) {
ExplodedNode *N = C.generateSink(StOutBound);
if (!N)
return nullptr;
if (!BT_Bounds) {
BT_Bounds.reset(new BuiltinBug(
Filter.CheckNameCStringOutOfBounds, "Out-of-bound array access",
"Byte string function accesses out-of-bound array element"));
}
BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Bounds.get());
// Generate a report for this bug.
std::unique_ptr<BugReport> report;
if (warningMsg) {
report = llvm::make_unique<BugReport>(*BT, warningMsg, N);
} else {
assert(CurrentFunctionDescription);
assert(CurrentFunctionDescription[0] != '\0');
SmallString<80> buf;
llvm::raw_svector_ostream os(buf);
os << toUppercase(CurrentFunctionDescription[0])
<< &CurrentFunctionDescription[1]
<< " accesses out-of-bound array element";
report = llvm::make_unique<BugReport>(*BT, os.str(), N);
}
// FIXME: It would be nice to eventually make this diagnostic more clear,
// e.g., by referencing the original declaration or by saying *why* this
// reference is outside the range.
report->addRange(S->getSourceRange());
C.emitReport(std::move(report));
return nullptr;
}
// Array bound check succeeded. From this point forward the array bound
// should always succeed.
return StInBound;
}
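// Checks that accessing 'Size' bytes starting at each given buffer stays in
// bounds. An access of N bytes touches offsets 0 through N-1, so the check is
// performed on buf + (N - 1). An illustrative overflow this catches (assuming
// CheckCStringOutOfBounds is enabled):
//   char buf[4];
//   memcpy(buf, src, 8);  // buf + 7 is past the end of 'buf'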
ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
ProgramStateRef state,
const Expr *Size,
const Expr *FirstBuf,
const Expr *SecondBuf,
const char *firstMessage,
const char *secondMessage,
bool WarnAboutSize) const {
// If a previous check has failed, propagate the failure.
if (!state)
return nullptr;
SValBuilder &svalBuilder = C.getSValBuilder();
ASTContext &Ctx = svalBuilder.getContext();
const LocationContext *LCtx = C.getLocationContext();
QualType sizeTy = Size->getType();
QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
// Check that the first buffer is non-null.
SVal BufVal = state->getSVal(FirstBuf, LCtx);
state = checkNonNull(C, state, FirstBuf, BufVal);
if (!state)
return nullptr;
// If out-of-bounds checking is turned off, skip the rest.
if (!Filter.CheckCStringOutOfBounds)
return state;
// Get the access length and make sure it is known.
// FIXME: This assumes the caller has already checked that the access length
// is positive. And that it's unsigned.
SVal LengthVal = state->getSVal(Size, LCtx);
Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
if (!Length)
return state;
// Compute the offset of the last element to be accessed: size-1.
NonLoc One = svalBuilder.makeIntVal(1, sizeTy).castAs<NonLoc>();
NonLoc LastOffset = svalBuilder
.evalBinOpNN(state, BO_Sub, *Length, One, sizeTy).castAs<NonLoc>();
// Check that the first buffer is sufficiently long.
SVal BufStart = svalBuilder.evalCast(BufVal, PtrTy, FirstBuf->getType());
if (Optional<Loc> BufLoc = BufStart.getAs<Loc>()) {
const Expr *warningExpr = (WarnAboutSize ? Size : FirstBuf);
SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
LastOffset, PtrTy);
state = CheckLocation(C, state, warningExpr, BufEnd, firstMessage);
// If the buffer isn't large enough, abort.
if (!state)
return nullptr;
}
// If there's a second buffer, check it as well.
if (SecondBuf) {
BufVal = state->getSVal(SecondBuf, LCtx);
state = checkNonNull(C, state, SecondBuf, BufVal);
if (!state)
return nullptr;
BufStart = svalBuilder.evalCast(BufVal, PtrTy, SecondBuf->getType());
if (Optional<Loc> BufLoc = BufStart.getAs<Loc>()) {
const Expr *warningExpr = (WarnAboutSize ? Size : SecondBuf);
SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
LastOffset, PtrTy);
state = CheckLocation(C, state, warningExpr, BufEnd, secondMessage);
}
}
// Large enough or not, return this state!
return state;
}
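// An illustrative overlap that the following check catches for the
// restrict-qualified copy functions (memcpy, mempcpy):
//   char buf[8];
//   memcpy(buf + 1, buf, 4);  // "Arguments must not be overlapping buffers"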
ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
ProgramStateRef state,
const Expr *Size,
const Expr *First,
const Expr *Second) const {
if (!Filter.CheckCStringBufferOverlap)
return state;
// Do a simple check for overlap: if the two arguments are from the same
// buffer, see if the end of the first is greater than the start of the second
// or vice versa.
// If a previous check has failed, propagate the failure.
if (!state)
return nullptr;
ProgramStateRef stateTrue, stateFalse;
// Get the buffer values and make sure they're known locations.
const LocationContext *LCtx = C.getLocationContext();
SVal firstVal = state->getSVal(First, LCtx);
SVal secondVal = state->getSVal(Second, LCtx);
Optional<Loc> firstLoc = firstVal.getAs<Loc>();
if (!firstLoc)
return state;
Optional<Loc> secondLoc = secondVal.getAs<Loc>();
if (!secondLoc)
return state;
// Are the two values the same?
SValBuilder &svalBuilder = C.getSValBuilder();
std::tie(stateTrue, stateFalse) =
state->assume(svalBuilder.evalEQ(state, *firstLoc, *secondLoc));
if (stateTrue && !stateFalse) {
// If the values are known to be equal, that's automatically an overlap.
emitOverlapBug(C, stateTrue, First, Second);
return nullptr;
}
  // Assume the two expressions are not equal.
assert(stateFalse);
state = stateFalse;
// Which value comes first?
QualType cmpTy = svalBuilder.getConditionType();
SVal reverse = svalBuilder.evalBinOpLL(state, BO_GT,
*firstLoc, *secondLoc, cmpTy);
Optional<DefinedOrUnknownSVal> reverseTest =
reverse.getAs<DefinedOrUnknownSVal>();
if (!reverseTest)
return state;
std::tie(stateTrue, stateFalse) = state->assume(*reverseTest);
if (stateTrue) {
if (stateFalse) {
// If we don't know which one comes first, we can't perform this test.
return state;
} else {
// Switch the values so that firstVal is before secondVal.
std::swap(firstLoc, secondLoc);
// Switch the Exprs as well, so that they still correspond.
std::swap(First, Second);
}
}
// Get the length, and make sure it too is known.
SVal LengthVal = state->getSVal(Size, LCtx);
Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
if (!Length)
return state;
// Convert the first buffer's start address to char*.
// Bail out if the cast fails.
ASTContext &Ctx = svalBuilder.getContext();
QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
SVal FirstStart = svalBuilder.evalCast(*firstLoc, CharPtrTy,
First->getType());
Optional<Loc> FirstStartLoc = FirstStart.getAs<Loc>();
if (!FirstStartLoc)
return state;
// Compute the end of the first buffer. Bail out if THAT fails.
SVal FirstEnd = svalBuilder.evalBinOpLN(state, BO_Add,
*FirstStartLoc, *Length, CharPtrTy);
Optional<Loc> FirstEndLoc = FirstEnd.getAs<Loc>();
if (!FirstEndLoc)
return state;
// Is the end of the first buffer past the start of the second buffer?
SVal Overlap = svalBuilder.evalBinOpLL(state, BO_GT,
*FirstEndLoc, *secondLoc, cmpTy);
Optional<DefinedOrUnknownSVal> OverlapTest =
Overlap.getAs<DefinedOrUnknownSVal>();
if (!OverlapTest)
return state;
std::tie(stateTrue, stateFalse) = state->assume(*OverlapTest);
if (stateTrue && !stateFalse) {
// Overlap!
emitOverlapBug(C, stateTrue, First, Second);
return nullptr;
}
  // Assume the two expressions don't overlap.
assert(stateFalse);
return stateFalse;
}
void CStringChecker::emitOverlapBug(CheckerContext &C, ProgramStateRef state,
const Stmt *First, const Stmt *Second) const {
ExplodedNode *N = C.generateSink(state);
if (!N)
return;
if (!BT_Overlap)
BT_Overlap.reset(new BugType(Filter.CheckNameCStringBufferOverlap,
categories::UnixAPI, "Improper arguments"));
// Generate a report for this bug.
auto report = llvm::make_unique<BugReport>(
*BT_Overlap, "Arguments must not be overlapping buffers", N);
report->addRange(First->getSourceRange());
report->addRange(Second->getSourceRange());
C.emitReport(std::move(report));
}
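// Checks that 'left + right' does not overflow a size_t. The sum is never
// computed directly (it could itself wrap); instead the test is rewritten as
// 'left > SIZE_MAX - right', which cannot overflow.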
ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C,
ProgramStateRef state,
NonLoc left,
NonLoc right) const {
// If out-of-bounds checking is turned off, skip the rest.
if (!Filter.CheckCStringOutOfBounds)
return state;
// If a previous check has failed, propagate the failure.
if (!state)
return nullptr;
SValBuilder &svalBuilder = C.getSValBuilder();
BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
QualType sizeTy = svalBuilder.getContext().getSizeType();
const llvm::APSInt &maxValInt = BVF.getMaxValue(sizeTy);
NonLoc maxVal = svalBuilder.makeIntVal(maxValInt);
SVal maxMinusRight;
if (right.getAs<nonloc::ConcreteInt>()) {
maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, right,
sizeTy);
} else {
// Try switching the operands. (The order of these two assignments is
// important!)
maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, left,
sizeTy);
left = right;
}
if (Optional<NonLoc> maxMinusRightNL = maxMinusRight.getAs<NonLoc>()) {
QualType cmpTy = svalBuilder.getConditionType();
// If left > max - right, we have an overflow.
SVal willOverflow = svalBuilder.evalBinOpNN(state, BO_GT, left,
*maxMinusRightNL, cmpTy);
ProgramStateRef stateOverflow, stateOkay;
std::tie(stateOverflow, stateOkay) =
state->assume(willOverflow.castAs<DefinedOrUnknownSVal>());
if (stateOverflow && !stateOkay) {
// We have an overflow. Emit a bug report.
ExplodedNode *N = C.generateSink(stateOverflow);
if (!N)
return nullptr;
if (!BT_AdditionOverflow)
BT_AdditionOverflow.reset(
new BuiltinBug(Filter.CheckNameCStringOutOfBounds, "API",
"Sum of expressions causes overflow"));
// This isn't a great error message, but this should never occur in real
// code anyway -- you'd have to create a buffer longer than a size_t can
// represent, which is sort of a contradiction.
const char *warning =
"This expression will create a string whose length is too big to "
"be represented as a size_t";
// Generate a report for this bug.
C.emitReport(
llvm::make_unique<BugReport>(*BT_AdditionOverflow, warning, N));
return nullptr;
}
// From now on, assume an overflow didn't occur.
assert(stateOkay);
state = stateOkay;
}
return state;
}
ProgramStateRef CStringChecker::setCStringLength(ProgramStateRef state,
const MemRegion *MR,
SVal strLength) {
assert(!strLength.isUndef() && "Attempt to set an undefined string length");
MR = MR->StripCasts();
switch (MR->getKind()) {
case MemRegion::StringRegionKind:
// FIXME: This can happen if we strcpy() into a string region. This is
// undefined [C99 6.4.5p6], but we should still warn about it.
return state;
case MemRegion::SymbolicRegionKind:
case MemRegion::AllocaRegionKind:
case MemRegion::VarRegionKind:
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
// These are the types we can currently track string lengths for.
break;
case MemRegion::ElementRegionKind:
// FIXME: Handle element regions by upper-bounding the parent region's
// string length.
return state;
default:
// Other regions (mostly non-data) can't have a reliable C string length.
// For now, just ignore the change.
// FIXME: These are rare but not impossible. We should output some kind of
// warning for things like strcpy((char[]){'a', 0}, "b");
return state;
}
if (strLength.isUnknown())
return state->remove<CStringLength>(MR);
return state->set<CStringLength>(MR, strLength);
}
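// Returns the recorded or freshly conjured string length for MR. When
// 'hypothetical' is true, the conjured length is not recorded in the state;
// callers use such lengths to reason about a value that may only later be
// bound to the region (see evalStrcpyCommon).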
SVal CStringChecker::getCStringLengthForRegion(CheckerContext &C,
ProgramStateRef &state,
const Expr *Ex,
const MemRegion *MR,
bool hypothetical) {
if (!hypothetical) {
// If there's a recorded length, go ahead and return it.
const SVal *Recorded = state->get<CStringLength>(MR);
if (Recorded)
return *Recorded;
}
// Otherwise, get a new symbol and update the state.
SValBuilder &svalBuilder = C.getSValBuilder();
QualType sizeTy = svalBuilder.getContext().getSizeType();
SVal strLength = svalBuilder.getMetadataSymbolVal(CStringChecker::getTag(),
MR, Ex, sizeTy,
C.blockCount());
if (!hypothetical) {
if (Optional<NonLoc> strLn = strLength.getAs<NonLoc>()) {
      // For unbounded calls (strlen etc.), bound the resulting length to
      // SIZE_MAX/4.
BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
const llvm::APSInt &maxValInt = BVF.getMaxValue(sizeTy);
llvm::APSInt fourInt = APSIntType(maxValInt).getValue(4);
const llvm::APSInt *maxLengthInt = BVF.evalAPSInt(BO_Div, maxValInt,
fourInt);
NonLoc maxLength = svalBuilder.makeIntVal(*maxLengthInt);
SVal evalLength = svalBuilder.evalBinOpNN(state, BO_LE, *strLn,
maxLength, sizeTy);
state = state->assume(evalLength.castAs<DefinedOrUnknownSVal>(), true);
}
state = state->set<CStringLength>(MR, strLength);
}
return strLength;
}
SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
const Expr *Ex, SVal Buf,
bool hypothetical) const {
const MemRegion *MR = Buf.getAsRegion();
if (!MR) {
// If we can't get a region, see if it's something we /know/ isn't a
// C string. In the context of locations, the only time we can issue such
// a warning is for labels.
if (Optional<loc::GotoLabel> Label = Buf.getAs<loc::GotoLabel>()) {
if (!Filter.CheckCStringNotNullTerm)
return UndefinedVal();
if (ExplodedNode *N = C.addTransition(state)) {
if (!BT_NotCString)
BT_NotCString.reset(new BuiltinBug(
Filter.CheckNameCStringNotNullTerm, categories::UnixAPI,
"Argument is not a null-terminated string."));
SmallString<120> buf;
llvm::raw_svector_ostream os(buf);
assert(CurrentFunctionDescription);
os << "Argument to " << CurrentFunctionDescription
<< " is the address of the label '" << Label->getLabel()->getName()
<< "', which is not a null-terminated string";
// Generate a report for this bug.
auto report = llvm::make_unique<BugReport>(*BT_NotCString, os.str(), N);
report->addRange(Ex->getSourceRange());
C.emitReport(std::move(report));
}
return UndefinedVal();
}
// If it's not a region and not a label, give up.
return UnknownVal();
}
// If we have a region, strip casts from it and see if we can figure out
// its length. For anything we can't figure out, just return UnknownVal.
MR = MR->StripCasts();
switch (MR->getKind()) {
case MemRegion::StringRegionKind: {
// Modifying the contents of string regions is undefined [C99 6.4.5p6],
// so we can assume that the byte length is the correct C string length.
SValBuilder &svalBuilder = C.getSValBuilder();
QualType sizeTy = svalBuilder.getContext().getSizeType();
const StringLiteral *strLit = cast<StringRegion>(MR)->getStringLiteral();
return svalBuilder.makeIntVal(strLit->getByteLength(), sizeTy);
}
case MemRegion::SymbolicRegionKind:
case MemRegion::AllocaRegionKind:
case MemRegion::VarRegionKind:
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
return getCStringLengthForRegion(C, state, Ex, MR, hypothetical);
case MemRegion::CompoundLiteralRegionKind:
// FIXME: Can we track this? Is it necessary?
return UnknownVal();
case MemRegion::ElementRegionKind:
// FIXME: How can we handle this? It's not good enough to subtract the
// offset from the base string length; consider "123\x00567" and &a[5].
return UnknownVal();
default:
// Other regions (mostly non-data) can't have a reliable C string length.
// In this case, an error is emitted and UndefinedVal is returned.
// The caller should always be prepared to handle this case.
if (!Filter.CheckCStringNotNullTerm)
return UndefinedVal();
if (ExplodedNode *N = C.addTransition(state)) {
if (!BT_NotCString)
BT_NotCString.reset(new BuiltinBug(
Filter.CheckNameCStringNotNullTerm, categories::UnixAPI,
"Argument is not a null-terminated string."));
SmallString<120> buf;
llvm::raw_svector_ostream os(buf);
assert(CurrentFunctionDescription);
os << "Argument to " << CurrentFunctionDescription << " is ";
if (SummarizeRegion(os, C.getASTContext(), MR))
os << ", which is not a null-terminated string";
else
os << "not a null-terminated string";
// Generate a report for this bug.
auto report = llvm::make_unique<BugReport>(*BT_NotCString, os.str(), N);
report->addRange(Ex->getSourceRange());
C.emitReport(std::move(report));
}
return UndefinedVal();
}
}
const StringLiteral *CStringChecker::getCStringLiteral(CheckerContext &C,
ProgramStateRef &state, const Expr *expr, SVal val) const {
// Get the memory region pointed to by the val.
const MemRegion *bufRegion = val.getAsRegion();
if (!bufRegion)
return nullptr;
// Strip casts off the memory region.
bufRegion = bufRegion->StripCasts();
// Cast the memory region to a string region.
  const StringRegion *strRegion = dyn_cast<StringRegion>(bufRegion);
if (!strRegion)
return nullptr;
// Return the actual string in the string region.
return strRegion->getStringLiteral();
}
ProgramStateRef CStringChecker::InvalidateBuffer(CheckerContext &C,
ProgramStateRef state,
const Expr *E, SVal V,
bool IsSourceBuffer) {
Optional<Loc> L = V.getAs<Loc>();
if (!L)
return state;
// FIXME: This is a simplified version of what's in CFRefCount.cpp -- it makes
// some assumptions about the value that CFRefCount can't. Even so, it should
// probably be refactored.
if (Optional<loc::MemRegionVal> MR = L->getAs<loc::MemRegionVal>()) {
const MemRegion *R = MR->getRegion()->StripCasts();
// Are we dealing with an ElementRegion? If so, we should be invalidating
// the super-region.
if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
R = ER->getSuperRegion();
// FIXME: What about layers of ElementRegions?
}
// Invalidate this region.
const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
bool CausesPointerEscape = false;
RegionAndSymbolInvalidationTraits ITraits;
// Invalidate and escape only indirect regions accessible through the source
// buffer.
if (IsSourceBuffer) {
ITraits.setTrait(R,
RegionAndSymbolInvalidationTraits::TK_PreserveContents);
ITraits.setTrait(R, RegionAndSymbolInvalidationTraits::TK_SuppressEscape);
CausesPointerEscape = true;
}
return state->invalidateRegions(R, E, C.blockCount(), LCtx,
CausesPointerEscape, nullptr, nullptr,
&ITraits);
}
// If we have a non-region value by chance, just remove the binding.
// FIXME: is this necessary or correct? This handles the non-Region
// cases. Is it ever valid to store to these?
return state->killBinding(*L);
}
bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
const MemRegion *MR) {
const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(MR);
switch (MR->getKind()) {
case MemRegion::FunctionTextRegionKind: {
const NamedDecl *FD = cast<FunctionTextRegion>(MR)->getDecl();
if (FD)
os << "the address of the function '" << *FD << '\'';
else
os << "the address of a function";
return true;
}
case MemRegion::BlockTextRegionKind:
os << "block text";
return true;
case MemRegion::BlockDataRegionKind:
os << "a block";
return true;
case MemRegion::CXXThisRegionKind:
case MemRegion::CXXTempObjectRegionKind:
os << "a C++ temp object of type " << TVR->getValueType().getAsString();
return true;
case MemRegion::VarRegionKind:
os << "a variable of type" << TVR->getValueType().getAsString();
return true;
case MemRegion::FieldRegionKind:
os << "a field of type " << TVR->getValueType().getAsString();
return true;
case MemRegion::ObjCIvarRegionKind:
os << "an instance variable of type " << TVR->getValueType().getAsString();
return true;
default:
return false;
}
}
//===----------------------------------------------------------------------===//
// Evaluation of individual function calls.
//===----------------------------------------------------------------------===//
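// Shared implementation for memcpy, mempcpy, memmove, and bcopy.
// 'Restricted' enables the overlap check (memcpy and mempcpy take
// restrict-qualified pointers); 'IsMempcpy' binds the return value to the
// byte past the last byte copied rather than to the destination buffer.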
void CStringChecker::evalCopyCommon(CheckerContext &C,
const CallExpr *CE,
ProgramStateRef state,
const Expr *Size, const Expr *Dest,
const Expr *Source, bool Restricted,
bool IsMempcpy) const {
CurrentFunctionDescription = "memory copy function";
// See if the size argument is zero.
const LocationContext *LCtx = C.getLocationContext();
SVal sizeVal = state->getSVal(Size, LCtx);
QualType sizeTy = Size->getType();
ProgramStateRef stateZeroSize, stateNonZeroSize;
std::tie(stateZeroSize, stateNonZeroSize) =
assumeZero(C, state, sizeVal, sizeTy);
// Get the value of the Dest.
SVal destVal = state->getSVal(Dest, LCtx);
// If the size is zero, there won't be any actual memory access, so
// just bind the return value to the destination buffer and return.
if (stateZeroSize && !stateNonZeroSize) {
stateZeroSize = stateZeroSize->BindExpr(CE, LCtx, destVal);
C.addTransition(stateZeroSize);
return;
}
// If the size can be nonzero, we have to check the other arguments.
if (stateNonZeroSize) {
state = stateNonZeroSize;
// Ensure the destination is not null. If it is NULL there will be a
// NULL pointer dereference.
state = checkNonNull(C, state, Dest, destVal);
if (!state)
return;
// Get the value of the Src.
SVal srcVal = state->getSVal(Source, LCtx);
// Ensure the source is not null. If it is NULL there will be a
// NULL pointer dereference.
state = checkNonNull(C, state, Source, srcVal);
if (!state)
return;
// Ensure the accesses are valid and that the buffers do not overlap.
const char * const writeWarning =
"Memory copy function overflows destination buffer";
state = CheckBufferAccess(C, state, Size, Dest, Source,
writeWarning, /* sourceWarning = */ nullptr);
if (Restricted)
state = CheckOverlap(C, state, Size, Dest, Source);
if (!state)
return;
// If this is mempcpy, get the byte after the last byte copied and
// bind the expr.
if (IsMempcpy) {
loc::MemRegionVal destRegVal = destVal.castAs<loc::MemRegionVal>();
// Get the length to copy.
if (Optional<NonLoc> lenValNonLoc = sizeVal.getAs<NonLoc>()) {
// Get the byte after the last byte copied.
SValBuilder &SvalBuilder = C.getSValBuilder();
ASTContext &Ctx = SvalBuilder.getContext();
QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
loc::MemRegionVal DestRegCharVal = SvalBuilder.evalCast(destRegVal,
CharPtrTy, Dest->getType()).castAs<loc::MemRegionVal>();
SVal lastElement = C.getSValBuilder().evalBinOpLN(state, BO_Add,
DestRegCharVal,
*lenValNonLoc,
Dest->getType());
// The byte after the last byte copied is the return value.
state = state->BindExpr(CE, LCtx, lastElement);
} else {
// If we don't know how much we copied, we can at least
// conjure a return value for later.
SVal result = C.getSValBuilder().conjureSymbolVal(nullptr, CE, LCtx,
C.blockCount());
state = state->BindExpr(CE, LCtx, result);
}
} else {
// All other copies return the destination buffer.
// (Well, bcopy() has a void return type, but this won't hurt.)
state = state->BindExpr(CE, LCtx, destVal);
}
// Invalidate the destination (regular invalidation without pointer-escaping
// the address of the top-level region).
// FIXME: Even if we can't perfectly model the copy, we should see if we
// can use LazyCompoundVals to copy the source values into the destination.
// This would probably remove any existing bindings past the end of the
// copied region, but that's still an improvement over blank invalidation.
state = InvalidateBuffer(C, state, Dest, C.getSVal(Dest),
/*IsSourceBuffer*/false);
// Invalidate the source (const-invalidation without const-pointer-escaping
// the address of the top-level region).
state = InvalidateBuffer(C, state, Source, C.getSVal(Source),
/*IsSourceBuffer*/true);
C.addTransition(state);
}
}
void CStringChecker::evalMemcpy(CheckerContext &C, const CallExpr *CE) const {
if (CE->getNumArgs() < 3)
return;
// void *memcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is the address of the destination buffer.
const Expr *Dest = CE->getArg(0);
ProgramStateRef state = C.getState();
evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1), true);
}
void CStringChecker::evalMempcpy(CheckerContext &C, const CallExpr *CE) const {
if (CE->getNumArgs() < 3)
return;
// void *mempcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is a pointer to the byte following the last written byte.
const Expr *Dest = CE->getArg(0);
ProgramStateRef state = C.getState();
evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1), true, true);
}
void CStringChecker::evalMemmove(CheckerContext &C, const CallExpr *CE) const {
if (CE->getNumArgs() < 3)
return;
// void *memmove(void *dst, const void *src, size_t n);
// The return value is the address of the destination buffer.
const Expr *Dest = CE->getArg(0);
ProgramStateRef state = C.getState();
evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1));
}
void CStringChecker::evalBcopy(CheckerContext &C, const CallExpr *CE) const {
if (CE->getNumArgs() < 3)
return;
// void bcopy(const void *src, void *dst, size_t n);
evalCopyCommon(C, CE, C.getState(),
CE->getArg(2), CE->getArg(1), CE->getArg(0));
}
void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
if (CE->getNumArgs() < 3)
return;
// int memcmp(const void *s1, const void *s2, size_t n);
CurrentFunctionDescription = "memory comparison function";
const Expr *Left = CE->getArg(0);
const Expr *Right = CE->getArg(1);
const Expr *Size = CE->getArg(2);
ProgramStateRef state = C.getState();
SValBuilder &svalBuilder = C.getSValBuilder();
// See if the size argument is zero.
const LocationContext *LCtx = C.getLocationContext();
SVal sizeVal = state->getSVal(Size, LCtx);
QualType sizeTy = Size->getType();
ProgramStateRef stateZeroSize, stateNonZeroSize;
std::tie(stateZeroSize, stateNonZeroSize) =
assumeZero(C, state, sizeVal, sizeTy);
// If the size can be zero, the result will be 0 in that case, and we don't
// have to check either of the buffers.
if (stateZeroSize) {
state = stateZeroSize;
state = state->BindExpr(CE, LCtx,
svalBuilder.makeZeroVal(CE->getType()));
C.addTransition(state);
}
// If the size can be nonzero, we have to check the other arguments.
if (stateNonZeroSize) {
state = stateNonZeroSize;
// If we know the two buffers are the same, we know the result is 0.
// First, get the two buffers' addresses. Another checker will have already
// made sure they're not undefined.
DefinedOrUnknownSVal LV =
state->getSVal(Left, LCtx).castAs<DefinedOrUnknownSVal>();
DefinedOrUnknownSVal RV =
state->getSVal(Right, LCtx).castAs<DefinedOrUnknownSVal>();
// See if they are the same.
DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
ProgramStateRef StSameBuf, StNotSameBuf;
std::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
// If the two arguments might be the same buffer, we know the result is 0,
// and we only need to check one size.
if (StSameBuf) {
state = StSameBuf;
state = CheckBufferAccess(C, state, Size, Left);
if (state) {
state = StSameBuf->BindExpr(CE, LCtx,
svalBuilder.makeZeroVal(CE->getType()));
C.addTransition(state);
}
}
// If the two arguments might be different buffers, we have to check the
// size of both of them.
if (StNotSameBuf) {
state = StNotSameBuf;
state = CheckBufferAccess(C, state, Size, Left, Right);
if (state) {
// The return value is the comparison result, which we don't know.
SVal CmpV = svalBuilder.conjureSymbolVal(nullptr, CE, LCtx,
C.blockCount());
state = state->BindExpr(CE, LCtx, CmpV);
C.addTransition(state);
}
}
}
}
void CStringChecker::evalstrLength(CheckerContext &C,
const CallExpr *CE) const {
if (CE->getNumArgs() < 1)
return;
// size_t strlen(const char *s);
evalstrLengthCommon(C, CE, /* IsStrnlen = */ false);
}
void CStringChecker::evalstrnLength(CheckerContext &C,
const CallExpr *CE) const {
if (CE->getNumArgs() < 2)
return;
// size_t strnlen(const char *s, size_t maxlen);
evalstrLengthCommon(C, CE, /* IsStrnlen = */ true);
}
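// Shared implementation for strlen and strnlen. For strnlen, the result is
// the smaller of the string length and the maxlen bound: it is computed
// exactly when both values are known, and modeled as a suitably constrained
// symbol otherwise.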
void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
bool IsStrnlen) const {
CurrentFunctionDescription = "string length function";
ProgramStateRef state = C.getState();
const LocationContext *LCtx = C.getLocationContext();
if (IsStrnlen) {
const Expr *maxlenExpr = CE->getArg(1);
SVal maxlenVal = state->getSVal(maxlenExpr, LCtx);
ProgramStateRef stateZeroSize, stateNonZeroSize;
std::tie(stateZeroSize, stateNonZeroSize) =
assumeZero(C, state, maxlenVal, maxlenExpr->getType());
// If the size can be zero, the result will be 0 in that case, and we don't
// have to check the string itself.
if (stateZeroSize) {
SVal zero = C.getSValBuilder().makeZeroVal(CE->getType());
stateZeroSize = stateZeroSize->BindExpr(CE, LCtx, zero);
C.addTransition(stateZeroSize);
}
// If the size is GUARANTEED to be zero, we're done!
if (!stateNonZeroSize)
return;
// Otherwise, record the assumption that the size is nonzero.
state = stateNonZeroSize;
}
// Check that the string argument is non-null.
const Expr *Arg = CE->getArg(0);
SVal ArgVal = state->getSVal(Arg, LCtx);
state = checkNonNull(C, state, Arg, ArgVal);
if (!state)
return;
SVal strLength = getCStringLength(C, state, Arg, ArgVal);
// If the argument isn't a valid C string, there's no valid state to
// transition to.
if (strLength.isUndef())
return;
DefinedOrUnknownSVal result = UnknownVal();
// If the check is for strnlen() then bind the return value to no more than
// the maxlen value.
if (IsStrnlen) {
QualType cmpTy = C.getSValBuilder().getConditionType();
// It's a little unfortunate to be getting this again,
// but it's not that expensive...
const Expr *maxlenExpr = CE->getArg(1);
SVal maxlenVal = state->getSVal(maxlenExpr, LCtx);
Optional<NonLoc> strLengthNL = strLength.getAs<NonLoc>();
Optional<NonLoc> maxlenValNL = maxlenVal.getAs<NonLoc>();
if (strLengthNL && maxlenValNL) {
ProgramStateRef stateStringTooLong, stateStringNotTooLong;
// Check if the strLength is greater than the maxlen.
std::tie(stateStringTooLong, stateStringNotTooLong) = state->assume(
C.getSValBuilder()
.evalBinOpNN(state, BO_GT, *strLengthNL, *maxlenValNL, cmpTy)
.castAs<DefinedOrUnknownSVal>());
if (stateStringTooLong && !stateStringNotTooLong) {
// If the string is longer than maxlen, return maxlen.
result = *maxlenValNL;
} else if (stateStringNotTooLong && !stateStringTooLong) {
// If the string is shorter than maxlen, return its length.
result = *strLengthNL;
}
}
if (result.isUnknown()) {
// If we don't have enough information for a comparison, there's
// no guarantee the full string length will actually be returned.
// All we know is the return value is the min of the string length
// and the limit. This is better than nothing.
result = C.getSValBuilder().conjureSymbolVal(nullptr, CE, LCtx,
C.blockCount());
NonLoc resultNL = result.castAs<NonLoc>();
if (strLengthNL) {
state = state->assume(C.getSValBuilder().evalBinOpNN(
state, BO_LE, resultNL, *strLengthNL, cmpTy)
.castAs<DefinedOrUnknownSVal>(), true);
}
if (maxlenValNL) {
state = state->assume(C.getSValBuilder().evalBinOpNN(
state, BO_LE, resultNL, *maxlenValNL, cmpTy)
.castAs<DefinedOrUnknownSVal>(), true);
}
}
} else {
// This is a plain strlen(), not strnlen().
result = strLength.castAs<DefinedOrUnknownSVal>();
// If we don't know the length of the string, conjure a return
// value, so it can be used in constraints, at least.
if (result.isUnknown()) {
result = C.getSValBuilder().conjureSymbolVal(nullptr, CE, LCtx,
C.blockCount());
}
}
// Bind the return value.
assert(!result.isUnknown() && "Should have conjured a value by now");
state = state->BindExpr(CE, LCtx, result);
C.addTransition(state);
}
void CStringChecker::evalStrcpy(CheckerContext &C, const CallExpr *CE) const {
if (CE->getNumArgs() < 2)
return;
// char *strcpy(char *restrict dst, const char *restrict src);
evalStrcpyCommon(C, CE,
/* returnEnd = */ false,
/* isBounded = */ false,
/* isAppending = */ false);
}
void CStringChecker::evalStrncpy(CheckerContext &C, const CallExpr *CE) const {
if (CE->getNumArgs() < 3)
return;
// char *strncpy(char *restrict dst, const char *restrict src, size_t n);
evalStrcpyCommon(C, CE,
/* returnEnd = */ false,
/* isBounded = */ true,
/* isAppending = */ false);
}
void CStringChecker::evalStpcpy(CheckerContext &C, const CallExpr *CE) const {
if (CE->getNumArgs() < 2)
return;
// char *stpcpy(char *restrict dst, const char *restrict src);
evalStrcpyCommon(C, CE,
/* returnEnd = */ true,
/* isBounded = */ false,
/* isAppending = */ false);
}
void CStringChecker::evalStrcat(CheckerContext &C, const CallExpr *CE) const {
if (CE->getNumArgs() < 2)
return;
  // char *strcat(char *restrict s1, const char *restrict s2);
evalStrcpyCommon(C, CE,
/* returnEnd = */ false,
/* isBounded = */ false,
/* isAppending = */ true);
}
void CStringChecker::evalStrncat(CheckerContext &C, const CallExpr *CE) const {
if (CE->getNumArgs() < 3)
return;
  // char *strncat(char *restrict s1, const char *restrict s2, size_t n);
evalStrcpyCommon(C, CE,
/* returnEnd = */ false,
/* isBounded = */ true,
/* isAppending = */ true);
}
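// Shared implementation for the strcpy/strcat family:
//   returnEnd   - return a pointer to the end of the result, as stpcpy does;
//   isBounded   - a size argument bounds the copy (strncpy, strncat);
//   isAppending - the source is appended to the destination (strcat, strncat).
// An illustrative overflow this catches (assuming CheckCStringOutOfBounds is
// enabled):
//   char dst[4];
//   strcpy(dst, "hello");  // "String copy function overflows ..."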
void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
bool returnEnd, bool isBounded,
bool isAppending) const {
CurrentFunctionDescription = "string copy function";
ProgramStateRef state = C.getState();
const LocationContext *LCtx = C.getLocationContext();
// Check that the destination is non-null.
const Expr *Dst = CE->getArg(0);
SVal DstVal = state->getSVal(Dst, LCtx);
state = checkNonNull(C, state, Dst, DstVal);
if (!state)
return;
// Check that the source is non-null.
const Expr *srcExpr = CE->getArg(1);
SVal srcVal = state->getSVal(srcExpr, LCtx);
state = checkNonNull(C, state, srcExpr, srcVal);
if (!state)
return;
// Get the string length of the source.
SVal strLength = getCStringLength(C, state, srcExpr, srcVal);
// If the source isn't a valid C string, give up.
if (strLength.isUndef())
return;
SValBuilder &svalBuilder = C.getSValBuilder();
QualType cmpTy = svalBuilder.getConditionType();
QualType sizeTy = svalBuilder.getContext().getSizeType();
// These two values allow checking two kinds of errors:
// - actual overflows caused by a source that doesn't fit in the destination
// - potential overflows caused by a bound that could exceed the destination
SVal amountCopied = UnknownVal();
SVal maxLastElementIndex = UnknownVal();
const char *boundWarning = nullptr;
// If the function is strncpy, strncat, etc... it is bounded.
if (isBounded) {
// Get the max number of characters to copy.
const Expr *lenExpr = CE->getArg(2);
SVal lenVal = state->getSVal(lenExpr, LCtx);
// Protect against misdeclared strncpy().
lenVal = svalBuilder.evalCast(lenVal, sizeTy, lenExpr->getType());
Optional<NonLoc> strLengthNL = strLength.getAs<NonLoc>();
Optional<NonLoc> lenValNL = lenVal.getAs<NonLoc>();
// If we know both values, we might be able to figure out how much
// we're copying.
if (strLengthNL && lenValNL) {
ProgramStateRef stateSourceTooLong, stateSourceNotTooLong;
// Check if the max number to copy is less than the length of the src.
// If the bound is equal to the source length, strncpy won't null-
// terminate the result!
std::tie(stateSourceTooLong, stateSourceNotTooLong) = state->assume(
svalBuilder.evalBinOpNN(state, BO_GE, *strLengthNL, *lenValNL, cmpTy)
.castAs<DefinedOrUnknownSVal>());
if (stateSourceTooLong && !stateSourceNotTooLong) {
// Max number to copy is less than the length of the src, so the actual
// strLength copied is the max number arg.
state = stateSourceTooLong;
amountCopied = lenVal;
} else if (!stateSourceTooLong && stateSourceNotTooLong) {
// The source buffer entirely fits in the bound.
state = stateSourceNotTooLong;
amountCopied = strLength;
}
}
// We still want to know if the bound is known to be too large.
if (lenValNL) {
if (isAppending) {
// For strncat, the check is strlen(dst) + lenVal < sizeof(dst)
// Get the string length of the destination. If the destination is
// memory that can't have a string length, we shouldn't be copying
// into it anyway.
SVal dstStrLength = getCStringLength(C, state, Dst, DstVal);
if (dstStrLength.isUndef())
return;
if (Optional<NonLoc> dstStrLengthNL = dstStrLength.getAs<NonLoc>()) {
maxLastElementIndex = svalBuilder.evalBinOpNN(state, BO_Add,
*lenValNL,
*dstStrLengthNL,
sizeTy);
boundWarning = "Size argument is greater than the free space in the "
"destination buffer";
}
} else {
// For strncpy, this is just checking that lenVal <= sizeof(dst)
// (Yes, strncpy and strncat differ in how they treat termination.
// strncat ALWAYS terminates, but strncpy doesn't.)
// We need a special case for when the copy size is zero, in which
// case strncpy will do no work at all. Our bounds check uses n-1
// as the last element accessed, so n == 0 is problematic.
ProgramStateRef StateZeroSize, StateNonZeroSize;
std::tie(StateZeroSize, StateNonZeroSize) =
assumeZero(C, state, *lenValNL, sizeTy);
// If the size is known to be zero, we're done.
if (StateZeroSize && !StateNonZeroSize) {
StateZeroSize = StateZeroSize->BindExpr(CE, LCtx, DstVal);
C.addTransition(StateZeroSize);
return;
}
// Otherwise, go ahead and figure out the last element we'll touch.
// We don't record the non-zero assumption here because we can't
// be sure. We won't warn on a possible zero.
NonLoc one = svalBuilder.makeIntVal(1, sizeTy).castAs<NonLoc>();
maxLastElementIndex = svalBuilder.evalBinOpNN(state, BO_Sub, *lenValNL,
one, sizeTy);
boundWarning = "Size argument is greater than the length of the "
"destination buffer";
}
}
// If we couldn't pin down the copy length, at least bound it.
// FIXME: We should actually run this code path for append as well, but
// right now it creates problems with constraints (since we can end up
// trying to pass constraints from symbol to symbol).
if (amountCopied.isUnknown() && !isAppending) {
// Try to get a "hypothetical" string length symbol, which we can later
// set as a real value if that turns out to be the case.
amountCopied = getCStringLength(C, state, lenExpr, srcVal, true);
assert(!amountCopied.isUndef());
if (Optional<NonLoc> amountCopiedNL = amountCopied.getAs<NonLoc>()) {
if (lenValNL) {
// amountCopied <= lenVal
SVal copiedLessThanBound = svalBuilder.evalBinOpNN(state, BO_LE,
*amountCopiedNL,
*lenValNL,
cmpTy);
state = state->assume(
copiedLessThanBound.castAs<DefinedOrUnknownSVal>(), true);
if (!state)
return;
}
if (strLengthNL) {
// amountCopied <= strlen(source)
SVal copiedLessThanSrc = svalBuilder.evalBinOpNN(state, BO_LE,
*amountCopiedNL,
*strLengthNL,
cmpTy);
state = state->assume(
copiedLessThanSrc.castAs<DefinedOrUnknownSVal>(), true);
if (!state)
return;
}
}
}
} else {
// The function isn't bounded. The amount copied should match the length
// of the source buffer.
amountCopied = strLength;
}
assert(state);
// This represents the number of characters copied into the destination
// buffer. (It may not actually be the strlen if the destination buffer
// is not terminated.)
SVal finalStrLength = UnknownVal();
// If this is an appending function (strcat, strncat...) then set the
// string length to strlen(src) + strlen(dst) since the buffer will
// ultimately contain both.
if (isAppending) {
// Get the string length of the destination. If the destination is memory
// that can't have a string length, we shouldn't be copying into it anyway.
SVal dstStrLength = getCStringLength(C, state, Dst, DstVal);
if (dstStrLength.isUndef())
return;
Optional<NonLoc> srcStrLengthNL = amountCopied.getAs<NonLoc>();
Optional<NonLoc> dstStrLengthNL = dstStrLength.getAs<NonLoc>();
// If we know both string lengths, we might know the final string length.
if (srcStrLengthNL && dstStrLengthNL) {
// Make sure the two lengths together don't overflow a size_t.
state = checkAdditionOverflow(C, state, *srcStrLengthNL, *dstStrLengthNL);
if (!state)
return;
finalStrLength = svalBuilder.evalBinOpNN(state, BO_Add, *srcStrLengthNL,
*dstStrLengthNL, sizeTy);
}
// If we couldn't get a single value for the final string length,
// we can at least bound it by the individual lengths.
if (finalStrLength.isUnknown()) {
// Try to get a "hypothetical" string length symbol, which we can later
// set as a real value if that turns out to be the case.
finalStrLength = getCStringLength(C, state, CE, DstVal, true);
assert(!finalStrLength.isUndef());
if (Optional<NonLoc> finalStrLengthNL = finalStrLength.getAs<NonLoc>()) {
if (srcStrLengthNL) {
// finalStrLength >= srcStrLength
SVal sourceInResult = svalBuilder.evalBinOpNN(state, BO_GE,
*finalStrLengthNL,
*srcStrLengthNL,
cmpTy);
state = state->assume(sourceInResult.castAs<DefinedOrUnknownSVal>(),
true);
if (!state)
return;
}
if (dstStrLengthNL) {
// finalStrLength >= dstStrLength
SVal destInResult = svalBuilder.evalBinOpNN(state, BO_GE,
*finalStrLengthNL,
*dstStrLengthNL,
cmpTy);
state =
state->assume(destInResult.castAs<DefinedOrUnknownSVal>(), true);
if (!state)
return;
}
}
}
} else {
// Otherwise, this is a copy-over function (strcpy, strncpy, ...), and
// the final string length will match the input string length.
finalStrLength = amountCopied;
}
// The final result of the function will either be a pointer past the last
// copied element, or a pointer to the start of the destination buffer.
SVal Result = (returnEnd ? UnknownVal() : DstVal);
assert(state);
// If the destination is a MemRegion, try to check for a buffer overflow and
// record the new string length.
if (Optional<loc::MemRegionVal> dstRegVal =
DstVal.getAs<loc::MemRegionVal>()) {
QualType ptrTy = Dst->getType();
// If we have an exact value on a bounded copy, use that to check for
// overflows, rather than our estimate about how much is actually copied.
if (boundWarning) {
if (Optional<NonLoc> maxLastNL = maxLastElementIndex.getAs<NonLoc>()) {
SVal maxLastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
*maxLastNL, ptrTy);
state = CheckLocation(C, state, CE->getArg(2), maxLastElement,
boundWarning);
if (!state)
return;
}
}
// Then, if the final length is known...
if (Optional<NonLoc> knownStrLength = finalStrLength.getAs<NonLoc>()) {
SVal lastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
*knownStrLength, ptrTy);
// ...and we haven't checked the bound, we'll check the actual copy.
if (!boundWarning) {
const char * const warningMsg =
"String copy function overflows destination buffer";
state = CheckLocation(C, state, Dst, lastElement, warningMsg);
if (!state)
return;
}
// If this is a stpcpy-style copy, the last element is the return value.
if (returnEnd)
Result = lastElement;
}
// Invalidate the destination (regular invalidation without pointer-escaping
// the address of the top-level region). This must happen before we set the
// C string length because invalidation will clear the length.
// FIXME: Even if we can't perfectly model the copy, we should see if we
// can use LazyCompoundVals to copy the source values into the destination.
// This would probably remove any existing bindings past the end of the
// string, but that's still an improvement over blank invalidation.
state = InvalidateBuffer(C, state, Dst, *dstRegVal,
/*IsSourceBuffer*/false);
// Invalidate the source (const-invalidation without const-pointer-escaping
// the address of the top-level region).
state = InvalidateBuffer(C, state, srcExpr, srcVal, /*IsSourceBuffer*/true);
// Set the C string length of the destination, if we know it.
if (isBounded && !isAppending) {
// strncpy is annoying in that it doesn't guarantee to null-terminate
// the result string. If the original string didn't fit entirely inside
// the bound (including the null-terminator), we don't know how long the
// result is.
if (amountCopied != strLength)
finalStrLength = UnknownVal();
}
state = setCStringLength(state, dstRegVal->getRegion(), finalStrLength);
}
assert(state);
// If this is a stpcpy-style copy, but we were unable to check for a buffer
// overflow, we still need a result. Conjure a return value.
if (returnEnd && Result.isUnknown()) {
Result = svalBuilder.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
}
// Set the return value.
state = state->BindExpr(CE, LCtx, Result);
C.addTransition(state);
}
void CStringChecker::evalStrcmp(CheckerContext &C, const CallExpr *CE) const {
if (CE->getNumArgs() < 2)
return;
  // int strcmp(const char *s1, const char *s2);
evalStrcmpCommon(C, CE, /* isBounded = */ false, /* ignoreCase = */ false);
}
void CStringChecker::evalStrncmp(CheckerContext &C, const CallExpr *CE) const {
if (CE->getNumArgs() < 3)
return;
  // int strncmp(const char *s1, const char *s2, size_t n);
evalStrcmpCommon(C, CE, /* isBounded = */ true, /* ignoreCase = */ false);
}
void CStringChecker::evalStrcasecmp(CheckerContext &C,
const CallExpr *CE) const {
if (CE->getNumArgs() < 2)
return;
  // int strcasecmp(const char *s1, const char *s2);
evalStrcmpCommon(C, CE, /* isBounded = */ false, /* ignoreCase = */ true);
}
void CStringChecker::evalStrncasecmp(CheckerContext &C,
const CallExpr *CE) const {
if (CE->getNumArgs() < 3)
return;
  // int strncasecmp(const char *s1, const char *s2, size_t n);
evalStrcmpCommon(C, CE, /* isBounded = */ true, /* ignoreCase = */ true);
}
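// Shared implementation for strcmp, strncmp, strcasecmp, and strncasecmp.
// When both arguments are known string literals, the result is computed
// exactly using StringRef's comparison methods; otherwise a symbolic result
// is conjured.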
void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
bool isBounded, bool ignoreCase) const {
CurrentFunctionDescription = "string comparison function";
ProgramStateRef state = C.getState();
const LocationContext *LCtx = C.getLocationContext();
// Check that the first string is non-null
const Expr *s1 = CE->getArg(0);
SVal s1Val = state->getSVal(s1, LCtx);
state = checkNonNull(C, state, s1, s1Val);
if (!state)
return;
// Check that the second string is non-null.
const Expr *s2 = CE->getArg(1);
SVal s2Val = state->getSVal(s2, LCtx);
state = checkNonNull(C, state, s2, s2Val);
if (!state)
return;
// Get the string length of the first string or give up.
SVal s1Length = getCStringLength(C, state, s1, s1Val);
if (s1Length.isUndef())
return;
// Get the string length of the second string or give up.
SVal s2Length = getCStringLength(C, state, s2, s2Val);
if (s2Length.isUndef())
return;
// If we know the two buffers are the same, we know the result is 0.
// First, get the two buffers' addresses. Another checker will have already
// made sure they're not undefined.
DefinedOrUnknownSVal LV = s1Val.castAs<DefinedOrUnknownSVal>();
DefinedOrUnknownSVal RV = s2Val.castAs<DefinedOrUnknownSVal>();
// See if they are the same.
SValBuilder &svalBuilder = C.getSValBuilder();
DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
ProgramStateRef StSameBuf, StNotSameBuf;
std::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
// If the two arguments might be the same buffer, we know the result is 0,
// and we only need to check one size.
if (StSameBuf) {
StSameBuf = StSameBuf->BindExpr(CE, LCtx,
svalBuilder.makeZeroVal(CE->getType()));
C.addTransition(StSameBuf);
// If the two arguments are GUARANTEED to be the same, we're done!
if (!StNotSameBuf)
return;
}
assert(StNotSameBuf);
state = StNotSameBuf;
// At this point we can go about comparing the two buffers.
// For now, we only do this if they're both known string literals.
// Attempt to extract string literals from both expressions.
const StringLiteral *s1StrLiteral = getCStringLiteral(C, state, s1, s1Val);
const StringLiteral *s2StrLiteral = getCStringLiteral(C, state, s2, s2Val);
bool canComputeResult = false;
if (s1StrLiteral && s2StrLiteral) {
StringRef s1StrRef = s1StrLiteral->getString();
StringRef s2StrRef = s2StrLiteral->getString();
if (isBounded) {
// Get the max number of characters to compare.
const Expr *lenExpr = CE->getArg(2);
SVal lenVal = state->getSVal(lenExpr, LCtx);
// If the length is known, we can get the right substrings.
if (const llvm::APSInt *len = svalBuilder.getKnownValue(state, lenVal)) {
// Create substrings of each to compare the prefix.
s1StrRef = s1StrRef.substr(0, (size_t)len->getZExtValue());
s2StrRef = s2StrRef.substr(0, (size_t)len->getZExtValue());
canComputeResult = true;
}
} else {
// This is a normal, unbounded strcmp.
canComputeResult = true;
}
if (canComputeResult) {
// Real strcmp stops at null characters.
size_t s1Term = s1StrRef.find('\0');
if (s1Term != StringRef::npos)
s1StrRef = s1StrRef.substr(0, s1Term);
size_t s2Term = s2StrRef.find('\0');
if (s2Term != StringRef::npos)
s2StrRef = s2StrRef.substr(0, s2Term);
// Use StringRef's comparison methods to compute the actual result.
int result;
if (ignoreCase) {
// Compare string 1 to string 2 the same way strcasecmp() does.
result = s1StrRef.compare_lower(s2StrRef);
} else {
// Compare string 1 to string 2 the same way strcmp() does.
result = s1StrRef.compare(s2StrRef);
}
// Build the SVal of the comparison and bind the return value.
SVal resultVal = svalBuilder.makeIntVal(result, CE->getType());
state = state->BindExpr(CE, LCtx, resultVal);
}
}
if (!canComputeResult) {
// Conjure a symbolic value. It's the best we can do.
SVal resultVal = svalBuilder.conjureSymbolVal(nullptr, CE, LCtx,
C.blockCount());
state = state->BindExpr(CE, LCtx, resultVal);
}
// Record this as a possible path.
C.addTransition(state);
}
void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
  // char *strsep(char **stringp, const char *delim);
if (CE->getNumArgs() < 2)
return;
// Sanity: does the search string parameter match the return type?
const Expr *SearchStrPtr = CE->getArg(0);
QualType CharPtrTy = SearchStrPtr->getType()->getPointeeType();
if (CharPtrTy.isNull() ||
CE->getType().getUnqualifiedType() != CharPtrTy.getUnqualifiedType())
return;
CurrentFunctionDescription = "strsep()";
ProgramStateRef State = C.getState();
const LocationContext *LCtx = C.getLocationContext();
// Check that the search string pointer is non-null (though it may point to
// a null string).
SVal SearchStrVal = State->getSVal(SearchStrPtr, LCtx);
State = checkNonNull(C, State, SearchStrPtr, SearchStrVal);
if (!State)
return;
// Check that the delimiter string is non-null.
const Expr *DelimStr = CE->getArg(1);
SVal DelimStrVal = State->getSVal(DelimStr, LCtx);
State = checkNonNull(C, State, DelimStr, DelimStrVal);
if (!State)
return;
SValBuilder &SVB = C.getSValBuilder();
SVal Result;
if (Optional<Loc> SearchStrLoc = SearchStrVal.getAs<Loc>()) {
// Get the current value of the search string pointer, as a char*.
Result = State->getSVal(*SearchStrLoc, CharPtrTy);
// Invalidate the search string, representing the change of one delimiter
// character to NUL.
State = InvalidateBuffer(C, State, SearchStrPtr, Result,
/*IsSourceBuffer*/false);
// Overwrite the search string pointer. The new value is either an address
// further along in the same string, or NULL if there are no more tokens.
State = State->bindLoc(*SearchStrLoc,
SVB.conjureSymbolVal(getTag(), CE, LCtx, CharPtrTy,
C.blockCount()));
} else {
assert(SearchStrVal.isUnknown());
// Conjure a symbolic value. It's the best we can do.
Result = SVB.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
}
// Set the return value, and finish.
State = State->BindExpr(CE, LCtx, Result);
C.addTransition(State);
}
//===----------------------------------------------------------------------===//
// The driver method, and other Checker callbacks.
//===----------------------------------------------------------------------===//
bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
const FunctionDecl *FDecl = C.getCalleeDecl(CE);
if (!FDecl)
return false;
// FIXME: Poorly-factored string switches are slow.
FnCheck evalFunction = nullptr;
if (C.isCLibraryFunction(FDecl, "memcpy"))
evalFunction = &CStringChecker::evalMemcpy;
else if (C.isCLibraryFunction(FDecl, "mempcpy"))
evalFunction = &CStringChecker::evalMempcpy;
else if (C.isCLibraryFunction(FDecl, "memcmp"))
evalFunction = &CStringChecker::evalMemcmp;
else if (C.isCLibraryFunction(FDecl, "memmove"))
evalFunction = &CStringChecker::evalMemmove;
else if (C.isCLibraryFunction(FDecl, "strcpy"))
evalFunction = &CStringChecker::evalStrcpy;
else if (C.isCLibraryFunction(FDecl, "strncpy"))
evalFunction = &CStringChecker::evalStrncpy;
else if (C.isCLibraryFunction(FDecl, "stpcpy"))
evalFunction = &CStringChecker::evalStpcpy;
else if (C.isCLibraryFunction(FDecl, "strcat"))
evalFunction = &CStringChecker::evalStrcat;
else if (C.isCLibraryFunction(FDecl, "strncat"))
evalFunction = &CStringChecker::evalStrncat;
else if (C.isCLibraryFunction(FDecl, "strlen"))
evalFunction = &CStringChecker::evalstrLength;
else if (C.isCLibraryFunction(FDecl, "strnlen"))
evalFunction = &CStringChecker::evalstrnLength;
else if (C.isCLibraryFunction(FDecl, "strcmp"))
evalFunction = &CStringChecker::evalStrcmp;
else if (C.isCLibraryFunction(FDecl, "strncmp"))
evalFunction = &CStringChecker::evalStrncmp;
else if (C.isCLibraryFunction(FDecl, "strcasecmp"))
evalFunction = &CStringChecker::evalStrcasecmp;
else if (C.isCLibraryFunction(FDecl, "strncasecmp"))
evalFunction = &CStringChecker::evalStrncasecmp;
else if (C.isCLibraryFunction(FDecl, "strsep"))
evalFunction = &CStringChecker::evalStrsep;
else if (C.isCLibraryFunction(FDecl, "bcopy"))
evalFunction = &CStringChecker::evalBcopy;
else if (C.isCLibraryFunction(FDecl, "bcmp"))
evalFunction = &CStringChecker::evalMemcmp;
// If the callee isn't a string function, let another checker handle it.
if (!evalFunction)
return false;
// Check and evaluate the call.
(this->*evalFunction)(C, CE);
  // If evaluating the call resulted in no change to the state, chain to the
  // next evalCall handler.
  // Note: the custom CString evaluation functions assume that basic safety
  // properties hold. However, if the user chooses to turn off some of these
  // checks, we ignore the issues and leave the call evaluation to a generic
  // handler.
if (!C.isDifferent())
return false;
return true;
}
void CStringChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
// Record string length for char a[] = "abc";
ProgramStateRef state = C.getState();
for (const auto *I : DS->decls()) {
const VarDecl *D = dyn_cast<VarDecl>(I);
if (!D)
continue;
// FIXME: Handle array fields of structs.
if (!D->getType()->isArrayType())
continue;
const Expr *Init = D->getInit();
if (!Init)
continue;
if (!isa<StringLiteral>(Init))
continue;
Loc VarLoc = state->getLValue(D, C.getLocationContext());
const MemRegion *MR = VarLoc.getAsRegion();
if (!MR)
continue;
SVal StrVal = state->getSVal(Init, C.getLocationContext());
assert(StrVal.isValid() && "Initializer string is unknown or undefined");
DefinedOrUnknownSVal strLength =
getCStringLength(C, state, Init, StrVal).castAs<DefinedOrUnknownSVal>();
state = state->set<CStringLength>(MR, strLength);
}
C.addTransition(state);
}
bool CStringChecker::wantsRegionChangeUpdate(ProgramStateRef state) const {
CStringLengthTy Entries = state->get<CStringLength>();
return !Entries.isEmpty();
}
ProgramStateRef
CStringChecker::checkRegionChanges(ProgramStateRef state,
const InvalidatedSymbols *,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
const CallEvent *Call) const {
CStringLengthTy Entries = state->get<CStringLength>();
if (Entries.isEmpty())
return state;
llvm::SmallPtrSet<const MemRegion *, 8> Invalidated;
llvm::SmallPtrSet<const MemRegion *, 32> SuperRegions;
// First build sets for the changed regions and their super-regions.
for (ArrayRef<const MemRegion *>::iterator
I = Regions.begin(), E = Regions.end(); I != E; ++I) {
const MemRegion *MR = *I;
Invalidated.insert(MR);
SuperRegions.insert(MR);
while (const SubRegion *SR = dyn_cast<SubRegion>(MR)) {
MR = SR->getSuperRegion();
SuperRegions.insert(MR);
}
}
CStringLengthTy::Factory &F = state->get_context<CStringLength>();
// Then loop over the entries in the current state.
for (CStringLengthTy::iterator I = Entries.begin(),
E = Entries.end(); I != E; ++I) {
const MemRegion *MR = I.getKey();
// Is this entry for a super-region of a changed region?
if (SuperRegions.count(MR)) {
Entries = F.remove(Entries, MR);
continue;
}
// Is this entry for a sub-region of a changed region?
const MemRegion *Super = MR;
while (const SubRegion *SR = dyn_cast<SubRegion>(Super)) {
Super = SR->getSuperRegion();
if (Invalidated.count(Super)) {
Entries = F.remove(Entries, MR);
break;
}
}
}
return state->set<CStringLength>(Entries);
}
void CStringChecker::checkLiveSymbols(ProgramStateRef state,
SymbolReaper &SR) const {
// Mark all symbols in our string length map as valid.
CStringLengthTy Entries = state->get<CStringLength>();
for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end();
I != E; ++I) {
SVal Len = I.getData();
for (SymExpr::symbol_iterator si = Len.symbol_begin(),
se = Len.symbol_end(); si != se; ++si)
SR.markInUse(*si);
}
}
void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
if (!SR.hasDeadSymbols())
return;
ProgramStateRef state = C.getState();
CStringLengthTy Entries = state->get<CStringLength>();
if (Entries.isEmpty())
return;
CStringLengthTy::Factory &F = state->get_context<CStringLength>();
for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end();
I != E; ++I) {
SVal Len = I.getData();
if (SymbolRef Sym = Len.getAsSymbol()) {
if (SR.isDead(Sym))
Entries = F.remove(Entries, I.getKey());
}
}
state = state->set<CStringLength>(Entries);
C.addTransition(state);
}
#define REGISTER_CHECKER(name) \
void ento::register##name(CheckerManager &mgr) { \
CStringChecker *checker = mgr.registerChecker<CStringChecker>(); \
checker->Filter.Check##name = true; \
checker->Filter.CheckName##name = mgr.getCurrentCheckName(); \
}
REGISTER_CHECKER(CStringNullArg)
REGISTER_CHECKER(CStringOutOfBounds)
REGISTER_CHECKER(CStringBufferOverlap)
REGISTER_CHECKER(CStringNotNullTerm)
void ento::registerCStringCheckerBasic(CheckerManager &Mgr) {
registerCStringNullArg(Mgr);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp | //===--- NonNullParamChecker.cpp - Undefined arguments checker -*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This defines NonNullParamChecker, which checks for arguments expected not to
// be null due to:
// - the corresponding parameters being declared to have the nonnull attribute
// - the corresponding parameters being references, since the call would form
// a reference to a null pointer
//
//===----------------------------------------------------------------------===//
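//
// An illustrative sketch (not part of the original source) of code this
// checker is designed to flag:
//
//   void f(int *p) __attribute__((nonnull));
//   void g(int &r);
//   ...
//   f(nullptr);          // warn: null passed to a 'nonnull' parameter
//   int *q = nullptr;
//   g(*q);               // warn: forming a reference to a null pointer
//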
#include "ClangSACheckers.h"
#include "clang/AST/Attr.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
using namespace clang;
using namespace ento;
namespace {
class NonNullParamChecker
: public Checker< check::PreCall > {
mutable std::unique_ptr<BugType> BTAttrNonNull;
mutable std::unique_ptr<BugType> BTNullRefArg;
public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
std::unique_ptr<BugReport>
genReportNullAttrNonNull(const ExplodedNode *ErrorN, const Expr *ArgE) const;
std::unique_ptr<BugReport>
genReportReferenceToNullPointer(const ExplodedNode *ErrorN,
const Expr *ArgE) const;
};
} // end anonymous namespace
void NonNullParamChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
const Decl *FD = Call.getDecl();
if (!FD)
return;
// Merge all non-null attributes
unsigned NumArgs = Call.getNumArgs();
llvm::SmallBitVector AttrNonNull(NumArgs);
for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
if (!NonNull->args_size()) {
AttrNonNull.set(0, NumArgs);
break;
}
for (unsigned Val : NonNull->args()) {
if (Val >= NumArgs)
continue;
AttrNonNull.set(Val);
}
}
ProgramStateRef state = C.getState();
CallEvent::param_type_iterator TyI = Call.param_type_begin(),
TyE = Call.param_type_end();
for (unsigned idx = 0; idx < NumArgs; ++idx) {
// Check if the parameter is a reference. We want to report when a reference
// to a null pointer is passed as a parameter.
bool haveRefTypeParam = false;
if (TyI != TyE) {
haveRefTypeParam = (*TyI)->isReferenceType();
TyI++;
}
bool haveAttrNonNull = AttrNonNull[idx];
if (!haveAttrNonNull) {
// Check if the parameter is also marked 'nonnull'.
ArrayRef<ParmVarDecl*> parms = Call.parameters();
if (idx < parms.size())
haveAttrNonNull = parms[idx]->hasAttr<NonNullAttr>();
}
if (!haveRefTypeParam && !haveAttrNonNull)
continue;
// If the value is unknown or undefined, we can't perform this check.
const Expr *ArgE = Call.getArgExpr(idx);
SVal V = Call.getArgSVal(idx);
Optional<DefinedSVal> DV = V.getAs<DefinedSVal>();
if (!DV)
continue;
// Process the case when the argument is not a location.
assert(!haveRefTypeParam || DV->getAs<Loc>());
if (haveAttrNonNull && !DV->getAs<Loc>()) {
// If the argument is a union type, we want to handle a potential
// transparent_union GCC extension.
if (!ArgE)
continue;
QualType T = ArgE->getType();
const RecordType *UT = T->getAsUnionType();
if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
continue;
if (Optional<nonloc::CompoundVal> CSV =
DV->getAs<nonloc::CompoundVal>()) {
nonloc::CompoundVal::iterator CSV_I = CSV->begin();
assert(CSV_I != CSV->end());
V = *CSV_I;
DV = V.getAs<DefinedSVal>();
assert(++CSV_I == CSV->end());
// FIXME: Handle (some_union){ some_other_union_val }, which turns into
// a LazyCompoundVal inside a CompoundVal.
if (!V.getAs<Loc>())
continue;
// Retrieve the corresponding expression.
if (const CompoundLiteralExpr *CE = dyn_cast<CompoundLiteralExpr>(ArgE))
if (const InitListExpr *IE =
dyn_cast<InitListExpr>(CE->getInitializer()))
ArgE = dyn_cast<Expr>(*(IE->begin()));
} else {
// FIXME: Handle LazyCompoundVals?
continue;
}
}
ConstraintManager &CM = C.getConstraintManager();
ProgramStateRef stateNotNull, stateNull;
std::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
if (stateNull && !stateNotNull) {
// Generate an error node. Check for a null node in case
// we cache out.
if (ExplodedNode *errorNode = C.generateSink(stateNull)) {
std::unique_ptr<BugReport> R;
if (haveAttrNonNull)
R = genReportNullAttrNonNull(errorNode, ArgE);
else if (haveRefTypeParam)
R = genReportReferenceToNullPointer(errorNode, ArgE);
// Highlight the range of the argument that was null.
R->addRange(Call.getArgSourceRange(idx));
// Emit the bug report.
C.emitReport(std::move(R));
}
// Always return. Either we cached out or we just emitted an error.
return;
}
// If a pointer value passed the check we should assume that it is
// indeed not null from this point forward.
assert(stateNotNull);
state = stateNotNull;
}
// If we reach here, all of the arguments passed the nonnull check.
// If 'state' has been updated, generate a new node.
C.addTransition(state);
}
std::unique_ptr<BugReport>
NonNullParamChecker::genReportNullAttrNonNull(const ExplodedNode *ErrorNode,
const Expr *ArgE) const {
// Lazily allocate the BugType object if it hasn't already been
// created. Ownership is transferred to the BugReporter object once
// the BugReport is passed to 'EmitWarning'.
if (!BTAttrNonNull)
BTAttrNonNull.reset(new BugType(
this, "Argument with 'nonnull' attribute passed null", "API"));
auto R = llvm::make_unique<BugReport>(
*BTAttrNonNull,
"Null pointer passed as an argument to a 'nonnull' parameter", ErrorNode);
if (ArgE)
bugreporter::trackNullOrUndefValue(ErrorNode, ArgE, *R);
return R;
}
std::unique_ptr<BugReport> NonNullParamChecker::genReportReferenceToNullPointer(
const ExplodedNode *ErrorNode, const Expr *ArgE) const {
if (!BTNullRefArg)
BTNullRefArg.reset(new BuiltinBug(this, "Dereference of null pointer"));
auto R = llvm::make_unique<BugReport>(
*BTNullRefArg, "Forming reference to null pointer", ErrorNode);
if (ArgE) {
const Expr *ArgEDeref = bugreporter::getDerefExpr(ArgE);
if (!ArgEDeref)
ArgEDeref = ArgE;
bugreporter::trackNullOrUndefValue(ErrorNode,
ArgEDeref,
*R);
}
return R;
}
void ento::registerNonNullParamChecker(CheckerManager &mgr) {
mgr.registerChecker<NonNullParamChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp | //=== FixedAddressChecker.cpp - Fixed address usage checker ----*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines FixedAddressChecker, a builtin checker that checks for
// assignment of a fixed address to a pointer.
// This check corresponds to CWE-587.
//
//===----------------------------------------------------------------------===//
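//
// An illustrative sketch (not part of the original source) of the pattern
// this checker flags:
//
//   int *p;
//   p = (int *)0x10000;  // warn: assignment of a fixed address to a pointer
//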
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
using namespace clang;
using namespace ento;
namespace {
class FixedAddressChecker
: public Checker< check::PreStmt<BinaryOperator> > {
mutable std::unique_ptr<BuiltinBug> BT;
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
};
}
void FixedAddressChecker::checkPreStmt(const BinaryOperator *B,
CheckerContext &C) const {
// Using a fixed address is not portable because that address will probably
// not be valid in all environments or platforms.
if (B->getOpcode() != BO_Assign)
return;
QualType T = B->getType();
if (!T->isPointerType())
return;
ProgramStateRef state = C.getState();
SVal RV = state->getSVal(B->getRHS(), C.getLocationContext());
if (!RV.isConstant() || RV.isZeroConstant())
return;
if (ExplodedNode *N = C.addTransition()) {
if (!BT)
BT.reset(
new BuiltinBug(this, "Use fixed address",
"Using a fixed address is not portable because that "
"address will probably not be valid in all "
"environments or platforms."));
auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N);
R->addRange(B->getRHS()->getSourceRange());
C.emitReport(std::move(R));
}
}
void ento::registerFixedAddressChecker(CheckerManager &mgr) {
mgr.registerChecker<FixedAddressChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp | //== ReturnUndefChecker.cpp -------------------------------------*- C++ -*--==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines ReturnUndefChecker, which is a path-sensitive
// check which looks for undefined or garbage values being returned to the
// caller.
//
//===----------------------------------------------------------------------===//
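//
// An illustrative sketch (not part of the original source):
//
//   int f() {
//     int x;
//     return x;   // warn: undefined or garbage value returned to caller
//   }
//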
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
using namespace clang;
using namespace ento;
namespace {
class ReturnUndefChecker : public Checker< check::PreStmt<ReturnStmt> > {
mutable std::unique_ptr<BuiltinBug> BT_Undef;
mutable std::unique_ptr<BuiltinBug> BT_NullReference;
void emitUndef(CheckerContext &C, const Expr *RetE) const;
void checkReference(CheckerContext &C, const Expr *RetE,
DefinedOrUnknownSVal RetVal) const;
public:
void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
};
}
void ReturnUndefChecker::checkPreStmt(const ReturnStmt *RS,
CheckerContext &C) const {
const Expr *RetE = RS->getRetValue();
if (!RetE)
return;
SVal RetVal = C.getSVal(RetE);
const StackFrameContext *SFC = C.getStackFrame();
QualType RT = CallEvent::getDeclaredResultType(SFC->getDecl());
if (RetVal.isUndef()) {
// "return;" is modeled to evaluate to an UndefinedVal. Allow UndefinedVal
// to be returned in functions returning void to support this pattern:
// void foo() {
// return;
// }
// void test() {
// return foo();
// }
if (!RT.isNull() && RT->isVoidType())
return;
// Not all blocks have explicitly-specified return types; if the return type
// is not available, but the return value expression has 'void' type, assume
// Sema already checked it.
if (RT.isNull() && isa<BlockDecl>(SFC->getDecl()) &&
RetE->getType()->isVoidType())
return;
emitUndef(C, RetE);
return;
}
if (RT.isNull())
return;
if (RT->isReferenceType()) {
checkReference(C, RetE, RetVal.castAs<DefinedOrUnknownSVal>());
return;
}
}
static void emitBug(CheckerContext &C, BuiltinBug &BT, const Expr *RetE,
const Expr *TrackingE = nullptr) {
ExplodedNode *N = C.generateSink();
if (!N)
return;
auto Report = llvm::make_unique<BugReport>(BT, BT.getDescription(), N);
Report->addRange(RetE->getSourceRange());
bugreporter::trackNullOrUndefValue(N, TrackingE ? TrackingE : RetE, *Report);
C.emitReport(std::move(Report));
}
void ReturnUndefChecker::emitUndef(CheckerContext &C, const Expr *RetE) const {
if (!BT_Undef)
BT_Undef.reset(
new BuiltinBug(this, "Garbage return value",
"Undefined or garbage value returned to caller"));
emitBug(C, *BT_Undef, RetE);
}
void ReturnUndefChecker::checkReference(CheckerContext &C, const Expr *RetE,
DefinedOrUnknownSVal RetVal) const {
ProgramStateRef StNonNull, StNull;
std::tie(StNonNull, StNull) = C.getState()->assume(RetVal);
if (StNonNull) {
// Going forward, assume the location is non-null.
C.addTransition(StNonNull);
return;
}
// The return value is known to be null. Emit a bug report.
if (!BT_NullReference)
BT_NullReference.reset(new BuiltinBug(this, "Returning null reference"));
emitBug(C, *BT_NullReference, RetE, bugreporter::getDerefExpr(RetE));
}
void ento::registerReturnUndefChecker(CheckerManager &mgr) {
mgr.registerChecker<ReturnUndefChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp | //== TraversalChecker.cpp -------------------------------------- -*- C++ -*--=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These checkers print various aspects of the ExprEngine's traversal of the CFG
// as it builds the ExplodedGraph.
//
//===----------------------------------------------------------------------===//
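//
// For example (illustrative, not part of the original source), for each
// branch the dumpers below print the condition's line number and statement
// class, e.g. "12 IfStmt", and print "--END FUNCTION--" when the analysis
// of a function body finishes.
//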
#include "ClangSACheckers.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/StmtObjC.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/Support/raw_ostream.h"
//                                                                           //
///////////////////////////////////////////////////////////////////////////////
using namespace clang;
using namespace ento;
namespace {
class TraversalDumper : public Checker< check::BranchCondition,
check::EndFunction > {
public:
void checkBranchCondition(const Stmt *Condition, CheckerContext &C) const;
void checkEndFunction(CheckerContext &C) const;
};
}
void TraversalDumper::checkBranchCondition(const Stmt *Condition,
CheckerContext &C) const {
// Special-case Objective-C's for-in loop, which uses the entire loop as its
// condition. We just print the collection expression.
const Stmt *Parent = dyn_cast<ObjCForCollectionStmt>(Condition);
if (!Parent) {
const ParentMap &Parents = C.getLocationContext()->getParentMap();
Parent = Parents.getParent(Condition);
}
// It is mildly evil to print directly to llvm::outs() rather than emitting
// warnings, but this ensures things do not get filtered out by the rest of
// the static analyzer machinery.
SourceLocation Loc = Parent->getLocStart();
llvm::outs() << C.getSourceManager().getSpellingLineNumber(Loc) << " "
<< Parent->getStmtClassName() << "\n";
}
void TraversalDumper::checkEndFunction(CheckerContext &C) const {
llvm::outs() << "--END FUNCTION--\n";
}
void ento::registerTraversalDumper(CheckerManager &mgr) {
mgr.registerChecker<TraversalDumper>();
}
//------------------------------------------------------------------------------
namespace {
class CallDumper : public Checker< check::PreCall,
check::PostCall > {
public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
};
}
void CallDumper::checkPreCall(const CallEvent &Call, CheckerContext &C) const {
unsigned Indentation = 0;
for (const LocationContext *LC = C.getLocationContext()->getParent();
LC != nullptr; LC = LC->getParent())
++Indentation;
// It is mildly evil to print directly to llvm::outs() rather than emitting
// warnings, but this ensures things do not get filtered out by the rest of
// the static analyzer machinery.
llvm::outs().indent(Indentation);
Call.dump(llvm::outs());
}
void CallDumper::checkPostCall(const CallEvent &Call, CheckerContext &C) const {
const Expr *CallE = Call.getOriginExpr();
if (!CallE)
return;
unsigned Indentation = 0;
for (const LocationContext *LC = C.getLocationContext()->getParent();
LC != nullptr; LC = LC->getParent())
++Indentation;
// It is mildly evil to print directly to llvm::outs() rather than emitting
// warnings, but this ensures things do not get filtered out by the rest of
// the static analyzer machinery.
llvm::outs().indent(Indentation);
if (Call.getResultType()->isVoidType())
llvm::outs() << "Returning void\n";
else
llvm::outs() << "Returning " << C.getSVal(CallE) << "\n";
}
void ento::registerCallDumper(CheckerManager &mgr) {
mgr.registerChecker<CallDumper>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp | //===--- ClangCheckers.cpp - Provides builtin checkers ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Checkers/ClangCheckers.h"
#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
// FIXME: This is only necessary as long as there are checker registration
// functions that do additional work besides mgr.registerChecker<CLASS>().
// The only checkers that currently do this are:
// - NSAutoreleasePoolChecker
// - NSErrorChecker
// - ObjCAtSyncChecker
// It's probably worth including this information in Checkers.td to minimize
// boilerplate code.
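//
// For illustration (hypothetical entry, not taken from Checkers.inc), an
// entry such as
//   CHECKER("example.FixedAddress", FixedAddressChecker,
//           "FixedAddressChecker.cpp", "Warn about fixed addresses", 0, false)
// expands via the macro below to
//   registry.addChecker(registerFixedAddressChecker, "example.FixedAddress",
//                       "Warn about fixed addresses");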
#include "ClangSACheckers.h"
using namespace clang;
using namespace ento;
void ento::registerBuiltinCheckers(CheckerRegistry ®istry) {
#define GET_CHECKERS
#define CHECKER(FULLNAME,CLASS,DESCFILE,HELPTEXT,GROUPINDEX,HIDDEN) \
registry.addChecker(register##CLASS, FULLNAME, HELPTEXT);
#include "Checkers.inc"
#undef GET_CHECKERS
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp | //===- Chrootchecker.cpp -------- Basic security checks ----------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines chroot checker, which checks improper use of chroot.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/ImmutableMap.h"
using namespace clang;
using namespace ento;
namespace {
// Enum values that represent the jail state.
enum Kind { NO_CHROOT, ROOT_CHANGED, JAIL_ENTERED };
bool isRootChanged(intptr_t k) { return k == ROOT_CHANGED; }
//bool isJailEntered(intptr_t k) { return k == JAIL_ENTERED; }
// This checker checks improper use of chroot.
// The state transition:
// NO_CHROOT ---chroot(path)--> ROOT_CHANGED ---chdir(/) --> JAIL_ENTERED
//                                  |                               |
//         ROOT_CHANGED<--chdir(..)--                JAIL_ENTERED<--chdir(..)--
//                                  |                               |
//                      bug<--foo()--                     JAIL_ENTERED<--foo()--
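//
// An illustrative sketch (not part of the original source):
//   chroot("/var/jail");
//   f();   // warn: no call of chdir("/") immediately after chroot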
class ChrootChecker : public Checker<eval::Call, check::PreStmt<CallExpr> > {
mutable IdentifierInfo *II_chroot, *II_chdir;
// This bug refers to possibly breaking out of a chroot() jail.
mutable std::unique_ptr<BuiltinBug> BT_BreakJail;
public:
ChrootChecker() : II_chroot(nullptr), II_chdir(nullptr) {}
static void *getTag() {
static int x;
return &x;
}
bool evalCall(const CallExpr *CE, CheckerContext &C) const;
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
private:
void Chroot(CheckerContext &C, const CallExpr *CE) const;
void Chdir(CheckerContext &C, const CallExpr *CE) const;
};
} // end anonymous namespace
bool ChrootChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return false;
ASTContext &Ctx = C.getASTContext();
if (!II_chroot)
II_chroot = &Ctx.Idents.get("chroot");
if (!II_chdir)
II_chdir = &Ctx.Idents.get("chdir");
if (FD->getIdentifier() == II_chroot) {
Chroot(C, CE);
return true;
}
if (FD->getIdentifier() == II_chdir) {
Chdir(C, CE);
return true;
}
return false;
}
void ChrootChecker::Chroot(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
ProgramStateManager &Mgr = state->getStateManager();
// Once we encounter a chroot() call, set the enum value ROOT_CHANGED directly
// in the GDM.
state = Mgr.addGDM(state, ChrootChecker::getTag(), (void*) ROOT_CHANGED);
C.addTransition(state);
}
void ChrootChecker::Chdir(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
ProgramStateManager &Mgr = state->getStateManager();
// If there is no jail state in the GDM, just return.
const void *k = state->FindGDM(ChrootChecker::getTag());
if (!k)
return;
// After chdir("/"), enter the jail, set the enum value JAIL_ENTERED.
const Expr *ArgExpr = CE->getArg(0);
SVal ArgVal = state->getSVal(ArgExpr, C.getLocationContext());
if (const MemRegion *R = ArgVal.getAsRegion()) {
R = R->StripCasts();
if (const StringRegion *StrRegion = dyn_cast<StringRegion>(R)) {
const StringLiteral *Str = StrRegion->getStringLiteral();
if (Str->getString() == "/")
state = Mgr.addGDM(state, ChrootChecker::getTag(),
(void*) JAIL_ENTERED);
}
}
C.addTransition(state);
}
// Check the jail state before any function call except chroot and chdir().
void ChrootChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return;
ASTContext &Ctx = C.getASTContext();
if (!II_chroot)
II_chroot = &Ctx.Idents.get("chroot");
if (!II_chdir)
II_chdir = &Ctx.Idents.get("chdir");
// Ignore chroot and chdir.
if (FD->getIdentifier() == II_chroot || FD->getIdentifier() == II_chdir)
return;
// If jail state is ROOT_CHANGED, generate BugReport.
void *const* k = C.getState()->FindGDM(ChrootChecker::getTag());
if (k)
if (isRootChanged((intptr_t) *k))
if (ExplodedNode *N = C.addTransition()) {
if (!BT_BreakJail)
BT_BreakJail.reset(new BuiltinBug(
this, "Break out of jail", "No call of chdir(\"/\") immediately "
"after chroot"));
C.emitReport(llvm::make_unique<BugReport>(
*BT_BreakJail, BT_BreakJail->getDescription(), N));
}
return;
}
void ento::registerChrootChecker(CheckerManager &mgr) {
mgr.registerChecker<ChrootChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp | //==- ExprInspectionChecker.cpp - Used for regression tests ------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
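//
// An illustrative sketch (not part of the original source) of how these
// hooks are used in analyzer regression tests:
//
//   void clang_analyzer_eval(int);
//   void test(int x) {
//     clang_analyzer_eval(x == 3);  // prints TRUE, FALSE, UNKNOWN, or UNDEFINED
//   }
//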
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
using namespace ento;
namespace {
class ExprInspectionChecker : public Checker< eval::Call > {
mutable std::unique_ptr<BugType> BT;
void analyzerEval(const CallExpr *CE, CheckerContext &C) const;
void analyzerCheckInlined(const CallExpr *CE, CheckerContext &C) const;
void analyzerWarnIfReached(const CallExpr *CE, CheckerContext &C) const;
void analyzerCrash(const CallExpr *CE, CheckerContext &C) const;
typedef void (ExprInspectionChecker::*FnCheck)(const CallExpr *,
CheckerContext &C) const;
public:
bool evalCall(const CallExpr *CE, CheckerContext &C) const;
};
}
bool ExprInspectionChecker::evalCall(const CallExpr *CE,
CheckerContext &C) const {
// These checks should have no effect on the surrounding environment
// (globals should not be invalidated, etc), hence the use of evalCall.
FnCheck Handler = llvm::StringSwitch<FnCheck>(C.getCalleeName(CE))
.Case("clang_analyzer_eval", &ExprInspectionChecker::analyzerEval)
.Case("clang_analyzer_checkInlined",
&ExprInspectionChecker::analyzerCheckInlined)
.Case("clang_analyzer_crash", &ExprInspectionChecker::analyzerCrash)
.Case("clang_analyzer_warnIfReached", &ExprInspectionChecker::analyzerWarnIfReached)
.Default(nullptr);
if (!Handler)
return false;
(this->*Handler)(CE, C);
return true;
}
static const char *getArgumentValueString(const CallExpr *CE,
CheckerContext &C) {
if (CE->getNumArgs() == 0)
return "Missing assertion argument";
ExplodedNode *N = C.getPredecessor();
const LocationContext *LC = N->getLocationContext();
ProgramStateRef State = N->getState();
const Expr *Assertion = CE->getArg(0);
SVal AssertionVal = State->getSVal(Assertion, LC);
if (AssertionVal.isUndef())
return "UNDEFINED";
ProgramStateRef StTrue, StFalse;
std::tie(StTrue, StFalse) =
State->assume(AssertionVal.castAs<DefinedOrUnknownSVal>());
if (StTrue) {
if (StFalse)
return "UNKNOWN";
else
return "TRUE";
} else {
if (StFalse)
return "FALSE";
else
llvm_unreachable("Invalid constraint; neither true nor false.");
}
}
void ExprInspectionChecker::analyzerEval(const CallExpr *CE,
CheckerContext &C) const {
ExplodedNode *N = C.getPredecessor();
const LocationContext *LC = N->getLocationContext();
// A specific instantiation of an inlined function may have more constrained
// values than can generally be assumed. Skip the check.
if (LC->getCurrentStackFrame()->getParent() != nullptr)
return;
if (!BT)
BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
C.emitReport(
llvm::make_unique<BugReport>(*BT, getArgumentValueString(CE, C), N));
}
void ExprInspectionChecker::analyzerWarnIfReached(const CallExpr *CE,
CheckerContext &C) const {
ExplodedNode *N = C.getPredecessor();
if (!BT)
BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
C.emitReport(llvm::make_unique<BugReport>(*BT, "REACHABLE", N));
}
void ExprInspectionChecker::analyzerCheckInlined(const CallExpr *CE,
CheckerContext &C) const {
ExplodedNode *N = C.getPredecessor();
const LocationContext *LC = N->getLocationContext();
// An inlined function could conceivably also be analyzed as a top-level
// function. We ignore this case and only emit a message (TRUE or FALSE)
// when we are analyzing it as an inlined function. This means that
// clang_analyzer_checkInlined(true) should always print TRUE, but
// clang_analyzer_checkInlined(false) should never actually print anything.
if (LC->getCurrentStackFrame()->getParent() == nullptr)
return;
if (!BT)
BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
C.emitReport(
llvm::make_unique<BugReport>(*BT, getArgumentValueString(CE, C), N));
}
void ExprInspectionChecker::analyzerCrash(const CallExpr *CE,
CheckerContext &C) const {
LLVM_BUILTIN_TRAP;
}
void ento::registerExprInspectionChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ExprInspectionChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp | //== ObjCAtSyncChecker.cpp - nil mutex checker for @synchronized -*- C++ -*--=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This defines ObjCAtSyncChecker, a builtin check that checks for null pointers
// used as mutexes for @synchronized.
//
//===----------------------------------------------------------------------===//
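//
// An illustrative sketch (not part of the original source, Objective-C):
//
//   id lock = nil;
//   @synchronized(lock) { ... }  // warn: nil value used as mutex
//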
#include "ClangSACheckers.h"
#include "clang/AST/StmtObjC.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
using namespace ento;
namespace {
class ObjCAtSyncChecker
: public Checker< check::PreStmt<ObjCAtSynchronizedStmt> > {
mutable std::unique_ptr<BuiltinBug> BT_null;
mutable std::unique_ptr<BuiltinBug> BT_undef;
public:
void checkPreStmt(const ObjCAtSynchronizedStmt *S, CheckerContext &C) const;
};
} // end anonymous namespace
void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
CheckerContext &C) const {
const Expr *Ex = S->getSynchExpr();
ProgramStateRef state = C.getState();
SVal V = state->getSVal(Ex, C.getLocationContext());
// Uninitialized value used for the mutex?
if (V.getAs<UndefinedVal>()) {
if (ExplodedNode *N = C.generateSink()) {
if (!BT_undef)
BT_undef.reset(new BuiltinBug(this, "Uninitialized value used as mutex "
"for @synchronized"));
auto report =
llvm::make_unique<BugReport>(*BT_undef, BT_undef->getDescription(), N);
bugreporter::trackNullOrUndefValue(N, Ex, *report);
C.emitReport(std::move(report));
}
return;
}
if (V.isUnknown())
return;
// Check for null mutexes.
ProgramStateRef notNullState, nullState;
std::tie(notNullState, nullState) = state->assume(V.castAs<DefinedSVal>());
if (nullState) {
if (!notNullState) {
// Generate an error node. This isn't a sink since
// a null mutex just means no synchronization occurs.
if (ExplodedNode *N = C.addTransition(nullState)) {
if (!BT_null)
BT_null.reset(new BuiltinBug(
this, "Nil value used as mutex for @synchronized() "
"(no synchronization will occur)"));
auto report =
llvm::make_unique<BugReport>(*BT_null, BT_null->getDescription(), N);
bugreporter::trackNullOrUndefValue(N, Ex, *report);
C.emitReport(std::move(report));
return;
}
}
// Don't add a transition for 'nullState'. If the value is
// under-constrained to be null or non-null, assume it is non-null
// afterwards.
}
if (notNullState)
C.addTransition(notNullState);
}
void ento::registerObjCAtSyncChecker(CheckerManager &mgr) {
if (mgr.getLangOpts().ObjC2)
mgr.registerChecker<ObjCAtSyncChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp | //== TestAfterDivZeroChecker.cpp - Test after division by zero checker --*--==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This defines TestAfterDivZeroChecker, a builtin check that performs checks
// for division by zero where the division occurs before comparison with zero.
//
//===----------------------------------------------------------------------===//
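//
// An illustrative sketch (not part of the original source):
//
//   int f(int n) {
//     int r = 100 / n;   // the division happens first
//     if (n == 0)        // warn: n has already been used for division
//       return 0;
//     return r;
//   }
//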
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/ADT/FoldingSet.h"
using namespace clang;
using namespace ento;
namespace {
class ZeroState {
private:
SymbolRef ZeroSymbol;
unsigned BlockID;
const StackFrameContext *SFC;
public:
ZeroState(SymbolRef S, unsigned B, const StackFrameContext *SFC)
: ZeroSymbol(S), BlockID(B), SFC(SFC) {}
const StackFrameContext *getStackFrameContext() const { return SFC; }
bool operator==(const ZeroState &X) const {
return BlockID == X.BlockID && SFC == X.SFC && ZeroSymbol == X.ZeroSymbol;
}
bool operator<(const ZeroState &X) const {
if (BlockID != X.BlockID)
return BlockID < X.BlockID;
if (SFC != X.SFC)
return SFC < X.SFC;
return ZeroSymbol < X.ZeroSymbol;
}
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddInteger(BlockID);
ID.AddPointer(SFC);
ID.AddPointer(ZeroSymbol);
}
};
class DivisionBRVisitor : public BugReporterVisitorImpl<DivisionBRVisitor> {
private:
SymbolRef ZeroSymbol;
const StackFrameContext *SFC;
bool Satisfied;
public:
DivisionBRVisitor(SymbolRef ZeroSymbol, const StackFrameContext *SFC)
: ZeroSymbol(ZeroSymbol), SFC(SFC), Satisfied(false) {}
void Profile(llvm::FoldingSetNodeID &ID) const override {
ID.Add(ZeroSymbol);
ID.Add(SFC);
}
PathDiagnosticPiece *VisitNode(const ExplodedNode *Succ,
const ExplodedNode *Pred,
BugReporterContext &BRC,
BugReport &BR) override;
};
class TestAfterDivZeroChecker
: public Checker<check::PreStmt<BinaryOperator>, check::BranchCondition,
check::EndFunction> {
mutable std::unique_ptr<BuiltinBug> DivZeroBug;
void reportBug(SVal Val, CheckerContext &C) const;
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
void checkBranchCondition(const Stmt *Condition, CheckerContext &C) const;
void checkEndFunction(CheckerContext &C) const;
void setDivZeroMap(SVal Var, CheckerContext &C) const;
bool hasDivZeroMap(SVal Var, const CheckerContext &C) const;
bool isZero(SVal S, CheckerContext &C) const;
};
} // end anonymous namespace
REGISTER_SET_WITH_PROGRAMSTATE(DivZeroMap, ZeroState)
PathDiagnosticPiece *DivisionBRVisitor::VisitNode(const ExplodedNode *Succ,
const ExplodedNode *Pred,
BugReporterContext &BRC,
BugReport &BR) {
if (Satisfied)
return nullptr;
const Expr *E = nullptr;
if (Optional<PostStmt> P = Succ->getLocationAs<PostStmt>())
if (const BinaryOperator *BO = P->getStmtAs<BinaryOperator>()) {
BinaryOperator::Opcode Op = BO->getOpcode();
if (Op == BO_Div || Op == BO_Rem || Op == BO_DivAssign ||
Op == BO_RemAssign) {
E = BO->getRHS();
}
}
if (!E)
return nullptr;
ProgramStateRef State = Succ->getState();
SVal S = State->getSVal(E, Succ->getLocationContext());
if (ZeroSymbol == S.getAsSymbol() && SFC == Succ->getStackFrame()) {
Satisfied = true;
// Construct a new PathDiagnosticPiece.
ProgramPoint P = Succ->getLocation();
PathDiagnosticLocation L =
PathDiagnosticLocation::create(P, BRC.getSourceManager());
if (!L.isValid() || !L.asLocation().isValid())
return nullptr;
return new PathDiagnosticEventPiece(
L, "Division with compared value made here");
}
return nullptr;
}
bool TestAfterDivZeroChecker::isZero(SVal S, CheckerContext &C) const {
Optional<DefinedSVal> DSV = S.getAs<DefinedSVal>();
if (!DSV)
return false;
ConstraintManager &CM = C.getConstraintManager();
return !CM.assume(C.getState(), *DSV, true);
}
void TestAfterDivZeroChecker::setDivZeroMap(SVal Var, CheckerContext &C) const {
SymbolRef SR = Var.getAsSymbol();
if (!SR)
return;
ProgramStateRef State = C.getState();
State =
State->add<DivZeroMap>(ZeroState(SR, C.getBlockID(), C.getStackFrame()));
C.addTransition(State);
}
bool TestAfterDivZeroChecker::hasDivZeroMap(SVal Var,
const CheckerContext &C) const {
SymbolRef SR = Var.getAsSymbol();
if (!SR)
return false;
ZeroState ZS(SR, C.getBlockID(), C.getStackFrame());
return C.getState()->contains<DivZeroMap>(ZS);
}
void TestAfterDivZeroChecker::reportBug(SVal Val, CheckerContext &C) const {
if (ExplodedNode *N = C.generateSink(C.getState())) {
if (!DivZeroBug)
DivZeroBug.reset(new BuiltinBug(this, "Division by zero"));
auto R = llvm::make_unique<BugReport>(
*DivZeroBug, "Value being compared against zero has already been used "
"for division",
N);
R->addVisitor(llvm::make_unique<DivisionBRVisitor>(Val.getAsSymbol(),
C.getStackFrame()));
C.emitReport(std::move(R));
}
}
void TestAfterDivZeroChecker::checkEndFunction(CheckerContext &C) const {
ProgramStateRef State = C.getState();
DivZeroMapTy DivZeroes = State->get<DivZeroMap>();
if (DivZeroes.isEmpty())
return;
DivZeroMapTy::Factory &F = State->get_context<DivZeroMap>();
for (llvm::ImmutableSet<ZeroState>::iterator I = DivZeroes.begin(),
E = DivZeroes.end();
I != E; ++I) {
ZeroState ZS = *I;
if (ZS.getStackFrameContext() == C.getStackFrame())
DivZeroes = F.remove(DivZeroes, ZS);
}
C.addTransition(State->set<DivZeroMap>(DivZeroes));
}
void TestAfterDivZeroChecker::checkPreStmt(const BinaryOperator *B,
CheckerContext &C) const {
BinaryOperator::Opcode Op = B->getOpcode();
if (Op == BO_Div || Op == BO_Rem || Op == BO_DivAssign ||
Op == BO_RemAssign) {
SVal S = C.getSVal(B->getRHS());
if (!isZero(S, C))
setDivZeroMap(S, C);
}
}
void TestAfterDivZeroChecker::checkBranchCondition(const Stmt *Condition,
CheckerContext &C) const {
if (const BinaryOperator *B = dyn_cast<BinaryOperator>(Condition)) {
if (B->isComparisonOp()) {
const IntegerLiteral *IntLiteral = dyn_cast<IntegerLiteral>(B->getRHS());
bool LRHS = true;
if (!IntLiteral) {
IntLiteral = dyn_cast<IntegerLiteral>(B->getLHS());
LRHS = false;
}
if (!IntLiteral || IntLiteral->getValue() != 0)
return;
SVal Val = C.getSVal(LRHS ? B->getLHS() : B->getRHS());
if (hasDivZeroMap(Val, C))
reportBug(Val, C);
}
} else if (const UnaryOperator *U = dyn_cast<UnaryOperator>(Condition)) {
if (U->getOpcode() == UO_LNot) {
SVal Val;
if (const ImplicitCastExpr *I =
dyn_cast<ImplicitCastExpr>(U->getSubExpr()))
Val = C.getSVal(I->getSubExpr());
if (hasDivZeroMap(Val, C))
reportBug(Val, C);
else {
Val = C.getSVal(U->getSubExpr());
if (hasDivZeroMap(Val, C))
reportBug(Val, C);
}
}
} else if (const ImplicitCastExpr *IE =
dyn_cast<ImplicitCastExpr>(Condition)) {
SVal Val = C.getSVal(IE->getSubExpr());
if (hasDivZeroMap(Val, C))
reportBug(Val, C);
else {
SVal Val = C.getSVal(Condition);
if (hasDivZeroMap(Val, C))
reportBug(Val, C);
}
}
}
void ento::registerTestAfterDivZeroChecker(CheckerManager &mgr) {
mgr.registerChecker<TestAfterDivZeroChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp | //=- DirectIvarAssignment.cpp - Check rules on ObjC properties -*- C++ ----*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Check that Objective-C properties are set with the setter, not through a
// direct assignment.
//
// Two versions of a checker exist: one that checks all methods and the other
// that only checks the methods annotated with
// __attribute__((annotate("objc_no_direct_instance_variable_assignment")))
//
// The checker does not warn about assignments to Ivars, annotated with
// __attribute__((objc_allow_direct_instance_variable_assignment"))). This
// annotation serves as a false positive suppression mechanism for the
// checker. The annotation is allowed on properties and Ivars.
//
//===----------------------------------------------------------------------===//
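//
// An illustrative sketch (not part of the original source, Objective-C):
//
//   @property (nonatomic, copy) NSString *name;
//   ...
//   _name = newName;     // warn: direct assignment to the property's ivar
//   self.name = newName; // OK: goes through the setter
//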
#include "ClangSACheckers.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "llvm/ADT/DenseMap.h"
using namespace clang;
using namespace ento;
namespace {
/// The default method filter, which is used to filter out the methods on which
/// the check should not be performed.
///
/// Checks for the init, dealloc, and any other functions that might be allowed
/// to perform direct instance variable assignment based on their name.
static bool DefaultMethodFilter(const ObjCMethodDecl *M) {
if (M->getMethodFamily() == OMF_init || M->getMethodFamily() == OMF_dealloc ||
M->getMethodFamily() == OMF_copy ||
M->getMethodFamily() == OMF_mutableCopy ||
M->getSelector().getNameForSlot(0).find("init") != StringRef::npos ||
M->getSelector().getNameForSlot(0).find("Init") != StringRef::npos)
return true;
return false;
}
class DirectIvarAssignment :
public Checker<check::ASTDecl<ObjCImplementationDecl> > {
typedef llvm::DenseMap<const ObjCIvarDecl*,
const ObjCPropertyDecl*> IvarToPropertyMapTy;
/// A helper class, which walks the AST and locates all assignments to ivars
/// in the given function.
class MethodCrawler : public ConstStmtVisitor<MethodCrawler> {
const IvarToPropertyMapTy &IvarToPropMap;
const ObjCMethodDecl *MD;
const ObjCInterfaceDecl *InterfD;
BugReporter &BR;
const CheckerBase *Checker;
LocationOrAnalysisDeclContext DCtx;
public:
MethodCrawler(const IvarToPropertyMapTy &InMap, const ObjCMethodDecl *InMD,
const ObjCInterfaceDecl *InID, BugReporter &InBR,
const CheckerBase *Checker, AnalysisDeclContext *InDCtx)
: IvarToPropMap(InMap), MD(InMD), InterfD(InID), BR(InBR),
Checker(Checker), DCtx(InDCtx) {}
void VisitStmt(const Stmt *S) { VisitChildren(S); }
void VisitBinaryOperator(const BinaryOperator *BO);
void VisitChildren(const Stmt *S) {
for (const Stmt *Child : S->children())
if (Child)
this->Visit(Child);
}
};
public:
bool (*ShouldSkipMethod)(const ObjCMethodDecl *);
DirectIvarAssignment() : ShouldSkipMethod(&DefaultMethodFilter) {}
void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& Mgr,
BugReporter &BR) const;
};
static const ObjCIvarDecl *findPropertyBackingIvar(const ObjCPropertyDecl *PD,
const ObjCInterfaceDecl *InterD,
ASTContext &Ctx) {
// Check for synthesized ivars.
ObjCIvarDecl *ID = PD->getPropertyIvarDecl();
if (ID)
return ID;
ObjCInterfaceDecl *NonConstInterD = const_cast<ObjCInterfaceDecl*>(InterD);
// Check for existing "_PropName".
ID = NonConstInterD->lookupInstanceVariable(PD->getDefaultSynthIvarName(Ctx));
if (ID)
return ID;
// Check for existing "PropName".
IdentifierInfo *PropIdent = PD->getIdentifier();
ID = NonConstInterD->lookupInstanceVariable(PropIdent);
return ID;
}
void DirectIvarAssignment::checkASTDecl(const ObjCImplementationDecl *D,
AnalysisManager& Mgr,
BugReporter &BR) const {
const ObjCInterfaceDecl *InterD = D->getClassInterface();
IvarToPropertyMapTy IvarToPropMap;
// Find all properties for this class.
for (const auto *PD : InterD->properties()) {
// Find the corresponding IVar.
const ObjCIvarDecl *ID = findPropertyBackingIvar(PD, InterD,
Mgr.getASTContext());
if (!ID)
continue;
// Store the IVar to property mapping.
IvarToPropMap[ID] = PD;
}
if (IvarToPropMap.empty())
return;
for (const auto *M : D->instance_methods()) {
AnalysisDeclContext *DCtx = Mgr.getAnalysisDeclContext(M);
if ((*ShouldSkipMethod)(M))
continue;
const Stmt *Body = M->getBody();
assert(Body);
MethodCrawler MC(IvarToPropMap, M->getCanonicalDecl(), InterD, BR, this,
DCtx);
MC.VisitStmt(Body);
}
}
static bool isAnnotatedToAllowDirectAssignment(const Decl *D) {
for (const auto *Ann : D->specific_attrs<AnnotateAttr>())
if (Ann->getAnnotation() ==
"objc_allow_direct_instance_variable_assignment")
return true;
return false;
}
void DirectIvarAssignment::MethodCrawler::VisitBinaryOperator(
const BinaryOperator *BO) {
if (!BO->isAssignmentOp())
return;
const ObjCIvarRefExpr *IvarRef =
dyn_cast<ObjCIvarRefExpr>(BO->getLHS()->IgnoreParenCasts());
if (!IvarRef)
return;
if (const ObjCIvarDecl *D = IvarRef->getDecl()) {
IvarToPropertyMapTy::const_iterator I = IvarToPropMap.find(D);
if (I != IvarToPropMap.end()) {
const ObjCPropertyDecl *PD = I->second;
// Skip warnings on Ivars, annotated with
// objc_allow_direct_instance_variable_assignment. This annotation serves
// as a false positive suppression mechanism for the checker. The
// annotation is allowed on properties and ivars.
if (isAnnotatedToAllowDirectAssignment(PD) ||
isAnnotatedToAllowDirectAssignment(D))
return;
ObjCMethodDecl *GetterMethod =
InterfD->getInstanceMethod(PD->getGetterName());
ObjCMethodDecl *SetterMethod =
InterfD->getInstanceMethod(PD->getSetterName());
if (SetterMethod && SetterMethod->getCanonicalDecl() == MD)
return;
if (GetterMethod && GetterMethod->getCanonicalDecl() == MD)
return;
BR.EmitBasicReport(
MD, Checker, "Property access", categories::CoreFoundationObjectiveC,
"Direct assignment to an instance variable backing a property; "
"use the setter instead",
PathDiagnosticLocation(IvarRef, BR.getSourceManager(), DCtx));
}
}
}
}
// Register the checker that checks for direct accesses in all functions,
// except for the initialization and copy routines.
void ento::registerDirectIvarAssignment(CheckerManager &mgr) {
mgr.registerChecker<DirectIvarAssignment>();
}
// Register the checker that checks for direct accesses in functions annotated
// with __attribute__((annotate("objc_no_direct_instance_variable_assignment"))).
static bool AttrFilter(const ObjCMethodDecl *M) {
for (const auto *Ann : M->specific_attrs<AnnotateAttr>())
if (Ann->getAnnotation() == "objc_no_direct_instance_variable_assignment")
return false;
return true;
}
void ento::registerDirectIvarAssignmentForAnnotatedFunctions(
CheckerManager &mgr) {
mgr.registerChecker<DirectIvarAssignment>()->ShouldSkipMethod = &AttrFilter;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp | //===--- CallAndMessageChecker.cpp ------------------------------*- C++ -*--==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This defines CallAndMessageChecker, a builtin checker that checks for various
// errors of call and objc message expressions.
//
//===----------------------------------------------------------------------===//
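//
// An illustrative sketch (not part of the original source):
//
//   void f(int);
//   int x;   // never initialized
//   f(x);    // warn: function call argument is an uninitialized value
//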
#include "ClangSACheckers.h"
#include "clang/AST/ParentMap.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
namespace {
struct ChecksFilter {
DefaultBool Check_CallAndMessageUnInitRefArg;
DefaultBool Check_CallAndMessageChecker;
CheckName CheckName_CallAndMessageUnInitRefArg;
CheckName CheckName_CallAndMessageChecker;
};
class CallAndMessageChecker
: public Checker< check::PreStmt<CallExpr>,
check::PreStmt<CXXDeleteExpr>,
check::PreObjCMessage,
check::PreCall > {
mutable std::unique_ptr<BugType> BT_call_null;
mutable std::unique_ptr<BugType> BT_call_undef;
mutable std::unique_ptr<BugType> BT_cxx_call_null;
mutable std::unique_ptr<BugType> BT_cxx_call_undef;
mutable std::unique_ptr<BugType> BT_call_arg;
mutable std::unique_ptr<BugType> BT_cxx_delete_undef;
mutable std::unique_ptr<BugType> BT_msg_undef;
mutable std::unique_ptr<BugType> BT_objc_prop_undef;
mutable std::unique_ptr<BugType> BT_objc_subscript_undef;
mutable std::unique_ptr<BugType> BT_msg_arg;
mutable std::unique_ptr<BugType> BT_msg_ret;
mutable std::unique_ptr<BugType> BT_call_few_args;
public:
ChecksFilter Filter;
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
private:
bool PreVisitProcessArg(CheckerContext &C, SVal V, SourceRange ArgRange,
const Expr *ArgEx, bool IsFirstArgument,
bool CheckUninitFields, const CallEvent &Call,
std::unique_ptr<BugType> &BT,
const ParmVarDecl *ParamDecl) const;
static void emitBadCall(BugType *BT, CheckerContext &C, const Expr *BadE);
void emitNilReceiverBug(CheckerContext &C, const ObjCMethodCall &msg,
ExplodedNode *N) const;
void HandleNilReceiver(CheckerContext &C,
ProgramStateRef state,
const ObjCMethodCall &msg) const;
void LazyInit_BT(const char *desc, std::unique_ptr<BugType> &BT) const {
if (!BT)
BT.reset(new BuiltinBug(this, desc));
}
bool uninitRefOrPointer(CheckerContext &C, const SVal &V,
const SourceRange &ArgRange,
const Expr *ArgEx, std::unique_ptr<BugType> &BT,
const ParmVarDecl *ParamDecl, const char *BD) const;
};
} // end anonymous namespace
void CallAndMessageChecker::emitBadCall(BugType *BT, CheckerContext &C,
const Expr *BadE) {
ExplodedNode *N = C.generateSink();
if (!N)
return;
auto R = llvm::make_unique<BugReport>(*BT, BT->getName(), N);
if (BadE) {
R->addRange(BadE->getSourceRange());
if (BadE->isGLValue())
BadE = bugreporter::getDerefExpr(BadE);
bugreporter::trackNullOrUndefValue(N, BadE, *R);
}
C.emitReport(std::move(R));
}
static StringRef describeUninitializedArgumentInCall(const CallEvent &Call,
bool IsFirstArgument) {
switch (Call.getKind()) {
case CE_ObjCMessage: {
const ObjCMethodCall &Msg = cast<ObjCMethodCall>(Call);
switch (Msg.getMessageKind()) {
case OCM_Message:
return "Argument in message expression is an uninitialized value";
case OCM_PropertyAccess:
assert(Msg.isSetter() && "Getters have no args");
return "Argument for property setter is an uninitialized value";
case OCM_Subscript:
if (Msg.isSetter() && IsFirstArgument)
return "Argument for subscript setter is an uninitialized value";
return "Subscript index is an uninitialized value";
}
llvm_unreachable("Unknown message kind.");
}
case CE_Block:
return "Block call argument is an uninitialized value";
default:
return "Function call argument is an uninitialized value";
}
}
bool CallAndMessageChecker::uninitRefOrPointer(CheckerContext &C,
const SVal &V,
const SourceRange &ArgRange,
const Expr *ArgEx,
std::unique_ptr<BugType> &BT,
const ParmVarDecl *ParamDecl,
const char *BD) const {
if (!Filter.Check_CallAndMessageUnInitRefArg)
return false;
// No parameter declaration available, i.e. variadic function argument.
if (!ParamDecl)
return false;
// If the parameter is declared as a pointer to const in the function
// declaration, check whether the corresponding argument in the call points
// to an undefined symbol value (uninitialized memory).
StringRef Message;
if (ParamDecl->getType()->isPointerType()) {
Message = "Function call argument is a pointer to uninitialized value";
} else if (ParamDecl->getType()->isReferenceType()) {
Message = "Function call argument is an uninitialized value";
} else
return false;
if (!ParamDecl->getType()->getPointeeType().isConstQualified())
return false;
if (const MemRegion *SValMemRegion = V.getAsRegion()) {
const ProgramStateRef State = C.getState();
const SVal PSV = State->getSVal(SValMemRegion);
if (PSV.isUndef()) {
if (ExplodedNode *N = C.generateSink()) {
LazyInit_BT(BD, BT);
auto R = llvm::make_unique<BugReport>(*BT, Message, N);
R->addRange(ArgRange);
if (ArgEx) {
bugreporter::trackNullOrUndefValue(N, ArgEx, *R);
}
C.emitReport(std::move(R));
}
return true;
}
}
return false;
}
bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
SVal V,
SourceRange ArgRange,
const Expr *ArgEx,
bool IsFirstArgument,
bool CheckUninitFields,
const CallEvent &Call,
std::unique_ptr<BugType> &BT,
const ParmVarDecl *ParamDecl
) const {
const char *BD = "Uninitialized argument value";
if (uninitRefOrPointer(C, V, ArgRange, ArgEx, BT, ParamDecl, BD))
return true;
if (V.isUndef()) {
if (ExplodedNode *N = C.generateSink()) {
LazyInit_BT(BD, BT);
// Generate a report for this bug.
StringRef Desc =
describeUninitializedArgumentInCall(Call, IsFirstArgument);
auto R = llvm::make_unique<BugReport>(*BT, Desc, N);
R->addRange(ArgRange);
if (ArgEx)
bugreporter::trackNullOrUndefValue(N, ArgEx, *R);
C.emitReport(std::move(R));
}
return true;
}
if (!CheckUninitFields)
return false;
if (Optional<nonloc::LazyCompoundVal> LV =
V.getAs<nonloc::LazyCompoundVal>()) {
class FindUninitializedField {
public:
SmallVector<const FieldDecl *, 10> FieldChain;
private:
StoreManager &StoreMgr;
MemRegionManager &MrMgr;
Store store;
public:
FindUninitializedField(StoreManager &storeMgr,
MemRegionManager &mrMgr, Store s)
: StoreMgr(storeMgr), MrMgr(mrMgr), store(s) {}
bool Find(const TypedValueRegion *R) {
QualType T = R->getValueType();
if (const RecordType *RT = T->getAsStructureType()) {
const RecordDecl *RD = RT->getDecl()->getDefinition();
assert(RD && "Referred record has no definition");
for (const auto *I : RD->fields()) {
const FieldRegion *FR = MrMgr.getFieldRegion(I, R);
FieldChain.push_back(I);
T = I->getType();
if (T->getAsStructureType()) {
if (Find(FR))
return true;
}
else {
const SVal &V = StoreMgr.getBinding(store, loc::MemRegionVal(FR));
if (V.isUndef())
return true;
}
FieldChain.pop_back();
}
}
return false;
}
};
const LazyCompoundValData *D = LV->getCVData();
FindUninitializedField F(C.getState()->getStateManager().getStoreManager(),
C.getSValBuilder().getRegionManager(),
D->getStore());
if (F.Find(D->getRegion())) {
if (ExplodedNode *N = C.generateSink()) {
LazyInit_BT(BD, BT);
SmallString<512> Str;
llvm::raw_svector_ostream os(Str);
os << "Passed-by-value struct argument contains uninitialized data";
if (F.FieldChain.size() == 1)
os << " (e.g., field: '" << *F.FieldChain[0] << "')";
else {
os << " (e.g., via the field chain: '";
bool first = true;
for (SmallVectorImpl<const FieldDecl *>::iterator
DI = F.FieldChain.begin(), DE = F.FieldChain.end(); DI!=DE;++DI){
if (first)
first = false;
else
os << '.';
os << **DI;
}
os << "')";
}
// Generate a report for this bug.
auto R = llvm::make_unique<BugReport>(*BT, os.str(), N);
R->addRange(ArgRange);
// FIXME: enhance track back for uninitialized value for arbitrary
// memregions
C.emitReport(std::move(R));
}
return true;
}
}
return false;
}
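// A hypothetical example of the passed-by-value struct diagnostic emitted
// above (names are made up for illustration):
//
//   struct Pair { int x; int y; };
//   void takePair(struct Pair p);
//   void caller() {
//     struct Pair p;
//     p.x = 1;      // p.y is left uninitialized
//     takePair(p);  // "Passed-by-value struct argument contains
//                   //  uninitialized data (e.g., field: 'y')"
//   }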
void CallAndMessageChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const{
const Expr *Callee = CE->getCallee()->IgnoreParens();
ProgramStateRef State = C.getState();
const LocationContext *LCtx = C.getLocationContext();
SVal L = State->getSVal(Callee, LCtx);
if (L.isUndef()) {
if (!BT_call_undef)
BT_call_undef.reset(new BuiltinBug(
this, "Called function pointer is an uninitalized pointer value"));
emitBadCall(BT_call_undef.get(), C, Callee);
return;
}
ProgramStateRef StNonNull, StNull;
std::tie(StNonNull, StNull) = State->assume(L.castAs<DefinedOrUnknownSVal>());
if (StNull && !StNonNull) {
if (!BT_call_null)
BT_call_null.reset(new BuiltinBug(
this, "Called function pointer is null (null dereference)"));
emitBadCall(BT_call_null.get(), C, Callee);
return;
}
C.addTransition(StNonNull);
}
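// Hypothetical examples of the two diagnostics above:
//
//   void caller() {
//     void (*fp)(void);  // uninitialized
//     fp();              // "Called function pointer is an uninitialized
//                        //  pointer value"
//     void (*fq)(void) = 0;
//     fq();              // "Called function pointer is null (null
//                        //  dereference)"
//   }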
void CallAndMessageChecker::checkPreStmt(const CXXDeleteExpr *DE,
CheckerContext &C) const {
SVal Arg = C.getSVal(DE->getArgument());
if (Arg.isUndef()) {
StringRef Desc;
ExplodedNode *N = C.generateSink();
if (!N)
return;
if (!BT_cxx_delete_undef)
BT_cxx_delete_undef.reset(
new BuiltinBug(this, "Uninitialized argument value"));
if (DE->isArrayFormAsWritten())
Desc = "Argument to 'delete[]' is uninitialized";
else
Desc = "Argument to 'delete' is uninitialized";
BugType *BT = BT_cxx_delete_undef.get();
auto R = llvm::make_unique<BugReport>(*BT, Desc, N);
bugreporter::trackNullOrUndefValue(N, DE, *R);
C.emitReport(std::move(R));
return;
}
}
void CallAndMessageChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
// If this is a call to a C++ method, check if the callee is null or
// undefined.
if (const CXXInstanceCall *CC = dyn_cast<CXXInstanceCall>(&Call)) {
SVal V = CC->getCXXThisVal();
if (V.isUndef()) {
if (!BT_cxx_call_undef)
BT_cxx_call_undef.reset(
new BuiltinBug(this, "Called C++ object pointer is uninitialized"));
emitBadCall(BT_cxx_call_undef.get(), C, CC->getCXXThisExpr());
return;
}
ProgramStateRef StNonNull, StNull;
std::tie(StNonNull, StNull) =
State->assume(V.castAs<DefinedOrUnknownSVal>());
if (StNull && !StNonNull) {
if (!BT_cxx_call_null)
BT_cxx_call_null.reset(
new BuiltinBug(this, "Called C++ object pointer is null"));
emitBadCall(BT_cxx_call_null.get(), C, CC->getCXXThisExpr());
return;
}
State = StNonNull;
}
const Decl *D = Call.getDecl();
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (FD) {
// If we have a declaration, we can make sure we pass enough parameters to
// the function.
unsigned Params = FD->getNumParams();
if (Call.getNumArgs() < Params) {
ExplodedNode *N = C.generateSink();
if (!N)
return;
LazyInit_BT("Function call with too few arguments", BT_call_few_args);
SmallString<512> Str;
llvm::raw_svector_ostream os(Str);
os << "Function taking " << Params << " argument"
<< (Params == 1 ? "" : "s") << " is called with fewer ("
<< Call.getNumArgs() << ")";
C.emitReport(
llvm::make_unique<BugReport>(*BT_call_few_args, os.str(), N));
}
}
// Don't check for uninitialized field values in arguments if the
// caller has a body that is available and we have the chance to inline it.
// This is a hack, but it is a reasonable compromise between sometimes
// warning and sometimes not, depending on whether we decide to inline a
// function.
const bool checkUninitFields =
!(C.getAnalysisManager().shouldInlineCall() && (D && D->getBody()));
std::unique_ptr<BugType> *BT;
if (isa<ObjCMethodCall>(Call))
BT = &BT_msg_arg;
else
BT = &BT_call_arg;
for (unsigned i = 0, e = Call.getNumArgs(); i != e; ++i) {
const ParmVarDecl *ParamDecl = nullptr;
if (FD && i < FD->getNumParams())
ParamDecl = FD->getParamDecl(i);
if (PreVisitProcessArg(C, Call.getArgSVal(i), Call.getArgSourceRange(i),
Call.getArgExpr(i), /*IsFirstArgument=*/i == 0,
checkUninitFields, Call, *BT, ParamDecl))
return;
}
// If we make it here, record our assumptions about the callee.
C.addTransition(State);
}
void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
CheckerContext &C) const {
SVal recVal = msg.getReceiverSVal();
if (recVal.isUndef()) {
if (ExplodedNode *N = C.generateSink()) {
BugType *BT = nullptr;
switch (msg.getMessageKind()) {
case OCM_Message:
if (!BT_msg_undef)
BT_msg_undef.reset(new BuiltinBug(this,
"Receiver in message expression "
"is an uninitialized value"));
BT = BT_msg_undef.get();
break;
case OCM_PropertyAccess:
if (!BT_objc_prop_undef)
BT_objc_prop_undef.reset(new BuiltinBug(
this, "Property access on an uninitialized object pointer"));
BT = BT_objc_prop_undef.get();
break;
case OCM_Subscript:
if (!BT_objc_subscript_undef)
BT_objc_subscript_undef.reset(new BuiltinBug(
this, "Subscript access on an uninitialized object pointer"));
BT = BT_objc_subscript_undef.get();
break;
}
assert(BT && "Unknown message kind.");
auto R = llvm::make_unique<BugReport>(*BT, BT->getName(), N);
const ObjCMessageExpr *ME = msg.getOriginExpr();
R->addRange(ME->getReceiverRange());
// FIXME: getTrackNullOrUndefValueVisitor can't handle "super" yet.
if (const Expr *ReceiverE = ME->getInstanceReceiver())
bugreporter::trackNullOrUndefValue(N, ReceiverE, *R);
C.emitReport(std::move(R));
}
return;
} else {
// Bifurcate the state into nil and non-nil ones.
DefinedOrUnknownSVal receiverVal = recVal.castAs<DefinedOrUnknownSVal>();
ProgramStateRef state = C.getState();
ProgramStateRef notNilState, nilState;
std::tie(notNilState, nilState) = state->assume(receiverVal);
// Handle the case where the receiver is known to be nil.
if (nilState && !notNilState) {
HandleNilReceiver(C, state, msg);
return;
}
}
}
void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
const ObjCMethodCall &msg,
ExplodedNode *N) const {
if (!BT_msg_ret)
BT_msg_ret.reset(
new BuiltinBug(this, "Receiver in message expression is 'nil'"));
const ObjCMessageExpr *ME = msg.getOriginExpr();
QualType ResTy = msg.getResultType();
SmallString<200> buf;
llvm::raw_svector_ostream os(buf);
os << "The receiver of message '";
ME->getSelector().print(os);
os << "' is nil";
if (ResTy->isReferenceType()) {
os << ", which results in forming a null reference";
} else {
os << " and returns a value of type '";
msg.getResultType().print(os, C.getLangOpts());
os << "' that will be garbage";
}
auto report = llvm::make_unique<BugReport>(*BT_msg_ret, os.str(), N);
report->addRange(ME->getReceiverRange());
// FIXME: This won't track "self" in messages to super.
if (const Expr *receiver = ME->getInstanceReceiver()) {
bugreporter::trackNullOrUndefValue(N, receiver, *report);
}
C.emitReport(std::move(report));
}
static bool supportsNilWithFloatRet(const llvm::Triple &triple) {
return (triple.getVendor() == llvm::Triple::Apple &&
(triple.isiOS() || !triple.isMacOSXVersionLT(10,5)));
}
void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
ProgramStateRef state,
const ObjCMethodCall &Msg) const {
ASTContext &Ctx = C.getASTContext();
static CheckerProgramPointTag Tag(this, "NilReceiver");
// Check the return type of the message expression. A message to nil will
// return different values depending on the return type and the architecture.
QualType RetTy = Msg.getResultType();
CanQualType CanRetTy = Ctx.getCanonicalType(RetTy);
const LocationContext *LCtx = C.getLocationContext();
if (CanRetTy->isStructureOrClassType()) {
// Structure returns are safe since the compiler zeroes them out.
SVal V = C.getSValBuilder().makeZeroVal(RetTy);
C.addTransition(state->BindExpr(Msg.getOriginExpr(), LCtx, V), &Tag);
return;
}
// Other cases: check if sizeof(return type) > sizeof(void*)
if (CanRetTy != Ctx.VoidTy && C.getLocationContext()->getParentMap()
.isConsumedExpr(Msg.getOriginExpr())) {
// Compute: sizeof(void *) and sizeof(return type)
const uint64_t voidPtrSize = Ctx.getTypeSize(Ctx.VoidPtrTy);
const uint64_t returnTypeSize = Ctx.getTypeSize(CanRetTy);
if (CanRetTy.getTypePtr()->isReferenceType()||
(voidPtrSize < returnTypeSize &&
!(supportsNilWithFloatRet(Ctx.getTargetInfo().getTriple()) &&
(Ctx.FloatTy == CanRetTy ||
Ctx.DoubleTy == CanRetTy ||
Ctx.LongDoubleTy == CanRetTy ||
Ctx.LongLongTy == CanRetTy ||
Ctx.UnsignedLongLongTy == CanRetTy)))) {
if (ExplodedNode *N = C.generateSink(state, nullptr, &Tag))
emitNilReceiverBug(C, Msg, N);
return;
}
// Handle the safe cases where the return value is 0 if the
// receiver is nil.
//
// FIXME: For now take the conservative approach that we only
// return null values if we *know* that the receiver is nil.
// This is because we can have surprises like:
//
// ... = [[NSScreens screens] objectAtIndex:0];
//
// What can happen is that [... screens] could return nil, but
// it most likely isn't nil. We should assume the semantics
// of this case unless we have *a lot* more knowledge.
//
SVal V = C.getSValBuilder().makeZeroVal(RetTy);
C.addTransition(state->BindExpr(Msg.getOriginExpr(), LCtx, V), &Tag);
return;
}
C.addTransition(state);
}
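// A hypothetical example of the nil-receiver diagnostic above (on a target
// where doubles are not safely returned from a message to nil):
//
//   NSNumber *n = nil;
//   double d = [n doubleValue];  // "The receiver of message 'doubleValue'
//                                //  is nil and returns a value of type
//                                //  'double' that will be garbage"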
#define REGISTER_CHECKER(name) \
void ento::register##name(CheckerManager &mgr) { \
CallAndMessageChecker *Checker = \
mgr.registerChecker<CallAndMessageChecker>(); \
Checker->Filter.Check_##name = true; \
Checker->Filter.CheckName_##name = mgr.getCurrentCheckName(); \
}
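// For example, REGISTER_CHECKER(CallAndMessageUnInitRefArg) expands to
// roughly:
//
//   void ento::registerCallAndMessageUnInitRefArg(CheckerManager &mgr) {
//     CallAndMessageChecker *Checker =
//         mgr.registerChecker<CallAndMessageChecker>();
//     Checker->Filter.Check_CallAndMessageUnInitRefArg = true;
//     Checker->Filter.CheckName_CallAndMessageUnInitRefArg =
//         mgr.getCurrentCheckName();
//   }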
REGISTER_CHECKER(CallAndMessageUnInitRefArg)
REGISTER_CHECKER(CallAndMessageChecker)
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp | //=== UndefBranchChecker.cpp -----------------------------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines UndefBranchChecker, which checks for undefined branch
// condition.
//
//===----------------------------------------------------------------------===//
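// A hypothetical example of the diagnostic this checker emits:
//
//   int x;   // never initialized
//   if (x)   // "Branch condition evaluates to a garbage value"
//     ;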
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
using namespace clang;
using namespace ento;
namespace {
class UndefBranchChecker : public Checker<check::BranchCondition> {
mutable std::unique_ptr<BuiltinBug> BT;
struct FindUndefExpr {
ProgramStateRef St;
const LocationContext *LCtx;
FindUndefExpr(ProgramStateRef S, const LocationContext *L)
: St(S), LCtx(L) {}
const Expr *FindExpr(const Expr *Ex) {
if (!MatchesCriteria(Ex))
return nullptr;
for (const Stmt *SubStmt : Ex->children())
if (const Expr *ExI = dyn_cast_or_null<Expr>(SubStmt))
if (const Expr *E2 = FindExpr(ExI))
return E2;
return Ex;
}
bool MatchesCriteria(const Expr *Ex) {
return St->getSVal(Ex, LCtx).isUndef();
}
};
public:
void checkBranchCondition(const Stmt *Condition, CheckerContext &Ctx) const;
};
}
void UndefBranchChecker::checkBranchCondition(const Stmt *Condition,
CheckerContext &Ctx) const {
SVal X = Ctx.getState()->getSVal(Condition, Ctx.getLocationContext());
if (X.isUndef()) {
// Generate a sink node, which implicitly marks both outgoing branches as
// infeasible.
ExplodedNode *N = Ctx.generateSink();
if (N) {
if (!BT)
BT.reset(new BuiltinBug(
this, "Branch condition evaluates to a garbage value"));
// What's going on here: we want to highlight the subexpression of the
// condition that is the most likely source of the "uninitialized
// branch condition." We do a recursive walk of the condition's
// subexpressions and roughly look for the most nested subexpression
// that binds to Undefined. We then highlight that expression's range.
// Get the predecessor node and check if it is a PostStmt with the Stmt
// being the terminator condition. We want to inspect the state
// of that node instead because it will contain more information about
// the subexpressions.
// Note: any predecessor will do. They should have identical state,
// since all the BlockEdge did was act as an error sink because the value
// was already undefined.
assert(!N->pred_empty());
const Expr *Ex = cast<Expr>(Condition);
ExplodedNode *PrevN = *N->pred_begin();
ProgramPoint P = PrevN->getLocation();
ProgramStateRef St = N->getState();
if (Optional<PostStmt> PS = P.getAs<PostStmt>())
if (PS->getStmt() == Ex)
St = PrevN->getState();
FindUndefExpr FindIt(St, Ctx.getLocationContext());
Ex = FindIt.FindExpr(Ex);
// Emit the bug report.
auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N);
bugreporter::trackNullOrUndefValue(N, Ex, *R);
R->addRange(Ex->getSourceRange());
Ctx.emitReport(std::move(R));
}
}
}
void ento::registerUndefBranchChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefBranchChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp | //== ObjCContainersASTChecker.cpp - CoreFoundation containers API *- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// An AST checker that looks for common pitfalls when using 'CFArray',
// 'CFDictionary', 'CFSet' APIs.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
namespace {
class WalkAST : public StmtVisitor<WalkAST> {
BugReporter &BR;
const CheckerBase *Checker;
AnalysisDeclContext* AC;
ASTContext &ASTC;
uint64_t PtrWidth;
/// Check if the type has pointer size (very conservative).
inline bool isPointerSize(const Type *T) {
if (!T)
return true;
if (T->isIncompleteType())
return true;
return (ASTC.getTypeSize(T) == PtrWidth);
}
/// Check if the type is a pointer/array to pointer sized values.
inline bool hasPointerToPointerSizedType(const Expr *E) {
QualType T = E->getType();
// The type could be either a pointer or array.
const Type *TP = T.getTypePtr();
QualType PointeeT = TP->getPointeeType();
if (!PointeeT.isNull()) {
// If the type is a pointer to an array, check the size of the array
// elements. This avoids false positives arising from the assumption
// that the values x and &x are equal when x is an array.
if (const Type *TElem = PointeeT->getArrayElementTypeNoTypeQual())
if (isPointerSize(TElem))
return true;
// Else, check the pointee size.
return isPointerSize(PointeeT.getTypePtr());
}
if (const Type *TElem = TP->getArrayElementTypeNoTypeQual())
return isPointerSize(TElem);
// The type must be an array/pointer type.
// This could be a null constant, which is allowed.
if (E->isNullPointerConstant(ASTC, Expr::NPC_ValueDependentIsNull))
return true;
return false;
}
public:
WalkAST(BugReporter &br, const CheckerBase *checker, AnalysisDeclContext *ac)
: BR(br), Checker(checker), AC(ac), ASTC(AC->getASTContext()),
PtrWidth(ASTC.getTargetInfo().getPointerWidth(0)) {}
// Statement visitor methods.
void VisitChildren(Stmt *S);
void VisitStmt(Stmt *S) { VisitChildren(S); }
void VisitCallExpr(CallExpr *CE);
};
} // end anonymous namespace
static StringRef getCalleeName(CallExpr *CE) {
const FunctionDecl *FD = CE->getDirectCallee();
if (!FD)
return StringRef();
IdentifierInfo *II = FD->getIdentifier();
if (!II) // if no identifier, not a simple C function
return StringRef();
return II->getName();
}
void WalkAST::VisitCallExpr(CallExpr *CE) {
StringRef Name = getCalleeName(CE);
if (Name.empty())
return;
const Expr *Arg = nullptr;
unsigned ArgNum;
if (Name.equals("CFArrayCreate") || Name.equals("CFSetCreate")) {
if (CE->getNumArgs() != 4)
return;
ArgNum = 1;
Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
if (hasPointerToPointerSizedType(Arg))
return;
} else if (Name.equals("CFDictionaryCreate")) {
if (CE->getNumArgs() != 6)
return;
// Check first argument.
ArgNum = 1;
Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
if (hasPointerToPointerSizedType(Arg)) {
// Check second argument.
ArgNum = 2;
Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
if (hasPointerToPointerSizedType(Arg))
// Both are good, return.
return;
}
}
if (Arg) {
assert(ArgNum == 1 || ArgNum == 2);
SmallString<64> BufName;
llvm::raw_svector_ostream OsName(BufName);
OsName << " Invalid use of '" << Name << "'" ;
SmallString<256> Buf;
llvm::raw_svector_ostream Os(Buf);
// Use "second" and "third" since users will expect 1-based indexing
// for parameter names when mentioned in prose.
Os << " The "<< ((ArgNum == 1) ? "second" : "third") << " argument to '"
<< Name << "' must be a C array of pointer-sized values, not '"
<< Arg->getType().getAsString() << "'";
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
BR.EmitBasicReport(AC->getDecl(), Checker, OsName.str(),
categories::CoreFoundationObjectiveC, Os.str(), CELoc,
Arg->getSourceRange());
}
// Recurse and check children.
VisitChildren(CE);
}
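// A hypothetical call this checker would flag (the array elements are
// smaller than a pointer, so this cannot be a valid values array):
//
//   char keys[3];
//   CFArrayCreate(kCFAllocatorDefault, (const void **)keys, 3, NULL);
//   // "The second argument to 'CFArrayCreate' must be a C array of
//   //  pointer-sized values, not 'char [3]'"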
void WalkAST::VisitChildren(Stmt *S) {
for (Stmt *Child : S->children())
if (Child)
Visit(Child);
}
namespace {
class ObjCContainersASTChecker : public Checker<check::ASTCodeBody> {
public:
void checkASTCodeBody(const Decl *D, AnalysisManager& Mgr,
BugReporter &BR) const {
WalkAST walker(BR, this, Mgr.getAnalysisDeclContext(D));
walker.Visit(D->getBody());
}
};
}
void ento::registerObjCContainersASTChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCContainersASTChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp | //==- DebugCheckers.cpp - Debugging Checkers ---------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines checkers that display debugging information.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/Analysis/Analyses/Dominators.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/CallGraph.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/Support/Process.h"
using namespace clang;
using namespace ento;
//===----------------------------------------------------------------------===//
// DominatorsTreeDumper
//===----------------------------------------------------------------------===//
namespace {
class DominatorsTreeDumper : public Checker<check::ASTCodeBody> {
public:
void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
BugReporter &BR) const {
if (AnalysisDeclContext *AC = mgr.getAnalysisDeclContext(D)) {
DominatorTree dom;
dom.buildDominatorTree(*AC);
dom.dump();
}
}
};
}
void ento::registerDominatorsTreeDumper(CheckerManager &mgr) {
mgr.registerChecker<DominatorsTreeDumper>();
}
//===----------------------------------------------------------------------===//
// LiveVariablesDumper
//===----------------------------------------------------------------------===//
namespace {
class LiveVariablesDumper : public Checker<check::ASTCodeBody> {
public:
void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
BugReporter &BR) const {
if (LiveVariables* L = mgr.getAnalysis<LiveVariables>(D)) {
L->dumpBlockLiveness(mgr.getSourceManager());
}
}
};
}
void ento::registerLiveVariablesDumper(CheckerManager &mgr) {
mgr.registerChecker<LiveVariablesDumper>();
}
//===----------------------------------------------------------------------===//
// CFGViewer
//===----------------------------------------------------------------------===//
namespace {
class CFGViewer : public Checker<check::ASTCodeBody> {
public:
void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
BugReporter &BR) const {
if (CFG *cfg = mgr.getCFG(D)) {
cfg->viewCFG(mgr.getLangOpts());
}
}
};
}
void ento::registerCFGViewer(CheckerManager &mgr) {
mgr.registerChecker<CFGViewer>();
}
//===----------------------------------------------------------------------===//
// CFGDumper
//===----------------------------------------------------------------------===//
namespace {
class CFGDumper : public Checker<check::ASTCodeBody> {
public:
void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
BugReporter &BR) const {
PrintingPolicy Policy(mgr.getLangOpts());
Policy.TerseOutput = true;
Policy.PolishForDeclaration = true;
D->print(llvm::errs(), Policy);
if (CFG *cfg = mgr.getCFG(D)) {
cfg->dump(mgr.getLangOpts(),
llvm::sys::Process::StandardErrHasColors());
}
}
};
}
void ento::registerCFGDumper(CheckerManager &mgr) {
mgr.registerChecker<CFGDumper>();
}
//===----------------------------------------------------------------------===//
// CallGraphViewer
//===----------------------------------------------------------------------===//
namespace {
class CallGraphViewer : public Checker< check::ASTDecl<TranslationUnitDecl> > {
public:
void checkASTDecl(const TranslationUnitDecl *TU, AnalysisManager& mgr,
BugReporter &BR) const {
CallGraph CG;
CG.addToCallGraph(const_cast<TranslationUnitDecl*>(TU));
CG.viewGraph();
}
};
}
void ento::registerCallGraphViewer(CheckerManager &mgr) {
mgr.registerChecker<CallGraphViewer>();
}
//===----------------------------------------------------------------------===//
// CallGraphDumper
//===----------------------------------------------------------------------===//
namespace {
class CallGraphDumper : public Checker< check::ASTDecl<TranslationUnitDecl> > {
public:
void checkASTDecl(const TranslationUnitDecl *TU, AnalysisManager& mgr,
BugReporter &BR) const {
CallGraph CG;
CG.addToCallGraph(const_cast<TranslationUnitDecl*>(TU));
CG.dump();
}
};
}
void ento::registerCallGraphDumper(CheckerManager &mgr) {
mgr.registerChecker<CallGraphDumper>();
}
//===----------------------------------------------------------------------===//
// ConfigDumper
//===----------------------------------------------------------------------===//
namespace {
class ConfigDumper : public Checker< check::EndOfTranslationUnit > {
typedef AnalyzerOptions::ConfigTable Table;
// HLSL Change: changed calling convention to __cdecl
static int __cdecl compareEntry(const Table::MapEntryTy *const *LHS,
const Table::MapEntryTy *const *RHS) {
return (*LHS)->getKey().compare((*RHS)->getKey());
}
public:
void checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
AnalysisManager& mgr,
BugReporter &BR) const {
const Table &Config = mgr.options.Config;
SmallVector<const Table::MapEntryTy *, 32> Keys;
for (Table::const_iterator I = Config.begin(), E = Config.end(); I != E;
++I)
Keys.push_back(&*I);
llvm::array_pod_sort(Keys.begin(), Keys.end(), compareEntry);
llvm::errs() << "[config]\n";
for (unsigned I = 0, E = Keys.size(); I != E; ++I)
llvm::errs() << Keys[I]->getKey() << " = " << Keys[I]->second << '\n';
llvm::errs() << "[stats]\n" << "num-entries = " << Keys.size() << '\n';
}
};
}
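// Example output of ConfigDumper (the key/value shown is hypothetical;
// actual entries depend on the -analyzer-config options in effect):
//
//   [config]
//   ipa-always-inline-size = 3
//   [stats]
//   num-entries = 1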
void ento::registerConfigDumper(CheckerManager &mgr) {
mgr.registerChecker<ConfigDumper>();
}
//===----------------------------------------------------------------------===//
// ExplodedGraph Viewer
//===----------------------------------------------------------------------===//
namespace {
class ExplodedGraphViewer : public Checker< check::EndAnalysis > {
public:
ExplodedGraphViewer() {}
void checkEndAnalysis(ExplodedGraph &G, BugReporter &B,ExprEngine &Eng) const {
Eng.ViewGraph(0);
}
};
}
void ento::registerExplodedGraphViewer(CheckerManager &mgr) {
mgr.registerChecker<ExplodedGraphViewer>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp | //= CheckerDocumentation.cpp - Documentation checker ---------------*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This checker lists all the checker callbacks and provides documentation for
// checker writers.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
using namespace clang;
using namespace ento;
// All checkers should be placed into an anonymous namespace.
// We place CheckerDocumentation inside the ento namespace to make
// it visible in doxygen.
namespace clang {
namespace ento {
/// This checker documents the callback functions checkers can use to implement
/// the custom handling of the specific events during path exploration as well
/// as reporting bugs. Most of the callbacks are targeted at path-sensitive
/// checking.
///
/// \sa CheckerContext
class CheckerDocumentation : public Checker< check::PreStmt<ReturnStmt>,
check::PostStmt<DeclStmt>,
check::PreObjCMessage,
check::PostObjCMessage,
check::PreCall,
check::PostCall,
check::BranchCondition,
check::Location,
check::Bind,
check::DeadSymbols,
check::EndFunction,
check::EndAnalysis,
check::EndOfTranslationUnit,
eval::Call,
eval::Assume,
check::LiveSymbols,
check::RegionChanges,
check::PointerEscape,
check::ConstPointerEscape,
check::Event<ImplicitNullDerefEvent>,
check::ASTDecl<FunctionDecl> > {
public:
/// \brief Pre-visit the Statement.
///
/// The method will be called before the analyzer core processes the
/// statement. The notification is performed for every explored CFGElement,
/// which does not include the control flow statements such as IfStmt. The
/// callback can be specialized to be called with any subclass of Stmt.
///
/// See checkBranchCondition() callback for performing custom processing of
/// the branching statements.
///
/// check::PreStmt<ReturnStmt>
void checkPreStmt(const ReturnStmt *DS, CheckerContext &C) const {}
/// \brief Post-visit the Statement.
///
/// The method will be called after the analyzer core processes the
/// statement. The notification is performed for every explored CFGElement,
/// which does not include the control flow statements such as IfStmt. The
/// callback can be specialized to be called with any subclass of Stmt.
///
/// check::PostStmt<DeclStmt>
void checkPostStmt(const DeclStmt *DS, CheckerContext &C) const;
/// \brief Pre-visit the Objective C message.
///
/// This will be called before the analyzer core processes the method call.
/// This is called for any action which produces an Objective-C message send,
/// including explicit message syntax and property access.
///
/// check::PreObjCMessage
void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const {}
/// \brief Post-visit the Objective C message.
/// \sa checkPreObjCMessage()
///
/// check::PostObjCMessage
void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const {}
/// \brief Pre-visit an abstract "call" event.
///
/// This is used for checkers that want to check arguments or attributed
/// behavior for functions and methods no matter how they are being invoked.
///
/// Note that this includes ALL cross-body invocations, so if you want to
/// limit your checks to, say, function calls, you should test for that at the
/// beginning of your callback function.
///
/// check::PreCall
void checkPreCall(const CallEvent &Call, CheckerContext &C) const {}
/// \brief Post-visit an abstract "call" event.
/// \sa checkPreObjCMessage()
///
/// check::PostCall
void checkPostCall(const CallEvent &Call, CheckerContext &C) const {}
/// \brief Pre-visit of the condition statement of a branch (such as IfStmt).
void checkBranchCondition(const Stmt *Condition, CheckerContext &Ctx) const {}
/// \brief Called on a load from and a store to a location.
///
/// The method will be called each time a location (pointer) value is
/// accessed.
/// \param Loc The value of the location (pointer).
/// \param IsLoad The flag specifying if the location is a store or a load.
/// \param S The load is performed while processing the statement.
///
/// check::Location
void checkLocation(SVal Loc, bool IsLoad, const Stmt *S,
CheckerContext &) const {}
/// \brief Called on binding of a value to a location.
///
/// \param Loc The value of the location (pointer).
/// \param Val The value which will be stored at the location Loc.
/// \param S The bind is performed while processing the statement S.
///
/// check::Bind
void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &) const {}
/// \brief Called whenever a symbol becomes dead.
///
/// This callback should be used by the checkers to aggressively clean
/// up/reduce the checker state, which is important for reducing the overall
/// memory usage. Specifically, if a checker keeps symbol specific information
/// in the state, it can and should be dropped after the symbol becomes dead.
/// In addition, reporting a bug as soon as the tracked symbol becomes dead
/// leads to more precise diagnostics. (For example, one should report that a
/// malloced variable is not freed right after it goes out of scope.)
///
/// \param SR The SymbolReaper object can be queried to determine which
/// symbols are dead.
///
/// check::DeadSymbols
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const {}
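// A hypothetical sketch of the cleanup pattern described above, assuming a
// checker-specific map trait 'MySymbolMap' registered with
// REGISTER_MAP_WITH_PROGRAMSTATE (both names are made up):
//
//   void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const {
//     ProgramStateRef State = C.getState();
//     for (auto &Entry : State->get<MySymbolMap>())
//       if (SR.isDead(Entry.first))
//         State = State->remove<MySymbolMap>(Entry.first);
//     C.addTransition(State);
//   }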
/// \brief Called when the analyzer core reaches the end of a
/// function being analyzed.
///
/// check::EndFunction
void checkEndFunction(CheckerContext &Ctx) const {}
/// \brief Called after all the paths in the ExplodedGraph reach end of path
/// - the symbolic execution graph is fully explored.
///
/// This callback should be used in cases when a checker needs to have a
/// global view of the information generated on all paths. For example, to
/// compare the execution summary/result across several paths.
/// See IdempotentOperationChecker for a usage example.
///
/// check::EndAnalysis
void checkEndAnalysis(ExplodedGraph &G,
BugReporter &BR,
ExprEngine &Eng) const {}
/// \brief Called after analysis of a TranslationUnit is complete.
///
/// check::EndOfTranslationUnit
void checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
AnalysisManager &Mgr,
BugReporter &BR) const {}
/// \brief Evaluates function call.
///
/// The analysis core treats all function calls in the same way. However, some
/// functions have special meaning, which should be reflected in the program
/// state. This callback allows a checker to provide domain specific knowledge
/// about the particular functions it knows about.
///
/// \returns true if the call has been successfully evaluated
/// and false otherwise. Note that only one checker can evaluate a call. If
/// more than one checker claims that it can evaluate the same call, the
/// first one wins.
///
/// eval::Call
bool evalCall(const CallExpr *CE, CheckerContext &C) const { return true; }
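// A hypothetical sketch (not part of this documentation checker) of an
// eval::Call implementation that models a function 'my_identity' returning
// its argument unchanged; the function name is made up for illustration:
//
//   bool evalCall(const CallExpr *CE, CheckerContext &C) const {
//     const FunctionDecl *FD = CE->getDirectCallee();
//     if (!FD || !FD->getIdentifier() ||
//         FD->getName() != "my_identity" || CE->getNumArgs() != 1)
//       return false;
//     ProgramStateRef State = C.getState();
//     SVal Arg = State->getSVal(CE->getArg(0), C.getLocationContext());
//     State = State->BindExpr(CE, C.getLocationContext(), Arg);
//     C.addTransition(State);
//     return true;
//   }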
/// \brief Handles assumptions on symbolic values.
///
/// This method is called when a symbolic expression is assumed to be true or
/// false. For example, the assumptions are performed when evaluating a
/// condition at a branch. The callback allows checkers to track the
/// assumptions performed on the symbols of interest and change the state
/// accordingly.
///
/// eval::Assume
ProgramStateRef evalAssume(ProgramStateRef State,
SVal Cond,
bool Assumption) const { return State; }
/// Allows modifying SymbolReaper object. For example, checkers can explicitly
/// register symbols of interest as live. These symbols will not be marked
/// dead and removed.
///
/// check::LiveSymbols
void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const {}
/// \brief Called to determine if the checker currently needs to know when
/// the contents of any regions change.
///
/// Since it is not necessarily cheap to compute which regions are being
/// changed, this allows the analyzer core to skip the more expensive
/// #checkRegionChanges when no checkers are tracking any state.
bool wantsRegionChangeUpdate(ProgramStateRef St) const { return true; }
/// \brief Called when the contents of one or more regions change.
///
/// This can occur in many different ways: an explicit bind, a blanket
/// invalidation of the region contents, or by passing a region to a function
/// call whose behavior the analyzer cannot model perfectly.
///
/// \param State The current program state.
/// \param Invalidated A set of all symbols potentially touched by the change.
/// \param ExplicitRegions The regions explicitly requested for invalidation.
/// For a function call, this would be the arguments. For a bind, this
/// would be the region being bound to.
/// \param Regions The transitive closure of regions accessible from,
/// \p ExplicitRegions, i.e. all regions that may have been touched
/// by this change. For a simple bind, this list will be the same as
/// \p ExplicitRegions, since a bind does not affect the contents of
/// anything accessible through the base region.
/// \param Call The opaque call triggering this invalidation. Will be 0 if the
/// change was not triggered by a call.
///
/// Note that this callback will not be invoked unless
/// #wantsRegionChangeUpdate returns \c true.
///
/// check::RegionChanges
ProgramStateRef
checkRegionChanges(ProgramStateRef State,
const InvalidatedSymbols *Invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
const CallEvent *Call) const {
return State;
}
/// \brief Called when pointers escape.
///
/// This notifies the checkers about pointer escape, which occurs whenever
/// the analyzer cannot track the symbol any more. For example, as a
/// result of assigning a pointer into a global or when it's passed to a
/// function call the analyzer cannot model.
///
/// \param State The state at the point of escape.
/// \param Escaped The list of escaped symbols.
/// \param Call The corresponding CallEvent, if the symbols escape as
/// parameters to the given call.
/// \param Kind How the symbols have escaped.
/// \returns Checkers can modify the state by returning a new state.
ProgramStateRef checkPointerEscape(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
PointerEscapeKind Kind) const {
return State;
}
/// \brief Called when const pointers escape.
///
/// Note: in most cases checkPointerEscape callback is sufficient.
/// \sa checkPointerEscape
ProgramStateRef checkConstPointerEscape(ProgramStateRef State,
const InvalidatedSymbols &Escaped,
const CallEvent *Call,
PointerEscapeKind Kind) const {
return State;
}
/// check::Event<ImplicitNullDerefEvent>
void checkEvent(ImplicitNullDerefEvent Event) const {}
/// \brief Check every declaration in the AST.
///
/// An AST traversal callback, which should only be used when the checker is
/// not path sensitive. It will be called for every Declaration in the AST and
/// can be specialized to only be called on subclasses of Decl, for example,
/// FunctionDecl.
///
/// check::ASTDecl<FunctionDecl>
void checkASTDecl(const FunctionDecl *D,
AnalysisManager &Mgr,
BugReporter &BR) const {}
};
void CheckerDocumentation::checkPostStmt(const DeclStmt *DS,
CheckerContext &C) const {
return;
}
} // end namespace ento
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp | //=- AllocationDiagnostics.cpp - Config options for allocation diags *- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Declares the configuration functions for leaks/allocation diagnostics.
//
//===----------------------------------------------------------------------===//
#include "AllocationDiagnostics.h"
namespace clang {
namespace ento {
bool shouldIncludeAllocationSiteInLeakDiagnostics(AnalyzerOptions &AOpts) {
return AOpts.getBooleanOption("leak-diagnostics-reference-allocation",
false);
}
}}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/IdenticalExprChecker.cpp | //== IdenticalExprChecker.cpp - Identical expression checker----------------==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This defines IdenticalExprChecker, a check that warns about
/// unintended use of identical expressions.
///
/// It checks for use of identical expressions with comparison operators and
/// inside conditional expressions.
///
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
using namespace clang;
using namespace ento;
static bool isIdenticalStmt(const ASTContext &Ctx, const Stmt *Stmt1,
const Stmt *Stmt2, bool IgnoreSideEffects = false);
//===----------------------------------------------------------------------===//
// FindIdenticalExprVisitor - Identify nodes using identical expressions.
//===----------------------------------------------------------------------===//
namespace {
class FindIdenticalExprVisitor
: public RecursiveASTVisitor<FindIdenticalExprVisitor> {
BugReporter &BR;
const CheckerBase *Checker;
AnalysisDeclContext *AC;
public:
explicit FindIdenticalExprVisitor(BugReporter &B,
const CheckerBase *Checker,
AnalysisDeclContext *A)
: BR(B), Checker(Checker), AC(A) {}
// FindIdenticalExprVisitor only visits nodes
// that are binary operators, if statements or
// conditional operators.
bool VisitBinaryOperator(const BinaryOperator *B);
bool VisitIfStmt(const IfStmt *I);
bool VisitConditionalOperator(const ConditionalOperator *C);
private:
void reportIdenticalExpr(const BinaryOperator *B, bool CheckBitwise,
ArrayRef<SourceRange> Sr);
void checkBitwiseOrLogicalOp(const BinaryOperator *B, bool CheckBitwise);
void checkComparisonOp(const BinaryOperator *B);
};
} // end anonymous namespace
void FindIdenticalExprVisitor::reportIdenticalExpr(const BinaryOperator *B,
bool CheckBitwise,
ArrayRef<SourceRange> Sr) {
StringRef Message;
if (CheckBitwise)
Message = "identical expressions on both sides of bitwise operator";
else
Message = "identical expressions on both sides of logical operator";
PathDiagnosticLocation ELoc =
PathDiagnosticLocation::createOperatorLoc(B, BR.getSourceManager());
BR.EmitBasicReport(AC->getDecl(), Checker,
"Use of identical expressions",
categories::LogicError,
Message, ELoc, Sr);
}
void FindIdenticalExprVisitor::checkBitwiseOrLogicalOp(const BinaryOperator *B,
bool CheckBitwise) {
SourceRange Sr[2];
const Expr *LHS = B->getLHS();
const Expr *RHS = B->getRHS();
// Split operators as long as we still have operators to split on. We will
// get called for every binary operator in an expression so there is no need
// to check every one against each other here, just the rightmost one with
// the others.
while (const BinaryOperator *B2 = dyn_cast<BinaryOperator>(LHS)) {
if (B->getOpcode() != B2->getOpcode())
break;
if (isIdenticalStmt(AC->getASTContext(), RHS, B2->getRHS())) {
Sr[0] = RHS->getSourceRange();
Sr[1] = B2->getRHS()->getSourceRange();
reportIdenticalExpr(B, CheckBitwise, Sr);
}
LHS = B2->getLHS();
}
if (isIdenticalStmt(AC->getASTContext(), RHS, LHS)) {
Sr[0] = RHS->getSourceRange();
Sr[1] = LHS->getSourceRange();
reportIdenticalExpr(B, CheckBitwise, Sr);
}
}
bool FindIdenticalExprVisitor::VisitIfStmt(const IfStmt *I) {
const Stmt *Stmt1 = I->getThen();
const Stmt *Stmt2 = I->getElse();
// Check for identical conditions:
//
// if (b) {
// foo1();
// } else if (b) {
// foo2();
// }
if (Stmt1 && Stmt2) {
const Expr *Cond1 = I->getCond();
const Stmt *Else = Stmt2;
while (const IfStmt *I2 = dyn_cast_or_null<IfStmt>(Else)) {
const Expr *Cond2 = I2->getCond();
if (isIdenticalStmt(AC->getASTContext(), Cond1, Cond2, false)) {
SourceRange Sr = Cond1->getSourceRange();
PathDiagnosticLocation ELoc(Cond2, BR.getSourceManager(), AC);
BR.EmitBasicReport(AC->getDecl(), Checker, "Identical conditions",
categories::LogicError,
"expression is identical to previous condition",
ELoc, Sr);
}
Else = I2->getElse();
}
}
if (!Stmt1 || !Stmt2)
return true;
// Special handling for code like:
//
// if (b) {
// i = 1;
// } else
// i = 1;
if (const CompoundStmt *CompStmt = dyn_cast<CompoundStmt>(Stmt1)) {
if (CompStmt->size() == 1)
Stmt1 = CompStmt->body_back();
}
if (const CompoundStmt *CompStmt = dyn_cast<CompoundStmt>(Stmt2)) {
if (CompStmt->size() == 1)
Stmt2 = CompStmt->body_back();
}
if (isIdenticalStmt(AC->getASTContext(), Stmt1, Stmt2, true)) {
PathDiagnosticLocation ELoc =
PathDiagnosticLocation::createBegin(I, BR.getSourceManager(), AC);
BR.EmitBasicReport(AC->getDecl(), Checker,
"Identical branches",
categories::LogicError,
"true and false branches are identical", ELoc);
}
return true;
}
bool FindIdenticalExprVisitor::VisitBinaryOperator(const BinaryOperator *B) {
BinaryOperator::Opcode Op = B->getOpcode();
if (BinaryOperator::isBitwiseOp(Op))
checkBitwiseOrLogicalOp(B, true);
if (BinaryOperator::isLogicalOp(Op))
checkBitwiseOrLogicalOp(B, false);
if (BinaryOperator::isComparisonOp(Op))
checkComparisonOp(B);
// We want to visit ALL nodes (subexpressions of binary comparison
// expressions too) that contain comparison operators.
// True is always returned to traverse ALL nodes.
return true;
}
void FindIdenticalExprVisitor::checkComparisonOp(const BinaryOperator *B) {
BinaryOperator::Opcode Op = B->getOpcode();
//
// Special case for floating-point representation.
//
// If expressions on both sides of comparison operator are of type float,
// then for some comparison operators no warning shall be
// reported even if the expressions are identical from a symbolic point of
// view. Comparison between expressions, declared variables and literals
// are treated differently.
//
// != and == between float literals that have the same value should NOT warn.
// < > between float literals that have the same value SHOULD warn.
//
// != and == between the same float declaration should NOT warn.
// < > between the same float declaration SHOULD warn.
//
// != and == between equivalent expressions that evaluate to float
// should NOT warn.
// < > between equivalent expressions that evaluate to float
// should NOT warn.
//
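// For example (illustrative):
//
//   float f = 1.0f;
//   if (f == f) ...        // not reported: == on the same float declaration
//   if (f < f) ...         // reported: < on the same float declaration
//   if (1.0f == 1.0f) ...  // not reported: == on equal float literals
//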
const Expr *LHS = B->getLHS()->IgnoreParenImpCasts();
const Expr *RHS = B->getRHS()->IgnoreParenImpCasts();
const DeclRefExpr *DeclRef1 = dyn_cast<DeclRefExpr>(LHS);
const DeclRefExpr *DeclRef2 = dyn_cast<DeclRefExpr>(RHS);
const FloatingLiteral *FloatLit1 = dyn_cast<FloatingLiteral>(LHS);
const FloatingLiteral *FloatLit2 = dyn_cast<FloatingLiteral>(RHS);
if ((DeclRef1) && (DeclRef2)) {
if ((DeclRef1->getType()->hasFloatingRepresentation()) &&
(DeclRef2->getType()->hasFloatingRepresentation())) {
if (DeclRef1->getDecl() == DeclRef2->getDecl()) {
if ((Op == BO_EQ) || (Op == BO_NE)) {
return;
}
}
}
} else if ((FloatLit1) && (FloatLit2)) {
if (FloatLit1->getValue().bitwiseIsEqual(FloatLit2->getValue())) {
if ((Op == BO_EQ) || (Op == BO_NE)) {
return;
}
}
} else if (LHS->getType()->hasFloatingRepresentation()) {
// If any side of comparison operator still has floating-point
// representation, then it's an expression. Don't warn.
// Here only LHS is checked since RHS will be implicitly cast to float.
return;
} else {
// No special case with floating-point representation, report as usual.
}
if (isIdenticalStmt(AC->getASTContext(), B->getLHS(), B->getRHS())) {
PathDiagnosticLocation ELoc =
PathDiagnosticLocation::createOperatorLoc(B, BR.getSourceManager());
StringRef Message;
if (((Op == BO_EQ) || (Op == BO_LE) || (Op == BO_GE)))
Message = "comparison of identical expressions always evaluates to true";
else
Message = "comparison of identical expressions always evaluates to false";
BR.EmitBasicReport(AC->getDecl(), Checker,
"Compare of identical expressions",
categories::LogicError, Message, ELoc);
}
}
bool FindIdenticalExprVisitor::VisitConditionalOperator(
const ConditionalOperator *C) {
// Check if expressions in conditional expression are identical
// from a symbolic point of view.
if (isIdenticalStmt(AC->getASTContext(), C->getTrueExpr(),
C->getFalseExpr(), true)) {
PathDiagnosticLocation ELoc =
PathDiagnosticLocation::createConditionalColonLoc(
C, BR.getSourceManager());
SourceRange Sr[2];
Sr[0] = C->getTrueExpr()->getSourceRange();
Sr[1] = C->getFalseExpr()->getSourceRange();
BR.EmitBasicReport(
AC->getDecl(), Checker,
"Identical expressions in conditional expression",
categories::LogicError,
"identical expressions on both sides of ':' in conditional expression",
ELoc, Sr);
}
// We want to visit ALL nodes (expressions in conditional
// expressions too) that contain conditional operators,
// thus always return true to traverse ALL nodes.
return true;
}
/// \brief Determines whether two statement trees are identical regarding
/// operators and symbols.
///
/// Exceptions: expressions containing macros or functions with possible side
/// effects are never considered identical.
/// Limitations: (t + u) and (u + t) are not considered identical.
/// t*(u + t) and t*u + t*t are not considered identical.
///
static bool isIdenticalStmt(const ASTContext &Ctx, const Stmt *Stmt1,
const Stmt *Stmt2, bool IgnoreSideEffects) {
if (!Stmt1 || !Stmt2) {
if (!Stmt1 && !Stmt2)
return true;
return false;
}
// If Stmt1 & Stmt2 are of different class then they are not
// identical statements.
if (Stmt1->getStmtClass() != Stmt2->getStmtClass())
return false;
const Expr *Expr1 = dyn_cast<Expr>(Stmt1);
const Expr *Expr2 = dyn_cast<Expr>(Stmt2);
if (Expr1 && Expr2) {
// If Stmt1 has side effects then don't warn even if expressions
// are identical.
if (!IgnoreSideEffects && Expr1->HasSideEffects(Ctx))
return false;
// If either expression comes from a macro then don't warn even if
// the expressions are identical.
if ((Expr1->getExprLoc().isMacroID()) || (Expr2->getExprLoc().isMacroID()))
return false;
// If all children of two expressions are identical, return true.
Expr::const_child_iterator I1 = Expr1->child_begin();
Expr::const_child_iterator I2 = Expr2->child_begin();
while (I1 != Expr1->child_end() && I2 != Expr2->child_end()) {
if (!*I1 || !*I2 || !isIdenticalStmt(Ctx, *I1, *I2, IgnoreSideEffects))
return false;
++I1;
++I2;
}
// If there are different number of children in the statements, return
// false.
if (I1 != Expr1->child_end())
return false;
if (I2 != Expr2->child_end())
return false;
}
switch (Stmt1->getStmtClass()) {
default:
return false;
case Stmt::CallExprClass:
case Stmt::ArraySubscriptExprClass:
case Stmt::ImplicitCastExprClass:
case Stmt::ParenExprClass:
case Stmt::BreakStmtClass:
case Stmt::ContinueStmtClass:
case Stmt::NullStmtClass:
return true;
case Stmt::CStyleCastExprClass: {
const CStyleCastExpr* CastExpr1 = cast<CStyleCastExpr>(Stmt1);
const CStyleCastExpr* CastExpr2 = cast<CStyleCastExpr>(Stmt2);
return CastExpr1->getTypeAsWritten() == CastExpr2->getTypeAsWritten();
}
case Stmt::ReturnStmtClass: {
const ReturnStmt *ReturnStmt1 = cast<ReturnStmt>(Stmt1);
const ReturnStmt *ReturnStmt2 = cast<ReturnStmt>(Stmt2);
return isIdenticalStmt(Ctx, ReturnStmt1->getRetValue(),
ReturnStmt2->getRetValue(), IgnoreSideEffects);
}
case Stmt::ForStmtClass: {
const ForStmt *ForStmt1 = cast<ForStmt>(Stmt1);
const ForStmt *ForStmt2 = cast<ForStmt>(Stmt2);
if (!isIdenticalStmt(Ctx, ForStmt1->getInit(), ForStmt2->getInit(),
IgnoreSideEffects))
return false;
if (!isIdenticalStmt(Ctx, ForStmt1->getCond(), ForStmt2->getCond(),
IgnoreSideEffects))
return false;
if (!isIdenticalStmt(Ctx, ForStmt1->getInc(), ForStmt2->getInc(),
IgnoreSideEffects))
return false;
if (!isIdenticalStmt(Ctx, ForStmt1->getBody(), ForStmt2->getBody(),
IgnoreSideEffects))
return false;
return true;
}
case Stmt::DoStmtClass: {
const DoStmt *DStmt1 = cast<DoStmt>(Stmt1);
const DoStmt *DStmt2 = cast<DoStmt>(Stmt2);
if (!isIdenticalStmt(Ctx, DStmt1->getCond(), DStmt2->getCond(),
IgnoreSideEffects))
return false;
if (!isIdenticalStmt(Ctx, DStmt1->getBody(), DStmt2->getBody(),
IgnoreSideEffects))
return false;
return true;
}
case Stmt::WhileStmtClass: {
const WhileStmt *WStmt1 = cast<WhileStmt>(Stmt1);
const WhileStmt *WStmt2 = cast<WhileStmt>(Stmt2);
if (!isIdenticalStmt(Ctx, WStmt1->getCond(), WStmt2->getCond(),
IgnoreSideEffects))
return false;
if (!isIdenticalStmt(Ctx, WStmt1->getBody(), WStmt2->getBody(),
IgnoreSideEffects))
return false;
return true;
}
case Stmt::IfStmtClass: {
const IfStmt *IStmt1 = cast<IfStmt>(Stmt1);
const IfStmt *IStmt2 = cast<IfStmt>(Stmt2);
if (!isIdenticalStmt(Ctx, IStmt1->getCond(), IStmt2->getCond(),
IgnoreSideEffects))
return false;
if (!isIdenticalStmt(Ctx, IStmt1->getThen(), IStmt2->getThen(),
IgnoreSideEffects))
return false;
if (!isIdenticalStmt(Ctx, IStmt1->getElse(), IStmt2->getElse(),
IgnoreSideEffects))
return false;
return true;
}
case Stmt::CompoundStmtClass: {
const CompoundStmt *CompStmt1 = cast<CompoundStmt>(Stmt1);
const CompoundStmt *CompStmt2 = cast<CompoundStmt>(Stmt2);
if (CompStmt1->size() != CompStmt2->size())
return false;
CompoundStmt::const_body_iterator I1 = CompStmt1->body_begin();
CompoundStmt::const_body_iterator I2 = CompStmt2->body_begin();
while (I1 != CompStmt1->body_end() && I2 != CompStmt2->body_end()) {
if (!isIdenticalStmt(Ctx, *I1, *I2, IgnoreSideEffects))
return false;
++I1;
++I2;
}
return true;
}
case Stmt::CompoundAssignOperatorClass:
case Stmt::BinaryOperatorClass: {
const BinaryOperator *BinOp1 = cast<BinaryOperator>(Stmt1);
const BinaryOperator *BinOp2 = cast<BinaryOperator>(Stmt2);
return BinOp1->getOpcode() == BinOp2->getOpcode();
}
case Stmt::CharacterLiteralClass: {
const CharacterLiteral *CharLit1 = cast<CharacterLiteral>(Stmt1);
const CharacterLiteral *CharLit2 = cast<CharacterLiteral>(Stmt2);
return CharLit1->getValue() == CharLit2->getValue();
}
case Stmt::DeclRefExprClass: {
const DeclRefExpr *DeclRef1 = cast<DeclRefExpr>(Stmt1);
const DeclRefExpr *DeclRef2 = cast<DeclRefExpr>(Stmt2);
return DeclRef1->getDecl() == DeclRef2->getDecl();
}
case Stmt::IntegerLiteralClass: {
const IntegerLiteral *IntLit1 = cast<IntegerLiteral>(Stmt1);
const IntegerLiteral *IntLit2 = cast<IntegerLiteral>(Stmt2);
llvm::APInt I1 = IntLit1->getValue();
llvm::APInt I2 = IntLit2->getValue();
if (I1.getBitWidth() != I2.getBitWidth())
return false;
return I1 == I2;
}
case Stmt::FloatingLiteralClass: {
const FloatingLiteral *FloatLit1 = cast<FloatingLiteral>(Stmt1);
const FloatingLiteral *FloatLit2 = cast<FloatingLiteral>(Stmt2);
return FloatLit1->getValue().bitwiseIsEqual(FloatLit2->getValue());
}
case Stmt::StringLiteralClass: {
const StringLiteral *StringLit1 = cast<StringLiteral>(Stmt1);
const StringLiteral *StringLit2 = cast<StringLiteral>(Stmt2);
return StringLit1->getBytes() == StringLit2->getBytes();
}
case Stmt::MemberExprClass: {
const MemberExpr *MemberStmt1 = cast<MemberExpr>(Stmt1);
const MemberExpr *MemberStmt2 = cast<MemberExpr>(Stmt2);
return MemberStmt1->getMemberDecl() == MemberStmt2->getMemberDecl();
}
case Stmt::UnaryOperatorClass: {
const UnaryOperator *UnaryOp1 = cast<UnaryOperator>(Stmt1);
const UnaryOperator *UnaryOp2 = cast<UnaryOperator>(Stmt2);
return UnaryOp1->getOpcode() == UnaryOp2->getOpcode();
}
}
}
//===----------------------------------------------------------------------===//
// FindIdenticalExprChecker
//===----------------------------------------------------------------------===//
namespace {
class FindIdenticalExprChecker : public Checker<check::ASTCodeBody> {
public:
void checkASTCodeBody(const Decl *D, AnalysisManager &Mgr,
BugReporter &BR) const {
FindIdenticalExprVisitor Visitor(BR, this, Mgr.getAnalysisDeclContext(D));
Visitor.TraverseDecl(const_cast<Decl *>(D));
}
};
} // end anonymous namespace
void ento::registerIdenticalExprChecker(CheckerManager &Mgr) {
Mgr.registerChecker<FindIdenticalExprChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp | //===--- UndefinedArraySubscriptChecker.h ----------------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This defines UndefinedArraySubscriptChecker, a builtin check in ExprEngine
// that performs checks for undefined array subscripts.
//
//===----------------------------------------------------------------------===//
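// A hypothetical example of the diagnostic this checker emits:
//
//   int a[10];
//   int i;     // never initialized
//   a[i] = 1;  // "Array subscript is undefined"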
#include "ClangSACheckers.h"
#include "clang/AST/DeclCXX.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
using namespace clang;
using namespace ento;
namespace {
class UndefinedArraySubscriptChecker
: public Checker< check::PreStmt<ArraySubscriptExpr> > {
mutable std::unique_ptr<BugType> BT;
public:
void checkPreStmt(const ArraySubscriptExpr *A, CheckerContext &C) const;
};
} // end anonymous namespace
void
UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A,
CheckerContext &C) const {
const Expr *Index = A->getIdx();
if (!C.getSVal(Index).isUndef())
return;
// Sema generates anonymous array variables for copying array struct fields.
// Don't warn if we're in an implicitly-generated constructor.
const Decl *D = C.getLocationContext()->getDecl();
if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(D))
if (Ctor->isDefaulted())
return;
ExplodedNode *N = C.generateSink();
if (!N)
return;
if (!BT)
BT.reset(new BuiltinBug(this, "Array subscript is undefined"));
// Generate a report for this bug.
auto R = llvm::make_unique<BugReport>(*BT, BT->getName(), N);
R->addRange(A->getIdx()->getSourceRange());
bugreporter::trackNullOrUndefValue(N, A->getIdx(), *R);
C.emitReport(std::move(R));
}
void ento::registerUndefinedArraySubscriptChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefinedArraySubscriptChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp | // UndefCapturedBlockVarChecker.cpp - Uninitialized captured vars -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This checker detects blocks that capture uninitialized values.
//
//===----------------------------------------------------------------------===//
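// A hypothetical example of the diagnostic this checker emits ('use' is a
// made-up function):
//
//   int x;                 // never initialized
//   void (^b)(void) = ^{ use(x); };  // "Variable 'x' is uninitialized when
//                                    //  captured by block"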
#include "ClangSACheckers.h"
#include "clang/AST/Attr.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
namespace {
class UndefCapturedBlockVarChecker
: public Checker< check::PostStmt<BlockExpr> > {
mutable std::unique_ptr<BugType> BT;
public:
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
};
} // end anonymous namespace
static const DeclRefExpr *FindBlockDeclRefExpr(const Stmt *S,
const VarDecl *VD) {
if (const DeclRefExpr *BR = dyn_cast<DeclRefExpr>(S))
if (BR->getDecl() == VD)
return BR;
for (const Stmt *Child : S->children())
if (Child)
if (const DeclRefExpr *BR = FindBlockDeclRefExpr(Child, VD))
return BR;
return nullptr;
}
void
UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
CheckerContext &C) const {
if (!BE->getBlockDecl()->hasCaptures())
return;
ProgramStateRef state = C.getState();
const BlockDataRegion *R =
cast<BlockDataRegion>(state->getSVal(BE,
C.getLocationContext()).getAsRegion());
BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
E = R->referenced_vars_end();
for (; I != E; ++I) {
// This VarRegion is the region associated with the block; we need
// the one associated with the encompassing context.
const VarRegion *VR = I.getCapturedRegion();
const VarDecl *VD = VR->getDecl();
if (VD->hasAttr<BlocksAttr>() || !VD->hasLocalStorage())
continue;
// Get the VarRegion associated with VD in the local stack frame.
if (Optional<UndefinedVal> V =
state->getSVal(I.getOriginalRegion()).getAs<UndefinedVal>()) {
if (ExplodedNode *N = C.generateSink()) {
if (!BT)
BT.reset(
new BuiltinBug(this, "uninitialized variable captured by block"));
// Generate a bug report.
SmallString<128> buf;
llvm::raw_svector_ostream os(buf);
os << "Variable '" << VD->getName()
<< "' is uninitialized when captured by block";
auto R = llvm::make_unique<BugReport>(*BT, os.str(), N);
if (const Expr *Ex = FindBlockDeclRefExpr(BE->getBody(), VD))
R->addRange(Ex->getSourceRange());
R->addVisitor(llvm::make_unique<FindLastStoreBRVisitor>(
*V, VR, /*EnableNullFPSuppression*/ false));
R->disablePathPruning();
// need location of block
C.emitReport(std::move(R));
}
}
}
}
void ento::registerUndefCapturedBlockVarChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefCapturedBlockVarChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp | //==--- MacOSKeychainAPIChecker.cpp ------------------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This checker flags misuses of the Keychain API. In particular, the password
// data allocated/returned by SecKeychainItemCopyContent,
// SecKeychainFindGenericPassword, and SecKeychainFindInternetPassword must be
// freed using a call to SecKeychainItemFreeContent.
//===----------------------------------------------------------------------===//
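//
// Illustrative example (an editorial addition, not in the original source):
//
//   void *data; UInt32 length;
//   OSStatus st = SecKeychainItemCopyContent(item, NULL, NULL, &length, &data);
//   if (st == noErr)
//     useData(data, length);          // useData is a hypothetical consumer
//   // leak: missing SecKeychainItemFreeContent(NULL, data)
//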
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
namespace {
class MacOSKeychainAPIChecker : public Checker<check::PreStmt<CallExpr>,
check::PostStmt<CallExpr>,
check::DeadSymbols> {
mutable std::unique_ptr<BugType> BT;
public:
/// AllocationState is a part of the checker-specific state, together with
/// the MemRegion corresponding to the allocated data.
struct AllocationState {
/// The index of the allocator function.
unsigned int AllocatorIdx;
SymbolRef Region;
AllocationState(const Expr *E, unsigned int Idx, SymbolRef R) :
AllocatorIdx(Idx),
Region(R) {}
bool operator==(const AllocationState &X) const {
return (AllocatorIdx == X.AllocatorIdx &&
Region == X.Region);
}
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddInteger(AllocatorIdx);
ID.AddPointer(Region);
}
};
void checkPreStmt(const CallExpr *S, CheckerContext &C) const;
void checkPostStmt(const CallExpr *S, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
private:
typedef std::pair<SymbolRef, const AllocationState*> AllocationPair;
typedef SmallVector<AllocationPair, 2> AllocationPairVec;
enum APIKind {
/// Denotes functions tracked by this checker.
ValidAPI = 0,
/// The functions commonly/mistakenly used in place of the given API.
ErrorAPI = 1,
/// The functions which may allocate the data. These are tracked to reduce
/// the false alarm rate.
PossibleAPI = 2
};
/// Stores the information about the allocator and deallocator functions -
/// these are the functions the checker is tracking.
struct ADFunctionInfo {
const char* Name;
unsigned int Param;
unsigned int DeallocatorIdx;
APIKind Kind;
};
static const unsigned InvalidIdx = 100000;
static const unsigned FunctionsToTrackSize = 8;
static const ADFunctionInfo FunctionsToTrack[FunctionsToTrackSize];
/// The value which represents a "no error" return from allocator functions.
static const unsigned NoErr = 0;
/// Given the function name, returns the index of the allocator/deallocator
/// function.
static unsigned getTrackedFunctionIndex(StringRef Name, bool IsAllocator);
inline void initBugType() const {
if (!BT)
BT.reset(new BugType(this, "Improper use of SecKeychain API",
"API Misuse (Apple)"));
}
void generateDeallocatorMismatchReport(const AllocationPair &AP,
const Expr *ArgExpr,
CheckerContext &C) const;
/// Find the allocation site for Sym on the path leading to the node N.
const ExplodedNode *getAllocationNode(const ExplodedNode *N, SymbolRef Sym,
CheckerContext &C) const;
std::unique_ptr<BugReport> generateAllocatedDataNotReleasedReport(
const AllocationPair &AP, ExplodedNode *N, CheckerContext &C) const;
/// Check if RetSym evaluates to an error value in the current state.
bool definitelyReturnedError(SymbolRef RetSym,
ProgramStateRef State,
SValBuilder &Builder,
bool noError = false) const;
/// Check if RetSym evaluates to a NoErr value in the current state.
bool definitelyDidnotReturnError(SymbolRef RetSym,
ProgramStateRef State,
SValBuilder &Builder) const {
return definitelyReturnedError(RetSym, State, Builder, true);
}
/// Mark an AllocationPair interesting for diagnostic reporting.
void markInteresting(BugReport *R, const AllocationPair &AP) const {
R->markInteresting(AP.first);
R->markInteresting(AP.second->Region);
}
/// The bug visitor which allows us to print extra diagnostics along the
/// BugReport path. For example, showing the allocation site of the leaked
/// region.
class SecKeychainBugVisitor
: public BugReporterVisitorImpl<SecKeychainBugVisitor> {
protected:
// The allocated region symbol tracked by the main analysis.
SymbolRef Sym;
public:
SecKeychainBugVisitor(SymbolRef S) : Sym(S) {}
~SecKeychainBugVisitor() override {}
void Profile(llvm::FoldingSetNodeID &ID) const override {
static int X = 0;
ID.AddPointer(&X);
ID.AddPointer(Sym);
}
PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) override;
};
};
}
/// ProgramState traits to store the currently allocated (and not yet freed)
/// symbols. This is a map from the allocated content symbol to the
/// corresponding AllocationState.
REGISTER_MAP_WITH_PROGRAMSTATE(AllocatedData,
SymbolRef,
MacOSKeychainAPIChecker::AllocationState)
static bool isEnclosingFunctionParam(const Expr *E) {
E = E->IgnoreParenCasts();
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
const ValueDecl *VD = DRE->getDecl();
if (isa<ImplicitParamDecl>(VD) || isa<ParmVarDecl>(VD))
return true;
}
return false;
}
const MacOSKeychainAPIChecker::ADFunctionInfo
MacOSKeychainAPIChecker::FunctionsToTrack[FunctionsToTrackSize] = {
{"SecKeychainItemCopyContent", 4, 3, ValidAPI}, // 0
{"SecKeychainFindGenericPassword", 6, 3, ValidAPI}, // 1
{"SecKeychainFindInternetPassword", 13, 3, ValidAPI}, // 2
{"SecKeychainItemFreeContent", 1, InvalidIdx, ValidAPI}, // 3
{"SecKeychainItemCopyAttributesAndData", 5, 5, ValidAPI}, // 4
{"SecKeychainItemFreeAttributesAndData", 1, InvalidIdx, ValidAPI}, // 5
{"free", 0, InvalidIdx, ErrorAPI}, // 6
{"CFStringCreateWithBytesNoCopy", 1, InvalidIdx, PossibleAPI}, // 7
};
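// Note on reading the table above (an editorial clarification): 'Param' is the
// zero-based index of the argument carrying the allocated data (e.g. the
// 'outData' pointer of SecKeychainItemCopyContent is argument 4), and
// 'DeallocatorIdx' is the row index of the matching deallocator (row 3,
// SecKeychainItemFreeContent). InvalidIdx marks functions with no tracked
// counterpart.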
unsigned MacOSKeychainAPIChecker::getTrackedFunctionIndex(StringRef Name,
bool IsAllocator) {
for (unsigned I = 0; I < FunctionsToTrackSize; ++I) {
ADFunctionInfo FI = FunctionsToTrack[I];
if (FI.Name != Name)
continue;
// Make sure the function is of the right type (allocator vs deallocator).
if (IsAllocator && (FI.DeallocatorIdx == InvalidIdx))
return InvalidIdx;
if (!IsAllocator && (FI.DeallocatorIdx != InvalidIdx))
return InvalidIdx;
return I;
}
// The function is not tracked.
return InvalidIdx;
}
static bool isBadDeallocationArgument(const MemRegion *Arg) {
if (!Arg)
return false;
if (isa<AllocaRegion>(Arg) ||
isa<BlockDataRegion>(Arg) ||
isa<TypedRegion>(Arg)) {
return true;
}
return false;
}
/// Given the address expression, retrieve the value it's pointing to. Assume
/// that value is itself an address, and return the corresponding symbol.
static SymbolRef getAsPointeeSymbol(const Expr *Expr,
CheckerContext &C) {
ProgramStateRef State = C.getState();
SVal ArgV = State->getSVal(Expr, C.getLocationContext());
if (Optional<loc::MemRegionVal> X = ArgV.getAs<loc::MemRegionVal>()) {
StoreManager& SM = C.getStoreManager();
SymbolRef sym = SM.getBinding(State->getStore(), *X).getAsLocSymbol();
if (sym)
return sym;
}
return nullptr;
}
// When checking for error code, we need to consider the following cases:
// 1) noErr / [0]
// 2) someErr / [1, inf]
// 3) unknown
// If noError, returns true iff (1).
// If !noError, returns true iff (2).
bool MacOSKeychainAPIChecker::definitelyReturnedError(SymbolRef RetSym,
ProgramStateRef State,
SValBuilder &Builder,
bool noError) const {
DefinedOrUnknownSVal NoErrVal = Builder.makeIntVal(NoErr,
Builder.getSymbolManager().getType(RetSym));
DefinedOrUnknownSVal NoErr = Builder.evalEQ(State, NoErrVal,
nonloc::SymbolVal(RetSym));
ProgramStateRef ErrState = State->assume(NoErr, noError);
if (ErrState == State) {
return true;
}
return false;
}
// Report deallocator mismatch. Remove the region from tracking - reporting a
// missing free error after this one is redundant.
void MacOSKeychainAPIChecker::
generateDeallocatorMismatchReport(const AllocationPair &AP,
const Expr *ArgExpr,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
State = State->remove<AllocatedData>(AP.first);
ExplodedNode *N = C.addTransition(State);
if (!N)
return;
initBugType();
SmallString<80> sbuf;
llvm::raw_svector_ostream os(sbuf);
unsigned int PDeallocIdx =
FunctionsToTrack[AP.second->AllocatorIdx].DeallocatorIdx;
os << "Deallocator doesn't match the allocator: '"
<< FunctionsToTrack[PDeallocIdx].Name << "' should be used.";
auto Report = llvm::make_unique<BugReport>(*BT, os.str(), N);
Report->addVisitor(llvm::make_unique<SecKeychainBugVisitor>(AP.first));
Report->addRange(ArgExpr->getSourceRange());
markInteresting(Report.get(), AP);
C.emitReport(std::move(Report));
}
void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
unsigned idx = InvalidIdx;
ProgramStateRef State = C.getState();
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD || FD->getKind() != Decl::Function)
return;
StringRef funName = C.getCalleeName(FD);
if (funName.empty())
return;
// If it is a call to an allocator function, it could be a double allocation.
idx = getTrackedFunctionIndex(funName, true);
if (idx != InvalidIdx) {
unsigned paramIdx = FunctionsToTrack[idx].Param;
if (CE->getNumArgs() <= paramIdx)
return;
const Expr *ArgExpr = CE->getArg(paramIdx);
if (SymbolRef V = getAsPointeeSymbol(ArgExpr, C))
if (const AllocationState *AS = State->get<AllocatedData>(V)) {
if (!definitelyReturnedError(AS->Region, State, C.getSValBuilder())) {
// Remove the value from the state. The new symbol will be added for
// tracking when the second allocator is processed in checkPostStmt().
State = State->remove<AllocatedData>(V);
ExplodedNode *N = C.addTransition(State);
if (!N)
return;
initBugType();
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
unsigned int DIdx = FunctionsToTrack[AS->AllocatorIdx].DeallocatorIdx;
os << "Allocated data should be released before another call to "
<< "the allocator: missing a call to '"
<< FunctionsToTrack[DIdx].Name
<< "'.";
auto Report = llvm::make_unique<BugReport>(*BT, os.str(), N);
Report->addVisitor(llvm::make_unique<SecKeychainBugVisitor>(V));
Report->addRange(ArgExpr->getSourceRange());
Report->markInteresting(AS->Region);
C.emitReport(std::move(Report));
}
}
return;
}
// Is it a call to one of deallocator functions?
idx = getTrackedFunctionIndex(funName, false);
if (idx == InvalidIdx)
return;
unsigned paramIdx = FunctionsToTrack[idx].Param;
if (CE->getNumArgs() <= paramIdx)
return;
// Check the argument to the deallocator.
const Expr *ArgExpr = CE->getArg(paramIdx);
SVal ArgSVal = State->getSVal(ArgExpr, C.getLocationContext());
// Undef is reported by another checker.
if (ArgSVal.isUndef())
return;
SymbolRef ArgSM = ArgSVal.getAsLocSymbol();
// If the argument is coming from the heap, globals, or unknown, do not
// report it.
bool RegionArgIsBad = false;
if (!ArgSM) {
if (!isBadDeallocationArgument(ArgSVal.getAsRegion()))
return;
RegionArgIsBad = true;
}
// Is the argument to the call being tracked?
const AllocationState *AS = State->get<AllocatedData>(ArgSM);
if (!AS && FunctionsToTrack[idx].Kind != ValidAPI) {
return;
}
// If trying to free data which has not been allocated yet, report as a bug.
// TODO: We might want a more precise diagnostic for double free
// (that would involve tracking all the freed symbols in the checker state).
if (!AS || RegionArgIsBad) {
// It is possible that this is a false positive - the argument might
// have entered as an enclosing function parameter.
if (isEnclosingFunctionParam(ArgExpr))
return;
ExplodedNode *N = C.addTransition(State);
if (!N)
return;
initBugType();
auto Report = llvm::make_unique<BugReport>(
*BT, "Trying to free data which has not been allocated.", N);
Report->addRange(ArgExpr->getSourceRange());
if (AS)
Report->markInteresting(AS->Region);
C.emitReport(std::move(Report));
return;
}
// Process functions which might deallocate.
if (FunctionsToTrack[idx].Kind == PossibleAPI) {
if (funName == "CFStringCreateWithBytesNoCopy") {
const Expr *DeallocatorExpr = CE->getArg(5)->IgnoreParenCasts();
// NULL ~ default deallocator, so warn.
if (DeallocatorExpr->isNullPointerConstant(C.getASTContext(),
Expr::NPC_ValueDependentIsNotNull)) {
const AllocationPair AP = std::make_pair(ArgSM, AS);
generateDeallocatorMismatchReport(AP, ArgExpr, C);
return;
}
// One of the default allocators, so warn.
if (const DeclRefExpr *DE = dyn_cast<DeclRefExpr>(DeallocatorExpr)) {
StringRef DeallocatorName = DE->getFoundDecl()->getName();
if (DeallocatorName == "kCFAllocatorDefault" ||
DeallocatorName == "kCFAllocatorSystemDefault" ||
DeallocatorName == "kCFAllocatorMalloc") {
const AllocationPair AP = std::make_pair(ArgSM, AS);
generateDeallocatorMismatchReport(AP, ArgExpr, C);
return;
}
// If kCFAllocatorNull, which does not deallocate, we still have to
// find the deallocator.
if (DE->getFoundDecl()->getName() == "kCFAllocatorNull")
return;
}
// In all other cases, assume the user supplied a correct deallocator
// that will free memory so stop tracking.
State = State->remove<AllocatedData>(ArgSM);
C.addTransition(State);
return;
}
llvm_unreachable("We know of no other possible APIs.");
}
// The call is deallocating a value we previously allocated, so remove it
// from the next state.
State = State->remove<AllocatedData>(ArgSM);
// Check if the proper deallocator is used.
unsigned int PDeallocIdx = FunctionsToTrack[AS->AllocatorIdx].DeallocatorIdx;
if (PDeallocIdx != idx || (FunctionsToTrack[idx].Kind == ErrorAPI)) {
const AllocationPair AP = std::make_pair(ArgSM, AS);
generateDeallocatorMismatchReport(AP, ArgExpr, C);
return;
}
// If the buffer can be null and the return status can be an error,
// report a bad call to free.
if (State->assume(ArgSVal.castAs<DefinedSVal>(), false) &&
!definitelyDidnotReturnError(AS->Region, State, C.getSValBuilder())) {
ExplodedNode *N = C.addTransition(State);
if (!N)
return;
initBugType();
auto Report = llvm::make_unique<BugReport>(
*BT, "Only call free if a valid (non-NULL) buffer was returned.", N);
Report->addVisitor(llvm::make_unique<SecKeychainBugVisitor>(ArgSM));
Report->addRange(ArgExpr->getSourceRange());
Report->markInteresting(AS->Region);
C.emitReport(std::move(Report));
return;
}
C.addTransition(State);
}
void MacOSKeychainAPIChecker::checkPostStmt(const CallExpr *CE,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD || FD->getKind() != Decl::Function)
return;
StringRef funName = C.getCalleeName(FD);
// If a value has been allocated, add it to the set for tracking.
unsigned idx = getTrackedFunctionIndex(funName, true);
if (idx == InvalidIdx)
return;
const Expr *ArgExpr = CE->getArg(FunctionsToTrack[idx].Param);
// If the argument entered as an enclosing function parameter, skip it to
// avoid false positives.
if (isEnclosingFunctionParam(ArgExpr) &&
C.getLocationContext()->getParent() == nullptr)
return;
if (SymbolRef V = getAsPointeeSymbol(ArgExpr, C)) {
// If the argument points to something that's not a symbolic region, it
// can be:
// - unknown (cannot reason about it)
// - undefined (already reported by other checker)
// - constant (null - should not be tracked,
// other constants will generate a compiler warning)
// - goto (should be reported by other checker)
// The call return value symbol should stay alive for as long as the
// allocated value symbol, since our diagnostics depend on the value
// returned by the call. Ex: Data should only be freed if noErr was
// returned during allocation.
SymbolRef RetStatusSymbol =
State->getSVal(CE, C.getLocationContext()).getAsSymbol();
C.getSymbolManager().addSymbolDependency(V, RetStatusSymbol);
// Track the allocated value in the checker state.
State = State->set<AllocatedData>(V, AllocationState(ArgExpr, idx,
RetStatusSymbol));
assert(State);
C.addTransition(State);
}
}
// TODO: This logic is the same as in Malloc checker.
const ExplodedNode *
MacOSKeychainAPIChecker::getAllocationNode(const ExplodedNode *N,
SymbolRef Sym,
CheckerContext &C) const {
const LocationContext *LeakContext = N->getLocationContext();
// Walk the ExplodedGraph backwards and find the first node that referred to
// the tracked symbol.
const ExplodedNode *AllocNode = N;
while (N) {
if (!N->getState()->get<AllocatedData>(Sym))
break;
// The allocation node is the last node in the current or parent context in
// which the symbol was tracked.
const LocationContext *NContext = N->getLocationContext();
if (NContext == LeakContext ||
NContext->isParentOf(LeakContext))
AllocNode = N;
N = N->pred_empty() ? nullptr : *(N->pred_begin());
}
return AllocNode;
}
std::unique_ptr<BugReport>
MacOSKeychainAPIChecker::generateAllocatedDataNotReleasedReport(
const AllocationPair &AP, ExplodedNode *N, CheckerContext &C) const {
const ADFunctionInfo &FI = FunctionsToTrack[AP.second->AllocatorIdx];
initBugType();
SmallString<70> sbuf;
llvm::raw_svector_ostream os(sbuf);
os << "Allocated data is not released: missing a call to '"
<< FunctionsToTrack[FI.DeallocatorIdx].Name << "'.";
// Most bug reports are cached at the location where they occurred.
// With leaks, we want to unique them by the location where they were
// allocated, and only report a single path.
PathDiagnosticLocation LocUsedForUniqueing;
const ExplodedNode *AllocNode = getAllocationNode(N, AP.first, C);
const Stmt *AllocStmt = nullptr;
ProgramPoint P = AllocNode->getLocation();
if (Optional<CallExitEnd> Exit = P.getAs<CallExitEnd>())
AllocStmt = Exit->getCalleeContext()->getCallSite();
else if (Optional<clang::PostStmt> PS = P.getAs<clang::PostStmt>())
AllocStmt = PS->getStmt();
if (AllocStmt)
LocUsedForUniqueing = PathDiagnosticLocation::createBegin(AllocStmt,
C.getSourceManager(),
AllocNode->getLocationContext());
auto Report =
llvm::make_unique<BugReport>(*BT, os.str(), N, LocUsedForUniqueing,
AllocNode->getLocationContext()->getDecl());
Report->addVisitor(llvm::make_unique<SecKeychainBugVisitor>(AP.first));
markInteresting(Report.get(), AP);
return Report;
}
void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
AllocatedDataTy ASet = State->get<AllocatedData>();
if (ASet.isEmpty())
return;
bool Changed = false;
AllocationPairVec Errors;
for (AllocatedDataTy::iterator I = ASet.begin(), E = ASet.end(); I != E; ++I) {
if (SR.isLive(I->first))
continue;
Changed = true;
State = State->remove<AllocatedData>(I->first);
// If the allocated symbol is null or if the allocation call might have
// returned an error, do not report.
ConstraintManager &CMgr = State->getConstraintManager();
ConditionTruthVal AllocFailed = CMgr.isNull(State, I.getKey());
if (AllocFailed.isConstrainedTrue() ||
definitelyReturnedError(I->second.Region, State, C.getSValBuilder()))
continue;
Errors.push_back(std::make_pair(I->first, &I->second));
}
if (!Changed) {
// Generate the new, cleaned up state.
C.addTransition(State);
return;
}
static CheckerProgramPointTag Tag(this, "DeadSymbolsLeak");
ExplodedNode *N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
// Generate the error reports.
for (const auto P : Errors)
C.emitReport(generateAllocatedDataNotReleasedReport(P, N, C));
// Generate the new, cleaned up state.
C.addTransition(State, N);
}
PathDiagnosticPiece *MacOSKeychainAPIChecker::SecKeychainBugVisitor::VisitNode(
const ExplodedNode *N,
const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) {
const AllocationState *AS = N->getState()->get<AllocatedData>(Sym);
if (!AS)
return nullptr;
const AllocationState *ASPrev = PrevN->getState()->get<AllocatedData>(Sym);
if (ASPrev)
return nullptr;
// (!ASPrev && AS) ~ We started tracking the symbol in node N; it must be
// the allocation site.
const CallExpr *CE =
cast<CallExpr>(N->getLocation().castAs<StmtPoint>().getStmt());
const FunctionDecl *funDecl = CE->getDirectCallee();
assert(funDecl && "We do not support indirect function calls as of now.");
StringRef funName = funDecl->getName();
// Get the expression of the corresponding argument.
unsigned Idx = getTrackedFunctionIndex(funName, true);
assert(Idx != InvalidIdx && "This should be a call to an allocator.");
const Expr *ArgExpr = CE->getArg(FunctionsToTrack[Idx].Param);
PathDiagnosticLocation Pos(ArgExpr, BRC.getSourceManager(),
N->getLocationContext());
return new PathDiagnosticEventPiece(Pos, "Data is allocated here.");
}
void ento::registerMacOSKeychainAPIChecker(CheckerManager &mgr) {
mgr.registerChecker<MacOSKeychainAPIChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/CMakeLists.txt | clang_tablegen(Checkers.inc -gen-clang-sa-checkers
-I ${CMAKE_CURRENT_SOURCE_DIR}/../../../include
SOURCE Checkers.td
TARGET ClangSACheckers)
set(LLVM_LINK_COMPONENTS
Support
)
add_clang_library(clangStaticAnalyzerCheckers
AllocationDiagnostics.cpp
AnalyzerStatsChecker.cpp
ArrayBoundChecker.cpp
ArrayBoundCheckerV2.cpp
BasicObjCFoundationChecks.cpp
BoolAssignmentChecker.cpp
BuiltinFunctionChecker.cpp
CStringChecker.cpp
CStringSyntaxChecker.cpp
CallAndMessageChecker.cpp
CastSizeChecker.cpp
CastToStructChecker.cpp
CheckObjCDealloc.cpp
CheckObjCInstMethSignature.cpp
CheckSecuritySyntaxOnly.cpp
CheckSizeofPointer.cpp
CheckerDocumentation.cpp
ChrootChecker.cpp
ClangCheckers.cpp
DeadStoresChecker.cpp
DebugCheckers.cpp
DereferenceChecker.cpp
DirectIvarAssignment.cpp
DivZeroChecker.cpp
DynamicTypePropagation.cpp
ExprInspectionChecker.cpp
FixedAddressChecker.cpp
GenericTaintChecker.cpp
IdenticalExprChecker.cpp
IvarInvalidationChecker.cpp
LLVMConventionsChecker.cpp
MacOSKeychainAPIChecker.cpp
MacOSXAPIChecker.cpp
MallocChecker.cpp
MallocOverflowSecurityChecker.cpp
MallocSizeofChecker.cpp
NSAutoreleasePoolChecker.cpp
NSErrorChecker.cpp
NoReturnFunctionChecker.cpp
NonNullParamChecker.cpp
ObjCAtSyncChecker.cpp
ObjCContainersASTChecker.cpp
ObjCContainersChecker.cpp
ObjCMissingSuperCallChecker.cpp
ObjCSelfInitChecker.cpp
ObjCUnusedIVarsChecker.cpp
PointerArithChecker.cpp
PointerSubChecker.cpp
PthreadLockChecker.cpp
RetainCountChecker.cpp
ReturnPointerRangeChecker.cpp
ReturnUndefChecker.cpp
SimpleStreamChecker.cpp
StackAddrEscapeChecker.cpp
StreamChecker.cpp
TaintTesterChecker.cpp
TestAfterDivZeroChecker.cpp
TraversalChecker.cpp
UndefBranchChecker.cpp
UndefCapturedBlockVarChecker.cpp
UndefResultChecker.cpp
UndefinedArraySubscriptChecker.cpp
UndefinedAssignmentChecker.cpp
UnixAPIChecker.cpp
UnreachableCodeChecker.cpp
VLASizeChecker.cpp
VirtualCallChecker.cpp
DEPENDS
ClangSACheckers
LINK_LIBS
clangAST
clangAnalysis
clangBasic
clangStaticAnalyzerCore
)
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp | //=== CastToStructChecker.cpp - Cast from non-struct to struct checker -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines CastToStructChecker, a builtin checker that checks for
// cast from non-struct pointer to struct pointer.
// This check corresponds to CWE-588.
//
//===----------------------------------------------------------------------===//
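//
// Illustrative example (an editorial addition, not in the original source):
//
//   struct Record { int a, b; };
//   void f(char *p) {
//     struct Record *r = (struct Record *)p;
//     // warning: Cast from non-struct type to struct type
//     (void)r;
//   }
//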
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
using namespace clang;
using namespace ento;
namespace {
class CastToStructChecker : public Checker< check::PreStmt<CastExpr> > {
mutable std::unique_ptr<BuiltinBug> BT;
public:
void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
};
}
void CastToStructChecker::checkPreStmt(const CastExpr *CE,
CheckerContext &C) const {
const Expr *E = CE->getSubExpr();
ASTContext &Ctx = C.getASTContext();
QualType OrigTy = Ctx.getCanonicalType(E->getType());
QualType ToTy = Ctx.getCanonicalType(CE->getType());
const PointerType *OrigPTy = dyn_cast<PointerType>(OrigTy.getTypePtr());
const PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
if (!ToPTy || !OrigPTy)
return;
QualType OrigPointeeTy = OrigPTy->getPointeeType();
QualType ToPointeeTy = ToPTy->getPointeeType();
if (!ToPointeeTy->isStructureOrClassType())
return;
// We allow cast from void*.
if (OrigPointeeTy->isVoidType())
return;
// Now the cast-to-type is struct pointer, the original type is not void*.
if (!OrigPointeeTy->isRecordType()) {
if (ExplodedNode *N = C.addTransition()) {
if (!BT)
BT.reset(
new BuiltinBug(this, "Cast from non-struct type to struct type",
"Casting a non-structure type to a structure type "
"and accessing a field can lead to memory access "
"errors or data corruption."));
auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N);
R->addRange(CE->getSourceRange());
C.emitReport(std::move(R));
}
}
}
void ento::registerCastToStructChecker(CheckerManager &mgr) {
mgr.registerChecker<CastToStructChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp | //=- NSAutoreleasePoolChecker.cpp --------------------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines NSAutoreleasePoolChecker, a small checker that warns
// about subpar uses of NSAutoreleasePool. Note that while the check itself
// (in its current form) could be written as a flow-insensitive check, it
// can potentially be enhanced in the future with flow-sensitive information.
// It is also a good example of the CheckerVisitor interface.
//
//===----------------------------------------------------------------------===//
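//
// Illustrative example (an editorial addition, not in the original source);
// the checker only runs when garbage collection is enabled:
//
//   NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
//   // ... work that autoreleases objects ...
//   [pool release];  // warning: Use -drain instead of -release
//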
#include "ClangSACheckers.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
using namespace ento;
namespace {
class NSAutoreleasePoolChecker
: public Checker<check::PreObjCMessage> {
mutable std::unique_ptr<BugType> BT;
mutable Selector releaseS;
public:
void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
};
} // end anonymous namespace
void NSAutoreleasePoolChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
CheckerContext &C) const {
if (!msg.isInstanceMessage())
return;
const ObjCInterfaceDecl *OD = msg.getReceiverInterface();
if (!OD)
return;
if (!OD->getIdentifier()->isStr("NSAutoreleasePool"))
return;
if (releaseS.isNull())
releaseS = GetNullarySelector("release", C.getASTContext());
// Sending 'release' message?
if (msg.getSelector() != releaseS)
return;
if (!BT)
BT.reset(new BugType(this, "Use -drain instead of -release",
"API Upgrade (Apple)"));
ExplodedNode *N = C.addTransition();
if (!N) {
assert(0);
return;
}
auto Report = llvm::make_unique<BugReport>(
*BT, "Use -drain instead of -release when using NSAutoreleasePool and "
"garbage collection", N);
Report->addRange(msg.getSourceRange());
C.emitReport(std::move(Report));
}
void ento::registerNSAutoreleasePoolChecker(CheckerManager &mgr) {
if (mgr.getLangOpts().getGC() != LangOptions::NonGC)
mgr.registerChecker<NSAutoreleasePoolChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp | //=- IvarInvalidationChecker.cpp - -*- C++ -------------------------------*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This checker implements annotation driven invalidation checking. If a class
// contains a method annotated with 'objc_instance_variable_invalidator',
// - (void) foo
// __attribute__((annotate("objc_instance_variable_invalidator")));
// all the "ivalidatable" instance variables of this class should be
// invalidated. We call an instance variable ivalidatable if it is an object of
// a class which contains an invalidation method. There could be multiple
// methods annotated with such annotations per class, either one can be used
// to invalidate the ivar. An ivar or property are considered to be
// invalidated if they are being assigned 'nil' or an invalidation method has
// been called on them. An invalidation method should either invalidate all
// the ivars or call another invalidation method (on self).
//
// Partial invalidor annotation allows to addess cases when ivars are
// invalidated by other methods, which might or might not be called from
// the invalidation method. The checker checks that each invalidation
// method and all the partial methods cumulatively invalidate all ivars.
// __attribute__((annotate("objc_instance_variable_invalidator_partial")));
//
//===----------------------------------------------------------------------===//
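//
// Illustrative example (an editorial addition, not in the original source):
//
//   @interface Foo : NSObject {
//     Bar *_bar;  // Bar declares an invalidation method, so _bar is tracked
//   }
//   - (void)invalidate
//       __attribute__((annotate("objc_instance_variable_invalidator")));
//   @end
//   // The -invalidate implementation must nil out _bar or call its
//   // invalidation method, otherwise the checker reports _bar.
//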
#include "ClangSACheckers.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallString.h"
using namespace clang;
using namespace ento;
namespace {
struct ChecksFilter {
/// Check for missing invalidation method declarations.
DefaultBool check_MissingInvalidationMethod;
/// Check that all ivars are invalidated.
DefaultBool check_InstanceVariableInvalidation;
CheckName checkName_MissingInvalidationMethod;
CheckName checkName_InstanceVariableInvalidation;
};
class IvarInvalidationCheckerImpl {
typedef llvm::SmallSetVector<const ObjCMethodDecl*, 2> MethodSet;
typedef llvm::DenseMap<const ObjCMethodDecl*,
const ObjCIvarDecl*> MethToIvarMapTy;
typedef llvm::DenseMap<const ObjCPropertyDecl*,
const ObjCIvarDecl*> PropToIvarMapTy;
typedef llvm::DenseMap<const ObjCIvarDecl*,
const ObjCPropertyDecl*> IvarToPropMapTy;
struct InvalidationInfo {
/// Has the ivar been invalidated?
bool IsInvalidated;
/// The methods which can be used to invalidate the ivar.
MethodSet InvalidationMethods;
InvalidationInfo() : IsInvalidated(false) {}
void addInvalidationMethod(const ObjCMethodDecl *MD) {
InvalidationMethods.insert(MD);
}
bool needsInvalidation() const {
return !InvalidationMethods.empty();
}
bool hasMethod(const ObjCMethodDecl *MD) {
if (IsInvalidated)
return true;
for (MethodSet::iterator I = InvalidationMethods.begin(),
E = InvalidationMethods.end(); I != E; ++I) {
if (*I == MD) {
IsInvalidated = true;
return true;
}
}
return false;
}
};
typedef llvm::DenseMap<const ObjCIvarDecl*, InvalidationInfo> IvarSet;
/// Statement visitor, which walks the method body and flags the ivars
/// referenced in it (either directly or via property).
class MethodCrawler : public ConstStmtVisitor<MethodCrawler> {
/// The set of Ivars which need to be invalidated.
IvarSet &IVars;
/// Flag is set as the result of a message send to another
/// invalidation method.
bool &CalledAnotherInvalidationMethod;
/// Property setter to ivar mapping.
const MethToIvarMapTy &PropertySetterToIvarMap;
/// Property getter to ivar mapping.
const MethToIvarMapTy &PropertyGetterToIvarMap;
/// Property to ivar mapping.
const PropToIvarMapTy &PropertyToIvarMap;
/// The invalidation method being currently processed.
const ObjCMethodDecl *InvalidationMethod;
ASTContext &Ctx;
/// Peel off parens, casts, OpaqueValueExpr, and PseudoObjectExpr.
const Expr *peel(const Expr *E) const;
/// Does this expression represent zero: '0'?
bool isZero(const Expr *E) const;
/// Mark the given ivar as invalidated.
void markInvalidated(const ObjCIvarDecl *Iv);
/// Checks if IvarRef refers to the tracked IVar, if yes, marks it as
/// invalidated.
void checkObjCIvarRefExpr(const ObjCIvarRefExpr *IvarRef);
/// Checks if ObjCPropertyRefExpr refers to the tracked IVar, if yes, marks
/// it as invalidated.
void checkObjCPropertyRefExpr(const ObjCPropertyRefExpr *PA);
/// Checks if ObjCMessageExpr refers to (is a getter for) the tracked IVar,
/// if yes, marks it as invalidated.
void checkObjCMessageExpr(const ObjCMessageExpr *ME);
/// Checks if the Expr refers to an ivar, if yes, marks it as invalidated.
void check(const Expr *E);
public:
MethodCrawler(IvarSet &InIVars,
bool &InCalledAnotherInvalidationMethod,
const MethToIvarMapTy &InPropertySetterToIvarMap,
const MethToIvarMapTy &InPropertyGetterToIvarMap,
const PropToIvarMapTy &InPropertyToIvarMap,
ASTContext &InCtx)
: IVars(InIVars),
CalledAnotherInvalidationMethod(InCalledAnotherInvalidationMethod),
PropertySetterToIvarMap(InPropertySetterToIvarMap),
PropertyGetterToIvarMap(InPropertyGetterToIvarMap),
PropertyToIvarMap(InPropertyToIvarMap),
InvalidationMethod(nullptr),
Ctx(InCtx) {}
void VisitStmt(const Stmt *S) { VisitChildren(S); }
void VisitBinaryOperator(const BinaryOperator *BO);
void VisitObjCMessageExpr(const ObjCMessageExpr *ME);
void VisitChildren(const Stmt *S) {
for (const Stmt *Child : S->children()) {
if (Child)
this->Visit(Child);
if (CalledAnotherInvalidationMethod)
return;
}
}
};
/// Check if any of the methods inside the interface are annotated with
/// the invalidation annotation, and update the InvalidationInfo accordingly.
/// \param LookForPartial is set when we are searching for partial
/// invalidators.
static void containsInvalidationMethod(const ObjCContainerDecl *D,
InvalidationInfo &Out,
bool LookForPartial);
/// Check if ivar should be tracked and add to TrackedIvars if positive.
/// Returns true if ivar should be tracked.
static bool trackIvar(const ObjCIvarDecl *Iv, IvarSet &TrackedIvars,
const ObjCIvarDecl **FirstIvarDecl);
/// Given the property declaration, and the list of tracked ivars, finds
/// the ivar backing the property when possible. Returns null when no such
/// ivar could be found.
static const ObjCIvarDecl *findPropertyBackingIvar(
const ObjCPropertyDecl *Prop,
const ObjCInterfaceDecl *InterfaceD,
IvarSet &TrackedIvars,
const ObjCIvarDecl **FirstIvarDecl);
/// Print ivar name or the property if the given ivar backs a property.
static void printIvar(llvm::raw_svector_ostream &os,
const ObjCIvarDecl *IvarDecl,
const IvarToPropMapTy &IvarToPopertyMap);
void reportNoInvalidationMethod(CheckName CheckName,
const ObjCIvarDecl *FirstIvarDecl,
const IvarToPropMapTy &IvarToPopertyMap,
const ObjCInterfaceDecl *InterfaceD,
bool MissingDeclaration) const;
void reportIvarNeedsInvalidation(const ObjCIvarDecl *IvarD,
const IvarToPropMapTy &IvarToPopertyMap,
const ObjCMethodDecl *MethodD) const;
AnalysisManager& Mgr;
BugReporter &BR;
/// Filter on the checks performed.
const ChecksFilter &Filter;
public:
IvarInvalidationCheckerImpl(AnalysisManager& InMgr,
BugReporter &InBR,
const ChecksFilter &InFilter) :
Mgr (InMgr), BR(InBR), Filter(InFilter) {}
void visit(const ObjCImplementationDecl *D) const;
};
static bool isInvalidationMethod(const ObjCMethodDecl *M, bool LookForPartial) {
for (const auto *Ann : M->specific_attrs<AnnotateAttr>()) {
if (!LookForPartial &&
Ann->getAnnotation() == "objc_instance_variable_invalidator")
return true;
if (LookForPartial &&
Ann->getAnnotation() == "objc_instance_variable_invalidator_partial")
return true;
}
return false;
}
void IvarInvalidationCheckerImpl::containsInvalidationMethod(
const ObjCContainerDecl *D, InvalidationInfo &OutInfo, bool Partial) {
if (!D)
return;
assert(!isa<ObjCImplementationDecl>(D));
// TODO: Cache the results.
// Check all methods.
for (const auto *MDI : D->methods())
if (isInvalidationMethod(MDI, Partial))
OutInfo.addInvalidationMethod(
cast<ObjCMethodDecl>(MDI->getCanonicalDecl()));
// If interface, check all parent protocols and super.
if (const ObjCInterfaceDecl *InterfD = dyn_cast<ObjCInterfaceDecl>(D)) {
// Visit all protocols.
for (const auto *I : InterfD->protocols())
containsInvalidationMethod(I->getDefinition(), OutInfo, Partial);
// Visit all categories in case the invalidation method is declared in
// a category.
for (const auto *Ext : InterfD->visible_extensions())
containsInvalidationMethod(Ext, OutInfo, Partial);
containsInvalidationMethod(InterfD->getSuperClass(), OutInfo, Partial);
return;
}
// If protocol, check all parent protocols.
if (const ObjCProtocolDecl *ProtD = dyn_cast<ObjCProtocolDecl>(D)) {
for (const auto *I : ProtD->protocols()) {
containsInvalidationMethod(I->getDefinition(), OutInfo, Partial);
}
return;
}
}
bool IvarInvalidationCheckerImpl::trackIvar(const ObjCIvarDecl *Iv,
IvarSet &TrackedIvars,
const ObjCIvarDecl **FirstIvarDecl) {
QualType IvQTy = Iv->getType();
const ObjCObjectPointerType *IvTy = IvQTy->getAs<ObjCObjectPointerType>();
if (!IvTy)
return false;
const ObjCInterfaceDecl *IvInterf = IvTy->getInterfaceDecl();
InvalidationInfo Info;
containsInvalidationMethod(IvInterf, Info, /*LookForPartial*/ false);
if (Info.needsInvalidation()) {
const ObjCIvarDecl *I = cast<ObjCIvarDecl>(Iv->getCanonicalDecl());
TrackedIvars[I] = Info;
if (!*FirstIvarDecl)
*FirstIvarDecl = I;
return true;
}
return false;
}
const ObjCIvarDecl *IvarInvalidationCheckerImpl::findPropertyBackingIvar(
const ObjCPropertyDecl *Prop,
const ObjCInterfaceDecl *InterfaceD,
IvarSet &TrackedIvars,
const ObjCIvarDecl **FirstIvarDecl) {
const ObjCIvarDecl *IvarD = nullptr;
// Look up the ivar for the synthesized case first.
IvarD = Prop->getPropertyIvarDecl();
// We only track the ivars/properties that are defined in the current
// class (not the parent).
if (IvarD && IvarD->getContainingInterface() == InterfaceD) {
if (TrackedIvars.count(IvarD)) {
return IvarD;
}
// If the ivar is synthesized we still want to track it.
if (trackIvar(IvarD, TrackedIvars, FirstIvarDecl))
return IvarD;
}
// Look up ivars named "_PropName" or "PropName" among the tracked ivars.
StringRef PropName = Prop->getIdentifier()->getName();
for (IvarSet::const_iterator I = TrackedIvars.begin(),
E = TrackedIvars.end(); I != E; ++I) {
const ObjCIvarDecl *Iv = I->first;
StringRef IvarName = Iv->getName();
if (IvarName == PropName)
return Iv;
SmallString<128> PropNameWithUnderscore;
{
llvm::raw_svector_ostream os(PropNameWithUnderscore);
os << '_' << PropName;
}
if (IvarName == PropNameWithUnderscore)
return Iv;
}
// Note, this is a possible source of false positives. We could look at the
// getter implementation to find the ivar when its name is not derived from
// the property name.
return nullptr;
}
void IvarInvalidationCheckerImpl::printIvar(llvm::raw_svector_ostream &os,
const ObjCIvarDecl *IvarDecl,
const IvarToPropMapTy &IvarToPopertyMap) {
if (IvarDecl->getSynthesize()) {
const ObjCPropertyDecl *PD = IvarToPopertyMap.lookup(IvarDecl);
assert(PD &&"Do we synthesize ivars for something other than properties?");
os << "Property "<< PD->getName() << " ";
} else {
os << "Instance variable "<< IvarDecl->getName() << " ";
}
}
// Check that the invalidatable interfaces with ivars/properties implement the
// invalidation methods.
void IvarInvalidationCheckerImpl::
visit(const ObjCImplementationDecl *ImplD) const {
// Collect all ivars that need cleanup.
IvarSet Ivars;
// Record the first ivar needing invalidation; used in reporting when only
// one ivar is sufficient. We cannot simply grab the first element of the
// Ivars set, since iteration order there is not deterministic.
const ObjCIvarDecl *FirstIvarDecl = nullptr;
const ObjCInterfaceDecl *InterfaceD = ImplD->getClassInterface();
// Collect ivars declared in this class, its extensions, and its implementation.
ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(InterfaceD);
for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
Iv= Iv->getNextIvar())
trackIvar(Iv, Ivars, &FirstIvarDecl);
// Construct Property/Property Accessor to Ivar maps to assist checking if an
// ivar which is backing a property has been reset.
MethToIvarMapTy PropSetterToIvarMap;
MethToIvarMapTy PropGetterToIvarMap;
PropToIvarMapTy PropertyToIvarMap;
IvarToPropMapTy IvarToPopertyMap;
ObjCInterfaceDecl::PropertyMap PropMap;
ObjCInterfaceDecl::PropertyDeclOrder PropOrder;
InterfaceD->collectPropertiesToImplement(PropMap, PropOrder);
for (ObjCInterfaceDecl::PropertyMap::iterator
I = PropMap.begin(), E = PropMap.end(); I != E; ++I) {
const ObjCPropertyDecl *PD = I->second;
const ObjCIvarDecl *ID = findPropertyBackingIvar(PD, InterfaceD, Ivars,
&FirstIvarDecl);
if (!ID)
continue;
// Store the mappings.
PD = cast<ObjCPropertyDecl>(PD->getCanonicalDecl());
PropertyToIvarMap[PD] = ID;
IvarToPopertyMap[ID] = PD;
// Find the setter and the getter.
const ObjCMethodDecl *SetterD = PD->getSetterMethodDecl();
if (SetterD) {
SetterD = cast<ObjCMethodDecl>(SetterD->getCanonicalDecl());
PropSetterToIvarMap[SetterD] = ID;
}
const ObjCMethodDecl *GetterD = PD->getGetterMethodDecl();
if (GetterD) {
GetterD = cast<ObjCMethodDecl>(GetterD->getCanonicalDecl());
PropGetterToIvarMap[GetterD] = ID;
}
}
// If no ivars need invalidation, there is nothing to check here.
if (Ivars.empty())
return;
// Find all partial invalidation methods.
InvalidationInfo PartialInfo;
containsInvalidationMethod(InterfaceD, PartialInfo, /*LookForPartial*/ true);
// Remove ivars invalidated by the partial invalidation methods. They do not
// need to be invalidated in the regular invalidation methods.
bool AtImplementationContainsAtLeastOnePartialInvalidationMethod = false;
for (MethodSet::iterator
I = PartialInfo.InvalidationMethods.begin(),
E = PartialInfo.InvalidationMethods.end(); I != E; ++I) {
const ObjCMethodDecl *InterfD = *I;
// Get the corresponding method in the @implementation.
const ObjCMethodDecl *D = ImplD->getMethod(InterfD->getSelector(),
InterfD->isInstanceMethod());
if (D && D->hasBody()) {
AtImplementationContainsAtLeastOnePartialInvalidationMethod = true;
bool CalledAnotherInvalidationMethod = false;
// The MethodCrawler is going to remove the invalidated ivars.
MethodCrawler(Ivars,
CalledAnotherInvalidationMethod,
PropSetterToIvarMap,
PropGetterToIvarMap,
PropertyToIvarMap,
BR.getContext()).VisitStmt(D->getBody());
// If another invalidation method was called, trust that full invalidation
// has occurred.
if (CalledAnotherInvalidationMethod)
Ivars.clear();
}
}
// If all ivars have been invalidated by partial invalidators, there is
// nothing to check here.
if (Ivars.empty())
return;
// Find all invalidation methods in this @interface declaration and parents.
InvalidationInfo Info;
containsInvalidationMethod(InterfaceD, Info, /*LookForPartial*/ false);
// Report an error in case none of the invalidation methods are declared.
if (!Info.needsInvalidation() && !PartialInfo.needsInvalidation()) {
if (Filter.check_MissingInvalidationMethod)
reportNoInvalidationMethod(Filter.checkName_MissingInvalidationMethod,
FirstIvarDecl, IvarToPopertyMap, InterfaceD,
/*MissingDeclaration*/ true);
// If there are no invalidation methods, there is no ivar validation work
// to be done.
return;
}
// Only check if Ivars are invalidated when InstanceVariableInvalidation
// has been requested.
if (!Filter.check_InstanceVariableInvalidation)
return;
// Check that all ivars are invalidated by the invalidation methods.
bool AtImplementationContainsAtLeastOneInvalidationMethod = false;
for (MethodSet::iterator I = Info.InvalidationMethods.begin(),
E = Info.InvalidationMethods.end(); I != E; ++I) {
const ObjCMethodDecl *InterfD = *I;
// Get the corresponding method in the @implementation.
const ObjCMethodDecl *D = ImplD->getMethod(InterfD->getSelector(),
InterfD->isInstanceMethod());
if (D && D->hasBody()) {
AtImplementationContainsAtLeastOneInvalidationMethod = true;
// Get a copy of ivars needing invalidation.
IvarSet IvarsI = Ivars;
bool CalledAnotherInvalidationMethod = false;
MethodCrawler(IvarsI,
CalledAnotherInvalidationMethod,
PropSetterToIvarMap,
PropGetterToIvarMap,
PropertyToIvarMap,
BR.getContext()).VisitStmt(D->getBody());
// If another invalidation method was called, trust that full invalidation
// has occurred.
if (CalledAnotherInvalidationMethod)
continue;
// Warn on the ivars that were not invalidated by the method.
for (IvarSet::const_iterator
I = IvarsI.begin(), E = IvarsI.end(); I != E; ++I)
reportIvarNeedsInvalidation(I->first, IvarToPopertyMap, D);
}
}
// Report an error in case none of the invalidation methods are implemented.
if (!AtImplementationContainsAtLeastOneInvalidationMethod) {
if (AtImplementationContainsAtLeastOnePartialInvalidationMethod) {
// Warn on the ivars that were not invalidated by the partial
// invalidation methods.
for (IvarSet::const_iterator
I = Ivars.begin(), E = Ivars.end(); I != E; ++I)
reportIvarNeedsInvalidation(I->first, IvarToPopertyMap, nullptr);
} else {
// Otherwise, no invalidation methods were implemented.
reportNoInvalidationMethod(Filter.checkName_InstanceVariableInvalidation,
FirstIvarDecl, IvarToPopertyMap, InterfaceD,
/*MissingDeclaration*/ false);
}
}
}
void IvarInvalidationCheckerImpl::reportNoInvalidationMethod(
CheckName CheckName, const ObjCIvarDecl *FirstIvarDecl,
const IvarToPropMapTy &IvarToPopertyMap,
const ObjCInterfaceDecl *InterfaceD, bool MissingDeclaration) const {
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
assert(FirstIvarDecl);
printIvar(os, FirstIvarDecl, IvarToPopertyMap);
os << "needs to be invalidated; ";
if (MissingDeclaration)
os << "no invalidation method is declared for ";
else
os << "no invalidation method is defined in the @implementation for ";
os << InterfaceD->getName();
PathDiagnosticLocation IvarDecLocation =
PathDiagnosticLocation::createBegin(FirstIvarDecl, BR.getSourceManager());
BR.EmitBasicReport(FirstIvarDecl, CheckName, "Incomplete invalidation",
categories::CoreFoundationObjectiveC, os.str(),
IvarDecLocation);
}
void IvarInvalidationCheckerImpl::
reportIvarNeedsInvalidation(const ObjCIvarDecl *IvarD,
const IvarToPropMapTy &IvarToPopertyMap,
const ObjCMethodDecl *MethodD) const {
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
printIvar(os, IvarD, IvarToPopertyMap);
os << "needs to be invalidated or set to nil";
if (MethodD) {
PathDiagnosticLocation MethodDecLocation =
PathDiagnosticLocation::createEnd(MethodD->getBody(),
BR.getSourceManager(),
Mgr.getAnalysisDeclContext(MethodD));
BR.EmitBasicReport(MethodD, Filter.checkName_InstanceVariableInvalidation,
"Incomplete invalidation",
categories::CoreFoundationObjectiveC, os.str(),
MethodDecLocation);
} else {
BR.EmitBasicReport(
IvarD, Filter.checkName_InstanceVariableInvalidation,
"Incomplete invalidation", categories::CoreFoundationObjectiveC,
os.str(),
PathDiagnosticLocation::createBegin(IvarD, BR.getSourceManager()));
}
}
void IvarInvalidationCheckerImpl::MethodCrawler::markInvalidated(
const ObjCIvarDecl *Iv) {
IvarSet::iterator I = IVars.find(Iv);
if (I != IVars.end()) {
// If InvalidationMethod is present, we are processing the message send and
// should ensure we are invalidating with the appropriate method,
// otherwise, we are processing setting to 'nil'.
if (!InvalidationMethod ||
(InvalidationMethod && I->second.hasMethod(InvalidationMethod)))
IVars.erase(I);
}
}
const Expr *IvarInvalidationCheckerImpl::MethodCrawler::peel(const Expr *E) const {
E = E->IgnoreParenCasts();
if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E))
E = POE->getSyntacticForm()->IgnoreParenCasts();
if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E))
E = OVE->getSourceExpr()->IgnoreParenCasts();
return E;
}
void IvarInvalidationCheckerImpl::MethodCrawler::checkObjCIvarRefExpr(
const ObjCIvarRefExpr *IvarRef) {
if (const Decl *D = IvarRef->getDecl())
markInvalidated(cast<ObjCIvarDecl>(D->getCanonicalDecl()));
}
void IvarInvalidationCheckerImpl::MethodCrawler::checkObjCMessageExpr(
const ObjCMessageExpr *ME) {
const ObjCMethodDecl *MD = ME->getMethodDecl();
if (MD) {
MD = cast<ObjCMethodDecl>(MD->getCanonicalDecl());
MethToIvarMapTy::const_iterator IvI = PropertyGetterToIvarMap.find(MD);
if (IvI != PropertyGetterToIvarMap.end())
markInvalidated(IvI->second);
}
}
void IvarInvalidationCheckerImpl::MethodCrawler::checkObjCPropertyRefExpr(
const ObjCPropertyRefExpr *PA) {
if (PA->isExplicitProperty()) {
const ObjCPropertyDecl *PD = PA->getExplicitProperty();
if (PD) {
PD = cast<ObjCPropertyDecl>(PD->getCanonicalDecl());
PropToIvarMapTy::const_iterator IvI = PropertyToIvarMap.find(PD);
if (IvI != PropertyToIvarMap.end())
markInvalidated(IvI->second);
return;
}
}
if (PA->isImplicitProperty()) {
const ObjCMethodDecl *MD = PA->getImplicitPropertySetter();
if (MD) {
MD = cast<ObjCMethodDecl>(MD->getCanonicalDecl());
MethToIvarMapTy::const_iterator IvI =PropertyGetterToIvarMap.find(MD);
if (IvI != PropertyGetterToIvarMap.end())
markInvalidated(IvI->second);
return;
}
}
}
bool IvarInvalidationCheckerImpl::MethodCrawler::isZero(const Expr *E) const {
E = peel(E);
return (E->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull)
!= Expr::NPCK_NotNull);
}
void IvarInvalidationCheckerImpl::MethodCrawler::check(const Expr *E) {
E = peel(E);
if (const ObjCIvarRefExpr *IvarRef = dyn_cast<ObjCIvarRefExpr>(E)) {
checkObjCIvarRefExpr(IvarRef);
return;
}
if (const ObjCPropertyRefExpr *PropRef = dyn_cast<ObjCPropertyRefExpr>(E)) {
checkObjCPropertyRefExpr(PropRef);
return;
}
if (const ObjCMessageExpr *MsgExpr = dyn_cast<ObjCMessageExpr>(E)) {
checkObjCMessageExpr(MsgExpr);
return;
}
}
void IvarInvalidationCheckerImpl::MethodCrawler::VisitBinaryOperator(
const BinaryOperator *BO) {
VisitStmt(BO);
// Do we assign/compare against zero? If yes, check the variable we are
// assigning to.
BinaryOperatorKind Opcode = BO->getOpcode();
if (Opcode != BO_Assign &&
Opcode != BO_EQ &&
Opcode != BO_NE)
return;
if (isZero(BO->getRHS())) {
check(BO->getLHS());
return;
}
if (Opcode != BO_Assign && isZero(BO->getLHS())) {
check(BO->getRHS());
return;
}
}
void IvarInvalidationCheckerImpl::MethodCrawler::VisitObjCMessageExpr(
const ObjCMessageExpr *ME) {
const ObjCMethodDecl *MD = ME->getMethodDecl();
const Expr *Receiver = ME->getInstanceReceiver();
// Stop if we are calling '[self invalidate]'.
if (Receiver && isInvalidationMethod(MD, /*LookForPartial*/ false))
if (Receiver->isObjCSelfExpr()) {
CalledAnotherInvalidationMethod = true;
return;
}
// Check if we call a setter and set the property to 'nil'.
if (MD && (ME->getNumArgs() == 1) && isZero(ME->getArg(0))) {
MD = cast<ObjCMethodDecl>(MD->getCanonicalDecl());
MethToIvarMapTy::const_iterator IvI = PropertySetterToIvarMap.find(MD);
if (IvI != PropertySetterToIvarMap.end()) {
markInvalidated(IvI->second);
return;
}
}
// Check if we call the 'invalidation' routine on the ivar.
if (Receiver) {
InvalidationMethod = MD;
check(Receiver->IgnoreParenCasts());
InvalidationMethod = nullptr;
}
VisitStmt(ME);
}
}
// Register the checkers.
namespace {
class IvarInvalidationChecker :
public Checker<check::ASTDecl<ObjCImplementationDecl> > {
public:
ChecksFilter Filter;
public:
void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& Mgr,
BugReporter &BR) const {
IvarInvalidationCheckerImpl Walker(Mgr, BR, Filter);
Walker.visit(D);
}
};
}
#define REGISTER_CHECKER(name) \
void ento::register##name(CheckerManager &mgr) { \
IvarInvalidationChecker *checker = \
mgr.registerChecker<IvarInvalidationChecker>(); \
checker->Filter.check_##name = true; \
checker->Filter.checkName_##name = mgr.getCurrentCheckName(); \
}
REGISTER_CHECKER(InstanceVariableInvalidation)
REGISTER_CHECKER(MissingInvalidationMethod)
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp | //==- ObjCUnusedIVarsChecker.cpp - Check for unused ivars --------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines CheckObjCUnusedIvars, a checker that analyzes an
// Objective-C class's interface/implementation to determine if it has any
// ivars that are never accessed.
//
//===----------------------------------------------------------------------===//
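//
// Illustrative example (an editorial addition, not in the original source):
//
//   @interface Foo : NSObject {
//   @private
//     int _unused;
//   }
//   @end
//   // warning: Instance variable '_unused' in class 'Foo' is never used by
//   // the methods in its @implementation
//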
#include "ClangSACheckers.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
using namespace clang;
using namespace ento;
enum IVarState { Unused, Used };
typedef llvm::DenseMap<const ObjCIvarDecl*,IVarState> IvarUsageMap;
static void Scan(IvarUsageMap& M, const Stmt *S) {
if (!S)
return;
if (const ObjCIvarRefExpr *Ex = dyn_cast<ObjCIvarRefExpr>(S)) {
const ObjCIvarDecl *D = Ex->getDecl();
IvarUsageMap::iterator I = M.find(D);
if (I != M.end())
I->second = Used;
return;
}
// Blocks can reference an instance variable of a class.
if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
Scan(M, BE->getBody());
return;
}
if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(S))
for (PseudoObjectExpr::const_semantics_iterator
i = POE->semantics_begin(), e = POE->semantics_end(); i != e; ++i) {
const Expr *sub = *i;
if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(sub))
sub = OVE->getSourceExpr();
Scan(M, sub);
}
for (const Stmt *SubStmt : S->children())
Scan(M, SubStmt);
}
static void Scan(IvarUsageMap& M, const ObjCPropertyImplDecl *D) {
if (!D)
return;
const ObjCIvarDecl *ID = D->getPropertyIvarDecl();
if (!ID)
return;
IvarUsageMap::iterator I = M.find(ID);
if (I != M.end())
I->second = Used;
}
static void Scan(IvarUsageMap& M, const ObjCContainerDecl *D) {
// Scan the methods for accesses.
for (const auto *I : D->instance_methods())
Scan(M, I->getBody());
if (const ObjCImplementationDecl *ID = dyn_cast<ObjCImplementationDecl>(D)) {
// Scan for @synthesized property methods that act as setters/getters
// to an ivar.
for (const auto *I : ID->property_impls())
Scan(M, I);
// Scan the associated categories as well.
for (const auto *Cat : ID->getClassInterface()->visible_categories()) {
if (const ObjCCategoryImplDecl *CID = Cat->getImplementation())
Scan(M, CID);
}
}
}
static void Scan(IvarUsageMap &M, const DeclContext *C, const FileID FID,
SourceManager &SM) {
for (const auto *I : C->decls())
if (const auto *FD = dyn_cast<FunctionDecl>(I)) {
SourceLocation L = FD->getLocStart();
if (SM.getFileID(L) == FID)
Scan(M, FD->getBody());
}
}
static void checkObjCUnusedIvar(const ObjCImplementationDecl *D,
BugReporter &BR,
const CheckerBase *Checker) {
const ObjCInterfaceDecl *ID = D->getClassInterface();
IvarUsageMap M;
// Iterate over the ivars.
for (const auto *Ivar : ID->ivars()) {
// Ignore ivars that...
// (a) aren't private
// (b) are explicitly marked unused
// (c) are IBOutlets
// (d) are unnamed bitfields
if (Ivar->getAccessControl() != ObjCIvarDecl::Private ||
Ivar->hasAttr<UnusedAttr>() || Ivar->hasAttr<IBOutletAttr>() ||
Ivar->hasAttr<IBOutletCollectionAttr>() ||
Ivar->isUnnamedBitfield())
continue;
M[Ivar] = Unused;
}
if (M.empty())
return;
// Now scan the implementation declaration.
Scan(M, D);
// Any potentially unused ivars?
bool hasUnused = false;
for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
if (I->second == Unused) {
hasUnused = true;
break;
}
if (!hasUnused)
return;
// We found some potentially unused ivars. Scan the entire translation unit
// for functions inside the @implementation that reference these ivars.
// FIXME: In the future hopefully we can just use the lexical DeclContext
// to go from the ObjCImplementationDecl to the lexically "nested"
// C functions.
SourceManager &SM = BR.getSourceManager();
Scan(M, D->getDeclContext(), SM.getFileID(D->getLocation()), SM);
// Find ivars that are unused.
for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
if (I->second == Unused) {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
os << "Instance variable '" << *I->first << "' in class '" << *ID
<< "' is never used by the methods in its @implementation "
"(although it may be used by category methods).";
PathDiagnosticLocation L =
PathDiagnosticLocation::create(I->first, BR.getSourceManager());
BR.EmitBasicReport(D, Checker, "Unused instance variable", "Optimization",
os.str(), L);
}
}
//===----------------------------------------------------------------------===//
// ObjCUnusedIvarsChecker
//===----------------------------------------------------------------------===//
namespace {
class ObjCUnusedIvarsChecker : public Checker<
check::ASTDecl<ObjCImplementationDecl> > {
public:
void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
BugReporter &BR) const {
checkObjCUnusedIvar(D, BR, this);
}
};
}
void ento::registerObjCUnusedIvarsChecker(CheckerManager &mgr) {
mgr.registerChecker<ObjCUnusedIvarsChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp | //=== LLVMConventionsChecker.cpp - Check LLVM codebase conventions ---*- C++ -*-
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This defines LLVMConventionsChecker, a collection of small checks
// for specific coding conventions in the LLVM/Clang codebase.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
//===----------------------------------------------------------------------===//
// Generic type checking routines.
//===----------------------------------------------------------------------===//
static bool IsLLVMStringRef(QualType T) {
const RecordType *RT = T->getAs<RecordType>();
if (!RT)
return false;
return StringRef(QualType(RT, 0).getAsString()) ==
"class StringRef";
}
/// Check whether the declaration is semantically inside the top-level
/// namespace named by NS.
static bool InNamespace(const Decl *D, StringRef NS) {
const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D->getDeclContext());
if (!ND)
return false;
const IdentifierInfo *II = ND->getIdentifier();
if (!II || !II->getName().equals(NS))
return false;
return isa<TranslationUnitDecl>(ND->getDeclContext());
}
static bool IsStdString(QualType T) {
if (const ElaboratedType *QT = T->getAs<ElaboratedType>())
T = QT->getNamedType();
const TypedefType *TT = T->getAs<TypedefType>();
if (!TT)
return false;
const TypedefNameDecl *TD = TT->getDecl();
if (!TD->isInStdNamespace())
return false;
return TD->getName() == "string";
}
static bool IsClangType(const RecordDecl *RD) {
return RD->getName() == "Type" && InNamespace(RD, "clang");
}
static bool IsClangDecl(const RecordDecl *RD) {
return RD->getName() == "Decl" && InNamespace(RD, "clang");
}
static bool IsClangStmt(const RecordDecl *RD) {
return RD->getName() == "Stmt" && InNamespace(RD, "clang");
}
static bool IsClangAttr(const RecordDecl *RD) {
return RD->getName() == "Attr" && InNamespace(RD, "clang");
}
static bool IsStdVector(QualType T) {
const TemplateSpecializationType *TS = T->getAs<TemplateSpecializationType>();
if (!TS)
return false;
TemplateName TM = TS->getTemplateName();
TemplateDecl *TD = TM.getAsTemplateDecl();
if (!TD || !InNamespace(TD, "std"))
return false;
return TD->getName() == "vector";
}
static bool IsSmallVector(QualType T) {
const TemplateSpecializationType *TS = T->getAs<TemplateSpecializationType>();
if (!TS)
return false;
TemplateName TM = TS->getTemplateName();
TemplateDecl *TD = TM.getAsTemplateDecl();
if (!TD || !InNamespace(TD, "llvm"))
return false;
return TD->getName() == "SmallVector";
}
//===----------------------------------------------------------------------===//
// CHECK: a StringRef should not be bound to a temporary std::string whose
// lifetime is shorter than the StringRef's.
//===----------------------------------------------------------------------===//
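// For illustration, a minimal hypothetical sketch (not part of this file) of
// the kind of code this check flags:
//
//   std::string getName();              // returns a temporary std::string
//   llvm::StringRef Name = getName();   // the temporary dies at the end of
//                                       // the statement; Name now dangles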
namespace {
class StringRefCheckerVisitor : public StmtVisitor<StringRefCheckerVisitor> {
const Decl *DeclWithIssue;
BugReporter &BR;
const CheckerBase *Checker;
public:
StringRefCheckerVisitor(const Decl *declWithIssue, BugReporter &br,
const CheckerBase *checker)
: DeclWithIssue(declWithIssue), BR(br), Checker(checker) {}
void VisitChildren(Stmt *S) {
for (Stmt *Child : S->children())
if (Child)
Visit(Child);
}
void VisitStmt(Stmt *S) { VisitChildren(S); }
void VisitDeclStmt(DeclStmt *DS);
private:
void VisitVarDecl(VarDecl *VD);
};
} // end anonymous namespace
static void CheckStringRefAssignedTemporary(const Decl *D, BugReporter &BR,
const CheckerBase *Checker) {
StringRefCheckerVisitor walker(D, BR, Checker);
walker.Visit(D->getBody());
}
void StringRefCheckerVisitor::VisitDeclStmt(DeclStmt *S) {
VisitChildren(S);
for (auto *I : S->decls())
if (VarDecl *VD = dyn_cast<VarDecl>(I))
VisitVarDecl(VD);
}
void StringRefCheckerVisitor::VisitVarDecl(VarDecl *VD) {
Expr *Init = VD->getInit();
if (!Init)
return;
// Pattern match for:
// StringRef x = call() (where call returns std::string)
if (!IsLLVMStringRef(VD->getType()))
return;
ExprWithCleanups *Ex1 = dyn_cast<ExprWithCleanups>(Init);
if (!Ex1)
return;
CXXConstructExpr *Ex2 = dyn_cast<CXXConstructExpr>(Ex1->getSubExpr());
if (!Ex2 || Ex2->getNumArgs() != 1)
return;
ImplicitCastExpr *Ex3 = dyn_cast<ImplicitCastExpr>(Ex2->getArg(0));
if (!Ex3)
return;
CXXConstructExpr *Ex4 = dyn_cast<CXXConstructExpr>(Ex3->getSubExpr());
if (!Ex4 || Ex4->getNumArgs() != 1)
return;
ImplicitCastExpr *Ex5 = dyn_cast<ImplicitCastExpr>(Ex4->getArg(0));
if (!Ex5)
return;
CXXBindTemporaryExpr *Ex6 = dyn_cast<CXXBindTemporaryExpr>(Ex5->getSubExpr());
if (!Ex6 || !IsStdString(Ex6->getType()))
return;
// Okay, badness! Report an error.
const char *desc = "StringRef should not be bound to a temporary "
"std::string that it outlives";
PathDiagnosticLocation VDLoc =
PathDiagnosticLocation::createBegin(VD, BR.getSourceManager());
BR.EmitBasicReport(DeclWithIssue, Checker, desc, "LLVM Conventions", desc,
VDLoc, Init->getSourceRange());
}
//===----------------------------------------------------------------------===//
// CHECK: Clang AST nodes should not have fields that can allocate
// memory.
//===----------------------------------------------------------------------===//
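// For illustration, a hypothetical AST node field that this check would
// report:
//
//   class MyStmt : public clang::Stmt {
//     std::vector<Stmt *> Children;   // field type allocates heap memory
//   };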
static bool AllocatesMemory(QualType T) {
return IsStdVector(T) || IsStdString(T) || IsSmallVector(T);
}
// This type checking could be sped up via dynamic programming.
static bool IsPartOfAST(const CXXRecordDecl *R) {
if (IsClangStmt(R) || IsClangType(R) || IsClangDecl(R) || IsClangAttr(R))
return true;
for (const auto &BS : R->bases()) {
QualType T = BS.getType();
if (const RecordType *baseT = T->getAs<RecordType>()) {
CXXRecordDecl *baseD = cast<CXXRecordDecl>(baseT->getDecl());
if (IsPartOfAST(baseD))
return true;
}
}
return false;
}
namespace {
class ASTFieldVisitor {
SmallVector<FieldDecl*, 10> FieldChain;
const CXXRecordDecl *Root;
BugReporter &BR;
const CheckerBase *Checker;
public:
ASTFieldVisitor(const CXXRecordDecl *root, BugReporter &br,
const CheckerBase *checker)
: Root(root), BR(br), Checker(checker) {}
void Visit(FieldDecl *D);
void ReportError(QualType T);
};
} // end anonymous namespace
static void CheckASTMemory(const CXXRecordDecl *R, BugReporter &BR,
const CheckerBase *Checker) {
if (!IsPartOfAST(R))
return;
for (auto *I : R->fields()) {
ASTFieldVisitor walker(R, BR, Checker);
walker.Visit(I);
}
}
void ASTFieldVisitor::Visit(FieldDecl *D) {
FieldChain.push_back(D);
QualType T = D->getType();
if (AllocatesMemory(T))
ReportError(T);
if (const RecordType *RT = T->getAs<RecordType>()) {
const RecordDecl *RD = RT->getDecl()->getDefinition();
for (auto *I : RD->fields())
Visit(I);
}
FieldChain.pop_back();
}
void ASTFieldVisitor::ReportError(QualType T) {
SmallString<1024> buf;
llvm::raw_svector_ostream os(buf);
os << "AST class '" << Root->getName() << "' has a field '"
<< FieldChain.front()->getName() << "' that allocates heap memory";
if (FieldChain.size() > 1) {
os << " via the following chain: ";
bool isFirst = true;
for (SmallVectorImpl<FieldDecl*>::iterator I=FieldChain.begin(),
E=FieldChain.end(); I!=E; ++I) {
if (!isFirst)
os << '.';
else
isFirst = false;
os << (*I)->getName();
}
}
os << " (type " << FieldChain.back()->getType().getAsString() << ")";
os.flush();
// Note that this will fire for every translation unit that uses this
// class. This is suboptimal, but at least scan-build will merge
// duplicate HTML reports. In the future we need a unified way of merging
// duplicate reports across translation units. For C++ classes we cannot
// just report warnings when we see an out-of-line method definition for a
// class, as that heuristic doesn't always work (the complete definition of
// the class may be in the header file, for example).
PathDiagnosticLocation L = PathDiagnosticLocation::createBegin(
FieldChain.front(), BR.getSourceManager());
BR.EmitBasicReport(Root, Checker, "AST node allocates heap memory",
"LLVM Conventions", os.str(), L);
}
//===----------------------------------------------------------------------===//
// LLVMConventionsChecker
//===----------------------------------------------------------------------===//
namespace {
class LLVMConventionsChecker : public Checker<
check::ASTDecl<CXXRecordDecl>,
check::ASTCodeBody > {
public:
void checkASTDecl(const CXXRecordDecl *R, AnalysisManager& mgr,
BugReporter &BR) const {
if (R->isCompleteDefinition())
CheckASTMemory(R, BR, this);
}
void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
BugReporter &BR) const {
CheckStringRefAssignedTemporary(D, BR, this);
}
};
}
void ento::registerLLVMConventionsChecker(CheckerManager &mgr) {
mgr.registerChecker<LLVMConventionsChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp | //== ReturnPointerRangeChecker.cpp ------------------------------*- C++ -*--==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines ReturnPointerRangeChecker, which is a path-sensitive check
// which looks for an out-of-bound pointer being returned to callers.
//
//===----------------------------------------------------------------------===//
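// For illustration, a hypothetical function this checker reports:
//
//   int *get() {
//     static int buf[4];
//     return buf + 10;   // returned pointer lies outside 'buf'
//   }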
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
using namespace ento;
namespace {
class ReturnPointerRangeChecker :
public Checker< check::PreStmt<ReturnStmt> > {
mutable std::unique_ptr<BuiltinBug> BT;
public:
void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
};
}
void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
CheckerContext &C) const {
ProgramStateRef state = C.getState();
const Expr *RetE = RS->getRetValue();
if (!RetE)
return;
SVal V = state->getSVal(RetE, C.getLocationContext());
const MemRegion *R = V.getAsRegion();
const ElementRegion *ER = dyn_cast_or_null<ElementRegion>(R);
if (!ER)
return;
DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
// A zero index is always in bounds; this also lets through ElementRegions
// created for pointer casts.
if (Idx.isZeroConstant())
return;
// FIXME: All of this out-of-bounds checking should eventually be refactored
// into a common place.
DefinedOrUnknownSVal NumElements
= C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
ER->getValueType());
ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
if (StOutBound && !StInBound) {
ExplodedNode *N = C.generateSink(StOutBound);
if (!N)
return;
// FIXME: This bug corresponds to CWE-466. Eventually we should have bug
// types explicitly reference such exploit categories (when applicable).
if (!BT)
BT.reset(new BuiltinBug(
this, "Return of pointer value outside of expected range",
"Returned pointer value points outside the original object "
"(potential buffer overflow)"));
// FIXME: It would be nice to eventually make this diagnostic more clear,
// e.g., by referencing the original declaration or by saying *why* this
// reference is outside the range.
// Generate a report for this bug.
auto report = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N);
report->addRange(RetE->getSourceRange());
C.emitReport(std::move(report));
}
}
void ento::registerReturnPointerRangeChecker(CheckerManager &mgr) {
mgr.registerChecker<ReturnPointerRangeChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp | //== DivZeroChecker.cpp - Division by zero checker --------------*- C++ -*--==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This defines DivZeroChecker, a builtin check in ExprEngine that performs
// checks for division by zero.
//
//===----------------------------------------------------------------------===//
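// For illustration, a hypothetical function this checker warns on:
//
//   int divide(int n, int d) {
//     return n / d;   // reported on paths where 'd' can be zero
//   }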
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
using namespace clang;
using namespace ento;
namespace {
class DivZeroChecker : public Checker< check::PreStmt<BinaryOperator> > {
mutable std::unique_ptr<BuiltinBug> BT;
void reportBug(const char *Msg,
ProgramStateRef StateZero,
CheckerContext &C) const ;
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
};
} // end anonymous namespace
void DivZeroChecker::reportBug(const char *Msg,
ProgramStateRef StateZero,
CheckerContext &C) const {
if (ExplodedNode *N = C.generateSink(StateZero)) {
if (!BT)
BT.reset(new BuiltinBug(this, "Division by zero"));
auto R = llvm::make_unique<BugReport>(*BT, Msg, N);
bugreporter::trackNullOrUndefValue(N, bugreporter::GetDenomExpr(N), *R);
C.emitReport(std::move(R));
}
}
void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
CheckerContext &C) const {
BinaryOperator::Opcode Op = B->getOpcode();
if (Op != BO_Div &&
Op != BO_Rem &&
Op != BO_DivAssign &&
Op != BO_RemAssign)
return;
if (!B->getRHS()->getType()->isScalarType())
return;
SVal Denom = C.getState()->getSVal(B->getRHS(), C.getLocationContext());
Optional<DefinedSVal> DV = Denom.getAs<DefinedSVal>();
// Divide-by-undefined handled in the generic checking for uses of
// undefined values.
if (!DV)
return;
// Check for divide by zero.
ConstraintManager &CM = C.getConstraintManager();
ProgramStateRef stateNotZero, stateZero;
std::tie(stateNotZero, stateZero) = CM.assumeDual(C.getState(), *DV);
if (!stateNotZero) {
assert(stateZero);
reportBug("Division by zero", stateZero, C);
return;
}
bool TaintedD = C.getState()->isTainted(*DV);
if ((stateNotZero && stateZero && TaintedD)) {
reportBug("Division by a tainted value, possibly zero", stateZero, C);
return;
}
// If we get here, then the denom should not be zero. We abandon the implicit
// zero denom case for now.
C.addTransition(stateNotZero);
}
void ento::registerDivZeroChecker(CheckerManager &mgr) {
mgr.registerChecker<DivZeroChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp | //=== StackAddrEscapeChecker.cpp ----------------------------------*- C++ -*--//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the stack address leak checker, which checks if an invalid
// stack address is stored into a global or heap location. See CERT DCL30-C.
//
//===----------------------------------------------------------------------===//
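// For illustration, hypothetical code matching the two patterns handled
// below:
//
//   int *Global;
//   int *ret() { int x = 0; return &x; }      // returns a stack address
//   void esc() { int y = 0; Global = &y; }    // stack address stored globally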
#include "ClangSACheckers.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/SourceManager.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
namespace {
class StackAddrEscapeChecker : public Checker< check::PreStmt<ReturnStmt>,
check::EndFunction > {
mutable std::unique_ptr<BuiltinBug> BT_stackleak;
mutable std::unique_ptr<BuiltinBug> BT_returnstack;
public:
void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
void checkEndFunction(CheckerContext &Ctx) const;
private:
void EmitStackError(CheckerContext &C, const MemRegion *R,
const Expr *RetE) const;
static SourceRange genName(raw_ostream &os, const MemRegion *R,
ASTContext &Ctx);
};
}
SourceRange StackAddrEscapeChecker::genName(raw_ostream &os, const MemRegion *R,
ASTContext &Ctx) {
// Get the base region, stripping away fields and elements.
R = R->getBaseRegion();
SourceManager &SM = Ctx.getSourceManager();
SourceRange range;
os << "Address of ";
// Check if the region is a compound literal.
if (const CompoundLiteralRegion* CR = dyn_cast<CompoundLiteralRegion>(R)) {
const CompoundLiteralExpr *CL = CR->getLiteralExpr();
os << "stack memory associated with a compound literal "
"declared on line "
<< SM.getExpansionLineNumber(CL->getLocStart())
<< " returned to caller";
range = CL->getSourceRange();
}
else if (const AllocaRegion* AR = dyn_cast<AllocaRegion>(R)) {
const Expr *ARE = AR->getExpr();
SourceLocation L = ARE->getLocStart();
range = ARE->getSourceRange();
os << "stack memory allocated by call to alloca() on line "
<< SM.getExpansionLineNumber(L);
}
else if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
const BlockDecl *BD = BR->getCodeRegion()->getDecl();
SourceLocation L = BD->getLocStart();
range = BD->getSourceRange();
os << "stack-allocated block declared on line "
<< SM.getExpansionLineNumber(L);
}
else if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
os << "stack memory associated with local variable '"
<< VR->getString() << '\'';
range = VR->getDecl()->getSourceRange();
}
else if (const CXXTempObjectRegion *TOR = dyn_cast<CXXTempObjectRegion>(R)) {
QualType Ty = TOR->getValueType().getLocalUnqualifiedType();
os << "stack memory associated with temporary object of type '";
Ty.print(os, Ctx.getPrintingPolicy());
os << "'";
range = TOR->getExpr()->getSourceRange();
}
else {
llvm_unreachable("Invalid region in ReturnStackAddressChecker.");
}
return range;
}
void StackAddrEscapeChecker::EmitStackError(CheckerContext &C, const MemRegion *R,
const Expr *RetE) const {
ExplodedNode *N = C.generateSink();
if (!N)
return;
if (!BT_returnstack)
BT_returnstack.reset(
new BuiltinBug(this, "Return of address to stack-allocated memory"));
// Generate a report for this bug.
SmallString<512> buf;
llvm::raw_svector_ostream os(buf);
SourceRange range = genName(os, R, C.getASTContext());
os << " returned to caller";
auto report = llvm::make_unique<BugReport>(*BT_returnstack, os.str(), N);
report->addRange(RetE->getSourceRange());
if (range.isValid())
report->addRange(range);
C.emitReport(std::move(report));
}
void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
CheckerContext &C) const {
const Expr *RetE = RS->getRetValue();
if (!RetE)
return;
RetE = RetE->IgnoreParens();
const LocationContext *LCtx = C.getLocationContext();
SVal V = C.getState()->getSVal(RetE, LCtx);
const MemRegion *R = V.getAsRegion();
if (!R)
return;
const StackSpaceRegion *SS =
dyn_cast_or_null<StackSpaceRegion>(R->getMemorySpace());
if (!SS)
return;
// Returning stack memory from an ancestor stack frame is fine.
const StackFrameContext *CurFrame = LCtx->getCurrentStackFrame();
const StackFrameContext *MemFrame = SS->getStackFrame();
if (MemFrame != CurFrame)
return;
// Automatic reference counting automatically copies blocks.
if (C.getASTContext().getLangOpts().ObjCAutoRefCount &&
isa<BlockDataRegion>(R))
return;
// Returning a record by value is fine. (In this case, the returned
// expression will be a copy-constructor, possibly wrapped in an
// ExprWithCleanups node.)
if (const ExprWithCleanups *Cleanup = dyn_cast<ExprWithCleanups>(RetE))
RetE = Cleanup->getSubExpr();
if (isa<CXXConstructExpr>(RetE) && RetE->getType()->isRecordType())
return;
EmitStackError(C, R, RetE);
}
void StackAddrEscapeChecker::checkEndFunction(CheckerContext &Ctx) const {
ProgramStateRef state = Ctx.getState();
// Iterate over all bindings to global variables and see if any of them
// hold a memory region in the stack space.
class CallBack : public StoreManager::BindingsHandler {
private:
CheckerContext &Ctx;
const StackFrameContext *CurSFC;
public:
SmallVector<std::pair<const MemRegion*, const MemRegion*>, 10> V;
CallBack(CheckerContext &CC) :
Ctx(CC),
CurSFC(CC.getLocationContext()->getCurrentStackFrame())
{}
bool HandleBinding(StoreManager &SMgr, Store store,
const MemRegion *region, SVal val) override {
if (!isa<GlobalsSpaceRegion>(region->getMemorySpace()))
return true;
const MemRegion *vR = val.getAsRegion();
if (!vR)
return true;
// Under automatic reference counting (ARC), it is okay to assign a block
// directly to a global variable.
if (Ctx.getASTContext().getLangOpts().ObjCAutoRefCount &&
isa<BlockDataRegion>(vR))
return true;
if (const StackSpaceRegion *SSR =
dyn_cast<StackSpaceRegion>(vR->getMemorySpace())) {
// If the global variable holds a location in the current stack frame,
// record the binding to emit a warning.
if (SSR->getStackFrame() == CurSFC)
V.push_back(std::make_pair(region, vR));
}
return true;
}
};
CallBack cb(Ctx);
state->getStateManager().getStoreManager().iterBindings(state->getStore(),cb);
if (cb.V.empty())
return;
// Generate an error node.
ExplodedNode *N = Ctx.addTransition(state);
if (!N)
return;
if (!BT_stackleak)
BT_stackleak.reset(
new BuiltinBug(this, "Stack address stored into global variable",
"Stack address was saved into a global variable. "
"This is dangerous because the address will become "
"invalid after returning from the function"));
for (unsigned i = 0, e = cb.V.size(); i != e; ++i) {
// Generate a report for this bug.
SmallString<512> buf;
llvm::raw_svector_ostream os(buf);
SourceRange range = genName(os, cb.V[i].second, Ctx.getASTContext());
os << " is still referred to by the global variable '";
const VarRegion *VR = cast<VarRegion>(cb.V[i].first->getBaseRegion());
os << *VR->getDecl()
<< "' upon returning to the caller. This will be a dangling reference";
auto report = llvm::make_unique<BugReport>(*BT_stackleak, os.str(), N);
if (range.isValid())
report->addRange(range);
Ctx.emitReport(std::move(report));
}
}
void ento::registerStackAddrEscapeChecker(CheckerManager &mgr) {
mgr.registerChecker<StackAddrEscapeChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp | //===-- StreamChecker.cpp -----------------------------------------*- C++ -*--//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines checkers that model and check stream handling functions.
//
//===----------------------------------------------------------------------===//
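// For illustration, hypothetical misuses modeled by this checker:
//
//   FILE *F = fopen("data.txt", "r");   // may return NULL
//   fclose(F);
//   fclose(F);                          // double close
//   // a stream that is opened but never closed is reported as a leak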
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/ImmutableMap.h"
using namespace clang;
using namespace ento;
namespace {
struct StreamState {
enum Kind { Opened, Closed, OpenFailed, Escaped } K;
const Stmt *S;
StreamState(Kind k, const Stmt *s) : K(k), S(s) {}
bool isOpened() const { return K == Opened; }
bool isClosed() const { return K == Closed; }
//bool isOpenFailed() const { return K == OpenFailed; }
//bool isEscaped() const { return K == Escaped; }
bool operator==(const StreamState &X) const {
return K == X.K && S == X.S;
}
static StreamState getOpened(const Stmt *s) { return StreamState(Opened, s); }
static StreamState getClosed(const Stmt *s) { return StreamState(Closed, s); }
static StreamState getOpenFailed(const Stmt *s) {
return StreamState(OpenFailed, s);
}
static StreamState getEscaped(const Stmt *s) {
return StreamState(Escaped, s);
}
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddInteger(K);
ID.AddPointer(S);
}
};
class StreamChecker : public Checker<eval::Call,
check::DeadSymbols > {
mutable IdentifierInfo *II_fopen, *II_tmpfile, *II_fclose, *II_fread,
*II_fwrite,
*II_fseek, *II_ftell, *II_rewind, *II_fgetpos, *II_fsetpos,
*II_clearerr, *II_feof, *II_ferror, *II_fileno;
mutable std::unique_ptr<BuiltinBug> BT_nullfp, BT_illegalwhence,
BT_doubleclose, BT_ResourceLeak;
public:
StreamChecker()
: II_fopen(nullptr), II_tmpfile(nullptr), II_fclose(nullptr),
II_fread(nullptr), II_fwrite(nullptr), II_fseek(nullptr),
II_ftell(nullptr), II_rewind(nullptr), II_fgetpos(nullptr),
II_fsetpos(nullptr), II_clearerr(nullptr), II_feof(nullptr),
II_ferror(nullptr), II_fileno(nullptr) {}
bool evalCall(const CallExpr *CE, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
private:
void Fopen(CheckerContext &C, const CallExpr *CE) const;
void Tmpfile(CheckerContext &C, const CallExpr *CE) const;
void Fclose(CheckerContext &C, const CallExpr *CE) const;
void Fread(CheckerContext &C, const CallExpr *CE) const;
void Fwrite(CheckerContext &C, const CallExpr *CE) const;
void Fseek(CheckerContext &C, const CallExpr *CE) const;
void Ftell(CheckerContext &C, const CallExpr *CE) const;
void Rewind(CheckerContext &C, const CallExpr *CE) const;
void Fgetpos(CheckerContext &C, const CallExpr *CE) const;
void Fsetpos(CheckerContext &C, const CallExpr *CE) const;
void Clearerr(CheckerContext &C, const CallExpr *CE) const;
void Feof(CheckerContext &C, const CallExpr *CE) const;
void Ferror(CheckerContext &C, const CallExpr *CE) const;
void Fileno(CheckerContext &C, const CallExpr *CE) const;
void OpenFileAux(CheckerContext &C, const CallExpr *CE) const;
ProgramStateRef CheckNullStream(SVal SV, ProgramStateRef state,
CheckerContext &C) const;
ProgramStateRef CheckDoubleClose(const CallExpr *CE, ProgramStateRef state,
CheckerContext &C) const;
};
} // end anonymous namespace
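// Maps each tracked FILE* symbol to its StreamState within the program state.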
REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
bool StreamChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD || FD->getKind() != Decl::Function)
return false;
ASTContext &Ctx = C.getASTContext();
if (!II_fopen)
II_fopen = &Ctx.Idents.get("fopen");
if (!II_tmpfile)
II_tmpfile = &Ctx.Idents.get("tmpfile");
if (!II_fclose)
II_fclose = &Ctx.Idents.get("fclose");
if (!II_fread)
II_fread = &Ctx.Idents.get("fread");
if (!II_fwrite)
II_fwrite = &Ctx.Idents.get("fwrite");
if (!II_fseek)
II_fseek = &Ctx.Idents.get("fseek");
if (!II_ftell)
II_ftell = &Ctx.Idents.get("ftell");
if (!II_rewind)
II_rewind = &Ctx.Idents.get("rewind");
if (!II_fgetpos)
II_fgetpos = &Ctx.Idents.get("fgetpos");
if (!II_fsetpos)
II_fsetpos = &Ctx.Idents.get("fsetpos");
if (!II_clearerr)
II_clearerr = &Ctx.Idents.get("clearerr");
if (!II_feof)
II_feof = &Ctx.Idents.get("feof");
if (!II_ferror)
II_ferror = &Ctx.Idents.get("ferror");
if (!II_fileno)
II_fileno = &Ctx.Idents.get("fileno");
if (FD->getIdentifier() == II_fopen) {
Fopen(C, CE);
return true;
}
if (FD->getIdentifier() == II_tmpfile) {
Tmpfile(C, CE);
return true;
}
if (FD->getIdentifier() == II_fclose) {
Fclose(C, CE);
return true;
}
if (FD->getIdentifier() == II_fread) {
Fread(C, CE);
return true;
}
if (FD->getIdentifier() == II_fwrite) {
Fwrite(C, CE);
return true;
}
if (FD->getIdentifier() == II_fseek) {
Fseek(C, CE);
return true;
}
if (FD->getIdentifier() == II_ftell) {
Ftell(C, CE);
return true;
}
if (FD->getIdentifier() == II_rewind) {
Rewind(C, CE);
return true;
}
if (FD->getIdentifier() == II_fgetpos) {
Fgetpos(C, CE);
return true;
}
if (FD->getIdentifier() == II_fsetpos) {
Fsetpos(C, CE);
return true;
}
if (FD->getIdentifier() == II_clearerr) {
Clearerr(C, CE);
return true;
}
if (FD->getIdentifier() == II_feof) {
Feof(C, CE);
return true;
}
if (FD->getIdentifier() == II_ferror) {
Ferror(C, CE);
return true;
}
if (FD->getIdentifier() == II_fileno) {
Fileno(C, CE);
return true;
}
return false;
}
void StreamChecker::Fopen(CheckerContext &C, const CallExpr *CE) const {
OpenFileAux(C, CE);
}
void StreamChecker::Tmpfile(CheckerContext &C, const CallExpr *CE) const {
OpenFileAux(C, CE);
}
void StreamChecker::OpenFileAux(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
SValBuilder &svalBuilder = C.getSValBuilder();
const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
DefinedSVal RetVal = svalBuilder.conjureSymbolVal(nullptr, CE, LCtx,
C.blockCount())
.castAs<DefinedSVal>();
state = state->BindExpr(CE, C.getLocationContext(), RetVal);
ConstraintManager &CM = C.getConstraintManager();
// Bifurcate the state into two: one with a valid FILE* pointer, the other
// with a NULL.
ProgramStateRef stateNotNull, stateNull;
std::tie(stateNotNull, stateNull) = CM.assumeDual(state, RetVal);
if (SymbolRef Sym = RetVal.getAsSymbol()) {
// If RetVal is not NULL, set the symbol's state to Opened.
stateNotNull =
stateNotNull->set<StreamMap>(Sym,StreamState::getOpened(CE));
stateNull =
stateNull->set<StreamMap>(Sym, StreamState::getOpenFailed(CE));
C.addTransition(stateNotNull);
C.addTransition(stateNull);
}
}
void StreamChecker::Fclose(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = CheckDoubleClose(CE, C.getState(), C);
if (state)
C.addTransition(state);
}
void StreamChecker::Fread(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
if (!CheckNullStream(state->getSVal(CE->getArg(3), C.getLocationContext()),
state, C))
return;
}
void StreamChecker::Fwrite(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
if (!CheckNullStream(state->getSVal(CE->getArg(3), C.getLocationContext()),
state, C))
return;
}
void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
if (!(state = CheckNullStream(state->getSVal(CE->getArg(0),
C.getLocationContext()), state, C)))
return;
// Check the legality of the 'whence' argument of 'fseek'.
SVal Whence = state->getSVal(CE->getArg(2), C.getLocationContext());
Optional<nonloc::ConcreteInt> CI = Whence.getAs<nonloc::ConcreteInt>();
if (!CI)
return;
int64_t x = CI->getValue().getSExtValue();
if (x >= 0 && x <= 2)
return;
if (ExplodedNode *N = C.addTransition(state)) {
if (!BT_illegalwhence)
BT_illegalwhence.reset(
new BuiltinBug(this, "Illegal whence argument",
"The whence argument to fseek() should be "
"SEEK_SET, SEEK_END, or SEEK_CUR."));
C.emitReport(llvm::make_unique<BugReport>(
*BT_illegalwhence, BT_illegalwhence->getDescription(), N));
}
}
void StreamChecker::Ftell(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
state, C))
return;
}
void StreamChecker::Rewind(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
state, C))
return;
}
void StreamChecker::Fgetpos(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
state, C))
return;
}
void StreamChecker::Fsetpos(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
state, C))
return;
}
void StreamChecker::Clearerr(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
state, C))
return;
}
void StreamChecker::Feof(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
state, C))
return;
}
void StreamChecker::Ferror(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
state, C))
return;
}
void StreamChecker::Fileno(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
state, C))
return;
}
ProgramStateRef StreamChecker::CheckNullStream(SVal SV, ProgramStateRef state,
CheckerContext &C) const {
Optional<DefinedSVal> DV = SV.getAs<DefinedSVal>();
if (!DV)
return nullptr;
ConstraintManager &CM = C.getConstraintManager();
ProgramStateRef stateNotNull, stateNull;
std::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
if (!stateNotNull && stateNull) {
if (ExplodedNode *N = C.generateSink(stateNull)) {
if (!BT_nullfp)
BT_nullfp.reset(new BuiltinBug(this, "NULL stream pointer",
"Stream pointer might be NULL."));
C.emitReport(llvm::make_unique<BugReport>(
*BT_nullfp, BT_nullfp->getDescription(), N));
}
return nullptr;
}
return stateNotNull;
}
ProgramStateRef StreamChecker::CheckDoubleClose(const CallExpr *CE,
ProgramStateRef state,
CheckerContext &C) const {
SymbolRef Sym =
state->getSVal(CE->getArg(0), C.getLocationContext()).getAsSymbol();
if (!Sym)
return state;
const StreamState *SS = state->get<StreamMap>(Sym);
// If the file stream is not tracked, return.
if (!SS)
return state;
// Check: closing an already-closed file stream causes undefined behaviour,
// per the man pages.
if (SS->isClosed()) {
ExplodedNode *N = C.generateSink();
if (N) {
if (!BT_doubleclose)
BT_doubleclose.reset(new BuiltinBug(
this, "Double fclose", "Try to close a file Descriptor already"
" closed. Cause undefined behaviour."));
C.emitReport(llvm::make_unique<BugReport>(
*BT_doubleclose, BT_doubleclose->getDescription(), N));
}
return nullptr;
}
// Mark the file stream as closed.
return state->set<StreamMap>(Sym, StreamState::getClosed(CE));
}
void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
CheckerContext &C) const {
// TODO: Clean up the state.
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
E = SymReaper.dead_end(); I != E; ++I) {
SymbolRef Sym = *I;
ProgramStateRef state = C.getState();
const StreamState *SS = state->get<StreamMap>(Sym);
if (!SS)
continue;
if (SS->isOpened()) {
ExplodedNode *N = C.generateSink();
if (N) {
if (!BT_ResourceLeak)
BT_ResourceLeak.reset(new BuiltinBug(
this, "Resource Leak",
"Opened File never closed. Potential Resource leak."));
C.emitReport(llvm::make_unique<BugReport>(
*BT_ResourceLeak, BT_ResourceLeak->getDescription(), N));
}
}
}
}
void ento::registerStreamChecker(CheckerManager &mgr) {
mgr.registerChecker<StreamChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp | //= UnixAPIChecker.h - Checks preconditions for various Unix APIs --*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This defines UnixAPIChecker, which is an assortment of checks on calls
// to various, widely used UNIX/Posix functions.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"
#include <fcntl.h>
using namespace clang;
using namespace ento;
namespace {
class UnixAPIChecker : public Checker< check::PreStmt<CallExpr> > {
mutable std::unique_ptr<BugType> BT_open, BT_pthreadOnce, BT_mallocZero;
mutable Optional<uint64_t> Val_O_CREAT;
public:
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
void CheckOpen(CheckerContext &C, const CallExpr *CE) const;
void CheckPthreadOnce(CheckerContext &C, const CallExpr *CE) const;
void CheckCallocZero(CheckerContext &C, const CallExpr *CE) const;
void CheckMallocZero(CheckerContext &C, const CallExpr *CE) const;
void CheckReallocZero(CheckerContext &C, const CallExpr *CE) const;
void CheckReallocfZero(CheckerContext &C, const CallExpr *CE) const;
void CheckAllocaZero(CheckerContext &C, const CallExpr *CE) const;
void CheckVallocZero(CheckerContext &C, const CallExpr *CE) const;
typedef void (UnixAPIChecker::*SubChecker)(CheckerContext &,
const CallExpr *) const;
private:
bool ReportZeroByteAllocation(CheckerContext &C,
ProgramStateRef falseState,
const Expr *arg,
const char *fn_name) const;
void BasicAllocationCheck(CheckerContext &C,
const CallExpr *CE,
const unsigned numArgs,
const unsigned sizeArg,
const char *fn) const;
void LazyInitialize(std::unique_ptr<BugType> &BT, const char *name) const {
if (BT)
return;
BT.reset(new BugType(this, name, categories::UnixAPI));
}
void ReportOpenBug(CheckerContext &C,
ProgramStateRef State,
const char *Msg,
SourceRange SR) const;
};
} //end anonymous namespace
//===----------------------------------------------------------------------===//
// "open" (man 2 open)
//===----------------------------------------------------------------------===//
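// For illustration, a hypothetical call this section diagnoses:
//
//   int fd = open("f.txt", O_CREAT);   // 'O_CREAT' set but the third (mode)
//                                      // argument is missing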
void UnixAPIChecker::ReportOpenBug(CheckerContext &C,
ProgramStateRef State,
const char *Msg,
SourceRange SR) const {
ExplodedNode *N = C.generateSink(State);
if (!N)
return;
LazyInitialize(BT_open, "Improper use of 'open'");
auto Report = llvm::make_unique<BugReport>(*BT_open, Msg, N);
Report->addRange(SR);
C.emitReport(std::move(Report));
}
void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
ProgramStateRef state = C.getState();
if (CE->getNumArgs() < 2) {
// The frontend should issue a warning for this case, so this is a sanity
// check.
return;
} else if (CE->getNumArgs() == 3) {
const Expr *Arg = CE->getArg(2);
QualType QT = Arg->getType();
if (!QT->isIntegerType()) {
ReportOpenBug(C, state,
"Third argument to 'open' is not an integer",
Arg->getSourceRange());
return;
}
} else if (CE->getNumArgs() > 3) {
ReportOpenBug(C, state,
"Call to 'open' with more than three arguments",
CE->getArg(3)->getSourceRange());
return;
}
// The definition of O_CREAT is platform specific. We need a better way
// of querying this information from the checking environment.
if (!Val_O_CREAT.hasValue()) {
if (C.getASTContext().getTargetInfo().getTriple().getVendor()
== llvm::Triple::Apple)
Val_O_CREAT = 0x0200;
else {
// FIXME: We need a more general way of getting the O_CREAT value.
// We could possibly grovel through the preprocessor state, but
// that would require passing the Preprocessor object to the ExprEngine.
// See also: MallocChecker.cpp / M_ZERO.
return;
}
}
// Now check if oflags has O_CREAT set.
const Expr *oflagsEx = CE->getArg(1);
const SVal V = state->getSVal(oflagsEx, C.getLocationContext());
if (!V.getAs<NonLoc>()) {
// The case where 'V' can be a location can only be due to a bad header,
// so in this case bail out.
return;
}
NonLoc oflags = V.castAs<NonLoc>();
NonLoc ocreateFlag = C.getSValBuilder()
.makeIntVal(Val_O_CREAT.getValue(), oflagsEx->getType()).castAs<NonLoc>();
SVal maskedFlagsUC = C.getSValBuilder().evalBinOpNN(state, BO_And,
oflags, ocreateFlag,
oflagsEx->getType());
if (maskedFlagsUC.isUnknownOrUndef())
return;
DefinedSVal maskedFlags = maskedFlagsUC.castAs<DefinedSVal>();
// Check if maskedFlags is non-zero.
ProgramStateRef trueState, falseState;
std::tie(trueState, falseState) = state->assume(maskedFlags);
// Only emit an error if the value of 'maskedFlags' is properly
// constrained.
if (!(trueState && !falseState))
return;
if (CE->getNumArgs() < 3) {
ReportOpenBug(C, trueState,
"Call to 'open' requires a third argument when "
"the 'O_CREAT' flag is set",
oflagsEx->getSourceRange());
}
}
//===----------------------------------------------------------------------===//
// pthread_once
//===----------------------------------------------------------------------===//
void UnixAPIChecker::CheckPthreadOnce(CheckerContext &C,
const CallExpr *CE) const {
// This is similar to 'CheckDispatchOnce' in the MacOSXAPIChecker.
// They can possibly be refactored.
if (CE->getNumArgs() < 1)
return;
// Check if the first argument is stack allocated. If so, issue a warning
// because that's likely to be bad news.
ProgramStateRef state = C.getState();
const MemRegion *R =
state->getSVal(CE->getArg(0), C.getLocationContext()).getAsRegion();
if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
return;
ExplodedNode *N = C.generateSink(state);
if (!N)
return;
SmallString<256> S;
llvm::raw_svector_ostream os(S);
os << "Call to 'pthread_once' uses";
if (const VarRegion *VR = dyn_cast<VarRegion>(R))
os << " the local variable '" << VR->getDecl()->getName() << '\'';
else
os << " stack allocated memory";
os << " for the \"control\" value. Using such transient memory for "
"the control value is potentially dangerous.";
if (isa<VarRegion>(R) && isa<StackLocalsSpaceRegion>(R->getMemorySpace()))
os << " Perhaps you intended to declare the variable as 'static'?";
LazyInitialize(BT_pthreadOnce, "Improper use of 'pthread_once'");
auto report = llvm::make_unique<BugReport>(*BT_pthreadOnce, os.str(), N);
report->addRange(CE->getArg(0)->getSourceRange());
C.emitReport(std::move(report));
}
//===----------------------------------------------------------------------===//
// "calloc", "malloc", "realloc", "reallocf", "alloca" and "valloc"
// with allocation size 0
//===----------------------------------------------------------------------===//
// FIXME: Eventually these should be rolled into the MallocChecker, but right now
// they're more basic and valuable for widespread use.
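// For illustration, a hypothetical zero-byte allocation reported below:
//
//   void *p = malloc(0);   // allocation size of 0 bytes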
// Returns true if we try to do a zero byte allocation, false otherwise.
// Fills in trueState and falseState.
static bool IsZeroByteAllocation(ProgramStateRef state,
const SVal argVal,
ProgramStateRef *trueState,
ProgramStateRef *falseState) {
std::tie(*trueState, *falseState) =
state->assume(argVal.castAs<DefinedSVal>());
return (*falseState && !*trueState);
}
// Generates an error report, indicating that the function whose name is given
// will perform a zero byte allocation.
// Returns false if an error occurred, true otherwise.
bool UnixAPIChecker::ReportZeroByteAllocation(CheckerContext &C,
ProgramStateRef falseState,
const Expr *arg,
const char *fn_name) const {
ExplodedNode *N = C.generateSink(falseState);
if (!N)
return false;
LazyInitialize(BT_mallocZero,
"Undefined allocation of 0 bytes (CERT MEM04-C; CWE-131)");
SmallString<256> S;
llvm::raw_svector_ostream os(S);
os << "Call to '" << fn_name << "' has an allocation size of 0 bytes";
auto report = llvm::make_unique<BugReport>(*BT_mallocZero, os.str(), N);
report->addRange(arg->getSourceRange());
bugreporter::trackNullOrUndefValue(N, arg, *report);
C.emitReport(std::move(report));
return true;
}
// Does a basic check for 0-sized allocations suitable for most of the below
// functions (modulo "calloc")
void UnixAPIChecker::BasicAllocationCheck(CheckerContext &C,
const CallExpr *CE,
const unsigned numArgs,
const unsigned sizeArg,
const char *fn) const {
// Sanity check for the correct number of arguments
if (CE->getNumArgs() != numArgs)
return;
// Check if the allocation size is 0.
ProgramStateRef state = C.getState();
ProgramStateRef trueState = nullptr, falseState = nullptr;
const Expr *arg = CE->getArg(sizeArg);
SVal argVal = state->getSVal(arg, C.getLocationContext());
if (argVal.isUnknownOrUndef())
return;
// Is the value perfectly constrained to zero?
if (IsZeroByteAllocation(state, argVal, &trueState, &falseState)) {
(void) ReportZeroByteAllocation(C, falseState, arg, fn);
return;
}
// Assume the value is non-zero going forward.
assert(trueState);
if (trueState != state)
C.addTransition(trueState);
}
void UnixAPIChecker::CheckCallocZero(CheckerContext &C,
const CallExpr *CE) const {
unsigned int nArgs = CE->getNumArgs();
if (nArgs != 2)
return;
ProgramStateRef state = C.getState();
ProgramStateRef trueState = nullptr, falseState = nullptr;
unsigned int i;
for (i = 0; i < nArgs; i++) {
const Expr *arg = CE->getArg(i);
SVal argVal = state->getSVal(arg, C.getLocationContext());
if (argVal.isUnknownOrUndef()) {
if (i == 0)
continue;
else
return;
}
if (IsZeroByteAllocation(state, argVal, &trueState, &falseState)) {
if (ReportZeroByteAllocation(C, falseState, arg, "calloc"))
return;
else if (i == 0)
continue;
else
return;
}
}
// Assume the value is non-zero going forward.
assert(trueState);
if (trueState != state)
C.addTransition(trueState);
}
void UnixAPIChecker::CheckMallocZero(CheckerContext &C,
const CallExpr *CE) const {
BasicAllocationCheck(C, CE, 1, 0, "malloc");
}
void UnixAPIChecker::CheckReallocZero(CheckerContext &C,
const CallExpr *CE) const {
BasicAllocationCheck(C, CE, 2, 1, "realloc");
}
void UnixAPIChecker::CheckReallocfZero(CheckerContext &C,
const CallExpr *CE) const {
BasicAllocationCheck(C, CE, 2, 1, "reallocf");
}
void UnixAPIChecker::CheckAllocaZero(CheckerContext &C,
const CallExpr *CE) const {
BasicAllocationCheck(C, CE, 1, 0, "alloca");
}
void UnixAPIChecker::CheckVallocZero(CheckerContext &C,
const CallExpr *CE) const {
BasicAllocationCheck(C, CE, 1, 0, "valloc");
}
//===----------------------------------------------------------------------===//
// Central dispatch function.
//===----------------------------------------------------------------------===//
void UnixAPIChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD || FD->getKind() != Decl::Function)
return;
StringRef FName = C.getCalleeName(FD);
if (FName.empty())
return;
SubChecker SC =
llvm::StringSwitch<SubChecker>(FName)
.Case("open", &UnixAPIChecker::CheckOpen)
.Case("pthread_once", &UnixAPIChecker::CheckPthreadOnce)
.Case("calloc", &UnixAPIChecker::CheckCallocZero)
.Case("malloc", &UnixAPIChecker::CheckMallocZero)
.Case("realloc", &UnixAPIChecker::CheckReallocZero)
.Case("reallocf", &UnixAPIChecker::CheckReallocfZero)
.Cases("alloca", "__builtin_alloca", &UnixAPIChecker::CheckAllocaZero)
.Case("valloc", &UnixAPIChecker::CheckVallocZero)
.Default(nullptr);
if (SC)
(this->*SC)(C, CE);
}
//===----------------------------------------------------------------------===//
// Registration.
//===----------------------------------------------------------------------===//
void ento::registerUnixAPIChecker(CheckerManager &mgr) {
mgr.registerChecker<UnixAPIChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp | //=== PointerSubChecker.cpp - Pointer subtraction checker ------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines PointerSubChecker, a builtin checker that checks for
// pointer subtractions on two pointers pointing to different memory chunks.
// This check corresponds to CWE-469.
//
//===----------------------------------------------------------------------===//
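// For illustration, a hypothetical subtraction this checker reports:
//
//   int a[10], b[10];
//   ptrdiff_t d = &a[3] - &b[1];   // operands point into different objects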
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
using namespace clang;
using namespace ento;
namespace {
class PointerSubChecker
: public Checker< check::PreStmt<BinaryOperator> > {
mutable std::unique_ptr<BuiltinBug> BT;
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
};
}
void PointerSubChecker::checkPreStmt(const BinaryOperator *B,
CheckerContext &C) const {
// When doing pointer subtraction, if the two pointers do not point to the
// same memory chunk, emit a warning.
if (B->getOpcode() != BO_Sub)
return;
ProgramStateRef state = C.getState();
const LocationContext *LCtx = C.getLocationContext();
SVal LV = state->getSVal(B->getLHS(), LCtx);
SVal RV = state->getSVal(B->getRHS(), LCtx);
const MemRegion *LR = LV.getAsRegion();
const MemRegion *RR = RV.getAsRegion();
if (!(LR && RR))
return;
const MemRegion *BaseLR = LR->getBaseRegion();
const MemRegion *BaseRR = RR->getBaseRegion();
if (BaseLR == BaseRR)
return;
// Allow arithmetic on different symbolic regions.
if (isa<SymbolicRegion>(BaseLR) || isa<SymbolicRegion>(BaseRR))
return;
if (ExplodedNode *N = C.addTransition()) {
if (!BT)
BT.reset(
new BuiltinBug(this, "Pointer subtraction",
"Subtraction of two pointers that do not point to "
"the same memory chunk may cause incorrect result."));
auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N);
R->addRange(B->getSourceRange());
C.emitReport(std::move(R));
}
}
void ento::registerPointerSubChecker(CheckerManager &mgr) {
mgr.registerChecker<PointerSubChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp | //=======- VirtualCallChecker.cpp --------------------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a checker that checks virtual function calls during
// construction or destruction of C++ objects.
//
//===----------------------------------------------------------------------===//
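// For illustration, a hypothetical class this checker warns about:
//
//   struct S {
//     S() { init(); }        // virtual call during construction dispatches
//     virtual void init();   // to S::init, never to a subclass override
//   };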
#include "ClangSACheckers.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
namespace {
class WalkAST : public StmtVisitor<WalkAST> {
const CheckerBase *Checker;
BugReporter &BR;
AnalysisDeclContext *AC;
typedef const CallExpr * WorkListUnit;
typedef SmallVector<WorkListUnit, 20> DFSWorkList;
/// A vector representing the worklist which has a chain of CallExprs.
DFSWorkList WList;
enum Kind { NotVisited,
PreVisited, /**< A CallExpr to this FunctionDecl is in the
worklist, but the body has not yet been
visited. */
PostVisited /**< A CallExpr to this FunctionDecl is in the
worklist, and the body has been visited. */
};
/// A DenseMap that records visited states of FunctionDecls.
llvm::DenseMap<const FunctionDecl *, Kind> VisitedFunctions;
/// The CallExpr whose body is currently being visited. This is used for
/// generating bug reports. This is null while visiting the body of a
/// constructor or destructor.
const CallExpr *visitingCallExpr;
public:
WalkAST(const CheckerBase *checker, BugReporter &br,
AnalysisDeclContext *ac)
: Checker(checker), BR(br), AC(ac), visitingCallExpr(nullptr) {}
bool hasWork() const { return !WList.empty(); }
/// This method adds a CallExpr to the worklist and marks the callee as
/// being PreVisited.
void Enqueue(WorkListUnit WLUnit) {
const FunctionDecl *FD = WLUnit->getDirectCallee();
if (!FD || !FD->getBody())
return;
Kind &K = VisitedFunctions[FD];
if (K != NotVisited)
return;
K = PreVisited;
WList.push_back(WLUnit);
}
/// This method returns an item from the worklist without removing it.
WorkListUnit Dequeue() {
assert(!WList.empty());
return WList.back();
}
void Execute() {
while (hasWork()) {
WorkListUnit WLUnit = Dequeue();
const FunctionDecl *FD = WLUnit->getDirectCallee();
assert(FD && FD->getBody());
if (VisitedFunctions[FD] == PreVisited) {
// If the callee is PreVisited, walk its body.
// Visit the body.
SaveAndRestore<const CallExpr *> SaveCall(visitingCallExpr, WLUnit);
Visit(FD->getBody());
// Mark the function as being PostVisited to indicate we have
// scanned the body.
VisitedFunctions[FD] = PostVisited;
continue;
}
// Otherwise, the callee is PostVisited.
// Remove it from the worklist.
assert(VisitedFunctions[FD] == PostVisited);
WList.pop_back();
}
}
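  // Illustrative trace (not in the original source): for 'A() { f(); }'
  // where f() calls g(), the worklist evolves as [f] -> [f, g] -> [f] -> [],
  // and each callee body is walked exactly once (PreVisited -> PostVisited).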
// Stmt visitor methods.
void VisitCallExpr(CallExpr *CE);
void VisitCXXMemberCallExpr(CallExpr *CE);
void VisitStmt(Stmt *S) { VisitChildren(S); }
void VisitChildren(Stmt *S);
void ReportVirtualCall(const CallExpr *CE, bool isPure);
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// AST walking.
//===----------------------------------------------------------------------===//
void WalkAST::VisitChildren(Stmt *S) {
for (Stmt *Child : S->children())
if (Child)
Visit(Child);
}
void WalkAST::VisitCallExpr(CallExpr *CE) {
VisitChildren(CE);
Enqueue(CE);
}
void WalkAST::VisitCXXMemberCallExpr(CallExpr *CE) {
VisitChildren(CE);
bool callIsNonVirtual = false;
// Several situations to elide for checking.
if (MemberExpr *CME = dyn_cast<MemberExpr>(CE->getCallee())) {
// If the member access is fully qualified (i.e., X::F), then treat
// this as a non-virtual call and do not warn.
if (CME->getQualifier())
callIsNonVirtual = true;
if (Expr *base = CME->getBase()->IgnoreImpCasts()) {
// Elide analyzing the call entirely if the base pointer is not 'this'.
if (!isa<CXXThisExpr>(base))
return;
      // If the most derived class is marked final, we know that no subclass
      // can override this member.
if (base->getBestDynamicClassType()->hasAttr<FinalAttr>())
callIsNonVirtual = true;
}
}
// Get the callee.
  // getDirectCallee() may return null (e.g. for calls through a member
  // pointer), so use dyn_cast_or_null to avoid dereferencing null.
  const CXXMethodDecl *MD =
      dyn_cast_or_null<CXXMethodDecl>(CE->getDirectCallee());
if (MD && MD->isVirtual() && !callIsNonVirtual && !MD->hasAttr<FinalAttr>() &&
!MD->getParent()->hasAttr<FinalAttr>())
ReportVirtualCall(CE, MD->isPure());
Enqueue(CE);
}
void WalkAST::ReportVirtualCall(const CallExpr *CE, bool isPure) {
SmallString<100> buf;
llvm::raw_svector_ostream os(buf);
os << "Call Path : ";
// Name of current visiting CallExpr.
os << *CE->getDirectCallee();
  // Name of the CallExpr whose body we are currently walking.
if (visitingCallExpr)
os << " <-- " << *visitingCallExpr->getDirectCallee();
// Names of FunctionDecls in worklist with state PostVisited.
for (SmallVectorImpl<const CallExpr *>::iterator I = WList.end(),
E = WList.begin(); I != E; --I) {
const FunctionDecl *FD = (*(I-1))->getDirectCallee();
assert(FD);
if (VisitedFunctions[FD] == PostVisited)
os << " <-- " << *FD;
}
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
SourceRange R = CE->getCallee()->getSourceRange();
if (isPure) {
os << "\n" << "Call pure virtual functions during construction or "
<< "destruction may leads undefined behaviour";
BR.EmitBasicReport(AC->getDecl(), Checker,
"Call pure virtual function during construction or "
"Destruction",
"Cplusplus", os.str(), CELoc, R);
return;
}
else {
os << "\n" << "Call virtual functions during construction or "
<< "destruction will never go to a more derived class";
BR.EmitBasicReport(AC->getDecl(), Checker,
"Call virtual function during construction or "
"Destruction",
"Cplusplus", os.str(), CELoc, R);
return;
}
}
//===----------------------------------------------------------------------===//
// VirtualCallChecker
//===----------------------------------------------------------------------===//
namespace {
class VirtualCallChecker : public Checker<check::ASTDecl<CXXRecordDecl> > {
public:
void checkASTDecl(const CXXRecordDecl *RD, AnalysisManager& mgr,
BugReporter &BR) const {
WalkAST walker(this, BR, mgr.getAnalysisDeclContext(RD));
// Check the constructors.
for (const auto *I : RD->ctors()) {
if (!I->isCopyOrMoveConstructor())
if (Stmt *Body = I->getBody()) {
walker.Visit(Body);
walker.Execute();
}
}
// Check the destructor.
if (CXXDestructorDecl *DD = RD->getDestructor())
if (Stmt *Body = DD->getBody()) {
walker.Visit(Body);
walker.Execute();
}
}
};
}
void ento::registerVirtualCallChecker(CheckerManager &mgr) {
mgr.registerChecker<VirtualCallChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp | //== GenericTaintChecker.cpp ----------------------------------- -*- C++ -*--=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This checker defines the attack surface for generic taint propagation.
//
// The taint information produced by it might be useful to other checkers. For
// example, checkers should report errors which involve tainted data more
// aggressively, even if the involved symbols are underconstrained.
//
//===----------------------------------------------------------------------===//
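// For illustration only (not part of the original source), a minimal sketch
// of a source-to-sink flow this checker reports:
//
//   char buf[64];
//   scanf("%63s", buf);  // post-visit of scanf taints the data in 'buf'
//   system(buf);         // warn: untrusted data is passed to a system call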
#include "ClangSACheckers.h"
#include "clang/AST/Attr.h"
#include "clang/Basic/Builtins.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include <climits>
using namespace clang;
using namespace ento;
namespace {
class GenericTaintChecker : public Checker< check::PostStmt<CallExpr>,
check::PreStmt<CallExpr> > {
public:
static void *getTag() { static int Tag; return &Tag; }
void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
private:
static const unsigned InvalidArgIndex = UINT_MAX;
  /// Denotes the return value.
static const unsigned ReturnValueIndex = UINT_MAX - 1;
mutable std::unique_ptr<BugType> BT;
inline void initBugType() const {
if (!BT)
BT.reset(new BugType(this, "Use of Untrusted Data", "Untrusted Data"));
}
/// \brief Catch taint related bugs. Check if tainted data is passed to a
/// system call etc.
bool checkPre(const CallExpr *CE, CheckerContext &C) const;
/// \brief Add taint sources on a pre-visit.
void addSourcesPre(const CallExpr *CE, CheckerContext &C) const;
/// \brief Propagate taint generated at pre-visit.
bool propagateFromPre(const CallExpr *CE, CheckerContext &C) const;
/// \brief Add taint sources on a post visit.
void addSourcesPost(const CallExpr *CE, CheckerContext &C) const;
/// Check if the region the expression evaluates to is the standard input,
/// and thus, is tainted.
static bool isStdin(const Expr *E, CheckerContext &C);
/// \brief Given a pointer argument, get the symbol of the value it contains
/// (points to).
static SymbolRef getPointedToSymbol(CheckerContext &C, const Expr *Arg);
/// Functions defining the attack surface.
typedef ProgramStateRef (GenericTaintChecker::*FnCheck)(const CallExpr *,
CheckerContext &C) const;
ProgramStateRef postScanf(const CallExpr *CE, CheckerContext &C) const;
ProgramStateRef postSocket(const CallExpr *CE, CheckerContext &C) const;
ProgramStateRef postRetTaint(const CallExpr *CE, CheckerContext &C) const;
/// Taint the scanned input if the file is tainted.
ProgramStateRef preFscanf(const CallExpr *CE, CheckerContext &C) const;
/// Check for CWE-134: Uncontrolled Format String.
static const char MsgUncontrolledFormatString[];
bool checkUncontrolledFormatString(const CallExpr *CE,
CheckerContext &C) const;
/// Check for:
/// CERT/STR02-C. "Sanitize data passed to complex subsystems"
/// CWE-78, "Failure to Sanitize Data into an OS Command"
static const char MsgSanitizeSystemArgs[];
bool checkSystemCall(const CallExpr *CE, StringRef Name,
CheckerContext &C) const;
  /// Check if tainted data is used as a buffer size in strn...-family
  /// functions and allocators.
static const char MsgTaintedBufferSize[];
bool checkTaintedBufferSize(const CallExpr *CE, const FunctionDecl *FDecl,
CheckerContext &C) const;
/// Generate a report if the expression is tainted or points to tainted data.
bool generateReportIfTainted(const Expr *E, const char Msg[],
CheckerContext &C) const;
typedef SmallVector<unsigned, 2> ArgVector;
/// \brief A struct used to specify taint propagation rules for a function.
///
/// If any of the possible taint source arguments is tainted, all of the
/// destination arguments should also be tainted. Use InvalidArgIndex in the
/// src list to specify that all of the arguments can introduce taint. Use
/// InvalidArgIndex in the dst arguments to signify that all the non-const
/// pointer and reference arguments might be tainted on return. If
/// ReturnValueIndex is added to the dst list, the return value will be
/// tainted.
struct TaintPropagationRule {
/// List of arguments which can be taint sources and should be checked.
ArgVector SrcArgs;
/// List of arguments which should be tainted on function return.
ArgVector DstArgs;
// TODO: Check if using other data structures would be more optimal.
TaintPropagationRule() {}
TaintPropagationRule(unsigned SArg,
unsigned DArg, bool TaintRet = false) {
SrcArgs.push_back(SArg);
DstArgs.push_back(DArg);
if (TaintRet)
DstArgs.push_back(ReturnValueIndex);
}
TaintPropagationRule(unsigned SArg1, unsigned SArg2,
unsigned DArg, bool TaintRet = false) {
SrcArgs.push_back(SArg1);
SrcArgs.push_back(SArg2);
DstArgs.push_back(DArg);
if (TaintRet)
DstArgs.push_back(ReturnValueIndex);
}
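    // Illustrative reading (not in the original source):
    // TaintPropagationRule(0, ReturnValueIndex) encodes "if argument 0 is
    // tainted, taint the return value" -- the shape used for atoi below.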
/// Get the propagation rule for a given function.
static TaintPropagationRule
getTaintPropagationRule(const FunctionDecl *FDecl,
StringRef Name,
CheckerContext &C);
inline void addSrcArg(unsigned A) { SrcArgs.push_back(A); }
inline void addDstArg(unsigned A) { DstArgs.push_back(A); }
inline bool isNull() const { return SrcArgs.empty(); }
inline bool isDestinationArgument(unsigned ArgNum) const {
return (std::find(DstArgs.begin(),
DstArgs.end(), ArgNum) != DstArgs.end());
}
static inline bool isTaintedOrPointsToTainted(const Expr *E,
ProgramStateRef State,
CheckerContext &C) {
return (State->isTainted(E, C.getLocationContext()) || isStdin(E, C) ||
(E->getType().getTypePtr()->isPointerType() &&
State->isTainted(getPointedToSymbol(C, E))));
}
/// \brief Pre-process a function which propagates taint according to the
/// taint rule.
ProgramStateRef process(const CallExpr *CE, CheckerContext &C) const;
};
};
const unsigned GenericTaintChecker::ReturnValueIndex;
const unsigned GenericTaintChecker::InvalidArgIndex;
const char GenericTaintChecker::MsgUncontrolledFormatString[] =
"Untrusted data is used as a format string "
"(CWE-134: Uncontrolled Format String)";
const char GenericTaintChecker::MsgSanitizeSystemArgs[] =
"Untrusted data is passed to a system call "
"(CERT/STR02-C. Sanitize data passed to complex subsystems)";
const char GenericTaintChecker::MsgTaintedBufferSize[] =
"Untrusted data is used to specify the buffer size "
"(CERT/STR31-C. Guarantee that storage for strings has sufficient space for "
"character data and the null terminator)";
} // end of anonymous namespace
/// A set used to pass information from the call pre-visit to the call
/// post-visit. The values are unsigned integers, which are either
/// ReturnValueIndex or the index of a pointer/reference argument that
/// points to data which should be tainted on return.
REGISTER_SET_WITH_PROGRAMSTATE(TaintArgsOnPostVisit, unsigned)
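// Illustrative walk-through (not in the original source): for a call such as
// read(fd, buf, n), the rule below records destination index 1 in this set at
// pre-visit; propagateFromPre() then taints the data that 'buf' points to.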
GenericTaintChecker::TaintPropagationRule
GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
const FunctionDecl *FDecl,
StringRef Name,
CheckerContext &C) {
// TODO: Currently, we might lose precision here: we always mark a return
  // value as tainted even if it's just a pointer pointing to tainted data.
// Check for exact name match for functions without builtin substitutes.
TaintPropagationRule Rule = llvm::StringSwitch<TaintPropagationRule>(Name)
.Case("atoi", TaintPropagationRule(0, ReturnValueIndex))
.Case("atol", TaintPropagationRule(0, ReturnValueIndex))
.Case("atoll", TaintPropagationRule(0, ReturnValueIndex))
.Case("getc", TaintPropagationRule(0, ReturnValueIndex))
.Case("fgetc", TaintPropagationRule(0, ReturnValueIndex))
.Case("getc_unlocked", TaintPropagationRule(0, ReturnValueIndex))
.Case("getw", TaintPropagationRule(0, ReturnValueIndex))
.Case("toupper", TaintPropagationRule(0, ReturnValueIndex))
.Case("tolower", TaintPropagationRule(0, ReturnValueIndex))
.Case("strchr", TaintPropagationRule(0, ReturnValueIndex))
.Case("strrchr", TaintPropagationRule(0, ReturnValueIndex))
.Case("read", TaintPropagationRule(0, 2, 1, true))
.Case("pread", TaintPropagationRule(InvalidArgIndex, 1, true))
.Case("gets", TaintPropagationRule(InvalidArgIndex, 0, true))
.Case("fgets", TaintPropagationRule(2, 0, true))
.Case("getline", TaintPropagationRule(2, 0))
.Case("getdelim", TaintPropagationRule(3, 0))
.Case("fgetln", TaintPropagationRule(0, ReturnValueIndex))
.Default(TaintPropagationRule());
if (!Rule.isNull())
return Rule;
// Check if it's one of the memory setting/copying functions.
  // This check is specialized but faster than calling isCLibraryFunction.
unsigned BId = 0;
if ( (BId = FDecl->getMemoryFunctionKind()) )
switch(BId) {
case Builtin::BImemcpy:
case Builtin::BImemmove:
case Builtin::BIstrncpy:
case Builtin::BIstrncat:
return TaintPropagationRule(1, 2, 0, true);
case Builtin::BIstrlcpy:
case Builtin::BIstrlcat:
return TaintPropagationRule(1, 2, 0, false);
case Builtin::BIstrndup:
return TaintPropagationRule(0, 1, ReturnValueIndex);
default:
break;
};
// Process all other functions which could be defined as builtins.
if (Rule.isNull()) {
if (C.isCLibraryFunction(FDecl, "snprintf") ||
C.isCLibraryFunction(FDecl, "sprintf"))
return TaintPropagationRule(InvalidArgIndex, 0, true);
else if (C.isCLibraryFunction(FDecl, "strcpy") ||
C.isCLibraryFunction(FDecl, "stpcpy") ||
C.isCLibraryFunction(FDecl, "strcat"))
return TaintPropagationRule(1, 0, true);
else if (C.isCLibraryFunction(FDecl, "bcopy"))
return TaintPropagationRule(0, 2, 1, false);
else if (C.isCLibraryFunction(FDecl, "strdup") ||
C.isCLibraryFunction(FDecl, "strdupa"))
return TaintPropagationRule(0, ReturnValueIndex);
else if (C.isCLibraryFunction(FDecl, "wcsdup"))
return TaintPropagationRule(0, ReturnValueIndex);
}
// Skipping the following functions, since they might be used for cleansing
// or smart memory copy:
// - memccpy - copying until hitting a special character.
return TaintPropagationRule();
}
void GenericTaintChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
// Check for errors first.
if (checkPre(CE, C))
return;
// Add taint second.
addSourcesPre(CE, C);
}
void GenericTaintChecker::checkPostStmt(const CallExpr *CE,
CheckerContext &C) const {
if (propagateFromPre(CE, C))
return;
addSourcesPost(CE, C);
}
void GenericTaintChecker::addSourcesPre(const CallExpr *CE,
CheckerContext &C) const {
ProgramStateRef State = nullptr;
const FunctionDecl *FDecl = C.getCalleeDecl(CE);
if (!FDecl || FDecl->getKind() != Decl::Function)
return;
StringRef Name = C.getCalleeName(FDecl);
if (Name.empty())
return;
// First, try generating a propagation rule for this function.
TaintPropagationRule Rule =
TaintPropagationRule::getTaintPropagationRule(FDecl, Name, C);
if (!Rule.isNull()) {
State = Rule.process(CE, C);
if (!State)
return;
C.addTransition(State);
return;
}
// Otherwise, check if we have custom pre-processing implemented.
FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
.Case("fscanf", &GenericTaintChecker::preFscanf)
.Default(nullptr);
// Check and evaluate the call.
if (evalFunction)
State = (this->*evalFunction)(CE, C);
if (!State)
return;
C.addTransition(State);
}
bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
// Depending on what was tainted at pre-visit, we determined a set of
// arguments which should be tainted after the function returns. These are
  // stored in the state as the TaintArgsOnPostVisit set.
TaintArgsOnPostVisitTy TaintArgs = State->get<TaintArgsOnPostVisit>();
if (TaintArgs.isEmpty())
return false;
for (llvm::ImmutableSet<unsigned>::iterator
I = TaintArgs.begin(), E = TaintArgs.end(); I != E; ++I) {
unsigned ArgNum = *I;
// Special handling for the tainted return value.
if (ArgNum == ReturnValueIndex) {
State = State->addTaint(CE, C.getLocationContext());
continue;
}
// The arguments are pointer arguments. The data they are pointing at is
// tainted after the call.
if (CE->getNumArgs() < (ArgNum + 1))
return false;
const Expr* Arg = CE->getArg(ArgNum);
SymbolRef Sym = getPointedToSymbol(C, Arg);
if (Sym)
State = State->addTaint(Sym);
}
// Clear up the taint info from the state.
State = State->remove<TaintArgsOnPostVisit>();
if (State != C.getState()) {
C.addTransition(State);
return true;
}
return false;
}
void GenericTaintChecker::addSourcesPost(const CallExpr *CE,
CheckerContext &C) const {
// Define the attack surface.
// Set the evaluation function by switching on the callee name.
const FunctionDecl *FDecl = C.getCalleeDecl(CE);
if (!FDecl || FDecl->getKind() != Decl::Function)
return;
StringRef Name = C.getCalleeName(FDecl);
if (Name.empty())
return;
FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
.Case("scanf", &GenericTaintChecker::postScanf)
// TODO: Add support for vfscanf & family.
.Case("getchar", &GenericTaintChecker::postRetTaint)
.Case("getchar_unlocked", &GenericTaintChecker::postRetTaint)
.Case("getenv", &GenericTaintChecker::postRetTaint)
.Case("fopen", &GenericTaintChecker::postRetTaint)
.Case("fdopen", &GenericTaintChecker::postRetTaint)
.Case("freopen", &GenericTaintChecker::postRetTaint)
.Case("getch", &GenericTaintChecker::postRetTaint)
.Case("wgetch", &GenericTaintChecker::postRetTaint)
.Case("socket", &GenericTaintChecker::postSocket)
.Default(nullptr);
// If the callee isn't defined, it is not of security concern.
// Check and evaluate the call.
ProgramStateRef State = nullptr;
if (evalFunction)
State = (this->*evalFunction)(CE, C);
if (!State)
return;
C.addTransition(State);
}
bool GenericTaintChecker::checkPre(const CallExpr *CE, CheckerContext &C) const {
if (checkUncontrolledFormatString(CE, C))
return true;
const FunctionDecl *FDecl = C.getCalleeDecl(CE);
if (!FDecl || FDecl->getKind() != Decl::Function)
return false;
StringRef Name = C.getCalleeName(FDecl);
if (Name.empty())
return false;
if (checkSystemCall(CE, Name, C))
return true;
if (checkTaintedBufferSize(CE, FDecl, C))
return true;
return false;
}
SymbolRef GenericTaintChecker::getPointedToSymbol(CheckerContext &C,
const Expr* Arg) {
ProgramStateRef State = C.getState();
SVal AddrVal = State->getSVal(Arg->IgnoreParens(), C.getLocationContext());
if (AddrVal.isUnknownOrUndef())
return nullptr;
Optional<Loc> AddrLoc = AddrVal.getAs<Loc>();
if (!AddrLoc)
return nullptr;
const PointerType *ArgTy =
dyn_cast<PointerType>(Arg->getType().getCanonicalType().getTypePtr());
SVal Val = State->getSVal(*AddrLoc,
ArgTy ? ArgTy->getPointeeType(): QualType());
return Val.getAsSymbol();
}
ProgramStateRef
GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
// Check for taint in arguments.
bool IsTainted = false;
for (ArgVector::const_iterator I = SrcArgs.begin(),
E = SrcArgs.end(); I != E; ++I) {
unsigned ArgNum = *I;
if (ArgNum == InvalidArgIndex) {
// Check if any of the arguments is tainted, but skip the
// destination arguments.
for (unsigned int i = 0; i < CE->getNumArgs(); ++i) {
if (isDestinationArgument(i))
continue;
if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(i), State, C)))
break;
}
break;
}
if (CE->getNumArgs() < (ArgNum + 1))
return State;
if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(ArgNum), State, C)))
break;
}
if (!IsTainted)
return State;
// Mark the arguments which should be tainted after the function returns.
for (ArgVector::const_iterator I = DstArgs.begin(),
E = DstArgs.end(); I != E; ++I) {
unsigned ArgNum = *I;
// Should we mark all arguments as tainted?
if (ArgNum == InvalidArgIndex) {
// For all pointer and references that were passed in:
// If they are not pointing to const data, mark data as tainted.
// TODO: So far we are just going one level down; ideally we'd need to
// recurse here.
for (unsigned int i = 0; i < CE->getNumArgs(); ++i) {
const Expr *Arg = CE->getArg(i);
// Process pointer argument.
const Type *ArgTy = Arg->getType().getTypePtr();
QualType PType = ArgTy->getPointeeType();
if ((!PType.isNull() && !PType.isConstQualified())
|| (ArgTy->isReferenceType() && !Arg->getType().isConstQualified()))
State = State->add<TaintArgsOnPostVisit>(i);
}
continue;
}
    // Should we mark the return value?
if (ArgNum == ReturnValueIndex) {
State = State->add<TaintArgsOnPostVisit>(ReturnValueIndex);
continue;
}
// Mark the given argument.
assert(ArgNum < CE->getNumArgs());
State = State->add<TaintArgsOnPostVisit>(ArgNum);
}
return State;
}
// If argument 0 (file descriptor) is tainted, all arguments except for arg 0
// and arg 1 should get taint.
ProgramStateRef GenericTaintChecker::preFscanf(const CallExpr *CE,
CheckerContext &C) const {
assert(CE->getNumArgs() >= 2);
ProgramStateRef State = C.getState();
  // Check if the file descriptor is tainted.
if (State->isTainted(CE->getArg(0), C.getLocationContext()) ||
isStdin(CE->getArg(0), C)) {
// All arguments except for the first two should get taint.
for (unsigned int i = 2; i < CE->getNumArgs(); ++i)
State = State->add<TaintArgsOnPostVisit>(i);
return State;
}
return nullptr;
}
// If argument 0 (the protocol domain) is a network domain, the return value
// should get taint.
ProgramStateRef GenericTaintChecker::postSocket(const CallExpr *CE,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
if (CE->getNumArgs() < 3)
return State;
SourceLocation DomLoc = CE->getArg(0)->getExprLoc();
StringRef DomName = C.getMacroNameOrSpelling(DomLoc);
// White list the internal communication protocols.
if (DomName.equals("AF_SYSTEM") || DomName.equals("AF_LOCAL") ||
DomName.equals("AF_UNIX") || DomName.equals("AF_RESERVED_36"))
return State;
State = State->addTaint(CE, C.getLocationContext());
return State;
}
ProgramStateRef GenericTaintChecker::postScanf(const CallExpr *CE,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
if (CE->getNumArgs() < 2)
return State;
// All arguments except for the very first one should get taint.
for (unsigned int i = 1; i < CE->getNumArgs(); ++i) {
// The arguments are pointer arguments. The data they are pointing at is
// tainted after the call.
const Expr* Arg = CE->getArg(i);
SymbolRef Sym = getPointedToSymbol(C, Arg);
if (Sym)
State = State->addTaint(Sym);
}
return State;
}
ProgramStateRef GenericTaintChecker::postRetTaint(const CallExpr *CE,
CheckerContext &C) const {
return C.getState()->addTaint(CE, C.getLocationContext());
}
bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
ProgramStateRef State = C.getState();
SVal Val = State->getSVal(E, C.getLocationContext());
// stdin is a pointer, so it would be a region.
const MemRegion *MemReg = Val.getAsRegion();
  // The region should be symbolic; we do not know its value.
const SymbolicRegion *SymReg = dyn_cast_or_null<SymbolicRegion>(MemReg);
if (!SymReg)
return false;
  // Get its symbol and find the declaration region it points to.
const SymbolRegionValue *Sm =dyn_cast<SymbolRegionValue>(SymReg->getSymbol());
if (!Sm)
return false;
const DeclRegion *DeclReg = dyn_cast_or_null<DeclRegion>(Sm->getRegion());
if (!DeclReg)
return false;
// This region corresponds to a declaration, find out if it's a global/extern
// variable named stdin with the proper type.
if (const VarDecl *D = dyn_cast_or_null<VarDecl>(DeclReg->getDecl())) {
D = D->getCanonicalDecl();
if ((D->getName().find("stdin") != StringRef::npos) && D->isExternC())
if (const PointerType * PtrTy =
dyn_cast<PointerType>(D->getType().getTypePtr()))
if (PtrTy->getPointeeType() == C.getASTContext().getFILEType())
return true;
}
return false;
}
static bool getPrintfFormatArgumentNum(const CallExpr *CE,
const CheckerContext &C,
unsigned int &ArgNum) {
// Find if the function contains a format string argument.
// Handles: fprintf, printf, sprintf, snprintf, vfprintf, vprintf, vsprintf,
// vsnprintf, syslog, custom annotated functions.
const FunctionDecl *FDecl = C.getCalleeDecl(CE);
if (!FDecl)
return false;
for (const auto *Format : FDecl->specific_attrs<FormatAttr>()) {
ArgNum = Format->getFormatIdx() - 1;
if ((Format->getType()->getName() == "printf") &&
CE->getNumArgs() > ArgNum)
return true;
}
// Or if a function is named setproctitle (this is a heuristic).
if (C.getCalleeName(CE).find("setproctitle") != StringRef::npos) {
ArgNum = 0;
return true;
}
return false;
}
bool GenericTaintChecker::generateReportIfTainted(const Expr *E,
const char Msg[],
CheckerContext &C) const {
assert(E);
// Check for taint.
ProgramStateRef State = C.getState();
if (!State->isTainted(getPointedToSymbol(C, E)) &&
!State->isTainted(E, C.getLocationContext()))
return false;
// Generate diagnostic.
if (ExplodedNode *N = C.addTransition()) {
initBugType();
auto report = llvm::make_unique<BugReport>(*BT, Msg, N);
report->addRange(E->getSourceRange());
C.emitReport(std::move(report));
return true;
}
return false;
}
bool GenericTaintChecker::checkUncontrolledFormatString(const CallExpr *CE,
                                                  CheckerContext &C) const {
// Check if the function contains a format string argument.
unsigned int ArgNum = 0;
if (!getPrintfFormatArgumentNum(CE, C, ArgNum))
return false;
  // If either the format string content or the pointer itself is tainted, warn.
if (generateReportIfTainted(CE->getArg(ArgNum),
MsgUncontrolledFormatString, C))
return true;
return false;
}
bool GenericTaintChecker::checkSystemCall(const CallExpr *CE,
StringRef Name,
CheckerContext &C) const {
// TODO: It might make sense to run this check on demand. In some cases,
// we should check if the environment has been cleansed here. We also might
  // need to know if the user was reset before these calls (seteuid).
unsigned ArgNum = llvm::StringSwitch<unsigned>(Name)
.Case("system", 0)
.Case("popen", 0)
.Case("execl", 0)
.Case("execle", 0)
.Case("execlp", 0)
.Case("execv", 0)
.Case("execvp", 0)
.Case("execvP", 0)
.Case("execve", 0)
.Case("dlopen", 0)
.Default(UINT_MAX);
if (ArgNum == UINT_MAX || CE->getNumArgs() < (ArgNum + 1))
return false;
if (generateReportIfTainted(CE->getArg(ArgNum),
MsgSanitizeSystemArgs, C))
return true;
return false;
}
// TODO: Should this check be a part of the CString checker?
// If yes, should taint be a global setting?
bool GenericTaintChecker::checkTaintedBufferSize(const CallExpr *CE,
const FunctionDecl *FDecl,
CheckerContext &C) const {
// If the function has a buffer size argument, set ArgNum.
unsigned ArgNum = InvalidArgIndex;
unsigned BId = 0;
if ( (BId = FDecl->getMemoryFunctionKind()) )
switch(BId) {
case Builtin::BImemcpy:
case Builtin::BImemmove:
case Builtin::BIstrncpy:
ArgNum = 2;
break;
case Builtin::BIstrndup:
ArgNum = 1;
break;
default:
break;
};
if (ArgNum == InvalidArgIndex) {
if (C.isCLibraryFunction(FDecl, "malloc") ||
C.isCLibraryFunction(FDecl, "calloc") ||
C.isCLibraryFunction(FDecl, "alloca"))
ArgNum = 0;
else if (C.isCLibraryFunction(FDecl, "memccpy"))
ArgNum = 3;
else if (C.isCLibraryFunction(FDecl, "realloc"))
ArgNum = 1;
else if (C.isCLibraryFunction(FDecl, "bcopy"))
ArgNum = 2;
}
if (ArgNum != InvalidArgIndex && CE->getNumArgs() > ArgNum &&
generateReportIfTainted(CE->getArg(ArgNum), MsgTaintedBufferSize, C))
return true;
return false;
}
void ento::registerGenericTaintChecker(CheckerManager &mgr) {
mgr.registerChecker<GenericTaintChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp | //==-- RetainCountChecker.cpp - Checks for leaks and other issues -*- C++ -*--//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the methods for RetainCountChecker, which implements
// a reference count checker for Core Foundation and Cocoa on Mac OS X.
//
//===----------------------------------------------------------------------===//
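// For illustration only (not part of the original source), a minimal sketch
// of a leak this checker reports under manual reference counting:
//
//   CFStringRef S = CFStringCreateWithCString(0, "hi", 0); // returns +1
//   // ... no CFRelease(S) on this path ...
//   return;                        // warn: potential leak of object 'S'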
#include "ClangSACheckers.h"
#include "AllocationDiagnostics.h"
#include "SelectorExtras.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ParentMap.h"
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/StaticAnalyzer/Checkers/ObjCRetainCount.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableList.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include <cstdarg>
using namespace clang;
using namespace ento;
using namespace objc_retain;
using llvm::StrInStrNoCase;
//===----------------------------------------------------------------------===//
// Adapters for FoldingSet.
//===----------------------------------------------------------------------===//
namespace llvm {
template <> struct FoldingSetTrait<ArgEffect> {
static inline void Profile(const ArgEffect X, FoldingSetNodeID &ID) {
ID.AddInteger((unsigned) X);
}
};
template <> struct FoldingSetTrait<RetEffect> {
static inline void Profile(const RetEffect &X, FoldingSetNodeID &ID) {
ID.AddInteger((unsigned) X.getKind());
ID.AddInteger((unsigned) X.getObjKind());
}
};
} // end llvm namespace
//===----------------------------------------------------------------------===//
// Reference-counting logic (typestate + counts).
//===----------------------------------------------------------------------===//
/// ArgEffects summarizes the effects of a function/method call on all of
/// its arguments.
typedef llvm::ImmutableMap<unsigned,ArgEffect> ArgEffects;
namespace {
class RefVal {
public:
enum Kind {
Owned = 0, // Owning reference.
    NotOwned,         // Reference is not owned but still valid (not freed).
Released, // Object has been released.
ReturnedOwned, // Returned object passes ownership to caller.
    ReturnedNotOwned, // Returned object does not pass ownership to caller.
ERROR_START,
ErrorDeallocNotOwned, // -dealloc called on non-owned object.
ErrorDeallocGC, // Calling -dealloc with GC enabled.
ErrorUseAfterRelease, // Object used after released.
ErrorReleaseNotOwned, // Release of an object that was not owned.
ERROR_LEAK_START,
ErrorLeak, // A memory leak due to excessive reference counts.
ErrorLeakReturned, // A memory leak due to the returning method not having
// the correct naming conventions.
ErrorGCLeakReturned,
ErrorOverAutorelease,
ErrorReturnedNotOwned
};
/// Tracks how an object referenced by an ivar has been used.
///
/// This accounts for us not knowing if an arbitrary ivar is supposed to be
/// stored at +0 or +1.
enum class IvarAccessHistory {
None,
AccessedDirectly,
ReleasedAfterDirectAccess
};
private:
/// The number of outstanding retains.
unsigned Cnt;
/// The number of outstanding autoreleases.
unsigned ACnt;
/// The (static) type of the object at the time we started tracking it.
QualType T;
/// The current state of the object.
///
/// See the RefVal::Kind enum for possible values.
unsigned RawKind : 5;
/// The kind of object being tracked (CF or ObjC), if known.
///
/// See the RetEffect::ObjKind enum for possible values.
unsigned RawObjectKind : 2;
/// True if the current state and/or retain count may turn out to not be the
/// best possible approximation of the reference counting state.
///
/// If true, the checker may decide to throw away ("override") this state
/// in favor of something else when it sees the object being used in new ways.
///
/// This setting should not be propagated to state derived from this state.
/// Once we start deriving new states, it would be inconsistent to override
/// them.
unsigned RawIvarAccessHistory : 2;
RefVal(Kind k, RetEffect::ObjKind o, unsigned cnt, unsigned acnt, QualType t,
IvarAccessHistory IvarAccess)
: Cnt(cnt), ACnt(acnt), T(t), RawKind(static_cast<unsigned>(k)),
RawObjectKind(static_cast<unsigned>(o)),
RawIvarAccessHistory(static_cast<unsigned>(IvarAccess)) {
assert(getKind() == k && "not enough bits for the kind");
assert(getObjKind() == o && "not enough bits for the object kind");
assert(getIvarAccessHistory() == IvarAccess && "not enough bits");
}
public:
Kind getKind() const { return static_cast<Kind>(RawKind); }
RetEffect::ObjKind getObjKind() const {
return static_cast<RetEffect::ObjKind>(RawObjectKind);
}
unsigned getCount() const { return Cnt; }
unsigned getAutoreleaseCount() const { return ACnt; }
unsigned getCombinedCounts() const { return Cnt + ACnt; }
void clearCounts() {
Cnt = 0;
ACnt = 0;
}
void setCount(unsigned i) {
Cnt = i;
}
void setAutoreleaseCount(unsigned i) {
ACnt = i;
}
QualType getType() const { return T; }
/// Returns what the analyzer knows about direct accesses to a particular
/// instance variable.
///
/// If the object with this refcount wasn't originally from an Objective-C
/// ivar region, this should always return IvarAccessHistory::None.
IvarAccessHistory getIvarAccessHistory() const {
return static_cast<IvarAccessHistory>(RawIvarAccessHistory);
}
bool isOwned() const {
return getKind() == Owned;
}
bool isNotOwned() const {
return getKind() == NotOwned;
}
bool isReturnedOwned() const {
return getKind() == ReturnedOwned;
}
bool isReturnedNotOwned() const {
return getKind() == ReturnedNotOwned;
}
/// Create a state for an object whose lifetime is the responsibility of the
/// current function, at least partially.
///
/// Most commonly, this is an owned object with a retain count of +1.
static RefVal makeOwned(RetEffect::ObjKind o, QualType t,
unsigned Count = 1) {
return RefVal(Owned, o, Count, 0, t, IvarAccessHistory::None);
}
/// Create a state for an object whose lifetime is not the responsibility of
/// the current function.
///
/// Most commonly, this is an unowned object with a retain count of +0.
static RefVal makeNotOwned(RetEffect::ObjKind o, QualType t,
unsigned Count = 0) {
return RefVal(NotOwned, o, Count, 0, t, IvarAccessHistory::None);
}
RefVal operator-(size_t i) const {
return RefVal(getKind(), getObjKind(), getCount() - i,
getAutoreleaseCount(), getType(), getIvarAccessHistory());
}
RefVal operator+(size_t i) const {
return RefVal(getKind(), getObjKind(), getCount() + i,
getAutoreleaseCount(), getType(), getIvarAccessHistory());
}
RefVal operator^(Kind k) const {
return RefVal(k, getObjKind(), getCount(), getAutoreleaseCount(),
getType(), getIvarAccessHistory());
}
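  // Illustrative usage (not in the original source): given a RefVal V,
  // 'V + 1' copies V with the retain count incremented, 'V - 1' decrements
  // it, and 'V ^ Released' copies V with its kind changed to Released.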
RefVal autorelease() const {
return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount()+1,
getType(), getIvarAccessHistory());
}
RefVal withIvarAccess() const {
assert(getIvarAccessHistory() == IvarAccessHistory::None);
return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount(),
getType(), IvarAccessHistory::AccessedDirectly);
}
RefVal releaseViaIvar() const {
assert(getIvarAccessHistory() == IvarAccessHistory::AccessedDirectly);
return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount(),
getType(), IvarAccessHistory::ReleasedAfterDirectAccess);
}
// Comparison, profiling, and pretty-printing.
bool hasSameState(const RefVal &X) const {
return getKind() == X.getKind() && Cnt == X.Cnt && ACnt == X.ACnt &&
getIvarAccessHistory() == X.getIvarAccessHistory();
}
bool operator==(const RefVal& X) const {
return T == X.T && hasSameState(X) && getObjKind() == X.getObjKind();
}
void Profile(llvm::FoldingSetNodeID& ID) const {
ID.Add(T);
ID.AddInteger(RawKind);
ID.AddInteger(Cnt);
ID.AddInteger(ACnt);
ID.AddInteger(RawObjectKind);
ID.AddInteger(RawIvarAccessHistory);
}
void print(raw_ostream &Out) const;
};
void RefVal::print(raw_ostream &Out) const {
if (!T.isNull())
Out << "Tracked " << T.getAsString() << '/';
switch (getKind()) {
default: llvm_unreachable("Invalid RefVal kind");
case Owned: {
Out << "Owned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
case NotOwned: {
Out << "NotOwned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
case ReturnedOwned: {
Out << "ReturnedOwned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
case ReturnedNotOwned: {
Out << "ReturnedNotOwned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
case Released:
Out << "Released";
break;
case ErrorDeallocGC:
Out << "-dealloc (GC)";
break;
case ErrorDeallocNotOwned:
Out << "-dealloc (not-owned)";
break;
case ErrorLeak:
Out << "Leaked";
break;
case ErrorLeakReturned:
Out << "Leaked (Bad naming)";
break;
case ErrorGCLeakReturned:
Out << "Leaked (GC-ed at return)";
break;
case ErrorUseAfterRelease:
Out << "Use-After-Release [ERROR]";
break;
case ErrorReleaseNotOwned:
Out << "Release of Not-Owned [ERROR]";
break;
case RefVal::ErrorOverAutorelease:
Out << "Over-autoreleased";
break;
case RefVal::ErrorReturnedNotOwned:
Out << "Non-owned object returned instead of owned";
break;
}
switch (getIvarAccessHistory()) {
case IvarAccessHistory::None:
break;
case IvarAccessHistory::AccessedDirectly:
Out << " [direct ivar access]";
break;
case IvarAccessHistory::ReleasedAfterDirectAccess:
Out << " [released after direct ivar access]";
}
if (ACnt) {
Out << " [autorelease -" << ACnt << ']';
}
}
} //end anonymous namespace
//===----------------------------------------------------------------------===//
// RefBindings - State used to track object reference counts.
//===----------------------------------------------------------------------===//
REGISTER_MAP_WITH_PROGRAMSTATE(RefBindings, SymbolRef, RefVal)
static inline const RefVal *getRefBinding(ProgramStateRef State,
SymbolRef Sym) {
return State->get<RefBindings>(Sym);
}
static inline ProgramStateRef setRefBinding(ProgramStateRef State,
SymbolRef Sym, RefVal Val) {
return State->set<RefBindings>(Sym, Val);
}
static ProgramStateRef removeRefBinding(ProgramStateRef State, SymbolRef Sym) {
return State->remove<RefBindings>(Sym);
}
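// Illustrative usage of the helpers above (not in the original source):
//   if (const RefVal *T = getRefBinding(State, Sym))
//     State = setRefBinding(State, Sym, *T + 1); // bump the retain count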
//===----------------------------------------------------------------------===//
// Function/Method behavior summaries.
//===----------------------------------------------------------------------===//
namespace {
class RetainSummary {
/// Args - a map of (index, ArgEffect) pairs, where index
/// specifies the argument (starting from 0). This can be sparsely
/// populated; arguments with no entry in Args use 'DefaultArgEffect'.
ArgEffects Args;
/// DefaultArgEffect - The default ArgEffect to apply to arguments that
/// do not have an entry in Args.
ArgEffect DefaultArgEffect;
/// Receiver - If this summary applies to an Objective-C message expression,
/// this is the effect applied to the state of the receiver.
ArgEffect Receiver;
/// Ret - The effect on the return value. Used to indicate if the
/// function/method call returns a new tracked symbol.
RetEffect Ret;
public:
RetainSummary(ArgEffects A, RetEffect R, ArgEffect defaultEff,
ArgEffect ReceiverEff)
: Args(A), DefaultArgEffect(defaultEff), Receiver(ReceiverEff), Ret(R) {}
/// getArg - Return the argument effect on the argument specified by
/// idx (starting from 0).
ArgEffect getArg(unsigned idx) const {
if (const ArgEffect *AE = Args.lookup(idx))
return *AE;
return DefaultArgEffect;
}
void addArg(ArgEffects::Factory &af, unsigned idx, ArgEffect e) {
Args = af.add(Args, idx, e);
}
/// setDefaultArgEffect - Set the default argument effect.
void setDefaultArgEffect(ArgEffect E) {
DefaultArgEffect = E;
}
/// getRetEffect - Returns the effect on the return value of the call.
RetEffect getRetEffect() const { return Ret; }
/// setRetEffect - Set the effect of the return value of the call.
void setRetEffect(RetEffect E) { Ret = E; }
/// Sets the effect on the receiver of the message.
void setReceiverEffect(ArgEffect e) { Receiver = e; }
/// getReceiverEffect - Returns the effect on the receiver of the call.
/// This is only meaningful if the summary applies to an ObjCMessageExpr*.
ArgEffect getReceiverEffect() const { return Receiver; }
/// Test if two retain summaries are identical. Note that merely equivalent
/// summaries are not necessarily identical (for example, if an explicit
/// argument effect matches the default effect).
bool operator==(const RetainSummary &Other) const {
return Args == Other.Args && DefaultArgEffect == Other.DefaultArgEffect &&
Receiver == Other.Receiver && Ret == Other.Ret;
}
/// Profile this summary for inclusion in a FoldingSet.
void Profile(llvm::FoldingSetNodeID& ID) const {
ID.Add(Args);
ID.Add(DefaultArgEffect);
ID.Add(Receiver);
ID.Add(Ret);
}
/// A retain summary is simple if it has no ArgEffects other than the default.
bool isSimple() const {
return Args.isEmpty();
}
private:
ArgEffects getArgEffects() const { return Args; }
ArgEffect getDefaultArgEffect() const { return DefaultArgEffect; }
friend class RetainSummaryManager;
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Data structures for constructing summaries.
//===----------------------------------------------------------------------===//
namespace {
class ObjCSummaryKey {
IdentifierInfo* II;
Selector S;
public:
ObjCSummaryKey(IdentifierInfo* ii, Selector s)
: II(ii), S(s) {}
ObjCSummaryKey(const ObjCInterfaceDecl *d, Selector s)
: II(d ? d->getIdentifier() : nullptr), S(s) {}
ObjCSummaryKey(Selector s)
: II(nullptr), S(s) {}
IdentifierInfo *getIdentifier() const { return II; }
Selector getSelector() const { return S; }
};
}
namespace llvm {
template <> struct DenseMapInfo<ObjCSummaryKey> {
static inline ObjCSummaryKey getEmptyKey() {
return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getEmptyKey(),
DenseMapInfo<Selector>::getEmptyKey());
}
static inline ObjCSummaryKey getTombstoneKey() {
return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getTombstoneKey(),
DenseMapInfo<Selector>::getTombstoneKey());
}
static unsigned getHashValue(const ObjCSummaryKey &V) {
typedef std::pair<IdentifierInfo*, Selector> PairTy;
return DenseMapInfo<PairTy>::getHashValue(PairTy(V.getIdentifier(),
V.getSelector()));
}
static bool isEqual(const ObjCSummaryKey& LHS, const ObjCSummaryKey& RHS) {
return LHS.getIdentifier() == RHS.getIdentifier() &&
LHS.getSelector() == RHS.getSelector();
}
};
} // end llvm namespace
namespace {
class ObjCSummaryCache {
typedef llvm::DenseMap<ObjCSummaryKey, const RetainSummary *> MapTy;
MapTy M;
public:
ObjCSummaryCache() {}
const RetainSummary * find(const ObjCInterfaceDecl *D, Selector S) {
// Do a lookup with the (D,S) pair. If we find a match return
// the iterator.
ObjCSummaryKey K(D, S);
MapTy::iterator I = M.find(K);
if (I != M.end())
return I->second;
if (!D)
return nullptr;
// Walk the super chain. If we find a hit with a parent, we'll end
// up returning that summary. We actually allow that key (null,S), as
// we cache summaries for the null ObjCInterfaceDecl* to allow us to
// generate initial summaries without having to worry about NSObject
// being declared.
// FIXME: We may change this at some point.
for (ObjCInterfaceDecl *C=D->getSuperClass() ;; C=C->getSuperClass()) {
if ((I = M.find(ObjCSummaryKey(C, S))) != M.end())
break;
if (!C)
return nullptr;
}
// Cache the summary with original key to make the next lookup faster
// and return the iterator.
const RetainSummary *Summ = I->second;
M[K] = Summ;
return Summ;
}
const RetainSummary *find(IdentifierInfo* II, Selector S) {
    // FIXME: Class method lookup. Right now we don't have a good way
    // of going between IdentifierInfo* and the class hierarchy.
MapTy::iterator I = M.find(ObjCSummaryKey(II, S));
if (I == M.end())
I = M.find(ObjCSummaryKey(S));
return I == M.end() ? nullptr : I->second;
}
const RetainSummary *& operator[](ObjCSummaryKey K) {
return M[K];
}
const RetainSummary *& operator[](Selector S) {
return M[ ObjCSummaryKey(S) ];
}
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Data structures for managing collections of summaries.
//===----------------------------------------------------------------------===//
namespace {
class RetainSummaryManager {
//==-----------------------------------------------------------------==//
// Typedefs.
//==-----------------------------------------------------------------==//
typedef llvm::DenseMap<const FunctionDecl*, const RetainSummary *>
FuncSummariesTy;
typedef ObjCSummaryCache ObjCMethodSummariesTy;
typedef llvm::FoldingSetNodeWrapper<RetainSummary> CachedSummaryNode;
//==-----------------------------------------------------------------==//
// Data.
//==-----------------------------------------------------------------==//
/// Ctx - The ASTContext object for the analyzed ASTs.
ASTContext &Ctx;
/// GCEnabled - Records whether or not the analyzed code runs in GC mode.
const bool GCEnabled;
/// Records whether or not the analyzed code runs in ARC mode.
const bool ARCEnabled;
/// FuncSummaries - A map from FunctionDecls to summaries.
FuncSummariesTy FuncSummaries;
/// ObjCClassMethodSummaries - A map from selectors (for instance methods)
/// to summaries.
ObjCMethodSummariesTy ObjCClassMethodSummaries;
/// ObjCMethodSummaries - A map from selectors to summaries.
ObjCMethodSummariesTy ObjCMethodSummaries;
/// BPAlloc - A BumpPtrAllocator used for allocating summaries, ArgEffects,
/// and all other data used by the checker.
llvm::BumpPtrAllocator BPAlloc;
/// AF - A factory for ArgEffects objects.
ArgEffects::Factory AF;
  /// ScratchArgs - A holding buffer for constructing ArgEffects.
ArgEffects ScratchArgs;
/// ObjCAllocRetE - Default return effect for methods returning Objective-C
/// objects.
RetEffect ObjCAllocRetE;
/// ObjCInitRetE - Default return effect for init methods returning
/// Objective-C objects.
RetEffect ObjCInitRetE;
/// SimpleSummaries - Used for uniquing summaries that don't have special
/// effects.
llvm::FoldingSet<CachedSummaryNode> SimpleSummaries;
//==-----------------------------------------------------------------==//
// Methods.
//==-----------------------------------------------------------------==//
/// getArgEffects - Returns a persistent ArgEffects object based on the
/// data in ScratchArgs.
ArgEffects getArgEffects();
enum UnaryFuncKind { cfretain, cfrelease, cfautorelease, cfmakecollectable };
const RetainSummary *getUnarySummary(const FunctionType* FT,
UnaryFuncKind func);
const RetainSummary *getCFSummaryCreateRule(const FunctionDecl *FD);
const RetainSummary *getCFSummaryGetRule(const FunctionDecl *FD);
const RetainSummary *getCFCreateGetRuleSummary(const FunctionDecl *FD);
const RetainSummary *getPersistentSummary(const RetainSummary &OldSumm);
const RetainSummary *getPersistentSummary(RetEffect RetEff,
ArgEffect ReceiverEff = DoNothing,
ArgEffect DefaultEff = MayEscape) {
RetainSummary Summ(getArgEffects(), RetEff, DefaultEff, ReceiverEff);
return getPersistentSummary(Summ);
}
const RetainSummary *getDoNothingSummary() {
return getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
}
const RetainSummary *getDefaultSummary() {
return getPersistentSummary(RetEffect::MakeNoRet(),
DoNothing, MayEscape);
}
const RetainSummary *getPersistentStopSummary() {
return getPersistentSummary(RetEffect::MakeNoRet(),
StopTracking, StopTracking);
}
void InitializeClassMethodSummaries();
void InitializeMethodSummaries();
private:
void addNSObjectClsMethSummary(Selector S, const RetainSummary *Summ) {
ObjCClassMethodSummaries[S] = Summ;
}
void addNSObjectMethSummary(Selector S, const RetainSummary *Summ) {
ObjCMethodSummaries[S] = Summ;
}
void addClassMethSummary(const char* Cls, const char* name,
const RetainSummary *Summ, bool isNullary = true) {
IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
Selector S = isNullary ? GetNullarySelector(name, Ctx)
: GetUnarySelector(name, Ctx);
ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
}
void addInstMethSummary(const char* Cls, const char* nullaryName,
const RetainSummary *Summ) {
IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
Selector S = GetNullarySelector(nullaryName, Ctx);
ObjCMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
}
void addMethodSummary(IdentifierInfo *ClsII, ObjCMethodSummariesTy &Summaries,
const RetainSummary *Summ, va_list argp) {
Selector S = getKeywordSelector(Ctx, argp);
Summaries[ObjCSummaryKey(ClsII, S)] = Summ;
}
void addInstMethSummary(const char* Cls, const RetainSummary * Summ, ...) {
va_list argp;
va_start(argp, Summ);
addMethodSummary(&Ctx.Idents.get(Cls), ObjCMethodSummaries, Summ, argp);
va_end(argp);
}
void addClsMethSummary(const char* Cls, const RetainSummary * Summ, ...) {
va_list argp;
va_start(argp, Summ);
addMethodSummary(&Ctx.Idents.get(Cls),ObjCClassMethodSummaries, Summ, argp);
va_end(argp);
}
void addClsMethSummary(IdentifierInfo *II, const RetainSummary * Summ, ...) {
va_list argp;
va_start(argp, Summ);
addMethodSummary(II, ObjCClassMethodSummaries, Summ, argp);
va_end(argp);
}
public:
RetainSummaryManager(ASTContext &ctx, bool gcenabled, bool usesARC)
: Ctx(ctx),
GCEnabled(gcenabled),
ARCEnabled(usesARC),
AF(BPAlloc), ScratchArgs(AF.getEmptyMap()),
ObjCAllocRetE(gcenabled
? RetEffect::MakeGCNotOwned()
: (usesARC ? RetEffect::MakeNotOwned(RetEffect::ObjC)
: RetEffect::MakeOwned(RetEffect::ObjC, true))),
ObjCInitRetE(gcenabled
? RetEffect::MakeGCNotOwned()
: (usesARC ? RetEffect::MakeNotOwned(RetEffect::ObjC)
: RetEffect::MakeOwnedWhenTrackedReceiver())) {
InitializeClassMethodSummaries();
InitializeMethodSummaries();
}
const RetainSummary *getSummary(const CallEvent &Call,
ProgramStateRef State = nullptr);
const RetainSummary *getFunctionSummary(const FunctionDecl *FD);
const RetainSummary *getMethodSummary(Selector S, const ObjCInterfaceDecl *ID,
const ObjCMethodDecl *MD,
QualType RetTy,
ObjCMethodSummariesTy &CachedSummaries);
const RetainSummary *getInstanceMethodSummary(const ObjCMethodCall &M,
ProgramStateRef State);
const RetainSummary *getClassMethodSummary(const ObjCMethodCall &M) {
assert(!M.isInstanceMessage());
const ObjCInterfaceDecl *Class = M.getReceiverInterface();
return getMethodSummary(M.getSelector(), Class, M.getDecl(),
M.getResultType(), ObjCClassMethodSummaries);
}
/// getMethodSummary - This version of getMethodSummary is used to query
/// the summary for the current method being analyzed.
const RetainSummary *getMethodSummary(const ObjCMethodDecl *MD) {
const ObjCInterfaceDecl *ID = MD->getClassInterface();
Selector S = MD->getSelector();
QualType ResultTy = MD->getReturnType();
ObjCMethodSummariesTy *CachedSummaries;
if (MD->isInstanceMethod())
CachedSummaries = &ObjCMethodSummaries;
else
CachedSummaries = &ObjCClassMethodSummaries;
return getMethodSummary(S, ID, MD, ResultTy, *CachedSummaries);
}
const RetainSummary *getStandardMethodSummary(const ObjCMethodDecl *MD,
Selector S, QualType RetTy);
/// Determine if there is a special return effect for this function or method.
Optional<RetEffect> getRetEffectFromAnnotations(QualType RetTy,
const Decl *D);
void updateSummaryFromAnnotations(const RetainSummary *&Summ,
const ObjCMethodDecl *MD);
void updateSummaryFromAnnotations(const RetainSummary *&Summ,
const FunctionDecl *FD);
void updateSummaryForCall(const RetainSummary *&Summ,
const CallEvent &Call);
bool isGCEnabled() const { return GCEnabled; }
bool isARCEnabled() const { return ARCEnabled; }
bool isARCorGCEnabled() const { return GCEnabled || ARCEnabled; }
RetEffect getObjAllocRetEffect() const { return ObjCAllocRetE; }
friend class RetainSummaryTemplate;
};
// Used to avoid allocating long-term (BPAlloc'd) memory for default retain
// summaries. If a function or method looks like it has a default summary, but
// it has annotations, the annotations are added to the stack-based template
// and then copied into managed memory.
class RetainSummaryTemplate {
RetainSummaryManager &Manager;
const RetainSummary *&RealSummary;
RetainSummary ScratchSummary;
bool Accessed;
public:
RetainSummaryTemplate(const RetainSummary *&real, RetainSummaryManager &mgr)
: Manager(mgr), RealSummary(real), ScratchSummary(*real), Accessed(false) {}
~RetainSummaryTemplate() {
if (Accessed)
RealSummary = Manager.getPersistentSummary(ScratchSummary);
}
RetainSummary &operator*() {
Accessed = true;
return ScratchSummary;
}
RetainSummary *operator->() {
Accessed = true;
return &ScratchSummary;
}
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Implementation of checker data structures.
//===----------------------------------------------------------------------===//
ArgEffects RetainSummaryManager::getArgEffects() {
ArgEffects AE = ScratchArgs;
ScratchArgs = AF.getEmptyMap();
return AE;
}
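// Illustrative (not in the original source): callers populate ScratchArgs via
// AF.add(...) and then call getArgEffects() to snapshot and reset it, as
// updateSummaryForCall() does further below.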
const RetainSummary *
RetainSummaryManager::getPersistentSummary(const RetainSummary &OldSumm) {
// Unique "simple" summaries -- those without ArgEffects.
if (OldSumm.isSimple()) {
llvm::FoldingSetNodeID ID;
OldSumm.Profile(ID);
void *Pos;
CachedSummaryNode *N = SimpleSummaries.FindNodeOrInsertPos(ID, Pos);
if (!N) {
N = (CachedSummaryNode *) BPAlloc.Allocate<CachedSummaryNode>();
new (N) CachedSummaryNode(OldSumm);
SimpleSummaries.InsertNode(N, Pos);
}
return &N->getValue();
}
RetainSummary *Summ = (RetainSummary *) BPAlloc.Allocate<RetainSummary>();
new (Summ) RetainSummary(OldSumm);
return Summ;
}
//===----------------------------------------------------------------------===//
// Summary creation for functions (largely uses of Core Foundation).
//===----------------------------------------------------------------------===//
static bool isRetain(const FunctionDecl *FD, StringRef FName) {
return FName.endswith("Retain");
}
static bool isRelease(const FunctionDecl *FD, StringRef FName) {
return FName.endswith("Release");
}
static bool isAutorelease(const FunctionDecl *FD, StringRef FName) {
return FName.endswith("Autorelease");
}
static bool isMakeCollectable(const FunctionDecl *FD, StringRef FName) {
// FIXME: Remove FunctionDecl parameter.
// FIXME: Is it really okay if MakeCollectable isn't a suffix?
return FName.find("MakeCollectable") != StringRef::npos;
}
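// Illustrative (not in the original source): under these purely name-based
// heuristics, "CFRetain" matches isRetain and "CFAutorelease" matches
// isAutorelease; no semantic information about the callee is consulted.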
static ArgEffect getStopTrackingHardEquivalent(ArgEffect E) {
switch (E) {
case DoNothing:
case Autorelease:
case DecRefBridgedTransferred:
case IncRef:
case IncRefMsg:
case MakeCollectable:
case UnretainedOutParameter:
case RetainedOutParameter:
case MayEscape:
case StopTracking:
case StopTrackingHard:
return StopTrackingHard;
case DecRef:
case DecRefAndStopTrackingHard:
return DecRefAndStopTrackingHard;
case DecRefMsg:
case DecRefMsgAndStopTrackingHard:
return DecRefMsgAndStopTrackingHard;
case Dealloc:
return Dealloc;
}
llvm_unreachable("Unknown ArgEffect kind");
}
void RetainSummaryManager::updateSummaryForCall(const RetainSummary *&S,
const CallEvent &Call) {
if (Call.hasNonZeroCallbackArg()) {
ArgEffect RecEffect =
getStopTrackingHardEquivalent(S->getReceiverEffect());
ArgEffect DefEffect =
getStopTrackingHardEquivalent(S->getDefaultArgEffect());
ArgEffects CustomArgEffects = S->getArgEffects();
for (ArgEffects::iterator I = CustomArgEffects.begin(),
E = CustomArgEffects.end();
I != E; ++I) {
ArgEffect Translated = getStopTrackingHardEquivalent(I->second);
if (Translated != DefEffect)
ScratchArgs = AF.add(ScratchArgs, I->first, Translated);
}
RetEffect RE = RetEffect::MakeNoRetHard();
// Special cases where the callback argument CANNOT free the return value.
// This can generally only happen if we know that the callback will only be
// called when the return value is already being deallocated.
if (const SimpleFunctionCall *FC = dyn_cast<SimpleFunctionCall>(&Call)) {
if (IdentifierInfo *Name = FC->getDecl()->getIdentifier()) {
// When the CGBitmapContext is deallocated, the callback here will free
// the associated data buffer.
if (Name->isStr("CGBitmapContextCreateWithData"))
RE = S->getRetEffect();
}
}
S = getPersistentSummary(RE, RecEffect, DefEffect);
}
// Special case '[super init];' and '[self init];'
//
// Even though calling '[super init]' without assigning the result to self
// and checking if the parent returns 'nil' is a bad pattern, it is common.
// Additionally, our Self Init checker already warns about it. To avoid
// overwhelming the user with messages from both checkers, we model the case
// of '[super init]', when it is not consumed by another expression, as if
// the call preserves the value of 'self'; essentially, assuming it can
// never fail and return 'nil'. Note that we don't want to just stop
// tracking the value, since we want the RetainCount checker to report
// leaks and use-after-free if the SelfInit checker is turned off.
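// An illustrative sketch of the pattern being modeled (hypothetical code):
//   - (instancetype)init {
//     [super init]; // result unconsumed; treated as preserving 'self'
//     return self;
//   }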
if (const ObjCMethodCall *MC = dyn_cast<ObjCMethodCall>(&Call)) {
if (MC->getMethodFamily() == OMF_init && MC->isReceiverSelfOrSuper()) {
// Check that the message is not consumed; if so, we know it will not be
// used in an assignment such as "self = [super init]".
const Expr *ME = MC->getOriginExpr();
const LocationContext *LCtx = MC->getLocationContext();
ParentMap &PM = LCtx->getAnalysisDeclContext()->getParentMap();
if (!PM.isConsumedExpr(ME)) {
RetainSummaryTemplate ModifiableSummaryTemplate(S, *this);
ModifiableSummaryTemplate->setReceiverEffect(DoNothing);
ModifiableSummaryTemplate->setRetEffect(RetEffect::MakeNoRet());
}
}
}
}
const RetainSummary *
RetainSummaryManager::getSummary(const CallEvent &Call,
ProgramStateRef State) {
const RetainSummary *Summ;
switch (Call.getKind()) {
case CE_Function:
Summ = getFunctionSummary(cast<SimpleFunctionCall>(Call).getDecl());
break;
case CE_CXXMember:
case CE_CXXMemberOperator:
case CE_Block:
case CE_CXXConstructor:
case CE_CXXDestructor:
case CE_CXXAllocator:
// FIXME: These calls are currently unsupported.
return getPersistentStopSummary();
case CE_ObjCMessage: {
const ObjCMethodCall &Msg = cast<ObjCMethodCall>(Call);
if (Msg.isInstanceMessage())
Summ = getInstanceMethodSummary(Msg, State);
else
Summ = getClassMethodSummary(Msg);
break;
}
}
updateSummaryForCall(Summ, Call);
assert(Summ && "Unknown call type?");
return Summ;
}
const RetainSummary *
RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) {
// If we don't know what function we're calling, use our default summary.
if (!FD)
return getDefaultSummary();
// Look up a summary in our cache of FunctionDecls -> Summaries.
FuncSummariesTy::iterator I = FuncSummaries.find(FD);
if (I != FuncSummaries.end())
return I->second;
// No summary? Generate one.
const RetainSummary *S = nullptr;
bool AllowAnnotations = true;
do {
// We generate "stop" summaries for implicitly defined functions.
if (FD->isImplicit()) {
S = getPersistentStopSummary();
break;
}
// [PR 3337] Use 'getAs<FunctionType>' to strip away any typedefs on the
// function's type.
const FunctionType* FT = FD->getType()->getAs<FunctionType>();
const IdentifierInfo *II = FD->getIdentifier();
if (!II)
break;
StringRef FName = II->getName();
// Strip away leading '_'. Doing this here will affect all the checks
// below.
FName = FName.substr(FName.find_first_not_of('_'));
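// For example, both "_CFRetain" and "__CFRetain" are treated as "CFRetain"
// by the name-based checks below.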
// Inspect the result type.
QualType RetTy = FT->getReturnType();
// FIXME: This should all be refactored into a chain of "summary lookup"
// filters.
assert(ScratchArgs.isEmpty());
if (FName == "pthread_create" || FName == "pthread_setspecific") {
// Part of: <rdar://problem/7299394> and <rdar://problem/11282706>.
// This will be addressed better with IPA.
S = getPersistentStopSummary();
} else if (FName == "NSMakeCollectable") {
// Handle: id NSMakeCollectable(CFTypeRef)
S = (RetTy->isObjCIdType())
? getUnarySummary(FT, cfmakecollectable)
: getPersistentStopSummary();
// The headers on OS X 10.8 use cf_consumed/ns_returns_retained,
// but we can fully model NSMakeCollectable ourselves.
AllowAnnotations = false;
} else if (FName == "CFPlugInInstanceCreate") {
S = getPersistentSummary(RetEffect::MakeNoRet());
} else if (FName == "IOBSDNameMatching" ||
FName == "IOServiceMatching" ||
FName == "IOServiceNameMatching" ||
FName == "IORegistryEntrySearchCFProperty" ||
FName == "IORegistryEntryIDMatching" ||
FName == "IOOpenFirmwarePathMatching") {
// Part of <rdar://problem/6961230>. (IOKit)
// This should be addressed using an API table.
S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
DoNothing, DoNothing);
} else if (FName == "IOServiceGetMatchingService" ||
FName == "IOServiceGetMatchingServices") {
// FIXES: <rdar://problem/6326900>
// This should be addressed using an API table. This string comparison is
// also a little gross, but there is no need to over-optimize here.
ScratchArgs = AF.add(ScratchArgs, 1, DecRef);
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
} else if (FName == "IOServiceAddNotification" ||
FName == "IOServiceAddMatchingNotification") {
// Part of <rdar://problem/6961230>. (IOKit)
// This should be addressed using an API table.
ScratchArgs = AF.add(ScratchArgs, 2, DecRef);
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
} else if (FName == "CVPixelBufferCreateWithBytes") {
// FIXES: <rdar://problem/7283567>
// Eventually this can be improved by recognizing that the pixel
// buffer passed to CVPixelBufferCreateWithBytes is released via
// a callback and doing full IPA to make sure this is done correctly.
// FIXME: This function has an out parameter that returns an
// allocated object.
ScratchArgs = AF.add(ScratchArgs, 7, StopTracking);
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
} else if (FName == "CGBitmapContextCreateWithData") {
// FIXES: <rdar://problem/7358899>
// Eventually this can be improved by recognizing that 'releaseInfo'
// passed to CGBitmapContextCreateWithData is released via
// a callback and doing full IPA to make sure this is done correctly.
ScratchArgs = AF.add(ScratchArgs, 8, StopTracking);
S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
DoNothing, DoNothing);
} else if (FName == "CVPixelBufferCreateWithPlanarBytes") {
// FIXES: <rdar://problem/7283567>
// Eventually this can be improved by recognizing that the pixel
// buffer passed to CVPixelBufferCreateWithPlanarBytes is released
// via a callback and doing full IPA to make sure this is done
// correctly.
ScratchArgs = AF.add(ScratchArgs, 12, StopTracking);
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
} else if (FName == "dispatch_set_context" ||
FName == "xpc_connection_set_context") {
// <rdar://problem/11059275> - The analyzer currently doesn't have
// a good way to reason about the finalizer function for libdispatch.
// If we pass a context object that is memory managed, stop tracking it.
// <rdar://problem/13783514> - Same problem, but for XPC.
// FIXME: this hack should possibly go away once we can handle
// libdispatch and XPC finalizers.
ScratchArgs = AF.add(ScratchArgs, 1, StopTracking);
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
} else if (FName.startswith("NSLog")) {
S = getDoNothingSummary();
} else if (FName.startswith("NS") &&
(FName.find("Insert") != StringRef::npos)) {
// Whitelist NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
// be deallocated by NSMapRemove. (radar://11152419)
ScratchArgs = AF.add(ScratchArgs, 1, StopTracking);
ScratchArgs = AF.add(ScratchArgs, 2, StopTracking);
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
}
// Did we get a summary?
if (S)
break;
if (RetTy->isPointerType()) {
// For CoreFoundation ('CF') types.
if (cocoa::isRefType(RetTy, "CF", FName)) {
if (isRetain(FD, FName)) {
S = getUnarySummary(FT, cfretain);
} else if (isAutorelease(FD, FName)) {
S = getUnarySummary(FT, cfautorelease);
// The headers use cf_consumed, but we can fully model CFAutorelease
// ourselves.
AllowAnnotations = false;
} else if (isMakeCollectable(FD, FName)) {
S = getUnarySummary(FT, cfmakecollectable);
AllowAnnotations = false;
} else {
S = getCFCreateGetRuleSummary(FD);
}
break;
}
// For CoreGraphics ('CG') types.
if (cocoa::isRefType(RetTy, "CG", FName)) {
if (isRetain(FD, FName))
S = getUnarySummary(FT, cfretain);
else
S = getCFCreateGetRuleSummary(FD);
break;
}
// For the Disk Arbitration API (DiskArbitration/DADisk.h)
if (cocoa::isRefType(RetTy, "DADisk") ||
cocoa::isRefType(RetTy, "DADissenter") ||
cocoa::isRefType(RetTy, "DASessionRef")) {
S = getCFCreateGetRuleSummary(FD);
break;
}
if (FD->hasAttr<CFAuditedTransferAttr>()) {
S = getCFCreateGetRuleSummary(FD);
break;
}
break;
}
// Check for release functions, the only kind of functions that we care
// about that don't return a pointer type.
if (FName[0] == 'C' && (FName[1] == 'F' || FName[1] == 'G')) {
// Test for 'CGCF'.
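// e.g. "CFRelease" -> "Release", "CGColorRelease" -> "ColorRelease", and a
// hypothetical "CGCFEqual" -> "Equal".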
FName = FName.substr(FName.startswith("CGCF") ? 4 : 2);
if (isRelease(FD, FName))
S = getUnarySummary(FT, cfrelease);
else {
assert(ScratchArgs.isEmpty());
// Remaining CoreFoundation and CoreGraphics functions.
// We used to assume that they all strictly followed the ownership idiom
// and that ownership cannot be transferred. While this is technically
// correct, many methods allow a tracked object to escape. For example:
//
// CFMutableDictionaryRef x = CFDictionaryCreateMutable(...);
// CFDictionaryAddValue(y, key, x);
// CFRelease(x);
// ... it is okay to use 'x' since 'y' has a reference to it
//
// We handle this and similar cases with the following heuristic. If the
// function name contains "InsertValue", "SetValue", "AddValue",
// "AppendValue", or "SetAttribute", then we assume that arguments may
// "escape." This means that something else holds on to the object,
// allowing it to be used even after its local retain count drops to 0.
ArgEffect E = (StrInStrNoCase(FName, "InsertValue") != StringRef::npos||
StrInStrNoCase(FName, "AddValue") != StringRef::npos ||
StrInStrNoCase(FName, "SetValue") != StringRef::npos ||
StrInStrNoCase(FName, "AppendValue") != StringRef::npos||
StrInStrNoCase(FName, "SetAttribute") != StringRef::npos)
? MayEscape : DoNothing;
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, E);
}
}
} while (0);
// If we got all the way here without any luck, use a default summary.
if (!S)
S = getDefaultSummary();
// Annotations override defaults.
if (AllowAnnotations)
updateSummaryFromAnnotations(S, FD);
FuncSummaries[FD] = S;
return S;
}
const RetainSummary *
RetainSummaryManager::getCFCreateGetRuleSummary(const FunctionDecl *FD) {
if (coreFoundation::followsCreateRule(FD))
return getCFSummaryCreateRule(FD);
return getCFSummaryGetRule(FD);
}
const RetainSummary *
RetainSummaryManager::getUnarySummary(const FunctionType* FT,
UnaryFuncKind func) {
// Sanity check that this is *really* a unary function. It may not be,
// if people do weird things.
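// For instance, a K&R-style declaration such as
//   extern CFTypeRef CFRetain(); // no prototype (hypothetical redeclaration)
// has no FunctionProtoType, so we conservatively return a stop summary.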
const FunctionProtoType* FTP = dyn_cast<FunctionProtoType>(FT);
if (!FTP || FTP->getNumParams() != 1)
return getPersistentStopSummary();
assert(ScratchArgs.isEmpty());
ArgEffect Effect;
switch (func) {
case cfretain: Effect = IncRef; break;
case cfrelease: Effect = DecRef; break;
case cfautorelease: Effect = Autorelease; break;
case cfmakecollectable: Effect = MakeCollectable; break;
}
ScratchArgs = AF.add(ScratchArgs, 0, Effect);
return getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
}
const RetainSummary *
RetainSummaryManager::getCFSummaryCreateRule(const FunctionDecl *FD) {
assert(ScratchArgs.isEmpty());
return getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
}
const RetainSummary *
RetainSummaryManager::getCFSummaryGetRule(const FunctionDecl *FD) {
assert(ScratchArgs.isEmpty());
return getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::CF),
DoNothing, DoNothing);
}
//===----------------------------------------------------------------------===//
// Summary creation for Selectors.
//===----------------------------------------------------------------------===//
Optional<RetEffect>
RetainSummaryManager::getRetEffectFromAnnotations(QualType RetTy,
const Decl *D) {
if (cocoa::isCocoaObjectRef(RetTy)) {
if (D->hasAttr<NSReturnsRetainedAttr>())
return ObjCAllocRetE;
if (D->hasAttr<NSReturnsNotRetainedAttr>() ||
D->hasAttr<NSReturnsAutoreleasedAttr>())
return RetEffect::MakeNotOwned(RetEffect::ObjC);
} else if (!RetTy->isPointerType()) {
return None;
}
if (D->hasAttr<CFReturnsRetainedAttr>())
return RetEffect::MakeOwned(RetEffect::CF, true);
if (D->hasAttr<CFReturnsNotRetainedAttr>())
return RetEffect::MakeNotOwned(RetEffect::CF);
return None;
}
void
RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
const FunctionDecl *FD) {
if (!FD)
return;
assert(Summ && "Must have a summary to add annotations to.");
RetainSummaryTemplate Template(Summ, *this);
// Effects on the parameters.
unsigned parm_idx = 0;
for (FunctionDecl::param_const_iterator pi = FD->param_begin(),
pe = FD->param_end(); pi != pe; ++pi, ++parm_idx) {
const ParmVarDecl *pd = *pi;
if (pd->hasAttr<NSConsumedAttr>())
Template->addArg(AF, parm_idx, DecRefMsg);
else if (pd->hasAttr<CFConsumedAttr>())
Template->addArg(AF, parm_idx, DecRef);
else if (pd->hasAttr<CFReturnsRetainedAttr>()) {
QualType PointeeTy = pd->getType()->getPointeeType();
if (!PointeeTy.isNull())
if (coreFoundation::isCFObjectRef(PointeeTy))
Template->addArg(AF, parm_idx, RetainedOutParameter);
} else if (pd->hasAttr<CFReturnsNotRetainedAttr>()) {
QualType PointeeTy = pd->getType()->getPointeeType();
if (!PointeeTy.isNull())
if (coreFoundation::isCFObjectRef(PointeeTy))
Template->addArg(AF, parm_idx, UnretainedOutParameter);
}
}
QualType RetTy = FD->getReturnType();
if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, FD))
Template->setRetEffect(*RetE);
}
void
RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
const ObjCMethodDecl *MD) {
if (!MD)
return;
assert(Summ && "Must have a valid summary to add annotations to");
RetainSummaryTemplate Template(Summ, *this);
// Effects on the receiver.
if (MD->hasAttr<NSConsumesSelfAttr>())
Template->setReceiverEffect(DecRefMsg);
// Effects on the parameters.
unsigned parm_idx = 0;
for (ObjCMethodDecl::param_const_iterator
pi=MD->param_begin(), pe=MD->param_end();
pi != pe; ++pi, ++parm_idx) {
const ParmVarDecl *pd = *pi;
if (pd->hasAttr<NSConsumedAttr>())
Template->addArg(AF, parm_idx, DecRefMsg);
else if (pd->hasAttr<CFConsumedAttr>()) {
Template->addArg(AF, parm_idx, DecRef);
} else if (pd->hasAttr<CFReturnsRetainedAttr>()) {
QualType PointeeTy = pd->getType()->getPointeeType();
if (!PointeeTy.isNull())
if (coreFoundation::isCFObjectRef(PointeeTy))
Template->addArg(AF, parm_idx, RetainedOutParameter);
} else if (pd->hasAttr<CFReturnsNotRetainedAttr>()) {
QualType PointeeTy = pd->getType()->getPointeeType();
if (!PointeeTy.isNull())
if (coreFoundation::isCFObjectRef(PointeeTy))
Template->addArg(AF, parm_idx, UnretainedOutParameter);
}
}
QualType RetTy = MD->getReturnType();
if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, MD))
Template->setRetEffect(*RetE);
}
const RetainSummary *
RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
Selector S, QualType RetTy) {
// Any special effects?
ArgEffect ReceiverEff = DoNothing;
RetEffect ResultEff = RetEffect::MakeNoRet();
// Check the method family, and apply any default annotations.
switch (MD ? MD->getMethodFamily() : S.getMethodFamily()) {
case OMF_None:
case OMF_initialize:
case OMF_performSelector:
// Assume all Objective-C methods follow Cocoa Memory Management rules.
// FIXME: Does the non-threaded performSelector family really belong here?
// The selector could be, say, @selector(copy).
if (cocoa::isCocoaObjectRef(RetTy))
ResultEff = RetEffect::MakeNotOwned(RetEffect::ObjC);
else if (coreFoundation::isCFObjectRef(RetTy)) {
// ObjCMethodDecl currently doesn't consider CF objects as valid return
// values for alloc, new, copy, or mutableCopy, so we have to
// double-check with the selector. This is ugly, but there aren't that
// many Objective-C methods that return CF objects, right?
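// A hypothetical example that takes the owned-CF branch below:
//   - (CGImageRef)copyImage; // 'copy' family returning a CF type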
if (MD) {
switch (S.getMethodFamily()) {
case OMF_alloc:
case OMF_new:
case OMF_copy:
case OMF_mutableCopy:
ResultEff = RetEffect::MakeOwned(RetEffect::CF, true);
break;
default:
ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);
break;
}
} else {
ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);
}
}
break;
case OMF_init:
ResultEff = ObjCInitRetE;
ReceiverEff = DecRefMsg;
break;
case OMF_alloc:
case OMF_new:
case OMF_copy:
case OMF_mutableCopy:
if (cocoa::isCocoaObjectRef(RetTy))
ResultEff = ObjCAllocRetE;
else if (coreFoundation::isCFObjectRef(RetTy))
ResultEff = RetEffect::MakeOwned(RetEffect::CF, true);
break;
case OMF_autorelease:
ReceiverEff = Autorelease;
break;
case OMF_retain:
ReceiverEff = IncRefMsg;
break;
case OMF_release:
ReceiverEff = DecRefMsg;
break;
case OMF_dealloc:
ReceiverEff = Dealloc;
break;
case OMF_self:
// -self is handled specially by the ExprEngine to propagate the receiver.
break;
case OMF_retainCount:
case OMF_finalize:
// These methods don't return objects.
break;
}
// If one of the arguments in the selector has the keyword 'delegate' we
// should stop tracking the reference count for the receiver. This is
// because the reference count is quite possibly handled by a delegate
// method.
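// Hypothetical selectors that trigger this heuristic include 'setDelegate:'
// and 'initWithTarget:delegate:', since a slot name ends with "delegate"
// (compared case-insensitively).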
if (S.isKeywordSelector()) {
for (unsigned i = 0, e = S.getNumArgs(); i != e; ++i) {
StringRef Slot = S.getNameForSlot(i);
if (Slot.endswith_lower("delegate")) {
if (ResultEff == ObjCInitRetE)
ResultEff = RetEffect::MakeNoRetHard();
else
ReceiverEff = StopTrackingHard;
}
}
}
if (ScratchArgs.isEmpty() && ReceiverEff == DoNothing &&
ResultEff.getKind() == RetEffect::NoRet)
return getDefaultSummary();
return getPersistentSummary(ResultEff, ReceiverEff, MayEscape);
}
const RetainSummary *
RetainSummaryManager::getInstanceMethodSummary(const ObjCMethodCall &Msg,
ProgramStateRef State) {
const ObjCInterfaceDecl *ReceiverClass = nullptr;
// We do better tracking of the type of the object than the core ExprEngine.
// See if we have its type in our private state.
// FIXME: Eventually replace the use of state->get<RefBindings> with
// a generic API for reasoning about the Objective-C types of symbolic
// objects.
SVal ReceiverV = Msg.getReceiverSVal();
if (SymbolRef Sym = ReceiverV.getAsLocSymbol())
if (const RefVal *T = getRefBinding(State, Sym))
if (const ObjCObjectPointerType *PT =
T->getType()->getAs<ObjCObjectPointerType>())
ReceiverClass = PT->getInterfaceDecl();
// If we don't know what kind of object this is, fall back to its static type.
if (!ReceiverClass)
ReceiverClass = Msg.getReceiverInterface();
// FIXME: The receiver could be a reference to a class, meaning that
// we should use the class method.
// id x = [NSObject class];
// [x performSelector:... withObject:... afterDelay:...];
Selector S = Msg.getSelector();
const ObjCMethodDecl *Method = Msg.getDecl();
if (!Method && ReceiverClass)
Method = ReceiverClass->getInstanceMethod(S);
return getMethodSummary(S, ReceiverClass, Method, Msg.getResultType(),
ObjCMethodSummaries);
}
const RetainSummary *
RetainSummaryManager::getMethodSummary(Selector S, const ObjCInterfaceDecl *ID,
const ObjCMethodDecl *MD, QualType RetTy,
ObjCMethodSummariesTy &CachedSummaries) {
// Look up a summary in our summary cache.
const RetainSummary *Summ = CachedSummaries.find(ID, S);
if (!Summ) {
Summ = getStandardMethodSummary(MD, S, RetTy);
// Annotations override defaults.
updateSummaryFromAnnotations(Summ, MD);
// Memoize the summary.
CachedSummaries[ObjCSummaryKey(ID, S)] = Summ;
}
return Summ;
}
void RetainSummaryManager::InitializeClassMethodSummaries() {
assert(ScratchArgs.isEmpty());
// Create the [NSAssertionHandler currentHandler] summary.
addClassMethSummary("NSAssertionHandler", "currentHandler",
getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC)));
// Create the [NSAutoreleasePool addObject:] summary.
ScratchArgs = AF.add(ScratchArgs, 0, Autorelease);
addClassMethSummary("NSAutoreleasePool", "addObject",
getPersistentSummary(RetEffect::MakeNoRet(),
DoNothing, Autorelease));
}
void RetainSummaryManager::InitializeMethodSummaries() {
assert(ScratchArgs.isEmpty());
// Create the "init" selector. It just acts as a pass-through for the
// receiver.
const RetainSummary *InitSumm = getPersistentSummary(ObjCInitRetE, DecRefMsg);
addNSObjectMethSummary(GetNullarySelector("init", Ctx), InitSumm);
// awakeAfterUsingCoder: behaves basically like an 'init' method. It
// claims the receiver and returns a retained object.
addNSObjectMethSummary(GetUnarySelector("awakeAfterUsingCoder", Ctx),
InitSumm);
// The next methods are allocators.
const RetainSummary *AllocSumm = getPersistentSummary(ObjCAllocRetE);
const RetainSummary *CFAllocSumm =
getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
// Create the "retain" selector.
RetEffect NoRet = RetEffect::MakeNoRet();
const RetainSummary *Summ = getPersistentSummary(NoRet, IncRefMsg);
addNSObjectMethSummary(GetNullarySelector("retain", Ctx), Summ);
// Create the "release" selector.
Summ = getPersistentSummary(NoRet, DecRefMsg);
addNSObjectMethSummary(GetNullarySelector("release", Ctx), Summ);
// Create the -dealloc summary.
Summ = getPersistentSummary(NoRet, Dealloc);
addNSObjectMethSummary(GetNullarySelector("dealloc", Ctx), Summ);
// Create the "autorelease" selector.
Summ = getPersistentSummary(NoRet, Autorelease);
addNSObjectMethSummary(GetNullarySelector("autorelease", Ctx), Summ);
// For NSWindow, allocated objects are (initially) self-owned.
// FIXME: For now we opt for false negatives with NSWindow, as these objects
// self-own themselves. However, they only do this once they are displayed.
// Thus, we need to track an NSWindow's display status.
// This is tracked in <rdar://problem/6062711>.
// See also http://llvm.org/bugs/show_bug.cgi?id=3714.
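// Hypothetical example of the false negative we accept for now:
//   NSWindow *w = [[NSWindow alloc] initWithContentRect:...]; // never shown
// 'w' is not tracked, so a leak of an undisplayed window goes unreported.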
const RetainSummary *NoTrackYet = getPersistentSummary(RetEffect::MakeNoRet(),
StopTracking,
StopTracking);
addClassMethSummary("NSWindow", "alloc", NoTrackYet);
// For NSPanel (which subclasses NSWindow), allocated objects are not
// self-owned.
// FIXME: For now we don't track NSPanel objects, for the same reason
// as for NSWindow objects.
addClassMethSummary("NSPanel", "alloc", NoTrackYet);
// For NSNull, objects returned by +null are singletons that ignore
// retain/release semantics. Just don't track them.
// <rdar://problem/12858915>
addClassMethSummary("NSNull", "null", NoTrackYet);
// Don't track allocated autorelease pools, as it is okay to prematurely
// exit a method.
addClassMethSummary("NSAutoreleasePool", "alloc", NoTrackYet);
addClassMethSummary("NSAutoreleasePool", "allocWithZone", NoTrackYet, false);
addClassMethSummary("NSAutoreleasePool", "new", NoTrackYet);
// Create summaries for QCRenderer/QCView -createSnapshotImageOfType:.
addInstMethSummary("QCRenderer", AllocSumm,
"createSnapshotImageOfType", nullptr);
addInstMethSummary("QCView", AllocSumm,
"createSnapshotImageOfType", nullptr);
// Create summaries for CIContext, 'createCGImage' and
// 'createCGLayerWithSize'. These objects are CF objects, and are not
// automatically garbage collected.
addInstMethSummary("CIContext", CFAllocSumm,
"createCGImage", "fromRect", nullptr);
addInstMethSummary("CIContext", CFAllocSumm, "createCGImage", "fromRect",
"format", "colorSpace", nullptr);
addInstMethSummary("CIContext", CFAllocSumm, "createCGLayerWithSize", "info",
nullptr);
}
//===----------------------------------------------------------------------===//
// Error reporting.
//===----------------------------------------------------------------------===//
namespace {
typedef llvm::DenseMap<const ExplodedNode *, const RetainSummary *>
SummaryLogTy;
//===-------------===//
// Bug Descriptions. //
//===-------------===//
class CFRefBug : public BugType {
protected:
CFRefBug(const CheckerBase *checker, StringRef name)
: BugType(checker, name, categories::MemoryCoreFoundationObjectiveC) {}
public:
// FIXME: Eventually remove.
virtual const char *getDescription() const = 0;
virtual bool isLeak() const { return false; }
};
class UseAfterRelease : public CFRefBug {
public:
UseAfterRelease(const CheckerBase *checker)
: CFRefBug(checker, "Use-after-release") {}
const char *getDescription() const override {
return "Reference-counted object is used after it is released";
}
};
class BadRelease : public CFRefBug {
public:
BadRelease(const CheckerBase *checker) : CFRefBug(checker, "Bad release") {}
const char *getDescription() const override {
return "Incorrect decrement of the reference count of an object that is "
"not owned at this point by the caller";
}
};
class DeallocGC : public CFRefBug {
public:
DeallocGC(const CheckerBase *checker)
: CFRefBug(checker, "-dealloc called while using garbage collection") {}
const char *getDescription() const override {
return "-dealloc called while using garbage collection";
}
};
class DeallocNotOwned : public CFRefBug {
public:
DeallocNotOwned(const CheckerBase *checker)
: CFRefBug(checker, "-dealloc sent to non-exclusively owned object") {}
const char *getDescription() const override {
return "-dealloc sent to object that may be referenced elsewhere";
}
};
class OverAutorelease : public CFRefBug {
public:
OverAutorelease(const CheckerBase *checker)
: CFRefBug(checker, "Object autoreleased too many times") {}
const char *getDescription() const override {
return "Object autoreleased too many times";
}
};
class ReturnedNotOwnedForOwned : public CFRefBug {
public:
ReturnedNotOwnedForOwned(const CheckerBase *checker)
: CFRefBug(checker, "Method should return an owned object") {}
const char *getDescription() const override {
return "Object with a +0 retain count returned to caller where a +1 "
"(owning) retain count is expected";
}
};
class Leak : public CFRefBug {
public:
Leak(const CheckerBase *checker, StringRef name) : CFRefBug(checker, name) {
// Leaks should not be reported if they are post-dominated by a sink.
setSuppressOnSink(true);
}
const char *getDescription() const override { return ""; }
bool isLeak() const override { return true; }
};
//===---------===//
// Bug Reports. //
//===---------===//
class CFRefReportVisitor : public BugReporterVisitorImpl<CFRefReportVisitor> {
protected:
SymbolRef Sym;
const SummaryLogTy &SummaryLog;
bool GCEnabled;
public:
CFRefReportVisitor(SymbolRef sym, bool gcEnabled, const SummaryLogTy &log)
: Sym(sym), SummaryLog(log), GCEnabled(gcEnabled) {}
void Profile(llvm::FoldingSetNodeID &ID) const override {
static int x = 0;
ID.AddPointer(&x);
ID.AddPointer(Sym);
}
PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) override;
std::unique_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
const ExplodedNode *N,
BugReport &BR) override;
};
class CFRefLeakReportVisitor : public CFRefReportVisitor {
public:
CFRefLeakReportVisitor(SymbolRef sym, bool GCEnabled,
const SummaryLogTy &log)
: CFRefReportVisitor(sym, GCEnabled, log) {}
std::unique_ptr<PathDiagnosticPiece> getEndPath(BugReporterContext &BRC,
const ExplodedNode *N,
BugReport &BR) override;
std::unique_ptr<BugReporterVisitor> clone() const override {
// The curiously-recurring template pattern only works for one level of
// subclassing. Rather than make a new template base for
// CFRefReportVisitor, we simply override clone() to do the right thing.
// This could be trouble someday if BugReporterVisitorImpl is ever
// used for something else besides a convenient implementation of clone().
return llvm::make_unique<CFRefLeakReportVisitor>(*this);
}
};
class CFRefReport : public BugReport {
void addGCModeDescription(const LangOptions &LOpts, bool GCEnabled);
public:
CFRefReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
bool registerVisitor = true)
: BugReport(D, D.getDescription(), n) {
if (registerVisitor)
addVisitor(llvm::make_unique<CFRefReportVisitor>(sym, GCEnabled, Log));
addGCModeDescription(LOpts, GCEnabled);
}
CFRefReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
StringRef endText)
: BugReport(D, D.getDescription(), endText, n) {
addVisitor(llvm::make_unique<CFRefReportVisitor>(sym, GCEnabled, Log));
addGCModeDescription(LOpts, GCEnabled);
}
llvm::iterator_range<ranges_iterator> getRanges() override {
const CFRefBug& BugTy = static_cast<CFRefBug&>(getBugType());
if (!BugTy.isLeak())
return BugReport::getRanges();
return llvm::make_range(ranges_iterator(), ranges_iterator());
}
};
class CFRefLeakReport : public CFRefReport {
const MemRegion* AllocBinding;
public:
CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
CheckerContext &Ctx,
bool IncludeAllocationLine);
PathDiagnosticLocation getLocation(const SourceManager &SM) const override {
assert(Location.isValid());
return Location;
}
};
} // end anonymous namespace
void CFRefReport::addGCModeDescription(const LangOptions &LOpts,
bool GCEnabled) {
const char *GCModeDescription = nullptr;
switch (LOpts.getGC()) {
case LangOptions::GCOnly:
assert(GCEnabled);
GCModeDescription = "Code is compiled to only use garbage collection";
break;
case LangOptions::NonGC:
assert(!GCEnabled);
GCModeDescription = "Code is compiled to use reference counts";
break;
case LangOptions::HybridGC:
if (GCEnabled) {
GCModeDescription = "Code is compiled to use either garbage collection "
"(GC) or reference counts (non-GC). The bug occurs "
"with GC enabled";
break;
} else {
GCModeDescription = "Code is compiled to use either garbage collection "
"(GC) or reference counts (non-GC). The bug occurs "
"in non-GC mode";
break;
}
}
assert(GCModeDescription && "invalid/unknown GC mode");
addExtraText(GCModeDescription);
}
static bool isNumericLiteralExpression(const Expr *E) {
// FIXME: This set of cases was copied from SemaExprObjC.
return isa<IntegerLiteral>(E) ||
isa<CharacterLiteral>(E) ||
isa<FloatingLiteral>(E) ||
isa<ObjCBoolLiteralExpr>(E) ||
isa<CXXBoolLiteralExpr>(E);
}
/// Returns true if this stack frame is for an Objective-C method that is a
/// property getter or setter whose body has been synthesized by the analyzer.
static bool isSynthesizedAccessor(const StackFrameContext *SFC) {
auto Method = dyn_cast_or_null<ObjCMethodDecl>(SFC->getDecl());
if (!Method || !Method->isPropertyAccessor())
return false;
return SFC->getAnalysisDeclContext()->isBodyAutosynthesized();
}
PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
const ExplodedNode *PrevN,
BugReporterContext &BRC,
BugReport &BR) {
// FIXME: We will eventually need to handle non-statement-based events
// (__attribute__((cleanup))).
if (!N->getLocation().getAs<StmtPoint>())
return nullptr;
// Check if the type state has changed.
ProgramStateRef PrevSt = PrevN->getState();
ProgramStateRef CurrSt = N->getState();
const LocationContext *LCtx = N->getLocationContext();
const RefVal* CurrT = getRefBinding(CurrSt, Sym);
if (!CurrT) return nullptr;
const RefVal &CurrV = *CurrT;
const RefVal *PrevT = getRefBinding(PrevSt, Sym);
// Create a string buffer to contain all the useful things we want
// to tell the user.
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
// This is the allocation site since the previous node had no bindings
// for this symbol.
if (!PrevT) {
const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
if (isa<ObjCIvarRefExpr>(S) &&
isSynthesizedAccessor(LCtx->getCurrentStackFrame())) {
S = LCtx->getCurrentStackFrame()->getCallSite();
}
if (isa<ObjCArrayLiteral>(S)) {
os << "NSArray literal is an object with a +0 retain count";
}
else if (isa<ObjCDictionaryLiteral>(S)) {
os << "NSDictionary literal is an object with a +0 retain count";
}
else if (const ObjCBoxedExpr *BL = dyn_cast<ObjCBoxedExpr>(S)) {
if (isNumericLiteralExpression(BL->getSubExpr()))
os << "NSNumber literal is an object with a +0 retain count";
else {
const ObjCInterfaceDecl *BoxClass = nullptr;
if (const ObjCMethodDecl *Method = BL->getBoxingMethod())
BoxClass = Method->getClassInterface();
// We should always be able to find the boxing class interface,
// but consider this future-proofing.
if (BoxClass)
os << *BoxClass << " b";
else
os << "B";
os << "oxed expression produces an object with a +0 retain count";
}
}
else if (isa<ObjCIvarRefExpr>(S)) {
os << "Object loaded from instance variable";
}
else {
if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
// Get the name of the callee (if it is available).
SVal X = CurrSt->getSValAsScalarOrLoc(CE->getCallee(), LCtx);
if (const FunctionDecl *FD = X.getAsFunctionDecl())
os << "Call to function '" << *FD << '\'';
else
os << "function call";
}
else {
assert(isa<ObjCMessageExpr>(S));
CallEventManager &Mgr = CurrSt->getStateManager().getCallEventManager();
CallEventRef<ObjCMethodCall> Call
= Mgr.getObjCMethodCall(cast<ObjCMessageExpr>(S), CurrSt, LCtx);
switch (Call->getMessageKind()) {
case OCM_Message:
os << "Method";
break;
case OCM_PropertyAccess:
os << "Property";
break;
case OCM_Subscript:
os << "Subscript";
break;
}
}
if (CurrV.getObjKind() == RetEffect::CF) {
os << " returns a Core Foundation object with a ";
}
else {
assert(CurrV.getObjKind() == RetEffect::ObjC);
os << " returns an Objective-C object with a ";
}
if (CurrV.isOwned()) {
os << "+1 retain count";
if (GCEnabled) {
assert(CurrV.getObjKind() == RetEffect::CF);
os << ". "
"Core Foundation objects are not automatically garbage collected.";
}
}
else {
assert(CurrV.isNotOwned());
os << "+0 retain count";
}
}
PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
N->getLocationContext());
return new PathDiagnosticEventPiece(Pos, os.str());
}
// Gather up the effects that were performed on the object at this
// program point
SmallVector<ArgEffect, 2> AEffects;
const ExplodedNode *OrigNode = BRC.getNodeResolver().getOriginalNode(N);
if (const RetainSummary *Summ = SummaryLog.lookup(OrigNode)) {
// We only have summaries attached to nodes after evaluating CallExpr and
// ObjCMessageExprs.
const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
// Iterate through the parameter expressions and see if the symbol
// was ever passed as an argument.
unsigned i = 0;
for (CallExpr::const_arg_iterator AI=CE->arg_begin(), AE=CE->arg_end();
AI!=AE; ++AI, ++i) {
// Retrieve the value of the argument. Is it the symbol
// we are interested in?
if (CurrSt->getSValAsScalarOrLoc(*AI, LCtx).getAsLocSymbol() != Sym)
continue;
// We have an argument. Get the effect!
AEffects.push_back(Summ->getArg(i));
}
}
else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
if (const Expr *receiver = ME->getInstanceReceiver())
if (CurrSt->getSValAsScalarOrLoc(receiver, LCtx)
.getAsLocSymbol() == Sym) {
// The symbol we are tracking is the receiver.
AEffects.push_back(Summ->getReceiverEffect());
}
}
}
do {
// Get the previous type state.
RefVal PrevV = *PrevT;
// Specially handle -dealloc.
if (!GCEnabled && std::find(AEffects.begin(), AEffects.end(), Dealloc) !=
AEffects.end()) {
// Determine if the object's reference count was pushed to zero.
assert(!PrevV.hasSameState(CurrV) && "The state should have changed.");
// We may not have transitioned to 'release' if we hit an error.
// This case is handled elsewhere.
if (CurrV.getKind() == RefVal::Released) {
assert(CurrV.getCombinedCounts() == 0);
os << "Object released by directly sending the '-dealloc' message";
break;
}
}
// Specially handle CFMakeCollectable and friends.
if (std::find(AEffects.begin(), AEffects.end(), MakeCollectable) !=
AEffects.end()) {
// Get the name of the function.
const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
SVal X =
CurrSt->getSValAsScalarOrLoc(cast<CallExpr>(S)->getCallee(), LCtx);
const FunctionDecl *FD = X.getAsFunctionDecl();
if (GCEnabled) {
// Determine if the object's reference count was pushed to zero.
assert(!PrevV.hasSameState(CurrV) && "The state should have changed.");
os << "In GC mode a call to '" << *FD
<< "' decrements an object's retain count and registers the "
"object with the garbage collector. ";
if (CurrV.getKind() == RefVal::Released) {
assert(CurrV.getCount() == 0);
os << "Since it now has a 0 retain count the object can be "
"automatically collected by the garbage collector.";
}
else
os << "An object must have a 0 retain count to be garbage collected. "
"After this call its retain count is +" << CurrV.getCount()
<< '.';
}
else
os << "When GC is not enabled a call to '" << *FD
<< "' has no effect on its argument.";
// Nothing more to say.
break;
}
// Determine if the typestate has changed.
if (!PrevV.hasSameState(CurrV))
switch (CurrV.getKind()) {
case RefVal::Owned:
case RefVal::NotOwned:
if (PrevV.getCount() == CurrV.getCount()) {
// Did an autorelease message get sent?
if (PrevV.getAutoreleaseCount() == CurrV.getAutoreleaseCount())
return nullptr;
assert(PrevV.getAutoreleaseCount() < CurrV.getAutoreleaseCount());
os << "Object autoreleased";
break;
}
if (PrevV.getCount() > CurrV.getCount())
os << "Reference count decremented.";
else
os << "Reference count incremented.";
if (unsigned Count = CurrV.getCount())
os << " The object now has a +" << Count << " retain count.";
if (PrevV.getKind() == RefVal::Released) {
assert(GCEnabled && CurrV.getCount() > 0);
os << " The object is not eligible for garbage collection until "
"the retain count reaches 0 again.";
}
break;
case RefVal::Released:
if (CurrV.getIvarAccessHistory() ==
RefVal::IvarAccessHistory::ReleasedAfterDirectAccess &&
CurrV.getIvarAccessHistory() != PrevV.getIvarAccessHistory()) {
os << "Strong instance variable relinquished. ";
}
os << "Object released.";
break;
case RefVal::ReturnedOwned:
// Autoreleases can be applied after marking a node ReturnedOwned.
if (CurrV.getAutoreleaseCount())
return nullptr;
os << "Object returned to caller as an owning reference (single "
"retain count transferred to caller)";
break;
case RefVal::ReturnedNotOwned:
os << "Object returned to caller with a +0 retain count";
break;
default:
return nullptr;
}
// Emit any remaining diagnostics for the argument effects.
for (SmallVectorImpl<ArgEffect>::iterator I=AEffects.begin(),
E=AEffects.end(); I != E; ++I) {
// A bunch of things have alternate behavior under GC.
if (GCEnabled)
switch (*I) {
default: break;
case Autorelease:
os << "In GC mode an 'autorelease' has no effect.";
continue;
case IncRefMsg:
os << "In GC mode the 'retain' message has no effect.";
continue;
case DecRefMsg:
os << "In GC mode the 'release' message has no effect.";
continue;
}
}
} while (0);
if (os.str().empty())
return nullptr; // We have nothing to say!
const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
N->getLocationContext());
PathDiagnosticPiece *P = new PathDiagnosticEventPiece(Pos, os.str());
// Add the range by scanning the children of the statement for any bindings
// to Sym.
for (const Stmt *Child : S->children())
if (const Expr *Exp = dyn_cast_or_null<Expr>(Child))
if (CurrSt->getSValAsScalarOrLoc(Exp, LCtx).getAsLocSymbol() == Sym) {
P->addRange(Exp->getSourceRange());
break;
}
return P;
}
// Find the first node in the current function context that referred to the
// tracked symbol, and the memory location that the value was stored to. Note
// that the binding is only reported if the allocation occurred in the same
// function as the leak. The function can also return a location context,
// which should be treated as interesting.
struct AllocationInfo {
const ExplodedNode* N;
const MemRegion *R;
const LocationContext *InterestingMethodContext;
AllocationInfo(const ExplodedNode *InN,
const MemRegion *InR,
const LocationContext *InInterestingMethodContext) :
N(InN), R(InR), InterestingMethodContext(InInterestingMethodContext) {}
};
static AllocationInfo
GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
SymbolRef Sym) {
const ExplodedNode *AllocationNode = N;
const ExplodedNode *AllocationNodeInCurrentOrParentContext = N;
const MemRegion *FirstBinding = nullptr;
const LocationContext *LeakContext = N->getLocationContext();
// The location context of the init method called on the leaked object, if
// available.
const LocationContext *InitMethodContext = nullptr;
while (N) {
ProgramStateRef St = N->getState();
const LocationContext *NContext = N->getLocationContext();
if (!getRefBinding(St, Sym))
break;
StoreManager::FindUniqueBinding FB(Sym);
StateMgr.iterBindings(St, FB);
if (FB) {
const MemRegion *R = FB.getRegion();
const VarRegion *VR = R->getBaseRegion()->getAs<VarRegion>();
// Do not show local variables belonging to a function other than
// where the error is reported.
if (!VR || VR->getStackFrame() == LeakContext->getCurrentStackFrame())
FirstBinding = R;
}
// AllocationNode is the last node in which the symbol was tracked.
AllocationNode = N;
// AllocationNodeInCurrentOrParentContext is the last node in the current
// or parent context in which the symbol was tracked.
//
// Note that the allocation site might be in the parent context. For example,
// consider the case where an allocation happens in a block that captures a
// reference to it and that reference is overwritten/dropped by another call
// to the block.
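// Rough sketch of that scenario (hypothetical code):
//   __block id obj;
//   void (^b)(void) = ^{ obj = [[NSObject alloc] init]; };
//   b(); // allocates
//   b(); // overwrites 'obj'; the first object leaks in the parent context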
if (NContext == LeakContext || NContext->isParentOf(LeakContext))
AllocationNodeInCurrentOrParentContext = N;
// Find the last init that was called on the given symbol and store the
// init method's location context.
if (!InitMethodContext)
if (Optional<CallEnter> CEP = N->getLocation().getAs<CallEnter>()) {
const Stmt *CE = CEP->getCallExpr();
if (const ObjCMessageExpr *ME = dyn_cast_or_null<ObjCMessageExpr>(CE)) {
const Stmt *RecExpr = ME->getInstanceReceiver();
if (RecExpr) {
SVal RecV = St->getSVal(RecExpr, NContext);
if (ME->getMethodFamily() == OMF_init && RecV.getAsSymbol() == Sym)
InitMethodContext = CEP->getCalleeContext();
}
}
}
N = N->pred_empty() ? nullptr : *(N->pred_begin());
}
// If we are reporting a leak of the object that was allocated with alloc,
// mark its init method as interesting.
const LocationContext *InterestingMethodContext = nullptr;
if (InitMethodContext) {
const ProgramPoint AllocPP = AllocationNode->getLocation();
if (Optional<StmtPoint> SP = AllocPP.getAs<StmtPoint>())
if (const ObjCMessageExpr *ME = SP->getStmtAs<ObjCMessageExpr>())
if (ME->getMethodFamily() == OMF_alloc)
InterestingMethodContext = InitMethodContext;
}
// If allocation happened in a function different from the leak node context,
// do not report the binding.
assert(N && "Could not find allocation node");
if (N->getLocationContext() != LeakContext) {
FirstBinding = nullptr;
}
return AllocationInfo(AllocationNodeInCurrentOrParentContext,
FirstBinding,
InterestingMethodContext);
}
std::unique_ptr<PathDiagnosticPiece>
CFRefReportVisitor::getEndPath(BugReporterContext &BRC,
const ExplodedNode *EndN, BugReport &BR) {
BR.markInteresting(Sym);
return BugReporterVisitor::getDefaultEndPath(BRC, EndN, BR);
}
std::unique_ptr<PathDiagnosticPiece>
CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
const ExplodedNode *EndN, BugReport &BR) {
// Tell the BugReporterContext to report cases when the tracked symbol is
// assigned to different variables, etc.
BR.markInteresting(Sym);
// We are reporting a leak. Walk up the graph to get to the first node where
// the symbol appeared, and also get the first VarDecl that the tracked
// object is stored to.
AllocationInfo AllocI =
GetAllocationSite(BRC.getStateManager(), EndN, Sym);
const MemRegion* FirstBinding = AllocI.R;
BR.markInteresting(AllocI.InterestingMethodContext);
SourceManager& SM = BRC.getSourceManager();
// Compute an actual location for the leak. Sometimes a leak doesn't
// occur at an actual statement (e.g., transition between blocks; end
// of function) so we need to walk the graph and compute a real location.
const ExplodedNode *LeakN = EndN;
PathDiagnosticLocation L = PathDiagnosticLocation::createEndOfPath(LeakN, SM);
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
os << "Object leaked: ";
if (FirstBinding) {
os << "object allocated and stored into '"
<< FirstBinding->getString() << '\'';
}
else
os << "allocated object";
// Get the retain count.
const RefVal* RV = getRefBinding(EndN->getState(), Sym);
assert(RV);
if (RV->getKind() == RefVal::ErrorLeakReturned) {
// FIXME: Per comments in rdar://6320065, "create" only applies to CF
// objects. Only "copy", "alloc", "retain" and "new" transfer ownership
// to the caller for NS objects.
const Decl *D = &EndN->getCodeDecl();
os << (isa<ObjCMethodDecl>(D) ? " is returned from a method "
: " is returned from a function ");
if (D->hasAttr<CFReturnsNotRetainedAttr>())
os << "that is annotated as CF_RETURNS_NOT_RETAINED";
else if (D->hasAttr<NSReturnsNotRetainedAttr>())
os << "that is annotated as NS_RETURNS_NOT_RETAINED";
else {
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
os << "whose name ('" << MD->getSelector().getAsString()
<< "') does not start with 'copy', 'mutableCopy', 'alloc' or 'new'."
" This violates the naming convention rules"
" given in the Memory Management Guide for Cocoa";
}
else {
const FunctionDecl *FD = cast<FunctionDecl>(D);
os << "whose name ('" << *FD
<< "') does not contain 'Copy' or 'Create'. This violates the naming"
" convention rules given in the Memory Management Guide for Core"
" Foundation";
}
}
}
else if (RV->getKind() == RefVal::ErrorGCLeakReturned) {
const ObjCMethodDecl &MD = cast<ObjCMethodDecl>(EndN->getCodeDecl());
os << " and returned from method '" << MD.getSelector().getAsString()
<< "' is potentially leaked when using garbage collection. Callers "
"of this method do not expect a returned object with a +1 retain "
"count since they expect the object to be managed by the garbage "
"collector";
}
else
os << " is not referenced later in this execution path and has a retain "
"count of +" << RV->getCount();
return llvm::make_unique<PathDiagnosticEventPiece>(L, os.str());
}
CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
bool GCEnabled, const SummaryLogTy &Log,
ExplodedNode *n, SymbolRef sym,
CheckerContext &Ctx,
bool IncludeAllocationLine)
: CFRefReport(D, LOpts, GCEnabled, Log, n, sym, false) {
// Most bug reports are cached at the location where they occurred.
// With leaks, we want to unique them by the location where they were
// allocated, and only report a single path. To do this, we need to find
// the allocation site of a piece of tracked memory, which we do via a
// call to GetAllocationSite. This will walk the ExplodedGraph backwards.
// Note that this is *not* the trimmed graph; we are guaranteed, however,
// that all ancestor nodes that represent the allocation site have the
// same SourceLocation.
const ExplodedNode *AllocNode = nullptr;
const SourceManager& SMgr = Ctx.getSourceManager();
AllocationInfo AllocI =
GetAllocationSite(Ctx.getStateManager(), getErrorNode(), sym);
AllocNode = AllocI.N;
AllocBinding = AllocI.R;
markInteresting(AllocI.InterestingMethodContext);
// Get the SourceLocation for the allocation site.
// FIXME: This will crash the analyzer if an allocation comes from an
// implicit call (ex: a destructor call).
// (Currently there are no such allocations in Cocoa, though.)
const Stmt *AllocStmt = nullptr;
ProgramPoint P = AllocNode->getLocation();
if (Optional<CallExitEnd> Exit = P.getAs<CallExitEnd>())
AllocStmt = Exit->getCalleeContext()->getCallSite();
else
AllocStmt = P.castAs<PostStmt>().getStmt();
assert(AllocStmt && "Cannot find allocation statement");
PathDiagnosticLocation AllocLocation =
PathDiagnosticLocation::createBegin(AllocStmt, SMgr,
AllocNode->getLocationContext());
Location = AllocLocation;
// Set the uniqueing info, which will be used to unique the bug reports.
// Leaks should be uniqued on the allocation site.
UniqueingLocation = AllocLocation;
UniqueingDecl = AllocNode->getLocationContext()->getDecl();
// Fill in the description of the bug.
Description.clear();
llvm::raw_string_ostream os(Description);
os << "Potential leak ";
if (GCEnabled)
os << "(when using garbage collection) ";
os << "of an object";
if (AllocBinding) {
os << " stored into '" << AllocBinding->getString() << '\'';
if (IncludeAllocationLine) {
FullSourceLoc SL(AllocStmt->getLocStart(), Ctx.getSourceManager());
os << " (allocated on line " << SL.getSpellingLineNumber() << ")";
}
}
addVisitor(llvm::make_unique<CFRefLeakReportVisitor>(sym, GCEnabled, Log));
}
//===----------------------------------------------------------------------===//
// Main checker logic.
//===----------------------------------------------------------------------===//
namespace {
class RetainCountChecker
: public Checker< check::Bind,
check::DeadSymbols,
check::EndAnalysis,
check::EndFunction,
check::PostStmt<BlockExpr>,
check::PostStmt<CastExpr>,
check::PostStmt<ObjCArrayLiteral>,
check::PostStmt<ObjCDictionaryLiteral>,
check::PostStmt<ObjCBoxedExpr>,
check::PostStmt<ObjCIvarRefExpr>,
check::PostCall,
check::PreStmt<ReturnStmt>,
check::RegionChanges,
eval::Assume,
eval::Call > {
mutable std::unique_ptr<CFRefBug> useAfterRelease, releaseNotOwned;
mutable std::unique_ptr<CFRefBug> deallocGC, deallocNotOwned;
mutable std::unique_ptr<CFRefBug> overAutorelease, returnNotOwnedForOwned;
mutable std::unique_ptr<CFRefBug> leakWithinFunction, leakAtReturn;
mutable std::unique_ptr<CFRefBug> leakWithinFunctionGC, leakAtReturnGC;
typedef llvm::DenseMap<SymbolRef, const CheckerProgramPointTag *> SymbolTagMap;
// This map is only used to ensure proper deletion of any allocated tags.
mutable SymbolTagMap DeadSymbolTags;
mutable std::unique_ptr<RetainSummaryManager> Summaries;
mutable std::unique_ptr<RetainSummaryManager> SummariesGC;
mutable SummaryLogTy SummaryLog;
mutable bool ShouldResetSummaryLog;
/// Optional setting to indicate if leak reports should include
/// the allocation line.
mutable bool IncludeAllocationLine;
public:
RetainCountChecker(AnalyzerOptions &AO)
: ShouldResetSummaryLog(false),
IncludeAllocationLine(shouldIncludeAllocationSiteInLeakDiagnostics(AO)) {}
~RetainCountChecker() override { DeleteContainerSeconds(DeadSymbolTags); }
void checkEndAnalysis(ExplodedGraph &G, BugReporter &BR,
ExprEngine &Eng) const {
// FIXME: This is a hack to make sure the summary log gets cleared between
// analyses of different code bodies.
//
// Why is this necessary? Because a checker's lifetime is tied to a
// translation unit, but an ExplodedGraph's lifetime is just a code body.
// Once in a blue moon, a new ExplodedNode will have the same address as an
// old one with an associated summary, and the bug report visitor gets very
// confused. (To make things worse, the summary lifetime is currently also
// tied to a code body, so we get a crash instead of incorrect results.)
//
// Why is this a bad solution? Because if the lifetime of the ExplodedGraph
// changes, things will start going wrong again. Really the lifetime of this
// log needs to be tied to either the specific nodes in it or the entire
// ExplodedGraph, not to a specific part of the code being analyzed.
//
// (Also, having stateful local data means that the same checker can't be
// used from multiple threads, but a lot of checkers have incorrect
// assumptions about that anyway. So that wasn't a priority at the time of
// this fix.)
//
// This happens at the end of analysis, but bug reports are emitted /after/
// this point. So we can't just clear the summary log now. Instead, we mark
// that the next time we access the summary log, it should be cleared.
// If we never reset the summary log during /this/ code body analysis,
// there were no new summaries. There might still have been summaries from
// the /last/ analysis, so clear them out to make sure the bug report
// visitors don't get confused.
if (ShouldResetSummaryLog)
SummaryLog.clear();
ShouldResetSummaryLog = !SummaryLog.empty();
}
CFRefBug *getLeakWithinFunctionBug(const LangOptions &LOpts,
bool GCEnabled) const {
if (GCEnabled) {
if (!leakWithinFunctionGC)
leakWithinFunctionGC.reset(new Leak(this, "Leak of object when using "
"garbage collection"));
return leakWithinFunctionGC.get();
} else {
if (!leakWithinFunction) {
if (LOpts.getGC() == LangOptions::HybridGC) {
leakWithinFunction.reset(new Leak(this,
"Leak of object when not using "
"garbage collection (GC) in "
"dual GC/non-GC code"));
} else {
leakWithinFunction.reset(new Leak(this, "Leak"));
}
}
return leakWithinFunction.get();
}
}
CFRefBug *getLeakAtReturnBug(const LangOptions &LOpts, bool GCEnabled) const {
if (GCEnabled) {
if (!leakAtReturnGC)
leakAtReturnGC.reset(new Leak(this,
"Leak of returned object when using "
"garbage collection"));
return leakAtReturnGC.get();
} else {
if (!leakAtReturn) {
if (LOpts.getGC() == LangOptions::HybridGC) {
leakAtReturn.reset(new Leak(this,
"Leak of returned object when not using "
"garbage collection (GC) in dual "
"GC/non-GC code"));
} else {
leakAtReturn.reset(new Leak(this, "Leak of returned object"));
}
}
return leakAtReturn.get();
}
}
RetainSummaryManager &getSummaryManager(ASTContext &Ctx,
bool GCEnabled) const {
// FIXME: We don't support ARC being turned on and off during one analysis.
// (nor, for that matter, do we support changing ASTContexts)
bool ARCEnabled = (bool)Ctx.getLangOpts().ObjCAutoRefCount;
if (GCEnabled) {
if (!SummariesGC)
SummariesGC.reset(new RetainSummaryManager(Ctx, true, ARCEnabled));
else
assert(SummariesGC->isARCEnabled() == ARCEnabled);
return *SummariesGC;
} else {
if (!Summaries)
Summaries.reset(new RetainSummaryManager(Ctx, false, ARCEnabled));
else
assert(Summaries->isARCEnabled() == ARCEnabled);
return *Summaries;
}
}
RetainSummaryManager &getSummaryManager(CheckerContext &C) const {
return getSummaryManager(C.getASTContext(), C.isObjCGCEnabled());
}
void printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const override;
void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
void checkPostStmt(const CastExpr *CE, CheckerContext &C) const;
void checkPostStmt(const ObjCArrayLiteral *AL, CheckerContext &C) const;
void checkPostStmt(const ObjCDictionaryLiteral *DL, CheckerContext &C) const;
void checkPostStmt(const ObjCBoxedExpr *BE, CheckerContext &C) const;
void checkPostStmt(const ObjCIvarRefExpr *IRE, CheckerContext &C) const;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
void checkSummary(const RetainSummary &Summ, const CallEvent &Call,
CheckerContext &C) const;
void processSummaryOfInlined(const RetainSummary &Summ,
const CallEvent &Call,
CheckerContext &C) const;
bool evalCall(const CallExpr *CE, CheckerContext &C) const;
ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
bool Assumption) const;
ProgramStateRef
checkRegionChanges(ProgramStateRef state,
const InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
const CallEvent *Call) const;
bool wantsRegionChangeUpdate(ProgramStateRef state) const {
return true;
}
void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
void checkReturnWithRetEffect(const ReturnStmt *S, CheckerContext &C,
ExplodedNode *Pred, RetEffect RE, RefVal X,
SymbolRef Sym, ProgramStateRef state) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
void checkEndFunction(CheckerContext &C) const;
ProgramStateRef updateSymbol(ProgramStateRef state, SymbolRef sym,
RefVal V, ArgEffect E, RefVal::Kind &hasErr,
CheckerContext &C) const;
void processNonLeakError(ProgramStateRef St, SourceRange ErrorRange,
RefVal::Kind ErrorKind, SymbolRef Sym,
CheckerContext &C) const;
void processObjCLiterals(CheckerContext &C, const Expr *Ex) const;
const ProgramPointTag *getDeadSymbolTag(SymbolRef sym) const;
ProgramStateRef handleSymbolDeath(ProgramStateRef state,
SymbolRef sid, RefVal V,
SmallVectorImpl<SymbolRef> &Leaked) const;
ProgramStateRef
handleAutoreleaseCounts(ProgramStateRef state, ExplodedNode *Pred,
const ProgramPointTag *Tag, CheckerContext &Ctx,
SymbolRef Sym, RefVal V) const;
ExplodedNode *processLeaks(ProgramStateRef state,
SmallVectorImpl<SymbolRef> &Leaked,
CheckerContext &Ctx,
ExplodedNode *Pred = nullptr) const;
};
} // end anonymous namespace
namespace {
class StopTrackingCallback : public SymbolVisitor {
ProgramStateRef state;
public:
StopTrackingCallback(ProgramStateRef st) : state(st) {}
ProgramStateRef getState() const { return state; }
bool VisitSymbol(SymbolRef sym) override {
state = state->remove<RefBindings>(sym);
return true;
}
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Handle statements that may have an effect on refcounts.
//===----------------------------------------------------------------------===//
void RetainCountChecker::checkPostStmt(const BlockExpr *BE,
CheckerContext &C) const {
// Scan the captured variables for any object the retain count checker
// may be tracking.
if (!BE->getBlockDecl()->hasCaptures())
return;
ProgramStateRef state = C.getState();
const BlockDataRegion *R =
cast<BlockDataRegion>(state->getSVal(BE,
C.getLocationContext()).getAsRegion());
BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
E = R->referenced_vars_end();
if (I == E)
return;
// FIXME: For now we invalidate the tracking of all symbols passed to blocks
// via captured variables, even though captured variables result in a copy
// and in implicit increment/decrement of a retain count.
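// Hypothetical illustration: after
//   id obj = [[NSObject alloc] init];
//   void (^b)(void) = ^{ use(obj); }; // block capture
// the checker stops tracking 'obj' at the BlockExpr, even though the
// capture is just a copy with an implicit retain/release.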
SmallVector<const MemRegion*, 10> Regions;
const LocationContext *LC = C.getLocationContext();
MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
for ( ; I != E; ++I) {
const VarRegion *VR = I.getCapturedRegion();
if (VR->getSuperRegion() == R) {
VR = MemMgr.getVarRegion(VR->getDecl(), LC);
}
Regions.push_back(VR);
}
state =
state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
Regions.data() + Regions.size()).getState();
C.addTransition(state);
}
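// Illustrative sketch (not from the original source; 'use' and 'queue' are
// hypothetical): for a tracked +1 value captured by a block, e.g.
//   CFStringRef s = CFStringCreateWithCString(...); // Owned, +1
//   dispatch_async(queue, ^{ use(s); });
// the checker conservatively stops tracking 's' rather than model the
// implicit retain/release performed by the block copy.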
void RetainCountChecker::checkPostStmt(const CastExpr *CE,
CheckerContext &C) const {
const ObjCBridgedCastExpr *BE = dyn_cast<ObjCBridgedCastExpr>(CE);
if (!BE)
return;
ArgEffect AE = IncRef;
switch (BE->getBridgeKind()) {
case clang::OBC_Bridge:
// Do nothing.
return;
case clang::OBC_BridgeRetained:
AE = IncRef;
break;
case clang::OBC_BridgeTransfer:
AE = DecRefBridgedTransferred;
break;
}
ProgramStateRef state = C.getState();
SymbolRef Sym = state->getSVal(CE, C.getLocationContext()).getAsLocSymbol();
if (!Sym)
return;
const RefVal* T = getRefBinding(state, Sym);
if (!T)
return;
RefVal::Kind hasErr = (RefVal::Kind) 0;
state = updateSymbol(state, Sym, *T, AE, hasErr, C);
if (hasErr) {
// FIXME: If we get an error during a bridge cast, should we report it?
return;
}
C.addTransition(state);
}
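// Hedged examples of the bridge kinds handled above (illustrative only):
//   id o1 = (__bridge id)cf; // OBC_Bridge: ownership unchanged
//   id o2 = (__bridge_transfer id)cf; // OBC_BridgeTransfer:
//   // DecRefBridgedTransferred, ARC assumes the +1
//   CFTypeRef c = (__bridge_retained CFTypeRef)obj; // OBC_BridgeRetained: IncRef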
void RetainCountChecker::processObjCLiterals(CheckerContext &C,
const Expr *Ex) const {
ProgramStateRef state = C.getState();
const ExplodedNode *pred = C.getPredecessor();
for (const Stmt *Child : Ex->children()) {
SVal V = state->getSVal(Child, pred->getLocationContext());
if (SymbolRef sym = V.getAsSymbol())
if (const RefVal* T = getRefBinding(state, sym)) {
RefVal::Kind hasErr = (RefVal::Kind) 0;
state = updateSymbol(state, sym, *T, MayEscape, hasErr, C);
if (hasErr) {
processNonLeakError(state, Child->getSourceRange(), hasErr, sym, C);
return;
}
}
}
// Return the object as autoreleased.
// RetEffect RE = RetEffect::MakeNotOwned(RetEffect::ObjC);
if (SymbolRef sym =
state->getSVal(Ex, pred->getLocationContext()).getAsSymbol()) {
QualType ResultTy = Ex->getType();
state = setRefBinding(state, sym,
RefVal::makeNotOwned(RetEffect::ObjC, ResultTy));
}
C.addTransition(state);
}
void RetainCountChecker::checkPostStmt(const ObjCArrayLiteral *AL,
CheckerContext &C) const {
// Apply the 'MayEscape' effect to all values.
processObjCLiterals(C, AL);
}
void RetainCountChecker::checkPostStmt(const ObjCDictionaryLiteral *DL,
CheckerContext &C) const {
// Apply the 'MayEscape' effect to all keys and values.
processObjCLiterals(C, DL);
}
void RetainCountChecker::checkPostStmt(const ObjCBoxedExpr *Ex,
CheckerContext &C) const {
const ExplodedNode *Pred = C.getPredecessor();
const LocationContext *LCtx = Pred->getLocationContext();
ProgramStateRef State = Pred->getState();
if (SymbolRef Sym = State->getSVal(Ex, LCtx).getAsSymbol()) {
QualType ResultTy = Ex->getType();
State = setRefBinding(State, Sym,
RefVal::makeNotOwned(RetEffect::ObjC, ResultTy));
}
C.addTransition(State);
}
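// Illustrative binding (sketch): for 'NSNumber *n = @(42);' the boxed
// result is tracked as not owned (+0), matching the autoreleased convention
// used for the ObjC literals above.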
static bool wasLoadedFromIvar(SymbolRef Sym) {
if (auto DerivedVal = dyn_cast<SymbolDerived>(Sym))
return isa<ObjCIvarRegion>(DerivedVal->getRegion());
if (auto RegionVal = dyn_cast<SymbolRegionValue>(Sym))
return isa<ObjCIvarRegion>(RegionVal->getRegion());
return false;
}
void RetainCountChecker::checkPostStmt(const ObjCIvarRefExpr *IRE,
CheckerContext &C) const {
Optional<Loc> IVarLoc = C.getSVal(IRE).getAs<Loc>();
if (!IVarLoc)
return;
ProgramStateRef State = C.getState();
SymbolRef Sym = State->getSVal(*IVarLoc).getAsSymbol();
if (!Sym || !wasLoadedFromIvar(Sym))
return;
// Accessing an ivar directly is unusual. If we've done that, be more
// forgiving about what the surrounding code is allowed to do.
QualType Ty = Sym->getType();
RetEffect::ObjKind Kind;
if (Ty->isObjCRetainableType())
Kind = RetEffect::ObjC;
else if (coreFoundation::isCFObjectRef(Ty))
Kind = RetEffect::CF;
else
return;
// If the value is already known to be nil, don't bother tracking it.
ConstraintManager &CMgr = State->getConstraintManager();
if (CMgr.isNull(State, Sym).isConstrainedTrue())
return;
if (const RefVal *RV = getRefBinding(State, Sym)) {
// If we've seen this symbol before, or we're only seeing it now because
// of something the analyzer has synthesized, don't do anything.
if (RV->getIvarAccessHistory() != RefVal::IvarAccessHistory::None ||
isSynthesizedAccessor(C.getStackFrame())) {
return;
}
// Note that this value has been loaded from an ivar.
C.addTransition(setRefBinding(State, Sym, RV->withIvarAccess()));
return;
}
RefVal PlusZero = RefVal::makeNotOwned(Kind, Ty);
// In a synthesized accessor, the effective retain count is +0.
if (isSynthesizedAccessor(C.getStackFrame())) {
C.addTransition(setRefBinding(State, Sym, PlusZero));
return;
}
State = setRefBinding(State, Sym, PlusZero.withIvarAccess());
C.addTransition(State);
}
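// Illustrative case (sketch): a direct ivar load such as '_contentView' in
//   [_contentView removeFromSuperview];
// introduces the value at +0 with IvarAccessHistory::AccessedDirectly, which
// later relaxes over-release and leak diagnostics for that symbol.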
void RetainCountChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
RetainSummaryManager &Summaries = getSummaryManager(C);
const RetainSummary *Summ = Summaries.getSummary(Call, C.getState());
if (C.wasInlined) {
processSummaryOfInlined(*Summ, Call, C);
return;
}
checkSummary(*Summ, Call, C);
}
/// GetReturnType - Used to get the return type of a message expression or
/// function call with the intention of affixing that type to a tracked symbol.
/// While the return type can be queried directly from RetEx, when
/// invoking class methods we augment the return type to be that of
/// a pointer to the class (as opposed to it just being id).
// FIXME: We may be able to do this with related result types instead.
// This function is probably overestimating.
static QualType GetReturnType(const Expr *RetE, ASTContext &Ctx) {
QualType RetTy = RetE->getType();
// If RetE is not a message expression just return its type.
// If RetE is a message expression, return its type if it is something
// more specific than id.
if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(RetE))
if (const ObjCObjectPointerType *PT = RetTy->getAs<ObjCObjectPointerType>())
if (PT->isObjCQualifiedIdType() || PT->isObjCIdType() ||
PT->isObjCClassType()) {
// At this point we know the return type of the message expression is
// id, id<...>, or Class. If we have an ObjCInterfaceDecl, we know this
// is a call to a class method whose type we can resolve. In such
// cases, promote the return type to XXX* (where XXX is the class).
const ObjCInterfaceDecl *D = ME->getReceiverInterface();
return !D ? RetTy :
Ctx.getObjCObjectPointerType(Ctx.getObjCInterfaceType(D));
}
return RetTy;
}
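// Hypothetical example of the promotion above: in
//   NSString *s = [NSString alloc];
// the message expression's static type is 'id', but the receiver interface
// is known to be NSString, so GetReturnType yields 'NSString *'.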
// We don't always get the exact modeling of the function with regards to the
// retain count checker even when the function is inlined. For example, we need
// to stop tracking the symbols which were marked with StopTrackingHard.
void RetainCountChecker::processSummaryOfInlined(const RetainSummary &Summ,
const CallEvent &CallOrMsg,
CheckerContext &C) const {
ProgramStateRef state = C.getState();
// Evaluate the effect of the arguments.
for (unsigned idx = 0, e = CallOrMsg.getNumArgs(); idx != e; ++idx) {
if (Summ.getArg(idx) == StopTrackingHard) {
SVal V = CallOrMsg.getArgSVal(idx);
if (SymbolRef Sym = V.getAsLocSymbol()) {
state = removeRefBinding(state, Sym);
}
}
}
// Evaluate the effect on the message receiver.
const ObjCMethodCall *MsgInvocation = dyn_cast<ObjCMethodCall>(&CallOrMsg);
if (MsgInvocation) {
if (SymbolRef Sym = MsgInvocation->getReceiverSVal().getAsLocSymbol()) {
if (Summ.getReceiverEffect() == StopTrackingHard) {
state = removeRefBinding(state, Sym);
}
}
}
// Consult the summary for the return value.
RetEffect RE = Summ.getRetEffect();
if (RE.getKind() == RetEffect::NoRetHard) {
SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
if (Sym)
state = removeRefBinding(state, Sym);
}
C.addTransition(state);
}
static ProgramStateRef updateOutParameter(ProgramStateRef State,
SVal ArgVal,
ArgEffect Effect) {
auto *ArgRegion = dyn_cast_or_null<TypedValueRegion>(ArgVal.getAsRegion());
if (!ArgRegion)
return State;
QualType PointeeTy = ArgRegion->getValueType();
if (!coreFoundation::isCFObjectRef(PointeeTy))
return State;
SVal PointeeVal = State->getSVal(ArgRegion);
SymbolRef Pointee = PointeeVal.getAsLocSymbol();
if (!Pointee)
return State;
switch (Effect) {
case UnretainedOutParameter:
State = setRefBinding(State, Pointee,
RefVal::makeNotOwned(RetEffect::CF, PointeeTy));
break;
case RetainedOutParameter:
// Do nothing. Retained out parameters will either point to a +1 reference
// or NULL, but the way you check for failure differs depending on the API.
// Consequently, we don't have a good way to track them yet.
break;
default:
llvm_unreachable("only for out parameters");
}
return State;
}
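// Hedged sketch of the out-parameter handling above ('SomeCFCreate' is a
// hypothetical annotated function, not a real API):
//   CFErrorRef err;
//   SomeCFCreate(..., &err); // UnretainedOutParameter on '&err'
// binds the pointee symbol at +0 via RefVal::makeNotOwned; a
// RetainedOutParameter pointee is deliberately left untracked because
// failure conventions differ across APIs.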
void RetainCountChecker::checkSummary(const RetainSummary &Summ,
const CallEvent &CallOrMsg,
CheckerContext &C) const {
ProgramStateRef state = C.getState();
// Evaluate the effect of the arguments.
RefVal::Kind hasErr = (RefVal::Kind) 0;
SourceRange ErrorRange;
SymbolRef ErrorSym = nullptr;
for (unsigned idx = 0, e = CallOrMsg.getNumArgs(); idx != e; ++idx) {
SVal V = CallOrMsg.getArgSVal(idx);
ArgEffect Effect = Summ.getArg(idx);
if (Effect == RetainedOutParameter || Effect == UnretainedOutParameter) {
state = updateOutParameter(state, V, Effect);
} else if (SymbolRef Sym = V.getAsLocSymbol()) {
if (const RefVal *T = getRefBinding(state, Sym)) {
state = updateSymbol(state, Sym, *T, Effect, hasErr, C);
if (hasErr) {
ErrorRange = CallOrMsg.getArgSourceRange(idx);
ErrorSym = Sym;
break;
}
}
}
}
// Evaluate the effect on the message receiver.
bool ReceiverIsTracked = false;
if (!hasErr) {
const ObjCMethodCall *MsgInvocation = dyn_cast<ObjCMethodCall>(&CallOrMsg);
if (MsgInvocation) {
if (SymbolRef Sym = MsgInvocation->getReceiverSVal().getAsLocSymbol()) {
if (const RefVal *T = getRefBinding(state, Sym)) {
ReceiverIsTracked = true;
state = updateSymbol(state, Sym, *T, Summ.getReceiverEffect(),
hasErr, C);
if (hasErr) {
ErrorRange = MsgInvocation->getOriginExpr()->getReceiverRange();
ErrorSym = Sym;
}
}
}
}
}
// Process any errors.
if (hasErr) {
processNonLeakError(state, ErrorRange, hasErr, ErrorSym, C);
return;
}
// Consult the summary for the return value.
RetEffect RE = Summ.getRetEffect();
if (RE.getKind() == RetEffect::OwnedWhenTrackedReceiver) {
if (ReceiverIsTracked)
RE = getSummaryManager(C).getObjAllocRetEffect();
else
RE = RetEffect::MakeNoRet();
}
switch (RE.getKind()) {
default:
llvm_unreachable("Unhandled RetEffect.");
case RetEffect::NoRet:
case RetEffect::NoRetHard:
// No work necessary.
break;
case RetEffect::OwnedAllocatedSymbol:
case RetEffect::OwnedSymbol: {
SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
if (!Sym)
break;
// Use the result type from the CallEvent as it automatically adjusts
// for methods/functions that return references.
QualType ResultTy = CallOrMsg.getResultType();
state = setRefBinding(state, Sym, RefVal::makeOwned(RE.getObjKind(),
ResultTy));
// FIXME: Add a flag to the checker where allocations are assumed to
// *not* fail.
break;
}
case RetEffect::GCNotOwnedSymbol:
case RetEffect::NotOwnedSymbol: {
const Expr *Ex = CallOrMsg.getOriginExpr();
SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
if (!Sym)
break;
assert(Ex);
// Use GetReturnType in order to give [NSFoo alloc] the type NSFoo *.
QualType ResultTy = GetReturnType(Ex, C.getASTContext());
state = setRefBinding(state, Sym, RefVal::makeNotOwned(RE.getObjKind(),
ResultTy));
break;
}
}
// This check is actually necessary; otherwise the statement builder thinks
// we've hit a previously-found path.
// Normally addTransition takes care of this, but we want the node pointer.
ExplodedNode *NewNode;
if (state == C.getState()) {
NewNode = C.getPredecessor();
} else {
NewNode = C.addTransition(state);
}
// Annotate the node with summary we used.
if (NewNode) {
// FIXME: This is ugly. See checkEndAnalysis for why it's necessary.
if (ShouldResetSummaryLog) {
SummaryLog.clear();
ShouldResetSummaryLog = false;
}
SummaryLog[NewNode] = &Summ;
}
}
ProgramStateRef
RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
RefVal V, ArgEffect E, RefVal::Kind &hasErr,
CheckerContext &C) const {
// In GC mode [... release] and [... retain] do nothing.
// In ARC mode they shouldn't exist at all, but we just ignore them.
bool IgnoreRetainMsg = C.isObjCGCEnabled();
if (!IgnoreRetainMsg)
IgnoreRetainMsg = (bool)C.getASTContext().getLangOpts().ObjCAutoRefCount;
switch (E) {
default:
break;
case IncRefMsg:
E = IgnoreRetainMsg ? DoNothing : IncRef;
break;
case DecRefMsg:
E = IgnoreRetainMsg ? DoNothing : DecRef;
break;
case DecRefMsgAndStopTrackingHard:
E = IgnoreRetainMsg ? StopTracking : DecRefAndStopTrackingHard;
break;
case MakeCollectable:
E = C.isObjCGCEnabled() ? DecRef : DoNothing;
break;
}
// Handle all use-after-releases.
if (!C.isObjCGCEnabled() && V.getKind() == RefVal::Released) {
V = V ^ RefVal::ErrorUseAfterRelease;
hasErr = V.getKind();
return setRefBinding(state, sym, V);
}
switch (E) {
case DecRefMsg:
case IncRefMsg:
case MakeCollectable:
case DecRefMsgAndStopTrackingHard:
llvm_unreachable("DecRefMsg/IncRefMsg/MakeCollectable already converted");
case UnretainedOutParameter:
case RetainedOutParameter:
llvm_unreachable("Applies to pointer-to-pointer parameters, which should "
"not have ref state.");
case Dealloc:
// Any use of -dealloc in GC is *bad*.
if (C.isObjCGCEnabled()) {
V = V ^ RefVal::ErrorDeallocGC;
hasErr = V.getKind();
break;
}
switch (V.getKind()) {
default:
llvm_unreachable("Invalid RefVal state for an explicit dealloc.");
case RefVal::Owned:
// The object immediately transitions to the released state.
V = V ^ RefVal::Released;
V.clearCounts();
return setRefBinding(state, sym, V);
case RefVal::NotOwned:
V = V ^ RefVal::ErrorDeallocNotOwned;
hasErr = V.getKind();
break;
}
break;
case MayEscape:
if (V.getKind() == RefVal::Owned) {
V = V ^ RefVal::NotOwned;
break;
}
// Fall-through.
case DoNothing:
return state;
case Autorelease:
if (C.isObjCGCEnabled())
return state;
// Update the autorelease counts.
V = V.autorelease();
break;
case StopTracking:
case StopTrackingHard:
return removeRefBinding(state, sym);
case IncRef:
switch (V.getKind()) {
default:
llvm_unreachable("Invalid RefVal state for a retain.");
case RefVal::Owned:
case RefVal::NotOwned:
V = V + 1;
break;
case RefVal::Released:
// Non-GC cases are handled above.
assert(C.isObjCGCEnabled());
V = (V ^ RefVal::Owned) + 1;
break;
}
break;
case DecRef:
case DecRefBridgedTransferred:
case DecRefAndStopTrackingHard:
switch (V.getKind()) {
default:
// case 'RefVal::Released' handled above.
llvm_unreachable("Invalid RefVal state for a release.");
case RefVal::Owned:
assert(V.getCount() > 0);
if (V.getCount() == 1) {
if (E == DecRefBridgedTransferred ||
V.getIvarAccessHistory() ==
RefVal::IvarAccessHistory::AccessedDirectly)
V = V ^ RefVal::NotOwned;
else
V = V ^ RefVal::Released;
} else if (E == DecRefAndStopTrackingHard) {
return removeRefBinding(state, sym);
}
V = V - 1;
break;
case RefVal::NotOwned:
if (V.getCount() > 0) {
if (E == DecRefAndStopTrackingHard)
return removeRefBinding(state, sym);
V = V - 1;
} else if (V.getIvarAccessHistory() ==
RefVal::IvarAccessHistory::AccessedDirectly) {
// Assume that the instance variable was holding on to the object at
// +1, and we just didn't know.
if (E == DecRefAndStopTrackingHard)
return removeRefBinding(state, sym);
V = V.releaseViaIvar() ^ RefVal::Released;
} else {
V = V ^ RefVal::ErrorReleaseNotOwned;
hasErr = V.getKind();
}
break;
case RefVal::Released:
// Non-GC cases are handled above.
assert(C.isObjCGCEnabled());
V = V ^ RefVal::ErrorUseAfterRelease;
hasErr = V.getKind();
break;
}
break;
}
return setRefBinding(state, sym, V);
}
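// A minimal worked trace of the transitions above (illustrative only):
//   CFStringRef s = CFStringCreateWithCString(...); // Owned, count = 1
//   CFRetain(s); // IncRef -> Owned, count = 2
//   CFRelease(s); // DecRef -> Owned, count = 1
//   CFRelease(s); // DecRef -> Released, count = 0
//   CFRelease(s); // release of a Released value -> ErrorUseAfterRelease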
void RetainCountChecker::processNonLeakError(ProgramStateRef St,
SourceRange ErrorRange,
RefVal::Kind ErrorKind,
SymbolRef Sym,
CheckerContext &C) const {
// HACK: Ignore retain-count issues on values accessed through ivars,
// because of cases like this:
// [_contentView retain];
// [_contentView removeFromSuperview];
// [self addSubview:_contentView]; // invalidates 'self'
// [_contentView release];
if (const RefVal *RV = getRefBinding(St, Sym))
if (RV->getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
return;
ExplodedNode *N = C.generateSink(St);
if (!N)
return;
CFRefBug *BT;
switch (ErrorKind) {
default:
llvm_unreachable("Unhandled error.");
case RefVal::ErrorUseAfterRelease:
if (!useAfterRelease)
useAfterRelease.reset(new UseAfterRelease(this));
BT = useAfterRelease.get();
break;
case RefVal::ErrorReleaseNotOwned:
if (!releaseNotOwned)
releaseNotOwned.reset(new BadRelease(this));
BT = releaseNotOwned.get();
break;
case RefVal::ErrorDeallocGC:
if (!deallocGC)
deallocGC.reset(new DeallocGC(this));
BT = deallocGC.get();
break;
case RefVal::ErrorDeallocNotOwned:
if (!deallocNotOwned)
deallocNotOwned.reset(new DeallocNotOwned(this));
BT = deallocNotOwned.get();
break;
}
assert(BT);
auto report = std::unique_ptr<BugReport>(
new CFRefReport(*BT, C.getASTContext().getLangOpts(), C.isObjCGCEnabled(),
SummaryLog, N, Sym));
report->addRange(ErrorRange);
C.emitReport(std::move(report));
}
//===----------------------------------------------------------------------===//
// Handle the return values of retain-count-related functions.
//===----------------------------------------------------------------------===//
bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
// Get the callee. We're only interested in simple C functions.
ProgramStateRef state = C.getState();
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return false;
IdentifierInfo *II = FD->getIdentifier();
if (!II)
return false;
// For now, we're only handling the functions that return aliases of their
// arguments: CFRetain and CFMakeCollectable (and their families).
// Eventually we should add other functions we can model entirely,
// such as CFRelease, which don't invalidate their arguments or globals.
if (CE->getNumArgs() != 1)
return false;
// Get the name of the function.
StringRef FName = II->getName();
FName = FName.substr(FName.find_first_not_of('_'));
// See if it's one of the specific functions we know how to eval.
bool canEval = false;
QualType ResultTy = CE->getCallReturnType(C.getASTContext());
if (ResultTy->isObjCIdType()) {
// Handle: id NSMakeCollectable(CFTypeRef)
canEval = II->isStr("NSMakeCollectable");
} else if (ResultTy->isPointerType()) {
// Handle: (CF|CG)Retain
// CFAutorelease
// CFMakeCollectable
// It's okay to be a little sloppy here (CGMakeCollectable doesn't exist).
if (cocoa::isRefType(ResultTy, "CF", FName) ||
cocoa::isRefType(ResultTy, "CG", FName)) {
canEval = isRetain(FD, FName) || isAutorelease(FD, FName) ||
isMakeCollectable(FD, FName);
}
}
if (!canEval)
return false;
// Bind the return value.
const LocationContext *LCtx = C.getLocationContext();
SVal RetVal = state->getSVal(CE->getArg(0), LCtx);
if (RetVal.isUnknown()) {
// If the argument is unknown, conjure a return value.
SValBuilder &SVB = C.getSValBuilder();
RetVal = SVB.conjureSymbolVal(nullptr, CE, LCtx, ResultTy, C.blockCount());
}
state = state->BindExpr(CE, LCtx, RetVal, false);
// FIXME: This should not be necessary, but otherwise the argument seems to be
// considered alive during the next statement.
if (const MemRegion *ArgRegion = RetVal.getAsRegion()) {
// Save the refcount status of the argument.
SymbolRef Sym = RetVal.getAsLocSymbol();
const RefVal *Binding = nullptr;
if (Sym)
Binding = getRefBinding(state, Sym);
// Invalidate the argument region.
state = state->invalidateRegions(ArgRegion, CE, C.blockCount(), LCtx,
/*CausesPointerEscape*/ false);
// Restore the refcount status of the argument.
if (Binding)
state = setRefBinding(state, Sym, *Binding);
}
C.addTransition(state);
return true;
}
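// Hedged usage note: the functions modeled here return an alias of their
// argument, e.g. (illustrative)
//   CFTypeRef r = CFRetain(obj); // 'r' is bound to the same value as 'obj'
// so the retain-count effects still come from the summary path; this
// evalCall only wires up the return value and preserves the ref binding
// across the invalidation.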
//===----------------------------------------------------------------------===//
// Handle return statements.
//===----------------------------------------------------------------------===//
void RetainCountChecker::checkPreStmt(const ReturnStmt *S,
CheckerContext &C) const {
// Only adjust the reference count if this is the top-level call frame,
// and not the result of inlining. In the future, we should do
// better checking even for inlined calls, and see if they match
// with their expected semantics (e.g., the method should return a retained
// object, etc.).
if (!C.inTopFrame())
return;
const Expr *RetE = S->getRetValue();
if (!RetE)
return;
ProgramStateRef state = C.getState();
SymbolRef Sym =
state->getSValAsScalarOrLoc(RetE, C.getLocationContext()).getAsLocSymbol();
if (!Sym)
return;
// Get the reference count binding (if any).
const RefVal *T = getRefBinding(state, Sym);
if (!T)
return;
// Change the reference count.
RefVal X = *T;
switch (X.getKind()) {
case RefVal::Owned: {
unsigned cnt = X.getCount();
assert(cnt > 0);
X.setCount(cnt - 1);
X = X ^ RefVal::ReturnedOwned;
break;
}
case RefVal::NotOwned: {
unsigned cnt = X.getCount();
if (cnt) {
X.setCount(cnt - 1);
X = X ^ RefVal::ReturnedOwned;
}
else {
X = X ^ RefVal::ReturnedNotOwned;
}
break;
}
default:
return;
}
// Update the binding.
state = setRefBinding(state, Sym, X);
ExplodedNode *Pred = C.addTransition(state);
// At this point we have updated the state properly.
// Everything after this is merely checking to see if the return value has
// been over- or under-retained.
// Did we cache out?
if (!Pred)
return;
// Update the autorelease counts.
static CheckerProgramPointTag AutoreleaseTag(this, "Autorelease");
state = handleAutoreleaseCounts(state, Pred, &AutoreleaseTag, C, Sym, X);
// Did we cache out?
if (!state)
return;
// Get the updated binding.
T = getRefBinding(state, Sym);
assert(T);
X = *T;
// Consult the summary of the enclosing method.
RetainSummaryManager &Summaries = getSummaryManager(C);
const Decl *CD = &Pred->getCodeDecl();
RetEffect RE = RetEffect::MakeNoRet();
// FIXME: What is the convention for blocks? Is there one?
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CD)) {
const RetainSummary *Summ = Summaries.getMethodSummary(MD);
RE = Summ->getRetEffect();
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CD)) {
if (!isa<CXXMethodDecl>(FD)) {
const RetainSummary *Summ = Summaries.getFunctionSummary(FD);
RE = Summ->getRetEffect();
}
}
checkReturnWithRetEffect(S, C, Pred, RE, X, Sym, state);
}
void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
CheckerContext &C,
ExplodedNode *Pred,
RetEffect RE, RefVal X,
SymbolRef Sym,
ProgramStateRef state) const {
// HACK: Ignore retain-count issues on values accessed through ivars,
// because of cases like this:
// [_contentView retain];
// [_contentView removeFromSuperview];
// [self addSubview:_contentView]; // invalidates 'self'
// [_contentView release];
if (X.getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
return;
// Any leaks or other errors?
if (X.isReturnedOwned() && X.getCount() == 0) {
if (RE.getKind() != RetEffect::NoRet) {
bool hasError = false;
if (C.isObjCGCEnabled() && RE.getObjKind() == RetEffect::ObjC) {
// Things are more complicated with garbage collection. If the
// returned object is supposed to be an Objective-C object, we have
// a leak (as the caller expects a GC'ed object) because no
// method should return ownership unless it returns a CF object.
hasError = true;
X = X ^ RefVal::ErrorGCLeakReturned;
}
else if (!RE.isOwned()) {
// Either we are using GC and the returned object is a CF type
// or we aren't using GC. In either case, the
// enclosing method is expected to return ownership.
hasError = true;
X = X ^ RefVal::ErrorLeakReturned;
}
if (hasError) {
// Generate an error node.
state = setRefBinding(state, Sym, X);
static CheckerProgramPointTag ReturnOwnLeakTag(this, "ReturnsOwnLeak");
ExplodedNode *N = C.addTransition(state, Pred, &ReturnOwnLeakTag);
if (N) {
const LangOptions &LOpts = C.getASTContext().getLangOpts();
bool GCEnabled = C.isObjCGCEnabled();
C.emitReport(std::unique_ptr<BugReport>(new CFRefLeakReport(
*getLeakAtReturnBug(LOpts, GCEnabled), LOpts, GCEnabled,
SummaryLog, N, Sym, C, IncludeAllocationLine)));
}
}
}
} else if (X.isReturnedNotOwned()) {
if (RE.isOwned()) {
if (X.getIvarAccessHistory() ==
RefVal::IvarAccessHistory::AccessedDirectly) {
// Assume the method was trying to transfer a +1 reference from a
// strong ivar to the caller.
state = setRefBinding(state, Sym,
X.releaseViaIvar() ^ RefVal::ReturnedOwned);
} else {
// Trying to return a not owned object to a caller expecting an
// owned object.
state = setRefBinding(state, Sym, X ^ RefVal::ErrorReturnedNotOwned);
static CheckerProgramPointTag
ReturnNotOwnedTag(this, "ReturnNotOwnedForOwned");
ExplodedNode *N = C.addTransition(state, Pred, &ReturnNotOwnedTag);
if (N) {
if (!returnNotOwnedForOwned)
returnNotOwnedForOwned.reset(new ReturnedNotOwnedForOwned(this));
C.emitReport(std::unique_ptr<BugReport>(new CFRefReport(
*returnNotOwnedForOwned, C.getASTContext().getLangOpts(),
C.isObjCGCEnabled(), SummaryLog, N, Sym)));
}
}
}
}
}
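// Illustrative leak-at-return (sketch; assumes default naming conventions):
//   - (NSString *)name { // summary: returns not owned
//     return [[NSString alloc] init]; // returned at +1 -> ErrorLeakReturned
//   }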
//===----------------------------------------------------------------------===//
// Check various ways a symbol can be invalidated.
//===----------------------------------------------------------------------===//
void RetainCountChecker::checkBind(SVal loc, SVal val, const Stmt *S,
CheckerContext &C) const {
// Are we storing to something that causes the value to "escape"?
bool escapes = true;
// A value escapes in four possible cases (this may change):
//
// (1) we are binding to something that is not a memory region.
// (2) we are binding to a memregion that does not have stack storage
// (3) we are binding to a memregion with stack storage that the store
// does not understand.
// (4) we are binding to a stack-based region that is not a plain variable
// (e.g. a struct field), which is not modeled (see below).
ProgramStateRef state = C.getState();
if (Optional<loc::MemRegionVal> regionLoc = loc.getAs<loc::MemRegionVal>()) {
escapes = !regionLoc->getRegion()->hasStackStorage();
if (!escapes) {
// To test (3), generate a new state with the binding added. If it is
// the same state, then it escapes (since the store cannot represent
// the binding).
// Do this only if we know that the store is not supposed to generate the
// same state.
SVal StoredVal = state->getSVal(regionLoc->getRegion());
if (StoredVal != val)
escapes = (state == (state->bindLoc(*regionLoc, val)));
}
if (!escapes) {
// Case 4: We do not currently model what happens when a symbol is
// assigned to a struct field, so be conservative here and let the symbol
// go. TODO: This could definitely be improved upon.
escapes = !isa<VarRegion>(regionLoc->getRegion());
}
}
// If we are storing the value into an auto function scope variable annotated
// with (__attribute__((cleanup))), stop tracking the value to avoid leak
// false positives.
if (const VarRegion *LVR = dyn_cast_or_null<VarRegion>(loc.getAsRegion())) {
const VarDecl *VD = LVR->getDecl();
if (VD->hasAttr<CleanupAttr>()) {
escapes = true;
}
}
// If our store can represent the binding and we aren't storing to something
// that doesn't have local storage then just return and have the simulation
// state continue as is.
if (!escapes)
return;
// Otherwise, find all symbols referenced by 'val' that we are tracking
// and stop tracking them.
state = state->scanReachableSymbols<StopTrackingCallback>(val).getState();
C.addTransition(state);
}
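// Illustrative escape (sketch): binding a tracked value to non-stack storage,
//   static id gCache;
//   gCache = [[NSObject alloc] init];
// makes the symbol escape, so tracking stops and no leak is reported for
// this +1 value.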
ProgramStateRef RetainCountChecker::evalAssume(ProgramStateRef state,
SVal Cond,
bool Assumption) const {
// FIXME: We may add to the interface of evalAssume the list of symbols
// whose assumptions have changed. For now we just iterate through the
// bindings and check if any of the tracked symbols are NULL. This isn't
// too bad since the number of symbols we will track in practice are
// probably small and evalAssume is only called at branches and a few
// other places.
RefBindingsTy B = state->get<RefBindings>();
if (B.isEmpty())
return state;
bool changed = false;
RefBindingsTy::Factory &RefBFactory = state->get_context<RefBindings>();
for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
// If the symbol is constrained to null, stop tracking it.
ConstraintManager &CMgr = state->getConstraintManager();
ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
if (AllocFailed.isConstrainedTrue()) {
changed = true;
B = RefBFactory.remove(B, I.getKey());
}
}
if (changed)
state = state->set<RefBindings>(B);
return state;
}
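// Example of the pruning above (illustrative): after
//   CFStringRef s = CFStringCreateWithCString(...);
//   if (!s) return; // 's' constrained to null on this branch
// the binding for 's' is dropped, treating the allocation as failed.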
ProgramStateRef
RetainCountChecker::checkRegionChanges(ProgramStateRef state,
const InvalidatedSymbols *invalidated,
ArrayRef<const MemRegion *> ExplicitRegions,
ArrayRef<const MemRegion *> Regions,
const CallEvent *Call) const {
if (!invalidated)
return state;
llvm::SmallPtrSet<SymbolRef, 8> WhitelistedSymbols;
for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
E = ExplicitRegions.end(); I != E; ++I) {
if (const SymbolicRegion *SR = (*I)->StripCasts()->getAs<SymbolicRegion>())
WhitelistedSymbols.insert(SR->getSymbol());
}
for (InvalidatedSymbols::const_iterator I=invalidated->begin(),
E = invalidated->end(); I!=E; ++I) {
SymbolRef sym = *I;
if (WhitelistedSymbols.count(sym))
continue;
// Remove any existing reference-count binding.
state = removeRefBinding(state, sym);
}
return state;
}
//===----------------------------------------------------------------------===//
// Handle dead symbols and end-of-path.
//===----------------------------------------------------------------------===//
ProgramStateRef
RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
ExplodedNode *Pred,
const ProgramPointTag *Tag,
CheckerContext &Ctx,
SymbolRef Sym, RefVal V) const {
unsigned ACnt = V.getAutoreleaseCount();
// No autorelease counts? Nothing to be done.
if (!ACnt)
return state;
assert(!Ctx.isObjCGCEnabled() && "Autorelease counts in GC mode?");
unsigned Cnt = V.getCount();
// FIXME: Handle sending 'autorelease' to already released object.
if (V.getKind() == RefVal::ReturnedOwned)
++Cnt;
// If we would over-release here, but we know the value came from an ivar,
// assume it was a strong ivar that's just been relinquished.
if (ACnt > Cnt &&
V.getIvarAccessHistory() == RefVal::IvarAccessHistory::AccessedDirectly) {
V = V.releaseViaIvar();
--ACnt;
}
if (ACnt <= Cnt) {
if (ACnt == Cnt) {
V.clearCounts();
if (V.getKind() == RefVal::ReturnedOwned)
V = V ^ RefVal::ReturnedNotOwned;
else
V = V ^ RefVal::NotOwned;
} else {
V.setCount(V.getCount() - ACnt);
V.setAutoreleaseCount(0);
}
return setRefBinding(state, Sym, V);
}
// HACK: Ignore retain-count issues on values accessed through ivars,
// because of cases like this:
// [_contentView retain];
// [_contentView removeFromSuperview];
// [self addSubview:_contentView]; // invalidates 'self'
// [_contentView release];
if (V.getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
return state;
// Whoa! More autorelease counts than retain counts left.
// Emit hard error.
V = V ^ RefVal::ErrorOverAutorelease;
state = setRefBinding(state, Sym, V);
ExplodedNode *N = Ctx.generateSink(state, Pred, Tag);
if (N) {
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
os << "Object was autoreleased ";
if (V.getAutoreleaseCount() > 1)
os << V.getAutoreleaseCount() << " times but the object ";
else
os << "but ";
os << "has a +" << V.getCount() << " retain count";
if (!overAutorelease)
overAutorelease.reset(new OverAutorelease(this));
const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
Ctx.emitReport(std::unique_ptr<BugReport>(
new CFRefReport(*overAutorelease, LOpts, /* GCEnabled = */ false,
SummaryLog, N, Sym, os.str())));
}
return nullptr;
}
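// Worked example of the over-autorelease diagnostic (illustrative):
//   id obj = [[NSObject alloc] init]; // +1
//   [obj autorelease]; // autorelease count = 1
//   [obj autorelease]; // autorelease count = 2 > +1 retain count
// -> "Object was autoreleased 2 times but the object has a +1 retain count".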
ProgramStateRef
RetainCountChecker::handleSymbolDeath(ProgramStateRef state,
SymbolRef sid, RefVal V,
SmallVectorImpl<SymbolRef> &Leaked) const {
bool hasLeak;
// HACK: Ignore retain-count issues on values accessed through ivars,
// because of cases like this:
// [_contentView retain];
// [_contentView removeFromSuperview];
// [self addSubview:_contentView]; // invalidates 'self'
// [_contentView release];
if (V.getIvarAccessHistory() != RefVal::IvarAccessHistory::None)
hasLeak = false;
else if (V.isOwned())
hasLeak = true;
else if (V.isNotOwned() || V.isReturnedOwned())
hasLeak = (V.getCount() > 0);
else
hasLeak = false;
if (!hasLeak)
return removeRefBinding(state, sid);
Leaked.push_back(sid);
return setRefBinding(state, sid, V ^ RefVal::ErrorLeak);
}
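// Hedged example of the rule above (sketch):
//   CFStringRef s = CFStringCreateWithCString(...); // Owned, +1
//   // 's' dies still owned -> ErrorLeak, collected into 'Leaked' and
//   // reported via processLeaks; everything else is simply untracked.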
ExplodedNode *
RetainCountChecker::processLeaks(ProgramStateRef state,
SmallVectorImpl<SymbolRef> &Leaked,
CheckerContext &Ctx,
ExplodedNode *Pred) const {
// Generate an intermediate node representing the leak point.
ExplodedNode *N = Ctx.addTransition(state, Pred);
if (N) {
for (SmallVectorImpl<SymbolRef>::iterator
I = Leaked.begin(), E = Leaked.end(); I != E; ++I) {
const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
bool GCEnabled = Ctx.isObjCGCEnabled();
CFRefBug *BT = Pred ? getLeakWithinFunctionBug(LOpts, GCEnabled)
: getLeakAtReturnBug(LOpts, GCEnabled);
assert(BT && "BugType not initialized.");
Ctx.emitReport(std::unique_ptr<BugReport>(
new CFRefLeakReport(*BT, LOpts, GCEnabled, SummaryLog, N, *I, Ctx,
IncludeAllocationLine)));
}
}
return N;
}
void RetainCountChecker::checkEndFunction(CheckerContext &Ctx) const {
ProgramStateRef state = Ctx.getState();
RefBindingsTy B = state->get<RefBindings>();
ExplodedNode *Pred = Ctx.getPredecessor();
// Don't process anything within synthesized bodies.
const LocationContext *LCtx = Pred->getLocationContext();
if (LCtx->getAnalysisDeclContext()->isBodyAutosynthesized()) {
assert(LCtx->getParent());
return;
}
for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
state = handleAutoreleaseCounts(state, Pred, /*Tag=*/nullptr, Ctx,
I->first, I->second);
if (!state)
return;
}
// If the current LocationContext has a parent, don't check for leaks.
// We will do that later.
// FIXME: we should instead check for imbalances of the retain/releases,
// and suggest annotations.
if (LCtx->getParent())
return;
B = state->get<RefBindings>();
SmallVector<SymbolRef, 10> Leaked;
for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I)
state = handleSymbolDeath(state, I->first, I->second, Leaked);
processLeaks(state, Leaked, Ctx, Pred);
}
const ProgramPointTag *
RetainCountChecker::getDeadSymbolTag(SymbolRef sym) const {
const CheckerProgramPointTag *&tag = DeadSymbolTags[sym];
if (!tag) {
SmallString<64> buf;
llvm::raw_svector_ostream out(buf);
out << "Dead Symbol : ";
sym->dumpToStream(out);
tag = new CheckerProgramPointTag(this, out.str());
}
return tag;
}
void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
CheckerContext &C) const {
ExplodedNode *Pred = C.getPredecessor();
ProgramStateRef state = C.getState();
RefBindingsTy B = state->get<RefBindings>();
SmallVector<SymbolRef, 10> Leaked;
// Update counts from autorelease pools
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
E = SymReaper.dead_end(); I != E; ++I) {
SymbolRef Sym = *I;
if (const RefVal *T = B.lookup(Sym)) {
// Use the symbol as the tag.
// FIXME: This might not be as unique as we would like.
const ProgramPointTag *Tag = getDeadSymbolTag(Sym);
state = handleAutoreleaseCounts(state, Pred, Tag, C, Sym, *T);
if (!state)
return;
// Fetch the new reference count from the state, and use it to handle
// this symbol.
state = handleSymbolDeath(state, *I, *getRefBinding(state, Sym), Leaked);
}
}
if (Leaked.empty()) {
C.addTransition(state);
return;
}
Pred = processLeaks(state, Leaked, C, Pred);
// Did we cache out?
if (!Pred)
return;
// Now generate a new node that nukes the old bindings.
// The only bindings left at this point are the leaked symbols.
RefBindingsTy::Factory &F = state->get_context<RefBindings>();
B = state->get<RefBindings>();
for (SmallVectorImpl<SymbolRef>::iterator I = Leaked.begin(),
E = Leaked.end();
I != E; ++I)
B = F.remove(B, *I);
state = state->set<RefBindings>(B);
C.addTransition(state, Pred);
}
void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const {
RefBindingsTy B = State->get<RefBindings>();
if (B.isEmpty())
return;
Out << Sep << NL;
for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
Out << I->first << " : ";
I->second.print(Out);
Out << NL;
}
}
//===----------------------------------------------------------------------===//
// Checker registration.
//===----------------------------------------------------------------------===//
void ento::registerRetainCountChecker(CheckerManager &Mgr) {
Mgr.registerChecker<RetainCountChecker>(Mgr.getAnalyzerOptions());
}
//===----------------------------------------------------------------------===//
// Implementation of the CallEffects API.
//===----------------------------------------------------------------------===//
namespace clang { namespace ento { namespace objc_retain {
// This is a bit gross, but it allows us to populate CallEffects without
// creating a bunch of accessors. Its use is very localized, so the
// damage of this macro is limited.
#define createCallEffect(D, KIND)\
ASTContext &Ctx = D->getASTContext();\
LangOptions L = Ctx.getLangOpts();\
RetainSummaryManager M(Ctx, L.GCOnly, L.ObjCAutoRefCount);\
const RetainSummary *S = M.get ## KIND ## Summary(D);\
CallEffects CE(S->getRetEffect());\
CE.Receiver = S->getReceiverEffect();\
unsigned N = D->param_size();\
for (unsigned i = 0; i < N; ++i) {\
CE.Args.push_back(S->getArg(i));\
}
CallEffects CallEffects::getEffect(const ObjCMethodDecl *MD) {
createCallEffect(MD, Method);
return CE;
}
CallEffects CallEffects::getEffect(const FunctionDecl *FD) {
createCallEffect(FD, Function);
return CE;
}
#undef createCallEffect
}}}
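// Hypothetical usage of the API above (assumes a valid ObjCMethodDecl *MD):
//   CallEffects CE = CallEffects::getEffect(MD);
//   // CE now carries the return, receiver, and per-argument effects the
//   // retain-count checker would apply to a call to 'MD'.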
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp | //=== UndefResultChecker.cpp ------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This defines UndefResultChecker, a builtin check in ExprEngine that
// performs checks for undefined results of non-assignment binary operators.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
namespace {
class UndefResultChecker
: public Checker< check::PostStmt<BinaryOperator> > {
mutable std::unique_ptr<BugType> BT;
public:
void checkPostStmt(const BinaryOperator *B, CheckerContext &C) const;
};
} // end anonymous namespace
void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
CheckerContext &C) const {
ProgramStateRef state = C.getState();
const LocationContext *LCtx = C.getLocationContext();
if (state->getSVal(B, LCtx).isUndef()) {
// Do not report assignments of uninitialized values inside swap functions.
// This should allow swapping partially uninitialized structs
// (radar://14129997)
if (const FunctionDecl *EnclosingFunctionDecl =
dyn_cast<FunctionDecl>(C.getStackFrame()->getDecl()))
if (C.getCalleeName(EnclosingFunctionDecl) == "swap")
return;
// Generate an error node.
ExplodedNode *N = C.generateSink();
if (!N)
return;
if (!BT)
BT.reset(
new BuiltinBug(this, "Result of operation is garbage or undefined"));
SmallString<256> sbuf;
llvm::raw_svector_ostream OS(sbuf);
const Expr *Ex = nullptr;
bool isLeft = true;
if (state->getSVal(B->getLHS(), LCtx).isUndef()) {
Ex = B->getLHS()->IgnoreParenCasts();
isLeft = true;
}
else if (state->getSVal(B->getRHS(), LCtx).isUndef()) {
Ex = B->getRHS()->IgnoreParenCasts();
isLeft = false;
}
if (Ex) {
OS << "The " << (isLeft ? "left" : "right")
<< " operand of '"
<< BinaryOperator::getOpcodeStr(B->getOpcode())
<< "' is a garbage value";
}
else {
// Neither operand was undefined, but the result is undefined.
OS << "The result of the '"
<< BinaryOperator::getOpcodeStr(B->getOpcode())
<< "' expression is undefined";
}
auto report = llvm::make_unique<BugReport>(*BT, OS.str(), N);
if (Ex) {
report->addRange(Ex->getSourceRange());
bugreporter::trackNullOrUndefValue(N, Ex, *report);
}
else
bugreporter::trackNullOrUndefValue(N, B, *report);
C.emitReport(std::move(report));
}
}
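// Illustrative input the checker flags (sketch):
//   int x; // never initialized
//   int y = x + 1; // "The left operand of '+' is a garbage value"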
void ento::registerUndefResultChecker(CheckerManager &mgr) {
mgr.registerChecker<UndefResultChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp | //==- CheckSizeofPointer.cpp - Check for sizeof on pointers ------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a check for unintended use of sizeof() on pointer
// expressions.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
using namespace clang;
using namespace ento;
namespace {
class WalkAST : public StmtVisitor<WalkAST> {
BugReporter &BR;
const CheckerBase *Checker;
AnalysisDeclContext* AC;
public:
WalkAST(BugReporter &br, const CheckerBase *checker, AnalysisDeclContext *ac)
: BR(br), Checker(checker), AC(ac) {}
void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
void VisitStmt(Stmt *S) { VisitChildren(S); }
void VisitChildren(Stmt *S);
};
}
void WalkAST::VisitChildren(Stmt *S) {
for (Stmt *Child : S->children())
if (Child)
Visit(Child);
}
// CWE-467: Use of sizeof() on a Pointer Type
void WalkAST::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
if (E->getKind() != UETT_SizeOf)
return;
// If an explicit type is used in the code, usually the coder knows what they are
// doing.
if (E->isArgumentType())
return;
QualType T = E->getTypeOfArgument();
if (T->isPointerType()) {
// Many false positives have the form 'sizeof *p'. This is reasonable
// because people know what they are doing when they intentionally
// dereference the pointer.
Expr *ArgEx = E->getArgumentExpr();
if (!isa<DeclRefExpr>(ArgEx->IgnoreParens()))
return;
PathDiagnosticLocation ELoc =
PathDiagnosticLocation::createBegin(E, BR.getSourceManager(), AC);
BR.EmitBasicReport(AC->getDecl(), Checker,
"Potential unintended use of sizeof() on pointer type",
categories::LogicError,
"The code calls sizeof() on a pointer type. "
"This can produce an unexpected result.",
ELoc, ArgEx->getSourceRange());
}
}
}
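// Illustrative behavior (sketch): the walker warns on
//   int *p = ...;
//   memset(p, 0, sizeof(p)); // pointer size, probably meant sizeof(*p)
// but stays quiet on 'sizeof *p' (explicit dereference) and on explicit
// types such as 'sizeof(int *)'.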
//===----------------------------------------------------------------------===//
// SizeofPointerChecker
//===----------------------------------------------------------------------===//
namespace {
class SizeofPointerChecker : public Checker<check::ASTCodeBody> {
public:
void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
BugReporter &BR) const {
WalkAST walker(BR, this, mgr.getAnalysisDeclContext(D));
walker.Visit(D->getBody());
}
};
}
void ento::registerSizeofPointerChecker(CheckerManager &mgr) {
mgr.registerChecker<SizeofPointerChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp | //== BoolAssignmentChecker.cpp - Boolean assignment checker -----*- C++ -*--==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This defines BoolAssignmentChecker, a builtin check in ExprEngine that
// performs checks for assignment of non-Boolean values to Boolean variables.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
using namespace clang;
using namespace ento;
namespace {
class BoolAssignmentChecker : public Checker< check::Bind > {
mutable std::unique_ptr<BuiltinBug> BT;
void emitReport(ProgramStateRef state, CheckerContext &C) const;
public:
void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
};
} // end anonymous namespace
void BoolAssignmentChecker::emitReport(ProgramStateRef state,
CheckerContext &C) const {
if (ExplodedNode *N = C.addTransition(state)) {
if (!BT)
BT.reset(new BuiltinBug(this, "Assignment of a non-Boolean value"));
C.emitReport(llvm::make_unique<BugReport>(*BT, BT->getDescription(), N));
}
}
static bool isBooleanType(QualType Ty) {
if (Ty->isBooleanType()) // C++ or C99
return true;
if (const TypedefType *TT = Ty->getAs<TypedefType>())
return TT->getDecl()->getName() == "BOOL" || // Objective-C
TT->getDecl()->getName() == "_Bool" || // stdbool.h < C99
TT->getDecl()->getName() == "Boolean"; // MacTypes.h
return false;
}
void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
CheckerContext &C) const {
// We are only interested in stores into Booleans.
const TypedValueRegion *TR =
dyn_cast_or_null<TypedValueRegion>(loc.getAsRegion());
if (!TR)
return;
QualType valTy = TR->getValueType();
if (!isBooleanType(valTy))
return;
// Get the value of the right-hand side. We only care about values
// that are defined (UnknownVals and UndefinedVals are handled by other
// checkers).
Optional<DefinedSVal> DV = val.getAs<DefinedSVal>();
if (!DV)
return;
// Check if the assigned value meets our criteria for correctness. It must
// be a value that is either 0 or 1. One way to check this is to see if
// the value is possibly < 0 (for a negative value) or greater than 1.
ProgramStateRef state = C.getState();
SValBuilder &svalBuilder = C.getSValBuilder();
ConstraintManager &CM = C.getConstraintManager();
// First, ensure that the value is >= 0.
DefinedSVal zeroVal = svalBuilder.makeIntVal(0, valTy);
SVal greaterThanOrEqualToZeroVal =
svalBuilder.evalBinOp(state, BO_GE, *DV, zeroVal,
svalBuilder.getConditionType());
Optional<DefinedSVal> greaterThanEqualToZero =
greaterThanOrEqualToZeroVal.getAs<DefinedSVal>();
if (!greaterThanEqualToZero) {
// The SValBuilder cannot construct a valid SVal for this condition.
// This means we cannot properly reason about it.
return;
}
ProgramStateRef stateLT, stateGE;
std::tie(stateGE, stateLT) = CM.assumeDual(state, *greaterThanEqualToZero);
// Is it possible for the value to be less than zero?
if (stateLT) {
// It is possible for the value to be less than zero. We only
// want to emit a warning, however, if that value is fully constrained.
// If it is possible for the value to be >= 0, then essentially the
// value is underconstrained and there is nothing left to be done.
if (!stateGE)
emitReport(stateLT, C);
// In either case, we are done.
return;
}
// If we reach here, it must be the case that the value is constrained
// to only be >= 0.
assert(stateGE == state);
// At this point we know that the value is >= 0.
// Now check to ensure that the value is <= 1.
DefinedSVal OneVal = svalBuilder.makeIntVal(1, valTy);
SVal lessThanEqToOneVal =
svalBuilder.evalBinOp(state, BO_LE, *DV, OneVal,
svalBuilder.getConditionType());
Optional<DefinedSVal> lessThanEqToOne =
lessThanEqToOneVal.getAs<DefinedSVal>();
if (!lessThanEqToOne) {
// The SValBuilder cannot construct a valid SVal for this condition.
// This means we cannot properly reason about it.
return;
}
ProgramStateRef stateGT, stateLE;
std::tie(stateLE, stateGT) = CM.assumeDual(state, *lessThanEqToOne);
// Is it possible for the value to be greater than one?
if (stateGT) {
// It is possible for the value to be greater than one. We only
// want to emit a warning, however, if that value is fully constrained.
// If it is possible for the value to be <= 1, then essentially the
// value is underconstrained and there is nothing left to be done.
if (!stateLE)
emitReport(stateGT, C);
// In either case, we are done.
return;
}
// If we reach here, it must be the case that the value is constrained
// to only be <= 1.
assert(stateLE == state);
}
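// Illustrative case (sketch): given 'BOOL b;', an assignment like
//   b = 2; // value constrained to > 1 -> report
// is flagged, while 'b = x;' for an unconstrained 'x' stays silent since
// the value may still be 0 or 1.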
void ento::registerBoolAssignmentChecker(CheckerManager &mgr) {
mgr.registerChecker<BoolAssignmentChecker>();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer | repos/DirectXShaderCompiler/tools/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp | //==- ObjCMissingSuperCallChecker.cpp - Check missing super-calls in ObjC --==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a ObjCMissingSuperCallChecker, a checker that
// analyzes a UIViewController implementation to determine if it
// correctly calls super in the methods where this is mandatory.
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace ento;
namespace {
struct SelectorDescriptor {
const char *SelectorName;
unsigned ArgumentCount;
};
//===----------------------------------------------------------------------===//
// FindSuperCallVisitor - Identify specific calls to the superclass.
//===----------------------------------------------------------------------===//
class FindSuperCallVisitor : public RecursiveASTVisitor<FindSuperCallVisitor> {
public:
explicit FindSuperCallVisitor(Selector S) : DoesCallSuper(false), Sel(S) {}
bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
if (E->getSelector() == Sel)
if (E->getReceiverKind() == ObjCMessageExpr::SuperInstance)
DoesCallSuper = true;
// Recurse if we didn't find the super call yet.
return !DoesCallSuper;
}
bool DoesCallSuper;
private:
Selector Sel;
};
//===----------------------------------------------------------------------===//
// ObjCSuperCallChecker
//===----------------------------------------------------------------------===//
class ObjCSuperCallChecker : public Checker<
check::ASTDecl<ObjCImplementationDecl> > {
public:
ObjCSuperCallChecker() : IsInitialized(false) {}
void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager &Mgr,
BugReporter &BR) const;
private:
bool isCheckableClass(const ObjCImplementationDecl *D,
StringRef &SuperclassName) const;
void initializeSelectors(ASTContext &Ctx) const;
void fillSelectors(ASTContext &Ctx, ArrayRef<SelectorDescriptor> Sel,
StringRef ClassName) const;
mutable llvm::StringMap<llvm::SmallSet<Selector, 16> > SelectorsForClass;
mutable bool IsInitialized;
};
}
/// \brief Determine whether the given class has a superclass that we want
/// to check. The name of the found superclass is stored in SuperclassName.
///
/// \param D The declaration to check for superclasses.
/// \param[out] SuperclassName On return, the found superclass name.
bool ObjCSuperCallChecker::isCheckableClass(const ObjCImplementationDecl *D,
StringRef &SuperclassName) const {
const ObjCInterfaceDecl *ID = D->getClassInterface();
for ( ; ID ; ID = ID->getSuperClass())
{
SuperclassName = ID->getIdentifier()->getName();
if (SelectorsForClass.count(SuperclassName))
return true;
}
return false;
}
void ObjCSuperCallChecker::fillSelectors(ASTContext &Ctx,
ArrayRef<SelectorDescriptor> Sel,
StringRef ClassName) const {
llvm::SmallSet<Selector, 16> &ClassSelectors = SelectorsForClass[ClassName];
// Fill the Selectors SmallSet with all selectors we want to check.
for (ArrayRef<SelectorDescriptor>::iterator I = Sel.begin(), E = Sel.end();
I != E; ++I) {
SelectorDescriptor Descriptor = *I;
assert(Descriptor.ArgumentCount <= 1); // No multi-argument selectors yet.
// Get the selector.
IdentifierInfo *II = &Ctx.Idents.get(Descriptor.SelectorName);
Selector Sel = Ctx.Selectors.getSelector(Descriptor.ArgumentCount, &II);
ClassSelectors.insert(Sel);
}
}
void ObjCSuperCallChecker::initializeSelectors(ASTContext &Ctx) const {
{ // Initialize selectors for: UIViewController
const SelectorDescriptor Selectors[] = {
{ "addChildViewController", 1 },
{ "viewDidAppear", 1 },
{ "viewDidDisappear", 1 },
{ "viewWillAppear", 1 },
{ "viewWillDisappear", 1 },
{ "removeFromParentViewController", 0 },
{ "didReceiveMemoryWarning", 0 },
{ "viewDidUnload", 0 },
{ "viewDidLoad", 0 },
{ "viewWillUnload", 0 },
{ "updateViewConstraints", 0 },
{ "encodeRestorableStateWithCoder", 1 },
{ "restoreStateWithCoder", 1 }};
fillSelectors(Ctx, Selectors, "UIViewController");
}
{ // Initialize selectors for: UIResponder
const SelectorDescriptor Selectors[] = {
{ "resignFirstResponder", 0 }};
fillSelectors(Ctx, Selectors, "UIResponder");
}
{ // Initialize selectors for: NSResponder
const SelectorDescriptor Selectors[] = {
{ "encodeRestorableStateWithCoder", 1 },
{ "restoreStateWithCoder", 1 }};
fillSelectors(Ctx, Selectors, "NSResponder");
}
{ // Initialize selectors for: NSDocument
const SelectorDescriptor Selectors[] = {
{ "encodeRestorableStateWithCoder", 1 },
{ "restoreStateWithCoder", 1 }};
fillSelectors(Ctx, Selectors, "NSDocument");
}
IsInitialized = true;
}
void ObjCSuperCallChecker::checkASTDecl(const ObjCImplementationDecl *D,
AnalysisManager &Mgr,
BugReporter &BR) const {
ASTContext &Ctx = BR.getContext();
// We need to initialize the selector table once.
if (!IsInitialized)
initializeSelectors(Ctx);
// Find out whether this class has a superclass that we are supposed to check.
StringRef SuperclassName;
if (!isCheckableClass(D, SuperclassName))
return;
// Iterate over all instance methods.
for (auto *MD : D->instance_methods()) {
Selector S = MD->getSelector();
// Find out whether this is a selector that we want to check.
if (!SelectorsForClass[SuperclassName].count(S))
continue;
// Check if the method calls its superclass implementation.
if (MD->getBody())
{
FindSuperCallVisitor Visitor(S);
Visitor.TraverseDecl(MD);
// It doesn't call super, emit a diagnostic.
if (!Visitor.DoesCallSuper) {
PathDiagnosticLocation DLoc =
PathDiagnosticLocation::createEnd(MD->getBody(),
BR.getSourceManager(),
Mgr.getAnalysisDeclContext(D));
const char *Name = "Missing call to superclass";
SmallString<320> Buf;
llvm::raw_svector_ostream os(Buf);
os << "The '" << S.getAsString()
<< "' instance method in " << SuperclassName.str() << " subclass '"
<< *D << "' is missing a [super " << S.getAsString() << "] call";
BR.EmitBasicReport(MD, this, Name, categories::CoreFoundationObjectiveC,
os.str(), DLoc);
}
}
}
}
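// Illustrative violation (sketch; '-setup' is hypothetical):
//   - (void)viewDidLoad {
//     [self setup]; // no [super viewDidLoad] anywhere in the body
//   }
// in a UIViewController subclass triggers "is missing a [super viewDidLoad]
// call".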
//===----------------------------------------------------------------------===//
// Check registration.
//===----------------------------------------------------------------------===//
void ento::registerObjCSuperCallChecker(CheckerManager &Mgr) {
Mgr.registerChecker<ObjCSuperCallChecker>();
}
/*
ToDo list for expanding this check in the future, the list is not exhaustive.
There are also cases where calling super is suggested but not "mandatory".
In addition to be able to check the classes and methods below, architectural
improvements like being able to allow for the super-call to be done in a called
method would be good too.
UIDocument subclasses
- finishedHandlingError:recovered: (is multi-arg)
- finishedHandlingError:recovered: (is multi-arg)
UIViewController subclasses
- loadView (should *never* call super)
- transitionFromViewController:toViewController:
duration:options:animations:completion: (is multi-arg)
UICollectionViewController subclasses
- loadView (take care because UIViewController subclasses should NOT call super
in loadView, but UICollectionViewController subclasses should)
NSObject subclasses
- doesNotRecognizeSelector (it only has to call super if it doesn't throw)
UIPopoverBackgroundView subclasses (some of those are class methods)
- arrowDirection (should *never* call super)
- arrowOffset (should *never* call super)
- arrowBase (should *never* call super)
- arrowHeight (should *never* call super)
- contentViewInsets (should *never* call super)
UITextSelectionRect subclasses (some of those are properties)
- rect (should *never* call super)
- range (should *never* call super)
- writingDirection (should *never* call super)
- isVertical (should *never* call super)
- containsStart (should *never* call super)
- containsEnd (should *never* call super)
*/
|