Unnamed: 0
int64 0
0
| repo_id
stringlengths 5
186
| file_path
stringlengths 15
223
| content
stringlengths 1
32.8M
⌀ |
---|---|---|---|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/RecordLayoutBuilder.cpp | //=== RecordLayoutBuilder.cpp - Helper class for building record layouts ---==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/RecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
using namespace clang;
namespace {
/// BaseSubobjectInfo - Represents a single base subobject in a complete class.
/// For a class hierarchy like
///
/// class A { };
/// class B : A { };
/// class C : A, B { };
///
/// The BaseSubobjectInfo graph for C will have three BaseSubobjectInfo
/// instances, one for B and two for A.
///
/// If a base is virtual, it will only have one BaseSubobjectInfo allocated.
struct BaseSubobjectInfo {
  /// Class - The class for this base info.
  const CXXRecordDecl *Class;

  /// IsVirtual - Whether the BaseInfo represents a virtual base or not.
  bool IsVirtual;

  /// Bases - Information about the base subobjects.
  SmallVector<BaseSubobjectInfo*, 4> Bases;

  /// PrimaryVirtualBaseInfo - Holds the base info for the primary virtual base
  /// of this base info (if one exists).
  BaseSubobjectInfo *PrimaryVirtualBaseInfo;

  /// Derived - For a virtual base, the BaseSubobjectInfo that has claimed
  /// this base as its primary virtual base; null while unclaimed. Set and
  /// tested by ComputeBaseSubobjectInfo when wiring up primary virtual bases.
  const BaseSubobjectInfo *Derived;
};
/// \brief Externally provided layout. Typically used when the AST source, such
/// as DWARF, lacks all the information that was available at compile time, such
/// as alignment attributes on fields and pragmas in effect.
struct ExternalLayout {
  ExternalLayout() : Size(0), Align(0) {}

  /// \brief Overall record size in bits.
  uint64_t Size;

  /// \brief Overall record alignment in bits.
  uint64_t Align;

  /// \brief Record field offsets in bits.
  llvm::DenseMap<const FieldDecl *, uint64_t> FieldOffsets;

  /// \brief Direct, non-virtual base offsets.
  llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsets;

  /// \brief Virtual base offsets.
  llvm::DenseMap<const CXXRecordDecl *, CharUnits> VirtualBaseOffsets;

  /// Get the offset of the given field. The external source must provide
  /// entries for all fields in the record.
  uint64_t getExternalFieldOffset(const FieldDecl *FD) {
    // Use find() rather than operator[]: with asserts compiled out, a lookup
    // of a missing field through operator[] would default-insert a bogus
    // zero offset into the map and silently return it.
    auto Known = FieldOffsets.find(FD);
    assert(Known != FieldOffsets.end() &&
           "Field does not have an external offset");
    return Known->second;
  }

  /// Look up the externally-provided offset of a direct non-virtual base.
  /// \returns true and sets \p BaseOffset if an entry exists.
  bool getExternalNVBaseOffset(const CXXRecordDecl *RD, CharUnits &BaseOffset) {
    auto Known = BaseOffsets.find(RD);
    if (Known == BaseOffsets.end())
      return false;
    BaseOffset = Known->second;
    return true;
  }

  /// Look up the externally-provided offset of a virtual base.
  /// \returns true and sets \p BaseOffset if an entry exists.
  bool getExternalVBaseOffset(const CXXRecordDecl *RD, CharUnits &BaseOffset) {
    auto Known = VirtualBaseOffsets.find(RD);
    if (Known == VirtualBaseOffsets.end())
      return false;
    BaseOffset = Known->second;
    return true;
  }
};
/// EmptySubobjectMap - Keeps track of which empty subobjects exist at different
/// offsets while laying out a C++ class.
class EmptySubobjectMap {
  const ASTContext &Context;
  // Cached Context.getCharWidth(), used to validate bit-to-char conversions.
  uint64_t CharWidth;

  /// Class - The class whose empty entries we're keeping track of.
  const CXXRecordDecl *Class;

  /// EmptyClassOffsets - A map from offsets to empty record decls.
  typedef llvm::TinyPtrVector<const CXXRecordDecl *> ClassVectorTy;
  typedef llvm::DenseMap<CharUnits, ClassVectorTy> EmptyClassOffsetsMapTy;
  EmptyClassOffsetsMapTy EmptyClassOffsets;

  /// MaxEmptyClassOffset - The highest offset known to contain an empty
  /// base subobject.
  CharUnits MaxEmptyClassOffset;

  /// ComputeEmptySubobjectSizes - Compute the size of the largest base or
  /// member subobject that is empty.
  void ComputeEmptySubobjectSizes();

  // Record an empty subobject of type RD at Offset (no-op for non-empty RD).
  void AddSubobjectAtOffset(const CXXRecordDecl *RD, CharUnits Offset);

  // Recursively record all empty subobjects of a placed base, or of a placed
  // field's type, so that later placements can be checked for collisions.
  void UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
                                 CharUnits Offset, bool PlacingEmptyBase);
  void UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
                                  const CXXRecordDecl *Class,
                                  CharUnits Offset);
  void UpdateEmptyFieldSubobjects(const FieldDecl *FD, CharUnits Offset);

  /// AnyEmptySubobjectsBeyondOffset - Returns whether there are any empty
  /// subobjects beyond the given offset.
  bool AnyEmptySubobjectsBeyondOffset(CharUnits Offset) const {
    return Offset <= MaxEmptyClassOffset;
  }

  // Convert a field's bit offset from the given layout into CharUnits.
  CharUnits
  getFieldOffset(const ASTRecordLayout &Layout, unsigned FieldNo) const {
    uint64_t FieldOffset = Layout.getFieldOffset(FieldNo);
    assert(FieldOffset % CharWidth == 0 &&
           "Field offset not at char boundary!");

    return Context.toCharUnitsFromBits(FieldOffset);
  }

protected:
  // Placement queries: return true if the given subobject can live at Offset
  // without two subobjects of the same (empty) type sharing an offset.
  bool CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
                                 CharUnits Offset) const;

  bool CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
                                     CharUnits Offset);

  bool CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
                                      const CXXRecordDecl *Class,
                                      CharUnits Offset) const;
  bool CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
                                      CharUnits Offset) const;

public:
  /// This holds the size of the largest empty subobject (either a base
  /// or a member). Will be zero if the record being built doesn't contain
  /// any empty classes.
  CharUnits SizeOfLargestEmptySubobject;

  EmptySubobjectMap(const ASTContext &Context, const CXXRecordDecl *Class)
  : Context(Context), CharWidth(Context.getCharWidth()), Class(Class) {
    ComputeEmptySubobjectSizes();
  }

  /// CanPlaceBaseAtOffset - Return whether the given base class can be placed
  /// at the given offset.
  /// Returns false if placing the record will result in two components
  /// (direct or indirect) of the same type having the same offset.
  bool CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
                            CharUnits Offset);

  /// CanPlaceFieldAtOffset - Return whether a field can be placed at the given
  /// offset.
  bool CanPlaceFieldAtOffset(const FieldDecl *FD, CharUnits Offset);
};
void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
// Check the bases.
for (const CXXBaseSpecifier &Base : Class->bases()) {
const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
CharUnits EmptySize;
const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
if (BaseDecl->isEmpty()) {
// If the class decl is empty, get its size.
EmptySize = Layout.getSize();
} else {
// Otherwise, we get the largest empty subobject for the decl.
EmptySize = Layout.getSizeOfLargestEmptySubobject();
}
if (EmptySize > SizeOfLargestEmptySubobject)
SizeOfLargestEmptySubobject = EmptySize;
}
// Check the fields.
for (const FieldDecl *FD : Class->fields()) {
const RecordType *RT =
Context.getBaseElementType(FD->getType())->getAs<RecordType>();
// We only care about record types.
if (!RT)
continue;
CharUnits EmptySize;
const CXXRecordDecl *MemberDecl = RT->getAsCXXRecordDecl();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(MemberDecl);
if (MemberDecl->isEmpty()) {
// If the class decl is empty, get its size.
EmptySize = Layout.getSize();
} else {
// Otherwise, we get the largest empty subobject for the decl.
EmptySize = Layout.getSizeOfLargestEmptySubobject();
}
if (EmptySize > SizeOfLargestEmptySubobject)
SizeOfLargestEmptySubobject = EmptySize;
}
}
bool
EmptySubobjectMap::CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
CharUnits Offset) const {
// We only need to check empty bases.
if (!RD->isEmpty())
return true;
EmptyClassOffsetsMapTy::const_iterator I = EmptyClassOffsets.find(Offset);
if (I == EmptyClassOffsets.end())
return true;
const ClassVectorTy &Classes = I->second;
if (std::find(Classes.begin(), Classes.end(), RD) == Classes.end())
return true;
// There is already an empty class of the same type at this offset.
return false;
}
void EmptySubobjectMap::AddSubobjectAtOffset(const CXXRecordDecl *RD,
CharUnits Offset) {
// We only care about empty bases.
if (!RD->isEmpty())
return;
// If we have empty structures inside a union, we can assign both
// the same offset. Just avoid pushing them twice in the list.
ClassVectorTy &Classes = EmptyClassOffsets[Offset];
if (std::find(Classes.begin(), Classes.end(), RD) != Classes.end())
return;
Classes.push_back(RD);
// Update the empty class offset.
if (Offset > MaxEmptyClassOffset)
MaxEmptyClassOffset = Offset;
}
bool
EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
                                                 CharUnits Offset) {
  // We don't have to keep looking past the maximum offset that's known to
  // contain an empty class.
  if (!AnyEmptySubobjectsBeyondOffset(Offset))
    return true;

  // The base itself must not collide with an already-placed empty class.
  if (!CanPlaceSubobjectAtOffset(Info->Class, Offset))
    return false;

  // Traverse all non-virtual bases.
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
  for (const BaseSubobjectInfo *Base : Info->Bases) {
    if (Base->IsVirtual)
      continue;

    CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);

    if (!CanPlaceBaseSubobjectAtOffset(Base, BaseOffset))
      return false;
  }

  if (Info->PrimaryVirtualBaseInfo) {
    BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo;

    // Only check the primary virtual base if this Info is the one that
    // claimed it (Derived points back here); otherwise it is laid out with
    // some other base.
    if (Info == PrimaryVirtualBaseInfo->Derived) {
      if (!CanPlaceBaseSubobjectAtOffset(PrimaryVirtualBaseInfo, Offset))
        return false;
    }
  }

  // Traverse all member variables.
  unsigned FieldNo = 0;
  for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
       E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
    // Bit-fields have integral type and thus contain no record subobjects.
    if (I->isBitField())
      continue;

    CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
    if (!CanPlaceFieldSubobjectAtOffset(*I, FieldOffset))
      return false;
  }

  return true;
}
void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
                                                  CharUnits Offset,
                                                  bool PlacingEmptyBase) {
  if (!PlacingEmptyBase && Offset >= SizeOfLargestEmptySubobject) {
    // We know that the only empty subobjects that can conflict with empty
    // subobject of non-empty bases, are empty bases that can be placed at
    // offset zero. Because of this, we only need to keep track of empty base
    // subobjects with offsets less than the size of the largest empty
    // subobject for our class.
    return;
  }

  AddSubobjectAtOffset(Info->Class, Offset);

  // Traverse all non-virtual bases.
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
  for (const BaseSubobjectInfo *Base : Info->Bases) {
    if (Base->IsVirtual)
      continue;

    CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
    UpdateEmptyBaseSubobjects(Base, BaseOffset, PlacingEmptyBase);
  }

  if (Info->PrimaryVirtualBaseInfo) {
    BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo;

    // Only recurse into the primary virtual base if this Info claimed it.
    if (Info == PrimaryVirtualBaseInfo->Derived)
      UpdateEmptyBaseSubobjects(PrimaryVirtualBaseInfo, Offset,
                                PlacingEmptyBase);
  }

  // Traverse all member variables.
  unsigned FieldNo = 0;
  for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
       E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
    // Bit-fields have integral type and thus contain no record subobjects.
    if (I->isBitField())
      continue;

    CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
    UpdateEmptyFieldSubobjects(*I, FieldOffset);
  }
}
bool EmptySubobjectMap::CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
                                             CharUnits Offset) {
  // A record with no empty subobjects at all can never have a collision.
  if (SizeOfLargestEmptySubobject.isZero())
    return true;

  bool CanPlace = CanPlaceBaseSubobjectAtOffset(Info, Offset);
  if (CanPlace)
    // The base fits here; record its empty subobjects so subsequent
    // placements can be checked against them.
    UpdateEmptyBaseSubobjects(Info, Offset, Info->Class->isEmpty());

  return CanPlace;
}
bool
EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
                                                  const CXXRecordDecl *Class,
                                                  CharUnits Offset) const {
  // We don't have to keep looking past the maximum offset that's known to
  // contain an empty class.
  if (!AnyEmptySubobjectsBeyondOffset(Offset))
    return true;

  if (!CanPlaceSubobjectAtOffset(RD, Offset))
    return false;

  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  // Traverse all non-virtual bases.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    if (Base.isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();

    CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
    if (!CanPlaceFieldSubobjectAtOffset(BaseDecl, Class, BaseOffset))
      return false;
  }

  // Class is the most-derived class of the field's type; virtual bases are
  // only laid out (and therefore only checked) at that level.
  if (RD == Class) {
    // This is the most derived class, traverse virtual bases as well.
    for (const CXXBaseSpecifier &Base : RD->vbases()) {
      const CXXRecordDecl *VBaseDecl = Base.getType()->getAsCXXRecordDecl();

      CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
      if (!CanPlaceFieldSubobjectAtOffset(VBaseDecl, Class, VBaseOffset))
        return false;
    }
  }

  // Traverse all member variables.
  unsigned FieldNo = 0;
  for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
       I != E; ++I, ++FieldNo) {
    // Bit-fields have integral type and thus contain no record subobjects.
    if (I->isBitField())
      continue;

    CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);

    if (!CanPlaceFieldSubobjectAtOffset(*I, FieldOffset))
      return false;
  }

  return true;
}
bool
EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
                                                  CharUnits Offset) const {
  // We don't have to keep looking past the maximum offset that's known to
  // contain an empty class.
  if (!AnyEmptySubobjectsBeyondOffset(Offset))
    return true;

  QualType T = FD->getType();
  // A field of class type is checked directly against its own record.
  if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
    return CanPlaceFieldSubobjectAtOffset(RD, RD, Offset);

  // If we have an array type we need to look at every element.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
    QualType ElemTy = Context.getBaseElementType(AT);
    const RecordType *RT = ElemTy->getAs<RecordType>();
    if (!RT)
      return true; // Arrays of non-record type cannot collide.

    const CXXRecordDecl *RD = RT->getAsCXXRecordDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    uint64_t NumElements = Context.getConstantArrayElementCount(AT);
    CharUnits ElementOffset = Offset;
    for (uint64_t I = 0; I != NumElements; ++I) {
      // We don't have to keep looking past the maximum offset that's known to
      // contain an empty class.
      if (!AnyEmptySubobjectsBeyondOffset(ElementOffset))
        return true;

      if (!CanPlaceFieldSubobjectAtOffset(RD, RD, ElementOffset))
        return false;

      ElementOffset += Layout.getSize();
    }
  }

  return true;
}
bool
EmptySubobjectMap::CanPlaceFieldAtOffset(const FieldDecl *FD,
                                         CharUnits Offset) {
  // Reject the offset if it would put two empty subobjects of the same type
  // at the same address.
  bool CanPlace = CanPlaceFieldSubobjectAtOffset(FD, Offset);
  if (CanPlace)
    // Placement succeeded; record the field's empty subobjects for later
    // collision checks.
    UpdateEmptyFieldSubobjects(FD, Offset);

  return CanPlace;
}
void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
                                                   const CXXRecordDecl *Class,
                                                   CharUnits Offset) {
  // We know that the only empty subobjects that can conflict with empty
  // field subobjects are subobjects of empty bases that can be placed at offset
  // zero. Because of this, we only need to keep track of empty field
  // subobjects with offsets less than the size of the largest empty
  // subobject for our class.
  if (Offset >= SizeOfLargestEmptySubobject)
    return;

  AddSubobjectAtOffset(RD, Offset);

  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  // Traverse all non-virtual bases.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    if (Base.isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();

    CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
    UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset);
  }

  // Class is the most-derived class of the field's type; virtual bases are
  // only recorded at that level.
  if (RD == Class) {
    // This is the most derived class, traverse virtual bases as well.
    for (const CXXBaseSpecifier &Base : RD->vbases()) {
      const CXXRecordDecl *VBaseDecl = Base.getType()->getAsCXXRecordDecl();

      CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
      UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset);
    }
  }

  // Traverse all member variables.
  unsigned FieldNo = 0;
  for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
       I != E; ++I, ++FieldNo) {
    // Bit-fields have integral type and thus contain no record subobjects.
    if (I->isBitField())
      continue;

    CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);

    UpdateEmptyFieldSubobjects(*I, FieldOffset);
  }
}
// Record the empty subobjects of the given field -- a record, or a constant
// array of records -- placed at Offset.
void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD,
                                                   CharUnits Offset) {
  QualType T = FD->getType();
  if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) {
    UpdateEmptyFieldSubobjects(RD, RD, Offset);
    return;
  }

  // If we have an array type we need to update every element.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
    QualType ElemTy = Context.getBaseElementType(AT);
    const RecordType *RT = ElemTy->getAs<RecordType>();
    if (!RT)
      return; // Arrays of non-record type carry no empty subobjects.

    const CXXRecordDecl *RD = RT->getAsCXXRecordDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    uint64_t NumElements = Context.getConstantArrayElementCount(AT);
    CharUnits ElementOffset = Offset;

    for (uint64_t I = 0; I != NumElements; ++I) {
      // We know that the only empty subobjects that can conflict with empty
      // field subobjects are subobjects of empty bases that can be placed at
      // offset zero. Because of this, we only need to keep track of empty field
      // subobjects with offsets less than the size of the largest empty
      // subobject for our class.
      if (ElementOffset >= SizeOfLargestEmptySubobject)
        return;

      UpdateEmptyFieldSubobjects(RD, RD, ElementOffset);
      ElementOffset += Layout.getSize();
    }
  }
}
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 4> ClassSetTy;

// Itanium-style record layout builder. Accumulates sizes, alignments, base
// and field offsets for one record; results are read out by ASTContext
// (declared a friend below).
class RecordLayoutBuilder {
protected:
  // FIXME: Remove this and make the appropriate fields public.
  friend class clang::ASTContext;

  const ASTContext &Context;

  // Collision tracker for empty subobjects; may be null (no empty-class
  // checking).
  EmptySubobjectMap *EmptySubobjects;

  /// Size - The current size of the record layout.
  uint64_t Size;

  /// Alignment - The current alignment of the record layout.
  CharUnits Alignment;

  /// \brief The alignment if attribute packed is not used.
  CharUnits UnpackedAlignment;

  // Field offsets in bits, in declaration order.
  SmallVector<uint64_t, 16> FieldOffsets;

  /// \brief Whether the external AST source has provided a layout for this
  /// record.
  unsigned UseExternalLayout : 1;

  /// \brief Whether we need to infer alignment, even when we have an
  /// externally-provided layout.
  unsigned InferAlignment : 1;

  /// Packed - Whether the record is packed or not.
  unsigned Packed : 1;

  unsigned IsUnion : 1;

  unsigned IsMac68kAlign : 1;

  unsigned IsMsStruct : 1;

  /// UnfilledBitsInLastUnit - If the last field laid out was a bitfield,
  /// this contains the number of bits in the last unit that can be used for
  /// an adjacent bitfield if necessary. The unit in question is usually
  /// a byte, but larger units are used if IsMsStruct.
  unsigned char UnfilledBitsInLastUnit;

  /// LastBitfieldTypeSize - If IsMsStruct, represents the size of the type
  /// of the previous field if it was a bitfield.
  unsigned char LastBitfieldTypeSize;

  /// MaxFieldAlignment - The maximum allowed field alignment. This is set by
  /// #pragma pack.
  CharUnits MaxFieldAlignment;

  /// DataSize - The data size of the record being laid out.
  uint64_t DataSize;

  CharUnits NonVirtualSize;
  CharUnits NonVirtualAlignment;

  /// PrimaryBase - the primary base class (if one exists) of the class
  /// we're laying out.
  const CXXRecordDecl *PrimaryBase;

  /// PrimaryBaseIsVirtual - Whether the primary base of the class we're laying
  /// out is virtual.
  bool PrimaryBaseIsVirtual;

  /// HasOwnVFPtr - Whether the class provides its own vtable/vftbl
  /// pointer, as opposed to inheriting one from a primary base class.
  bool HasOwnVFPtr;

  typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;

  /// Bases - base classes and their offsets in the record.
  BaseOffsetsMapTy Bases;

  // VBases - virtual base classes and their offsets in the record.
  ASTRecordLayout::VBaseOffsetsMapTy VBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// FirstNearlyEmptyVBase - The first nearly empty virtual base class in
  /// inheritance graph order. Used for determining the primary base class.
  const CXXRecordDecl *FirstNearlyEmptyVBase;

  /// VisitedVirtualBases - A set of all the visited virtual bases, used to
  /// avoid visiting virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;

  /// Valid if UseExternalLayout is true.
  ExternalLayout External;

  RecordLayoutBuilder(const ASTContext &Context,
                      EmptySubobjectMap *EmptySubobjects)
    : Context(Context), EmptySubobjects(EmptySubobjects), Size(0),
      Alignment(CharUnits::One()), UnpackedAlignment(CharUnits::One()),
      UseExternalLayout(false), InferAlignment(false),
      Packed(false), IsUnion(false), IsMac68kAlign(false), IsMsStruct(false),
      UnfilledBitsInLastUnit(0), LastBitfieldTypeSize(0),
      MaxFieldAlignment(CharUnits::Zero()),
      DataSize(0), NonVirtualSize(CharUnits::Zero()),
      NonVirtualAlignment(CharUnits::One()),
      PrimaryBase(nullptr), PrimaryBaseIsVirtual(false),
      HasOwnVFPtr(false),
      FirstNearlyEmptyVBase(nullptr) {}

  void Layout(const RecordDecl *D);
  void Layout(const CXXRecordDecl *D);
  void Layout(const ObjCInterfaceDecl *D);

  void LayoutFields(const RecordDecl *D);
  void LayoutField(const FieldDecl *D, bool InsertExtraPadding);
  void LayoutWideBitField(uint64_t FieldSize, uint64_t TypeSize,
                          bool FieldPacked, const FieldDecl *D);
  void LayoutBitField(const FieldDecl *D);

  TargetCXXABI getCXXABI() const {
    return Context.getTargetInfo().getCXXABI();
  }

  /// BaseSubobjectInfoAllocator - Allocator for BaseSubobjectInfo objects.
  llvm::SpecificBumpPtrAllocator<BaseSubobjectInfo> BaseSubobjectInfoAllocator;

  typedef llvm::DenseMap<const CXXRecordDecl *, BaseSubobjectInfo *>
    BaseSubobjectInfoMapTy;

  /// VirtualBaseInfo - Map from all the (direct or indirect) virtual bases
  /// of the class we're laying out to their base subobject info.
  BaseSubobjectInfoMapTy VirtualBaseInfo;

  /// NonVirtualBaseInfo - Map from all the direct non-virtual bases of the
  /// class we're laying out to their base subobject info.
  BaseSubobjectInfoMapTy NonVirtualBaseInfo;

  /// ComputeBaseSubobjectInfo - Compute the base subobject information for the
  /// bases of the given class.
  void ComputeBaseSubobjectInfo(const CXXRecordDecl *RD);

  /// ComputeBaseSubobjectInfo - Compute the base subobject information for a
  /// single class and all of its base classes.
  BaseSubobjectInfo *ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
                                              bool IsVirtual,
                                              BaseSubobjectInfo *Derived);

  /// DeterminePrimaryBase - Determine the primary base of the given class.
  void DeterminePrimaryBase(const CXXRecordDecl *RD);

  void SelectPrimaryVBase(const CXXRecordDecl *RD);

  void EnsureVTablePointerAlignment(CharUnits UnpackedBaseAlign);

  /// LayoutNonVirtualBases - Determines the primary base class (if any) and
  /// lays it out. Will then proceed to lay out all non-virtual base classes.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD);

  /// LayoutNonVirtualBase - Lays out a single non-virtual base.
  void LayoutNonVirtualBase(const BaseSubobjectInfo *Base);

  void AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
                                    CharUnits Offset);

  /// LayoutVirtualBases - Lays out all the virtual bases.
  void LayoutVirtualBases(const CXXRecordDecl *RD,
                          const CXXRecordDecl *MostDerivedClass);

  /// LayoutVirtualBase - Lays out a single virtual base.
  void LayoutVirtualBase(const BaseSubobjectInfo *Base);

  /// LayoutBase - Will lay out a base and return the offset where it was
  /// placed, in chars.
  CharUnits LayoutBase(const BaseSubobjectInfo *Base);

  /// InitializeLayout - Initialize record layout for the given record decl.
  void InitializeLayout(const Decl *D);

  /// FinishLayout - Finalize record layout. Adjust record size based on the
  /// alignment.
  void FinishLayout(const NamedDecl *D);

  void UpdateAlignment(CharUnits NewAlignment, CharUnits UnpackedNewAlignment);
  void UpdateAlignment(CharUnits NewAlignment) {
    UpdateAlignment(NewAlignment, NewAlignment);
  }

  /// \brief Retrieve the externally-supplied field offset for the given
  /// field.
  ///
  /// \param Field The field whose offset is being queried.
  /// \param ComputedOffset The offset that we've computed for this field.
  uint64_t updateExternalFieldOffset(const FieldDecl *Field,
                                     uint64_t ComputedOffset);

  void CheckFieldPadding(uint64_t Offset, uint64_t UnpaddedOffset,
                          uint64_t UnpackedOffset, unsigned UnpackedAlign,
                          bool isPacked, const FieldDecl *D);

  DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);

  CharUnits getSize() const {
    assert(Size % Context.getCharWidth() == 0);
    return Context.toCharUnitsFromBits(Size);
  }
  uint64_t getSizeInBits() const { return Size; }

  void setSize(CharUnits NewSize) { Size = Context.toBits(NewSize); }
  void setSize(uint64_t NewSize) { Size = NewSize; }

  // NOTE(review): long-standing typo for "getAlignment"; kept as-is because
  // callers outside this view may reference the misspelled name.
  CharUnits getAligment() const { return Alignment; }

  CharUnits getDataSize() const {
    assert(DataSize % Context.getCharWidth() == 0);
    return Context.toCharUnitsFromBits(DataSize);
  }
  uint64_t getDataSizeInBits() const { return DataSize; }

  void setDataSize(CharUnits NewSize) { DataSize = Context.toBits(NewSize); }
  void setDataSize(uint64_t NewSize) { DataSize = NewSize; }

  RecordLayoutBuilder(const RecordLayoutBuilder &) = delete;
  void operator=(const RecordLayoutBuilder &) = delete;
};
} // end anonymous namespace
void
RecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
for (const auto &I : RD->bases()) {
assert(!I.getType()->isDependentType() &&
"Cannot layout class with dependent bases.");
const CXXRecordDecl *Base = I.getType()->getAsCXXRecordDecl();
// Check if this is a nearly empty virtual base.
if (I.isVirtual() && Context.isNearlyEmpty(Base)) {
// If it's not an indirect primary base, then we've found our primary
// base.
if (!IndirectPrimaryBases.count(Base)) {
PrimaryBase = Base;
PrimaryBaseIsVirtual = true;
return;
}
// Is this the first nearly empty virtual base?
if (!FirstNearlyEmptyVBase)
FirstNearlyEmptyVBase = Base;
}
SelectPrimaryVBase(Base);
if (PrimaryBase)
return;
}
}
/// DeterminePrimaryBase - Determine the primary base of the given class.
/// Sets PrimaryBase / PrimaryBaseIsVirtual (and possibly
/// FirstNearlyEmptyVBase and IndirectPrimaryBases) as side effects.
void RecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
  // If the class isn't dynamic, it won't have a primary base.
  if (!RD->isDynamicClass())
    return;

  // Compute all the primary virtual bases for all of our direct and
  // indirect bases, and record all their primary virtual base classes.
  RD->getIndirectPrimaryBases(IndirectPrimaryBases);

  // If the record has a dynamic base class, attempt to choose a primary base
  // class. It is the first (in direct base class order) non-virtual dynamic
  // base class, if one exists.
  for (const auto &I : RD->bases()) {
    // Ignore virtual bases.
    if (I.isVirtual())
      continue;

    const CXXRecordDecl *Base = I.getType()->getAsCXXRecordDecl();

    if (Base->isDynamicClass()) {
      // We found it.
      PrimaryBase = Base;
      PrimaryBaseIsVirtual = false;
      return;
    }
  }

  // Under the Itanium ABI, if there is no non-virtual primary base class,
  // try to compute the primary virtual base. The primary virtual base is
  // the first nearly empty virtual base that is not an indirect primary
  // virtual base class, if one exists.
  if (RD->getNumVBases() != 0) {
    SelectPrimaryVBase(RD);
    if (PrimaryBase)
      return;
  }

  // Otherwise, it is the first indirect primary base class, if one exists.
  if (FirstNearlyEmptyVBase) {
    PrimaryBase = FirstNearlyEmptyVBase;
    PrimaryBaseIsVirtual = true;
    return;
  }

  assert(!PrimaryBase && "Should not get here with a primary base!");
}
// Build (or retrieve, for virtual bases) the BaseSubobjectInfo for RD and,
// recursively, for all its bases. Also wires up primary-virtual-base
// "claims": a virtual base's Derived pointer records which Info uses it as
// its primary base.
BaseSubobjectInfo *
RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
                                              bool IsVirtual,
                                              BaseSubobjectInfo *Derived) {
  BaseSubobjectInfo *Info;

  if (IsVirtual) {
    // Check if we already have info about this virtual base.
    BaseSubobjectInfo *&InfoSlot = VirtualBaseInfo[RD];
    if (InfoSlot) {
      assert(InfoSlot->Class == RD && "Wrong class for virtual base info!");
      return InfoSlot;
    }

    // We don't, create it.
    InfoSlot = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo;
    Info = InfoSlot;
  } else {
    Info = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo;
  }

  Info->Class = RD;
  Info->IsVirtual = IsVirtual;
  Info->Derived = nullptr;
  Info->PrimaryVirtualBaseInfo = nullptr;

  const CXXRecordDecl *PrimaryVirtualBase = nullptr;
  BaseSubobjectInfo *PrimaryVirtualBaseInfo = nullptr;

  // Check if this base has a primary virtual base.
  if (RD->getNumVBases()) {
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    if (Layout.isPrimaryBaseVirtual()) {
      // This base does have a primary virtual base.
      PrimaryVirtualBase = Layout.getPrimaryBase();
      assert(PrimaryVirtualBase && "Didn't have a primary virtual base!");

      // Now check if we have base subobject info about this primary base.
      PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase);

      if (PrimaryVirtualBaseInfo) {
        if (PrimaryVirtualBaseInfo->Derived) {
          // We did have info about this primary base, and it turns out that it
          // has already been claimed as a primary virtual base for another
          // base.
          PrimaryVirtualBase = nullptr;
        } else {
          // We can claim this base as our primary base.
          Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo;
          PrimaryVirtualBaseInfo->Derived = Info;
        }
      }
    }
  }

  // Now go through all direct bases.
  for (const auto &I : RD->bases()) {
    bool IsVirtual = I.isVirtual();

    const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();

    Info->Bases.push_back(ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, Info));
  }

  if (PrimaryVirtualBase && !PrimaryVirtualBaseInfo) {
    // Traversing the bases must have created the base info for our primary
    // virtual base.
    PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase);
    assert(PrimaryVirtualBaseInfo &&
           "Did not create a primary virtual base!");

    // Claim the primary virtual base as our primary virtual base.
    Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo;
    PrimaryVirtualBaseInfo->Derived = Info;
  }

  return Info;
}
void RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD) {
  // Build subobject info for every direct base and register it in the
  // appropriate map.
  for (const auto &BaseSpec : RD->bases()) {
    bool IsVirtual = BaseSpec.isVirtual();
    const CXXRecordDecl *BaseDecl = BaseSpec.getType()->getAsCXXRecordDecl();

    BaseSubobjectInfo *Info =
        ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, nullptr);

    if (!IsVirtual) {
      // Direct non-virtual bases are tracked in their own map.
      assert(!NonVirtualBaseInfo.count(BaseDecl) &&
             "Non-virtual base already exists!");
      NonVirtualBaseInfo.insert(std::make_pair(BaseDecl, Info));
    } else {
      // Virtual bases were registered by the recursive call above.
      assert(VirtualBaseInfo.count(BaseDecl) &&
             "Did not add virtual base!");
    }
  }
}
void
RecordLayoutBuilder::EnsureVTablePointerAlignment(CharUnits UnpackedBaseAlign) {
  // Packed records drop the vtable pointer's alignment to one byte.
  CharUnits BaseAlign = Packed ? CharUnits::One() : UnpackedBaseAlign;

  // #pragma pack's maximum field alignment caps both alignments.
  if (!MaxFieldAlignment.isZero()) {
    if (MaxFieldAlignment < BaseAlign)
      BaseAlign = MaxFieldAlignment;
    if (MaxFieldAlignment < UnpackedBaseAlign)
      UnpackedBaseAlign = MaxFieldAlignment;
  }

  // Bump the record size up to the pointer alignment and keep the data size
  // in sync with it.
  setSize(getSize().RoundUpToAlignment(BaseAlign));
  setDataSize(getSize());

  // Fold the new alignment into the record's alignment.
  UpdateAlignment(BaseAlign, UnpackedBaseAlign);
}
void
RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
  // Then, determine the primary base class.
  DeterminePrimaryBase(RD);

  // Compute base subobject info.
  ComputeBaseSubobjectInfo(RD);

  // If we have a primary base class, lay it out.
  if (PrimaryBase) {
    if (PrimaryBaseIsVirtual) {
      // If the primary virtual base was a primary virtual base of some other
      // base class we'll have to steal it.
      BaseSubobjectInfo *PrimaryBaseInfo = VirtualBaseInfo.lookup(PrimaryBase);
      PrimaryBaseInfo->Derived = nullptr;

      // We have a virtual primary base, insert it as an indirect primary base.
      IndirectPrimaryBases.insert(PrimaryBase);

      assert(!VisitedVirtualBases.count(PrimaryBase) &&
             "vbase already visited!");
      VisitedVirtualBases.insert(PrimaryBase);

      LayoutVirtualBase(PrimaryBaseInfo);
    } else {
      BaseSubobjectInfo *PrimaryBaseInfo =
        NonVirtualBaseInfo.lookup(PrimaryBase);
      assert(PrimaryBaseInfo &&
             "Did not find base info for non-virtual primary base!");

      LayoutNonVirtualBase(PrimaryBaseInfo);
    }

  // If this class needs a vtable/vf-table and didn't get one from a
  // primary base, add it in now.
  } else if (RD->isDynamicClass()) {
    assert(DataSize == 0 && "Vtable pointer must be at offset zero!");
    CharUnits PtrWidth =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
    CharUnits PtrAlign =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
    EnsureVTablePointerAlignment(PtrAlign);
    HasOwnVFPtr = true;
    // Reserve one pointer's worth of space at offset zero for the vtable
    // pointer.
    setSize(getSize() + PtrWidth);
    setDataSize(getSize());
  }

  // Now lay out the non-virtual bases.
  for (const auto &I : RD->bases()) {

    // Ignore virtual bases.
    if (I.isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl = I.getType()->getAsCXXRecordDecl();

    // Skip the primary base, because we've already laid it out. The
    // !PrimaryBaseIsVirtual check is required because we might have a
    // non-virtual base of the same type as a primary virtual base.
    if (BaseDecl == PrimaryBase && !PrimaryBaseIsVirtual)
      continue;

    // Lay out the base.
    BaseSubobjectInfo *BaseInfo = NonVirtualBaseInfo.lookup(BaseDecl);
    assert(BaseInfo && "Did not find base info for non-virtual base!");

    LayoutNonVirtualBase(BaseInfo);
  }
}
void RecordLayoutBuilder::LayoutNonVirtualBase(const BaseSubobjectInfo *Base) {
// Layout the base.
CharUnits Offset = LayoutBase(Base);
// Add its base class offset.
assert(!Bases.count(Base->Class) && "base offset already exists!");
Bases.insert(std::make_pair(Base->Class, Offset));
AddPrimaryVirtualBaseOffsets(Base, Offset);
}
void
RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
CharUnits Offset) {
// This base isn't interesting, it has no virtual bases.
if (!Info->Class->getNumVBases())
return;
// First, check if we have a virtual primary base to add offsets for.
if (Info->PrimaryVirtualBaseInfo) {
assert(Info->PrimaryVirtualBaseInfo->IsVirtual &&
"Primary virtual base is not virtual!");
if (Info->PrimaryVirtualBaseInfo->Derived == Info) {
// Add the offset.
assert(!VBases.count(Info->PrimaryVirtualBaseInfo->Class) &&
"primary vbase offset already exists!");
VBases.insert(std::make_pair(Info->PrimaryVirtualBaseInfo->Class,
ASTRecordLayout::VBaseInfo(Offset, false)));
// Traverse the primary virtual base.
AddPrimaryVirtualBaseOffsets(Info->PrimaryVirtualBaseInfo, Offset);
}
}
// Now go through all direct non-virtual bases.
const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
for (const BaseSubobjectInfo *Base : Info->Bases) {
if (Base->IsVirtual)
continue;
CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
AddPrimaryVirtualBaseOffsets(Base, BaseOffset);
}
}
void
RecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                        const CXXRecordDecl *MostDerivedClass) {
  // Determine RD's primary base: the builder's own state for the class being
  // built, otherwise the cached layout.
  const CXXRecordDecl *PrimaryBase;
  bool PrimaryBaseIsVirtual;
  if (MostDerivedClass == RD) {
    PrimaryBase = this->PrimaryBase;
    PrimaryBaseIsVirtual = this->PrimaryBaseIsVirtual;
  } else {
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    PrimaryBase = Layout.getPrimaryBase();
    PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
  }

  for (const CXXBaseSpecifier &Base : RD->bases()) {
    assert(!Base.getType()->isDependentType() &&
           "Cannot layout class with dependent bases.");

    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();

    // Lay out a virtual base unless it is RD's virtual primary base (already
    // placed) or an indirect primary base (placed with its providing class).
    if (Base.isVirtual() &&
        !(BaseDecl == PrimaryBase && PrimaryBaseIsVirtual) &&
        !IndirectPrimaryBases.count(BaseDecl)) {
      // Each virtual base is visited at most once; if this one was already
      // handled, skip it (including the recursion below).
      if (!VisitedVirtualBases.insert(BaseDecl).second)
        continue;

      const BaseSubobjectInfo *BaseInfo = VirtualBaseInfo.lookup(BaseDecl);
      assert(BaseInfo && "Did not find virtual base info!");
      LayoutVirtualBase(BaseInfo);
    }

    // Recurse to pick up virtual bases declared further down the hierarchy.
    if (BaseDecl->getNumVBases())
      LayoutVirtualBases(BaseDecl, MostDerivedClass);
  }
}
void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base) {
assert(!Base->Derived && "Trying to lay out a primary virtual base!");
// Layout the base.
CharUnits Offset = LayoutBase(Base);
// Add its base class offset.
assert(!VBases.count(Base->Class) && "vbase offset already exists!");
VBases.insert(std::make_pair(Base->Class,
ASTRecordLayout::VBaseInfo(Offset, false)));
AddPrimaryVirtualBaseOffsets(Base, Offset);
}
/// Lay out a single base subobject (virtual or non-virtual) and return the
/// offset it was placed at. Updates the record's size, data size and
/// alignment accordingly.
CharUnits RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base->Class);
  CharUnits Offset;
  // Query the external layout to see if it provides an offset.
  bool HasExternalLayout = false;
  if (UseExternalLayout) {
    // Virtual bases are recorded in the virtual-base offset map and
    // non-virtual bases in the non-virtual one. (These two calls were
    // previously swapped, so external layouts looked every base up in the
    // wrong map and base offsets were effectively never found.)
    if (Base->IsVirtual)
      HasExternalLayout = External.getExternalVBaseOffset(Base->Class, Offset);
    else
      HasExternalLayout = External.getExternalNVBaseOffset(Base->Class, Offset);
  }
  CharUnits UnpackedBaseAlign = Layout.getNonVirtualAlignment();
  CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
  // If we have an empty base class, try to place it at offset 0.
  if (Base->Class->isEmpty() &&
      (!HasExternalLayout || Offset == CharUnits::Zero()) &&
      EmptySubobjects->CanPlaceBaseAtOffset(Base, CharUnits::Zero())) {
    setSize(std::max(getSize(), Layout.getSize()));
    UpdateAlignment(BaseAlign, UnpackedBaseAlign);
    return CharUnits::Zero();
  }
  // The maximum field alignment overrides base align.
  if (!MaxFieldAlignment.isZero()) {
    BaseAlign = std::min(BaseAlign, MaxFieldAlignment);
    UnpackedBaseAlign = std::min(UnpackedBaseAlign, MaxFieldAlignment);
  }
  if (!HasExternalLayout) {
    // Round up the current record size to the base's alignment boundary.
    Offset = getDataSize().RoundUpToAlignment(BaseAlign);
    // Try to place the base; bump past offsets where an empty subobject of
    // the same type already lives.
    while (!EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset))
      Offset += BaseAlign;
  } else {
    bool Allowed = EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset);
    (void)Allowed;
    assert(Allowed && "Base subobject externally placed at overlapping offset");
    if (InferAlignment && Offset < getDataSize().RoundUpToAlignment(BaseAlign)){
      // The externally-supplied base offset is before the base offset we
      // computed. Assume that the structure is packed.
      Alignment = CharUnits::One();
      InferAlignment = false;
    }
  }
  if (!Base->Class->isEmpty()) {
    // Update the data size; only the base's non-virtual part is allocated
    // here (its virtual bases go at the end of the most-derived class).
    setDataSize(Offset + Layout.getNonVirtualSize());
    setSize(std::max(getSize(), getDataSize()));
  } else
    // Empty bases contribute to the size but not the data size.
    setSize(std::max(getSize(), Offset + Layout.getSize()));
  // Remember max struct/class alignment.
  UpdateAlignment(BaseAlign, UnpackedBaseAlign);
  return Offset;
}
void RecordLayoutBuilder::InitializeLayout(const Decl *D) {
if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
IsUnion = RD->isUnion();
IsMsStruct = RD->isMsStruct(Context);
}
Packed = D->hasAttr<PackedAttr>();
// Honor the default struct packing maximum alignment flag.
if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) {
MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment);
}
// mac68k alignment supersedes maximum field alignment and attribute aligned,
// and forces all structures to have 2-byte alignment. The IBM docs on it
// allude to additional (more complicated) semantics, especially with regard
// to bit-fields, but gcc appears not to follow that.
if (D->hasAttr<AlignMac68kAttr>()) {
IsMac68kAlign = true;
MaxFieldAlignment = CharUnits::fromQuantity(2);
Alignment = CharUnits::fromQuantity(2);
} else {
if (const MaxFieldAlignmentAttr *MFAA = D->getAttr<MaxFieldAlignmentAttr>())
MaxFieldAlignment = Context.toCharUnitsFromBits(MFAA->getAlignment());
if (unsigned MaxAlign = D->getMaxAlignment())
UpdateAlignment(Context.toCharUnitsFromBits(MaxAlign));
}
// If there is an external AST source, ask it for the various offsets.
if (const RecordDecl *RD = dyn_cast<RecordDecl>(D))
if (ExternalASTSource *Source = Context.getExternalSource()) {
UseExternalLayout = Source->layoutRecordType(
RD, External.Size, External.Align, External.FieldOffsets,
External.BaseOffsets, External.VirtualBaseOffsets);
// Update based on external alignment.
if (UseExternalLayout) {
if (External.Align > 0) {
Alignment = Context.toCharUnitsFromBits(External.Align);
} else {
// The external source didn't have alignment information; infer it.
InferAlignment = true;
}
}
}
}
void RecordLayoutBuilder::Layout(const RecordDecl *D) {
  // Plain (non-C++) record: set up state, place the fields, then round the
  // total size up to the record's own alignment.
  InitializeLayout(D);
  LayoutFields(D);
  FinishLayout(D);
}
void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
  InitializeLayout(RD);

  // Vtable pointer and non-virtual bases come first, then the fields.
  LayoutNonVirtualBases(RD);
  LayoutFields(RD);

  // Everything placed so far constitutes the non-virtual part of the class.
  NonVirtualSize = Context.toCharUnitsFromBits(llvm::RoundUpToAlignment(
      getSizeInBits(), Context.getTargetInfo().getCharAlign()));
  NonVirtualAlignment = Alignment;

  // Virtual bases (and their primary vbase offsets) go at the end.
  LayoutVirtualBases(RD, RD);

  // Round the total size up to the alignment of the class itself.
  FinishLayout(RD);

#ifndef NDEBUG
  // Verify that every direct base — non-virtual and virtual — got an offset.
  for (const CXXBaseSpecifier &B : RD->bases()) {
    if (!B.isVirtual())
      assert(Bases.count(B.getType()->getAsCXXRecordDecl()) &&
             "Did not find base offset!");
  }
  for (const CXXBaseSpecifier &B : RD->vbases())
    assert(VBases.count(B.getType()->getAsCXXRecordDecl()) &&
           "Did not find base offset!");
#endif
}
void RecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D) {
if (ObjCInterfaceDecl *SD = D->getSuperClass()) {
const ASTRecordLayout &SL = Context.getASTObjCInterfaceLayout(SD);
UpdateAlignment(SL.getAlignment());
// We start laying out ivars not at the end of the superclass
// structure, but at the next byte following the last field.
setSize(SL.getDataSize());
setDataSize(getSize());
}
InitializeLayout(D);
// Layout each ivar sequentially.
for (const ObjCIvarDecl *IVD = D->all_declared_ivar_begin(); IVD;
IVD = IVD->getNextIvar())
LayoutField(IVD, false);
// Finally, round the size of the total struct up to the alignment of the
// struct itself.
FinishLayout(D);
}
void RecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  // Lay the fields out sequentially, respecting alignment. In the future
  // this may need to be tweakable by targets.
  bool MayInsertPadding = D->mayInsertExtraPadding(/*EmitRemark=*/true);
  bool HasFlexArray = D->hasFlexibleArrayMember();
  for (auto Field = D->field_begin(), End = D->field_end(); Field != End;) {
    auto Cur = Field++;
    // No extra padding after the last field when the record ends in a
    // flexible array member.
    LayoutField(*Cur, MayInsertPadding && (Field != End || !HasFlexArray));
  }
}
// Rounds the given bit size up to a multiple of the target's char size.
static uint64_t
roundUpSizeToCharAlignment(uint64_t Size,
                           const ASTContext &Context) {
  return llvm::RoundUpToAlignment(Size,
                                  Context.getTargetInfo().getCharAlign());
}
/// Lay out a bit-field whose declared width exceeds the width of its declared
/// type. Per Itanium C++ ABI 2.4 the field is allocated as if its type were
/// the largest integral POD type no wider than the bit-field (C++ only).
///
/// \param FieldSize  the declared bit-field width, in bits.
/// \param TypeSize   the size of the declared type, in bits (unused here
///                   beyond the caller's FieldSize > TypeSize precondition).
/// \param FieldPacked whether the field is packed (forwarded to padding
///                   diagnostics).
/// \param D          the field being laid out.
void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
                                             uint64_t TypeSize,
                                             bool FieldPacked,
                                             const FieldDecl *D) {
  assert(Context.getLangOpts().CPlusPlus &&
         "Can only have wide bit-fields in C++!");
  // Itanium C++ ABI 2.4:
  // If sizeof(T)*8 < n, let T' be the largest integral POD type with
  // sizeof(T')*8 <= n.
  QualType IntegralPODTypes[] = {
    Context.UnsignedCharTy, Context.UnsignedShortTy, Context.UnsignedIntTy,
    Context.UnsignedLongTy, Context.UnsignedLongLongTy
  };
  QualType Type;
  // Candidates are ordered by increasing size; keep the last one that still
  // fits within FieldSize.
  for (const QualType &QT : IntegralPODTypes) {
    uint64_t Size = Context.getTypeSize(QT);
    if (Size > FieldSize)
      break;
    Type = QT;
  }
  assert(!Type.isNull() && "Did not find a type!");
  CharUnits TypeAlign = Context.getTypeAlignInChars(Type);
  // We're not going to use any of the unfilled bits in the last byte.
  UnfilledBitsInLastUnit = 0;
  LastBitfieldTypeSize = 0;
  uint64_t FieldOffset;
  // NOTE: UnfilledBitsInLastUnit was just zeroed above, so this is simply
  // the current data size in bits; the subtraction is kept for symmetry with
  // LayoutBitField/LayoutField.
  uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit;
  if (IsUnion) {
    // In a union every field starts at offset 0; only the union's size grows.
    uint64_t RoundedFieldSize = roundUpSizeToCharAlignment(FieldSize,
                                                           Context);
    setDataSize(std::max(getDataSizeInBits(), RoundedFieldSize));
    FieldOffset = 0;
  } else {
    // The bitfield is allocated starting at the next offset aligned
    // appropriately for T', with length n bits.
    FieldOffset = llvm::RoundUpToAlignment(getDataSizeInBits(),
                                           Context.toBits(TypeAlign));
    uint64_t NewSizeInBits = FieldOffset + FieldSize;
    setDataSize(llvm::RoundUpToAlignment(NewSizeInBits,
                                         Context.getTargetInfo().getCharAlign()));
    UnfilledBitsInLastUnit = getDataSizeInBits() - NewSizeInBits;
  }
  // Place this field at the current location.
  FieldOffsets.push_back(FieldOffset);
  CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, FieldOffset,
                    Context.toBits(TypeAlign), FieldPacked, D);
  // Update the size.
  setSize(std::max(getSizeInBits(), getDataSizeInBits()));
  // Remember max struct/class alignment.
  UpdateAlignment(TypeAlign);
}
/// Lay out a single bit-field member, updating FieldOffsets, the data size,
/// and the running bit-field state (UnfilledBitsInLastUnit /
/// LastBitfieldTypeSize). Handles both the System V-style algorithm and the
/// ms_struct replacement algorithm; see the long discussion below.
void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
  bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
  uint64_t FieldSize = D->getBitWidthValue(Context);
  TypeInfo FieldInfo = Context.getTypeInfo(D->getType());
  uint64_t TypeSize = FieldInfo.Width;
  unsigned FieldAlign = FieldInfo.Align;
  // UnfilledBitsInLastUnit is the difference between the end of the
  // last allocated bitfield (i.e. the first bit offset available for
  // bitfields) and the end of the current data size in bits (i.e. the
  // first bit offset available for non-bitfields). The current data
  // size in bits is always a multiple of the char size; additionally,
  // for ms_struct records it's also a multiple of the
  // LastBitfieldTypeSize (if set).
  // The struct-layout algorithm is dictated by the platform ABI,
  // which in principle could use almost any rules it likes. In
  // practice, UNIXy targets tend to inherit the algorithm described
  // in the System V generic ABI. The basic bitfield layout rule in
  // System V is to place bitfields at the next available bit offset
  // where the entire bitfield would fit in an aligned storage unit of
  // the declared type; it's okay if an earlier or later non-bitfield
  // is allocated in the same storage unit. However, some targets
  // (those that !useBitFieldTypeAlignment(), e.g. ARM APCS) don't
  // require this storage unit to be aligned, and therefore always put
  // the bitfield at the next available bit offset.
  // ms_struct basically requests a complete replacement of the
  // platform ABI's struct-layout algorithm, with the high-level goal
  // of duplicating MSVC's layout. For non-bitfields, this follows
  // the standard algorithm. The basic bitfield layout rule is to
  // allocate an entire unit of the bitfield's declared type
  // (e.g. 'unsigned long'), then parcel it up among successive
  // bitfields whose declared types have the same size, making a new
  // unit as soon as the last can no longer store the whole value.
  // Since it completely replaces the platform ABI's algorithm,
  // settings like !useBitFieldTypeAlignment() do not apply.
  // A zero-width bitfield forces the use of a new storage unit for
  // later bitfields. In general, this occurs by rounding up the
  // current size of the struct as if the algorithm were about to
  // place a non-bitfield of the field's formal type. Usually this
  // does not change the alignment of the struct itself, but it does
  // on some targets (those that useZeroLengthBitfieldAlignment(),
  // e.g. ARM). In ms_struct layout, zero-width bitfields are
  // ignored unless they follow a non-zero-width bitfield.
  // A field alignment restriction (e.g. from #pragma pack) or
  // specification (e.g. from __attribute__((aligned))) changes the
  // formal alignment of the field. For System V, this alters the
  // required alignment of the notional storage unit that must contain
  // the bitfield. For ms_struct, this only affects the placement of
  // new storage units. In both cases, the effect of #pragma pack is
  // ignored on zero-width bitfields.
  // On System V, a packed field (e.g. from #pragma pack or
  // __attribute__((packed))) always uses the next available bit
  // offset.
  // In an ms_struct struct, the alignment of a fundamental type is
  // always equal to its size. This is necessary in order to mimic
  // the i386 alignment rules on targets which might not fully align
  // all types (e.g. Darwin PPC32, where alignof(long long) == 4).
  // First, some simple bookkeeping to perform for ms_struct structs.
  if (IsMsStruct) {
    // The field alignment for integer types is always the size.
    FieldAlign = TypeSize;
    // If the previous field was not a bitfield, or was a bitfield
    // with a different storage unit size, we're done with that
    // storage unit.
    if (LastBitfieldTypeSize != TypeSize) {
      // Also, ignore zero-length bitfields after non-bitfields.
      if (!LastBitfieldTypeSize && !FieldSize)
        FieldAlign = 1;
      UnfilledBitsInLastUnit = 0;
      LastBitfieldTypeSize = 0;
    }
  }
  // If the field is wider than its declared type, it follows
  // different rules in all cases.
  if (FieldSize > TypeSize) {
    LayoutWideBitField(FieldSize, TypeSize, FieldPacked, D);
    return;
  }
  // Compute the next available bit offset.
  uint64_t FieldOffset =
    IsUnion ? 0 : (getDataSizeInBits() - UnfilledBitsInLastUnit);
  // Handle targets that don't honor bitfield type alignment.
  if (!IsMsStruct && !Context.getTargetInfo().useBitFieldTypeAlignment()) {
    // Some such targets do honor it on zero-width bitfields.
    if (FieldSize == 0 &&
        Context.getTargetInfo().useZeroLengthBitfieldAlignment()) {
      // The alignment to round up to is the max of the field's natural
      // alignment and a target-specific fixed value (sometimes zero).
      unsigned ZeroLengthBitfieldBoundary =
        Context.getTargetInfo().getZeroLengthBitfieldBoundary();
      FieldAlign = std::max(FieldAlign, ZeroLengthBitfieldBoundary);
    // If that doesn't apply, just ignore the field alignment.
    } else {
      FieldAlign = 1;
    }
  }
  // Remember the alignment we would have used if the field were not packed.
  unsigned UnpackedFieldAlign = FieldAlign;
  // Ignore the field alignment if the field is packed unless it has zero-size.
  if (!IsMsStruct && FieldPacked && FieldSize != 0)
    FieldAlign = 1;
  // But, if there's an 'aligned' attribute on the field, honor that.
  if (unsigned ExplicitFieldAlign = D->getMaxAlignment()) {
    FieldAlign = std::max(FieldAlign, ExplicitFieldAlign);
    UnpackedFieldAlign = std::max(UnpackedFieldAlign, ExplicitFieldAlign);
  }
  // But, if there's a #pragma pack in play, that takes precedent over
  // even the 'aligned' attribute, for non-zero-width bitfields.
  if (!MaxFieldAlignment.isZero() && FieldSize) {
    unsigned MaxFieldAlignmentInBits = Context.toBits(MaxFieldAlignment);
    FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits);
    UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignmentInBits);
  }
  // For purposes of diagnostics, we're going to simultaneously
  // compute the field offsets that we would have used if we weren't
  // adding any alignment padding or if the field weren't packed.
  uint64_t UnpaddedFieldOffset = FieldOffset;
  uint64_t UnpackedFieldOffset = FieldOffset;
  // Check if we need to add padding to fit the bitfield within an
  // allocation unit with the right size and alignment. The rules are
  // somewhat different here for ms_struct structs.
  if (IsMsStruct) {
    // If it's not a zero-width bitfield, and we can fit the bitfield
    // into the active storage unit (and we haven't already decided to
    // start a new storage unit), just do so, regardless of any other
    // other consideration. Otherwise, round up to the right alignment.
    if (FieldSize == 0 || FieldSize > UnfilledBitsInLastUnit) {
      FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
      UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
                                                     UnpackedFieldAlign);
      UnfilledBitsInLastUnit = 0;
    }
  } else {
    // #pragma pack, with any value, suppresses the insertion of padding.
    bool AllowPadding = MaxFieldAlignment.isZero();
    // Compute the real offset.
    if (FieldSize == 0 ||
        (AllowPadding &&
         (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize)) {
      FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
    }
    // Repeat the computation for diagnostic purposes.
    if (FieldSize == 0 ||
        (AllowPadding &&
         (UnpackedFieldOffset & (UnpackedFieldAlign-1)) + FieldSize > TypeSize))
      UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
                                                     UnpackedFieldAlign);
  }
  // If we're using external layout, give the external layout a chance
  // to override this information.
  if (UseExternalLayout)
    FieldOffset = updateExternalFieldOffset(D, FieldOffset);
  // Okay, place the bitfield at the calculated offset.
  FieldOffsets.push_back(FieldOffset);
  // Bookkeeping:
  // Anonymous members don't affect the overall record alignment,
  // except on targets where they do.
  if (!IsMsStruct &&
      !Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
      !D->getIdentifier())
    FieldAlign = UnpackedFieldAlign = 1;
  // Diagnose differences in layout due to padding or packing.
  if (!UseExternalLayout)
    CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, UnpackedFieldOffset,
                      UnpackedFieldAlign, FieldPacked, D);
  // Update DataSize to include the last byte containing (part of) the bitfield.
  // For unions, this is just a max operation, as usual.
  if (IsUnion) {
    uint64_t RoundedFieldSize = roundUpSizeToCharAlignment(FieldSize,
                                                           Context);
    setDataSize(std::max(getDataSizeInBits(), RoundedFieldSize));
  // For non-zero-width bitfields in ms_struct structs, allocate a new
  // storage unit if necessary.
  } else if (IsMsStruct && FieldSize) {
    // We should have cleared UnfilledBitsInLastUnit in every case
    // where we changed storage units.
    if (!UnfilledBitsInLastUnit) {
      setDataSize(FieldOffset + TypeSize);
      UnfilledBitsInLastUnit = TypeSize;
    }
    UnfilledBitsInLastUnit -= FieldSize;
    LastBitfieldTypeSize = TypeSize;
  // Otherwise, bump the data size up to include the bitfield,
  // including padding up to char alignment, and then remember how
  // bits we didn't use.
  } else {
    uint64_t NewSizeInBits = FieldOffset + FieldSize;
    uint64_t CharAlignment = Context.getTargetInfo().getCharAlign();
    setDataSize(llvm::RoundUpToAlignment(NewSizeInBits, CharAlignment));
    UnfilledBitsInLastUnit = getDataSizeInBits() - NewSizeInBits;
    // The only time we can get here for an ms_struct is if this is a
    // zero-width bitfield, which doesn't count as anything for the
    // purposes of unfilled bits.
    LastBitfieldTypeSize = 0;
  }
  // Update the size.
  setSize(std::max(getSizeInBits(), getDataSizeInBits()));
  // Remember max struct/class alignment.
  UpdateAlignment(Context.toCharUnitsFromBits(FieldAlign),
                  Context.toCharUnitsFromBits(UnpackedFieldAlign));
}
/// Lay out a single (non-static) field, appending its bit offset to
/// FieldOffsets and updating size, data size and alignment. Bit-fields are
/// delegated to LayoutBitField.
///
/// \param InsertExtraPadding when true, pad the field's size up to the next
///        8-byte boundary plus one unit (see the ASanAlignment computation
///        below; presumably for AddressSanitizer field-padding — TODO
///        confirm against the caller of mayInsertExtraPadding).
void RecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                      bool InsertExtraPadding) {
  if (D->isBitField()) {
    LayoutBitField(D);
    return;
  }
  uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastUnit;
  // Reset the unfilled bits.
  UnfilledBitsInLastUnit = 0;
  LastBitfieldTypeSize = 0;
  bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
  // Union members all start at offset zero.
  CharUnits FieldOffset =
    IsUnion ? CharUnits::Zero() : getDataSize();
  CharUnits FieldSize;
  CharUnits FieldAlign;
  if (D->getType()->isIncompleteArrayType()) {
    // This is a flexible array member; we can't directly
    // query getTypeInfo about these, so we figure it out here.
    // Flexible array members don't have any size, but they
    // have to be aligned appropriately for their element type.
    FieldSize = CharUnits::Zero();
    const ArrayType* ATy = Context.getAsArrayType(D->getType());
    FieldAlign = Context.getTypeAlignInChars(ATy->getElementType());
  } else if (const ReferenceType *RT = D->getType()->getAs<ReferenceType>()) {
    // References are laid out like pointers in the pointee's address space.
    unsigned AS = RT->getPointeeType().getAddressSpace();
    FieldSize =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(AS));
    FieldAlign =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(AS));
  } else {
    std::pair<CharUnits, CharUnits> FieldInfo =
      Context.getTypeInfoInChars(D->getType());
    FieldSize = FieldInfo.first;
    FieldAlign = FieldInfo.second;
    if (IsMsStruct) {
      // If MS bitfield layout is required, figure out what type is being
      // laid out and align the field to the width of that type.
      // Resolve all typedefs down to their base type and round up the field
      // alignment if necessary.
      QualType T = Context.getBaseElementType(D->getType());
      if (const BuiltinType *BTy = T->getAs<BuiltinType>()) {
        CharUnits TypeSize = Context.getTypeSizeInChars(BTy);
        if (TypeSize > FieldAlign)
          FieldAlign = TypeSize;
      }
    }
  }
  // The align if the field is not packed. This is to check if the attribute
  // was unnecessary (-Wpacked).
  CharUnits UnpackedFieldAlign = FieldAlign;
  CharUnits UnpackedFieldOffset = FieldOffset;
  if (FieldPacked)
    FieldAlign = CharUnits::One();
  // An 'aligned' attribute can only raise the alignment.
  CharUnits MaxAlignmentInChars =
    Context.toCharUnitsFromBits(D->getMaxAlignment());
  FieldAlign = std::max(FieldAlign, MaxAlignmentInChars);
  UnpackedFieldAlign = std::max(UnpackedFieldAlign, MaxAlignmentInChars);
  // The maximum field alignment overrides the aligned attribute.
  if (!MaxFieldAlignment.isZero()) {
    FieldAlign = std::min(FieldAlign, MaxFieldAlignment);
    UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignment);
  }
  // Round up the current record size to the field's alignment boundary.
  FieldOffset = FieldOffset.RoundUpToAlignment(FieldAlign);
  UnpackedFieldOffset =
    UnpackedFieldOffset.RoundUpToAlignment(UnpackedFieldAlign);
  if (UseExternalLayout) {
    FieldOffset = Context.toCharUnitsFromBits(
                    updateExternalFieldOffset(D, Context.toBits(FieldOffset)));
    if (!IsUnion && EmptySubobjects) {
      // Record the fact that we're placing a field at this offset.
      bool Allowed = EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset);
      (void)Allowed;
      assert(Allowed && "Externally-placed field cannot be placed here");
    }
  } else {
    if (!IsUnion && EmptySubobjects) {
      // Check if we can place the field at this offset.
      while (!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)) {
        // We couldn't place the field at the offset. Try again at a new offset.
        FieldOffset += FieldAlign;
      }
    }
  }
  // Place this field at the current location.
  FieldOffsets.push_back(Context.toBits(FieldOffset));
  if (!UseExternalLayout)
    CheckFieldPadding(Context.toBits(FieldOffset), UnpaddedFieldOffset,
                      Context.toBits(UnpackedFieldOffset),
                      Context.toBits(UnpackedFieldAlign), FieldPacked, D);
  if (InsertExtraPadding) {
    // Grow the field to the next 8-byte boundary, plus one extra 8-byte unit.
    CharUnits ASanAlignment = CharUnits::fromQuantity(8);
    CharUnits ExtraSizeForAsan = ASanAlignment;
    if (FieldSize % ASanAlignment)
      ExtraSizeForAsan +=
          ASanAlignment - CharUnits::fromQuantity(FieldSize % ASanAlignment);
    FieldSize += ExtraSizeForAsan;
  }
  // Reserve space for this field.
  uint64_t FieldSizeInBits = Context.toBits(FieldSize);
  if (IsUnion)
    setDataSize(std::max(getDataSizeInBits(), FieldSizeInBits));
  else
    setDataSize(FieldOffset + FieldSize);
  // Update the size.
  setSize(std::max(getSizeInBits(), getDataSizeInBits()));
  // Remember max struct/class alignment.
  UpdateAlignment(FieldAlign, UnpackedFieldAlign);
}
void RecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
// In C++, records cannot be of size 0.
if (Context.getLangOpts().CPlusPlus && getSizeInBits() == 0) {
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
// Compatibility with gcc requires a class (pod or non-pod)
// which is not empty but of size 0; such as having fields of
// array of zero-length, remains of Size 0
if (RD->isEmpty())
setSize(CharUnits::One());
}
else
setSize(CharUnits::One());
}
// Finally, round the size of the record up to the alignment of the
// record itself.
uint64_t UnpaddedSize = getSizeInBits() - UnfilledBitsInLastUnit;
uint64_t UnpackedSizeInBits =
llvm::RoundUpToAlignment(getSizeInBits(),
Context.toBits(UnpackedAlignment));
CharUnits UnpackedSize = Context.toCharUnitsFromBits(UnpackedSizeInBits);
uint64_t RoundedSize
= llvm::RoundUpToAlignment(getSizeInBits(), Context.toBits(Alignment));
if (UseExternalLayout) {
// If we're inferring alignment, and the external size is smaller than
// our size after we've rounded up to alignment, conservatively set the
// alignment to 1.
if (InferAlignment && External.Size < RoundedSize) {
Alignment = CharUnits::One();
InferAlignment = false;
}
setSize(External.Size);
return;
}
// Set the size to the final size.
setSize(RoundedSize);
unsigned CharBitNum = Context.getTargetInfo().getCharWidth();
if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
// Warn if padding was introduced to the struct/class/union.
if (getSizeInBits() > UnpaddedSize) {
unsigned PadSize = getSizeInBits() - UnpaddedSize;
bool InBits = true;
if (PadSize % CharBitNum == 0) {
PadSize = PadSize / CharBitNum;
InBits = false;
}
Diag(RD->getLocation(), diag::warn_padded_struct_size)
<< Context.getTypeDeclType(RD)
<< PadSize
<< (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1); // plural or not
}
// Warn if we packed it unnecessarily. If the alignment is 1 byte don't
// bother since there won't be alignment issues.
if (Packed && UnpackedAlignment > CharUnits::One() &&
getSize() == UnpackedSize)
Diag(D->getLocation(), diag::warn_unnecessary_packed)
<< Context.getTypeDeclType(RD);
}
}
void RecordLayoutBuilder::UpdateAlignment(CharUnits NewAlignment,
                                          CharUnits UnpackedNewAlignment) {
  // mac68k alignment is fixed, and an externally supplied layout that also
  // provided the overall alignment wins over anything computed here.
  if (IsMac68kAlign || (UseExternalLayout && !InferAlignment))
    return;

  if (Alignment < NewAlignment) {
    assert(llvm::isPowerOf2_64(NewAlignment.getQuantity()) &&
           "Alignment not a power of 2");
    Alignment = NewAlignment;
  }

  if (UnpackedAlignment < UnpackedNewAlignment) {
    assert(llvm::isPowerOf2_64(UnpackedNewAlignment.getQuantity()) &&
           "Alignment not a power of 2");
    UnpackedAlignment = UnpackedNewAlignment;
  }
}
uint64_t
RecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field,
                                               uint64_t ComputedOffset) {
  // The external layout always wins. If it places the field earlier than our
  // own computation would have, the record must be packed more tightly than
  // we assumed, so stop inferring alignment and fall back to byte alignment.
  uint64_t ExternalOffset = External.getExternalFieldOffset(Field);
  if (InferAlignment && ExternalOffset < ComputedOffset) {
    Alignment = CharUnits::One();
    InferAlignment = false;
  }
  return ExternalOffset;
}
/// \brief Get diagnostic %select index for tag kind for
/// field padding diagnostic message.
/// WARNING: Indexes apply to particular diagnostics only!
///
/// \returns diagnostic %select index.
static unsigned getPaddingDiagFromTagKind(TagTypeKind Tag) {
  if (Tag == TTK_Struct)
    return 0;
  if (Tag == TTK_Interface)
    return 1;
  if (Tag == TTK_Class)
    return 2;
  llvm_unreachable("Invalid tag kind for field padding diagnostic!");
}
void RecordLayoutBuilder::CheckFieldPadding(uint64_t Offset,
                                            uint64_t UnpaddedOffset,
                                            uint64_t UnpackedOffset,
                                            unsigned UnpackedAlign,
                                            bool isPacked,
                                            const FieldDecl *D) {
  // ObjC ivars are exempt from the warning: interfaces are generally not
  // used for padding tricks.
  if (isa<ObjCIvarDecl>(D))
    return;

  // Decls synthesized without a SourceLocation (e.g. by codegen clients of
  // the AST) don't get warnings either.
  if (D->getLocation().isInvalid())
    return;

  unsigned CharBitNum = Context.getTargetInfo().getCharWidth();

  // Warn if padding had to be inserted before this field.
  if (!IsUnion && Offset > UnpaddedOffset) {
    unsigned PadSize = Offset - UnpaddedOffset;
    bool InBits = true;
    if (PadSize % CharBitNum == 0) {
      PadSize /= CharBitNum;
      InBits = false;
    }
    unsigned TagKindIdx =
        getPaddingDiagFromTagKind(D->getParent()->getTagKind());
    if (D->getIdentifier())
      Diag(D->getLocation(), diag::warn_padded_struct_field)
          << TagKindIdx << Context.getTypeDeclType(D->getParent()) << PadSize
          << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1) // plural or not
          << D->getIdentifier();
    else
      Diag(D->getLocation(), diag::warn_padded_struct_anon_field)
          << TagKindIdx << Context.getTypeDeclType(D->getParent()) << PadSize
          << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1); // plural or not
  }

  // Warn when packing made no difference; byte-aligned fields never have
  // alignment issues, so skip that case.
  if (isPacked && UnpackedAlign > CharBitNum && Offset == UnpackedOffset)
    Diag(D->getLocation(), diag::warn_unnecessary_packed)
        << D->getIdentifier();
}
static const CXXMethodDecl *computeKeyFunction(ASTContext &Context,
                                               const CXXRecordDecl *RD) {
  // Only a polymorphic class can have a key function, and assigning one to a
  // class that is not externally visible is pointless since it cannot affect
  // the ABI.
  if (!RD->isPolymorphic() || !RD->isExternallyVisible())
    return nullptr;

  // Template instantiations don't have key functions per Itanium C++ ABI 5.2.6.
  // Same behavior as GCC.
  switch (RD->getTemplateSpecializationKind()) {
  case TSK_ImplicitInstantiation:
  case TSK_ExplicitInstantiationDeclaration:
  case TSK_ExplicitInstantiationDefinition:
    return nullptr;
  default:
    break;
  }

  const bool InlineKeyFunctionAllowed =
      Context.getTargetInfo().getCXXABI().canKeyFunctionBeInline();

  // The key function is the first non-pure, non-inline, user-provided virtual
  // member function. Implicit members are skipped because they are always
  // marked inline but have no body until they are defined.
  for (const CXXMethodDecl *Method : RD->methods()) {
    if (!Method->isVirtual() || Method->isPure() || Method->isImplicit() ||
        Method->isInlineSpecified() || Method->hasInlineBody() ||
        !Method->isUserProvided())
      continue;

    // In certain ABIs, functions with out-of-line inline definitions cannot
    // serve as the key function either.
    if (!InlineKeyFunctionAllowed) {
      const FunctionDecl *Definition;
      if (Method->hasBody(Definition) && Definition->isInlineSpecified())
        continue;
    }

    // If the candidate key function is dllimport but the class isn't, then the
    // class has no key function: the DLL that exports the key function won't
    // export the vtable in this case.
    if (Method->hasAttr<DLLImportAttr>() && !RD->hasAttr<DLLImportAttr>())
      return nullptr;

    // We found it.
    return Method;
  }
  return nullptr;
}
DiagnosticBuilder
RecordLayoutBuilder::Diag(SourceLocation Loc, unsigned DiagID) {
  // Forward to the ASTContext's diagnostics engine.
  DiagnosticsEngine &Diags = Context.getDiagnostics();
  return Diags.Report(Loc, DiagID);
}
/// Does the target C++ ABI require us to skip over the tail-padding
/// of the given class (considering it as a base class) when allocating
/// objects?
static bool mustSkipTailPadding(TargetCXXABI ABI, const CXXRecordDecl *RD) {
  const auto Rules = ABI.getTailPaddingUseRules();
  if (Rules == TargetCXXABI::AlwaysUseTailPadding)
    return false;
  if (Rules == TargetCXXABI::UseTailPaddingUnlessPOD03) {
    // FIXME: To the extent that this is meant to cover the Itanium ABI
    // rules, we should implement the restrictions about over-sized
    // bitfields:
    //
    // http://mentorembedded.github.com/cxx-abi/abi.html#POD :
    //   In general, a type is considered a POD for the purposes of
    //   layout if it is a POD type (in the sense of ISO C++
    //   [basic.types]). However, a POD-struct or POD-union (in the
    //   sense of ISO C++ [class]) with a bitfield member whose
    //   declared width is wider than the declared type of the
    //   bitfield is not a POD for the purpose of layout. Similarly,
    //   an array type is not a POD for the purpose of layout if the
    //   element type of the array is not a POD for the purpose of
    //   layout.
    //
    //   Where references to the ISO C++ are made in this paragraph,
    //   the Technical Corrigendum 1 version of the standard is
    //   intended.
    return RD->isPOD();
  }
  if (Rules == TargetCXXABI::UseTailPaddingUnlessPOD11) {
    // This is equivalent to RD->getTypeForDecl().isCXX11PODType(), but with a
    // lot of abstraction penalty stripped off. This does assume that these
    // properties are set correctly even in C++98 mode; fortunately, that is
    // true because we want to assign consistent semantics to the type-traits
    // intrinsics (or at least as many of them as possible).
    return RD->isTrivial() && RD->isStandardLayout();
  }
  llvm_unreachable("bad tail-padding use kind");
}
static bool isMsLayout(const RecordDecl* D) {
return D->getASTContext().getTargetInfo().getCXXABI().isMicrosoft();
}
// This section contains an implementation of struct layout that is, up to the
// included tests, compatible with cl.exe (2013). The layout produced is
// significantly different than those produced by the Itanium ABI. Here we note
// the most important differences.
//
// * The alignment of bitfields in unions is ignored when computing the
// alignment of the union.
// * The existence of a zero-width bitfield that occurs after anything other
// a non-zero length bitfield is ignored.
// * There is no explicit primary base for the purposes of layout. All bases
// with vfptrs are laid out first, followed by all bases without vfptrs.
// * The Itanium equivalent vtable pointers are split into a vfptr (virtual
// function pointer) and a vbptr (virtual base pointer). They can each be
//   shared with a non-virtual base. These bases need not be the same. vfptrs
// always occur at offset 0. vbptrs can occur at an arbitrary offset and are
//   placed after the lexicographically last non-virtual base. This placement
// is always before fields but can be in the middle of the non-virtual bases
// due to the two-pass layout scheme for non-virtual-bases.
// * Virtual bases sometimes require a 'vtordisp' field that is laid out before
// the virtual base and is used in conjunction with virtual overrides during
// construction and destruction. This is always a 4 byte value and is used as
// an alternative to constructor vtables.
// * vtordisps are allocated in a block of memory with size and alignment equal
// to the alignment of the completed structure (before applying __declspec(
// align())). The vtordisp always occur at the end of the allocation block,
// immediately prior to the virtual base.
// * vfptrs are injected after all bases and fields have been laid out. In
// order to guarantee proper alignment of all fields, the vfptr injection
// pushes all bases and fields back by the alignment imposed by those bases
// and fields. This can potentially add a significant amount of padding.
// vfptrs are always injected at offset 0.
// * vbptrs are injected after all bases and fields have been laid out. In
// order to guarantee proper alignment of all fields, the vfptr injection
// pushes all bases and fields back by the alignment imposed by those bases
// and fields. This can potentially add a significant amount of padding.
// vbptrs are injected immediately after the last non-virtual base as
//   lexicographically ordered in the code. If this site isn't pointer aligned
// the vbptr is placed at the next properly aligned location. Enough padding
// is added to guarantee a fit.
// * The last zero sized non-virtual base can be placed at the end of the
// struct (potentially aliasing another object), or may alias with the first
// field, even if they are of the same type.
// * The last zero size virtual base may be placed at the end of the struct
// potentially aliasing another object.
// * The ABI attempts to avoid aliasing of zero sized bases by adding padding
// between bases or vbases with specific properties. The criteria for
// additional padding between two bases is that the first base is zero sized
// or ends with a zero sized subobject and the second base is zero sized or
// trails with a zero sized base or field (sharing of vfptrs can reorder the
//   layout of the bases so the leading base is not always the first one
//   declared).
// This rule does take into account fields that are not records, so padding
// will occur even if the last field is, e.g. an int. The padding added for
// bases is 1 byte. The padding added between vbases depends on the alignment
// of the object but is at least 4 bytes (in both 32 and 64 bit modes).
// * There is no concept of non-virtual alignment, non-virtual alignment and
// alignment are always identical.
// * There is a distinction between alignment and required alignment.
// __declspec(align) changes the required alignment of a struct. This
// alignment is _always_ obeyed, even in the presence of #pragma pack. A
// record inherits required alignment from all of its fields and bases.
// * __declspec(align) on bitfields has the effect of changing the bitfield's
// alignment instead of its required alignment. This is the only known way
// to make the alignment of a struct bigger than 8. Interestingly enough
// this alignment is also immune to the effects of #pragma pack and can be
// used to create structures with large alignment under #pragma pack.
// However, because it does not impact required alignment, such a structure,
// when used as a field or base, will not be aligned if #pragma pack is
// still active at the time of use.
//
// Known incompatibilities:
// * all: #pragma pack between fields in a record
// * 2010 and back: If the last field in a record is a bitfield, every object
// laid out after the record will have extra padding inserted before it. The
// extra padding will have size equal to the size of the storage class of the
// bitfield. 0 sized bitfields don't exhibit this behavior and the extra
// padding can be avoided by adding a 0 sized bitfield after the non-zero-
// sized bitfield.
// * 2012 and back: In 64-bit mode, if the alignment of a record is 16 or
// greater due to __declspec(align()) then a second layout phase occurs after
//   the locations of the vf and vb pointers are known. This layout phase
// suffers from the "last field is a bitfield" bug in 2010 and results in
// _every_ field getting padding put in front of it, potentially including the
//   vfptr, leaving the vfptr at a non-zero location which results in a fault if
// anything tries to read the vftbl. The second layout phase also treats
// bitfields as separate entities and gives them each storage rather than
// packing them. Additionally, because this phase appears to perform a
// (an unstable) sort on the members before laying them out and because merged
// bitfields have the same address, the bitfields end up in whatever order
// the sort left them in, a behavior we could never hope to replicate.
namespace {
/// \brief Implements the Microsoft C++ ABI record layout algorithm described
/// in the long comment above.
struct MicrosoftRecordLayoutBuilder {
  /// \brief Size and alignment of a base or field, after adjustment for
  /// pragma pack and __declspec(align).
  struct ElementInfo {
    CharUnits Size;
    CharUnits Alignment;
  };
  typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;
  MicrosoftRecordLayoutBuilder(const ASTContext &Context) : Context(Context) {}
private:
  MicrosoftRecordLayoutBuilder(const MicrosoftRecordLayoutBuilder &) = delete;
  void operator=(const MicrosoftRecordLayoutBuilder &) = delete;
public:
  /// \brief Lays out a C record.
  void layout(const RecordDecl *RD);
  /// \brief Lays out a C++ record, including bases and virtual bases.
  void cxxLayout(const CXXRecordDecl *RD);
  /// \brief Initializes size and alignment and honors some flags.
  void initializeLayout(const RecordDecl *RD);
  /// \brief Initializes C++ layout: computes alignment and virtual alignment
  /// and the existence of vfptrs and vbptrs. Alignment is needed before the
  /// vfptr is laid out.
  void initializeCXXLayout(const CXXRecordDecl *RD);
  void layoutNonVirtualBases(const CXXRecordDecl *RD);
  void layoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                            const ASTRecordLayout &BaseLayout,
                            const ASTRecordLayout *&PreviousBaseLayout);
  void injectVFPtr(const CXXRecordDecl *RD);
  void injectVBPtr(const CXXRecordDecl *RD);
  /// \brief Lays out the fields of the record. Also rounds size up to
  /// alignment.
  void layoutFields(const RecordDecl *RD);
  void layoutField(const FieldDecl *FD);
  void layoutBitField(const FieldDecl *FD);
  /// \brief Lays out a single zero-width bit-field in the record and handles
  /// special cases associated with zero-width bit-fields.
  void layoutZeroWidthBitField(const FieldDecl *FD);
  void layoutVirtualBases(const CXXRecordDecl *RD);
  void finalizeLayout(const RecordDecl *RD);
  /// \brief Gets the size and alignment of a base taking pragma pack and
  /// __declspec(align) into account.
  ElementInfo getAdjustedElementInfo(const ASTRecordLayout &Layout);
  /// \brief Gets the size and alignment of a field taking pragma pack and
  /// __declspec(align) into account. It also updates RequiredAlignment as a
  /// side effect because it is most convenient to do so here.
  ElementInfo getAdjustedElementInfo(const FieldDecl *FD);
  /// \brief Places a field at an offset in CharUnits.
  void placeFieldAtOffset(CharUnits FieldOffset) {
    FieldOffsets.push_back(Context.toBits(FieldOffset));
  }
  /// \brief Places a bitfield at a bit offset.
  void placeFieldAtBitOffset(uint64_t FieldOffset) {
    FieldOffsets.push_back(FieldOffset);
  }
  /// \brief Compute the set of virtual bases for which vtordisps are required.
  void computeVtorDispSet(
      llvm::SmallPtrSetImpl<const CXXRecordDecl *> &HasVtorDispSet,
      const CXXRecordDecl *RD) const;
  const ASTContext &Context;
  /// \brief The size of the record being laid out.
  CharUnits Size;
  /// \brief The non-virtual size of the record layout.
  CharUnits NonVirtualSize;
  /// \brief The data size of the record layout.
  CharUnits DataSize;
  /// \brief The current alignment of the record layout.
  CharUnits Alignment;
  /// \brief The maximum allowed field alignment. This is set by #pragma pack.
  CharUnits MaxFieldAlignment;
  /// \brief The alignment that this record must obey. This is imposed by
  /// __declspec(align()) on the record itself or one of its fields or bases.
  CharUnits RequiredAlignment;
  /// \brief The size of the allocation of the currently active bitfield.
  /// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield
  /// is true.
  CharUnits CurrentBitfieldSize;
  /// \brief Offset to the virtual base table pointer (if one exists).
  CharUnits VBPtrOffset;
  /// \brief Minimum record size possible.
  CharUnits MinEmptyStructSize;
  /// \brief The size and alignment info of a pointer.
  ElementInfo PointerInfo;
  /// \brief The primary base class (if one exists).
  const CXXRecordDecl *PrimaryBase;
  /// \brief The class we share our vb-pointer with.
  const CXXRecordDecl *SharedVBPtrBase;
  /// \brief The collection of field offsets.
  SmallVector<uint64_t, 16> FieldOffsets;
  /// \brief Base classes and their offsets in the record.
  BaseOffsetsMapTy Bases;
  /// \brief virtual base classes and their offsets in the record.
  ASTRecordLayout::VBaseOffsetsMapTy VBases;
  /// \brief The number of remaining bits in our last bitfield allocation.
  /// This value isn't meaningful unless LastFieldIsNonZeroWidthBitfield is
  /// true.
  unsigned RemainingBitsInField;
  /// \brief True if the record being laid out is a union.
  bool IsUnion : 1;
  /// \brief True if the last field laid out was a bitfield and was not 0
  /// width.
  bool LastFieldIsNonZeroWidthBitfield : 1;
  /// \brief True if the class has its own vftable pointer.
  bool HasOwnVFPtr : 1;
  /// \brief True if the class has a vbtable pointer.
  bool HasVBPtr : 1;
  /// \brief True if the last sub-object within the type is zero sized or the
  /// object itself is zero sized. This *does not* count members that are not
  /// records. Only used for MS-ABI.
  bool EndsWithZeroSizedObject : 1;
  /// \brief True if this class is zero sized or first base is zero sized or
  /// has this property. Only used for MS-ABI.
  bool LeadsWithZeroSizedBase : 1;
  /// \brief True if the external AST source provided a layout for this record.
  bool UseExternalLayout : 1;
  /// \brief The layout provided by the external AST source. Only active if
  /// UseExternalLayout is true.
  ExternalLayout External;
};
} // namespace
MicrosoftRecordLayoutBuilder::ElementInfo
MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(
    const ASTRecordLayout &Layout) {
  ElementInfo Result;
  Result.Alignment = Layout.getAlignment();
  // #pragma pack caps the base's alignment.
  if (!MaxFieldAlignment.isZero())
    Result.Alignment = std::min(Result.Alignment, MaxFieldAlignment);
  // Track zero-sized subobjects here where the layout is already at hand.
  EndsWithZeroSizedObject = Layout.hasZeroSizedSubObject();
  // The record absorbs the (possibly pack-reduced) alignment; required
  // alignment is accumulated separately because it is not capped by pragma
  // pack and does not apply to the struct alignment at this point.
  Alignment = std::max(Alignment, Result.Alignment);
  RequiredAlignment =
      std::max(RequiredAlignment, Layout.getRequiredAlignment());
  Result.Alignment = std::max(Result.Alignment, Layout.getRequiredAlignment());
  Result.Size = Layout.getNonVirtualSize();
  return Result;
}
MicrosoftRecordLayoutBuilder::ElementInfo
MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(
    const FieldDecl *FD) {
  // Start from the natural size and alignment of the field's type, ignoring
  // any alignment attributes; those are folded in below.
  ElementInfo Result;
  std::tie(Result.Size, Result.Alignment) =
      Context.getTypeInfoInChars(FD->getType()->getUnqualifiedDesugaredType());
  // __declspec(align) on the field itself.
  CharUnits ReqAlign = Context.toCharUnitsFromBits(FD->getMaxAlignment());
  // __declspec(align) on the field's type.
  if (Context.isAlignmentRequired(FD->getType()))
    ReqAlign = std::max(Context.getTypeAlignInChars(FD->getType()), ReqAlign);
  if (FD->isBitField()) {
    // For some reason __declspec align impacts alignment rather than required
    // alignment when it is applied to bitfields.
    Result.Alignment = std::max(Result.Alignment, ReqAlign);
  } else {
    // For record-typed fields, pick up required alignment and trailing
    // zero-sized-object information from the record's own layout.
    if (auto RT =
            FD->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
      auto const &Layout = Context.getASTRecordLayout(RT->getDecl());
      EndsWithZeroSizedObject = Layout.hasZeroSizedSubObject();
      ReqAlign = std::max(ReqAlign, Layout.getRequiredAlignment());
    }
    // Capture required alignment as a side-effect.
    RequiredAlignment = std::max(RequiredAlignment, ReqAlign);
  }
  // Apply pragma pack, then attribute packed, then required alignment, in
  // that order.
  if (!MaxFieldAlignment.isZero())
    Result.Alignment = std::min(Result.Alignment, MaxFieldAlignment);
  if (FD->hasAttr<PackedAttr>())
    Result.Alignment = CharUnits::One();
  Result.Alignment = std::max(Result.Alignment, ReqAlign);
  return Result;
}
void MicrosoftRecordLayoutBuilder::layout(const RecordDecl *RD) {
// For C record layout, zero-sized records always have size 4.
MinEmptyStructSize = CharUnits::fromQuantity(4);
initializeLayout(RD);
layoutFields(RD);
DataSize = Size = Size.RoundUpToAlignment(Alignment);
RequiredAlignment = std::max(
RequiredAlignment, Context.toCharUnitsFromBits(RD->getMaxAlignment()));
finalizeLayout(RD);
}
void MicrosoftRecordLayoutBuilder::cxxLayout(const CXXRecordDecl *RD) {
  // HLSL Change Begins
  if (Context.getLangOpts().HLSL) {
    MinEmptyStructSize = CharUnits::fromQuantity(0);
  }
  else { // HLSL Change Ends
  // The C++ standard says that empty structs have size 1.
  MinEmptyStructSize = CharUnits::One();
  } // HLSL Change
  initializeLayout(RD);
  initializeCXXLayout(RD);
  // Lay out non-virtual bases then fields, then inject the vbptr and vfptr,
  // which push back everything laid out so far.
  layoutNonVirtualBases(RD);
  layoutFields(RD);
  injectVBPtr(RD);
  injectVFPtr(RD);
  // Owning a vfptr, or a vbptr that is not shared with a base, makes the
  // record at least pointer-aligned.
  if (HasOwnVFPtr || (HasVBPtr && !SharedVBPtrBase))
    Alignment = std::max(Alignment, PointerInfo.Alignment);
  // Round the non-virtual size up; #pragma pack caps the rounding alignment.
  auto RoundingAlignment = Alignment;
  if (!MaxFieldAlignment.isZero())
    RoundingAlignment = std::min(RoundingAlignment, MaxFieldAlignment);
  NonVirtualSize = Size = Size.RoundUpToAlignment(RoundingAlignment);
  // __declspec(align) on the record itself contributes required alignment.
  RequiredAlignment = std::max(
      RequiredAlignment, Context.toCharUnitsFromBits(RD->getMaxAlignment()));
  layoutVirtualBases(RD);
  finalizeLayout(RD);
}
void MicrosoftRecordLayoutBuilder::initializeLayout(const RecordDecl *RD) {
  IsUnion = RD->isUnion();
  Size = CharUnits::Zero();
  Alignment = CharUnits::One();
  // In 64-bit mode we always perform an alignment step after laying out vbases.
  // In 32-bit mode we do not. The check to see if we need to perform alignment
  // checks the RequiredAlignment field and performs alignment if it isn't 0.
  RequiredAlignment = Context.getTargetInfo().getTriple().isArch64Bit()
                          ? CharUnits::One()
                          : CharUnits::Zero();
  // Compute the maximum field alignment.
  MaxFieldAlignment = CharUnits::Zero();
  // Honor the default struct packing maximum alignment flag.
  if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct)
    MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment);
  // Honor the packing attribute. The MS-ABI ignores pragma pack if it's larger
  // than the pointer size.
  if (const MaxFieldAlignmentAttr *MFAA = RD->getAttr<MaxFieldAlignmentAttr>()){
    unsigned PackedAlignment = MFAA->getAlignment();
    if (PackedAlignment <= Context.getTargetInfo().getPointerWidth(0))
      MaxFieldAlignment = Context.toCharUnitsFromBits(PackedAlignment);
  }
  // Packed attribute forces max field alignment to be 1.
  if (RD->hasAttr<PackedAttr>())
    MaxFieldAlignment = CharUnits::One();
  // Try to respect the external layout if present.
  UseExternalLayout = false;
  if (ExternalASTSource *Source = Context.getExternalSource())
    UseExternalLayout = Source->layoutRecordType(
        RD, External.Size, External.Align, External.FieldOffsets,
        External.BaseOffsets, External.VirtualBaseOffsets);
}
void
MicrosoftRecordLayoutBuilder::initializeCXXLayout(const CXXRecordDecl *RD) {
  // Reset all C++-specific state before laying out a new record.
  EndsWithZeroSizedObject = false;
  LeadsWithZeroSizedBase = false;
  HasOwnVFPtr = false;
  HasVBPtr = false;
  PrimaryBase = nullptr;
  SharedVBPtrBase = nullptr;
  // Calculate pointer size and alignment. These are used for vfptr and vbptr
  // injection.
  PointerInfo.Size =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
  PointerInfo.Alignment =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
  // Respect pragma pack.
  if (!MaxFieldAlignment.isZero())
    PointerInfo.Alignment = std::min(PointerInfo.Alignment, MaxFieldAlignment);
}
void
MicrosoftRecordLayoutBuilder::layoutNonVirtualBases(const CXXRecordDecl *RD) {
  // The MS-ABI lays out all bases that contain leading vfptrs before it lays
  // out any bases that do not contain vfptrs. We implement this as two passes
  // over the bases. This approach guarantees that the primary base is laid out
  // first. We use these passes to calculate some additional aggregated
  // information about the bases, such as required alignment and the presence
  // of zero sized members.
  const ASTRecordLayout *PreviousBaseLayout = nullptr;
  // First pass: lay out the non-virtual bases that have extendable vfptrs.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
    // Mark and skip virtual bases.
    if (Base.isVirtual()) {
      HasVBPtr = true;
      continue;
    }
    // Check for a base to share a VBPtr with.
    if (!SharedVBPtrBase && BaseLayout.hasVBPtr()) {
      SharedVBPtrBase = BaseDecl;
      HasVBPtr = true;
    }
    // Only lay out bases with extendable VFPtrs on the first pass.
    if (!BaseLayout.hasExtendableVFPtr())
      continue;
    // If we don't have a primary base, this one qualifies.
    if (!PrimaryBase) {
      PrimaryBase = BaseDecl;
      LeadsWithZeroSizedBase = BaseLayout.leadsWithZeroSizedBase();
    }
    // Lay out the base.
    layoutNonVirtualBase(BaseDecl, BaseLayout, PreviousBaseLayout);
  }
  // Figure out if we need a fresh VFPtr for this class: a dynamic class with
  // no primary base that introduces at least one new virtual method.
  if (!PrimaryBase && RD->isDynamicClass())
    for (CXXRecordDecl::method_iterator i = RD->method_begin(),
                                        e = RD->method_end();
         !HasOwnVFPtr && i != e; ++i)
      HasOwnVFPtr = i->isVirtual() && i->size_overridden_methods() == 0;
  // If we don't have a primary base then we have a leading object that could
  // itself lead with a zero-sized object, something we track.
  bool CheckLeadingLayout = !PrimaryBase;
  // Second pass: lay out the non-virtual bases without extendable vfptrs.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    if (Base.isVirtual())
      continue;
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
    // Only lay out bases without extendable VFPtrs on the second pass.
    if (BaseLayout.hasExtendableVFPtr()) {
      VBPtrOffset = Bases[BaseDecl] + BaseLayout.getNonVirtualSize();
      continue;
    }
    // If this is the first layout, check to see if it leads with a zero sized
    // object. If it does, so do we.
    if (CheckLeadingLayout) {
      CheckLeadingLayout = false;
      LeadsWithZeroSizedBase = BaseLayout.leadsWithZeroSizedBase();
    }
    // Lay out the base.
    layoutNonVirtualBase(BaseDecl, BaseLayout, PreviousBaseLayout);
    VBPtrOffset = Bases[BaseDecl] + BaseLayout.getNonVirtualSize();
  }
  // Set our VBPtrOffset if we know it at this point.
  if (!HasVBPtr)
    VBPtrOffset = CharUnits::fromQuantity(-1);
  else if (SharedVBPtrBase) {
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(SharedVBPtrBase);
    VBPtrOffset = Bases[SharedVBPtrBase] + Layout.getVBPtrOffset();
  }
}
void MicrosoftRecordLayoutBuilder::layoutNonVirtualBase(
    const CXXRecordDecl *BaseDecl,
    const ASTRecordLayout &BaseLayout,
    const ASTRecordLayout *&PreviousBaseLayout) {
  // A byte of padding separates two bases when the previous one ends with a
  // zero sized subobject and this one leads with a zero sized base, so that
  // the two zero-sized objects don't alias.
  if (PreviousBaseLayout && PreviousBaseLayout->hasZeroSizedSubObject() &&
      BaseLayout.leadsWithZeroSizedBase())
    Size++;
  ElementInfo Info = getAdjustedElementInfo(BaseLayout);
  // An external AST source can dictate the base's offset; otherwise the base
  // goes at the current end of the record, aligned appropriately.
  CharUnits BaseOffset;
  bool HaveExternalOffset = false;
  if (UseExternalLayout) {
    HaveExternalOffset = External.getExternalNVBaseOffset(BaseDecl, BaseOffset);
    if (HaveExternalOffset)
      assert(BaseOffset >= Size && "base offset already allocated");
  }
  if (!HaveExternalOffset)
    BaseOffset = Size.RoundUpToAlignment(Info.Alignment);
  Bases.insert(std::make_pair(BaseDecl, BaseOffset));
  Size = BaseOffset + BaseLayout.getNonVirtualSize();
  PreviousBaseLayout = &BaseLayout;
}
void MicrosoftRecordLayoutBuilder::layoutFields(const RecordDecl *RD) {
LastFieldIsNonZeroWidthBitfield = false;
for (const FieldDecl *Field : RD->fields())
layoutField(Field);
}
void MicrosoftRecordLayoutBuilder::layoutField(const FieldDecl *FD) {
  // Bitfields follow their own allocation rules.
  if (FD->isBitField())
    return layoutBitField(FD);
  LastFieldIsNonZeroWidthBitfield = false;
  ElementInfo Info = getAdjustedElementInfo(FD);
  Alignment = std::max(Alignment, Info.Alignment);
  if (IsUnion) {
    // Every union member lives at offset zero; the union is as large as its
    // largest member.
    placeFieldAtOffset(CharUnits::Zero());
    Size = std::max(Size, Info.Size);
    return;
  }
  // Honor an externally supplied field offset when one exists; otherwise the
  // field goes at the current end of the record, suitably aligned.
  CharUnits FieldOffset;
  if (UseExternalLayout) {
    FieldOffset =
        Context.toCharUnitsFromBits(External.getExternalFieldOffset(FD));
    assert(FieldOffset >= Size && "field offset already allocated");
  } else {
    FieldOffset = Size.RoundUpToAlignment(Info.Alignment);
  }
  placeFieldAtOffset(FieldOffset);
  Size = FieldOffset + Info.Size;
}
void MicrosoftRecordLayoutBuilder::layoutBitField(const FieldDecl *FD) {
  unsigned Width = FD->getBitWidthValue(Context);
  if (Width == 0) {
    layoutZeroWidthBitField(FD);
    return;
  }
  ElementInfo Info = getAdjustedElementInfo(FD);
  // Clamp the bitfield to a containable size for the sake of being able
  // to lay them out. Sema will throw an error.
  if (Width > Context.toBits(Info.Size))
    Width = Context.toBits(Info.Size);
  // Check to see if this bitfield fits into an existing allocation. Note:
  // MSVC refuses to pack bitfields of formal types with different sizes
  // into the same allocation.
  if (!IsUnion && LastFieldIsNonZeroWidthBitfield &&
      CurrentBitfieldSize == Info.Size && Width <= RemainingBitsInField) {
    // Continue in the current storage unit: the field's bit offset is the
    // unit's end minus the bits still available in it.
    placeFieldAtBitOffset(Context.toBits(Size) - RemainingBitsInField);
    RemainingBitsInField -= Width;
    return;
  }
  LastFieldIsNonZeroWidthBitfield = true;
  CurrentBitfieldSize = Info.Size;
  if (IsUnion) {
    placeFieldAtOffset(CharUnits::Zero());
    Size = std::max(Size, Info.Size);
    // TODO: Add a Sema warning that MS ignores bitfield alignment in unions.
  } else {
    // Allocate a new block of memory and place the bitfield in it.
    CharUnits FieldOffset = Size.RoundUpToAlignment(Info.Alignment);
    placeFieldAtOffset(FieldOffset);
    Size = FieldOffset + Info.Size;
    Alignment = std::max(Alignment, Info.Alignment);
    RemainingBitsInField = Context.toBits(Info.Size) - Width;
  }
}
void
MicrosoftRecordLayoutBuilder::layoutZeroWidthBitField(const FieldDecl *FD) {
  // Zero-width bitfields are ignored unless they follow a non-zero-width
  // bitfield.
  if (!LastFieldIsNonZeroWidthBitfield) {
    placeFieldAtOffset(IsUnion ? CharUnits::Zero() : Size);
    // TODO: Add a Sema warning that MS ignores alignment for zero
    // sized bitfields that occur after zero-size bitfields or non-bitfields.
    return;
  }
  // A zero-width bitfield after a non-zero-width one closes the active
  // bitfield allocation.
  LastFieldIsNonZeroWidthBitfield = false;
  ElementInfo Info = getAdjustedElementInfo(FD);
  if (IsUnion) {
    placeFieldAtOffset(CharUnits::Zero());
    Size = std::max(Size, Info.Size);
    // TODO: Add a Sema warning that MS ignores bitfield alignment in unions.
  } else {
    // Round up the current record size to the field's alignment boundary.
    CharUnits FieldOffset = Size.RoundUpToAlignment(Info.Alignment);
    placeFieldAtOffset(FieldOffset);
    Size = FieldOffset;
    Alignment = std::max(Alignment, Info.Alignment);
  }
}
void MicrosoftRecordLayoutBuilder::injectVBPtr(const CXXRecordDecl *RD) {
  // Nothing to do when there is no vbptr, or when it is shared with a base.
  if (!HasVBPtr || SharedVBPtrBase)
    return;
  // Inject the VBPointer at the injection site.
  CharUnits InjectionSite = VBPtrOffset;
  // But before we do, make sure it's properly aligned.
  VBPtrOffset = VBPtrOffset.RoundUpToAlignment(PointerInfo.Alignment);
  // Shift everything after the vbptr down, unless we're using an external
  // layout.
  if (UseExternalLayout)
    return;
  // Determine where the first field should be laid out after the vbptr.
  CharUnits FieldStart = VBPtrOffset + PointerInfo.Size;
  // Make sure that the amount we push the fields back by is a multiple of the
  // alignment.
  CharUnits Offset = (FieldStart - InjectionSite).RoundUpToAlignment(
      std::max(RequiredAlignment, Alignment));
  Size += Offset;
  // All fields move back; bases move back only if they were laid out at or
  // after the injection site.
  for (uint64_t &FieldOffset : FieldOffsets)
    FieldOffset += Context.toBits(Offset);
  for (BaseOffsetsMapTy::value_type &Base : Bases)
    if (Base.second >= InjectionSite)
      Base.second += Offset;
}
void MicrosoftRecordLayoutBuilder::injectVFPtr(const CXXRecordDecl *RD) {
if (!HasOwnVFPtr)
return;
// Make sure that the amount we push the struct back by is a multiple of the
// alignment.
CharUnits Offset = PointerInfo.Size.RoundUpToAlignment(
std::max(RequiredAlignment, Alignment));
// Increase the size of the object and push back all fields, the vbptr and all
// bases by the offset amount.
Size += Offset;
for (uint64_t &FieldOffset : FieldOffsets)
FieldOffset += Context.toBits(Offset);
if (HasVBPtr)
VBPtrOffset += Offset;
for (BaseOffsetsMapTy::value_type &Base : Bases)
Base.second += Offset;
}
void MicrosoftRecordLayoutBuilder::layoutVirtualBases(const CXXRecordDecl *RD) {
  if (!HasVBPtr)
    return;
  // Vtordisps are always 4 bytes (even in 64-bit mode)
  CharUnits VtorDispSize = CharUnits::fromQuantity(4);
  CharUnits VtorDispAlignment = VtorDispSize;
  // vtordisps respect pragma pack.
  if (!MaxFieldAlignment.isZero())
    VtorDispAlignment = std::min(VtorDispAlignment, MaxFieldAlignment);
  // The alignment of the vtordisp is at least the required alignment of the
  // entire record. This requirement may be present to support vtordisp
  // injection.
  for (const CXXBaseSpecifier &VBase : RD->vbases()) {
    const CXXRecordDecl *BaseDecl = VBase.getType()->getAsCXXRecordDecl();
    const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
    RequiredAlignment =
        std::max(RequiredAlignment, BaseLayout.getRequiredAlignment());
  }
  VtorDispAlignment = std::max(VtorDispAlignment, RequiredAlignment);
  // Compute the vtordisp set.
  llvm::SmallPtrSet<const CXXRecordDecl *, 2> HasVtorDispSet;
  computeVtorDispSet(HasVtorDispSet, RD);
  // Iterate through the virtual bases and lay them out.
  const ASTRecordLayout *PreviousBaseLayout = nullptr;
  for (const CXXBaseSpecifier &VBase : RD->vbases()) {
    const CXXRecordDecl *BaseDecl = VBase.getType()->getAsCXXRecordDecl();
    const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
    bool HasVtordisp = HasVtorDispSet.count(BaseDecl) > 0;
    // Insert padding between two bases if the previous one is zero sized or
    // contains a zero sized subobject and the next one is zero sized or leads
    // with a zero sized base. The padding between virtual bases is 4
    // bytes (in both 32 and 64 bits modes) and always involves rounding up to
    // the required alignment, we don't know why.
    if ((PreviousBaseLayout && PreviousBaseLayout->hasZeroSizedSubObject() &&
         BaseLayout.leadsWithZeroSizedBase()) || HasVtordisp) {
      Size = Size.RoundUpToAlignment(VtorDispAlignment) + VtorDispSize;
      Alignment = std::max(VtorDispAlignment, Alignment);
    }
    // Insert the virtual base.
    ElementInfo Info = getAdjustedElementInfo(BaseLayout);
    CharUnits BaseOffset;
    // Respect the external AST source base offset, if present.
    bool FoundBase = false;
    if (UseExternalLayout) {
      FoundBase = External.getExternalVBaseOffset(BaseDecl, BaseOffset);
      if (FoundBase)
        assert(BaseOffset >= Size && "base offset already allocated");
    }
    if (!FoundBase)
      BaseOffset = Size.RoundUpToAlignment(Info.Alignment);
    VBases.insert(std::make_pair(BaseDecl,
        ASTRecordLayout::VBaseInfo(BaseOffset, HasVtordisp)));
    Size = BaseOffset + BaseLayout.getNonVirtualSize();
    PreviousBaseLayout = &BaseLayout;
  }
}
/// \brief Finish MS-ABI record layout: apply the required (__declspec(align))
/// alignment, give zero-sized records their final size, and let an externally
/// provided layout (e.g. from debug info) override the computed one.
void MicrosoftRecordLayoutBuilder::finalizeLayout(const RecordDecl *RD) {
  // Respect required alignment. Note that in 32-bit mode Required alignment
  // may be 0 and cause size not to be updated.
  DataSize = Size;
  if (!RequiredAlignment.isZero()) {
    Alignment = std::max(Alignment, RequiredAlignment);
    // The rounding alignment honors #pragma pack (MaxFieldAlignment), but a
    // __declspec(align) requirement always wins over pragma pack.
    auto RoundingAlignment = Alignment;
    if (!MaxFieldAlignment.isZero())
      RoundingAlignment = std::min(RoundingAlignment, MaxFieldAlignment);
    RoundingAlignment = std::max(RoundingAlignment, RequiredAlignment);
    Size = Size.RoundUpToAlignment(RoundingAlignment);
  }
  if (Size.isZero()) {
    // A zero-sized record both ends with and leads with a zero-sized
    // (sub)object; enclosing records use these flags to insert padding.
    EndsWithZeroSizedObject = true;
    LeadsWithZeroSizedBase = true;
    if (!Context.getLangOpts().HLSL) { // HLSL Change - allow empty structs to be zero sized
    // Zero-sized structures have size equal to their alignment if a
    // __declspec(align) came into play.
    if (RequiredAlignment >= MinEmptyStructSize)
      Size = Alignment;
    else
      Size = MinEmptyStructSize;
    } // HLSL Change
  }
  if (UseExternalLayout) {
    // An external AST source supplied this record's layout; trust its size
    // and (when present) alignment over what we computed.
    Size = Context.toCharUnitsFromBits(External.Size);
    if (External.Align)
      Alignment = Context.toCharUnitsFromBits(External.Align);
  }
}
// Recursively walks the non-virtual bases of a class and determines if any of
// them are in the bases with overridden methods set.
static bool
RequiresVtordisp(const llvm::SmallPtrSetImpl<const CXXRecordDecl *> &
                     BasesWithOverriddenMethods,
                 const CXXRecordDecl *RD) {
  // Direct hit: this class itself declares methods that the most-derived
  // class overrides.
  if (BasesWithOverriddenMethods.count(RD))
    return true;
  // Otherwise a vtordisp is needed if any non-virtual base (transitively)
  // needs one; virtual bases are handled separately by the caller.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    if (Base.isVirtual())
      continue;
    if (RequiresVtordisp(BasesWithOverriddenMethods,
                         Base.getType()->getAsCXXRecordDecl()))
      return true;
  }
  return false;
}
/// \brief Compute the set of virtual bases of \p RD that need a vtordisp
/// field, honoring the active vtordisp mode selected by the /vd flag or
/// #pragma vtordisp.
void MicrosoftRecordLayoutBuilder::computeVtorDispSet(
    llvm::SmallPtrSetImpl<const CXXRecordDecl *> &HasVtordispSet,
    const CXXRecordDecl *RD) const {
  // /vd2 or #pragma vtordisp(2): Always use vtordisps for virtual bases with
  // vftables.
  if (RD->getMSVtorDispMode() == MSVtorDispAttr::ForVFTable) {
    for (const CXXBaseSpecifier &Base : RD->vbases()) {
      const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
      const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
      if (Layout.hasExtendableVFPtr())
        HasVtordispSet.insert(BaseDecl);
    }
    return;
  }
  // If any of our bases need a vtordisp for this type, so do we. Check our
  // direct bases for vtordisp requirements.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
    for (const auto &bi : Layout.getVBaseOffsetsMap())
      if (bi.second.hasVtorDisp())
        HasVtordispSet.insert(bi.first);
  }
  // We don't introduce any additional vtordisps if either:
  // * A user declared constructor or destructor aren't declared.
  // * #pragma vtordisp(0) or the /vd0 flag are in use.
  if ((!RD->hasUserDeclaredConstructor() && !RD->hasUserDeclaredDestructor()) ||
      RD->getMSVtorDispMode() == MSVtorDispAttr::Never)
    return;
  // /vd1 or #pragma vtordisp(1): Try to guess based on whether we think it's
  // possible for a partially constructed object with virtual base overrides to
  // escape a non-trivial constructor.
  assert(RD->getMSVtorDispMode() == MSVtorDispAttr::ForVBaseOverride);
  // Compute a set of base classes which define methods we override. A virtual
  // base in this set will require a vtordisp. A virtual base that transitively
  // contains one of these bases as a non-virtual base will also require a
  // vtordisp.
  llvm::SmallPtrSet<const CXXMethodDecl *, 8> Work;
  llvm::SmallPtrSet<const CXXRecordDecl *, 2> BasesWithOverriddenMethods;
  // Seed the working set with our non-destructor, non-pure virtual methods.
  for (const CXXMethodDecl *MD : RD->methods())
    if (MD->isVirtual() && !isa<CXXDestructorDecl>(MD) && !MD->isPure())
      Work.insert(MD);
  // Walk each method up its overridden-method chain until we reach the
  // classes that originally declared them.
  while (!Work.empty()) {
    const CXXMethodDecl *MD = *Work.begin();
    CXXMethodDecl::method_iterator i = MD->begin_overridden_methods(),
                                   e = MD->end_overridden_methods();
    // If a virtual method has no-overrides it lives in its parent's vtable.
    if (i == e)
      BasesWithOverriddenMethods.insert(MD->getParent());
    else
      Work.insert(i, e);
    // We've finished processing this element, remove it from the working set.
    Work.erase(MD);
  }
  // For each of our virtual bases, check if it is in the set of overridden
  // bases or if it transitively contains a non-virtual base that is.
  for (const CXXBaseSpecifier &Base : RD->vbases()) {
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (!HasVtordispSet.count(BaseDecl) &&
        RequiresVtordisp(BasesWithOverriddenMethods, BaseDecl))
      HasVtordispSet.insert(BaseDecl);
  }
}
/// \brief Get or compute information about the layout of the specified record
/// (struct/union/class), which indicates its size and field position
/// information.
///
/// Uses MicrosoftRecordLayoutBuilder; C++ records take the full MS-ABI
/// constructor of ASTRecordLayout (vfptr/vbptr, bases, vbases), plain C
/// records take the simple one. The result is allocated in this ASTContext.
const ASTRecordLayout *
ASTContext::BuildMicrosoftASTRecordLayout(const RecordDecl *D) const {
  MicrosoftRecordLayoutBuilder Builder(*this);
  if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
    Builder.cxxLayout(RD);
    // NOTE(review): arguments are positional against the C++ ASTRecordLayout
    // ctor — keep in sync with its declaration when changing either side.
    return new (*this) ASTRecordLayout(
        *this, Builder.Size, Builder.Alignment, Builder.RequiredAlignment,
        Builder.HasOwnVFPtr,
        Builder.HasOwnVFPtr || Builder.PrimaryBase,
        Builder.VBPtrOffset, Builder.NonVirtualSize, Builder.FieldOffsets.data(),
        Builder.FieldOffsets.size(), Builder.NonVirtualSize,
        Builder.Alignment, CharUnits::Zero(), Builder.PrimaryBase,
        false, Builder.SharedVBPtrBase,
        Builder.EndsWithZeroSizedObject, Builder.LeadsWithZeroSizedBase,
        Builder.Bases, Builder.VBases);
  } else {
    Builder.layout(D);
    return new (*this) ASTRecordLayout(
        *this, Builder.Size, Builder.Alignment, Builder.RequiredAlignment,
        Builder.Size, Builder.FieldOffsets.data(), Builder.FieldOffsets.size());
  }
}
/// getASTRecordLayout - Get or compute information about the layout of the
/// specified record (struct/union/class), which indicates its size and field
/// position information.
///
/// Layouts are computed once per definition and cached in ASTRecordLayouts;
/// this function recurses (via the builders) for base classes and field
/// types, which is why it only caches at the end.
const ASTRecordLayout &
ASTContext::getASTRecordLayout(const RecordDecl *D) const {
  // These asserts test different things. A record has a definition
  // as soon as we begin to parse the definition. That definition is
  // not a complete definition (which is what isDefinition() tests)
  // until we *finish* parsing the definition.
  if (D->hasExternalLexicalStorage() && !D->getDefinition())
    getExternalSource()->CompleteType(const_cast<RecordDecl*>(D));
  D = D->getDefinition();
  assert(D && "Cannot get layout of forward declarations!");
  assert(!D->isInvalidDecl() && "Cannot get layout of invalid decl!");
  assert(D->isCompleteDefinition() && "Cannot layout type before complete!");
  // Look up this layout, if already laid out, return what we have.
  // Note that we can't save a reference to the entry because this function
  // is recursive.
  const ASTRecordLayout *Entry = ASTRecordLayouts[D];
  if (Entry) return *Entry;
  const ASTRecordLayout *NewEntry = nullptr;
  // Pick the layout algorithm: MS ABI, Itanium-style C++ record, or plain
  // C/ObjC record.
  if (isMsLayout(D)) {
    NewEntry = BuildMicrosoftASTRecordLayout(D);
  } else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
    EmptySubobjectMap EmptySubobjects(*this, RD);
    RecordLayoutBuilder Builder(*this, &EmptySubobjects);
    Builder.Layout(RD);
    // In certain situations, we are allowed to lay out objects in the
    // tail-padding of base classes. This is ABI-dependent.
    // FIXME: this should be stored in the record layout.
    bool skipTailPadding =
      mustSkipTailPadding(getTargetInfo().getCXXABI(), cast<CXXRecordDecl>(D));
    // FIXME: This should be done in FinalizeLayout.
    CharUnits DataSize =
      skipTailPadding ? Builder.getSize() : Builder.getDataSize();
    CharUnits NonVirtualSize =
      skipTailPadding ? DataSize : Builder.NonVirtualSize;
    NewEntry =
      new (*this) ASTRecordLayout(*this, Builder.getSize(),
                                  Builder.Alignment,
                                  /*RequiredAlignment : used by MS-ABI)*/
                                  Builder.Alignment,
                                  Builder.HasOwnVFPtr,
                                  RD->isDynamicClass(),
                                  CharUnits::fromQuantity(-1),
                                  DataSize,
                                  Builder.FieldOffsets.data(),
                                  Builder.FieldOffsets.size(),
                                  NonVirtualSize,
                                  Builder.NonVirtualAlignment,
                                  EmptySubobjects.SizeOfLargestEmptySubobject,
                                  Builder.PrimaryBase,
                                  Builder.PrimaryBaseIsVirtual,
                                  nullptr, false, false,
                                  Builder.Bases, Builder.VBases);
  } else {
    RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/nullptr);
    Builder.Layout(D);
    NewEntry =
      new (*this) ASTRecordLayout(*this, Builder.getSize(),
                                  Builder.Alignment,
                                  /*RequiredAlignment : used by MS-ABI)*/
                                  Builder.Alignment,
                                  Builder.getSize(),
                                  Builder.FieldOffsets.data(),
                                  Builder.FieldOffsets.size());
  }
  // Cache before optionally dumping so any recursive lookups hit the cache.
  ASTRecordLayouts[D] = NewEntry;
  if (getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping AST Record Layout\n";
    DumpRecordLayout(D, llvm::outs(), getLangOpts().DumpRecordLayoutsSimple);
  }
  return *NewEntry;
}
/// \brief Return the key function of \p RD (if the target's C++ ABI uses key
/// functions), computing and caching it on first use.
///
/// Returns null when the ABI has no key functions or none was computed.
const CXXMethodDecl *ASTContext::getCurrentKeyFunction(const CXXRecordDecl *RD) {
  if (!getTargetInfo().getCXXABI().hasKeyFunctions())
    return nullptr;
  assert(RD->getDefinition() && "Cannot get key function for forward decl!");
  RD = cast<CXXRecordDecl>(RD->getDefinition());
  // Beware:
  //   1) computing the key function might trigger deserialization, which might
  //      invalidate iterators into KeyFunctions
  //   2) 'get' on the LazyDeclPtr might also trigger deserialization and
  //      invalidate the LazyDeclPtr within the map itself
  // Hence Entry is copied out of the map before either call.
  LazyDeclPtr Entry = KeyFunctions[RD];
  const Decl *Result =
      Entry ? Entry.get(getExternalSource()) : computeKeyFunction(*this, RD);
  // Store it back if it changed.
  if (Entry.isOffset() || Entry.isValid() != bool(Result))
    KeyFunctions[RD] = const_cast<Decl*>(Result);
  return cast_or_null<CXXMethodDecl>(Result);
}
/// \brief Notice that \p Method is no longer a key function candidate and
/// drop any cached key-function entry that still names it, so the next
/// getCurrentKeyFunction call recomputes.
void ASTContext::setNonKeyFunction(const CXXMethodDecl *Method) {
  assert(Method == Method->getFirstDecl() &&
         "not working with method declaration from class definition");
  // Look up the cache entry. Since we're working with the first
  // declaration, its parent must be the class definition, which is
  // the correct key for the KeyFunctions hash.
  const auto &Map = KeyFunctions;
  auto I = Map.find(Method->getParent());
  // If it's not cached, there's nothing to do.
  if (I == Map.end()) return;
  // If it is cached, check whether it's the target method, and if so,
  // remove it from the cache. Note, the call to 'get' might invalidate
  // the iterator and the LazyDeclPtr object within the map.
  // (That is why Ptr is copied out of the map before calling get().)
  LazyDeclPtr Ptr = I->second;
  if (Ptr.get(getExternalSource()) == Method) {
    // FIXME: remember that we did this for module / chained PCH state?
    KeyFunctions.erase(Method->getParent());
  }
}
static uint64_t getFieldOffset(const ASTContext &C, const FieldDecl *FD) {
const ASTRecordLayout &Layout = C.getASTRecordLayout(FD->getParent());
return Layout.getFieldOffset(FD->getFieldIndex());
}
uint64_t ASTContext::getFieldOffset(const ValueDecl *VD) const {
uint64_t OffsetInBits;
if (const FieldDecl *FD = dyn_cast<FieldDecl>(VD)) {
OffsetInBits = ::getFieldOffset(*this, FD);
} else {
const IndirectFieldDecl *IFD = cast<IndirectFieldDecl>(VD);
OffsetInBits = 0;
for (const NamedDecl *ND : IFD->chain())
OffsetInBits += ::getFieldOffset(*this, cast<FieldDecl>(ND));
}
return OffsetInBits;
}
/// getObjCLayout - Get or compute information about the layout of the
/// given interface.
///
/// \param Impl - If given, also include the layout of the interface's
/// implementation. This may differ by including synthesized ivars.
///
/// Results are cached in ObjCLayouts, keyed by the implementation when one
/// is supplied, otherwise by the interface definition itself.
const ASTRecordLayout &
ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
                          const ObjCImplementationDecl *Impl) const {
  // Retrieve the definition
  if (D->hasExternalLexicalStorage() && !D->getDefinition())
    getExternalSource()->CompleteType(const_cast<ObjCInterfaceDecl*>(D));
  D = D->getDefinition();
  assert(D && D->isThisDeclarationADefinition() && "Invalid interface decl!");
  // Look up this layout, if already laid out, return what we have.
  const ObjCContainerDecl *Key =
    Impl ? (const ObjCContainerDecl*) Impl : (const ObjCContainerDecl*) D;
  if (const ASTRecordLayout *Entry = ObjCLayouts[Key])
    return *Entry;
  // Add in synthesized ivar count if laying out an implementation.
  if (Impl) {
    unsigned SynthCount = CountNonClassIvars(D);
    // If there aren't any sythesized ivars then reuse the interface
    // entry. Note we can't cache this because we simply free all
    // entries later; however we shouldn't look up implementations
    // frequently.
    if (SynthCount == 0)
      return getObjCLayout(D, nullptr);
  }
  // Interfaces are laid out like plain records (no empty-subobject map).
  RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/nullptr);
  Builder.Layout(D);
  const ASTRecordLayout *NewEntry =
    new (*this) ASTRecordLayout(*this, Builder.getSize(),
                                Builder.Alignment,
                                /*RequiredAlignment : used by MS-ABI)*/
                                Builder.Alignment,
                                Builder.getDataSize(),
                                Builder.FieldOffsets.data(),
                                Builder.FieldOffsets.size());
  ObjCLayouts[Key] = NewEntry;
  return *NewEntry;
}
// Print a byte offset right-justified in a four-character column followed by
// " | ", then indent two spaces per nesting level.
static void PrintOffset(raw_ostream &OS,
                        CharUnits Offset, unsigned IndentLevel) {
  int64_t Quantity = Offset.getQuantity();
  OS << llvm::format("%4" PRId64 " | ", Quantity);
  OS.indent(2 * IndentLevel);
}
// Column-align with PrintOffset's "%4d | " prefix but leave the offset field
// blank, then indent two spaces per nesting level.
static void PrintIndentNoOffset(raw_ostream &OS, unsigned IndentLevel) {
  OS << "     | ";
  OS.indent(2 * IndentLevel);
}
/// \brief Recursively pretty-print the layout of \p RD at absolute offset
/// \p Offset: vtable/vftable pointer, non-virtual bases sorted by offset,
/// vbptr, fields (recursing into record-typed fields), and — when
/// \p IncludeVirtualBases — vtordisps and virtual bases, followed by a
/// sizeof/dsize/align and nvsize/nvalign summary.
static void DumpCXXRecordLayout(raw_ostream &OS,
                                const CXXRecordDecl *RD, const ASTContext &C,
                                CharUnits Offset,
                                unsigned IndentLevel,
                                const char* Description,
                                bool IncludeVirtualBases) {
  const ASTRecordLayout &Layout = C.getASTRecordLayout(RD);
  PrintOffset(OS, Offset, IndentLevel);
  OS << C.getTypeDeclType(const_cast<CXXRecordDecl *>(RD)).getAsString();
  if (Description)
    OS << ' ' << Description;
  if (RD->isEmpty())
    OS << " (empty)";
  OS << '\n';
  IndentLevel++;
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
  bool HasOwnVFPtr = Layout.hasOwnVFPtr();
  bool HasOwnVBPtr = Layout.hasOwnVBPtr();
  // Vtable pointer.
  if (RD->isDynamicClass() && !PrimaryBase && !isMsLayout(RD)) {
    PrintOffset(OS, Offset, IndentLevel);
    OS << '(' << *RD << " vtable pointer)\n";
  } else if (HasOwnVFPtr) {
    PrintOffset(OS, Offset, IndentLevel);
    // vfptr (for Microsoft C++ ABI)
    OS << '(' << *RD << " vftable pointer)\n";
  }
  // Collect nvbases.
  SmallVector<const CXXRecordDecl *, 4> Bases;
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    assert(!Base.getType()->isDependentType() &&
           "Cannot layout class with dependent bases.");
    if (!Base.isVirtual())
      Bases.push_back(Base.getType()->getAsCXXRecordDecl());
  }
  // Sort nvbases by offset.
  std::stable_sort(Bases.begin(), Bases.end(),
                   [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
    return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
  });
  // Dump (non-virtual) bases
  for (const CXXRecordDecl *Base : Bases) {
    CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base);
    DumpCXXRecordLayout(OS, Base, C, BaseOffset, IndentLevel,
                        Base == PrimaryBase ? "(primary base)" : "(base)",
                        /*IncludeVirtualBases=*/false);
  }
  // vbptr (for Microsoft C++ ABI)
  if (HasOwnVBPtr) {
    PrintOffset(OS, Offset + Layout.getVBPtrOffset(), IndentLevel);
    OS << '(' << *RD << " vbtable pointer)\n";
  }
  // Dump fields.
  uint64_t FieldNo = 0;
  for (CXXRecordDecl::field_iterator I = RD->field_begin(),
         E = RD->field_end(); I != E; ++I, ++FieldNo) {
    const FieldDecl &Field = **I;
    // Field offsets in the layout are in bits; convert to bytes for display.
    CharUnits FieldOffset = Offset +
      C.toCharUnitsFromBits(Layout.getFieldOffset(FieldNo));
    if (const CXXRecordDecl *D = Field.getType()->getAsCXXRecordDecl()) {
      DumpCXXRecordLayout(OS, D, C, FieldOffset, IndentLevel,
                          Field.getName().data(),
                          /*IncludeVirtualBases=*/true);
      continue;
    }
    PrintOffset(OS, FieldOffset, IndentLevel);
    OS << Field.getType().getAsString() << ' ' << Field << '\n';
  }
  if (!IncludeVirtualBases)
    return;
  // Dump virtual bases.
  const ASTRecordLayout::VBaseOffsetsMapTy &vtordisps =
    Layout.getVBaseOffsetsMap();
  for (const CXXBaseSpecifier &Base : RD->vbases()) {
    assert(Base.isVirtual() && "Found non-virtual class!");
    const CXXRecordDecl *VBase = Base.getType()->getAsCXXRecordDecl();
    CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBase);
    // A vtordisp, when present, sits in the 4 bytes just below its vbase.
    if (vtordisps.find(VBase)->second.hasVtorDisp()) {
      PrintOffset(OS, VBaseOffset - CharUnits::fromQuantity(4), IndentLevel);
      OS << "(vtordisp for vbase " << *VBase << ")\n";
    }
    DumpCXXRecordLayout(OS, VBase, C, VBaseOffset, IndentLevel,
                        VBase == PrimaryBase ?
                        "(primary virtual base)" : "(virtual base)",
                        /*IncludeVirtualBases=*/false);
  }
  PrintIndentNoOffset(OS, IndentLevel - 1);
  OS << "[sizeof=" << Layout.getSize().getQuantity();
  if (!isMsLayout(RD))
    OS << ", dsize=" << Layout.getDataSize().getQuantity();
  OS << ", align=" << Layout.getAlignment().getQuantity() << '\n';
  PrintIndentNoOffset(OS, IndentLevel - 1);
  OS << " nvsize=" << Layout.getNonVirtualSize().getQuantity();
  OS << ", nvalign=" << Layout.getNonVirtualAlignment().getQuantity() << "]\n";
}
void ASTContext::DumpRecordLayout(const RecordDecl *RD,
                                  raw_ostream &OS,
                                  bool Simple) const {
  // Computing the layout first also ensures it is cached for any recursive
  // dumps below.
  const ASTRecordLayout &Info = getASTRecordLayout(RD);

  // C++ records get the rich recursive dump unless a simple dump was
  // explicitly requested.
  if (!Simple)
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      return DumpCXXRecordLayout(OS, CXXRD, *this, CharUnits(), 0, nullptr,
                                 /*IncludeVirtualBases=*/true);

  OS << "Type: " << getTypeDeclType(RD).getAsString() << "\n";
  if (!Simple) {
    OS << "Record: ";
    RD->dump();
  }
  OS << "\nLayout: ";
  OS << "<ASTRecordLayout\n";
  OS << "  Size:" << toBits(Info.getSize()) << "\n";
  // MS layout does not maintain a distinct data size.
  if (!isMsLayout(RD))
    OS << "  DataSize:" << toBits(Info.getDataSize()) << "\n";
  OS << "  Alignment:" << toBits(Info.getAlignment()) << "\n";
  OS << "  FieldOffsets: [";
  const unsigned FieldCount = Info.getFieldCount();
  for (unsigned Idx = 0; Idx != FieldCount; ++Idx) {
    if (Idx != 0)
      OS << ", ";
    OS << Info.getFieldOffset(Idx);
  }
  OS << "]>\n";
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/ExprClassification.cpp | //===--- ExprClassification.cpp - Expression AST Node Implementation ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements Expr::classify.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/Expr.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
typedef Expr::Classification Cl;
static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E);
static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D);
static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T);
static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E);
static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E);
static Cl::Kinds ClassifyConditional(ASTContext &Ctx,
const Expr *trueExpr,
const Expr *falseExpr);
static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
Cl::Kinds Kind, SourceLocation &Loc);
/// \brief Compute this expression's full classification: its value-category
/// kind plus, when \p Loc is non-null, its modifiability (via IsModifiable,
/// which receives *Loc to report against).
Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
  assert(!TR->isReferenceType() && "Expressions can't have reference type.");
  Cl::Kinds kind = ClassifyInternal(Ctx, this);
  // C99 6.3.2.1: An lvalue is an expression with an object type or an
  //   incomplete type other than void.
  if (!Ctx.getLangOpts().CPlusPlus) {
    // Thus, no functions.
    if (TR->isFunctionType() || TR == Ctx.OverloadTy)
      kind = Cl::CL_Function;
    // No void either, but qualified void is OK because it is "other than void".
    // Void "lvalues" are classified as addressable void values, which are void
    // expressions whose address can be taken.
    else if (TR->isVoidType() && !TR.hasQualifiers())
      kind = (kind == Cl::CL_LValue ? Cl::CL_AddressableVoid : Cl::CL_Void);
  }
  // Enable this assertion for testing.
  // Cross-check the computed classification against the stored value kind.
  switch (kind) {
  case Cl::CL_LValue: assert(getValueKind() == VK_LValue); break;
  case Cl::CL_XValue: assert(getValueKind() == VK_XValue); break;
  case Cl::CL_Function:
  case Cl::CL_Void:
  case Cl::CL_AddressableVoid:
  case Cl::CL_DuplicateVectorComponents:
  case Cl::CL_DuplicateMatrixComponents: // HLSL Change
  case Cl::CL_MemberFunction:
  case Cl::CL_SubObjCPropertySetting:
  case Cl::CL_ClassTemporary:
  case Cl::CL_ArrayTemporary:
  case Cl::CL_ObjCMessageRValue:
  case Cl::CL_PRValue: assert(getValueKind() == VK_RValue); break;
  }
  // Modifiability is only tested when the caller supplied a location to
  // report against.
  Cl::ModifiableType modifiable = Cl::CM_Untested;
  if (Loc)
    modifiable = IsModifiable(Ctx, this, kind, *Loc);
  return Classification(kind, modifiable);
}
/// Classify an expression which creates a temporary, based on its type.
static Cl::Kinds ClassifyTemporary(QualType T) {
  // Class and array temporaries get dedicated classifications; any other
  // type behaves like an ordinary prvalue.
  if (T->isRecordType())
    return Cl::CL_ClassTemporary;
  return T->isArrayType() ? Cl::CL_ArrayTemporary : Cl::CL_PRValue;
}
// Map an expression's AST-level value kind onto the classification lattice.
static Cl::Kinds ClassifyExprValueKind(const LangOptions &Lang,
                                       const Expr *E,
                                       ExprValueKind Kind) {
  if (Kind == VK_LValue)
    return Cl::CL_LValue;
  if (Kind == VK_XValue)
    return Cl::CL_XValue;
  if (Kind == VK_RValue)
    // In C++ an rvalue may be a class or array temporary; C only has plain
    // prvalues.
    return Lang.CPlusPlus ? ClassifyTemporary(E->getType()) : Cl::CL_PRValue;
  llvm_unreachable("Invalid value category of implicit cast.");
}
/// \brief The core, syntax-directed classification routine: map an expression
/// node to one of the Cl::Kinds value categories (lvalue, xvalue, prvalue,
/// plus the special diagnostic categories such as class/array temporaries and
/// vector/matrix accesses with duplicate components).
///
/// Each case either returns a fixed category, derives it from the node's
/// type, or recurses into the relevant subexpression.
static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
  // This function takes the first stab at classifying expressions.
  const LangOptions &Lang = Ctx.getLangOpts();
  // HLSL Change Starts
  // 'this' is an LValue rather than PRValue to be consistent with
  // the way 'this' is represented in HLSL.
  if (Lang.HLSL && E->getStmtClass() == Expr::CXXThisExprClass)
    return Cl::CL_LValue;
  // HLSL Change Ends
  switch (E->getStmtClass()) {
  case Stmt::NoStmtClass:
#define ABSTRACT_STMT(Kind)
#define STMT(Kind, Base) case Expr::Kind##Class:
#define EXPR(Kind, Base)
#include "clang/AST/StmtNodes.inc"
    llvm_unreachable("cannot classify a statement");
    // First come the expressions that are always lvalues, unconditionally.
  case Expr::ObjCIsaExprClass:
    // C++ [expr.prim.general]p1: A string literal is an lvalue.
  case Expr::StringLiteralClass:
    // @encode is equivalent to its string
  case Expr::ObjCEncodeExprClass:
    // __func__ and friends are too.
  case Expr::PredefinedExprClass:
    // Property references are lvalues
  case Expr::ObjCSubscriptRefExprClass:
  case Expr::ObjCPropertyRefExprClass:
    // C++ [expr.typeid]p1: The result of a typeid expression is an lvalue of...
  case Expr::CXXTypeidExprClass:
    // Unresolved lookups and uncorrected typos get classified as lvalues.
    // FIXME: Is this wise? Should they get their own kind?
  case Expr::UnresolvedLookupExprClass:
  case Expr::UnresolvedMemberExprClass:
  case Expr::TypoExprClass:
  case Expr::CXXDependentScopeMemberExprClass:
  case Expr::DependentScopeDeclRefExprClass:
    // ObjC instance variables are lvalues
    // FIXME: ObjC++0x might have different rules
  case Expr::ObjCIvarRefExprClass:
  case Expr::FunctionParmPackExprClass:
  case Expr::MSPropertyRefExprClass:
    return Cl::CL_LValue;
    // C99 6.5.2.5p5 says that compound literals are lvalues.
    // In C++, they're prvalue temporaries.
  case Expr::CompoundLiteralExprClass:
    return Ctx.getLangOpts().CPlusPlus ? ClassifyTemporary(E->getType())
                                       : Cl::CL_LValue;
    // Expressions that are prvalues.
  case Expr::CXXBoolLiteralExprClass:
  case Expr::CXXPseudoDestructorExprClass:
  case Expr::UnaryExprOrTypeTraitExprClass:
  case Expr::CXXNewExprClass:
  case Expr::CXXThisExprClass:
  case Expr::CXXNullPtrLiteralExprClass:
  case Expr::ImaginaryLiteralClass:
  case Expr::GNUNullExprClass:
  case Expr::OffsetOfExprClass:
  case Expr::CXXThrowExprClass:
  case Expr::ShuffleVectorExprClass:
  case Expr::ConvertVectorExprClass:
  case Expr::IntegerLiteralClass:
  case Expr::CharacterLiteralClass:
  case Expr::AddrLabelExprClass:
  case Expr::CXXDeleteExprClass:
  case Expr::ImplicitValueInitExprClass:
  case Expr::BlockExprClass:
  case Expr::FloatingLiteralClass:
  case Expr::CXXNoexceptExprClass:
  case Expr::CXXScalarValueInitExprClass:
  case Expr::TypeTraitExprClass:
  case Expr::ArrayTypeTraitExprClass:
  case Expr::ExpressionTraitExprClass:
  case Expr::ObjCSelectorExprClass:
  case Expr::ObjCProtocolExprClass:
  case Expr::ObjCStringLiteralClass:
  case Expr::ObjCBoxedExprClass:
  case Expr::ObjCArrayLiteralClass:
  case Expr::ObjCDictionaryLiteralClass:
  case Expr::ObjCBoolLiteralExprClass:
  case Expr::ParenListExprClass:
  case Expr::SizeOfPackExprClass:
  case Expr::SubstNonTypeTemplateParmPackExprClass:
  case Expr::AsTypeExprClass:
  case Expr::ObjCIndirectCopyRestoreExprClass:
  case Expr::AtomicExprClass:
  case Expr::CXXFoldExprClass:
  case Expr::NoInitExprClass:
  case Expr::DesignatedInitUpdateExprClass:
    return Cl::CL_PRValue;
    // Next come the complicated cases.
  case Expr::SubstNonTypeTemplateParmExprClass:
    return ClassifyInternal(Ctx,
                 cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
    // C++ [expr.sub]p1: The result is an lvalue of type "T".
    // However, subscripting vector types is more like member access.
  case Expr::ArraySubscriptExprClass:
    if (cast<ArraySubscriptExpr>(E)->getBase()->getType()->isVectorType())
      return ClassifyInternal(Ctx, cast<ArraySubscriptExpr>(E)->getBase());
    return Cl::CL_LValue;
    // C++ [expr.prim.general]p3: The result is an lvalue if the entity is a
    //   function or variable and a prvalue otherwise.
  case Expr::DeclRefExprClass:
    if (E->getType() == Ctx.UnknownAnyTy)
      return isa<FunctionDecl>(cast<DeclRefExpr>(E)->getDecl())
               ? Cl::CL_PRValue : Cl::CL_LValue;
    return ClassifyDecl(Ctx, cast<DeclRefExpr>(E)->getDecl());
    // Member access is complex.
  case Expr::MemberExprClass:
    return ClassifyMemberExpr(Ctx, cast<MemberExpr>(E));
  case Expr::UnaryOperatorClass:
    switch (cast<UnaryOperator>(E)->getOpcode()) {
      // C++ [expr.unary.op]p1: The unary * operator performs indirection:
      //   [...] the result is an lvalue referring to the object or function
      //   to which the expression points.
    case UO_Deref:
      return Cl::CL_LValue;
      // GNU extensions, simply look through them.
    case UO_Extension:
      return ClassifyInternal(Ctx, cast<UnaryOperator>(E)->getSubExpr());
    // Treat _Real and _Imag basically as if they were member
    // expressions: l-value only if the operand is a true l-value.
    case UO_Real:
    case UO_Imag: {
      const Expr *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
      Cl::Kinds K = ClassifyInternal(Ctx, Op);
      if (K != Cl::CL_LValue) return K;
      if (isa<ObjCPropertyRefExpr>(Op))
        return Cl::CL_SubObjCPropertySetting;
      return Cl::CL_LValue;
    }
      // C++ [expr.pre.incr]p1: The result is the updated operand; it is an
      //   lvalue, [...]
      // Not so in C.
    case UO_PreInc:
    case UO_PreDec:
      return Lang.CPlusPlus ? Cl::CL_LValue : Cl::CL_PRValue;
    default:
      return Cl::CL_PRValue;
    }
  case Expr::OpaqueValueExprClass:
    return ClassifyExprValueKind(Lang, E, E->getValueKind());
    // Pseudo-object expressions can produce l-values with reference magic.
  case Expr::PseudoObjectExprClass:
    return ClassifyExprValueKind(Lang, E,
                                 cast<PseudoObjectExpr>(E)->getValueKind());
    // Implicit casts are lvalues if they're lvalue casts. Other than that, we
    // only specifically record class temporaries.
  case Expr::ImplicitCastExprClass:
    return ClassifyExprValueKind(Lang, E, E->getValueKind());
    // C++ [expr.prim.general]p4: The presence of parentheses does not affect
    //   whether the expression is an lvalue.
  case Expr::ParenExprClass:
    return ClassifyInternal(Ctx, cast<ParenExpr>(E)->getSubExpr());
    // C11 6.5.1.1p4: [A generic selection] is an lvalue, a function designator,
    // or a void expression if its result expression is, respectively, an
    // lvalue, a function designator, or a void expression.
  case Expr::GenericSelectionExprClass:
    if (cast<GenericSelectionExpr>(E)->isResultDependent())
      return Cl::CL_PRValue;
    return ClassifyInternal(Ctx,cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::BinaryOperatorClass:
  case Expr::CompoundAssignOperatorClass:
    // C doesn't have any binary expressions that are lvalues.
    if (Lang.CPlusPlus)
      return ClassifyBinaryOp(Ctx, cast<BinaryOperator>(E));
    return Cl::CL_PRValue;
  case Expr::CallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::UserDefinedLiteralClass:
  case Expr::CUDAKernelCallExprClass:
    return ClassifyUnnamed(Ctx, cast<CallExpr>(E)->getCallReturnType(Ctx));
    // __builtin_choose_expr is equivalent to the chosen expression.
  case Expr::ChooseExprClass:
    return ClassifyInternal(Ctx, cast<ChooseExpr>(E)->getChosenSubExpr());
    // Extended vector element access is an lvalue unless there are duplicates
    // in the shuffle expression.
  case Expr::ExtVectorElementExprClass:
    if (cast<ExtVectorElementExpr>(E)->containsDuplicateElements())
      return Cl::CL_DuplicateVectorComponents;
    if (cast<ExtVectorElementExpr>(E)->isArrow())
      return Cl::CL_LValue;
    return ClassifyInternal(Ctx, cast<ExtVectorElementExpr>(E)->getBase());
  // HLSL Change Starts
  // Matrix/vector swizzles mirror the ExtVectorElement logic above.
  case Expr::ExtMatrixElementExprClass:
    if (cast<ExtMatrixElementExpr>(E)->containsDuplicateElements())
      return Cl::CL_DuplicateMatrixComponents;
    if (cast<ExtMatrixElementExpr>(E)->isArrow())
      return Cl::CL_LValue;
    return ClassifyInternal(Ctx, cast<ExtMatrixElementExpr>(E)->getBase());
  case Expr::HLSLVectorElementExprClass:
    if (cast<HLSLVectorElementExpr>(E)->containsDuplicateElements())
      return Cl::CL_DuplicateVectorComponents;
    if (cast<HLSLVectorElementExpr>(E)->isArrow())
      return Cl::CL_LValue;
    return ClassifyInternal(Ctx, cast<HLSLVectorElementExpr>(E)->getBase());
  // HLSL Change Ends
    // Simply look at the actual default argument.
  case Expr::CXXDefaultArgExprClass:
    return ClassifyInternal(Ctx, cast<CXXDefaultArgExpr>(E)->getExpr());
    // Same idea for default initializers.
  case Expr::CXXDefaultInitExprClass:
    return ClassifyInternal(Ctx, cast<CXXDefaultInitExpr>(E)->getExpr());
    // Same idea for temporary binding.
  case Expr::CXXBindTemporaryExprClass:
    return ClassifyInternal(Ctx, cast<CXXBindTemporaryExpr>(E)->getSubExpr());
    // And the cleanups guard.
  case Expr::ExprWithCleanupsClass:
    return ClassifyInternal(Ctx, cast<ExprWithCleanups>(E)->getSubExpr());
    // Casts depend completely on the target type. All casts work the same.
  case Expr::CStyleCastExprClass:
    // HLSL Change Starts
    // C-style casts will be lvalue for shortened matrices and vectors;
    // perform regular processing otherwise, which is based purely on type.
    if (Lang.HLSL && E->getValueKind() == VK_LValue) return Cl::CL_LValue;
    LLVM_FALLTHROUGH;
    // HLSL Change Ends
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    // Only in C++ can casts be interesting at all.
    if (!Lang.CPlusPlus) return Cl::CL_PRValue;
    return ClassifyUnnamed(Ctx, cast<ExplicitCastExpr>(E)->getTypeAsWritten());
  case Expr::CXXUnresolvedConstructExprClass:
    return ClassifyUnnamed(Ctx,
                      cast<CXXUnresolvedConstructExpr>(E)->getTypeAsWritten());
  case Expr::BinaryConditionalOperatorClass: {
    if (!Lang.CPlusPlus) return Cl::CL_PRValue;
    const BinaryConditionalOperator *co = cast<BinaryConditionalOperator>(E);
    return ClassifyConditional(Ctx, co->getTrueExpr(), co->getFalseExpr());
  }
  case Expr::ConditionalOperatorClass: {
    // Once again, only C++ is interesting.
    if (!Lang.CPlusPlus) return Cl::CL_PRValue;
    const ConditionalOperator *co = cast<ConditionalOperator>(E);
    return ClassifyConditional(Ctx, co->getTrueExpr(), co->getFalseExpr());
  }
    // ObjC message sends are effectively function calls, if the target function
    // is known.
  case Expr::ObjCMessageExprClass:
    if (const ObjCMethodDecl *Method =
          cast<ObjCMessageExpr>(E)->getMethodDecl()) {
      Cl::Kinds kind = ClassifyUnnamed(Ctx, Method->getReturnType());
      return (kind == Cl::CL_PRValue) ? Cl::CL_ObjCMessageRValue : kind;
    }
    return Cl::CL_PRValue;
    // Some C++ expressions are always class temporaries.
  case Expr::CXXConstructExprClass:
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::LambdaExprClass:
  case Expr::CXXStdInitializerListExprClass:
    return Cl::CL_ClassTemporary;
    // va_arg classifies like an unnamed entity of its result type.
  case Expr::VAArgExprClass:
    return ClassifyUnnamed(Ctx, E->getType());
  case Expr::DesignatedInitExprClass:
    return ClassifyInternal(Ctx, cast<DesignatedInitExpr>(E)->getInit());
  case Expr::StmtExprClass: {
    // A GNU statement expression classifies like its final expression, if
    // any; otherwise it is a prvalue.
    const CompoundStmt *S = cast<StmtExpr>(E)->getSubStmt();
    if (const Expr *LastExpr = dyn_cast_or_null<Expr>(S->body_back()))
      return ClassifyUnnamed(Ctx, LastExpr->getType());
    return Cl::CL_PRValue;
  }
    // The MS __uuidof extension yields an lvalue.
  case Expr::CXXUuidofExprClass:
    return Cl::CL_LValue;
  case Expr::PackExpansionExprClass:
    return ClassifyInternal(Ctx, cast<PackExpansionExpr>(E)->getPattern());
  case Expr::MaterializeTemporaryExprClass:
    return cast<MaterializeTemporaryExpr>(E)->isBoundToLvalueReference()
              ? Cl::CL_LValue
              : Cl::CL_XValue;
  case Expr::InitListExprClass:
    // An init list can be an lvalue if it is bound to a reference and
    // contains only one element. In that case, we look at that element
    // for an exact classification. Init list creation takes care of the
    // value kind for us, so we only need to fine-tune.
    if (E->isRValue())
      return ClassifyExprValueKind(Lang, E, E->getValueKind());
    assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
           "Only 1-element init lists can be glvalues.");
    return ClassifyInternal(Ctx, cast<InitListExpr>(E)->getInit(0));
  }
  llvm_unreachable("unhandled expression kind in classification");
}
/// ClassifyDecl - Return the classification of an expression referencing the
/// given declaration.
static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
  // C++ [expr.prim.general]p6: The result is an lvalue if the entity is a
  // function, variable, or data member and a prvalue otherwise.
  // In C, functions are not lvalues.
  // Referencing an instance method yields a bound-member "pseudo" category.
  if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
    if (Method->isInstance())
      return Cl::CL_MemberFunction;
  // NonTypeTemplateParmDecl derives from VarDecl but isn't an lvalue unless
  // it has reference type (C++ [temp.param]p6), so handle it before the
  // generic VarDecl check below.
  if (const NonTypeTemplateParmDecl *NTTP =
        dyn_cast<NonTypeTemplateParmDecl>(D))
    return NTTP->getType()->isReferenceType() ? Cl::CL_LValue : Cl::CL_PRValue;
  // Variables and data members are lvalues in both languages; functions and
  // MS properties count only in C++.
  bool IsLValue = isa<VarDecl>(D) || isa<FieldDecl>(D) ||
                  isa<IndirectFieldDecl>(D) ||
                  (Ctx.getLangOpts().CPlusPlus &&
                   (isa<FunctionDecl>(D) || isa<MSPropertyDecl>(D) ||
                    isa<FunctionTemplateDecl>(D)));
  return IsLValue ? Cl::CL_LValue : Cl::CL_PRValue;
}
/// ClassifyUnnamed - Return the classification of an expression yielding an
/// unnamed value of the given type. This applies in particular to function
/// calls and casts.
static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T) {
  // In C, function calls are always rvalues.
  if (!Ctx.getLangOpts().CPlusPlus)
    return Cl::CL_PRValue;
  // C++ [expr.call]p10: A function call is an lvalue if the result type is an
  // lvalue reference type or an rvalue reference to function type, an xvalue
  // if the result type is an rvalue reference to object type, and a prvalue
  // otherwise.
  if (T->isLValueReferenceType())
    return Cl::CL_LValue;
  if (const RValueReferenceType *RRef = T->getAs<RValueReferenceType>())
    return RRef->getPointeeType()->isFunctionType() ? Cl::CL_LValue
                                                    : Cl::CL_XValue;
  // Not a reference at all; could still be a class temporary, though.
  return ClassifyTemporary(T);
}
/// ClassifyMemberExpr - Classify a member access (E1.E2 or E1->E2) per
/// C99 6.5.2.3 and C++ [expr.ref].
static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) {
  // Members of an __unknown_anytype expression: functions classify as
  // prvalues, everything else as lvalues.
  if (E->getType() == Ctx.UnknownAnyTy)
    return (isa<FunctionDecl>(E->getMemberDecl())
              ? Cl::CL_PRValue : Cl::CL_LValue);
  // Handle C first, it's easier.
  if (!Ctx.getLangOpts().CPlusPlus) {
    // C99 6.5.2.3p3
    // For dot access, the expression is an lvalue if the first part is. For
    // arrow access, it always is an lvalue.
    if (E->isArrow())
      return Cl::CL_LValue;
    // ObjC property accesses are not lvalues, but get special treatment.
    Expr *Base = E->getBase()->IgnoreParens();
    if (isa<ObjCPropertyRefExpr>(Base))
      return Cl::CL_SubObjCPropertySetting;
    return ClassifyInternal(Ctx, Base);
  }
  NamedDecl *Member = E->getMemberDecl();
  // C++ [expr.ref]p3: E1->E2 is converted to the equivalent form (*(E1)).E2.
  // C++ [expr.ref]p4: If E2 is declared to have type "reference to T", then
  //   E1.E2 is an lvalue.
  if (ValueDecl *Value = dyn_cast<ValueDecl>(Member))
    if (Value->getType()->isReferenceType())
      return Cl::CL_LValue;
  // Otherwise, one of the following rules applies.
  //   -- If E2 is a static member [...] then E1.E2 is an lvalue.
  if (isa<VarDecl>(Member) && Member->getDeclContext()->isRecord())
    return Cl::CL_LValue;
  //   -- If E2 is a non-static data member [...]. If E1 is an lvalue, then
  //      E1.E2 is an lvalue; if E1 is an xvalue, then E1.E2 is an xvalue;
  //      otherwise, it is a prvalue.
  if (isa<FieldDecl>(Member)) {
    // *E1 is an lvalue
    if (E->isArrow())
      return Cl::CL_LValue;
    // Writes through an ObjC property base need setter-style treatment.
    Expr *Base = E->getBase()->IgnoreParenImpCasts();
    if (isa<ObjCPropertyRefExpr>(Base))
      return Cl::CL_SubObjCPropertySetting;
    return ClassifyInternal(Ctx, E->getBase());
  }
  //   -- If E2 is a [...] member function, [...]
  //      -- If it refers to a static member function [...], then E1.E2 is an
  //         lvalue; [...]
  //      -- Otherwise [...] E1.E2 is a prvalue.
  if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member))
    return Method->isStatic() ? Cl::CL_LValue : Cl::CL_MemberFunction;
  //   -- If E2 is a member enumerator [...], the expression E1.E2 is a prvalue.
  // So is everything else we haven't handled yet.
  return Cl::CL_PRValue;
}
/// ClassifyBinaryOp - Classify a C++ binary operator expression. Assignment,
/// comma, and the pointer-to-member operators have category-specific rules;
/// every other binary operator yields a prvalue.
static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E) {
  assert(Ctx.getLangOpts().CPlusPlus &&
         "This is only relevant for C++.");
  // C++ [expr.ass]p1: All [...] return an lvalue referring to the left operand.
  // Except we override this for writes to ObjC properties.
  if (E->isAssignmentOp())
    return (E->getLHS()->getObjectKind() == OK_ObjCProperty
              ? Cl::CL_PRValue : Cl::CL_LValue);
  // HLSL Change Starts
  // In HLSL, BO_Comma yields a prvalue.
  if (E->getOpcode() == BO_Comma && Ctx.getLangOpts().HLSL)
    return Cl::CL_PRValue;
  // HLSL Change Ends
  // C++ [expr.comma]p1: the result is of the same value category as its right
  //   operand, [...].
  if (E->getOpcode() == BO_Comma)
    return ClassifyInternal(Ctx, E->getRHS());
  // C++ [expr.mptr.oper]p6: The result of a .* expression whose second operand
  //   is a pointer to a data member is of the same value category as its first
  //   operand.
  if (E->getOpcode() == BO_PtrMemD)
    return (E->getType()->isFunctionType() ||
            E->hasPlaceholderType(BuiltinType::BoundMember))
               ? Cl::CL_MemberFunction
               : ClassifyInternal(Ctx, E->getLHS());
  // C++ [expr.mptr.oper]p6: The result of an ->* expression is an lvalue if its
  //   second operand is a pointer to data member and a prvalue otherwise.
  if (E->getOpcode() == BO_PtrMemI)
    return (E->getType()->isFunctionType() ||
            E->hasPlaceholderType(BuiltinType::BoundMember))
               ? Cl::CL_MemberFunction
               : Cl::CL_LValue;
  // All other binary operations are prvalues.
  return Cl::CL_PRValue;
}
/// ClassifyConditional - Classify a C++ conditional expression from the value
/// categories of its second and third operands (C++ [expr.cond]).
static Cl::Kinds ClassifyConditional(ASTContext &Ctx, const Expr *True,
                                     const Expr *False) {
  assert(Ctx.getLangOpts().CPlusPlus &&
         "This is only relevant for C++.");
  // C++ [expr.cond]p2
  //   If either the second or the third operand has type (cv) void,
  //   one of the following shall hold:
  if (True->getType()->isVoidType() || False->getType()->isVoidType()) {
    // The second or the third operand (but not both) is a (possibly
    // parenthesized) throw-expression; the result is of the [...] value
    // category of the other.
    bool TrueIsThrow = isa<CXXThrowExpr>(True->IgnoreParenImpCasts());
    bool FalseIsThrow = isa<CXXThrowExpr>(False->IgnoreParenImpCasts());
    // Select the single non-throw operand; nullptr when both or neither throw.
    if (const Expr *NonThrow = TrueIsThrow ? (FalseIsThrow ? nullptr : False)
                                           : (FalseIsThrow ? True : nullptr))
      return ClassifyInternal(Ctx, NonThrow);
    // [Otherwise] the result [...] is a prvalue.
    return Cl::CL_PRValue;
  }
  // Note that at this point, we have already performed all conversions
  // according to [expr.cond]p3.
  // C++ [expr.cond]p4: If the second and third operands are glvalues of the
  //   same value category [...], the result is of that [...] value category.
  // C++ [expr.cond]p5: Otherwise, the result is a prvalue.
  Cl::Kinds LCl = ClassifyInternal(Ctx, True),
            RCl = ClassifyInternal(Ctx, False);
  return LCl == RCl ? LCl : Cl::CL_PRValue;
}
/// IsModifiable - Given an expression E already classified as Kind, determine
/// whether it is a modifiable lvalue and, if not, why. Loc receives a
/// location to cite for the GCC cast-as-lvalue extension diagnostic.
static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
                                       Cl::Kinds Kind, SourceLocation &Loc) {
  // As a general rule, we only care about lvalues. But there are some rvalues
  // for which we want to generate special results.
  if (Kind == Cl::CL_PRValue) {
    // For the sake of better diagnostics, we want to specifically recognize
    // use of the GCC cast-as-lvalue extension.
    if (const ExplicitCastExpr *CE =
          dyn_cast<ExplicitCastExpr>(E->IgnoreParens())) {
      if (CE->getSubExpr()->IgnoreParenImpCasts()->isLValue()) {
        Loc = CE->getExprLoc();
        return Cl::CM_LValueCast;
      }
    }
  }
  if (Kind != Cl::CL_LValue)
    return Cl::CM_RValue;
  // This is the lvalue case.
  // Functions are lvalues in C++, but not modifiable. (C++ [basic.lval]p6)
  if (Ctx.getLangOpts().CPlusPlus && E->getType()->isFunctionType())
    return Cl::CM_Function;
  // Assignment to a property in ObjC is an implicit setter access. But a
  // setter might not exist.
  if (const ObjCPropertyRefExpr *Expr = dyn_cast<ObjCPropertyRefExpr>(E)) {
    if (Expr->isImplicitProperty() &&
        Expr->getImplicitPropertySetter() == nullptr)
      return Cl::CM_NoSetterProperty;
  }
  CanQualType CT = Ctx.getCanonicalType(E->getType());
  // Const stuff is obviously not modifiable.
  if (CT.isConstQualified())
    return Cl::CM_ConstQualified;
  if (CT.getQualifiers().getAddressSpace() == LangAS::opencl_constant)
    return Cl::CM_ConstAddrSpace;
  // Arrays are not modifiable, only their elements are.
  if (CT->isArrayType() && !Ctx.getLangOpts().HLSL) // HLSL Change: arrays are assignable
    return Cl::CM_ArrayType;
  // Incomplete types are not modifiable.
  if (CT->isIncompleteType())
    return Cl::CM_IncompleteType;
  // Records with any const fields (recursively) are not modifiable.
  if (const RecordType *R = CT->getAs<RecordType>())
    if (R->hasConstFields())
      return Cl::CM_ConstQualified;
  return Cl::CM_Modifiable;
}
/// ClassifyLValue - Map the fine-grained value-category classification onto
/// the coarser LValueClassification enum used by older Sema clients.
Expr::LValueClassification Expr::ClassifyLValue(ASTContext &Ctx) const {
  Classification VC = Classify(Ctx);
  switch (VC.getKind()) {
  case Cl::CL_LValue: return LV_Valid;
  case Cl::CL_XValue: return LV_InvalidExpression;
  case Cl::CL_Function: return LV_NotObjectType;
  case Cl::CL_Void: return LV_InvalidExpression;
  case Cl::CL_AddressableVoid: return LV_IncompleteVoidType;
  case Cl::CL_DuplicateVectorComponents: return LV_DuplicateVectorComponents;
  case Cl::CL_DuplicateMatrixComponents: return LV_DuplicateMatrixComponents; // HLSL Change
  case Cl::CL_MemberFunction: return LV_MemberFunction;
  case Cl::CL_SubObjCPropertySetting: return LV_SubObjCPropertySetting;
  case Cl::CL_ClassTemporary: return LV_ClassTemporary;
  case Cl::CL_ArrayTemporary: return LV_ArrayTemporary;
  case Cl::CL_ObjCMessageRValue: return LV_InvalidMessageExpression;
  case Cl::CL_PRValue: return LV_InvalidExpression;
  }
  llvm_unreachable("Unhandled kind");
}
/// isModifiableLvalue - Classify this expression and report whether it may
/// appear on the left-hand side of an assignment; on failure, the result
/// encodes the reason and *Loc (if provided) a location to diagnose.
Expr::isModifiableLvalueResult
Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const {
  SourceLocation dummy;
  Classification VC = ClassifyModifiable(Ctx, Loc ? *Loc : dummy);
  // First translate the value-category kind; only CL_LValue falls through to
  // the modifiability check below.
  switch (VC.getKind()) {
  case Cl::CL_LValue: break;
  case Cl::CL_XValue: return MLV_InvalidExpression;
  case Cl::CL_Function: return MLV_NotObjectType;
  case Cl::CL_Void: return MLV_InvalidExpression;
  case Cl::CL_AddressableVoid: return MLV_IncompleteVoidType;
  case Cl::CL_DuplicateVectorComponents: return MLV_DuplicateVectorComponents;
  case Cl::CL_DuplicateMatrixComponents: return MLV_DuplicateMatrixComponents; // HLSL Change
  case Cl::CL_MemberFunction: return MLV_MemberFunction;
  case Cl::CL_SubObjCPropertySetting: return MLV_SubObjCPropertySetting;
  case Cl::CL_ClassTemporary: return MLV_ClassTemporary;
  case Cl::CL_ArrayTemporary: return MLV_ArrayTemporary;
  case Cl::CL_ObjCMessageRValue: return MLV_InvalidMessageExpression;
  case Cl::CL_PRValue:
    // A prvalue is assignable only via the GCC cast-as-lvalue extension.
    return VC.getModifiable() == Cl::CM_LValueCast ?
      MLV_LValueCast : MLV_InvalidExpression;
  }
  assert(VC.getKind() == Cl::CL_LValue && "Unhandled kind");
  switch (VC.getModifiable()) {
  case Cl::CM_Untested: llvm_unreachable("Did not test modifiability");
  case Cl::CM_Modifiable: return MLV_Valid;
  case Cl::CM_RValue: llvm_unreachable("CM_RValue and CL_LValue don't match");
  case Cl::CM_Function: return MLV_NotObjectType;
  case Cl::CM_LValueCast:
    llvm_unreachable("CM_LValueCast and CL_LValue don't match");
  case Cl::CM_NoSetterProperty: return MLV_NoSetterProperty;
  case Cl::CM_ConstQualified: return MLV_ConstQualified;
  case Cl::CM_ConstAddrSpace: return MLV_ConstAddrSpace;
  case Cl::CM_ArrayType: return MLV_ArrayType;
  case Cl::CM_IncompleteType: return MLV_IncompleteType;
  }
  llvm_unreachable("Unhandled modifiable type");
}
//===--- ExprCXX.cpp - (C++) Expression AST Node Implementation -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the subclasses of Expr class declared in ExprCXX.h
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/IdentifierTable.h"
using namespace clang;
//===----------------------------------------------------------------------===//
// Child Iterators for iterating over subexpressions/substatements
//===----------------------------------------------------------------------===//
bool CXXTypeidExpr::isPotentiallyEvaluated() const {
if (isTypeOperand())
return false;
// C++11 [expr.typeid]p3:
// When typeid is applied to an expression other than a glvalue of
// polymorphic class type, [...] the expression is an unevaluated operand.
const Expr *E = getExprOperand();
if (const CXXRecordDecl *RD = E->getType()->getAsCXXRecordDecl())
if (RD->isPolymorphic() && E->isGLValue())
return true;
return false;
}
QualType CXXTypeidExpr::getTypeOperand(ASTContext &Context) const {
  assert(isTypeOperand() && "Cannot call getTypeOperand for typeid(expr)");
  // Strip references, then top-level array qualifiers, from the written type.
  QualType T = Operand.get<TypeSourceInfo *>()->getType();
  Qualifiers Quals;
  return Context.getUnqualifiedArrayType(T.getNonReferenceType(), Quals);
}
QualType CXXUuidofExpr::getTypeOperand(ASTContext &Context) const {
  assert(isTypeOperand() && "Cannot call getTypeOperand for __uuidof(expr)");
  // Strip references, then top-level array qualifiers, from the written type.
  QualType T = Operand.get<TypeSourceInfo *>()->getType();
  Qualifiers Quals;
  return Context.getUnqualifiedArrayType(T.getNonReferenceType(), Quals);
}
// static
/// GetUuidAttrOfType - Find the UuidAttr associated with QT, looking through
/// one level of pointer/reference/array indirection and, for class template
/// specializations, through the template arguments. Returns null if no UUID
/// is found or if multiple distinct UUIDs are seen; in the latter case
/// *RDHasMultipleGUIDsPtr (if provided) is set to true.
const UuidAttr *CXXUuidofExpr::GetUuidAttrOfType(QualType QT,
                                      bool *RDHasMultipleGUIDsPtr) {
  // Optionally remove one level of pointer, reference or array indirection.
  const Type *Ty = QT.getTypePtr();
  if (QT->isPointerType() || QT->isReferenceType())
    Ty = QT->getPointeeType().getTypePtr();
  else if (QT->isArrayType())
    Ty = Ty->getBaseElementTypeUnsafe();
  const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  if (!RD)
    return nullptr;
  // A directly attached uuid attribute wins outright.
  if (const UuidAttr *Uuid = RD->getMostRecentDecl()->getAttr<UuidAttr>())
    return Uuid;
  // __uuidof can grab UUIDs from template arguments.
  if (const ClassTemplateSpecializationDecl *CTSD =
          dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
    const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
    const UuidAttr *UuidForRD = nullptr;
    for (const TemplateArgument &TA : TAL.asArray()) {
      bool SeenMultipleGUIDs = false;
      const UuidAttr *UuidForTA = nullptr;
      // Recurse into type and declaration arguments only.
      if (TA.getKind() == TemplateArgument::Type)
        UuidForTA = GetUuidAttrOfType(TA.getAsType(), &SeenMultipleGUIDs);
      else if (TA.getKind() == TemplateArgument::Declaration)
        UuidForTA =
            GetUuidAttrOfType(TA.getAsDecl()->getType(), &SeenMultipleGUIDs);
      // If the template argument has a UUID, there are three cases:
      //  - This is the first UUID seen for this RecordDecl.
      //  - This is a different UUID than previously seen for this RecordDecl.
      //  - This is the same UUID than previously seen for this RecordDecl.
      if (UuidForTA) {
        if (!UuidForRD)
          UuidForRD = UuidForTA;
        else if (UuidForRD != UuidForTA)
          SeenMultipleGUIDs = true;
      }
      // Seeing multiple UUIDs means that we couldn't find a UUID
      if (SeenMultipleGUIDs) {
        if (RDHasMultipleGUIDsPtr)
          *RDHasMultipleGUIDsPtr = true;
        return nullptr;
      }
    }
    return UuidForRD;
  }
  return nullptr;
}
/// Return the GUID string this __uuidof expression evaluates to.
StringRef CXXUuidofExpr::getUuidAsStringRef(ASTContext &Context) const {
  if (isTypeOperand())
    return CXXUuidofExpr::GetUuidAttrOfType(getTypeOperand(Context))->getGuid();
  // Special case: __uuidof(0) means an all-zero GUID.
  Expr *Op = getExprOperand();
  if (Op->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull))
    return "00000000-0000-0000-0000-000000000000";
  return CXXUuidofExpr::GetUuidAttrOfType(Op->getType())->getGuid();
}
// CXXScalarValueInitExpr
SourceLocation CXXScalarValueInitExpr::getLocStart() const {
  // With no written type info, the only location we track is the ')'.
  if (TypeInfo)
    return TypeInfo->getTypeLoc().getBeginLoc();
  return RParenLoc;
}
// CXXNewExpr
/// Construct a new-expression node. Allocates and populates the trailing
/// sub-expression array in the fixed order: optional array size, optional
/// initializer, then placement arguments; also widens the node's dependence
/// bits from those sub-expressions and fixes up the source range.
CXXNewExpr::CXXNewExpr(const ASTContext &C, bool globalNew,
                       FunctionDecl *operatorNew, FunctionDecl *operatorDelete,
                       bool usualArrayDeleteWantsSize,
                       ArrayRef<Expr*> placementArgs,
                       SourceRange typeIdParens, Expr *arraySize,
                       InitializationStyle initializationStyle,
                       Expr *initializer, QualType ty,
                       TypeSourceInfo *allocatedTypeInfo,
                       SourceRange Range, SourceRange directInitRange)
  : Expr(CXXNewExprClass, ty, VK_RValue, OK_Ordinary,
         ty->isDependentType(), ty->isDependentType(),
         ty->isInstantiationDependentType(),
         ty->containsUnexpandedParameterPack()),
    SubExprs(nullptr), OperatorNew(operatorNew), OperatorDelete(operatorDelete),
    AllocatedTypeInfo(allocatedTypeInfo), TypeIdParens(typeIdParens),
    Range(Range), DirectInitRange(directInitRange),
    GlobalNew(globalNew), UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize) {
  assert((initializer != nullptr || initializationStyle == NoInit) &&
         "Only NoInit can have no initializer.");
  // 0 encodes "no initializer"; otherwise the style is stored offset by one.
  StoredInitializationStyle = initializer ? initializationStyle + 1 : 0;
  AllocateArgsArray(C, arraySize != nullptr, placementArgs.size(),
                    initializer != nullptr);
  unsigned i = 0;
  if (Array) {
    if (arraySize->isInstantiationDependent())
      ExprBits.InstantiationDependent = true;
    if (arraySize->containsUnexpandedParameterPack())
      ExprBits.ContainsUnexpandedParameterPack = true;
    SubExprs[i++] = arraySize;
  }
  if (initializer) {
    if (initializer->isInstantiationDependent())
      ExprBits.InstantiationDependent = true;
    if (initializer->containsUnexpandedParameterPack())
      ExprBits.ContainsUnexpandedParameterPack = true;
    SubExprs[i++] = initializer;
  }
  for (unsigned j = 0; j != placementArgs.size(); ++j) {
    if (placementArgs[j]->isInstantiationDependent())
      ExprBits.InstantiationDependent = true;
    if (placementArgs[j]->containsUnexpandedParameterPack())
      ExprBits.ContainsUnexpandedParameterPack = true;
    SubExprs[i++] = placementArgs[j];
  }
  // Extend the node's range to cover the initializer (or the type-id parens
  // when there is no written initializer).
  switch (getInitializationStyle()) {
  case CallInit:
    this->Range.setEnd(DirectInitRange.getEnd()); break;
  case ListInit:
    this->Range.setEnd(getInitializer()->getSourceRange().getEnd()); break;
  default:
    if (TypeIdParens.isValid())
      this->Range.setEnd(TypeIdParens.getEnd());
    break;
  }
}
/// Allocate the trailing Stmt* array that holds the optional array size,
/// optional initializer, and the placement arguments.
void CXXNewExpr::AllocateArgsArray(const ASTContext &C, bool isArray,
                                   unsigned numPlaceArgs, bool hasInitializer){
  assert(SubExprs == nullptr && "SubExprs already allocated");
  Array = isArray;
  NumPlacementArgs = numPlaceArgs;
  // One slot for the array size (if any), one for the initializer (if any),
  // plus the placement arguments.
  const unsigned NumSubExprs = Array + hasInitializer + NumPlacementArgs;
  SubExprs = new (C) Stmt *[NumSubExprs];
}
bool CXXNewExpr::shouldNullCheckAllocation(const ASTContext &Ctx) const {
return getOperatorNew()->getType()->castAs<FunctionProtoType>()->isNothrow(
Ctx) &&
!getOperatorNew()->isReservedGlobalPlacementOperator();
}
// CXXDeleteExpr
QualType CXXDeleteExpr::getDestroyedType() const {
  const QualType ArgType = getArgument()->getType();
  // The type-to-delete may not be a pointer if it's a dependent type; report
  // a null type in that case.
  if (ArgType->isDependentType() && !ArgType->isPointerType())
    return QualType();
  return ArgType->getAs<PointerType>()->getPointeeType();
}
// CXXPseudoDestructorExpr
/// Store a fully-resolved destroyed type; the location is cached from the
/// beginning of the type's written source range.
PseudoDestructorTypeStorage::PseudoDestructorTypeStorage(TypeSourceInfo *Info)
 : Type(Info)
{
  Location = Info->getTypeLoc().getLocalSourceRange().getBegin();
}
/// Construct a pseudo-destructor call node (p->~T() on a non-class type).
/// The expression's type is the placeholder BoundMemberTy, and each
/// dependence bit is derived from the base, qualifier, scope type, and
/// destroyed type as shown in the corresponding initializer below.
CXXPseudoDestructorExpr::CXXPseudoDestructorExpr(const ASTContext &Context,
                Expr *Base, bool isArrow, SourceLocation OperatorLoc,
                NestedNameSpecifierLoc QualifierLoc, TypeSourceInfo *ScopeType,
                SourceLocation ColonColonLoc, SourceLocation TildeLoc,
                PseudoDestructorTypeStorage DestroyedType)
  : Expr(CXXPseudoDestructorExprClass,
         Context.BoundMemberTy,
         VK_RValue, OK_Ordinary,
         /*isTypeDependent=*/(Base->isTypeDependent() ||
           (DestroyedType.getTypeSourceInfo() &&
            DestroyedType.getTypeSourceInfo()->getType()->isDependentType())),
         /*isValueDependent=*/Base->isValueDependent(),
         (Base->isInstantiationDependent() ||
          (QualifierLoc &&
           QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent()) ||
          (ScopeType &&
           ScopeType->getType()->isInstantiationDependentType()) ||
          (DestroyedType.getTypeSourceInfo() &&
           DestroyedType.getTypeSourceInfo()->getType()
                                             ->isInstantiationDependentType())),
         // ContainsUnexpandedParameterPack
         (Base->containsUnexpandedParameterPack() ||
          (QualifierLoc &&
           QualifierLoc.getNestedNameSpecifier()
                                        ->containsUnexpandedParameterPack()) ||
          (ScopeType &&
           ScopeType->getType()->containsUnexpandedParameterPack()) ||
          (DestroyedType.getTypeSourceInfo() &&
           DestroyedType.getTypeSourceInfo()->getType()
                                   ->containsUnexpandedParameterPack()))),
    Base(static_cast<Stmt *>(Base)), IsArrow(isArrow),
    OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
    ScopeType(ScopeType), ColonColonLoc(ColonColonLoc), TildeLoc(TildeLoc),
    DestroyedType(DestroyedType) { }
QualType CXXPseudoDestructorExpr::getDestroyedType() const {
  // Returns a null QualType when no type source info is stored.
  TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo();
  return TInfo ? TInfo->getType() : QualType();
}
SourceLocation CXXPseudoDestructorExpr::getLocEnd() const {
  // Prefer the precise end of the written destroyed type when available;
  // otherwise fall back to the stored destroyed-type location.
  if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo())
    return TInfo->getTypeLoc().getLocalSourceRange().getEnd();
  return DestroyedType.getLocation();
}
// UnresolvedLookupExpr
UnresolvedLookupExpr *
UnresolvedLookupExpr::Create(const ASTContext &C,
                             CXXRecordDecl *NamingClass,
                             NestedNameSpecifierLoc QualifierLoc,
                             SourceLocation TemplateKWLoc,
                             const DeclarationNameInfo &NameInfo,
                             bool ADL,
                             const TemplateArgumentListInfo *Args,
                             UnresolvedSetIterator Begin,
                             UnresolvedSetIterator End)
{
  assert(Args || TemplateKWLoc.isValid());
  // Tail-allocate room for the template keyword and explicit arguments.
  const unsigned NumArgs = Args ? Args->size() : 0;
  void *Mem = C.Allocate(sizeof(UnresolvedLookupExpr) +
                         ASTTemplateKWAndArgsInfo::sizeFor(NumArgs));
  return new (Mem) UnresolvedLookupExpr(C, NamingClass, QualifierLoc,
                                        TemplateKWLoc, NameInfo,
                                        ADL, /*Overload*/ true, Args,
                                        Begin, End);
}
UnresolvedLookupExpr *
UnresolvedLookupExpr::CreateEmpty(const ASTContext &C,
                                  bool HasTemplateKWAndArgsInfo,
                                  unsigned NumTemplateArgs) {
  // Reserve trailing storage only when template info will be filled in.
  std::size_t Size = sizeof(UnresolvedLookupExpr);
  if (HasTemplateKWAndArgsInfo)
    Size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
  void *Mem = C.Allocate(Size, llvm::alignOf<UnresolvedLookupExpr>());
  UnresolvedLookupExpr *E = new (Mem) UnresolvedLookupExpr(EmptyShell());
  E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
  return E;
}
/// Construct an overload-set expression (base of UnresolvedLookupExpr and
/// UnresolvedMemberExpr). Copies the candidate declarations [Begin, End) into
/// ASTContext-allocated storage, derives the dependence bits from the name,
/// qualifier, candidates, and any explicit template arguments, and demotes
/// the type to DependentTy if the expression turns out type-dependent.
OverloadExpr::OverloadExpr(StmtClass K, const ASTContext &C,
                           NestedNameSpecifierLoc QualifierLoc,
                           SourceLocation TemplateKWLoc,
                           const DeclarationNameInfo &NameInfo,
                           const TemplateArgumentListInfo *TemplateArgs,
                           UnresolvedSetIterator Begin,
                           UnresolvedSetIterator End,
                           bool KnownDependent,
                           bool KnownInstantiationDependent,
                           bool KnownContainsUnexpandedParameterPack)
  : Expr(K, C.OverloadTy, VK_LValue, OK_Ordinary, KnownDependent,
         KnownDependent,
         (KnownInstantiationDependent ||
          NameInfo.isInstantiationDependent() ||
          (QualifierLoc &&
           QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
         (KnownContainsUnexpandedParameterPack ||
          NameInfo.containsUnexpandedParameterPack() ||
          (QualifierLoc &&
           QualifierLoc.getNestedNameSpecifier()
                                      ->containsUnexpandedParameterPack()))),
    NameInfo(NameInfo), QualifierLoc(QualifierLoc),
    Results(nullptr), NumResults(End - Begin),
    HasTemplateKWAndArgsInfo(TemplateArgs != nullptr ||
                             TemplateKWLoc.isValid()) {
  // NumResults is already set by the member initializer list above; the
  // redundant re-assignment that used to live here has been removed.
  if (NumResults) {
    // Determine whether this expression is type-dependent: any candidate in
    // a dependent context (or an unresolved using declaration) makes it so.
    for (UnresolvedSetImpl::const_iterator I = Begin; I != End; ++I) {
      if ((*I)->getDeclContext()->isDependentContext() ||
          isa<UnresolvedUsingValueDecl>(*I)) {
        ExprBits.TypeDependent = true;
        ExprBits.ValueDependent = true;
        ExprBits.InstantiationDependent = true;
      }
    }

    // Copy the candidate set into context-owned storage.
    Results = static_cast<DeclAccessPair *>(
                                C.Allocate(sizeof(DeclAccessPair) * NumResults,
                                           llvm::alignOf<DeclAccessPair>()));
    memcpy(Results, Begin.I, NumResults * sizeof(DeclAccessPair));
  }

  // If we have explicit template arguments, check for dependent
  // template arguments and whether they contain any unexpanded pack
  // expansions.
  if (TemplateArgs) {
    bool Dependent = false;
    bool InstantiationDependent = false;
    bool ContainsUnexpandedParameterPack = false;
    getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs,
                                               Dependent,
                                               InstantiationDependent,
                                               ContainsUnexpandedParameterPack);

    if (Dependent) {
      ExprBits.TypeDependent = true;
      ExprBits.ValueDependent = true;
    }
    if (InstantiationDependent)
      ExprBits.InstantiationDependent = true;
    if (ContainsUnexpandedParameterPack)
      ExprBits.ContainsUnexpandedParameterPack = true;
  } else if (TemplateKWLoc.isValid()) {
    getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
  }

  if (isTypeDependent())
    setType(C.DependentTy);
}
/// initializeResults - Late-populate the candidate set with the declarations
/// in [Begin, End), copying them into ASTContext-allocated storage. May only
/// be called once, on a node whose results are still empty.
void OverloadExpr::initializeResults(const ASTContext &C,
                                     UnresolvedSetIterator Begin,
                                     UnresolvedSetIterator End) {
  assert(!Results && "Results already initialized!");
  NumResults = End - Begin;
  if (NumResults) {
    Results = static_cast<DeclAccessPair *>(
                               C.Allocate(sizeof(DeclAccessPair) * NumResults,
 
                                          llvm::alignOf<DeclAccessPair>()));
    memcpy(Results, Begin.I, NumResults * sizeof(DeclAccessPair));
  }
}
CXXRecordDecl *OverloadExpr::getNamingClass() const {
  // Dispatch to whichever of the two concrete subclasses this is.
  if (const UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(this))
    return ULE->getNamingClass();
  return cast<UnresolvedMemberExpr>(this)->getNamingClass();
}
// DependentScopeDeclRefExpr
/// Construct a reference to a declaration in a dependent scope. The node is
/// always type- and value-dependent (the two 'true' arguments below); the
/// instantiation-dependence and unexpanded-pack bits come from the name,
/// qualifier, and any explicit template arguments.
DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(QualType T,
                            NestedNameSpecifierLoc QualifierLoc,
                            SourceLocation TemplateKWLoc,
                            const DeclarationNameInfo &NameInfo,
                            const TemplateArgumentListInfo *Args)
  : Expr(DependentScopeDeclRefExprClass, T, VK_LValue, OK_Ordinary,
         true, true,
         (NameInfo.isInstantiationDependent() ||
          (QualifierLoc &&
           QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
         (NameInfo.containsUnexpandedParameterPack() ||
          (QualifierLoc &&
           QualifierLoc.getNestedNameSpecifier()
                            ->containsUnexpandedParameterPack()))),
    QualifierLoc(QualifierLoc), NameInfo(NameInfo),
    HasTemplateKWAndArgsInfo(Args != nullptr || TemplateKWLoc.isValid())
{
  // Fold the template arguments' pack-expansion state into the node.
  if (Args) {
    bool Dependent = true;
    bool InstantiationDependent = true;
    bool ContainsUnexpandedParameterPack
      = ExprBits.ContainsUnexpandedParameterPack;
    getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *Args,
                                               Dependent,
                                               InstantiationDependent,
                                               ContainsUnexpandedParameterPack);
    ExprBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
  } else if (TemplateKWLoc.isValid()) {
    getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
  }
}
DependentScopeDeclRefExpr *
DependentScopeDeclRefExpr::Create(const ASTContext &C,
                                  NestedNameSpecifierLoc QualifierLoc,
                                  SourceLocation TemplateKWLoc,
                                  const DeclarationNameInfo &NameInfo,
                                  const TemplateArgumentListInfo *Args) {
  assert(QualifierLoc && "should be created for dependent qualifiers");
  // Tail-allocate space for explicit template arguments, or for just the
  // template keyword location when no arguments were written.
  std::size_t Size = sizeof(DependentScopeDeclRefExpr);
  if (Args)
    Size += ASTTemplateKWAndArgsInfo::sizeFor(Args->size());
  else if (TemplateKWLoc.isValid())
    Size += ASTTemplateKWAndArgsInfo::sizeFor(0);
  void *Mem = C.Allocate(Size);
  return new (Mem) DependentScopeDeclRefExpr(C.DependentTy, QualifierLoc,
                                             TemplateKWLoc, NameInfo, Args);
}
DependentScopeDeclRefExpr *
DependentScopeDeclRefExpr::CreateEmpty(const ASTContext &C,
                                       bool HasTemplateKWAndArgsInfo,
                                       unsigned NumTemplateArgs) {
  // Reserve trailing storage only when template info will be deserialized.
  std::size_t Size = sizeof(DependentScopeDeclRefExpr);
  if (HasTemplateKWAndArgsInfo)
    Size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
  void *Mem = C.Allocate(Size);
  DependentScopeDeclRefExpr *E
    = new (Mem) DependentScopeDeclRefExpr(QualType(), NestedNameSpecifierLoc(),
                                          SourceLocation(),
                                          DeclarationNameInfo(), nullptr);
  E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
  return E;
}
SourceLocation CXXConstructExpr::getLocStart() const {
  // Temporary-object expressions compute their own start location.
  if (const CXXTemporaryObjectExpr *TOE =
        dyn_cast<CXXTemporaryObjectExpr>(this))
    return TOE->getLocStart();
  return Loc;
}
SourceLocation CXXConstructExpr::getLocEnd() const {
  // Temporary-object expressions compute their own end location.
  if (const CXXTemporaryObjectExpr *TOE =
        dyn_cast<CXXTemporaryObjectExpr>(this))
    return TOE->getLocEnd();
  if (ParenOrBraceRange.isValid())
    return ParenOrBraceRange.getEnd();
  // Otherwise end at the last non-defaulted argument with a valid end
  // location, falling back to the constructor location itself.
  SourceLocation End = Loc;
  for (unsigned I = getNumArgs(); I > 0; --I) {
    const Expr *Arg = getArg(I - 1);
    if (Arg->isDefaultArgument())
      continue;
    SourceLocation ArgEnd = Arg->getLocEnd();
    if (ArgEnd.isValid()) {
      End = ArgEnd;
      break;
    }
  }
  return End;
}
SourceRange CXXOperatorCallExpr::getSourceRangeImpl() const {
  const OverloadedOperatorKind Kind = getOperator();
  switch (Kind) {
  case OO_PlusPlus:
  case OO_MinusMinus:
    // One argument means prefix form, two means postfix.
    return getNumArgs() == 1
               ? SourceRange(getOperatorLoc(), getArg(0)->getLocEnd())
               : SourceRange(getArg(0)->getLocStart(), getOperatorLoc());
  case OO_Arrow:
    return getArg(0)->getSourceRange();
  case OO_Call:
  case OO_Subscript:
    return SourceRange(getArg(0)->getLocStart(), getRParenLoc());
  default:
    break;
  }
  // Remaining operators: written-before unary, infix binary, or unknown.
  if (getNumArgs() == 1)
    return SourceRange(getOperatorLoc(), getArg(0)->getLocEnd());
  if (getNumArgs() == 2)
    return SourceRange(getArg(0)->getLocStart(), getArg(1)->getLocEnd());
  return getOperatorLoc();
}
Expr *CXXMemberCallExpr::getImplicitObjectArgument() const {
  const Expr *Callee = getCallee()->IgnoreParens();
  // Ordinary member call: obj.f(...) or ptr->f(...).
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Callee))
    return ME->getBase();
  // Pointer-to-member call: (obj.*pmf)(...) or (ptr->*pmf)(...).
  if (const BinaryOperator *Op = dyn_cast<BinaryOperator>(Callee))
    if (Op->getOpcode() == BO_PtrMemD || Op->getOpcode() == BO_PtrMemI)
      return Op->getLHS();
  // FIXME: Will eventually need to cope with member pointers.
  return nullptr;
}
CXXMethodDecl *CXXMemberCallExpr::getMethodDecl() const {
  const Expr *Callee = getCallee()->IgnoreParens();
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Callee))
    return cast<CXXMethodDecl>(ME->getMemberDecl());
  // FIXME: Will eventually need to cope with member pointers.
  return nullptr;
}
CXXRecordDecl *CXXMemberCallExpr::getRecordDecl() const {
  Expr *ThisArg = getImplicitObjectArgument();
  if (!ThisArg)
    return nullptr;
  // For '->' calls, look through the pointer to the pointee class.
  QualType ThisTy = ThisArg->getType();
  if (ThisTy->isAnyPointerType())
    return ThisTy->getPointeeType()->getAsCXXRecordDecl();
  return ThisTy->getAsCXXRecordDecl();
}
//===----------------------------------------------------------------------===//
//  Named casts
//===----------------------------------------------------------------------===//
/// getCastName - Get the name of the C++ cast being used, e.g.,
/// "static_cast", "dynamic_cast", "reinterpret_cast", or
/// "const_cast". The returned pointer must not be freed.
const char *CXXNamedCastExpr::getCastName() const {
  switch (getStmtClass()) {
  case CXXStaticCastExprClass:      return "static_cast";
  case CXXDynamicCastExprClass:     return "dynamic_cast";
  case CXXReinterpretCastExprClass: return "reinterpret_cast";
  case CXXConstCastExprClass:       return "const_cast";
  // Any other statement class is not a named cast.
  default:                          return "<invalid cast>";
  }
}
CXXStaticCastExpr *CXXStaticCastExpr::Create(const ASTContext &C, QualType T,
                                             ExprValueKind VK,
                                             CastKind K, Expr *Op,
                                             const CXXCastPath *BasePath,
                                             TypeSourceInfo *WrittenTy,
                                             SourceLocation L,
                                             SourceLocation RParenLoc,
                                             SourceRange AngleBrackets) {
  // Tail-allocate the base-class conversion path after the node.
  const unsigned PathSize = BasePath ? BasePath->size() : 0;
  void *Mem = C.Allocate(sizeof(CXXStaticCastExpr)
                         + PathSize * sizeof(CXXBaseSpecifier*));
  CXXStaticCastExpr *E =
    new (Mem) CXXStaticCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
                                RParenLoc, AngleBrackets);
  if (PathSize)
    E->setCastPath(*BasePath);
  return E;
}
CXXStaticCastExpr *CXXStaticCastExpr::CreateEmpty(const ASTContext &C,
                                                  unsigned PathSize) {
  // Shell node for deserialization; the cast path is filled in later.
  void *Mem = C.Allocate(sizeof(CXXStaticCastExpr)
                         + PathSize * sizeof(CXXBaseSpecifier*));
  return new (Mem) CXXStaticCastExpr(EmptyShell(), PathSize);
}
// Creates a dynamic_cast node; like CXXStaticCastExpr::Create, the base path
// lives in trailing storage after the node.
CXXDynamicCastExpr *CXXDynamicCastExpr::Create(const ASTContext &C, QualType T,
                                               ExprValueKind VK,
                                               CastKind K, Expr *Op,
                                               const CXXCastPath *BasePath,
                                               TypeSourceInfo *WrittenTy,
                                               SourceLocation L,
                                               SourceLocation RParenLoc,
                                               SourceRange AngleBrackets) {
  unsigned PathSize = (BasePath ? BasePath->size() : 0);
  void *Buffer = C.Allocate(sizeof(CXXDynamicCastExpr)
                            + PathSize * sizeof(CXXBaseSpecifier*));
  CXXDynamicCastExpr *E =
    new (Buffer) CXXDynamicCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
                                    RParenLoc, AngleBrackets);
  if (PathSize) E->setCastPath(*BasePath);
  return E;
}
// Allocates an uninitialized node (plus trailing cast-path storage) whose
// fields are filled in later.
CXXDynamicCastExpr *CXXDynamicCastExpr::CreateEmpty(const ASTContext &C,
                                                    unsigned PathSize) {
  void *Buffer =
    C.Allocate(sizeof(CXXDynamicCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
  return new (Buffer) CXXDynamicCastExpr(EmptyShell(), PathSize);
}
/// isAlwaysNull - Return whether the result of the dynamic_cast is proven
/// to always be null. For example:
///
/// struct A { };
/// struct B final : A { };
/// struct C { };
///
/// C *f(B* b) { return dynamic_cast<C*>(b); }
bool CXXDynamicCastExpr::isAlwaysNull() const
{
  QualType SrcType = getSubExpr()->getType();
  QualType DestType = getType();
  // For pointer casts, compare the pointee class types rather than the
  // pointer types themselves.
  if (const PointerType *SrcPTy = SrcType->getAs<PointerType>()) {
    SrcType = SrcPTy->getPointeeType();
    DestType = DestType->castAs<PointerType>()->getPointeeType();
  }
  // dynamic_cast<void*> yields the most-derived object; never provably null.
  if (DestType->isVoidType())
    return false;
  const CXXRecordDecl *SrcRD =
    cast<CXXRecordDecl>(SrcType->castAs<RecordType>()->getDecl());
  // Without 'final', the dynamic type could be any class derived from the
  // static source type, so nothing can be proven here.
  if (!SrcRD->hasAttr<FinalAttr>())
    return false;
  const CXXRecordDecl *DestRD =
    cast<CXXRecordDecl>(DestType->castAs<RecordType>()->getDecl());
  // The source's dynamic type is known exactly; the cast is provably null
  // unless the destination class derives from it.
  return !DestRD->isDerivedFrom(SrcRD);
}
// Creates a reinterpret_cast node; the base path, if any, is stored in
// trailing storage after the node.
CXXReinterpretCastExpr *
CXXReinterpretCastExpr::Create(const ASTContext &C, QualType T,
                               ExprValueKind VK, CastKind K, Expr *Op,
                               const CXXCastPath *BasePath,
                               TypeSourceInfo *WrittenTy, SourceLocation L,
                               SourceLocation RParenLoc,
                               SourceRange AngleBrackets) {
  unsigned PathSize = (BasePath ? BasePath->size() : 0);
  void *Buffer =
    C.Allocate(sizeof(CXXReinterpretCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
  CXXReinterpretCastExpr *E =
    new (Buffer) CXXReinterpretCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
                                        RParenLoc, AngleBrackets);
  if (PathSize) E->setCastPath(*BasePath);
  return E;
}
// Allocates an uninitialized node (plus trailing cast-path storage) whose
// fields are filled in later.
CXXReinterpretCastExpr *
CXXReinterpretCastExpr::CreateEmpty(const ASTContext &C, unsigned PathSize) {
  void *Buffer = C.Allocate(sizeof(CXXReinterpretCastExpr)
                            + PathSize * sizeof(CXXBaseSpecifier*));
  return new (Buffer) CXXReinterpretCastExpr(EmptyShell(), PathSize);
}
// Creates a const_cast node. const_cast never involves a base path, so no
// trailing storage is needed and plain context allocation suffices.
CXXConstCastExpr *CXXConstCastExpr::Create(const ASTContext &C, QualType T,
                                           ExprValueKind VK, Expr *Op,
                                           TypeSourceInfo *WrittenTy,
                                           SourceLocation L,
                                           SourceLocation RParenLoc,
                                           SourceRange AngleBrackets) {
  return new (C) CXXConstCastExpr(T, VK, Op, WrittenTy, L, RParenLoc, AngleBrackets);
}
// Allocates an uninitialized node whose fields are filled in later.
CXXConstCastExpr *CXXConstCastExpr::CreateEmpty(const ASTContext &C) {
  return new (C) CXXConstCastExpr(EmptyShell());
}
// Creates a functional-notation cast node, e.g. T(expr); the base path, if
// any, is stored in trailing storage after the node.
CXXFunctionalCastExpr *
CXXFunctionalCastExpr::Create(const ASTContext &C, QualType T, ExprValueKind VK,
                              TypeSourceInfo *Written, CastKind K, Expr *Op,
                              const CXXCastPath *BasePath,
                              SourceLocation L, SourceLocation R) {
  unsigned PathSize = (BasePath ? BasePath->size() : 0);
  void *Buffer = C.Allocate(sizeof(CXXFunctionalCastExpr)
                            + PathSize * sizeof(CXXBaseSpecifier*));
  CXXFunctionalCastExpr *E =
    new (Buffer) CXXFunctionalCastExpr(T, VK, Written, K, Op, PathSize, L, R);
  if (PathSize) E->setCastPath(*BasePath);
  return E;
}
// Allocates an uninitialized node (plus trailing cast-path storage) whose
// fields are filled in later.
CXXFunctionalCastExpr *
CXXFunctionalCastExpr::CreateEmpty(const ASTContext &C, unsigned PathSize) {
  void *Buffer = C.Allocate(sizeof(CXXFunctionalCastExpr)
                            + PathSize * sizeof(CXXBaseSpecifier*));
  return new (Buffer) CXXFunctionalCastExpr(EmptyShell(), PathSize);
}
// The expression starts where the written type starts.
SourceLocation CXXFunctionalCastExpr::getLocStart() const {
  return getTypeInfoAsWritten()->getTypeLoc().getLocStart();
}
// The expression ends at the closing paren, or at the sub-expression's end
// when there is no paren (e.g. brace-init syntax).
SourceLocation CXXFunctionalCastExpr::getLocEnd() const {
  return RParenLoc.isValid() ? RParenLoc : getSubExpr()->getLocEnd();
}
// Classifies the literal operator being called by its arity and parameter
// type: 0 args -> template form, 2 args -> string form, 1 arg -> raw or one
// of the cooked (character/integer/floating) forms.
UserDefinedLiteral::LiteralOperatorKind
UserDefinedLiteral::getLiteralOperatorKind() const {
  if (getNumArgs() == 0)
    return LOK_Template;
  if (getNumArgs() == 2)
    return LOK_String;
  assert(getNumArgs() == 1 && "unexpected #args in literal operator call");
  QualType ParamTy =
    cast<FunctionDecl>(getCalleeDecl())->getParamDecl(0)->getType();
  // A pointer parameter (const char *) marks the raw form; otherwise the
  // parameter type identifies the cooked form.
  if (ParamTy->isPointerType())
    return LOK_Raw;
  if (ParamTy->isAnyCharacterType())
    return LOK_Character;
  if (ParamTy->isIntegerType())
    return LOK_Integer;
  if (ParamTy->isFloatingType())
    return LOK_Floating;
  llvm_unreachable("unknown kind of literal operator");
}
// Returns the single "cooked" argument; only meaningful for cooked forms
// (asserted in debug builds).
Expr *UserDefinedLiteral::getCookedLiteral() {
#ifndef NDEBUG
  LiteralOperatorKind LOK = getLiteralOperatorKind();
  assert(LOK != LOK_Template && LOK != LOK_Raw && "not a cooked literal");
#endif
  return getArg(0);
}
// The ud-suffix is the identifier that names the literal operator.
const IdentifierInfo *UserDefinedLiteral::getUDSuffix() const {
  return cast<FunctionDecl>(getCalleeDecl())->getLiteralIdentifier();
}
// Creates a default-argument expression; one extra Stmt* slot is allocated
// after the node to hold the copied sub-expression.
CXXDefaultArgExpr *
CXXDefaultArgExpr::Create(const ASTContext &C, SourceLocation Loc,
                          ParmVarDecl *Param, Expr *SubExpr) {
  void *Mem = C.Allocate(sizeof(CXXDefaultArgExpr) + sizeof(Stmt *));
  return new (Mem) CXXDefaultArgExpr(CXXDefaultArgExprClass, Loc, Param,
                                     SubExpr);
}
// Builds a use of a field's in-class initializer. The value kind mirrors the
// reference-ness of the field's type: lvalue ref -> LValue, rvalue ref ->
// XValue, otherwise RValue.
CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &C, SourceLocation Loc,
                                       FieldDecl *Field, QualType T)
    : Expr(CXXDefaultInitExprClass, T.getNonLValueExprType(C),
           T->isLValueReferenceType() ? VK_LValue : T->isRValueReferenceType()
                                                        ? VK_XValue
                                                        : VK_RValue,
           /*FIXME*/ OK_Ordinary, false, false, false, false),
      Field(Field), Loc(Loc) {
  assert(Field->hasInClassInitializer());
}
// Creates a CXXTemporary recording the destructor to run for a temporary.
CXXTemporary *CXXTemporary::Create(const ASTContext &C,
                                   const CXXDestructorDecl *Destructor) {
  return new (C) CXXTemporary(Destructor);
}
// Binds an expression to a temporary so its destructor runs at the end of
// the full-expression.
CXXBindTemporaryExpr *CXXBindTemporaryExpr::Create(const ASTContext &C,
                                                   CXXTemporary *Temp,
                                                   Expr* SubExpr) {
  assert((SubExpr->getType()->isRecordType() ||
          SubExpr->getType()->isArrayType()) &&
         "Expression bound to a temporary must have record or array type!");
  return new (C) CXXBindTemporaryExpr(Temp, SubExpr);
}
// Constructs a functional-cast-style temporary-object construction,
// e.g. T(a, b). Delegates everything to CXXConstructExpr, always with a
// complete-object construction kind, and additionally records the written
// type source info.
CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(const ASTContext &C,
                                               CXXConstructorDecl *Cons,
                                               TypeSourceInfo *Type,
                                               ArrayRef<Expr*> Args,
                                               SourceRange ParenOrBraceRange,
                                               bool HadMultipleCandidates,
                                               bool ListInitialization,
                                               bool StdInitListInitialization,
                                               bool ZeroInitialization)
  : CXXConstructExpr(C, CXXTemporaryObjectExprClass,
                     Type->getType().getNonReferenceType(),
                     Type->getTypeLoc().getBeginLoc(),
                     Cons, false, Args,
                     HadMultipleCandidates,
                     ListInitialization,
                     StdInitListInitialization,
                     ZeroInitialization,
                     CXXConstructExpr::CK_Complete, ParenOrBraceRange),
    Type(Type) {
}
// The expression starts where the written type starts.
SourceLocation CXXTemporaryObjectExpr::getLocStart() const {
  return Type->getTypeLoc().getBeginLoc();
}
// The expression ends at the closing paren/brace; if that location is
// unknown, fall back to the end of the last argument.
SourceLocation CXXTemporaryObjectExpr::getLocEnd() const {
  SourceLocation Loc = getParenOrBraceRange().getEnd();
  if (Loc.isInvalid() && getNumArgs())
    Loc = getArg(getNumArgs()-1)->getLocEnd();
  return Loc;
}
// Creates a constructor-call expression node.
CXXConstructExpr *CXXConstructExpr::Create(const ASTContext &C, QualType T,
                                           SourceLocation Loc,
                                           CXXConstructorDecl *D, bool Elidable,
                                           ArrayRef<Expr*> Args,
                                           bool HadMultipleCandidates,
                                           bool ListInitialization,
                                           bool StdInitListInitialization,
                                           bool ZeroInitialization,
                                           ConstructionKind ConstructKind,
                                           SourceRange ParenOrBraceRange) {
  return new (C) CXXConstructExpr(C, CXXConstructExprClass, T, Loc, D,
                                  Elidable, Args,
                                  HadMultipleCandidates, ListInitialization,
                                  StdInitListInitialization,
                                  ZeroInitialization, ConstructKind,
                                  ParenOrBraceRange);
}
// Shared constructor for CXXConstructExpr and CXXTemporaryObjectExpr. The
// argument array is copied into context-allocated storage, and the
// dependence / unexpanded-pack bits are widened to cover the arguments.
CXXConstructExpr::CXXConstructExpr(const ASTContext &C, StmtClass SC,
                                   QualType T, SourceLocation Loc,
                                   CXXConstructorDecl *D, bool elidable,
                                   ArrayRef<Expr*> args,
                                   bool HadMultipleCandidates,
                                   bool ListInitialization,
                                   bool StdInitListInitialization,
                                   bool ZeroInitialization,
                                   ConstructionKind ConstructKind,
                                   SourceRange ParenOrBraceRange)
  : Expr(SC, T, VK_RValue, OK_Ordinary,
         T->isDependentType(), T->isDependentType(),
         T->isInstantiationDependentType(),
         T->containsUnexpandedParameterPack()),
    Constructor(D), Loc(Loc), ParenOrBraceRange(ParenOrBraceRange),
    NumArgs(args.size()),
    Elidable(elidable), HadMultipleCandidates(HadMultipleCandidates),
    ListInitialization(ListInitialization),
    StdInitListInitialization(StdInitListInitialization),
    ZeroInitialization(ZeroInitialization),
    ConstructKind(ConstructKind), Args(nullptr)
{
  if (NumArgs) {
    Args = new (C) Stmt*[args.size()];

    for (unsigned i = 0; i != args.size(); ++i) {
      assert(args[i] && "NULL argument in CXXConstructExpr");

      // Propagate dependence bits from each argument to the whole expression.
      if (args[i]->isValueDependent())
        ExprBits.ValueDependent = true;
      if (args[i]->isInstantiationDependent())
        ExprBits.InstantiationDependent = true;
      if (args[i]->containsUnexpandedParameterPack())
        ExprBits.ContainsUnexpandedParameterPack = true;

      Args[i] = args[i];
    }
  }
}
// Builds a lambda capture, packing the Implicit and ByCopy flags into the
// low bits of the Decl pointer. Note: both VLA-type and by-copy variable
// captures set Capture_ByCopy; getCaptureKind() disambiguates them via
// whether a variable is present.
LambdaCapture::LambdaCapture(SourceLocation Loc, bool Implicit,
                             LambdaCaptureKind Kind, VarDecl *Var,
                             SourceLocation EllipsisLoc)
  : DeclAndBits(Var, 0), Loc(Loc), EllipsisLoc(EllipsisLoc)
{
  unsigned Bits = 0;
  if (Implicit)
    Bits |= Capture_Implicit;

  switch (Kind) {
  case LCK_This:
    assert(!Var && "'this' capture cannot have a variable!");
    break;

  case LCK_ByCopy:
    Bits |= Capture_ByCopy;
    // Deliberate fallthrough: by-copy captures share the variable check.
    LLVM_FALLTHROUGH; // HLSL Change
  case LCK_ByRef:
    assert(Var && "capture must have a variable!");
    break;
  case LCK_VLAType:
    assert(!Var && "VLA type capture cannot have a variable!");
    Bits |= Capture_ByCopy;
    break;
  }
  DeclAndBits.setInt(Bits);
}
/// Recovers the capture kind from the packed pointer+bits representation.
/// A null declaration means either a 'this' capture or a VLA-type capture;
/// the by-copy bit distinguishes the two cases.
LambdaCaptureKind LambdaCapture::getCaptureKind() const {
  const bool ByCopy = (DeclAndBits.getInt() & Capture_ByCopy) != 0;
  if (!DeclAndBits.getPointer())
    return ByCopy ? LCK_VLAType : LCK_This;
  return ByCopy ? LCK_ByCopy : LCK_ByRef;
}
// Constructs a lambda expression. Captures are copied into storage owned by
// the lambda's closure class; capture initializers, the body, and (when
// present) the array-index-variable tables are copied into this node's
// trailing storage, whose layout must match LambdaExpr::Create().
LambdaExpr::LambdaExpr(QualType T,
                       SourceRange IntroducerRange,
                       LambdaCaptureDefault CaptureDefault,
                       SourceLocation CaptureDefaultLoc,
                       ArrayRef<Capture> Captures,
                       bool ExplicitParams,
                       bool ExplicitResultType,
                       ArrayRef<Expr *> CaptureInits,
                       ArrayRef<VarDecl *> ArrayIndexVars,
                       ArrayRef<unsigned> ArrayIndexStarts,
                       SourceLocation ClosingBrace,
                       bool ContainsUnexpandedParameterPack)
  : Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary,
         T->isDependentType(), T->isDependentType(), T->isDependentType(),
         ContainsUnexpandedParameterPack),
    IntroducerRange(IntroducerRange),
    CaptureDefaultLoc(CaptureDefaultLoc),
    NumCaptures(Captures.size()),
    CaptureDefault(CaptureDefault),
    ExplicitParams(ExplicitParams),
    ExplicitResultType(ExplicitResultType),
    ClosingBrace(ClosingBrace)
{
  assert(CaptureInits.size() == Captures.size() && "Wrong number of arguments");
  CXXRecordDecl *Class = getLambdaClass();
  CXXRecordDecl::LambdaDefinitionData &Data = Class->getLambdaData();

  // FIXME: Propagate "has unexpanded parameter pack" bit.

  // Copy captures.
  const ASTContext &Context = Class->getASTContext();
  Data.NumCaptures = NumCaptures;
  Data.NumExplicitCaptures = 0;
  Data.Captures = (Capture *)Context.Allocate(sizeof(Capture) * NumCaptures);
  Capture *ToCapture = Data.Captures;
  for (unsigned I = 0, N = Captures.size(); I != N; ++I) {
    if (Captures[I].isExplicit())
      ++Data.NumExplicitCaptures;

    *ToCapture++ = Captures[I];
  }

  // Copy initialization expressions for the non-static data members.
  Stmt **Stored = getStoredStmts();
  for (unsigned I = 0, N = CaptureInits.size(); I != N; ++I)
    *Stored++ = CaptureInits[I];

  // Copy the body of the lambda.
  *Stored++ = getCallOperator()->getBody();

  // Copy the array index variables, if any. The starts table has one extra
  // trailing entry so each capture's slice can be found as
  // [Starts[I], Starts[I+1]).
  HasArrayIndexVars = !ArrayIndexVars.empty();
  if (HasArrayIndexVars) {
    assert(ArrayIndexStarts.size() == NumCaptures);
    memcpy(getArrayIndexVars(), ArrayIndexVars.data(),
           sizeof(VarDecl *) * ArrayIndexVars.size());
    memcpy(getArrayIndexStarts(), ArrayIndexStarts.data(),
           sizeof(unsigned) * Captures.size());
    getArrayIndexStarts()[Captures.size()] = ArrayIndexVars.size();
  }
}
// Creates a lambda expression, allocating trailing storage for one Stmt* per
// capture initializer plus one for the body, then (when array-index
// variables are present) the index-start table and the VarDecl* array.
LambdaExpr *LambdaExpr::Create(const ASTContext &Context,
                               CXXRecordDecl *Class,
                               SourceRange IntroducerRange,
                               LambdaCaptureDefault CaptureDefault,
                               SourceLocation CaptureDefaultLoc,
                               ArrayRef<Capture> Captures,
                               bool ExplicitParams,
                               bool ExplicitResultType,
                               ArrayRef<Expr *> CaptureInits,
                               ArrayRef<VarDecl *> ArrayIndexVars,
                               ArrayRef<unsigned> ArrayIndexStarts,
                               SourceLocation ClosingBrace,
                               bool ContainsUnexpandedParameterPack) {
  // Determine the type of the expression (i.e., the type of the
  // function object we're creating).
  QualType T = Context.getTypeDeclType(Class);

  unsigned Size = sizeof(LambdaExpr) + sizeof(Stmt *) * (Captures.size() + 1);
  if (!ArrayIndexVars.empty()) {
    Size += sizeof(unsigned) * (Captures.size() + 1);
    // Realign for following VarDecl array.
    Size = llvm::RoundUpToAlignment(Size, llvm::alignOf<VarDecl*>());
    Size += sizeof(VarDecl *) * ArrayIndexVars.size();
  }
  void *Mem = Context.Allocate(Size);
  return new (Mem) LambdaExpr(T, IntroducerRange,
                              CaptureDefault, CaptureDefaultLoc, Captures,
                              ExplicitParams, ExplicitResultType,
                              CaptureInits, ArrayIndexVars, ArrayIndexStarts,
                              ClosingBrace, ContainsUnexpandedParameterPack);
}
// Allocates an empty lambda expression for deserialization. The trailing
// storage must mirror the layout used by LambdaExpr::Create(): one Stmt* per
// capture plus one for the body, then (if there are array-index variables)
// the unsigned index-start table and the VarDecl* array.
LambdaExpr *LambdaExpr::CreateDeserialized(const ASTContext &C,
                                           unsigned NumCaptures,
                                           unsigned NumArrayIndexVars) {
  unsigned Size = sizeof(LambdaExpr) + sizeof(Stmt *) * (NumCaptures + 1);
  if (NumArrayIndexVars) {
    Size += sizeof(unsigned) * (NumCaptures + 1);
    // Realign for the following VarDecl* array, exactly as Create() does;
    // the element type is VarDecl* (pointer), not VarDecl.
    Size = llvm::RoundUpToAlignment(Size, llvm::alignOf<VarDecl*>());
    Size += sizeof(VarDecl *) * NumArrayIndexVars;
  }
  void *Mem = C.Allocate(Size);
  return new (Mem) LambdaExpr(EmptyShell(), NumCaptures, NumArrayIndexVars > 0);
}
bool LambdaExpr::isInitCapture(const LambdaCapture *C) const {
return (C->capturesVariable() && C->getCapturedVar()->isInitCapture() &&
(getCallOperator() == C->getCapturedVar()->getDeclContext()));
}
// Capture iterators. The closure class stores explicit captures first,
// followed by implicit captures, so the explicit/implicit ranges are simple
// slices of the full capture array.
LambdaExpr::capture_iterator LambdaExpr::capture_begin() const {
  return getLambdaClass()->getLambdaData().Captures;
}

LambdaExpr::capture_iterator LambdaExpr::capture_end() const {
  return capture_begin() + NumCaptures;
}

// All captures, explicit then implicit.
LambdaExpr::capture_range LambdaExpr::captures() const {
  return capture_range(capture_begin(), capture_end());
}

LambdaExpr::capture_iterator LambdaExpr::explicit_capture_begin() const {
  return capture_begin();
}

LambdaExpr::capture_iterator LambdaExpr::explicit_capture_end() const {
  struct CXXRecordDecl::LambdaDefinitionData &Data
    = getLambdaClass()->getLambdaData();
  return Data.Captures + Data.NumExplicitCaptures;
}

// Captures written in the lambda-introducer.
LambdaExpr::capture_range LambdaExpr::explicit_captures() const {
  return capture_range(explicit_capture_begin(), explicit_capture_end());
}

LambdaExpr::capture_iterator LambdaExpr::implicit_capture_begin() const {
  return explicit_capture_end();
}

LambdaExpr::capture_iterator LambdaExpr::implicit_capture_end() const {
  return capture_end();
}

// Captures synthesized from the capture-default.
LambdaExpr::capture_range LambdaExpr::implicit_captures() const {
  return capture_range(implicit_capture_begin(), implicit_capture_end());
}
// Returns the array-index variables associated with one capture initializer.
// The index-starts table maps capture I to the half-open slice
// [IndexStarts[I], IndexStarts[I+1]) of the VarDecl* array (the table has a
// trailing sentinel entry written by the constructor).
ArrayRef<VarDecl *>
LambdaExpr::getCaptureInitIndexVars(capture_init_iterator Iter) const {
  assert(HasArrayIndexVars && "No array index-var data?");

  unsigned Index = Iter - capture_init_begin();
  assert(Index < getLambdaClass()->getLambdaData().NumCaptures &&
         "Capture index out-of-range");
  VarDecl **IndexVars = getArrayIndexVars();
  unsigned *IndexStarts = getArrayIndexStarts();
  return llvm::makeArrayRef(IndexVars + IndexStarts[Index],
                            IndexVars + IndexStarts[Index + 1]);
}
// The lambda's type is its closure class.
CXXRecordDecl *LambdaExpr::getLambdaClass() const {
  return getType()->getAsCXXRecordDecl();
}

CXXMethodDecl *LambdaExpr::getCallOperator() const {
  CXXRecordDecl *Record = getLambdaClass();
  return Record->getLambdaCallOperator();
}

// Non-null only for generic lambdas.
TemplateParameterList *LambdaExpr::getTemplateParameterList() const {
  CXXRecordDecl *Record = getLambdaClass();
  return Record->getGenericLambdaTemplateParameterList();
}

CompoundStmt *LambdaExpr::getBody() const {
  // The body slot in trailing storage may be unset (e.g. after
  // deserialization); lazily cache the call operator's body there.
  if (!getStoredStmts()[NumCaptures])
    getStoredStmts()[NumCaptures] = getCallOperator()->getBody();

  return reinterpret_cast<CompoundStmt *>(getStoredStmts()[NumCaptures]);
}

// A lambda is mutable iff its call operator is not const.
bool LambdaExpr::isMutable() const {
  return !getCallOperator()->isConst();
}
// Wraps an expression with the cleanup objects (e.g. block captures) that
// must be released after evaluating it; the objects are stored in trailing
// storage after the node.
ExprWithCleanups::ExprWithCleanups(Expr *subexpr,
                                   ArrayRef<CleanupObject> objects)
  : Expr(ExprWithCleanupsClass, subexpr->getType(),
         subexpr->getValueKind(), subexpr->getObjectKind(),
         subexpr->isTypeDependent(), subexpr->isValueDependent(),
         subexpr->isInstantiationDependent(),
         subexpr->containsUnexpandedParameterPack()),
    SubExpr(subexpr) {
  ExprWithCleanupsBits.NumObjects = objects.size();
  for (unsigned i = 0, e = objects.size(); i != e; ++i)
    getObjectsBuffer()[i] = objects[i];
}

ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C, Expr *subexpr,
                                           ArrayRef<CleanupObject> objects) {
  // Allocate the node plus trailing storage for the cleanup objects.
  size_t size = sizeof(ExprWithCleanups)
              + objects.size() * sizeof(CleanupObject);
  void *buffer = C.Allocate(size, llvm::alignOf<ExprWithCleanups>());
  return new (buffer) ExprWithCleanups(subexpr, objects);
}

// Empty-shell constructor; the object count must still be recorded so the
// trailing-storage accessors work during deserialization.
ExprWithCleanups::ExprWithCleanups(EmptyShell empty, unsigned numObjects)
  : Expr(ExprWithCleanupsClass, empty) {
  ExprWithCleanupsBits.NumObjects = numObjects;
}

ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C,
                                           EmptyShell empty,
                                           unsigned numObjects) {
  size_t size = sizeof(ExprWithCleanups) + numObjects * sizeof(CleanupObject);
  void *buffer = C.Allocate(size, llvm::alignOf<ExprWithCleanups>());
  return new (buffer) ExprWithCleanups(empty, numObjects);
}
// Construction of a dependent type, e.g. T(a, b) where T is a template
// parameter. The arguments are stored immediately after the node
// (see the trailing-storage allocation in Create below).
CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
                                                 SourceLocation LParenLoc,
                                                 ArrayRef<Expr*> Args,
                                                 SourceLocation RParenLoc)
  : Expr(CXXUnresolvedConstructExprClass,
         Type->getType().getNonReferenceType(),
         (Type->getType()->isLValueReferenceType() ? VK_LValue
          :Type->getType()->isRValueReferenceType()? VK_XValue
          :VK_RValue),
         OK_Ordinary,
         Type->getType()->isDependentType(), true, true,
         Type->getType()->containsUnexpandedParameterPack()),
    Type(Type),
    LParenLoc(LParenLoc),
    RParenLoc(RParenLoc),
    NumArgs(Args.size()) {
  // Copy the arguments into trailing storage, widening the unexpanded-pack
  // bit to cover them.
  Stmt **StoredArgs = reinterpret_cast<Stmt **>(this + 1);
  for (unsigned I = 0; I != Args.size(); ++I) {
    if (Args[I]->containsUnexpandedParameterPack())
      ExprBits.ContainsUnexpandedParameterPack = true;

    StoredArgs[I] = Args[I];
  }
}

CXXUnresolvedConstructExpr *
CXXUnresolvedConstructExpr::Create(const ASTContext &C,
                                   TypeSourceInfo *Type,
                                   SourceLocation LParenLoc,
                                   ArrayRef<Expr*> Args,
                                   SourceLocation RParenLoc) {
  // Allocate the node plus one trailing Expr* slot per argument.
  void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) +
                         sizeof(Expr *) * Args.size());
  return new (Mem) CXXUnresolvedConstructExpr(Type, LParenLoc, Args, RParenLoc);
}

// Allocates an uninitialized node (plus trailing argument storage) whose
// fields are filled in later.
CXXUnresolvedConstructExpr *
CXXUnresolvedConstructExpr::CreateEmpty(const ASTContext &C, unsigned NumArgs) {
  Stmt::EmptyShell Empty;
  void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) +
                         sizeof(Expr *) * NumArgs);
  return new (Mem) CXXUnresolvedConstructExpr(Empty, NumArgs);
}

// The expression starts where the written type starts.
SourceLocation CXXUnresolvedConstructExpr::getLocStart() const {
  return Type->getTypeLoc().getBeginLoc();
}
// Member access on a dependent base (e.g. t.x where t has template-parameter
// type). This variant handles an optional template keyword and explicit
// template arguments, which live in trailing storage after the node.
CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(const ASTContext &C,
                                                 Expr *Base, QualType BaseType,
                                                 bool IsArrow,
                                                 SourceLocation OperatorLoc,
                                          NestedNameSpecifierLoc QualifierLoc,
                                          SourceLocation TemplateKWLoc,
                                          NamedDecl *FirstQualifierFoundInScope,
                                          DeclarationNameInfo MemberNameInfo,
                                   const TemplateArgumentListInfo *TemplateArgs)
  : Expr(CXXDependentScopeMemberExprClass, C.DependentTy,
         VK_LValue, OK_Ordinary, true, true, true,
         ((Base && Base->containsUnexpandedParameterPack()) ||
          (QualifierLoc &&
           QualifierLoc.getNestedNameSpecifier()
                                       ->containsUnexpandedParameterPack()) ||
          MemberNameInfo.containsUnexpandedParameterPack())),
    Base(Base), BaseType(BaseType), IsArrow(IsArrow),
    HasTemplateKWAndArgsInfo(TemplateArgs != nullptr ||
                             TemplateKWLoc.isValid()),
    OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
    FirstQualifierFoundInScope(FirstQualifierFoundInScope),
    MemberNameInfo(MemberNameInfo) {
  if (TemplateArgs) {
    // Copy the explicit template arguments into trailing storage, picking up
    // any unexpanded parameter packs they carry.
    bool Dependent = true;
    bool InstantiationDependent = true;
    bool ContainsUnexpandedParameterPack = false;
    getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs,
                                               Dependent,
                                               InstantiationDependent,
                                               ContainsUnexpandedParameterPack);
    if (ContainsUnexpandedParameterPack)
      ExprBits.ContainsUnexpandedParameterPack = true;
  } else if (TemplateKWLoc.isValid()) {
    // 'template' keyword written without explicit arguments.
    getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
  }
}

// Variant without template keyword / arguments; no trailing storage needed.
CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(const ASTContext &C,
                          Expr *Base, QualType BaseType,
                          bool IsArrow,
                          SourceLocation OperatorLoc,
                          NestedNameSpecifierLoc QualifierLoc,
                          NamedDecl *FirstQualifierFoundInScope,
                          DeclarationNameInfo MemberNameInfo)
  : Expr(CXXDependentScopeMemberExprClass, C.DependentTy,
         VK_LValue, OK_Ordinary, true, true, true,
         ((Base && Base->containsUnexpandedParameterPack()) ||
          (QualifierLoc &&
           QualifierLoc.getNestedNameSpecifier()->
           containsUnexpandedParameterPack()) ||
          MemberNameInfo.containsUnexpandedParameterPack())),
    Base(Base), BaseType(BaseType), IsArrow(IsArrow),
    HasTemplateKWAndArgsInfo(false),
    OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
    FirstQualifierFoundInScope(FirstQualifierFoundInScope),
    MemberNameInfo(MemberNameInfo) { }
// Creates a dependent member access. When template keyword/arguments are
// present, extra trailing storage is allocated for them; otherwise the
// smaller no-args constructor variant is used.
CXXDependentScopeMemberExpr *
CXXDependentScopeMemberExpr::Create(const ASTContext &C,
                                Expr *Base, QualType BaseType, bool IsArrow,
                                SourceLocation OperatorLoc,
                                NestedNameSpecifierLoc QualifierLoc,
                                SourceLocation TemplateKWLoc,
                                NamedDecl *FirstQualifierFoundInScope,
                                DeclarationNameInfo MemberNameInfo,
                                const TemplateArgumentListInfo *TemplateArgs) {
  if (!TemplateArgs && !TemplateKWLoc.isValid())
    return new (C) CXXDependentScopeMemberExpr(C, Base, BaseType,
                                               IsArrow, OperatorLoc,
                                               QualifierLoc,
                                               FirstQualifierFoundInScope,
                                               MemberNameInfo);

  unsigned NumTemplateArgs = TemplateArgs ? TemplateArgs->size() : 0;
  std::size_t size = sizeof(CXXDependentScopeMemberExpr)
    + ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);

  void *Mem = C.Allocate(size, llvm::alignOf<CXXDependentScopeMemberExpr>());
  return new (Mem) CXXDependentScopeMemberExpr(C, Base, BaseType,
                                               IsArrow, OperatorLoc,
                                               QualifierLoc,
                                               TemplateKWLoc,
                                               FirstQualifierFoundInScope,
                                               MemberNameInfo, TemplateArgs);
}
// Allocates an empty node for AST deserialization. When template keyword /
// argument info is present, trailing storage for it is allocated and the
// HasTemplateKWAndArgsInfo flag is set so the accessors know it exists.
CXXDependentScopeMemberExpr *
CXXDependentScopeMemberExpr::CreateEmpty(const ASTContext &C,
                                         bool HasTemplateKWAndArgsInfo,
                                         unsigned NumTemplateArgs) {
  if (!HasTemplateKWAndArgsInfo)
    return new (C) CXXDependentScopeMemberExpr(C, nullptr, QualType(),
                                               /*IsArrow=*/false,
                                               SourceLocation(),
                                               NestedNameSpecifierLoc(),
                                               nullptr, DeclarationNameInfo());

  std::size_t size = sizeof(CXXDependentScopeMemberExpr) +
                     ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
  void *Mem = C.Allocate(size, llvm::alignOf<CXXDependentScopeMemberExpr>());
  CXXDependentScopeMemberExpr *E
    =  new (Mem) CXXDependentScopeMemberExpr(C, nullptr, QualType(),
                                             /*IsArrow=*/false,
                                             SourceLocation(),
                                             NestedNameSpecifierLoc(),
                                             SourceLocation(), nullptr,
                                             DeclarationNameInfo(), nullptr);
  E->HasTemplateKWAndArgsInfo = true;
  return E;
}
/// True when the member access has no written base object: either there is
/// no base expression at all, or the base is the implicit 'this'.
bool CXXDependentScopeMemberExpr::isImplicitAccess() const {
  return !Base || cast<Expr>(Base)->isImplicitCXXThis();
}
// Returns true if every declaration in [begin, end) is a non-static member
// function (or member function template). Precondition: the range is
// non-empty (the do/while dereferences 'begin' before the first check).
static bool hasOnlyNonStaticMemberFunctions(UnresolvedSetIterator begin,
                                            UnresolvedSetIterator end) {
  do {
    NamedDecl *decl = *begin;
    if (isa<UnresolvedUsingValueDecl>(decl))
      return false;

    // Unresolved member expressions should only contain methods and
    // method templates.
    if (cast<CXXMethodDecl>(decl->getUnderlyingDecl()->getAsFunction())
            ->isStatic())
      return false;
  } while (++begin != end);

  return true;
}
// Member access that found multiple candidate declarations (overload set).
// Dependence and unexpanded-pack bits combine those of the base expression
// and the base type.
UnresolvedMemberExpr::UnresolvedMemberExpr(const ASTContext &C,
                                           bool HasUnresolvedUsing,
                                           Expr *Base, QualType BaseType,
                                           bool IsArrow,
                                           SourceLocation OperatorLoc,
                                           NestedNameSpecifierLoc QualifierLoc,
                                           SourceLocation TemplateKWLoc,
                                   const DeclarationNameInfo &MemberNameInfo,
                                   const TemplateArgumentListInfo *TemplateArgs,
                                           UnresolvedSetIterator Begin,
                                           UnresolvedSetIterator End)
  : OverloadExpr(UnresolvedMemberExprClass, C, QualifierLoc, TemplateKWLoc,
                 MemberNameInfo, TemplateArgs, Begin, End,
                 // Dependent
                 ((Base && Base->isTypeDependent()) ||
                  BaseType->isDependentType()),
                 ((Base && Base->isInstantiationDependent()) ||
                  BaseType->isInstantiationDependentType()),
                 // Contains unexpanded parameter pack
                 ((Base && Base->containsUnexpandedParameterPack()) ||
                  BaseType->containsUnexpandedParameterPack())),
    IsArrow(IsArrow), HasUnresolvedUsing(HasUnresolvedUsing),
    Base(Base), BaseType(BaseType), OperatorLoc(OperatorLoc) {

  // Check whether all of the members are non-static member functions,
  // and if so, mark give this bound-member type instead of overload type.
  if (hasOnlyNonStaticMemberFunctions(Begin, End))
    setType(C.BoundMemberTy);
}
/// True when the member access has no written base object: either there is
/// no base expression at all, or the base is the implicit 'this'.
bool UnresolvedMemberExpr::isImplicitAccess() const {
  return !Base || cast<Expr>(Base)->isImplicitCXXThis();
}
// Creates an unresolved member access; trailing storage is reserved for the
// template keyword / explicit template arguments when present.
UnresolvedMemberExpr *
UnresolvedMemberExpr::Create(const ASTContext &C, bool HasUnresolvedUsing,
                             Expr *Base, QualType BaseType, bool IsArrow,
                             SourceLocation OperatorLoc,
                             NestedNameSpecifierLoc QualifierLoc,
                             SourceLocation TemplateKWLoc,
                             const DeclarationNameInfo &MemberNameInfo,
                             const TemplateArgumentListInfo *TemplateArgs,
                             UnresolvedSetIterator Begin,
                             UnresolvedSetIterator End) {
  std::size_t size = sizeof(UnresolvedMemberExpr);
  if (TemplateArgs)
    size += ASTTemplateKWAndArgsInfo::sizeFor(TemplateArgs->size());
  else if (TemplateKWLoc.isValid())
    size += ASTTemplateKWAndArgsInfo::sizeFor(0);

  void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedMemberExpr>());
  return new (Mem) UnresolvedMemberExpr(C,
                             HasUnresolvedUsing, Base, BaseType,
                             IsArrow, OperatorLoc, QualifierLoc, TemplateKWLoc,
                             MemberNameInfo, TemplateArgs, Begin, End);
}

// Allocates an empty node for AST deserialization; the
// HasTemplateKWAndArgsInfo flag is recorded so the trailing-storage
// accessors work while the reader fills the node in.
UnresolvedMemberExpr *
UnresolvedMemberExpr::CreateEmpty(const ASTContext &C,
                                  bool HasTemplateKWAndArgsInfo,
                                  unsigned NumTemplateArgs) {
  std::size_t size = sizeof(UnresolvedMemberExpr);
  if (HasTemplateKWAndArgsInfo)
    size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);

  void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedMemberExpr>());
  UnresolvedMemberExpr *E = new (Mem) UnresolvedMemberExpr(EmptyShell());
  E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
  return E;
}
// Determines the class in whose scope the member lookup was performed:
// either the class named by the nested-name-specifier, or (absent a
// qualifier) the class of the base expression.
CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() const {
  // Unlike for UnresolvedLookupExpr, it is very easy to re-derive this.

  // If there was a nested name specifier, it names the naming class.
  // It can't be dependent: after all, we were actually able to do the
  // lookup.
  CXXRecordDecl *Record = nullptr;
  auto *NNS = getQualifier();
  if (NNS && NNS->getKind() != NestedNameSpecifier::Super) {
    const Type *T = getQualifier()->getAsType();
    assert(T && "qualifier in member expression does not name type");
    Record = T->getAsCXXRecordDecl();
    assert(Record && "qualifier in member expression does not name record");
  }
  // Otherwise the naming class must have been the base class.
  else {
    QualType BaseType = getBaseType().getNonReferenceType();
    if (isArrow()) {
      const PointerType *PT = BaseType->getAs<PointerType>();
      assert(PT && "base of arrow member access is not pointer");
      BaseType = PT->getPointeeType();
    }

    Record = BaseType->getAsCXXRecordDecl();
    assert(Record && "base of member expression does not name record");
  }

  return Record;
}
// A reference to a non-type template parameter pack that has been
// substituted with an argument pack but not yet expanded; stores a pointer
// into the pack's argument array.
SubstNonTypeTemplateParmPackExpr::
SubstNonTypeTemplateParmPackExpr(QualType T,
                                 NonTypeTemplateParmDecl *Param,
                                 SourceLocation NameLoc,
                                 const TemplateArgument &ArgPack)
  : Expr(SubstNonTypeTemplateParmPackExprClass, T, VK_RValue, OK_Ordinary,
         true, true, true, true),
    Param(Param), Arguments(ArgPack.pack_begin()),
    NumArguments(ArgPack.pack_size()), NameLoc(NameLoc) { }

// Reconstructs the argument pack from the stored pointer and count.
TemplateArgument SubstNonTypeTemplateParmPackExpr::getArgumentPack() const {
  return TemplateArgument(Arguments, NumArguments);
}
// A reference to a function parameter pack whose instantiated parameters are
// stored in trailing storage after the node.
FunctionParmPackExpr::FunctionParmPackExpr(QualType T, ParmVarDecl *ParamPack,
                                           SourceLocation NameLoc,
                                           unsigned NumParams,
                                           Decl * const *Params)
  : Expr(FunctionParmPackExprClass, T, VK_LValue, OK_Ordinary,
         true, true, true, true),
    ParamPack(ParamPack), NameLoc(NameLoc), NumParameters(NumParams) {
  // Params may be null for the empty-shell case; otherwise copy the pack's
  // parameter declarations into trailing storage.
  if (Params)
    std::uninitialized_copy(Params, Params + NumParams,
                            reinterpret_cast<Decl**>(this+1));
}

FunctionParmPackExpr *
FunctionParmPackExpr::Create(const ASTContext &Context, QualType T,
                             ParmVarDecl *ParamPack, SourceLocation NameLoc,
                             ArrayRef<Decl *> Params) {
  return new (Context.Allocate(sizeof(FunctionParmPackExpr) +
                               sizeof(ParmVarDecl*) * Params.size()))
    FunctionParmPackExpr(T, ParamPack, NameLoc, Params.size(), Params.data());
}

// Allocates an empty node (plus trailing parameter storage) for AST
// deserialization.
FunctionParmPackExpr *
FunctionParmPackExpr::CreateEmpty(const ASTContext &Context,
                                  unsigned NumParams) {
  return new (Context.Allocate(sizeof(FunctionParmPackExpr) +
                               sizeof(ParmVarDecl*) * NumParams))
    FunctionParmPackExpr(QualType(), nullptr, SourceLocation(), 0, nullptr);
}
// Records that this materialized temporary's lifetime is extended by the
// given declaration. The node normally stores just a Stmt*; when extension
// info is needed, the state is upgraded to a heap-allocated ExtraState that
// also carries the extending declaration and its mangling number.
void MaterializeTemporaryExpr::setExtendingDecl(const ValueDecl *ExtendedBy,
                                                unsigned ManglingNumber) {
  // We only need extra state if we have to remember more than just the Stmt.
  if (!ExtendedBy)
    return;

  // We may need to allocate extra storage for the mangling number and the
  // extended-by ValueDecl.
  if (!State.is<ExtraState *>()) {
    auto ES = new (ExtendedBy->getASTContext()) ExtraState;
    ES->Temporary = State.get<Stmt *>();
    State = ES;
  }

  auto ES = State.get<ExtraState *>();
  ES->ExtendingDecl = ExtendedBy;
  ES->ManglingNumber = ManglingNumber;
}
// A type-trait expression such as __is_trivially_copyable(T, ...). The
// argument TypeSourceInfos are stored in trailing storage; dependence bits
// start out false and are widened per-argument below.
TypeTraitExpr::TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind,
                             ArrayRef<TypeSourceInfo *> Args,
                             SourceLocation RParenLoc,
                             bool Value)
  : Expr(TypeTraitExprClass, T, VK_RValue, OK_Ordinary,
         /*TypeDependent=*/false,
         /*ValueDependent=*/false,
         /*InstantiationDependent=*/false,
         /*ContainsUnexpandedParameterPack=*/false),
    Loc(Loc), RParenLoc(RParenLoc)
{
  TypeTraitExprBits.Kind = Kind;
  TypeTraitExprBits.Value = Value;
  TypeTraitExprBits.NumArgs = Args.size();

  TypeSourceInfo **ToArgs = getTypeSourceInfos();

  for (unsigned I = 0, N = Args.size(); I != N; ++I) {
    // Any dependent argument makes the trait value-dependent.
    if (Args[I]->getType()->isDependentType())
      setValueDependent(true);
    if (Args[I]->getType()->isInstantiationDependentType())
      setInstantiationDependent(true);
    if (Args[I]->getType()->containsUnexpandedParameterPack())
      setContainsUnexpandedParameterPack(true);

    ToArgs[I] = Args[I];
  }
}
// Creates a type-trait expression, allocating trailing storage for the
// argument TypeSourceInfo pointers.
TypeTraitExpr *TypeTraitExpr::Create(const ASTContext &C, QualType T,
                                     SourceLocation Loc,
                                     TypeTrait Kind,
                                     ArrayRef<TypeSourceInfo *> Args,
                                     SourceLocation RParenLoc,
                                     bool Value) {
  unsigned Size = sizeof(TypeTraitExpr) + sizeof(TypeSourceInfo*) * Args.size();
  void *Mem = C.Allocate(Size);
  return new (Mem) TypeTraitExpr(T, Loc, Kind, Args, RParenLoc, Value);
}

// Allocates an empty node (plus trailing argument storage) for AST
// deserialization.
TypeTraitExpr *TypeTraitExpr::CreateDeserialized(const ASTContext &C,
                                                 unsigned NumArgs) {
  unsigned Size = sizeof(TypeTraitExpr) + sizeof(TypeSourceInfo*) * NumArgs;
  void *Mem = C.Allocate(Size);
  return new (Mem) TypeTraitExpr(EmptyShell());
}

// Out-of-line virtual method anchor; pins the vtable to this file.
void ArrayTypeTraitExpr::anchor() { }
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/Expr.cpp | //===--- Expr.cpp - Expression AST Node Implementation --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Expr class and subclasses.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstring>
using namespace clang;
/// Returns the most-derived class this expression is statically known to
/// refer to, found by stripping parentheses and derived-to-base casts.
/// Returns null when the stripped type is still dependent.
const CXXRecordDecl *Expr::getBestDynamicClassType() const {
  QualType Ty = ignoreParenBaseCasts()->getType();
  if (const PointerType *Ptr = Ty->getAs<PointerType>())
    Ty = Ptr->getPointeeType();
  if (Ty->isDependentType())
    return nullptr;
  return cast<CXXRecordDecl>(Ty->castAs<RecordType>()->getDecl());
}
/// Walk past subexpressions that merely select a subobject of an rvalue
/// (derived-to-base casts, non-bitfield member accesses, pointer-to-member
/// binds and comma LHSs), recording each step in \p Adjustments and each
/// skipped comma LHS in \p CommaLHSs. Returns the innermost expression the
/// recorded adjustments apply to.
const Expr *Expr::skipRValueSubobjectAdjustments(
    SmallVectorImpl<const Expr *> &CommaLHSs,
    SmallVectorImpl<SubobjectAdjustment> &Adjustments) const {
  const Expr *E = this;
  while (true) {
    E = E->IgnoreParens();

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      // A derived-to-base cast of a record rvalue is a base-subobject
      // selection; remember the derived class we started from.
      if ((CE->getCastKind() == CK_DerivedToBase ||
           CE->getCastKind() == CK_UncheckedDerivedToBase) &&
          E->getType()->isRecordType()) {
        E = CE->getSubExpr();
        CXXRecordDecl *Derived
          = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
        Adjustments.push_back(SubobjectAdjustment(CE, Derived));
        continue;
      }

      // No-op casts are transparent for this purpose.
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
      if (!ME->isArrow()) {
        assert(ME->getBase()->getType()->isRecordType());
        if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
          // Bit-fields and reference members are not simple subobjects;
          // stop at them instead of recording an adjustment.
          if (!Field->isBitField() && !Field->getType()->isReferenceType()) {
            E = ME->getBase();
            Adjustments.push_back(SubobjectAdjustment(Field));
            continue;
          }
        }
      }
    } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
      if (BO->isPtrMemOp()) {
        // .* / ->* selects a member of the LHS object.
        assert(BO->getRHS()->isRValue());
        E = BO->getLHS();
        const MemberPointerType *MPT =
          BO->getRHS()->getType()->getAs<MemberPointerType>();
        Adjustments.push_back(SubobjectAdjustment(MPT, BO->getRHS()));
        continue;
      } else if (BO->getOpcode() == BO_Comma) {
        // The result is the RHS; the LHS is still evaluated, so keep it.
        CommaLHSs.push_back(BO->getLHS());
        E = BO->getRHS();
        continue;
      }
    }

    // Nothing changed.
    break;
  }
  return E;
}
/// isKnownToHaveBooleanValue - Return true if this is an integer expression
/// that is known to return 0 or 1. This happens for _Bool/bool expressions
/// but also int expressions which are produced by things like comparisons in
/// C.
bool Expr::isKnownToHaveBooleanValue() const {
const Expr *E = IgnoreParens();
// If this value has _Bool type, it is obvious 0/1.
if (E->getType()->isBooleanType()) return true;
// If this is a non-scalar-integer type, we don't care enough to try.
if (!E->getType()->isIntegralOrEnumerationType()) return false;
if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
switch (UO->getOpcode()) {
case UO_Plus:
return UO->getSubExpr()->isKnownToHaveBooleanValue();
case UO_LNot:
return true;
default:
return false;
}
}
// Only look through implicit casts. If the user writes
// '(int) (a && b)' treat it as an arbitrary int.
if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E))
return CE->getSubExpr()->isKnownToHaveBooleanValue();
if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
switch (BO->getOpcode()) {
default: return false;
case BO_LT: // Relational operators.
case BO_GT:
case BO_LE:
case BO_GE:
case BO_EQ: // Equality operators.
case BO_NE:
case BO_LAnd: // AND operator.
case BO_LOr: // Logical OR operator.
return true;
case BO_And: // Bitwise AND operator.
case BO_Xor: // Bitwise XOR operator.
case BO_Or: // Bitwise OR operator.
// Handle things like (x==2)|(y==12).
return BO->getLHS()->isKnownToHaveBooleanValue() &&
BO->getRHS()->isKnownToHaveBooleanValue();
case BO_Comma:
case BO_Assign:
return BO->getRHS()->isKnownToHaveBooleanValue();
}
}
if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E))
return CO->getTrueExpr()->isKnownToHaveBooleanValue() &&
CO->getFalseExpr()->isKnownToHaveBooleanValue();
return false;
}
// Amusing macro metaprogramming hack: check whether a class provides
// a more specific implementation of getExprLoc().
//
// See also Stmt.cpp:{getLocStart(),getLocEnd()}.
namespace {
  /// This implementation is used when a class provides a custom
  /// implementation of getExprLoc.
  ///
  /// The second parameter is a pointer-to-member whose class type T deduces
  /// to the class that actually declares getExprLoc().
  template <class E, class T>
  SourceLocation getExprLocImpl(const Expr *expr,
                                SourceLocation (T::*v)() const) {
    return static_cast<const E*>(expr)->getExprLoc();
  }

  /// This implementation is used when a class doesn't provide
  /// a custom implementation of getExprLoc. Overload resolution
  /// should pick it over the implementation above because it's
  /// more specialized according to function template partial ordering.
  template <class E>
  SourceLocation getExprLocImpl(const Expr *expr,
                                SourceLocation (Expr::*v)() const) {
    return static_cast<const E*>(expr)->getLocStart();
  }
}
/// Return the preferred source location for this expression in diagnostics:
/// each concrete expression class's own getExprLoc() when it defines one,
/// otherwise its getLocStart() (dispatch via getExprLocImpl above).
SourceLocation Expr::getExprLoc() const {
  switch (getStmtClass()) {
  case Stmt::NoStmtClass: llvm_unreachable("statement without class");
#define ABSTRACT_STMT(type)
#define STMT(type, base) \
  case Stmt::type##Class: break;
#define EXPR(type, base) \
  case Stmt::type##Class: return getExprLocImpl<type>(this, &type::getExprLoc);
#include "clang/AST/StmtNodes.inc"
  }
  llvm_unreachable("unknown expression kind");
}
//===----------------------------------------------------------------------===//
// Primary Expressions.
//===----------------------------------------------------------------------===//
/// \brief Compute the type-, value-, and instantiation-dependence of a
/// declaration reference
/// based on the declaration being referenced.
///
/// The three out-parameters start at false and are only ever turned on,
/// following the C++ rules quoted inline ((TD) = type-dependent clause,
/// (VD) = value-dependent clause).
static void computeDeclRefDependence(const ASTContext &Ctx, NamedDecl *D,
                                     QualType T, bool &TypeDependent,
                                     bool &ValueDependent,
                                     bool &InstantiationDependent) {
  TypeDependent = false;
  ValueDependent = false;
  InstantiationDependent = false;

  // (TD) C++ [temp.dep.expr]p3:
  //   An id-expression is type-dependent if it contains:
  //
  // and
  //
  // (VD) C++ [temp.dep.constexpr]p2:
  //  An identifier is value-dependent if it is:

  //  (TD)  - an identifier that was declared with dependent type
  //  (VD)  - a name declared with a dependent type,
  if (T->isDependentType()) {
    // Type dependence implies the other two; nothing more to compute.
    TypeDependent = true;
    ValueDependent = true;
    InstantiationDependent = true;
    return;
  } else if (T->isInstantiationDependentType()) {
    InstantiationDependent = true;
  }

  //  (TD)  - a conversion-function-id that specifies a dependent type
  if (D->getDeclName().getNameKind()
                                == DeclarationName::CXXConversionFunctionName) {
    QualType T = D->getDeclName().getCXXNameType();
    if (T->isDependentType()) {
      TypeDependent = true;
      ValueDependent = true;
      InstantiationDependent = true;
      return;
    }

    if (T->isInstantiationDependentType())
      InstantiationDependent = true;
  }

  //  (VD)  - the name of a non-type template parameter,
  if (isa<NonTypeTemplateParmDecl>(D)) {
    ValueDependent = true;
    InstantiationDependent = true;
    return;
  }

  //  (VD) - a constant with integral or enumeration type and is
  //         initialized with an expression that is value-dependent.
  //  (VD) - a constant with literal type and is initialized with an
  //         expression that is value-dependent [C++11].
  //  (VD) - FIXME: Missing from the standard:
  //       -  an entity with reference type and is initialized with an
  //          expression that is value-dependent [C++11]
  if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
    // C++11 broadens the rule from integral/enum constants to any
    // const-qualified literal type (and references).
    if ((Ctx.getLangOpts().CPlusPlus11 ?
           Var->getType()->isLiteralType(Ctx) :
           Var->getType()->isIntegralOrEnumerationType()) &&
        (Var->getType().isConstQualified() ||
         Var->getType()->isReferenceType())) {
      if (const Expr *Init = Var->getAnyInitializer())
        if (Init->isValueDependent()) {
          ValueDependent = true;
          InstantiationDependent = true;
        }
    }

    // (VD) - FIXME: Missing from the standard:
    //      -  a member function or a static data member of the current
    //         instantiation
    if (Var->isStaticDataMember() &&
        Var->getDeclContext()->isDependentContext()) {
      ValueDependent = true;
      InstantiationDependent = true;
      // An incomplete-array static member is additionally type-dependent —
      // presumably because the instantiation can complete the type.
      TypeSourceInfo *TInfo = Var->getFirstDecl()->getTypeSourceInfo();
      if (TInfo->getType()->isIncompleteArrayType())
        TypeDependent = true;
    }

    return;
  }

  // (VD) - FIXME: Missing from the standard:
  //      -  a member function or a static data member of the current
  //         instantiation
  if (isa<CXXMethodDecl>(D) && D->getDeclContext()->isDependentContext()) {
    ValueDependent = true;
    InstantiationDependent = true;
  }
}
/// Recompute this DeclRefExpr's dependence bits from the referenced
/// declaration and its type, merging them into the existing Expr bits.
void DeclRefExpr::computeDependence(const ASTContext &Ctx) {
  bool IsTypeDep = false;
  bool IsValueDep = false;
  bool IsInstDep = false;
  computeDeclRefDependence(Ctx, getDecl(), getType(), IsTypeDep, IsValueDep,
                           IsInstDep);

  // Bits are only ever turned on; dependence recorded earlier survives.
  ExprBits.TypeDependent |= IsTypeDep;
  ExprBits.ValueDependent |= IsValueDep;
  ExprBits.InstantiationDependent |= IsInstDep;

  // Referencing a parameter pack makes the expression unexpanded.
  if (getDecl()->isParameterPack())
    ExprBits.ContainsUnexpandedParameterPack = true;
}
/// Construct a DeclRefExpr whose optional trailing objects (qualifier,
/// found declaration, template keyword/arguments) were allocated by
/// DeclRefExpr::Create. Dependence bits are folded in from the qualifier,
/// the template arguments, and finally the referenced declaration itself.
DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
                         NestedNameSpecifierLoc QualifierLoc,
                         SourceLocation TemplateKWLoc,
                         ValueDecl *D, bool RefersToEnclosingVariableOrCapture,
                         const DeclarationNameInfo &NameInfo,
                         NamedDecl *FoundD,
                         const TemplateArgumentListInfo *TemplateArgs,
                         QualType T, ExprValueKind VK)
  : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
    D(D), Loc(NameInfo.getLoc()), DNLoc(NameInfo.getInfo()) {
  // NOTE(review): the getInternal*() accessors appear to locate trailing
  // storage from the Has* bits, so each bit is set before its accessor is
  // used — keep this ordering.
  DeclRefExprBits.HasQualifier = QualifierLoc ? 1 : 0;
  if (QualifierLoc) {
    getInternalQualifierLoc() = QualifierLoc;
    auto *NNS = QualifierLoc.getNestedNameSpecifier();
    if (NNS->isInstantiationDependent())
      ExprBits.InstantiationDependent = true;
    if (NNS->containsUnexpandedParameterPack())
      ExprBits.ContainsUnexpandedParameterPack = true;
  }
  DeclRefExprBits.HasFoundDecl = FoundD ? 1 : 0;
  if (FoundD)
    getInternalFoundDecl() = FoundD;
  DeclRefExprBits.HasTemplateKWAndArgsInfo
    = (TemplateArgs || TemplateKWLoc.isValid()) ? 1 : 0;
  DeclRefExprBits.RefersToEnclosingVariableOrCapture =
      RefersToEnclosingVariableOrCapture;
  if (TemplateArgs) {
    bool Dependent = false;
    bool InstantiationDependent = false;
    bool ContainsUnexpandedParameterPack = false;
    getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs,
                                               Dependent,
                                               InstantiationDependent,
                                               ContainsUnexpandedParameterPack);
    assert(!Dependent && "built a DeclRefExpr with dependent template args");
    ExprBits.InstantiationDependent |= InstantiationDependent;
    ExprBits.ContainsUnexpandedParameterPack |= ContainsUnexpandedParameterPack;
  } else if (TemplateKWLoc.isValid()) {
    // Template keyword written without explicit arguments.
    getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
  }
  DeclRefExprBits.HadMultipleCandidates = 0;

  // Fold in dependence implied by the declaration itself.
  computeDependence(Ctx);
}
/// Convenience factory: builds the DeclarationNameInfo from the declared
/// name plus \p NameLoc and forwards to the primary Create overload.
DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context,
                                 NestedNameSpecifierLoc QualifierLoc,
                                 SourceLocation TemplateKWLoc,
                                 ValueDecl *D,
                                 bool RefersToEnclosingVariableOrCapture,
                                 SourceLocation NameLoc,
                                 QualType T,
                                 ExprValueKind VK,
                                 NamedDecl *FoundD,
                                 const TemplateArgumentListInfo *TemplateArgs) {
  const DeclarationNameInfo NameInfo(D->getDeclName(), NameLoc);
  return Create(Context, QualifierLoc, TemplateKWLoc, D,
                RefersToEnclosingVariableOrCapture, NameInfo, T, VK, FoundD,
                TemplateArgs);
}
/// Primary factory for DeclRefExpr. Trailing storage for the optional
/// qualifier, found declaration and template keyword/arguments is only
/// allocated when the corresponding piece is present.
DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context,
                                 NestedNameSpecifierLoc QualifierLoc,
                                 SourceLocation TemplateKWLoc,
                                 ValueDecl *D,
                                 bool RefersToEnclosingVariableOrCapture,
                                 const DeclarationNameInfo &NameInfo,
                                 QualType T,
                                 ExprValueKind VK,
                                 NamedDecl *FoundD,
                                 const TemplateArgumentListInfo *TemplateArgs) {
  // Filter out cases where the found Decl is the same as the value referenced.
  if (D == FoundD)
    FoundD = nullptr;

  std::size_t Bytes = sizeof(DeclRefExpr);
  if (QualifierLoc)
    Bytes += sizeof(NestedNameSpecifierLoc);
  if (FoundD)
    Bytes += sizeof(NamedDecl *);
  if (TemplateArgs)
    Bytes += ASTTemplateKWAndArgsInfo::sizeFor(TemplateArgs->size());
  else if (TemplateKWLoc.isValid())
    Bytes += ASTTemplateKWAndArgsInfo::sizeFor(0); // keyword only, no args

  void *Storage = Context.Allocate(Bytes, llvm::alignOf<DeclRefExpr>());
  return new (Storage) DeclRefExpr(Context, QualifierLoc, TemplateKWLoc, D,
                                   RefersToEnclosingVariableOrCapture,
                                   NameInfo, FoundD, TemplateArgs, T, VK);
}
/// Allocate an uninitialized DeclRefExpr whose trailing storage matches
/// the shape recorded by the serializer; the AST reader fills it in.
DeclRefExpr *DeclRefExpr::CreateEmpty(const ASTContext &Context,
                                      bool HasQualifier,
                                      bool HasFoundDecl,
                                      bool HasTemplateKWAndArgsInfo,
                                      unsigned NumTemplateArgs) {
  std::size_t Bytes = sizeof(DeclRefExpr);
  if (HasQualifier)
    Bytes += sizeof(NestedNameSpecifierLoc);
  if (HasFoundDecl)
    Bytes += sizeof(NamedDecl *);
  if (HasTemplateKWAndArgsInfo)
    Bytes += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);

  void *Storage = Context.Allocate(Bytes, llvm::alignOf<DeclRefExpr>());
  return new (Storage) DeclRefExpr(EmptyShell());
}
/// The reference begins at the nested-name qualifier when one was written,
/// otherwise at the name itself.
SourceLocation DeclRefExpr::getLocStart() const {
  return hasQualifier() ? getQualifierLoc().getBeginLoc()
                        : getNameInfo().getLocStart();
}
/// The reference ends at the '>' of explicit template arguments when they
/// were written, otherwise at the end of the name.
SourceLocation DeclRefExpr::getLocEnd() const {
  return hasExplicitTemplateArgs() ? getRAngleLoc()
                                   : getNameInfo().getLocEnd();
}
/// Construct a predefined-identifier expression (__func__ and friends).
/// Dependence mirrors the function-name type; \p SL may be null when the
/// string has not been computed (e.g. in a dependent context).
PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentType IT,
                               StringLiteral *SL)
    : Expr(PredefinedExprClass, FNTy, VK_LValue, OK_Ordinary,
           FNTy->isDependentType(), FNTy->isDependentType(),
           FNTy->isInstantiationDependentType(),
           /*ContainsUnexpandedParameterPack=*/false),
      Loc(L), Type(IT), FnName(SL) {}
/// Return the computed function-name literal, or null when none was stored
/// (cast_or_null tolerates a null FnName).
StringLiteral *PredefinedExpr::getFunctionName() {
  return cast_or_null<StringLiteral>(FnName);
}
/// Map a predefined-identifier kind to its source spelling.
StringRef PredefinedExpr::getIdentTypeName(PredefinedExpr::IdentType IT) {
  switch (IT) {
  case Func:           return "__func__";
  case Function:       return "__FUNCTION__";
  case FuncDName:      return "__FUNCDNAME__";
  case FuncSig:        return "__FUNCSIG__";
  case LFunction:      return "L__FUNCTION__";
  case PrettyFunction: return "__PRETTY_FUNCTION__";
  case PrettyFunctionNoVirtual:
    // Internal-only variant with no user-writable spelling.
    break;
  }
  llvm_unreachable("Unknown ident type for PredefinedExpr");
}
// FIXME: Maybe this should use DeclPrinter with a special "print predefined
// expr" policy instead.
/// Compute the string a predefined identifier (__func__, __FUNCTION__,
/// __FUNCSIG__, __FUNCDNAME__, __PRETTY_FUNCTION__, ...) evaluates to for
/// \p CurrentDecl. Returns "" when no sensible name exists.
std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
  ASTContext &Context = CurrentDecl->getASTContext();

  // __FUNCDNAME__: the decorated (mangled) name.
  if (IT == PredefinedExpr::FuncDName) {
    if (const NamedDecl *ND = dyn_cast<NamedDecl>(CurrentDecl)) {
      std::unique_ptr<MangleContext> MC;
      MC.reset(Context.createMangleContext());

      if (MC->shouldMangleDeclName(ND)) {
        SmallString<256> Buffer;
        llvm::raw_svector_ostream Out(Buffer);
        if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(ND))
          MC->mangleCXXCtor(CD, Ctor_Base, Out);
        else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(ND))
          MC->mangleCXXDtor(DD, Dtor_Base, Out);
        else
          MC->mangleName(ND, Out);

        Out.flush();
        // Drop the leading '\01' marker that suppresses further decoration.
        if (!Buffer.empty() && Buffer.front() == '\01')
          return Buffer.substr(1);
        return Buffer.str();
      } else
        return ND->getIdentifier()->getName();
    }
    return "";
  }
  if (auto *BD = dyn_cast<BlockDecl>(CurrentDecl)) {
    // For blocks, use the mangled block name.
    std::unique_ptr<MangleContext> MC;
    MC.reset(Context.createMangleContext());
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    auto DC = CurrentDecl->getDeclContext();
    if (DC->isFileContext())
      MC->mangleGlobalBlock(BD, /*ID*/ nullptr, Out);
    else if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
      MC->mangleCtorBlock(CD, /*CT*/ Ctor_Complete, BD, Out);
    else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
      MC->mangleDtorBlock(DD, /*DT*/ Dtor_Complete, BD, Out);
    else
      MC->mangleBlock(DC, BD, Out);
    return Out.str();
  }
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurrentDecl)) {
    // The simple spellings just use the (unqualified) function name.
    if (IT != PrettyFunction && IT != PrettyFunctionNoVirtual && IT != FuncSig)
      return FD->getNameAsString();

    SmallString<256> Name;
    llvm::raw_svector_ostream Out(Name);

    if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
      if (MD->isVirtual() && IT != PrettyFunctionNoVirtual)
        Out << "virtual ";
      if (MD->isStatic())
        Out << "static ";
    }

    PrintingPolicy Policy(Context.getLangOpts());
    std::string Proto;
    llvm::raw_string_ostream POut(Proto);

    // Print from the pattern so template parameter names are meaningful.
    const FunctionDecl *Decl = FD;
    if (const FunctionDecl* Pattern = FD->getTemplateInstantiationPattern())
      Decl = Pattern;
    const FunctionType *AFT = Decl->getType()->getAs<FunctionType>();
    const FunctionProtoType *FT = nullptr;
    if (FD->hasWrittenPrototype())
      FT = dyn_cast<FunctionProtoType>(AFT);

    if (IT == FuncSig) {
      // BUGFIX: FT is null when the function has no written prototype;
      // dereferencing it here crashed. Skip the calling convention instead.
      if (FT) {
        switch (FT->getCallConv()) {
        case CC_C: POut << "__cdecl "; break;
        case CC_X86StdCall: POut << "__stdcall "; break;
        case CC_X86FastCall: POut << "__fastcall "; break;
        case CC_X86ThisCall: POut << "__thiscall "; break;
        case CC_X86VectorCall: POut << "__vectorcall "; break;
        // Only bother printing the conventions that MSVC knows about.
        default: break;
        }
      }
    }

    FD->printQualifiedName(POut, Policy);

    POut << "(";
    if (FT) {
      for (unsigned i = 0, e = Decl->getNumParams(); i != e; ++i) {
        if (i) POut << ", ";
        POut << Decl->getParamDecl(i)->getType().stream(Policy);
      }

      if (FT->isVariadic()) {
        if (FD->getNumParams()) POut << ", ";
        POut << "...";
      }
    }
    POut << ")";

    if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
      // Trailing cv- and ref-qualifiers of the member function.
      const FunctionType *FT = MD->getType()->castAs<FunctionType>();
      if (FT->isConst())
        POut << " const";
      if (FT->isVolatile())
        POut << " volatile";
      RefQualifierKind Ref = MD->getRefQualifier();
      if (Ref == RQ_LValue)
        POut << " &";
      else if (Ref == RQ_RValue)
        POut << " &&";
    }

    // Collect enclosing class template specializations, innermost first.
    typedef SmallVector<const ClassTemplateSpecializationDecl *, 8> SpecsTy;
    SpecsTy Specs;
    const DeclContext *Ctx = FD->getDeclContext();
    while (Ctx && isa<NamedDecl>(Ctx)) {
      const ClassTemplateSpecializationDecl *Spec
                               = dyn_cast<ClassTemplateSpecializationDecl>(Ctx);
      if (Spec && !Spec->isExplicitSpecialization())
        Specs.push_back(Spec);
      Ctx = Ctx->getParent();
    }

    // Render "[T = int, U = float]" style bindings, outermost first.
    std::string TemplateParams;
    llvm::raw_string_ostream TOut(TemplateParams);
    for (SpecsTy::reverse_iterator I = Specs.rbegin(), E = Specs.rend();
         I != E; ++I) {
      const TemplateParameterList *Params
                  = (*I)->getSpecializedTemplate()->getTemplateParameters();
      const TemplateArgumentList &Args = (*I)->getTemplateArgs();
      assert(Params->size() == Args.size());
      for (unsigned i = 0, numParams = Params->size(); i != numParams; ++i) {
        StringRef Param = Params->getParam(i)->getName();
        if (Param.empty()) continue;
        TOut << Param << " = ";
        Args.get(i).print(Policy, TOut);
        TOut << ", ";
      }
    }

    FunctionTemplateSpecializationInfo *FSI
                                          = FD->getTemplateSpecializationInfo();
    if (FSI && !FSI->isExplicitSpecialization()) {
      const TemplateParameterList* Params
                                  = FSI->getTemplate()->getTemplateParameters();
      const TemplateArgumentList* Args = FSI->TemplateArguments;
      assert(Params->size() == Args->size());
      for (unsigned i = 0, e = Params->size(); i != e; ++i) {
        StringRef Param = Params->getParam(i)->getName();
        if (Param.empty()) continue;
        TOut << Param << " = ";
        Args->get(i).print(Policy, TOut);
        TOut << ", ";
      }
    }

    TOut.flush();
    if (!TemplateParams.empty()) {
      // remove the trailing comma and space
      TemplateParams.resize(TemplateParams.size() - 2);
      POut << " [" << TemplateParams << "]";
    }

    POut.flush();

    // Print "auto" for all deduced return types. This includes C++1y return
    // type deduction and lambdas. For trailing return types resolve the
    // decltype expression. Otherwise print the real type when this is
    // not a constructor or destructor.
    if (isa<CXXMethodDecl>(FD) &&
        cast<CXXMethodDecl>(FD)->getParent()->isLambda())
      Proto = "auto " + Proto;
    else if (FT && FT->getReturnType()->getAs<DecltypeType>())
      FT->getReturnType()
          ->getAs<DecltypeType>()
          ->getUnderlyingType()
          .getAsStringInternal(Proto, Policy);
    else if (!isa<CXXConstructorDecl>(FD) && !isa<CXXDestructorDecl>(FD))
      AFT->getReturnType().getAsStringInternal(Proto, Policy);

    Out << Proto;

    Out.flush();
    return Name.str().str();
  }
  if (const CapturedDecl *CD = dyn_cast<CapturedDecl>(CurrentDecl)) {
    for (const DeclContext *DC = CD->getParent(); DC; DC = DC->getParent())
      // Skip to its enclosing function or method, but not its enclosing
      // CapturedDecl.
      if (DC->isFunctionOrMethod() && (DC->getDeclKind() != Decl::Captured)) {
        const Decl *D = Decl::castFromDeclContext(DC);
        return ComputeName(IT, D);
      }
    llvm_unreachable("CapturedDecl not inside a function or method");
  }
  if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CurrentDecl)) {
    SmallString<256> Name;
    llvm::raw_svector_ostream Out(Name);
    Out << (MD->isInstanceMethod() ? '-' : '+');
    Out << '[';

    // For incorrect code, there might not be an ObjCInterfaceDecl. Do
    // a null check to avoid a crash.
    if (const ObjCInterfaceDecl *ID = MD->getClassInterface())
      Out << *ID;

    if (const ObjCCategoryImplDecl *CID =
        dyn_cast<ObjCCategoryImplDecl>(MD->getDeclContext()))
      Out << '(' << *CID << ')';

    Out <<  ' ';
    MD->getSelector().print(Out);
    Out <<  ']';

    Out.flush();
    return Name.str().str();
  }
  if (isa<TranslationUnitDecl>(CurrentDecl) && IT == PrettyFunction) {
    // __PRETTY_FUNCTION__ -> "top level", the others produce an empty string.
    return "top level";
  }
  return "";
}
/// Store \p Val into this storage, spilling to an ASTContext-allocated
/// word array only when the value needs more than one 64-bit word.
void APNumericStorage::setIntValue(const ASTContext &C,
                                   const llvm::APInt &Val) {
  // Release any previously heap-allocated representation first.
  if (hasAllocation())
    C.Deallocate(pVal);

  BitWidth = Val.getBitWidth();
  const unsigned NumWords = Val.getNumWords();
  const uint64_t *Words = Val.getRawData();

  if (NumWords > 1) {
    pVal = new (C) uint64_t[NumWords];
    std::copy(Words, Words + NumWords, pVal);
  } else {
    // Zero words means a zero-width value; store 0 inline.
    VAL = (NumWords == 1) ? Words[0] : 0;
  }
}
/// Construct an integer literal of the given type holding \p V.
/// The value's bit-width must match the width of \p type on this target.
IntegerLiteral::IntegerLiteral(const ASTContext &C, const llvm::APInt &V,
                               QualType type, SourceLocation l)
  : Expr(IntegerLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
         false, false),
    Loc(l) {
  assert(type->isIntegerType() && "Illegal type in IntegerLiteral");
  assert(V.getBitWidth() == C.getIntWidth(type) &&
         "Integer type is not the correct size for constant.");
  setValue(C, V);
}
/// Factory: allocate an IntegerLiteral in the ASTContext.
IntegerLiteral *
IntegerLiteral::Create(const ASTContext &C, const llvm::APInt &V,
                       QualType type, SourceLocation l) {
  return new (C) IntegerLiteral(C, V, type, l);
}

/// Factory: allocate an empty IntegerLiteral for deserialization.
IntegerLiteral *
IntegerLiteral::Create(const ASTContext &C, EmptyShell Empty) {
  return new (C) IntegerLiteral(Empty);
}
/// Construct a floating literal of the given type holding \p V.
/// \p isexact records whether the written token converted exactly.
FloatingLiteral::FloatingLiteral(const ASTContext &C, const llvm::APFloat &V,
                                 bool isexact, QualType Type, SourceLocation L)
  : Expr(FloatingLiteralClass, Type, VK_RValue, OK_Ordinary, false, false,
         false, false), Loc(L) {
  setSemantics(V.getSemantics());
  FloatingLiteralBits.IsExact = isexact;
  setValue(C, V);
}

/// Empty-shell constructor for deserialization; picks an arbitrary valid
/// semantics so the object is well-formed until the reader fills it in.
FloatingLiteral::FloatingLiteral(const ASTContext &C, EmptyShell Empty)
  : Expr(FloatingLiteralClass, Empty) {
  setRawSemantics(IEEEhalf);
  FloatingLiteralBits.IsExact = false;
}

/// Factory: allocate a FloatingLiteral in the ASTContext.
FloatingLiteral *
FloatingLiteral::Create(const ASTContext &C, const llvm::APFloat &V,
                        bool isexact, QualType Type, SourceLocation L) {
  return new (C) FloatingLiteral(C, V, isexact, Type, L);
}

/// Factory: allocate an empty FloatingLiteral for deserialization.
FloatingLiteral *
FloatingLiteral::Create(const ASTContext &C, EmptyShell Empty) {
  return new (C) FloatingLiteral(C, Empty);
}
/// Decode the compact Semantics bitfield back into the llvm::fltSemantics
/// singleton it denotes.
const llvm::fltSemantics &FloatingLiteral::getSemantics() const {
  switch (FloatingLiteralBits.Semantics) {
  case IEEEhalf:          return llvm::APFloat::IEEEhalf;
  case IEEEsingle:        return llvm::APFloat::IEEEsingle;
  case IEEEdouble:        return llvm::APFloat::IEEEdouble;
  case x87DoubleExtended: return llvm::APFloat::x87DoubleExtended;
  case IEEEquad:          return llvm::APFloat::IEEEquad;
  case PPCDoubleDouble:   return llvm::APFloat::PPCDoubleDouble;
  }
  llvm_unreachable("Unrecognised floating semantics");
}
/// Encode an llvm::fltSemantics singleton into the compact Semantics
/// bitfield. Address comparison suffices: each semantics object is unique.
void FloatingLiteral::setSemantics(const llvm::fltSemantics &Sem) {
  unsigned Encoded;
  if (&Sem == &llvm::APFloat::IEEEhalf)
    Encoded = IEEEhalf;
  else if (&Sem == &llvm::APFloat::IEEEsingle)
    Encoded = IEEEsingle;
  else if (&Sem == &llvm::APFloat::IEEEdouble)
    Encoded = IEEEdouble;
  else if (&Sem == &llvm::APFloat::x87DoubleExtended)
    Encoded = x87DoubleExtended;
  else if (&Sem == &llvm::APFloat::IEEEquad)
    Encoded = IEEEquad;
  else if (&Sem == &llvm::APFloat::PPCDoubleDouble)
    Encoded = PPCDoubleDouble;
  else
    llvm_unreachable("Unknown floating semantics");
  FloatingLiteralBits.Semantics = Encoded;
}
/// getValueAsApproximateDouble - This returns the value as an inaccurate
/// double. Note that this may cause loss of precision, but is useful for
/// debugging dumps, etc.
double FloatingLiteral::getValueAsApproximateDouble() const {
llvm::APFloat V = getValue();
bool ignored;
V.convert(llvm::APFloat::IEEEdouble, llvm::APFloat::rmNearestTiesToEven,
&ignored);
return V.convertToDouble();
}
/// Map a string-literal kind to the width in bytes of one of its code
/// units on the given target.
int StringLiteral::mapCharByteWidth(TargetInfo const &target, StringKind k) {
  int BitWidth = 0;
  switch (k) {
  case Ascii:
  case UTF8:
    BitWidth = target.getCharWidth();
    break;
  case Wide:
    BitWidth = target.getWCharWidth();
    break;
  case UTF16:
    BitWidth = target.getChar16Width();
    break;
  case UTF32:
    BitWidth = target.getChar32Width();
    break;
  }
  assert((BitWidth & 7) == 0 && "Assumes character size is byte multiple");
  const int ByteWidth = BitWidth / 8;
  assert((ByteWidth==1 || ByteWidth==2 || ByteWidth==4)
         && "character byte widths supported are 1, 2, and 4 only");
  return ByteWidth;
}
/// Build a (possibly concatenated) string literal. \p NumStrs is the number
/// of source tokens that were pasted together; one SourceLocation per token
/// is stored in trailing storage.
StringLiteral *StringLiteral::Create(const ASTContext &C, StringRef Str,
                                     StringKind Kind, bool Pascal, QualType Ty,
                                     const SourceLocation *Loc,
                                     unsigned NumStrs) {
  assert(C.getAsConstantArrayType(Ty) &&
         "StringLiteral must be of constant array type!");

  // Allocate enough space for the StringLiteral plus an array of locations for
  // any concatenated string tokens.
  // (TokLocs[0] is embedded in the object, hence NumStrs-1 extra slots.)
  void *Mem = C.Allocate(sizeof(StringLiteral)+
                         sizeof(SourceLocation)*(NumStrs-1),
                         llvm::alignOf<StringLiteral>());
  StringLiteral *SL = new (Mem) StringLiteral(Ty);

  // OPTIMIZE: could allocate this appended to the StringLiteral.
  SL->setString(C,Str,Kind,Pascal);

  SL->TokLocs[0] = Loc[0];
  SL->NumConcatenated = NumStrs;

  if (NumStrs != 1)
    memcpy(&SL->TokLocs[1], Loc+1, sizeof(SourceLocation)*(NumStrs-1));
  return SL;
}
/// Allocate an uninitialized StringLiteral with room for \p NumStrs token
/// locations; the AST reader fills in the contents later.
StringLiteral *StringLiteral::CreateEmpty(const ASTContext &C,
                                          unsigned NumStrs) {
  // One SourceLocation lives inside the object; tack on NumStrs-1 more.
  const std::size_t Bytes = sizeof(StringLiteral) +
                            sizeof(SourceLocation) * (NumStrs - 1);
  void *Mem = C.Allocate(Bytes, llvm::alignOf<StringLiteral>());
  StringLiteral *SL = new (Mem) StringLiteral(QualType());
  SL->CharByteWidth = 0;
  SL->Length = 0;
  SL->NumConcatenated = NumStrs;
  return SL;
}
/// Print this literal to \p OS as valid, escaped source text, including the
/// encoding prefix and surrounding quotes. Non-printable code units become
/// octal escapes; out-of-range or invalid code points become \x/\u/\U.
void StringLiteral::outputString(raw_ostream &OS) const {
  switch (getKind()) {
  case Ascii: break; // no prefix.
  case Wide:  OS << 'L'; break;
  case UTF8:  OS << "u8"; break;
  case UTF16: OS << 'u'; break;
  case UTF32: OS << 'U'; break;
  }
  OS << '"';
  static const char Hex[] = "0123456789ABCDEF";

  // Index of the last character emitted via a \x escape (sentinel: length).
  unsigned LastSlashX = getLength();
  for (unsigned I = 0, N = getLength(); I != N; ++I) {
    switch (uint32_t Char = getCodeUnit(I)) {
    default:
      // FIXME: Convert UTF-8 back to codepoints before rendering.

      // Convert UTF-16 surrogate pairs back to codepoints before rendering.
      // Leave invalid surrogates alone; we'll use \x for those.
      if (getKind() == UTF16 && I != N - 1 && Char >= 0xd800 &&
          Char <= 0xdbff) {
        uint32_t Trail = getCodeUnit(I + 1);
        if (Trail >= 0xdc00 && Trail <= 0xdfff) {
          Char = 0x10000 + ((Char - 0xd800) << 10) + (Trail - 0xdc00);
          ++I;
        }
      }

      if (Char > 0xff) {
        // If this is a wide string, output characters over 0xff using \x
        // escapes. Otherwise, this is a UTF-16 or UTF-32 string, and Char is a
        // codepoint: use \x escapes for invalid codepoints.
        if (getKind() == Wide ||
            (Char >= 0xd800 && Char <= 0xdfff) || Char >= 0x110000) {
          // FIXME: Is this the best way to print wchar_t?
          OS << "\\x";
          int Shift = 28;
          while ((Char >> Shift) == 0)
            Shift -= 4;
          for (/**/; Shift >= 0; Shift -= 4)
            OS << Hex[(Char >> Shift) & 15];
          LastSlashX = I;
          break;
        }

        if (Char > 0xffff)
          OS << "\\U00"
             << Hex[(Char >> 20) & 15]
             << Hex[(Char >> 16) & 15];
        else
          OS << "\\u";
        OS << Hex[(Char >> 12) & 15]
           << Hex[(Char >>  8) & 15]
           << Hex[(Char >>  4) & 15]
           << Hex[(Char >>  0) & 15];
        break;
      }

      // If we used \x... for the previous character, and this character is a
      // hexadecimal digit, prevent it being slurped as part of the \x.
      if (LastSlashX + 1 == I) {
        switch (Char) {
          case '0': case '1': case '2': case '3': case '4':
          case '5': case '6': case '7': case '8': case '9':
          case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
          case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
            OS << "\"\"";
        }
      }

      assert(Char <= 0xff &&
             "Characters above 0xff should already have been handled.");

      if (isPrintable(Char))
        OS << (char)Char;
      else  // Output anything hard as an octal escape.
        OS << '\\'
           << (char)('0' + ((Char >> 6) & 7))
           << (char)('0' + ((Char >> 3) & 7))
           << (char)('0' + ((Char >> 0) & 7));
      break;
    // Handle some common non-printable cases to make dumps prettier.
    case '\\': OS << "\\\\"; break;
    case '"': OS << "\\\""; break;
    case '\n': OS << "\\n"; break;
    case '\t': OS << "\\t"; break;
    case '\a': OS << "\\a"; break;
    case '\b': OS << "\\b"; break;
    }
  }
  OS << '"';
}
/// Copy \p Str into ASTContext-owned storage sized for this literal's
/// code-unit width. \p Str is raw bytes; its size must be a multiple of
/// the per-kind code-unit width on the target.
void StringLiteral::setString(const ASTContext &C, StringRef Str,
                              StringKind Kind, bool IsPascal) {
  //FIXME: we assume that the string data comes from a target that uses the same
  // code unit size and endianess for the type of string.
  this->Kind = Kind;
  this->IsPascal = IsPascal;

  CharByteWidth = mapCharByteWidth(C.getTargetInfo(),Kind);
  assert((Str.size()%CharByteWidth == 0)
         && "size of data must be multiple of CharByteWidth");
  Length = Str.size()/CharByteWidth;

  // Reinterpret the byte buffer as an array of code units of the right width.
  switch(CharByteWidth) {
    case 1: {
      char *AStrData = new (C) char[Length];
      std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
      StrData.asChar = AStrData;
      break;
    }
    case 2: {
      uint16_t *AStrData = new (C) uint16_t[Length];
      std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
      StrData.asUInt16 = AStrData;
      break;
    }
    case 4: {
      uint32_t *AStrData = new (C) uint32_t[Length];
      std::memcpy(AStrData,Str.data(),Length*sizeof(*AStrData));
      StrData.asUInt32 = AStrData;
      break;
    }
    default:
      assert(false && "unsupported CharByteWidth");
  }
}
/// getLocationOfByte - Return a source location that points to the specified
/// byte of this string literal.
///
/// Strings are amazingly complex.  They can be formed from multiple tokens and
/// can have escape sequences in them in addition to the usual trigraph and
/// escaped newline business.  This routine handles this complexity.
///
SourceLocation StringLiteral::
getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
                  const LangOptions &Features, const TargetInfo &Target) const {
  assert((Kind == StringLiteral::Ascii || Kind == StringLiteral::UTF8) &&
         "Only narrow string literals are currently supported");

  // Loop over all of the tokens in this string until we find the one that
  // contains the byte we're looking for.
  unsigned TokNo = 0;
  while (1) {
    assert(TokNo < getNumConcatenated() && "Invalid byte number!");
    SourceLocation StrTokLoc = getStrTokenLoc(TokNo);

    // Get the spelling of the string so that we can get the data that makes up
    // the string literal, not the identifier for the macro it is potentially
    // expanded through.
    SourceLocation StrTokSpellingLoc = SM.getSpellingLoc(StrTokLoc);

    // Re-lex the token to get its length and original spelling.
    std::pair<FileID, unsigned> LocInfo =SM.getDecomposedLoc(StrTokSpellingLoc);
    bool Invalid = false;
    StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
    if (Invalid)
      // Best effort: fall back to the start of the token on buffer errors.
      return StrTokSpellingLoc;

    const char *StrData = Buffer.data()+LocInfo.second;

    // Create a lexer starting at the beginning of this token.
    Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), Features,
                   Buffer.begin(), StrData, Buffer.end());
    Token TheTok;
    TheLexer.LexFromRawLexer(TheTok);

    // Use the StringLiteralParser to compute the length of the string in bytes.
    StringLiteralParser SLP(TheTok, SM, Features, Target);
    unsigned TokNumBytes = SLP.GetStringLength();

    // If the byte is in this token, return the location of the byte.
    if (ByteNo < TokNumBytes ||
        (ByteNo == TokNumBytes && TokNo == getNumConcatenated() - 1)) {
      unsigned Offset = SLP.getOffsetOfStringByte(TheTok, ByteNo);

      // Now that we know the offset of the token in the spelling, use the
      // preprocessor to get the offset in the original source.
      return Lexer::AdvanceToTokenCharacter(StrTokLoc, Offset, SM, Features);
    }

    // Move to the next string token.
    ++TokNo;
    ByteNo -= TokNumBytes;
  }
}
/// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
/// corresponds to, e.g. "sizeof" or "[pre]++".
StringRef UnaryOperator::getOpcodeStr(Opcode Op) {
  switch (Op) {
  case UO_PostInc:
  case UO_PreInc:    return "++";
  case UO_PostDec:
  case UO_PreDec:    return "--";
  case UO_AddrOf:    return "&";
  case UO_Deref:     return "*";
  case UO_Plus:      return "+";
  case UO_Minus:     return "-";
  case UO_Not:       return "~";
  case UO_LNot:      return "!";
  case UO_Real:      return "__real";
  case UO_Imag:      return "__imag";
  case UO_Extension: return "__extension__";
  }
  llvm_unreachable("Unknown unary operator");
}
/// Map an overloaded-operator kind to the unary opcode it denotes,
/// honouring the prefix/postfix distinction for ++ and --.
UnaryOperatorKind
UnaryOperator::getOverloadedOpcode(OverloadedOperatorKind OO, bool Postfix) {
  switch (OO) {
  case OO_PlusPlus:   return Postfix ? UO_PostInc : UO_PreInc;
  case OO_MinusMinus: return Postfix ? UO_PostDec : UO_PreDec;
  case OO_Amp:        return UO_AddrOf;
  case OO_Star:       return UO_Deref;
  case OO_Plus:       return UO_Plus;
  case OO_Minus:      return UO_Minus;
  case OO_Tilde:      return UO_Not;
  case OO_Exclaim:    return UO_LNot;
  default:
    llvm_unreachable("No unary operator for overloaded function");
  }
}
/// Inverse of getOverloadedOpcode: map a unary opcode back to the
/// overloadable operator kind, or OO_None when it cannot be overloaded.
OverloadedOperatorKind UnaryOperator::getOverloadedOperator(Opcode Opc) {
  switch (Opc) {
  case UO_PostInc:
  case UO_PreInc:  return OO_PlusPlus;
  case UO_PostDec:
  case UO_PreDec:  return OO_MinusMinus;
  case UO_AddrOf:  return OO_Amp;
  case UO_Deref:   return OO_Star;
  case UO_Plus:    return OO_Plus;
  case UO_Minus:   return OO_Minus;
  case UO_Not:     return OO_Tilde;
  case UO_LNot:    return OO_Exclaim;
  default:         return OO_None;
  }
}
//===----------------------------------------------------------------------===//
// Postfix Operators.
//===----------------------------------------------------------------------===//
/// Construct a call expression. The callee, any pre-args, and the argument
/// list are stored in one flat trailing array (SubExprs); dependence bits
/// are the union of the callee's and every argument's.
CallExpr::CallExpr(const ASTContext& C, StmtClass SC, Expr *fn,
                   unsigned NumPreArgs, ArrayRef<Expr*> args, QualType t,
                   ExprValueKind VK, SourceLocation rparenloc)
  : Expr(SC, t, VK, OK_Ordinary,
         fn->isTypeDependent(),
         fn->isValueDependent(),
         fn->isInstantiationDependent(),
         fn->containsUnexpandedParameterPack()),
    NumArgs(args.size()) {

  // Layout: [FN][pre-args...][args...].
  SubExprs = new (C) Stmt*[args.size() + PREARGS_START + NumPreArgs];
  SubExprs[FN] = fn;

  unsigned Slot = PREARGS_START + NumPreArgs;
  for (Expr *Arg : args) {
    // The call is dependent if any argument is.
    ExprBits.TypeDependent |= Arg->isTypeDependent();
    ExprBits.ValueDependent |= Arg->isValueDependent();
    ExprBits.InstantiationDependent |= Arg->isInstantiationDependent();
    ExprBits.ContainsUnexpandedParameterPack |=
        Arg->containsUnexpandedParameterPack();
    SubExprs[Slot++] = Arg;
  }

  CallExprBits.NumPreArgs = NumPreArgs;
  RParenLoc = rparenloc;
}
// Convenience constructor for a plain CallExpr with no pre-args; delegates
// to the general constructor above.
CallExpr::CallExpr(const ASTContext &C, Expr *fn, ArrayRef<Expr *> args,
                   QualType t, ExprValueKind VK, SourceLocation rparenloc)
    : CallExpr(C, CallExprClass, fn, /*NumPreArgs=*/0, args, t, VK, rparenloc) {
}
// Empty-shell constructor (for deserialization) with no pre-args.
CallExpr::CallExpr(const ASTContext &C, StmtClass SC, EmptyShell Empty)
    : CallExpr(C, SC, /*NumPreArgs=*/0, Empty) {}
// Empty-shell constructor (for deserialization): allocates storage for the
// callee slot and pre-args only; arguments are filled in later via
// setNumArgs/setArg.
CallExpr::CallExpr(const ASTContext &C, StmtClass SC, unsigned NumPreArgs,
                   EmptyShell Empty)
  : Expr(SC, Empty), SubExprs(nullptr), NumArgs(0) {
  // FIXME: Why do we allocate this?
  SubExprs = new (C) Stmt*[PREARGS_START+NumPreArgs];
  CallExprBits.NumPreArgs = NumPreArgs;
}
/// Return the declaration being called, looking through parens, implicit
/// casts, substituted template parameters, and pointer dereferences.
/// Returns null if no declaration can be identified.
Decl *CallExpr::getCalleeDecl() {
  Expr *Callee = getCallee()->IgnoreParenImpCasts();

  // Look through any substituted non-type template parameters.
  while (auto *Subst = dyn_cast<SubstNonTypeTemplateParmExpr>(Callee))
    Callee = Subst->getReplacement()->IgnoreParenCasts();

  // If we're calling a dereference, look at the pointer instead.
  if (auto *BO = dyn_cast<BinaryOperator>(Callee)) {
    if (BO->isPtrMemOp())
      Callee = BO->getRHS()->IgnoreParenCasts();
  } else if (auto *UO = dyn_cast<UnaryOperator>(Callee)) {
    if (UO->getOpcode() == UO_Deref)
      Callee = UO->getSubExpr()->IgnoreParenCasts();
  }

  if (auto *DRE = dyn_cast<DeclRefExpr>(Callee))
    return DRE->getDecl();
  if (auto *ME = dyn_cast<MemberExpr>(Callee))
    return ME->getMemberDecl();
  return nullptr;
}
// Return the callee as a FunctionDecl, or null if the callee is not a
// function declaration (e.g. an indirect call through a variable).
FunctionDecl *CallExpr::getDirectCallee() {
  return dyn_cast_or_null<FunctionDecl>(getCalleeDecl());
}
/// setNumArgs - This changes the number of arguments present in this call.
/// Any orphaned expressions are deleted by this, and any new operands are set
/// to null.
void CallExpr::setNumArgs(const ASTContext& C, unsigned NumArgs) {
  // No change, just return.
  if (NumArgs == getNumArgs()) return;

  // If shrinking # arguments, just truncate and forget the extras. The
  // orphaned nodes are arena-allocated by the ASTContext, so nothing is freed.
  if (NumArgs < getNumArgs()) {
    this->NumArgs = NumArgs;
    return;
  }

  // Otherwise, we are growing the # arguments. Allocate a bigger argument
  // array, copy the old entries, and null-initialize the new trailing slots.
  unsigned NumPreArgs = getNumPreArgs();
  Stmt **NewSubExprs = new (C) Stmt*[NumArgs+PREARGS_START+NumPreArgs];
  // Copy over args.
  for (unsigned i = 0; i != getNumArgs()+PREARGS_START+NumPreArgs; ++i)
    NewSubExprs[i] = SubExprs[i];
  // Null out new args.
  for (unsigned i = getNumArgs()+PREARGS_START+NumPreArgs;
       i != NumArgs+PREARGS_START+NumPreArgs; ++i)
    NewSubExprs[i] = nullptr;

  if (SubExprs) C.Deallocate(SubExprs);
  SubExprs = NewSubExprs;
  this->NumArgs = NumArgs;
}
/// getBuiltinCallee - If this is a call to a builtin, return the builtin ID. If
/// not, return 0.
unsigned CallExpr::getBuiltinCallee() const {
// All simple function calls (e.g. func()) are implicitly cast to pointer to
// function. As a result, we try and obtain the DeclRefExpr from the
// ImplicitCastExpr.
const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(getCallee());
if (!ICE) // FIXME: deal with more complex calls (e.g. (func)(), (*func)()).
return 0;
const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
if (!DRE)
return 0;
const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(DRE->getDecl());
if (!FDecl)
return 0;
if (!FDecl->getIdentifier())
return 0;
return FDecl->getBuiltinID();
}
// Return true if this is a call to a builtin that does not evaluate its
// arguments (e.g. __builtin_constant_p).
bool CallExpr::isUnevaluatedBuiltinCall(const ASTContext &Ctx) const {
  if (unsigned BI = getBuiltinCallee())
    return Ctx.BuiltinInfo.isUnevaluated(BI);
  return false;
}
/// Compute the type this call produces by stripping the callee down to its
/// underlying function type (through function pointers, block pointers, or
/// a bound-member placeholder) and taking that type's return type.
QualType CallExpr::getCallReturnType(const ASTContext &Ctx) const {
  const Expr *Callee = getCallee();
  QualType CalleeType = Callee->getType();
  if (const auto *PtrTy = CalleeType->getAs<PointerType>()) {
    CalleeType = PtrTy->getPointeeType();
  } else if (const auto *BlockPtrTy = CalleeType->getAs<BlockPointerType>()) {
    CalleeType = BlockPtrTy->getPointeeType();
  } else if (CalleeType->isSpecificPlaceholderType(BuiltinType::BoundMember)) {
    // A pseudo-destructor call "returns" void.
    if (isa<CXXPseudoDestructorExpr>(Callee->IgnoreParens()))
      return Ctx.VoidTy;

    // This should never be overloaded and so should never return null.
    CalleeType = Expr::findBoundMemberType(Callee);
  }

  return CalleeType->castAs<FunctionType>()->getReturnType();
}
/// Start location of a call: operator calls have their own rule; otherwise
/// the callee's start, falling back to the first argument if that is invalid.
SourceLocation CallExpr::getLocStart() const {
  if (const auto *OCE = dyn_cast<CXXOperatorCallExpr>(this))
    return OCE->getLocStart();

  SourceLocation Begin = getCallee()->getLocStart();
  if (Begin.isInvalid() && getNumArgs() > 0 && getArg(0))
    Begin = getArg(0)->getLocStart();
  return Begin;
}
/// End location of a call: operator calls have their own rule; otherwise
/// the right paren, falling back to the last argument if that is invalid.
SourceLocation CallExpr::getLocEnd() const {
  if (const auto *OCE = dyn_cast<CXXOperatorCallExpr>(this))
    return OCE->getLocEnd();

  SourceLocation End = getRParenLoc();
  if (End.isInvalid() && getNumArgs() > 0 && getArg(getNumArgs() - 1))
    End = getArg(getNumArgs() - 1)->getLocEnd();
  return End;
}
/// Allocate and construct an OffsetOfExpr. The component nodes and index
/// expressions are stored in trailing storage after the node itself.
OffsetOfExpr *OffsetOfExpr::Create(const ASTContext &C, QualType type,
                                   SourceLocation OperatorLoc,
                                   TypeSourceInfo *tsi,
                                   ArrayRef<OffsetOfNode> comps,
                                   ArrayRef<Expr*> exprs,
                                   SourceLocation RParenLoc) {
  std::size_t Size = sizeof(OffsetOfExpr) +
                     sizeof(OffsetOfNode) * comps.size() +
                     sizeof(Expr*) * exprs.size();
  void *Mem = C.Allocate(Size);
  return new (Mem) OffsetOfExpr(C, type, OperatorLoc, tsi, comps, exprs,
                                RParenLoc);
}
/// Allocate an empty OffsetOfExpr (for deserialization) with room in its
/// trailing storage for the given numbers of components and expressions.
OffsetOfExpr *OffsetOfExpr::CreateEmpty(const ASTContext &C,
                                        unsigned numComps, unsigned numExprs) {
  std::size_t Size = sizeof(OffsetOfExpr) +
                     sizeof(OffsetOfNode) * numComps +
                     sizeof(Expr*) * numExprs;
  void *Mem = C.Allocate(Size);
  return new (Mem) OffsetOfExpr(numComps, numExprs);
}
// Construct an offsetof expression. The components and index expressions are
// copied into trailing storage; dependence bits accumulate over the index
// expressions (a type-dependent index makes the value dependent, since
// offsetof itself is never type-dependent).
OffsetOfExpr::OffsetOfExpr(const ASTContext &C, QualType type,
                           SourceLocation OperatorLoc, TypeSourceInfo *tsi,
                           ArrayRef<OffsetOfNode> comps, ArrayRef<Expr*> exprs,
                           SourceLocation RParenLoc)
  : Expr(OffsetOfExprClass, type, VK_RValue, OK_Ordinary,
         /*TypeDependent=*/false,
         /*ValueDependent=*/tsi->getType()->isDependentType(),
         tsi->getType()->isInstantiationDependentType(),
         tsi->getType()->containsUnexpandedParameterPack()),
    OperatorLoc(OperatorLoc), RParenLoc(RParenLoc), TSInfo(tsi),
    NumComps(comps.size()), NumExprs(exprs.size())
{
  // Copy the designator components (fields, array indices, bases).
  for (unsigned i = 0; i != comps.size(); ++i) {
    setComponent(i, comps[i]);
  }

  // Copy the array-index expressions, folding their dependence into ours.
  for (unsigned i = 0; i != exprs.size(); ++i) {
    if (exprs[i]->isTypeDependent() || exprs[i]->isValueDependent())
      ExprBits.ValueDependent = true;
    if (exprs[i]->containsUnexpandedParameterPack())
      ExprBits.ContainsUnexpandedParameterPack = true;

    setIndexExpr(i, exprs[i]);
  }
}
/// Return the name of a Field or Identifier node. For an Identifier node the
/// IdentifierInfo pointer is packed into Data alongside the kind bits.
IdentifierInfo *OffsetOfExpr::OffsetOfNode::getFieldName() const {
  assert(getKind() == Field || getKind() == Identifier);
  return getKind() == Field
             ? getField()->getIdentifier()
             : reinterpret_cast<IdentifierInfo *>(Data & ~(uintptr_t)Mask);
}
// Construct a sizeof/alignof/vec_step-style trait applied to an expression.
// The result is never type-dependent, but is value-dependent if the operand's
// type is dependent; alignof additionally becomes dependent if the named
// declaration carries a dependent alignment attribute.
UnaryExprOrTypeTraitExpr::UnaryExprOrTypeTraitExpr(
    UnaryExprOrTypeTrait ExprKind, Expr *E, QualType resultType,
    SourceLocation op, SourceLocation rp)
    : Expr(UnaryExprOrTypeTraitExprClass, resultType, VK_RValue, OK_Ordinary,
           false, // Never type-dependent (C++ [temp.dep.expr]p3).
           // Value-dependent if the argument is type-dependent.
           E->isTypeDependent(), E->isInstantiationDependent(),
           E->containsUnexpandedParameterPack()),
      OpLoc(op), RParenLoc(rp) {
  UnaryExprOrTypeTraitExprBits.Kind = ExprKind;
  UnaryExprOrTypeTraitExprBits.IsType = false;
  Argument.Ex = E;

  // Check to see if we are in the situation where alignof(decl) should be
  // dependent because decl's alignment is dependent.
  if (ExprKind == UETT_AlignOf) {
    if (!isValueDependent() || !isInstantiationDependent()) {
      // Find the declaration the operand names, if any.
      E = E->IgnoreParens();

      const ValueDecl *D = nullptr;
      if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
        D = DRE->getDecl();
      else if (const auto *ME = dyn_cast<MemberExpr>(E))
        D = ME->getMemberDecl();

      if (D) {
        // A dependent aligned attribute makes alignof(decl) dependent even
        // though the operand expression itself is not.
        for (const auto *I : D->specific_attrs<AlignedAttr>()) {
          if (I->isAlignmentDependent()) {
            setValueDependent(true);
            setInstantiationDependent(true);
            break;
          }
        }
      }
    }
  }
}
// Create a MemberExpr, allocating optional trailing storage for (a) the
// qualifier/found-decl record, when the member was named with a qualifier or
// found through a different declaration/access, and (b) the template keyword
// and explicit template arguments. Dependence bits are augmented from the
// qualifier and template arguments after construction.
MemberExpr *MemberExpr::Create(
    const ASTContext &C, Expr *base, bool isarrow, SourceLocation OperatorLoc,
    NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
    ValueDecl *memberdecl, DeclAccessPair founddecl,
    DeclarationNameInfo nameinfo, const TemplateArgumentListInfo *targs,
    QualType ty, ExprValueKind vk, ExprObjectKind ok) {
  // Compute the size of the node plus any trailing storage it needs.
  std::size_t Size = sizeof(MemberExpr);

  bool hasQualOrFound = (QualifierLoc ||
                         founddecl.getDecl() != memberdecl ||
                         founddecl.getAccess() != memberdecl->getAccess());
  if (hasQualOrFound)
    Size += sizeof(MemberNameQualifier);

  if (targs)
    Size += ASTTemplateKWAndArgsInfo::sizeFor(targs->size());
  else if (TemplateKWLoc.isValid())
    Size += ASTTemplateKWAndArgsInfo::sizeFor(0);

  void *Mem = C.Allocate(Size, llvm::alignOf<MemberExpr>());
  MemberExpr *E = new (Mem)
      MemberExpr(base, isarrow, OperatorLoc, memberdecl, nameinfo, ty, vk, ok);

  if (hasQualOrFound) {
    // FIXME: Wrong. We should be looking at the member declaration we found.
    if (QualifierLoc && QualifierLoc.getNestedNameSpecifier()->isDependent()) {
      E->setValueDependent(true);
      E->setTypeDependent(true);
      E->setInstantiationDependent(true);
    }
    else if (QualifierLoc &&
             QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())
      E->setInstantiationDependent(true);

    E->HasQualifierOrFoundDecl = true;

    // Populate the trailing qualifier/found-decl record.
    MemberNameQualifier *NQ = E->getMemberQualifier();
    NQ->QualifierLoc = QualifierLoc;
    NQ->FoundDecl = founddecl;
  }

  E->HasTemplateKWAndArgsInfo = (targs || TemplateKWLoc.isValid());

  if (targs) {
    bool Dependent = false;
    bool InstantiationDependent = false;
    bool ContainsUnexpandedParameterPack = false;
    E->getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *targs,
                                                  Dependent,
                                                  InstantiationDependent,
                                                  ContainsUnexpandedParameterPack);
    if (InstantiationDependent)
      E->setInstantiationDependent(true);
  } else if (TemplateKWLoc.isValid()) {
    E->getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
  }

  return E;
}
/// Start of a member access: for implicit accesses, the qualifier (if any)
/// or the member name; otherwise the base expression, falling back to the
/// member name when the base has no valid location.
SourceLocation MemberExpr::getLocStart() const {
  if (isImplicitAccess())
    return hasQualifier() ? getQualifierLoc().getBeginLoc() : MemberLoc;

  // FIXME: We don't want this to happen. Rather, we should be able to
  // detect all kinds of implicit accesses more cleanly.
  SourceLocation BaseStartLoc = getBase()->getLocStart();
  return BaseStartLoc.isValid() ? BaseStartLoc : MemberLoc;
}
/// End of a member access: explicit template arguments extend it to the
/// closing '>'; otherwise the member name's end, falling back to the base
/// expression when the name has no valid location.
SourceLocation MemberExpr::getLocEnd() const {
  if (hasExplicitTemplateArgs())
    return getRAngleLoc();

  SourceLocation EndLoc = getMemberNameInfo().getEndLoc();
  if (EndLoc.isInvalid())
    EndLoc = getBase()->getLocEnd();
  return EndLoc;
}
// CastConsistency - Verify (via assertions) that this cast's kind is
// consistent with its result type, its operand's type, and whether it
// carries an inheritance base path. Always returns true so it can be used
// inside assert(CastConsistency()).
bool CastExpr::CastConsistency() const {
  switch (getCastKind()) {
  // These five are the only cast kinds that may carry a base path.
  case CK_DerivedToBase:
  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerived:
  case CK_BaseToDerivedMemberPointer:
    assert(!path_empty() && "Cast kind should have a base path!");
    break;
case CK_CPointerToObjCPointerCast:
assert(getType()->isObjCObjectPointerType());
assert(getSubExpr()->getType()->isPointerType());
goto CheckNoBasePath;
case CK_BlockPointerToObjCPointerCast:
assert(getType()->isObjCObjectPointerType());
assert(getSubExpr()->getType()->isBlockPointerType());
goto CheckNoBasePath;
case CK_ReinterpretMemberPointer:
assert(getType()->isMemberPointerType());
assert(getSubExpr()->getType()->isMemberPointerType());
goto CheckNoBasePath;
case CK_BitCast:
// Arbitrary casts to C pointer types count as bitcasts.
// Otherwise, we should only have block and ObjC pointer casts
// here if they stay within the type kind.
if (!getType()->isPointerType()) {
assert(getType()->isObjCObjectPointerType() ==
getSubExpr()->getType()->isObjCObjectPointerType());
assert(getType()->isBlockPointerType() ==
getSubExpr()->getType()->isBlockPointerType());
}
goto CheckNoBasePath;
case CK_AnyPointerToBlockPointerCast:
assert(getType()->isBlockPointerType());
assert(getSubExpr()->getType()->isAnyPointerType() &&
!getSubExpr()->getType()->isBlockPointerType());
goto CheckNoBasePath;
case CK_CopyAndAutoreleaseBlockObject:
assert(getType()->isBlockPointerType());
assert(getSubExpr()->getType()->isBlockPointerType());
goto CheckNoBasePath;
case CK_FunctionToPointerDecay:
assert(getType()->isPointerType());
assert(getSubExpr()->getType()->isFunctionType());
goto CheckNoBasePath;
case CK_AddressSpaceConversion:
assert(getType()->isPointerType());
assert(getSubExpr()->getType()->isPointerType());
assert(getType()->getPointeeType().getAddressSpace() !=
getSubExpr()->getType()->getPointeeType().getAddressSpace());
goto CheckNoBasePath;
// These should not have an inheritance path.
case CK_Dynamic:
case CK_ToUnion:
case CK_ArrayToPointerDecay:
case CK_NullToMemberPointer:
case CK_NullToPointer:
case CK_ConstructorConversion:
case CK_IntegralToPointer:
case CK_PointerToIntegral:
case CK_ToVoid:
case CK_VectorSplat:
case CK_IntegralCast:
case CK_IntegralToFloating:
case CK_FloatingToIntegral:
case CK_FloatingCast:
case CK_ObjCObjectLValueCast:
case CK_FloatingRealToComplex:
case CK_FloatingComplexToReal:
case CK_FloatingComplexCast:
case CK_FloatingComplexToIntegralComplex:
case CK_IntegralRealToComplex:
case CK_IntegralComplexToReal:
case CK_IntegralComplexCast:
case CK_IntegralComplexToFloatingComplex:
case CK_ARCProduceObject:
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
case CK_ZeroToOCLEvent:
// None of the above may produce a bool; conversions to bool must use one
// of the dedicated *ToBoolean cast kinds below.
assert(!getType()->isBooleanType() && "unheralded conversion to bool");
goto CheckNoBasePath;
case CK_Dependent:
case CK_LValueToRValue:
case CK_NoOp:
case CK_AtomicToNonAtomic:
case CK_NonAtomicToAtomic:
case CK_PointerToBoolean:
case CK_IntegralToBoolean:
case CK_FloatingToBoolean:
case CK_MemberPointerToBoolean:
case CK_FloatingComplexToBoolean:
case CK_IntegralComplexToBoolean:
case CK_LValueBitCast: // -> bool&
case CK_UserDefinedConversion: // operator bool()
case CK_BuiltinFnToFnPtr:
// Shared tail: every kind that jumps here must have an empty base path.
CheckNoBasePath:
assert(path_empty() && "Cast kind should not have a base path!");
break;
}
return true;
}
// Return the spelling of this cast's kind (the CK_ enumerator name without
// the "CK_" prefix), used for AST dumping and diagnostics.
const char *CastExpr::getCastKindName() const {
switch (getCastKind()) {
case CK_Dependent:
return "Dependent";
case CK_BitCast:
return "BitCast";
case CK_LValueBitCast:
return "LValueBitCast";
case CK_LValueToRValue:
return "LValueToRValue";
case CK_NoOp:
return "NoOp";
case CK_BaseToDerived:
return "BaseToDerived";
case CK_DerivedToBase:
return "DerivedToBase";
case CK_UncheckedDerivedToBase:
return "UncheckedDerivedToBase";
case CK_Dynamic:
return "Dynamic";
case CK_ToUnion:
return "ToUnion";
case CK_ArrayToPointerDecay:
return "ArrayToPointerDecay";
case CK_FunctionToPointerDecay:
return "FunctionToPointerDecay";
case CK_NullToMemberPointer:
return "NullToMemberPointer";
case CK_NullToPointer:
return "NullToPointer";
case CK_BaseToDerivedMemberPointer:
return "BaseToDerivedMemberPointer";
case CK_DerivedToBaseMemberPointer:
return "DerivedToBaseMemberPointer";
case CK_ReinterpretMemberPointer:
return "ReinterpretMemberPointer";
case CK_UserDefinedConversion:
return "UserDefinedConversion";
case CK_ConstructorConversion:
return "ConstructorConversion";
case CK_IntegralToPointer:
return "IntegralToPointer";
case CK_PointerToIntegral:
return "PointerToIntegral";
case CK_PointerToBoolean:
return "PointerToBoolean";
case CK_ToVoid:
return "ToVoid";
case CK_VectorSplat:
return "VectorSplat";
case CK_IntegralCast:
return "IntegralCast";
case CK_IntegralToBoolean:
return "IntegralToBoolean";
case CK_IntegralToFloating:
return "IntegralToFloating";
case CK_FloatingToIntegral:
return "FloatingToIntegral";
case CK_FloatingCast:
return "FloatingCast";
case CK_FloatingToBoolean:
return "FloatingToBoolean";
case CK_MemberPointerToBoolean:
return "MemberPointerToBoolean";
case CK_CPointerToObjCPointerCast:
return "CPointerToObjCPointerCast";
case CK_BlockPointerToObjCPointerCast:
return "BlockPointerToObjCPointerCast";
case CK_AnyPointerToBlockPointerCast:
return "AnyPointerToBlockPointerCast";
case CK_ObjCObjectLValueCast:
return "ObjCObjectLValueCast";
case CK_FloatingRealToComplex:
return "FloatingRealToComplex";
case CK_FloatingComplexToReal:
return "FloatingComplexToReal";
case CK_FloatingComplexToBoolean:
return "FloatingComplexToBoolean";
case CK_FloatingComplexCast:
return "FloatingComplexCast";
case CK_FloatingComplexToIntegralComplex:
return "FloatingComplexToIntegralComplex";
case CK_IntegralRealToComplex:
return "IntegralRealToComplex";
case CK_IntegralComplexToReal:
return "IntegralComplexToReal";
case CK_IntegralComplexToBoolean:
return "IntegralComplexToBoolean";
case CK_IntegralComplexCast:
return "IntegralComplexCast";
case CK_IntegralComplexToFloatingComplex:
return "IntegralComplexToFloatingComplex";
case CK_ARCConsumeObject:
return "ARCConsumeObject";
case CK_ARCProduceObject:
return "ARCProduceObject";
case CK_ARCReclaimReturnedObject:
return "ARCReclaimReturnedObject";
case CK_ARCExtendBlockObject:
return "ARCExtendBlockObject";
case CK_AtomicToNonAtomic:
return "AtomicToNonAtomic";
case CK_NonAtomicToAtomic:
return "NonAtomicToAtomic";
case CK_CopyAndAutoreleaseBlockObject:
return "CopyAndAutoreleaseBlockObject";
case CK_BuiltinFnToFnPtr:
return "BuiltinFnToFnPtr";
case CK_ZeroToOCLEvent:
return "ZeroToOCLEvent";
case CK_AddressSpaceConversion:
return "AddressSpaceConversion";
// HLSL Change Starts
case CK_FlatConversion:
return "FlatConversion";
case CK_HLSLVectorSplat:
return "HLSLVectorSplat";
case CK_HLSLMatrixSplat:
return "HLSLMatrixSplat";
case CK_HLSLVectorToScalarCast:
return "HLSLVectorToScalarCast";
case CK_HLSLMatrixToScalarCast:
return "HLSLMatrixToScalarCast";
case CK_HLSLVectorTruncationCast:
return "HLSLVectorTruncationCast";
case CK_HLSLMatrixTruncationCast:
return "HLSLMatrixTruncationCast";
case CK_HLSLVectorToMatrixCast:
return "HLSLVectorToMatrixCast";
case CK_HLSLMatrixToVectorCast:
return "HLSLMatrixToVectorCast";
case CK_HLSLDerivedToBase:
return "HLSLDerivedToBase";
case CK_HLSLCC_IntegralCast:
return "HLSLCC_IntegralCast";
case CK_HLSLCC_IntegralToBoolean:
return "HLSLCC_IntegralToBoolean";
case CK_HLSLCC_IntegralToFloating:
return "HLSLCC_IntegralToFloating";
case CK_HLSLCC_FloatingToIntegral:
return "HLSLCC_FloatingToIntegral";
case CK_HLSLCC_FloatingToBoolean:
return "HLSLCC_FloatingToBoolean";
case CK_HLSLCC_FloatingCast:
return "HLSLCC_FloatingCast";
// HLSL Change Ends
}
llvm_unreachable("Unhandled cast kind!");
}
// Return the cast's operand as the user wrote it, peeling off implicit
// wrappers: materialized temporaries, temporary bindings, constructor/
// conversion-function call scaffolding, and any chain of implicit casts.
Expr *CastExpr::getSubExprAsWritten() {
  Expr *SubExpr = nullptr;
  CastExpr *E = this;
  do {
    SubExpr = E->getSubExpr();

    // Skip through reference binding to temporary.
    if (MaterializeTemporaryExpr *Materialize
                                  = dyn_cast<MaterializeTemporaryExpr>(SubExpr))
      SubExpr = Materialize->GetTemporaryExpr();

    // Skip any temporary bindings; they're implicit.
    if (CXXBindTemporaryExpr *Binder = dyn_cast<CXXBindTemporaryExpr>(SubExpr))
      SubExpr = Binder->getSubExpr();

    // Conversions by constructor and conversion functions have a
    // subexpression describing the call; strip it off.
    if (E->getCastKind() == CK_ConstructorConversion)
      SubExpr = cast<CXXConstructExpr>(SubExpr)->getArg(0);
    else if (E->getCastKind() == CK_UserDefinedConversion)
      SubExpr = cast<CXXMemberCallExpr>(SubExpr)->getImplicitObjectArgument();

    // If the subexpression we're left with is an implicit cast, look
    // through that, too.
  } while ((E = dyn_cast<ImplicitCastExpr>(SubExpr)));

  return SubExpr;
}
// Return a pointer to the base-path array, which is stored in trailing
// storage immediately after the most-derived cast node. The switch over
// StmtNodes.inc is needed because the trailing storage begins after the
// concrete subclass, not after CastExpr itself.
CXXBaseSpecifier **CastExpr::path_buffer() {
  switch (getStmtClass()) {
#define ABSTRACT_STMT(x)
#define CASTEXPR(Type, Base) \
  case Stmt::Type##Class: \
    return reinterpret_cast<CXXBaseSpecifier**>(static_cast<Type*>(this)+1);
#define STMT(Type, Base)
#include "clang/AST/StmtNodes.inc"
  default:
    llvm_unreachable("non-cast expressions not possible here");
  }
}
// Copy the given base path into this cast's trailing storage. The caller
// must have allocated the node with room for exactly path_size() entries.
void CastExpr::setCastPath(const CXXCastPath &Path) {
  assert(Path.size() == path_size());
  memcpy(path_buffer(), Path.data(), Path.size() * sizeof(CXXBaseSpecifier*));
}
/// Allocate and construct an ImplicitCastExpr, storing any inheritance base
/// path in trailing storage after the node.
ImplicitCastExpr *ImplicitCastExpr::Create(const ASTContext &C, QualType T,
                                           CastKind Kind, Expr *Operand,
                                           const CXXCastPath *BasePath,
                                           ExprValueKind VK) {
  unsigned PathSize = BasePath ? BasePath->size() : 0;
  void *Buffer =
    C.Allocate(sizeof(ImplicitCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
  auto *E = new (Buffer) ImplicitCastExpr(T, Kind, Operand, PathSize, VK);
  if (PathSize)
    E->setCastPath(*BasePath);
  return E;
}
/// Allocate an empty ImplicitCastExpr (for deserialization) with trailing
/// room for a base path of the given size.
ImplicitCastExpr *ImplicitCastExpr::CreateEmpty(const ASTContext &C,
                                                unsigned PathSize) {
  void *Buffer =
    C.Allocate(sizeof(ImplicitCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
  return new (Buffer) ImplicitCastExpr(EmptyShell(), PathSize);
}
/// Allocate and construct a CStyleCastExpr, storing any inheritance base
/// path in trailing storage after the node.
CStyleCastExpr *CStyleCastExpr::Create(const ASTContext &C, QualType T,
                                       ExprValueKind VK, CastKind K, Expr *Op,
                                       const CXXCastPath *BasePath,
                                       TypeSourceInfo *WrittenTy,
                                       SourceLocation L, SourceLocation R) {
  unsigned PathSize = BasePath ? BasePath->size() : 0;
  void *Buffer =
    C.Allocate(sizeof(CStyleCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
  auto *E = new (Buffer) CStyleCastExpr(T, VK, K, Op, PathSize, WrittenTy, L, R);
  if (PathSize)
    E->setCastPath(*BasePath);
  return E;
}
/// Allocate an empty CStyleCastExpr (for deserialization) with trailing
/// room for a base path of the given size.
CStyleCastExpr *CStyleCastExpr::CreateEmpty(const ASTContext &C,
                                            unsigned PathSize) {
  void *Buffer =
    C.Allocate(sizeof(CStyleCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
  return new (Buffer) CStyleCastExpr(EmptyShell(), PathSize);
}
/// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
/// corresponds to, e.g. "<<=".
StringRef BinaryOperator::getOpcodeStr(Opcode Op) {
  switch (Op) {
  case BO_PtrMemD:   return ".*";
  case BO_PtrMemI:   return "->*";
  case BO_Mul:       return "*";
  case BO_Div:       return "/";
  case BO_Rem:       return "%";
  case BO_Add:       return "+";
  case BO_Sub:       return "-";
  case BO_Shl:       return "<<";
  case BO_Shr:       return ">>";
  case BO_LT:        return "<";
  case BO_GT:        return ">";
  case BO_LE:        return "<=";
  case BO_GE:        return ">=";
  case BO_EQ:        return "==";
  case BO_NE:        return "!=";
  case BO_And:       return "&";
  case BO_Xor:       return "^";
  case BO_Or:        return "|";
  case BO_LAnd:      return "&&";
  case BO_LOr:       return "||";
  case BO_Assign:    return "=";
  case BO_MulAssign: return "*=";
  case BO_DivAssign: return "/=";
  case BO_RemAssign: return "%=";
  case BO_AddAssign: return "+=";
  case BO_SubAssign: return "-=";
  case BO_ShlAssign: return "<<=";
  case BO_ShrAssign: return ">>=";
  case BO_AndAssign: return "&=";
  case BO_XorAssign: return "^=";
  case BO_OrAssign:  return "|=";
  case BO_Comma:     return ",";
  }
  llvm_unreachable("Invalid OpCode!");
}
/// Map an overloaded-operator kind to the corresponding binary opcode.
/// Asserts if the operator is not an overloadable binary operator.
BinaryOperatorKind
BinaryOperator::getOverloadedOpcode(OverloadedOperatorKind OO) {
  switch (OO) {
  default: llvm_unreachable("Not an overloadable binary operator");
  case OO_Plus: return BO_Add;
  case OO_Minus: return BO_Sub;
  case OO_Star: return BO_Mul;
  case OO_Slash: return BO_Div;
  case OO_Percent: return BO_Rem;
  case OO_Caret: return BO_Xor;
  case OO_Amp: return BO_And;
  case OO_Pipe: return BO_Or;
  case OO_Equal: return BO_Assign;
  case OO_Less: return BO_LT;
  case OO_Greater: return BO_GT;
  case OO_PlusEqual: return BO_AddAssign;
  case OO_MinusEqual: return BO_SubAssign;
  case OO_StarEqual: return BO_MulAssign;
  case OO_SlashEqual: return BO_DivAssign;
  case OO_PercentEqual: return BO_RemAssign;
  case OO_CaretEqual: return BO_XorAssign;
  case OO_AmpEqual: return BO_AndAssign;
  case OO_PipeEqual: return BO_OrAssign;
  case OO_LessLess: return BO_Shl;
  case OO_GreaterGreater: return BO_Shr;
  case OO_LessLessEqual: return BO_ShlAssign;
  case OO_GreaterGreaterEqual: return BO_ShrAssign;
  case OO_EqualEqual: return BO_EQ;
  case OO_ExclaimEqual: return BO_NE;
  case OO_LessEqual: return BO_LE;
  case OO_GreaterEqual: return BO_GE;
  case OO_AmpAmp: return BO_LAnd;
  case OO_PipePipe: return BO_LOr;
  case OO_Comma: return BO_Comma;
  case OO_ArrowStar: return BO_PtrMemI;
  }
}
// Map a binary opcode to its overloaded-operator kind via a lookup table.
// NOTE: the table is indexed directly by Opcode, so its entries must stay
// in exactly the same order as the BinaryOperatorKind enumeration.
OverloadedOperatorKind BinaryOperator::getOverloadedOperator(Opcode Opc) {
  static const OverloadedOperatorKind OverOps[] = {
    /* .* Cannot be overloaded */OO_None, OO_ArrowStar,
    OO_Star, OO_Slash, OO_Percent,
    OO_Plus, OO_Minus,
    OO_LessLess, OO_GreaterGreater,
    OO_Less, OO_Greater, OO_LessEqual, OO_GreaterEqual,
    OO_EqualEqual, OO_ExclaimEqual,
    OO_Amp,
    OO_Caret,
    OO_Pipe,
    OO_AmpAmp,
    OO_PipePipe,
    OO_Equal, OO_StarEqual,
    OO_SlashEqual, OO_PercentEqual,
    OO_PlusEqual, OO_MinusEqual,
    OO_LessLessEqual, OO_GreaterGreaterEqual,
    OO_AmpEqual, OO_CaretEqual,
    OO_PipeEqual,
    OO_Comma
  };
  return OverOps[Opc];
}
/// Construct an initializer list. The expression starts with a null type
/// (filled in by Sema); its dependence bits are the union of every
/// initializer's.
InitListExpr::InitListExpr(const ASTContext &C, SourceLocation lbraceloc,
                           ArrayRef<Expr*> initExprs, SourceLocation rbraceloc)
  : Expr(InitListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
         false, false),
    InitExprs(C, initExprs.size()),
    LBraceLoc(lbraceloc), RBraceLoc(rbraceloc), AltForm(nullptr, true)
{
  sawArrayRangeDesignator(false);
  sawVectorInitWithCXXFunctionalCastExpr(false); // HLSL change.

  // Fold each initializer's dependence into the list's bits.
  for (Expr *Init : initExprs) {
    ExprBits.TypeDependent |= Init->isTypeDependent();
    ExprBits.ValueDependent |= Init->isValueDependent();
    ExprBits.InstantiationDependent |= Init->isInstantiationDependent();
    ExprBits.ContainsUnexpandedParameterPack |=
        Init->containsUnexpandedParameterPack();
  }

  InitExprs.insert(C, InitExprs.end(), initExprs.begin(), initExprs.end());
}
// Grow the initializer storage capacity (never shrinks).
void InitListExpr::reserveInits(const ASTContext &C, unsigned NumInits) {
  if (NumInits > InitExprs.size())
    InitExprs.reserve(C, NumInits);
}
// Resize the initializer list; newly created slots are null.
void InitListExpr::resizeInits(const ASTContext &C, unsigned NumInits) {
  InitExprs.resize(C, NumInits, nullptr);
}
/// Replace the initializer at index \p Init with \p expr, growing the list
/// (null-padded) if the index is out of range. Returns the previous
/// initializer at that slot, or null if there was none.
Expr *InitListExpr::updateInit(const ASTContext &C, unsigned Init, Expr *expr) {
  if (Init >= InitExprs.size()) {
    // Grow with null entries up to and including the requested slot.
    InitExprs.insert(C, InitExprs.end(), Init - InitExprs.size() + 1, nullptr);
    setInit(Init, expr);
    return nullptr;
  }

  Expr *Previous = cast_or_null<Expr>(InitExprs[Init]);
  setInit(Init, expr);
  return Previous;
}
/// Record the array filler expression and use it to plug every hole left by
/// designated initializers.
void InitListExpr::setArrayFiller(Expr *filler) {
  assert(!hasArrayFiller() && "Filler already set!");
  ArrayFillerOrUnionFieldInit = filler;

  // Fill out any "holes" in the array due to designated initializers.
  Expr **I = getInits();
  for (Expr **E = I + getNumInits(); I != E; ++I)
    if (!*I)
      *I = filler;
}
bool InitListExpr::isStringLiteralInit() const {
if (getNumInits() != 1)
return false;
const ArrayType *AT = getType()->getAsArrayTypeUnsafe();
if (!AT || !AT->getElementType()->isIntegerType())
return false;
// It is possible for getInit() to return null.
const Expr *Init = getInit(0);
if (!Init)
return false;
Init = Init->IgnoreParens();
return isa<StringLiteral>(Init) || isa<ObjCEncodeExpr>(Init);
}
/// Start location: prefer the syntactic form; otherwise the left brace,
/// falling back to the first non-null initializer when the brace location
/// is invalid (e.g. an implicit list).
SourceLocation InitListExpr::getLocStart() const {
  if (InitListExpr *SyntacticForm = getSyntacticForm())
    return SyntacticForm->getLocStart();

  SourceLocation Beg = LBraceLoc;
  if (Beg.isValid())
    return Beg;

  // Find the first non-null initializer.
  for (Stmt *S : InitExprs)
    if (S)
      return S->getLocStart();

  return Beg;
}
/// End location: prefer the syntactic form; otherwise the right brace,
/// falling back to the last non-null initializer when the brace location
/// is invalid (e.g. an implicit list).
SourceLocation InitListExpr::getLocEnd() const {
  if (InitListExpr *SyntacticForm = getSyntacticForm())
    return SyntacticForm->getLocEnd();

  SourceLocation End = RBraceLoc;
  if (End.isValid())
    return End;

  // Find the last non-null initializer.
  for (InitExprsTy::const_reverse_iterator RI = InitExprs.rbegin(),
                                           RE = InitExprs.rend();
       RI != RE; ++RI)
    if (Stmt *S = *RI)
      return S->getLocEnd();

  return End;
}
/// getFunctionType - Return the underlying function type for this block.
///
const FunctionProtoType *BlockExpr::getFunctionType() const {
  // The block pointer is never sugared, but the function type might be.
  return cast<BlockPointerType>(getType())
           ->getPointeeType()->castAs<FunctionProtoType>();
}
// Trivial accessors forwarding to the underlying BlockDecl.
SourceLocation BlockExpr::getCaretLocation() const {
  return TheBlock->getCaretLocation();
}
const Stmt *BlockExpr::getBody() const {
  return TheBlock->getBody();
}
Stmt *BlockExpr::getBody() {
  return TheBlock->getBody();
}
//===----------------------------------------------------------------------===//
// Generic Expression Routines
//===----------------------------------------------------------------------===//
/// isUnusedResultAWarning - Return true if this immediate expression should
/// be warned about if the result is unused. If so, fill in Loc and Ranges
/// with location to warn on and the source range[s] to report with the
/// warning.
bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
SourceRange &R1, SourceRange &R2,
ASTContext &Ctx) const {
// Don't warn if the expr is type dependent. The type could end up
// instantiating to void.
if (isTypeDependent())
return false;
switch (getStmtClass()) {
default:
if (getType()->isVoidType())
return false;
WarnE = this;
Loc = getExprLoc();
R1 = getSourceRange();
return true;
case ParenExprClass:
return cast<ParenExpr>(this)->getSubExpr()->
isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
case GenericSelectionExprClass:
return cast<GenericSelectionExpr>(this)->getResultExpr()->
isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
case ChooseExprClass:
return cast<ChooseExpr>(this)->getChosenSubExpr()->
isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
case UnaryOperatorClass: {
const UnaryOperator *UO = cast<UnaryOperator>(this);
switch (UO->getOpcode()) {
case UO_Plus:
case UO_Minus:
case UO_AddrOf:
case UO_Not:
case UO_LNot:
case UO_Deref:
break;
case UO_PostInc:
case UO_PostDec:
case UO_PreInc:
case UO_PreDec: // ++/--
return false; // Not a warning.
case UO_Real:
case UO_Imag:
// accessing a piece of a volatile complex is a side-effect.
if (Ctx.getCanonicalType(UO->getSubExpr()->getType())
.isVolatileQualified())
return false;
break;
case UO_Extension:
return UO->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
}
WarnE = this;
Loc = UO->getOperatorLoc();
R1 = UO->getSubExpr()->getSourceRange();
return true;
}
case BinaryOperatorClass: {
const BinaryOperator *BO = cast<BinaryOperator>(this);
switch (BO->getOpcode()) {
default:
break;
// Consider the RHS of comma for side effects. LHS was checked by
// Sema::CheckCommaOperands.
case BO_Comma:
// ((foo = <blah>), 0) is an idiom for hiding the result (and
// lvalue-ness) of an assignment written in a macro.
if (IntegerLiteral *IE =
dyn_cast<IntegerLiteral>(BO->getRHS()->IgnoreParens()))
if (IE->getValue() == 0)
return false;
return BO->getRHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
// Consider '||', '&&' to have side effects if the LHS or RHS does.
case BO_LAnd:
case BO_LOr:
if (!BO->getLHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx) ||
!BO->getRHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx))
return false;
break;
}
if (BO->isAssignmentOp())
return false;
WarnE = this;
Loc = BO->getOperatorLoc();
R1 = BO->getLHS()->getSourceRange();
R2 = BO->getRHS()->getSourceRange();
return true;
}
case CompoundAssignOperatorClass:
case VAArgExprClass:
case AtomicExprClass:
return false;
case ConditionalOperatorClass: {
// If only one of the LHS or RHS is a warning, the operator might
// be being used for control flow. Only warn if both the LHS and
// RHS are warnings.
const ConditionalOperator *Exp = cast<ConditionalOperator>(this);
if (!Exp->getRHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx))
return false;
if (!Exp->getLHS())
return true;
return Exp->getLHS()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
}
case MemberExprClass:
WarnE = this;
Loc = cast<MemberExpr>(this)->getMemberLoc();
R1 = SourceRange(Loc, Loc);
R2 = cast<MemberExpr>(this)->getBase()->getSourceRange();
return true;
case ArraySubscriptExprClass:
WarnE = this;
Loc = cast<ArraySubscriptExpr>(this)->getRBracketLoc();
R1 = cast<ArraySubscriptExpr>(this)->getLHS()->getSourceRange();
R2 = cast<ArraySubscriptExpr>(this)->getRHS()->getSourceRange();
return true;
case CXXOperatorCallExprClass: {
// Warn about operator ==,!=,<,>,<=, and >= even when user-defined operator
// overloads as there is no reasonable way to define these such that they
// have non-trivial, desirable side-effects. See the -Wunused-comparison
// warning: operators == and != are commonly typo'ed, and so warning on them
// provides additional value as well. If this list is updated,
// DiagnoseUnusedComparison should be as well.
const CXXOperatorCallExpr *Op = cast<CXXOperatorCallExpr>(this);
switch (Op->getOperator()) {
default:
break;
case OO_EqualEqual:
case OO_ExclaimEqual:
case OO_Less:
case OO_Greater:
case OO_GreaterEqual:
case OO_LessEqual:
if (Op->getCallReturnType(Ctx)->isReferenceType() ||
Op->getCallReturnType(Ctx)->isVoidType())
break;
WarnE = this;
Loc = Op->getOperatorLoc();
R1 = Op->getSourceRange();
return true;
}
// Fallthrough for generic call handling.
LLVM_FALLTHROUGH; // HLSL Change
}
case CallExprClass:
case CXXMemberCallExprClass:
case UserDefinedLiteralClass: {
// If this is a direct call, get the callee.
const CallExpr *CE = cast<CallExpr>(this);
if (const Decl *FD = CE->getCalleeDecl()) {
const FunctionDecl *Func = dyn_cast<FunctionDecl>(FD);
bool HasWarnUnusedResultAttr = Func ? Func->hasUnusedResultAttr()
: FD->hasAttr<WarnUnusedResultAttr>();
// If the callee has attribute pure, const, or warn_unused_result, warn
// about it. void foo() { strlen("bar"); } should warn.
//
// Note: If new cases are added here, DiagnoseUnusedExprResult should be
// updated to match for QoI.
if (HasWarnUnusedResultAttr ||
FD->hasAttr<PureAttr>() || FD->hasAttr<ConstAttr>()) {
WarnE = this;
Loc = CE->getCallee()->getLocStart();
R1 = CE->getCallee()->getSourceRange();
if (unsigned NumArgs = CE->getNumArgs())
R2 = SourceRange(CE->getArg(0)->getLocStart(),
CE->getArg(NumArgs-1)->getLocEnd());
return true;
}
}
return false;
}
// If we don't know precisely what we're looking at, let's not warn.
case UnresolvedLookupExprClass:
case CXXUnresolvedConstructExprClass:
return false;
case CXXTemporaryObjectExprClass:
case CXXConstructExprClass: {
if (const CXXRecordDecl *Type = getType()->getAsCXXRecordDecl()) {
if (Type->hasAttr<WarnUnusedAttr>()) {
WarnE = this;
Loc = getLocStart();
R1 = getSourceRange();
return true;
}
}
return false;
}
case ObjCMessageExprClass: {
const ObjCMessageExpr *ME = cast<ObjCMessageExpr>(this);
if (Ctx.getLangOpts().ObjCAutoRefCount &&
ME->isInstanceMessage() &&
!ME->getType()->isVoidType() &&
ME->getMethodFamily() == OMF_init) {
WarnE = this;
Loc = getExprLoc();
R1 = ME->getSourceRange();
return true;
}
if (const ObjCMethodDecl *MD = ME->getMethodDecl())
if (MD->hasAttr<WarnUnusedResultAttr>()) {
WarnE = this;
Loc = getExprLoc();
return true;
}
return false;
}
case ObjCPropertyRefExprClass:
WarnE = this;
Loc = getExprLoc();
R1 = getSourceRange();
return true;
case PseudoObjectExprClass: {
const PseudoObjectExpr *PO = cast<PseudoObjectExpr>(this);
// Only complain about things that have the form of a getter.
if (isa<UnaryOperator>(PO->getSyntacticForm()) ||
isa<BinaryOperator>(PO->getSyntacticForm()))
return false;
WarnE = this;
Loc = getExprLoc();
R1 = getSourceRange();
return true;
}
case StmtExprClass: {
// Statement exprs don't logically have side effects themselves, but are
// sometimes used in macros in ways that give them a type that is unused.
// For example ({ blah; foo(); }) will end up with a type if foo has a type.
// however, if the result of the stmt expr is dead, we don't want to emit a
// warning.
const CompoundStmt *CS = cast<StmtExpr>(this)->getSubStmt();
if (!CS->body_empty()) {
if (const Expr *E = dyn_cast<Expr>(CS->body_back()))
return E->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
if (const LabelStmt *Label = dyn_cast<LabelStmt>(CS->body_back()))
if (const Expr *E = dyn_cast<Expr>(Label->getSubStmt()))
return E->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
}
if (getType()->isVoidType())
return false;
WarnE = this;
Loc = cast<StmtExpr>(this)->getLParenLoc();
R1 = getSourceRange();
return true;
}
case CXXFunctionalCastExprClass:
case CStyleCastExprClass: {
// Ignore an explicit cast to void unless the operand is a non-trivial
// volatile lvalue.
const CastExpr *CE = cast<CastExpr>(this);
if (CE->getCastKind() == CK_ToVoid) {
if (CE->getSubExpr()->isGLValue() &&
CE->getSubExpr()->getType().isVolatileQualified()) {
const DeclRefExpr *DRE =
dyn_cast<DeclRefExpr>(CE->getSubExpr()->IgnoreParens());
if (!(DRE && isa<VarDecl>(DRE->getDecl()) &&
cast<VarDecl>(DRE->getDecl())->hasLocalStorage())) {
return CE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc,
R1, R2, Ctx);
}
}
return false;
}
// If this is a cast to a constructor conversion, check the operand.
// Otherwise, the result of the cast is unused.
if (CE->getCastKind() == CK_ConstructorConversion)
return CE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
WarnE = this;
if (const CXXFunctionalCastExpr *CXXCE =
dyn_cast<CXXFunctionalCastExpr>(this)) {
Loc = CXXCE->getLocStart();
R1 = CXXCE->getSubExpr()->getSourceRange();
} else {
const CStyleCastExpr *CStyleCE = cast<CStyleCastExpr>(this);
Loc = CStyleCE->getLParenLoc();
R1 = CStyleCE->getSubExpr()->getSourceRange();
}
return true;
}
case ImplicitCastExprClass: {
const CastExpr *ICE = cast<ImplicitCastExpr>(this);
// lvalue-to-rvalue conversion on a volatile lvalue is a side-effect.
if (ICE->getCastKind() == CK_LValueToRValue &&
ICE->getSubExpr()->getType().isVolatileQualified())
return false;
return ICE->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
}
case CXXDefaultArgExprClass:
return (cast<CXXDefaultArgExpr>(this)
->getExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
case CXXDefaultInitExprClass:
return (cast<CXXDefaultInitExpr>(this)
->getExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
case CXXNewExprClass:
// FIXME: In theory, there might be new expressions that don't have side
// effects (e.g. a placement new with an uninitialized POD).
case CXXDeleteExprClass:
return false;
case CXXBindTemporaryExprClass:
return (cast<CXXBindTemporaryExpr>(this)
->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
case ExprWithCleanupsClass:
return (cast<ExprWithCleanups>(this)
->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
}
}
/// isOBJCGCCandidate - Check if an expression is objc gc'able.
/// returns true, if it is; false otherwise.
bool Expr::isOBJCGCCandidate(ASTContext &Ctx) const {
  const Expr *Stripped = IgnoreParens();
  switch (Stripped->getStmtClass()) {
  case ObjCIvarRefExprClass:
    // Instance variable references are always candidates.
    return true;
  case Expr::UnaryOperatorClass:
    return cast<UnaryOperator>(Stripped)->getSubExpr()->isOBJCGCCandidate(Ctx);
  case ImplicitCastExprClass:
    return cast<ImplicitCastExpr>(Stripped)
        ->getSubExpr()
        ->isOBJCGCCandidate(Ctx);
  case MaterializeTemporaryExprClass:
    return cast<MaterializeTemporaryExpr>(Stripped)
        ->GetTemporaryExpr()
        ->isOBJCGCCandidate(Ctx);
  case CStyleCastExprClass:
    return cast<CStyleCastExpr>(Stripped)->getSubExpr()->isOBJCGCCandidate(Ctx);
  case DeclRefExprClass: {
    const Decl *D = cast<DeclRefExpr>(Stripped)->getDecl();
    const VarDecl *VD = dyn_cast<VarDecl>(D);
    if (!VD)
      return false;
    if (VD->hasGlobalStorage())
      return true;
    QualType T = VD->getType();
    // dereferencing to a pointer is always a gc'able candidate,
    // unless it is __weak.
    return T->isPointerType() &&
           (Ctx.getObjCGCAttrKind(T) != Qualifiers::Weak);
  }
  case MemberExprClass:
    return cast<MemberExpr>(Stripped)->getBase()->isOBJCGCCandidate(Ctx);
  case ArraySubscriptExprClass:
    return cast<ArraySubscriptExpr>(Stripped)->getBase()->isOBJCGCCandidate(
        Ctx);
  default:
    return false;
  }
}
bool Expr::isBoundMemberFunction(ASTContext &Ctx) const {
  // A type-dependent expression cannot be classified yet; otherwise a bound
  // member function is exactly an lvalue of member-function classification.
  return !isTypeDependent() && ClassifyLValue(Ctx) == Expr::LV_MemberFunction;
}
QualType Expr::findBoundMemberType(const Expr *expr) {
  assert(expr->hasPlaceholderType(BuiltinType::BoundMember));

  // Bound member expressions are always one of these possibilities:
  //   x->m      x.m      x->*y      x.*y
  // (possibly parenthesized)
  expr = expr->IgnoreParens();

  if (const MemberExpr *MemEx = dyn_cast<MemberExpr>(expr)) {
    // x->m / x.m: the member itself must be a method.
    assert(isa<CXXMethodDecl>(MemEx->getMemberDecl()));
    return MemEx->getMemberDecl()->getType();
  }

  if (const BinaryOperator *PtrMemOp = dyn_cast<BinaryOperator>(expr)) {
    // x->*y / x.*y: take the pointee type of the member-pointer RHS.
    QualType Pointee = PtrMemOp->getRHS()
                           ->getType()
                           ->castAs<MemberPointerType>()
                           ->getPointeeType();
    assert(Pointee->isFunctionType());
    return Pointee;
  }

  // Unresolved overload sets and pseudo-destructors have no single type.
  assert(isa<UnresolvedMemberExpr>(expr) || isa<CXXPseudoDestructorExpr>(expr));
  return QualType();
}
Expr* Expr::IgnoreParens() {
  // Walk down through "transparent" wrapper expressions until a node that is
  // not one of them is reached.
  Expr *Cur = this;
  while (true) {
    if (ParenExpr *Paren = dyn_cast<ParenExpr>(Cur)) {
      Cur = Paren->getSubExpr();
    } else if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Cur)) {
      // __extension__ is transparent for this purpose.
      if (UnOp->getOpcode() != UO_Extension)
        return Cur;
      Cur = UnOp->getSubExpr();
    } else if (GenericSelectionExpr *GSE =
                   dyn_cast<GenericSelectionExpr>(Cur)) {
      // A resolved _Generic stands for its chosen association.
      if (GSE->isResultDependent())
        return Cur;
      Cur = GSE->getResultExpr();
    } else if (ChooseExpr *CE = dyn_cast<ChooseExpr>(Cur)) {
      // __builtin_choose_expr with a known condition is transparent too.
      if (CE->isConditionDependent())
        return Cur;
      Cur = CE->getChosenSubExpr();
    } else {
      return Cur;
    }
  }
}
/// IgnoreParenCasts - Ignore parentheses and casts.  Strip off any ParenExpr
/// or CastExprs or ImplicitCastExprs, returning their operand.
Expr *Expr::IgnoreParenCasts() {
  Expr *Cur = this;
  while (true) {
    Cur = Cur->IgnoreParens();
    if (CastExpr *Cast = dyn_cast<CastExpr>(Cur)) {
      Cur = Cast->getSubExpr();
    } else if (MaterializeTemporaryExpr *MTE =
                   dyn_cast<MaterializeTemporaryExpr>(Cur)) {
      // Temporary materializations are transparent as well.
      Cur = MTE->GetTemporaryExpr();
    } else if (SubstNonTypeTemplateParmExpr *Subst =
                   dyn_cast<SubstNonTypeTemplateParmExpr>(Cur)) {
      // Look through substituted non-type template arguments.
      Cur = Subst->getReplacement();
    } else {
      return Cur;
    }
  }
}
/// IgnoreCasts - Like IgnoreParenCasts, but does not look through
/// parentheses.
Expr *Expr::IgnoreCasts() {
  Expr *Cur = this;
  while (true) {
    if (CastExpr *Cast = dyn_cast<CastExpr>(Cur)) {
      Cur = Cast->getSubExpr();
    } else if (MaterializeTemporaryExpr *MTE =
                   dyn_cast<MaterializeTemporaryExpr>(Cur)) {
      Cur = MTE->GetTemporaryExpr();
    } else if (SubstNonTypeTemplateParmExpr *Subst =
                   dyn_cast<SubstNonTypeTemplateParmExpr>(Cur)) {
      Cur = Subst->getReplacement();
    } else {
      return Cur;
    }
  }
}
/// IgnoreParenLValueCasts - Ignore parentheses and lvalue-to-rvalue
/// casts.  This is intended purely as a temporary workaround for code
/// that hasn't yet been rewritten to do the right thing about those
/// casts, and may disappear along with the last internal use.
Expr *Expr::IgnoreParenLValueCasts() {
  Expr *Cur = this;
  while (true) {
    Cur = Cur->IgnoreParens();
    if (CastExpr *Cast = dyn_cast<CastExpr>(Cur)) {
      // Only lvalue-to-rvalue casts are stripped; any other cast stops
      // the walk.
      if (Cast->getCastKind() != CK_LValueToRValue)
        return Cur;
      Cur = Cast->getSubExpr();
    } else if (MaterializeTemporaryExpr *MTE =
                   dyn_cast<MaterializeTemporaryExpr>(Cur)) {
      Cur = MTE->GetTemporaryExpr();
    } else if (SubstNonTypeTemplateParmExpr *Subst =
                   dyn_cast<SubstNonTypeTemplateParmExpr>(Cur)) {
      Cur = Subst->getReplacement();
    } else {
      return Cur;
    }
  }
}
/// ignoreParenBaseCasts - Ignore parentheses and derived-to-base (or no-op)
/// casts, returning the underlying expression.
Expr *Expr::ignoreParenBaseCasts() {
  Expr *Cur = this;
  while (true) {
    Cur = Cur->IgnoreParens();
    CastExpr *Cast = dyn_cast<CastExpr>(Cur);
    if (!Cast)
      return Cur;
    CastKind Kind = Cast->getCastKind();
    if (Kind != CK_DerivedToBase && Kind != CK_UncheckedDerivedToBase &&
        Kind != CK_NoOp)
      return Cur;
    Cur = Cast->getSubExpr();
  }
}
/// IgnoreParenImpCasts - Ignore parentheses and implicit casts (but not
/// explicit casts), returning the underlying expression.
Expr *Expr::IgnoreParenImpCasts() {
  Expr *Cur = this;
  while (true) {
    Cur = Cur->IgnoreParens();
    if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Cur)) {
      Cur = Cast->getSubExpr();
    } else if (MaterializeTemporaryExpr *MTE =
                   dyn_cast<MaterializeTemporaryExpr>(Cur)) {
      Cur = MTE->GetTemporaryExpr();
    } else if (SubstNonTypeTemplateParmExpr *Subst =
                   dyn_cast<SubstNonTypeTemplateParmExpr>(Cur)) {
      Cur = Subst->getReplacement();
    } else {
      return Cur;
    }
  }
}
/// IgnoreConversionOperator - If this expression is a call to a user-defined
/// conversion operator, return the object the conversion was applied to;
/// otherwise return the expression unchanged.
Expr *Expr::IgnoreConversionOperator() {
  CXXMemberCallExpr *Call = dyn_cast<CXXMemberCallExpr>(this);
  if (!Call)
    return this;
  CXXMethodDecl *Method = Call->getMethodDecl();
  if (Method && isa<CXXConversionDecl>(Method))
    return Call->getImplicitObjectArgument();
  return this;
}
/// IgnoreParenNoopCasts - Ignore parentheses and casts that do not change the
/// value (including ptr->int casts of the same size).  Strip off any
/// ParenExpr or CastExprs, returning their operand.
Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) {
  Expr *Cur = this;
  while (true) {
    Cur = Cur->IgnoreParens();
    if (CastExpr *Cast = dyn_cast<CastExpr>(Cur)) {
      Expr *Operand = Cast->getSubExpr();
      // Identity casts are always transparent.
      if (Ctx.hasSameUnqualifiedType(Cur->getType(), Operand->getType())) {
        Cur = Operand;
        continue;
      }
      // So are int<->int, ptr<->ptr and ptr<->int casts between types of
      // the same width.
      bool OuterScalar = Cur->getType()->isPointerType() ||
                         Cur->getType()->isIntegralType(Ctx);
      bool InnerScalar = Operand->getType()->isPointerType() ||
                         Operand->getType()->isIntegralType(Ctx);
      if (OuterScalar && InnerScalar &&
          Ctx.getTypeSize(Cur->getType()) ==
              Ctx.getTypeSize(Operand->getType())) {
        Cur = Operand;
        continue;
      }
    }
    if (SubstNonTypeTemplateParmExpr *Subst =
            dyn_cast<SubstNonTypeTemplateParmExpr>(Cur)) {
      Cur = Subst->getReplacement();
      continue;
    }
    return Cur;
  }
}
bool Expr::isDefaultArgument() const {
const Expr *E = this;
if (const MaterializeTemporaryExpr *M = dyn_cast<MaterializeTemporaryExpr>(E))
E = M->GetTemporaryExpr();
while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
E = ICE->getSubExprAsWritten();
return isa<CXXDefaultArgExpr>(E);
}
/// \brief Skip over any no-op casts and any temporary-binding
/// expressions.
static const Expr *skipTemporaryBindingsNoOpCastsAndParens(const Expr *E) {
  // Strips a run of no-op implicit casts; other cast kinds stop the walk.
  auto SkipNoOpCasts = [](const Expr *Cur) -> const Expr * {
    while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Cur)) {
      if (ICE->getCastKind() != CK_NoOp)
        break;
      Cur = ICE->getSubExpr();
    }
    return Cur;
  };

  if (const MaterializeTemporaryExpr *MTE =
          dyn_cast<MaterializeTemporaryExpr>(E))
    E = MTE->GetTemporaryExpr();

  // No-op casts may appear both outside and inside the temporary bindings.
  E = SkipNoOpCasts(E);
  while (const CXXBindTemporaryExpr *Bind = dyn_cast<CXXBindTemporaryExpr>(E))
    E = Bind->getSubExpr();
  E = SkipNoOpCasts(E);

  return E->IgnoreParens();
}
/// isTemporaryObject - Determines if this expression produces a
/// temporary of the given class type.
bool Expr::isTemporaryObject(ASTContext &C, const CXXRecordDecl *TempTy) const {
  if (!C.hasSameUnqualifiedType(getType(), C.getTypeDeclType(TempTy)))
    return false;

  const Expr *Inner = skipTemporaryBindingsNoOpCastsAndParens(this);

  // Temporaries are by definition pr-values of class type.
  if (!Inner->Classify(C).isPRValue()) {
    // In this context, property reference is a message call and is pr-value.
    if (!isa<ObjCPropertyRefExpr>(Inner))
      return false;
  }

  // Black-list a few cases which yield pr-values of class type that don't
  // refer to temporaries of that type:

  // - implicit derived-to-base conversions
  if (const ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Inner)) {
    CastKind Kind = Cast->getCastKind();
    if (Kind == CK_DerivedToBase || Kind == CK_UncheckedDerivedToBase)
      return false;
  }

  // - member expressions (all)
  if (isa<MemberExpr>(Inner))
    return false;

  // - pointer-to-member operators
  if (const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inner))
    if (BinOp->isPtrMemOp())
      return false;

  // - opaque values (all)
  if (isa<OpaqueValueExpr>(Inner))
    return false;

  return true;
}
bool Expr::isImplicitCXXThis() const {
const Expr *E = this;
// Strip away parentheses and casts we don't care about.
while (true) {
if (const ParenExpr *Paren = dyn_cast<ParenExpr>(E)) {
E = Paren->getSubExpr();
continue;
}
if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
if (ICE->getCastKind() == CK_NoOp ||
ICE->getCastKind() == CK_LValueToRValue ||
ICE->getCastKind() == CK_DerivedToBase ||
ICE->getCastKind() == CK_UncheckedDerivedToBase) {
E = ICE->getSubExpr();
continue;
}
}
if (const UnaryOperator* UnOp = dyn_cast<UnaryOperator>(E)) {
if (UnOp->getOpcode() == UO_Extension) {
E = UnOp->getSubExpr();
continue;
}
}
if (const MaterializeTemporaryExpr *M
= dyn_cast<MaterializeTemporaryExpr>(E)) {
E = M->GetTemporaryExpr();
continue;
}
break;
}
if (const CXXThisExpr *This = dyn_cast<CXXThisExpr>(E))
return This->isImplicit();
return false;
}
/// hasAnyTypeDependentArguments - Determines if any of the expressions
/// in Exprs is type-dependent.
bool Expr::hasAnyTypeDependentArguments(ArrayRef<Expr *> Exprs) {
  for (Expr *Arg : Exprs)
    if (Arg->isTypeDependent())
      return true;
  return false;
}
/// isConstantInitializer - Determine whether this expression can serve as a
/// compile-time constant initializer.  On failure, *Culprit (when non-null)
/// is set to the subexpression that prevented constant emission.  IsForRef
/// indicates the initializer binds a reference, in which case the expression
/// must be a side-effect-free constant lvalue.
bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
                                 const Expr **Culprit) const {
  // This function is attempting whether an expression is an initializer
  // which can be evaluated at compile-time. It very closely parallels
  // ConstExprEmitter in CGExprConstant.cpp; if they don't match, it
  // will lead to unexpected results. Like ConstExprEmitter, it falls back
  // to isEvaluatable most of the time.
  //
  // If we ever capture reference-binding directly in the AST, we can
  // kill the second parameter.
  if (IsForRef) {
    EvalResult Result;
    if (EvaluateAsLValue(Result, Ctx) && !Result.HasSideEffects)
      return true;
    if (Culprit)
      *Culprit = this;
    return false;
  }
  switch (getStmtClass()) {
  default: break;
  case StringLiteralClass:
  case ObjCEncodeExprClass:
    // Literal data; always emittable as a constant.
    return true;
  case CXXTemporaryObjectExprClass:
  case CXXConstructExprClass: {
    const CXXConstructExpr *CE = cast<CXXConstructExpr>(this);
    // Only trivially constructible/destructible objects qualify.
    if (CE->getConstructor()->isTrivial() &&
        CE->getConstructor()->getParent()->hasTrivialDestructor()) {
      // Trivial default constructor
      if (!CE->getNumArgs()) return true;
      // Trivial copy constructor
      assert(CE->getNumArgs() == 1 && "trivial ctor with > 1 argument");
      return CE->getArg(0)->isConstantInitializer(Ctx, false, Culprit);
    }
    break;
  }
  case CompoundLiteralExprClass: {
    // This handles gcc's extension that allows global initializers like
    // "struct x {int x;} x = (struct x) {};".
    // FIXME: This accepts other cases it shouldn't!
    const Expr *Exp = cast<CompoundLiteralExpr>(this)->getInitializer();
    return Exp->isConstantInitializer(Ctx, false, Culprit);
  }
  case DesignatedInitUpdateExprClass: {
    // Both the base object and the designated updates must be constant.
    const DesignatedInitUpdateExpr *DIUE = cast<DesignatedInitUpdateExpr>(this);
    return DIUE->getBase()->isConstantInitializer(Ctx, false, Culprit) &&
           DIUE->getUpdater()->isConstantInitializer(Ctx, false, Culprit);
  }
  case InitListExprClass: {
    const InitListExpr *ILE = cast<InitListExpr>(this);
    // Arrays: every explicit element initializer must itself be constant.
    if (ILE->getType()->isArrayType()) {
      unsigned numInits = ILE->getNumInits();
      for (unsigned i = 0; i < numInits; i++) {
        if (!ILE->getInit(i)->isConstantInitializer(Ctx, false, Culprit))
          return false;
      }
      return true;
    }
    // Records: walk the fields and the init list in parallel.
    if (ILE->getType()->isRecordType()) {
      unsigned ElementNo = 0;
      RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
      for (const auto *Field : RD->fields()) {
        // If this is a union, skip all the fields that aren't being initialized.
        if (RD->isUnion() && ILE->getInitializedFieldInUnion() != Field)
          continue;
        // Don't emit anonymous bitfields, they just affect layout.
        if (Field->isUnnamedBitfield())
          continue;
        if (ElementNo < ILE->getNumInits()) {
          const Expr *Elt = ILE->getInit(ElementNo++);
          if (Field->isBitField()) {
            // Bitfields have to evaluate to an integer.
            llvm::APSInt ResultTmp;
            if (!Elt->EvaluateAsInt(ResultTmp, Ctx)) {
              if (Culprit)
                *Culprit = Elt;
              return false;
            }
          } else {
            // Reference members must satisfy the stricter IsForRef rules.
            bool RefType = Field->getType()->isReferenceType();
            if (!Elt->isConstantInitializer(Ctx, RefType, Culprit))
              return false;
          }
        }
      }
      return true;
    }
    break;
  }
  case ImplicitValueInitExprClass:
  case NoInitExprClass:
    // Zero-initialization / absence of initialization is trivially constant.
    return true;
  case ParenExprClass:
    return cast<ParenExpr>(this)->getSubExpr()
      ->isConstantInitializer(Ctx, IsForRef, Culprit);
  case GenericSelectionExprClass:
    return cast<GenericSelectionExpr>(this)->getResultExpr()
      ->isConstantInitializer(Ctx, IsForRef, Culprit);
  case ChooseExprClass:
    if (cast<ChooseExpr>(this)->isConditionDependent()) {
      if (Culprit)
        *Culprit = this;
      return false;
    }
    return cast<ChooseExpr>(this)->getChosenSubExpr()
      ->isConstantInitializer(Ctx, IsForRef, Culprit);
  case UnaryOperatorClass: {
    const UnaryOperator* Exp = cast<UnaryOperator>(this);
    // __extension__ is transparent for constant evaluation.
    if (Exp->getOpcode() == UO_Extension)
      return Exp->getSubExpr()->isConstantInitializer(Ctx, false, Culprit);
    break;
  }
  case CXXFunctionalCastExprClass:
  case CXXStaticCastExprClass:
  case ImplicitCastExprClass:
  case CStyleCastExprClass:
  case ObjCBridgedCastExprClass:
  case CXXDynamicCastExprClass:
  case CXXReinterpretCastExprClass:
  case CXXConstCastExprClass: {
    const CastExpr *CE = cast<CastExpr>(this);
    // Handle misc casts we want to ignore.
    if (CE->getCastKind() == CK_NoOp ||
        CE->getCastKind() == CK_LValueToRValue ||
        CE->getCastKind() == CK_ToUnion ||
        CE->getCastKind() == CK_ConstructorConversion ||
        CE->getCastKind() == CK_NonAtomicToAtomic ||
        CE->getCastKind() == CK_AtomicToNonAtomic)
      return CE->getSubExpr()->isConstantInitializer(Ctx, false, Culprit);
    break;
  }
  case MaterializeTemporaryExprClass:
    return cast<MaterializeTemporaryExpr>(this)->GetTemporaryExpr()
      ->isConstantInitializer(Ctx, false, Culprit);
  case SubstNonTypeTemplateParmExprClass:
    return cast<SubstNonTypeTemplateParmExpr>(this)->getReplacement()
      ->isConstantInitializer(Ctx, false, Culprit);
  case CXXDefaultArgExprClass:
    return cast<CXXDefaultArgExpr>(this)->getExpr()
      ->isConstantInitializer(Ctx, false, Culprit);
  case CXXDefaultInitExprClass:
    return cast<CXXDefaultInitExpr>(this)->getExpr()
      ->isConstantInitializer(Ctx, false, Culprit);
  }
  // Anything not special-cased above: fall back on the constant evaluator.
  if (isEvaluatable(Ctx))
    return true;
  if (Culprit)
    *Culprit = this;
  return false;
}
namespace {
  /// \brief Look for any side effects within a Stmt.
  ///
  /// Used by Expr::HasSideEffects for StmtExpr bodies, whose substatements
  /// are not all expressions and so cannot be queried directly.
  class SideEffectFinder : public ConstEvaluatedExprVisitor<SideEffectFinder> {
    typedef ConstEvaluatedExprVisitor<SideEffectFinder> Inherited;
    const bool IncludePossibleEffects; // forwarded to Expr::HasSideEffects
    bool HasSideEffects;               // latched true once any effect is seen
  public:
    explicit SideEffectFinder(const ASTContext &Context, bool IncludePossible)
      : Inherited(Context),
        IncludePossibleEffects(IncludePossible), HasSideEffects(false) { }
    bool hasSideEffects() const { return HasSideEffects; }
    void VisitExpr(const Expr *E) {
      // Skip the (potentially expensive) query once an effect has been found.
      if (!HasSideEffects &&
          E->HasSideEffects(Context, IncludePossibleEffects))
        HasSideEffects = true;
    }
  };
}
/// HasSideEffects - Determine whether evaluating this expression could have
/// side effects.  With IncludePossibleEffects set, constructs that *might*
/// have effects (arbitrary calls, ObjC messages, volatile reads) are
/// reported; otherwise only definite side effects count.
bool Expr::HasSideEffects(const ASTContext &Ctx,
                          bool IncludePossibleEffects) const {
  // In circumstances where we care about definite side effects instead of
  // potential side effects, we want to ignore expressions that are part of a
  // macro expansion as a potential side effect.
  if (!IncludePossibleEffects && getExprLoc().isMacroID())
    return false;
  if (isInstantiationDependent())
    return IncludePossibleEffects;
  switch (getStmtClass()) {
  case NoStmtClass:
  // Enumerate every non-expression statement class; none can appear here.
#define ABSTRACT_STMT(Type)
#define STMT(Type, Base) case Type##Class:
#define EXPR(Type, Base)
#include "clang/AST/StmtNodes.inc"
    llvm_unreachable("unexpected Expr kind");
  case DependentScopeDeclRefExprClass:
  case CXXUnresolvedConstructExprClass:
  case CXXDependentScopeMemberExprClass:
  case UnresolvedLookupExprClass:
  case UnresolvedMemberExprClass:
  case PackExpansionExprClass:
  case SubstNonTypeTemplateParmPackExprClass:
  case FunctionParmPackExprClass:
  case TypoExprClass:
  case CXXFoldExprClass:
    // Dependent nodes were filtered out by the isInstantiationDependent()
    // check above.
    llvm_unreachable("shouldn't see dependent / unresolved nodes here");
  case DeclRefExprClass:
  case ObjCIvarRefExprClass:
  case PredefinedExprClass:
  case IntegerLiteralClass:
  case FloatingLiteralClass:
  case ImaginaryLiteralClass:
  case StringLiteralClass:
  case CharacterLiteralClass:
  case OffsetOfExprClass:
  case ImplicitValueInitExprClass:
  case UnaryExprOrTypeTraitExprClass:
  case AddrLabelExprClass:
  case GNUNullExprClass:
  case NoInitExprClass:
  case CXXBoolLiteralExprClass:
  case CXXNullPtrLiteralExprClass:
  case CXXThisExprClass:
  case CXXScalarValueInitExprClass:
  case TypeTraitExprClass:
  case ArrayTypeTraitExprClass:
  case ExpressionTraitExprClass:
  case CXXNoexceptExprClass:
  case SizeOfPackExprClass:
  case ObjCStringLiteralClass:
  case ObjCEncodeExprClass:
  case ObjCBoolLiteralExprClass:
  case CXXUuidofExprClass:
  case OpaqueValueExprClass:
    // These never have a side-effect.
    return false;
  case CallExprClass:
  case CXXOperatorCallExprClass:
  case CXXMemberCallExprClass:
  case CUDAKernelCallExprClass:
  case UserDefinedLiteralClass: {
    // We don't know a call definitely has side effects, except for calls
    // to pure/const functions that definitely don't.
    // If the call itself is considered side-effect free, check the operands.
    const Decl *FD = cast<CallExpr>(this)->getCalleeDecl();
    bool IsPure = FD && (FD->hasAttr<ConstAttr>() || FD->hasAttr<PureAttr>());
    if (IsPure || !IncludePossibleEffects)
      break;
    return true;
  }
  case BlockExprClass:
  case CXXBindTemporaryExprClass:
    // Block captures and temporary destructors are only *possible* effects.
    if (!IncludePossibleEffects)
      break;
    return true;
  case MSPropertyRefExprClass:
  case CompoundAssignOperatorClass:
  case VAArgExprClass:
  case AtomicExprClass:
  case CXXThrowExprClass:
  case CXXNewExprClass:
  case CXXDeleteExprClass:
  case ExprWithCleanupsClass:
    // These always have a side-effect.
    return true;
  case StmtExprClass: {
    // StmtExprs have a side-effect if any substatement does.
    SideEffectFinder Finder(Ctx, IncludePossibleEffects);
    Finder.Visit(cast<StmtExpr>(this)->getSubStmt());
    return Finder.hasSideEffects();
  }
  case ParenExprClass:
  case ArraySubscriptExprClass:
  case MemberExprClass:
  case ConditionalOperatorClass:
  case BinaryConditionalOperatorClass:
  case CompoundLiteralExprClass:
  case ExtVectorElementExprClass:
  case ExtMatrixElementExprClass: // HLSL Change
  case HLSLVectorElementExprClass: // HLSL Change
  case DesignatedInitExprClass:
  case DesignatedInitUpdateExprClass:
  case ParenListExprClass:
  case CXXPseudoDestructorExprClass:
  case CXXStdInitializerListExprClass:
  case SubstNonTypeTemplateParmExprClass:
  case MaterializeTemporaryExprClass:
  case ShuffleVectorExprClass:
  case ConvertVectorExprClass:
  case AsTypeExprClass:
    // These have a side-effect if any subexpression does.
    break;
  case UnaryOperatorClass:
    if (cast<UnaryOperator>(this)->isIncrementDecrementOp())
      return true;
    break;
  case BinaryOperatorClass:
    if (cast<BinaryOperator>(this)->isAssignmentOp())
      return true;
    break;
  case InitListExprClass:
    // FIXME: The children for an InitListExpr doesn't include the array filler.
    if (const Expr *E = cast<InitListExpr>(this)->getArrayFiller())
      if (E->HasSideEffects(Ctx, IncludePossibleEffects))
        return true;
    break;
  case GenericSelectionExprClass:
    return cast<GenericSelectionExpr>(this)->getResultExpr()->
        HasSideEffects(Ctx, IncludePossibleEffects);
  case ChooseExprClass:
    return cast<ChooseExpr>(this)->getChosenSubExpr()->HasSideEffects(
        Ctx, IncludePossibleEffects);
  case CXXDefaultArgExprClass:
    return cast<CXXDefaultArgExpr>(this)->getExpr()->HasSideEffects(
        Ctx, IncludePossibleEffects);
  case CXXDefaultInitExprClass: {
    const FieldDecl *FD = cast<CXXDefaultInitExpr>(this)->getField();
    if (const Expr *E = FD->getInClassInitializer())
      return E->HasSideEffects(Ctx, IncludePossibleEffects);
    // If we've not yet parsed the initializer, assume it has side-effects.
    return true;
  }
  case CXXDynamicCastExprClass: {
    // A dynamic_cast expression has side-effects if it can throw.
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(this);
    if (DCE->getTypeAsWritten()->isReferenceType() &&
        DCE->getCastKind() == CK_Dynamic)
      return true;
  } LLVM_FALLTHROUGH; // HLSL Change.
  case ImplicitCastExprClass:
  case CStyleCastExprClass:
  case CXXStaticCastExprClass:
  case CXXReinterpretCastExprClass:
  case CXXConstCastExprClass:
  case CXXFunctionalCastExprClass: {
    // While volatile reads are side-effecting in both C and C++, we treat them
    // as having possible (not definite) side-effects. This allows idiomatic
    // code to behave without warning, such as sizeof(*v) for a volatile-
    // qualified pointer.
    if (!IncludePossibleEffects)
      break;
    const CastExpr *CE = cast<CastExpr>(this);
    if (CE->getCastKind() == CK_LValueToRValue &&
        CE->getSubExpr()->getType().isVolatileQualified())
      return true;
    break;
  }
  case CXXTypeidExprClass:
    // typeid might throw if its subexpression is potentially-evaluated, so has
    // side-effects in that case whether or not its subexpression does.
    return cast<CXXTypeidExpr>(this)->isPotentiallyEvaluated();
  case CXXConstructExprClass:
  case CXXTemporaryObjectExprClass: {
    const CXXConstructExpr *CE = cast<CXXConstructExpr>(this);
    if (!CE->getConstructor()->isTrivial() && IncludePossibleEffects)
      return true;
    // A trivial constructor does not add any side-effects of its own. Just look
    // at its arguments.
    break;
  }
  case LambdaExprClass: {
    const LambdaExpr *LE = cast<LambdaExpr>(this);
    for (LambdaExpr::capture_iterator I = LE->capture_begin(),
                                      E = LE->capture_end(); I != E; ++I)
      if (I->getCaptureKind() == LCK_ByCopy)
        // FIXME: Only has a side-effect if the variable is volatile or if
        // the copy would invoke a non-trivial copy constructor.
        return true;
    return false;
  }
  case PseudoObjectExprClass: {
    // Only look for side-effects in the semantic form, and look past
    // OpaqueValueExpr bindings in that form.
    const PseudoObjectExpr *PO = cast<PseudoObjectExpr>(this);
    for (PseudoObjectExpr::const_semantics_iterator I = PO->semantics_begin(),
                                                    E = PO->semantics_end();
         I != E; ++I) {
      const Expr *Subexpr = *I;
      if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Subexpr))
        Subexpr = OVE->getSourceExpr();
      if (Subexpr->HasSideEffects(Ctx, IncludePossibleEffects))
        return true;
    }
    return false;
  }
  case ObjCBoxedExprClass:
  case ObjCArrayLiteralClass:
  case ObjCDictionaryLiteralClass:
  case ObjCSelectorExprClass:
  case ObjCProtocolExprClass:
  case ObjCIsaExprClass:
  case ObjCIndirectCopyRestoreExprClass:
  case ObjCSubscriptRefExprClass:
  case ObjCBridgedCastExprClass:
  case ObjCMessageExprClass:
  case ObjCPropertyRefExprClass:
  // FIXME: Classify these cases better.
    if (IncludePossibleEffects)
      return true;
    break;
  }
  // Recurse to children.
  for (const Stmt *SubStmt : children())
    if (SubStmt &&
        cast<Expr>(SubStmt)->HasSideEffects(Ctx, IncludePossibleEffects))
      return true;
  return false;
}
namespace {
  /// \brief Look for a call to a non-trivial function within an expression.
  ///
  /// Implicit uses of non-trivial constructors and destructors count as
  /// calls for this purpose.
  class NonTrivialCallFinder : public ConstEvaluatedExprVisitor<NonTrivialCallFinder>
  {
    typedef ConstEvaluatedExprVisitor<NonTrivialCallFinder> Inherited;

    bool NonTrivial; // latched true once a non-trivial call is seen

  public:
    explicit NonTrivialCallFinder(const ASTContext &Context)
        : Inherited(Context), NonTrivial(false) {}

    bool hasNonTrivialCall() const { return NonTrivial; }

    void VisitCallExpr(const CallExpr *E) {
      // A call is trivial only when it resolves to a trivial method; in that
      // case keep scanning its arguments.
      const CXXMethodDecl *Method =
          dyn_cast_or_null<const CXXMethodDecl>(E->getCalleeDecl());
      if (Method && Method->isTrivial()) {
        Inherited::VisitStmt(E);
        return;
      }
      NonTrivial = true;
    }

    void VisitCXXConstructExpr(const CXXConstructExpr *E) {
      // Trivial construction contributes nothing; look at the arguments.
      if (E->getConstructor()->isTrivial()) {
        Inherited::VisitStmt(E);
        return;
      }
      NonTrivial = true;
    }

    void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *E) {
      // Binding a temporary implies a destructor call; only a trivial
      // destructor keeps the expression trivial.
      if (E->getTemporary()->getDestructor()->isTrivial()) {
        Inherited::VisitStmt(E);
        return;
      }
      NonTrivial = true;
    }
  };
}
/// hasNonTrivialCall - Determine whether this expression contains a call to
/// (or implicit use of) any non-trivial function, constructor, or destructor.
bool Expr::hasNonTrivialCall(const ASTContext &Ctx) const {
  NonTrivialCallFinder Finder(Ctx);
  Finder.Visit(this);
  return Finder.hasNonTrivialCall();
}
/// isNullPointerConstant - C99 6.3.2.3p3 - Return whether this is a null
/// pointer constant or not, as well as the specific kind of constant detected.
/// Null pointer constants can be integer constant expressions with the
/// value zero, casts of zero to void*, nullptr (C++0X), or __null
/// (a GNU extension).
Expr::NullPointerConstantKind
Expr::isNullPointerConstant(ASTContext &Ctx,
                            NullPointerConstantValueDependence NPC) const {
  // A value-dependent expression cannot be evaluated here; the NPC policy
  // from the caller decides the answer.  In C++11 (without MSVC
  // compatibility) value-dependent expressions can still be classified
  // below, so we fall through in that case.
  if (isValueDependent() &&
      (!Ctx.getLangOpts().CPlusPlus11 || Ctx.getLangOpts().MSVCCompat)) {
    switch (NPC) {
    case NPC_NeverValueDependent:
      llvm_unreachable("Unexpected value dependent expression!");
    case NPC_ValueDependentIsNull:
      if (isTypeDependent() || getType()->isIntegralType(Ctx))
        return NPCK_ZeroExpression;
      else
        return NPCK_NotNull;
    case NPC_ValueDependentIsNotNull:
      return NPCK_NotNull;
    }
  }
  // Strip off a cast to void*, if it exists. Except in C++.
  if (const ExplicitCastExpr *CE = dyn_cast<ExplicitCastExpr>(this)) {
    if (!Ctx.getLangOpts().CPlusPlus) {
      // Check that it is a cast to void*.
      if (const PointerType *PT = CE->getType()->getAs<PointerType>()) {
        QualType Pointee = PT->getPointeeType();
        if (!Pointee.hasQualifiers() &&
            Pointee->isVoidType() &&                              // to void*
            CE->getSubExpr()->getType()->isIntegerType())         // from int.
          return CE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
      }
    }
  } else if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(this)) {
    // Ignore the ImplicitCastExpr type entirely.
    return ICE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
  } else if (const ParenExpr *PE = dyn_cast<ParenExpr>(this)) {
    // Accept ((void*)0) as a null pointer constant, as many other
    // implementations do.
    return PE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
  } else if (const GenericSelectionExpr *GE =
               dyn_cast<GenericSelectionExpr>(this)) {
    if (GE->isResultDependent())
      return NPCK_NotNull;
    return GE->getResultExpr()->isNullPointerConstant(Ctx, NPC);
  } else if (const ChooseExpr *CE = dyn_cast<ChooseExpr>(this)) {
    if (CE->isConditionDependent())
      return NPCK_NotNull;
    return CE->getChosenSubExpr()->isNullPointerConstant(Ctx, NPC);
  } else if (const CXXDefaultArgExpr *DefaultArg
               = dyn_cast<CXXDefaultArgExpr>(this)) {
    // See through default argument expressions.
    return DefaultArg->getExpr()->isNullPointerConstant(Ctx, NPC);
  } else if (const CXXDefaultInitExpr *DefaultInit
               = dyn_cast<CXXDefaultInitExpr>(this)) {
    // See through default initializer expressions.
    return DefaultInit->getExpr()->isNullPointerConstant(Ctx, NPC);
  } else if (isa<GNUNullExpr>(this)) {
    // The GNU __null extension is always a null pointer constant.
    return NPCK_GNUNull;
  } else if (const MaterializeTemporaryExpr *M
               = dyn_cast<MaterializeTemporaryExpr>(this)) {
    return M->GetTemporaryExpr()->isNullPointerConstant(Ctx, NPC);
  } else if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(this)) {
    if (const Expr *Source = OVE->getSourceExpr())
      return Source->isNullPointerConstant(Ctx, NPC);
  }

  // C++11 nullptr_t is always a null pointer constant.
  if (getType()->isNullPtrType())
    return NPCK_CXX11_nullptr;

  // In C, a compound literal of a transparent union type is a null pointer
  // constant if its first member initializer is one.
  if (const RecordType *UT = getType()->getAsUnionType())
    if (!Ctx.getLangOpts().CPlusPlus11 &&
        UT && UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(this)){
        const Expr *InitExpr = CLE->getInitializer();
        if (const InitListExpr *ILE = dyn_cast<InitListExpr>(InitExpr))
          return ILE->getInit(0)->isNullPointerConstant(Ctx, NPC);
      }
  // This expression must be an integer type.
  if (!getType()->isIntegerType() ||
      (Ctx.getLangOpts().CPlusPlus && getType()->isEnumeralType()))
    return NPCK_NotNull;

  // HLSL Change Begin -External variable is in cbuffer, cannot use as immediate.
  if (getStmtClass() == Stmt::DeclRefExprClass && Ctx.getLangOpts().HLSL) {
    const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(this);
    const ValueDecl *VD = DRE->getDecl();
    // External variable is in cbuffer, cannot use as immediate.
    if (VD->hasExternalFormalLinkage() &&
        !isa<EnumConstantDecl>(VD))
      return NPCK_NotNull;
  }
  // HLSL Change End.

  if (Ctx.getLangOpts().CPlusPlus11) {
    // C++11 [conv.ptr]p1: A null pointer constant is an integer literal with
    // value zero or a prvalue of type std::nullptr_t.
    // Microsoft mode permits C++98 rules reflecting MSVC behavior.
    const IntegerLiteral *Lit = dyn_cast<IntegerLiteral>(this);
    if (Lit && !Lit->getValue())
      return NPCK_ZeroLiteral;
    else if (!Ctx.getLangOpts().MSVCCompat || !isCXX98IntegralConstantExpr(Ctx))
      return NPCK_NotNull;
  } else {
    // If we have an integer constant expression, we need to *evaluate* it and
    // test for the value 0.
    if (!isIntegerConstantExpr(Ctx))
      return NPCK_NotNull;
  }

  // Only a constant that actually folds to zero is a null pointer constant.
  if (EvaluateKnownConstInt(Ctx) != 0)
    return NPCK_NotNull;

  if (isa<IntegerLiteral>(this))
    return NPCK_ZeroLiteral;
  return NPCK_ZeroExpression;
}
/// \brief If this expression is an l-value for an Objective C
/// property, find the underlying property reference expression.
///
/// Walks through parens, casts, and the RHS of comma operators until the
/// ObjCPropertyRefExpr itself is reached; asserts the l-value/property
/// invariant at every step.
const ObjCPropertyRefExpr *Expr::getObjCProperty() const {
  const Expr *Cur = this;
  for (;;) {
    assert((Cur->getValueKind() == VK_LValue &&
            Cur->getObjectKind() == OK_ObjCProperty) &&
           "expression is not a property reference");
    Cur = Cur->IgnoreParenCasts();
    const BinaryOperator *Comma = dyn_cast<BinaryOperator>(Cur);
    if (!Comma || Comma->getOpcode() != BO_Comma)
      break;
    // A comma expression yields its right-hand side.
    Cur = Comma->getRHS();
  }
  return cast<ObjCPropertyRefExpr>(Cur);
}
bool Expr::isObjCSelfExpr() const {
const Expr *E = IgnoreParenImpCasts();
const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
if (!DRE)
return false;
const ImplicitParamDecl *Param = dyn_cast<ImplicitParamDecl>(DRE->getDecl());
if (!Param)
return false;
const ObjCMethodDecl *M = dyn_cast<ObjCMethodDecl>(Param->getDeclContext());
if (!M)
return false;
return M->getSelfDecl() == Param;
}
/// If this expression ultimately names a bit-field, return that field's
/// declaration; otherwise return null.  Looks through parens,
/// lvalue-to-rvalue conversions, no-op glvalue casts, assignments, comma
/// operators, and prefix increment/decrement.
FieldDecl *Expr::getSourceBitField() {
  Expr *Stripped = IgnoreParens();
  // Walk through value-preserving implicit casts.
  while (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Stripped)) {
    bool SeeThrough = Cast->getCastKind() == CK_LValueToRValue ||
                      (Cast->getValueKind() != VK_RValue &&
                       Cast->getCastKind() == CK_NoOp);
    if (!SeeThrough)
      break;
    Stripped = Cast->getSubExpr()->IgnoreParens();
  }
  if (MemberExpr *Member = dyn_cast<MemberExpr>(Stripped)) {
    FieldDecl *FD = dyn_cast<FieldDecl>(Member->getMemberDecl());
    if (FD && FD->isBitField())
      return FD;
  }
  if (ObjCIvarRefExpr *IvarRef = dyn_cast<ObjCIvarRefExpr>(Stripped)) {
    FieldDecl *Ivar = dyn_cast<FieldDecl>(IvarRef->getDecl());
    if (Ivar && Ivar->isBitField())
      return Ivar;
  }
  if (DeclRefExpr *Ref = dyn_cast<DeclRefExpr>(Stripped)) {
    FieldDecl *FD = dyn_cast<FieldDecl>(Ref->getDecl());
    if (FD && FD->isBitField())
      return FD;
  }
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Stripped)) {
    // An assignment yields its LHS; a comma yields its RHS.
    if (BinOp->isAssignmentOp() && BinOp->getLHS())
      return BinOp->getLHS()->getSourceBitField();
    if (BinOp->getOpcode() == BO_Comma && BinOp->getRHS())
      return BinOp->getRHS()->getSourceBitField();
  }
  if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Stripped))
    // Prefix ++/-- yields its operand.
    if (UnOp->isPrefix() && UnOp->isIncrementDecrementOp())
      return UnOp->getSubExpr()->getSourceBitField();
  return nullptr;
}
bool Expr::refersToVectorElement() const {
const Expr *E = this->IgnoreParens();
while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
if (ICE->getValueKind() != VK_RValue &&
ICE->getCastKind() == CK_NoOp)
E = ICE->getSubExpr()->IgnoreParens();
else
break;
}
if (const ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(E))
return ASE->getBase()->getType()->isVectorType();
if (isa<ExtVectorElementExpr>(E))
return true;
return false;
}
/// isArrow - Return true if the base expression is a pointer to vector,
/// return false if the base expression is a vector.
bool ExtVectorElementExpr::isArrow() const {
  return getBase()->getType()->isPointerType();
}

/// Return the number of elements this swizzle produces: the element count
/// of the result vector type, or 1 when the result is a scalar.
unsigned ExtVectorElementExpr::getNumElements() const {
  if (const VectorType *VT = getType()->getAs<VectorType>())
    return VT->getNumElements();
  return 1;
}
/// containsDuplicateElements - Return true if any element access is repeated.
bool ExtVectorElementExpr::containsDuplicateElements() const {
  // FIXME: Refactor this code to an accessor on the AST node which returns the
  // "type" of component access, and share with code below and in Sema.
  StringRef Comp = Accessor->getName();

  // Halving swizzles ("hi"/"lo"/"even"/"odd") never repeat an element.
  if (Comp == "hi" || Comp == "lo" || Comp == "even" || Comp == "odd")
    return false;

  // Skip the 's'/'S' prefix that introduces a numeric (hex) swizzle.
  if (Comp[0] == 's' || Comp[0] == 'S')
    Comp = Comp.substr(1);

  // A component is duplicated iff it occurs again later in the accessor.
  for (unsigned Idx = 0, Size = Comp.size(); Idx != Size; ++Idx)
    if (Comp.find(Comp[Idx], Idx + 1) != StringRef::npos)
      return true;

  return false;
}
/// getEncodedElementAccess - We encode the fields as a llvm ConstantArray.
///
/// Translates the accessor string (including the "hi"/"lo"/"even"/"odd"
/// halving swizzles) into a list of element indices.
void ExtVectorElementExpr::getEncodedElementAccess(
    SmallVectorImpl<unsigned> &Elts) const {
  StringRef Comp = Accessor->getName();
  // Skip the 's'/'S' prefix on hex swizzles.
  if (Comp[0] == 's' || Comp[0] == 'S')
    Comp = Comp.substr(1);

  const bool IsHi = Comp == "hi";
  const bool IsLo = Comp == "lo";
  const bool IsEven = Comp == "even";
  const bool IsOdd = Comp == "odd";

  for (unsigned I = 0, N = getNumElements(); I != N; ++I) {
    uint64_t Idx;
    if (IsHi)
      Idx = N + I;        // upper half of the source vector
    else if (IsLo)
      Idx = I;            // lower half
    else if (IsEven)
      Idx = 2 * I;        // even lanes
    else if (IsOdd)
      Idx = 2 * I + 1;    // odd lanes
    else
      Idx = ExtVectorType::getAccessorIdx(Comp[I]);

    Elts.push_back(Idx);
  }
}
/// Construct a message send to 'super' (instance or class flavor).
/// SelectorOrMethod stores either the resolved method or, when no method
/// is known, the selector as an opaque pointer; the receiver slot holds
/// the type of 'super'.
ObjCMessageExpr::ObjCMessageExpr(QualType T,
                                 ExprValueKind VK,
                                 SourceLocation LBracLoc,
                                 SourceLocation SuperLoc,
                                 bool IsInstanceSuper,
                                 QualType SuperType,
                                 Selector Sel,
                                 ArrayRef<SourceLocation> SelLocs,
                                 SelectorLocationsKind SelLocsK,
                                 ObjCMethodDecl *Method,
                                 ArrayRef<Expr *> Args,
                                 SourceLocation RBracLoc,
                                 bool isImplicit)
  : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
         /*TypeDependent=*/false, /*ValueDependent=*/false,
         /*InstantiationDependent=*/false,
         /*ContainsUnexpandedParameterPack=*/false),
    SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
                                                       : Sel.getAsOpaquePtr())),
    Kind(IsInstanceSuper? SuperInstance : SuperClass),
    HasMethod(Method != nullptr), IsDelegateInitCall(false),
    IsImplicit(isImplicit), SuperLoc(SuperLoc), LBracLoc(LBracLoc),
    RBracLoc(RBracLoc)
{
  initArgsAndSelLocs(Args, SelLocs, SelLocsK);
  setReceiverPointer(SuperType.getAsOpaquePtr());
}

/// Construct a class message send; the receiver slot holds the
/// TypeSourceInfo for the receiving class.  Dependence bits start from
/// the result type and are augmented per-argument in initArgsAndSelLocs.
ObjCMessageExpr::ObjCMessageExpr(QualType T,
                                 ExprValueKind VK,
                                 SourceLocation LBracLoc,
                                 TypeSourceInfo *Receiver,
                                 Selector Sel,
                                 ArrayRef<SourceLocation> SelLocs,
                                 SelectorLocationsKind SelLocsK,
                                 ObjCMethodDecl *Method,
                                 ArrayRef<Expr *> Args,
                                 SourceLocation RBracLoc,
                                 bool isImplicit)
  : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, T->isDependentType(),
         T->isDependentType(), T->isInstantiationDependentType(),
         T->containsUnexpandedParameterPack()),
    SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
                                                       : Sel.getAsOpaquePtr())),
    Kind(Class),
    HasMethod(Method != nullptr), IsDelegateInitCall(false),
    IsImplicit(isImplicit), LBracLoc(LBracLoc), RBracLoc(RBracLoc)
{
  initArgsAndSelLocs(Args, SelLocs, SelLocsK);
  setReceiverPointer(Receiver);
}

/// Construct an instance message send; the receiver slot holds the
/// receiver expression, and the receiver's dependence seeds the
/// expression's dependence bits.
ObjCMessageExpr::ObjCMessageExpr(QualType T,
                                 ExprValueKind VK,
                                 SourceLocation LBracLoc,
                                 Expr *Receiver,
                                 Selector Sel,
                                 ArrayRef<SourceLocation> SelLocs,
                                 SelectorLocationsKind SelLocsK,
                                 ObjCMethodDecl *Method,
                                 ArrayRef<Expr *> Args,
                                 SourceLocation RBracLoc,
                                 bool isImplicit)
  : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, Receiver->isTypeDependent(),
         Receiver->isTypeDependent(),
         Receiver->isInstantiationDependent(),
         Receiver->containsUnexpandedParameterPack()),
    SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
                                                       : Sel.getAsOpaquePtr())),
    Kind(Instance),
    HasMethod(Method != nullptr), IsDelegateInitCall(false),
    IsImplicit(isImplicit), LBracLoc(LBracLoc), RBracLoc(RBracLoc)
{
  initArgsAndSelLocs(Args, SelLocs, SelLocsK);
  setReceiverPointer(Receiver);
}
/// Copy the arguments into trailing storage, OR each argument's dependence
/// bits into this expression's bits, and record selector locations when
/// they are stored in non-standard form.
void ObjCMessageExpr::initArgsAndSelLocs(ArrayRef<Expr *> Args,
                                         ArrayRef<SourceLocation> SelLocs,
                                         SelectorLocationsKind SelLocsK) {
  setNumArgs(Args.size());
  Expr **MyArgs = getArgs();
  for (unsigned I = 0; I != Args.size(); ++I) {
    // Dependence bits are monotone: any dependent argument makes the whole
    // message send dependent in that dimension.
    if (Args[I]->isTypeDependent())
      ExprBits.TypeDependent = true;
    if (Args[I]->isValueDependent())
      ExprBits.ValueDependent = true;
    if (Args[I]->isInstantiationDependent())
      ExprBits.InstantiationDependent = true;
    if (Args[I]->containsUnexpandedParameterPack())
      ExprBits.ContainsUnexpandedParameterPack = true;

    MyArgs[I] = Args[I];
  }

  SelLocsKind = SelLocsK;
  if (!isImplicit()) {
    // Standard-layout selector locations can be recomputed on demand; only
    // non-standard ones are stored explicitly.
    if (SelLocsK == SelLoc_NonStandard)
      std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs());
  }
}
/// Create a message send to 'super'.  Implicit messages never store
/// selector locations; otherwise alloc() decides (via SelLocsK) whether
/// the locations need trailing storage.
ObjCMessageExpr *ObjCMessageExpr::Create(const ASTContext &Context, QualType T,
                                         ExprValueKind VK,
                                         SourceLocation LBracLoc,
                                         SourceLocation SuperLoc,
                                         bool IsInstanceSuper,
                                         QualType SuperType,
                                         Selector Sel,
                                         ArrayRef<SourceLocation> SelLocs,
                                         ObjCMethodDecl *Method,
                                         ArrayRef<Expr *> Args,
                                         SourceLocation RBracLoc,
                                         bool isImplicit) {
  assert((!SelLocs.empty() || isImplicit) &&
         "No selector locs for non-implicit message");
  ObjCMessageExpr *Mem;
  SelectorLocationsKind SelLocsK = SelectorLocationsKind();
  if (isImplicit)
    Mem = alloc(Context, Args.size(), 0);
  else
    Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
  return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, SuperLoc, IsInstanceSuper,
                                   SuperType, Sel, SelLocs, SelLocsK,
                                   Method, Args, RBracLoc, isImplicit);
}

/// Create a class message send (receiver given as a type).
ObjCMessageExpr *ObjCMessageExpr::Create(const ASTContext &Context, QualType T,
                                         ExprValueKind VK,
                                         SourceLocation LBracLoc,
                                         TypeSourceInfo *Receiver,
                                         Selector Sel,
                                         ArrayRef<SourceLocation> SelLocs,
                                         ObjCMethodDecl *Method,
                                         ArrayRef<Expr *> Args,
                                         SourceLocation RBracLoc,
                                         bool isImplicit) {
  assert((!SelLocs.empty() || isImplicit) &&
         "No selector locs for non-implicit message");
  ObjCMessageExpr *Mem;
  SelectorLocationsKind SelLocsK = SelectorLocationsKind();
  if (isImplicit)
    Mem = alloc(Context, Args.size(), 0);
  else
    Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
  return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel,
                                   SelLocs, SelLocsK, Method, Args, RBracLoc,
                                   isImplicit);
}

/// Create an instance message send (receiver given as an expression).
ObjCMessageExpr *ObjCMessageExpr::Create(const ASTContext &Context, QualType T,
                                         ExprValueKind VK,
                                         SourceLocation LBracLoc,
                                         Expr *Receiver,
                                         Selector Sel,
                                         ArrayRef<SourceLocation> SelLocs,
                                         ObjCMethodDecl *Method,
                                         ArrayRef<Expr *> Args,
                                         SourceLocation RBracLoc,
                                         bool isImplicit) {
  assert((!SelLocs.empty() || isImplicit) &&
         "No selector locs for non-implicit message");
  ObjCMessageExpr *Mem;
  SelectorLocationsKind SelLocsK = SelectorLocationsKind();
  if (isImplicit)
    Mem = alloc(Context, Args.size(), 0);
  else
    Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
  return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel,
                                   SelLocs, SelLocsK, Method, Args, RBracLoc,
                                   isImplicit);
}
/// Create an uninitialized message send (for deserialization).
ObjCMessageExpr *ObjCMessageExpr::CreateEmpty(const ASTContext &Context,
                                              unsigned NumArgs,
                                              unsigned NumStoredSelLocs) {
  ObjCMessageExpr *Mem = alloc(Context, NumArgs, NumStoredSelLocs);
  return new (Mem) ObjCMessageExpr(EmptyShell(), NumArgs);
}

/// Allocate storage for a non-implicit message send.  Computes (into
/// SelLocsK) whether the selector locations follow the standard layout;
/// only non-standard locations need trailing storage.
ObjCMessageExpr *ObjCMessageExpr::alloc(const ASTContext &C,
                                        ArrayRef<Expr *> Args,
                                        SourceLocation RBraceLoc,
                                        ArrayRef<SourceLocation> SelLocs,
                                        Selector Sel,
                                        SelectorLocationsKind &SelLocsK) {
  SelLocsK = hasStandardSelectorLocs(Sel, SelLocs, Args, RBraceLoc);
  unsigned NumStoredSelLocs = (SelLocsK == SelLoc_NonStandard) ? SelLocs.size()
                                                               : 0;
  return alloc(C, Args.size(), NumStoredSelLocs);
}

/// Allocate raw storage: the object itself, one trailing void* (the
/// receiver slot), the argument pointers, then any stored selector
/// locations.  children() and getArgs() rely on exactly this layout.
ObjCMessageExpr *ObjCMessageExpr::alloc(const ASTContext &C,
                                        unsigned NumArgs,
                                        unsigned NumStoredSelLocs) {
  unsigned Size = sizeof(ObjCMessageExpr) + sizeof(void *) +
    NumArgs * sizeof(Expr *) + NumStoredSelLocs * sizeof(SourceLocation);
  return (ObjCMessageExpr *)C.Allocate(Size,
                                     llvm::AlignOf<ObjCMessageExpr>::Alignment);
}
/// Append all selector-piece source locations to \p SelLocs.
void ObjCMessageExpr::getSelectorLocs(
                               SmallVectorImpl<SourceLocation> &SelLocs) const {
  const unsigned NumLocs = getNumSelectorLocs();
  for (unsigned I = 0; I != NumLocs; ++I)
    SelLocs.push_back(getSelectorLoc(I));
}
/// Source range covering the receiver: the receiver expression for an
/// instance send, the written type for a class send, or the 'super'
/// keyword location for super sends.
SourceRange ObjCMessageExpr::getReceiverRange() const {
  switch (getReceiverKind()) {
  case Instance:
    return getInstanceReceiver()->getSourceRange();
  case Class:
    return getClassReceiverTypeInfo()->getTypeLoc().getSourceRange();
  case SuperInstance:
  case SuperClass:
    return getSuperLoc();
  }
  llvm_unreachable("Invalid ReceiverKind!");
}
/// Return the message selector.  SelectorOrMethod stores either the
/// resolved ObjCMethodDecl (when HasMethod is set) or the selector's
/// opaque representation.
Selector ObjCMessageExpr::getSelector() const {
  if (!HasMethod)
    return Selector(SelectorOrMethod);
  return reinterpret_cast<const ObjCMethodDecl *>(SelectorOrMethod)
      ->getSelector();
}
/// Type of the receiver: the expression's type for an instance send, the
/// named class type for a class send, or the recorded 'super' type.
QualType ObjCMessageExpr::getReceiverType() const {
  switch (getReceiverKind()) {
  case Instance:
    return getInstanceReceiver()->getType();
  case Class:
    return getClassReceiver();
  case SuperInstance:
  case SuperClass:
    return getSuperType();
  }
  llvm_unreachable("unexpected receiver kind");
}
/// Return the interface declaration underlying the receiver type, whether
/// the receiver is an object pointer or a bare object type; null if
/// neither applies.
ObjCInterfaceDecl *ObjCMessageExpr::getReceiverInterface() const {
  QualType RecTy = getReceiverType();
  if (const ObjCObjectPointerType *OPT =
          RecTy->getAs<ObjCObjectPointerType>())
    return OPT->getInterfaceDecl();
  if (const ObjCObjectType *OT = RecTy->getAs<ObjCObjectType>())
    return OT->getInterface();
  return nullptr;
}
/// Type of the property access's receiver: the recorded 'super' type, the
/// interface type of a class receiver, or the base expression's type.
QualType ObjCPropertyRefExpr::getReceiverType(const ASTContext &ctx) const {
  if (isSuperReceiver())
    return getSuperReceiverType();
  if (isClassReceiver())
    return ctx.getObjCInterfaceType(getClassReceiver());
  return getBase()->getType();
}
/// Spelling of the bridge cast keyword, for diagnostics and printing.
StringRef ObjCBridgedCastExpr::getBridgeKindName() const {
  switch (getBridgeKind()) {
  case OBC_Bridge:
    return "__bridge";
  case OBC_BridgeTransfer:
    return "__bridge_transfer";
  case OBC_BridgeRetained:
    return "__bridge_retained";
  }
  llvm_unreachable("Invalid BridgeKind!");
}
/// Build a __builtin_shufflevector expression.  Dependence bits start from
/// the result type and are OR'ed with each operand's bits; operands are
/// copied into ASTContext-allocated storage.
ShuffleVectorExpr::ShuffleVectorExpr(const ASTContext &C, ArrayRef<Expr*> args,
                                     QualType Type, SourceLocation BLoc,
                                     SourceLocation RP)
   : Expr(ShuffleVectorExprClass, Type, VK_RValue, OK_Ordinary,
          Type->isDependentType(), Type->isDependentType(),
          Type->isInstantiationDependentType(),
          Type->containsUnexpandedParameterPack()),
     BuiltinLoc(BLoc), RParenLoc(RP), NumExprs(args.size())
{
  SubExprs = new (C) Stmt*[args.size()];
  for (unsigned i = 0; i != args.size(); i++) {
    // Any dependent operand makes the whole expression dependent.
    if (args[i]->isTypeDependent())
      ExprBits.TypeDependent = true;
    if (args[i]->isValueDependent())
      ExprBits.ValueDependent = true;
    if (args[i]->isInstantiationDependent())
      ExprBits.InstantiationDependent = true;
    if (args[i]->containsUnexpandedParameterPack())
      ExprBits.ContainsUnexpandedParameterPack = true;

    SubExprs[i] = args[i];
  }
}
/// Replace the operand list, releasing any previously allocated array and
/// copying the new operands into ASTContext-allocated storage.
void ShuffleVectorExpr::setExprs(const ASTContext &C, ArrayRef<Expr *> Exprs) {
  if (SubExprs)
    C.Deallocate(SubExprs);

  NumExprs = Exprs.size();
  SubExprs = new (C) Stmt*[NumExprs];
  std::copy(Exprs.begin(), Exprs.end(), SubExprs);
}
/// Build a non-result-dependent _Generic expression: the chosen
/// association at ResultIndex supplies the type, value kind, and
/// dependence bits of the whole expression.
GenericSelectionExpr::GenericSelectionExpr(const ASTContext &Context,
                               SourceLocation GenericLoc, Expr *ControllingExpr,
                               ArrayRef<TypeSourceInfo*> AssocTypes,
                               ArrayRef<Expr*> AssocExprs,
                               SourceLocation DefaultLoc,
                               SourceLocation RParenLoc,
                               bool ContainsUnexpandedParameterPack,
                               unsigned ResultIndex)
  : Expr(GenericSelectionExprClass,
         AssocExprs[ResultIndex]->getType(),
         AssocExprs[ResultIndex]->getValueKind(),
         AssocExprs[ResultIndex]->getObjectKind(),
         AssocExprs[ResultIndex]->isTypeDependent(),
         AssocExprs[ResultIndex]->isValueDependent(),
         AssocExprs[ResultIndex]->isInstantiationDependent(),
         ContainsUnexpandedParameterPack),
    AssocTypes(new (Context) TypeSourceInfo*[AssocTypes.size()]),
    SubExprs(new (Context) Stmt*[END_EXPR+AssocExprs.size()]),
    NumAssocs(AssocExprs.size()), ResultIndex(ResultIndex),
    GenericLoc(GenericLoc), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
  // Sub-expression slot 0 is the controlling expression; the association
  // expressions follow starting at END_EXPR.
  SubExprs[CONTROLLING] = ControllingExpr;
  assert(AssocTypes.size() == AssocExprs.size());
  std::copy(AssocTypes.begin(), AssocTypes.end(), this->AssocTypes);
  std::copy(AssocExprs.begin(), AssocExprs.end(), SubExprs+END_EXPR);
}

/// Build a result-dependent _Generic expression (the controlling
/// expression is type-dependent, so no association can be chosen yet);
/// ResultIndex is -1U and the type is the dependent placeholder type.
GenericSelectionExpr::GenericSelectionExpr(const ASTContext &Context,
                               SourceLocation GenericLoc, Expr *ControllingExpr,
                               ArrayRef<TypeSourceInfo*> AssocTypes,
                               ArrayRef<Expr*> AssocExprs,
                               SourceLocation DefaultLoc,
                               SourceLocation RParenLoc,
                               bool ContainsUnexpandedParameterPack)
  : Expr(GenericSelectionExprClass,
         Context.DependentTy,
         VK_RValue,
         OK_Ordinary,
         /*isTypeDependent=*/true,
         /*isValueDependent=*/true,
         /*isInstantiationDependent=*/true,
         ContainsUnexpandedParameterPack),
    AssocTypes(new (Context) TypeSourceInfo*[AssocTypes.size()]),
    SubExprs(new (Context) Stmt*[END_EXPR+AssocExprs.size()]),
    NumAssocs(AssocExprs.size()), ResultIndex(-1U), GenericLoc(GenericLoc),
    DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
  SubExprs[CONTROLLING] = ControllingExpr;
  assert(AssocTypes.size() == AssocExprs.size());
  std::copy(AssocTypes.begin(), AssocTypes.end(), this->AssocTypes);
  std::copy(AssocExprs.begin(), AssocExprs.end(), SubExprs+END_EXPR);
}
//===----------------------------------------------------------------------===//
// DesignatedInitExpr
//===----------------------------------------------------------------------===//
/// Return the name of a field designator.  Field.NameOrField is a tagged
/// pointer: when the low bit is set it holds an IdentifierInfo* (the field
/// has not been resolved to a declaration yet); otherwise it holds the
/// FieldDecl*, whose identifier is returned.
IdentifierInfo *DesignatedInitExpr::Designator::getFieldName() const {
  assert(Kind == FieldDesignator && "Only valid on a field designator");
  if (Field.NameOrField & 0x01)
    return reinterpret_cast<IdentifierInfo *>(Field.NameOrField&~0x01);
  else
    return getField()->getIdentifier();
}
/// Build a designated initializer.  The initializer expression seeds the
/// dependence bits; array and array-range designator index expressions are
/// stored as children after the initializer (child 0) and their dependence
/// is OR'ed in as they are copied.
DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty,
                                       unsigned NumDesignators,
                                       const Designator *Designators,
                                       SourceLocation EqualOrColonLoc,
                                       bool GNUSyntax,
                                       ArrayRef<Expr*> IndexExprs,
                                       Expr *Init)
  : Expr(DesignatedInitExprClass, Ty,
         Init->getValueKind(), Init->getObjectKind(),
         Init->isTypeDependent(), Init->isValueDependent(),
         Init->isInstantiationDependent(),
         Init->containsUnexpandedParameterPack()),
    EqualOrColonLoc(EqualOrColonLoc), GNUSyntax(GNUSyntax),
    NumDesignators(NumDesignators), NumSubExprs(IndexExprs.size() + 1) {
  this->Designators = new (C) Designator[NumDesignators];

  // Record the initializer itself.
  child_range Child = children();
  *Child++ = Init;

  // Copy the designators and their subexpressions, computing
  // value-dependence along the way.
  unsigned IndexIdx = 0;
  for (unsigned I = 0; I != NumDesignators; ++I) {
    this->Designators[I] = Designators[I];

    if (this->Designators[I].isArrayDesignator()) {
      // Compute type- and value-dependence.
      Expr *Index = IndexExprs[IndexIdx];
      if (Index->isTypeDependent() || Index->isValueDependent())
        ExprBits.TypeDependent = ExprBits.ValueDependent = true;
      if (Index->isInstantiationDependent())
        ExprBits.InstantiationDependent = true;
      // Propagate unexpanded parameter packs.
      if (Index->containsUnexpandedParameterPack())
        ExprBits.ContainsUnexpandedParameterPack = true;

      // Copy the index expressions into permanent storage.
      *Child++ = IndexExprs[IndexIdx++];
    } else if (this->Designators[I].isArrayRangeDesignator()) {
      // Compute type- and value-dependence.
      Expr *Start = IndexExprs[IndexIdx];
      Expr *End = IndexExprs[IndexIdx + 1];
      if (Start->isTypeDependent() || Start->isValueDependent() ||
          End->isTypeDependent() || End->isValueDependent()) {
        ExprBits.TypeDependent = ExprBits.ValueDependent = true;
        ExprBits.InstantiationDependent = true;
      } else if (Start->isInstantiationDependent() ||
                 End->isInstantiationDependent()) {
        ExprBits.InstantiationDependent = true;
      }

      // Propagate unexpanded parameter packs.
      if (Start->containsUnexpandedParameterPack() ||
          End->containsUnexpandedParameterPack())
        ExprBits.ContainsUnexpandedParameterPack = true;

      // Copy the start/end expressions into permanent storage.
      *Child++ = IndexExprs[IndexIdx++];
      *Child++ = IndexExprs[IndexIdx++];
    }
  }

  assert(IndexIdx == IndexExprs.size() && "Wrong number of index expressions");
}
/// Create a designated initializer.  Trailing storage holds the index
/// expressions plus one slot for the initializer itself (hence the "+ 1").
DesignatedInitExpr *
DesignatedInitExpr::Create(const ASTContext &C, Designator *Designators,
                           unsigned NumDesignators,
                           ArrayRef<Expr*> IndexExprs,
                           SourceLocation ColonOrEqualLoc,
                           bool UsesColonSyntax, Expr *Init) {
  void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
                         sizeof(Stmt *) * (IndexExprs.size() + 1), 8);
  return new (Mem) DesignatedInitExpr(C, C.VoidTy, NumDesignators, Designators,
                                      ColonOrEqualLoc, UsesColonSyntax,
                                      IndexExprs, Init);
}

/// Create an uninitialized designated initializer (for deserialization).
DesignatedInitExpr *DesignatedInitExpr::CreateEmpty(const ASTContext &C,
                                                    unsigned NumIndexExprs) {
  void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
                         sizeof(Stmt *) * (NumIndexExprs + 1), 8);
  return new (Mem) DesignatedInitExpr(NumIndexExprs + 1);
}
/// Replace the designator list with a copy of \p Desigs allocated in the
/// ASTContext.
void DesignatedInitExpr::setDesignators(const ASTContext &C,
                                        const Designator *Desigs,
                                        unsigned NumDesigs) {
  NumDesignators = NumDesigs;
  Designators = new (C) Designator[NumDesigs];
  std::copy(Desigs, Desigs + NumDesigs, Designators);
}
/// Source range covering all designators, from the start of the first to
/// the end of the last.
SourceRange DesignatedInitExpr::getDesignatorsSourceRange() const {
  DesignatedInitExpr *DIE = const_cast<DesignatedInitExpr *>(this);
  Designator *FirstD = DIE->getDesignator(0);
  if (size() == 1)
    return FirstD->getSourceRange();
  return SourceRange(FirstD->getLocStart(),
                     DIE->getDesignator(size() - 1)->getLocEnd());
}
/// Start location: the first designator's '.' (or, for GNU "field:"
/// syntax, the field name itself), or the '[' of an array designator.
SourceLocation DesignatedInitExpr::getLocStart() const {
  SourceLocation StartLoc;
  Designator &First =
    *const_cast<DesignatedInitExpr*>(this)->designators_begin();
  if (First.isFieldDesignator()) {
    if (GNUSyntax)
      StartLoc = SourceLocation::getFromRawEncoding(First.Field.FieldLoc);
    else
      StartLoc = SourceLocation::getFromRawEncoding(First.Field.DotLoc);
  } else
    StartLoc =
      SourceLocation::getFromRawEncoding(First.ArrayOrRange.LBracketLoc);
  return StartLoc;
}

/// End location: the end of the initializer expression.
SourceLocation DesignatedInitExpr::getLocEnd() const {
  return getInit()->getLocEnd();
}
/// Index expression of an array designator.  Sub-expressions live directly
/// after the object ("this + 1"); slot 0 is the initializer, so the
/// designator's recorded index is offset by 1.
Expr *DesignatedInitExpr::getArrayIndex(const Designator& D) const {
  assert(D.Kind == Designator::ArrayDesignator && "Requires array designator");
  Stmt *const *SubExprs = reinterpret_cast<Stmt *const *>(this + 1);
  return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
}

/// Start expression of a GNU array-range designator (same layout as
/// getArrayIndex).
Expr *DesignatedInitExpr::getArrayRangeStart(const Designator &D) const {
  assert(D.Kind == Designator::ArrayRangeDesignator &&
         "Requires array range designator");
  Stmt *const *SubExprs = reinterpret_cast<Stmt *const *>(this + 1);
  return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
}

/// End expression of a GNU array-range designator: stored immediately
/// after the range's start expression, hence the "+ 2".
Expr *DesignatedInitExpr::getArrayRangeEnd(const Designator &D) const {
  assert(D.Kind == Designator::ArrayRangeDesignator &&
         "Requires array range designator");
  Stmt *const *SubExprs = reinterpret_cast<Stmt *const *>(this + 1);
  return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 2));
}
/// \brief Replaces the designator at index @p Idx with the series
/// of designators in [First, Last).
void DesignatedInitExpr::ExpandDesignator(const ASTContext &C, unsigned Idx,
                                          const Designator *First,
                                          const Designator *Last) {
  unsigned NumNewDesignators = Last - First;
  if (NumNewDesignators == 0) {
    // Removing the designator at Idx: shift the tail left one slot and
    // shrink the stored count.  (The previous code shifted with
    // std::copy_backward whose destination-end iterator pointed *at* Idx
    // — writing the tail before Idx — and decremented the local
    // NumNewDesignators instead of the member NumDesignators.)
    std::copy(Designators + Idx + 1, Designators + NumDesignators,
              Designators + Idx);
    --NumDesignators;
    return;
  }
  if (NumNewDesignators == 1) {
    // One-for-one replacement can be done in place.
    Designators[Idx] = *First;
    return;
  }

  // General case: grow into a freshly allocated array with the new series
  // spliced in at Idx.
  Designator *NewDesignators
    = new (C) Designator[NumDesignators - 1 + NumNewDesignators];
  std::copy(Designators, Designators + Idx, NewDesignators);
  std::copy(First, Last, NewDesignators + Idx);
  std::copy(Designators + Idx + 1, Designators + NumDesignators,
            NewDesignators + Idx + NumNewDesignators);
  Designators = NewDesignators;
  NumDesignators = NumDesignators - 1 + NumNewDesignators;
}
/// Build a designated-init update: holds the base expression being updated
/// plus a freshly created, initially empty InitListExpr (the "updater")
/// typed the same as the base.
DesignatedInitUpdateExpr::DesignatedInitUpdateExpr(const ASTContext &C,
    SourceLocation lBraceLoc, Expr *baseExpr, SourceLocation rBraceLoc)
  : Expr(DesignatedInitUpdateExprClass, baseExpr->getType(), VK_RValue,
         OK_Ordinary, false, false, false, false) {
  BaseAndUpdaterExprs[0] = baseExpr;

  InitListExpr *ILE = new (C) InitListExpr(C, lBraceLoc, None, rBraceLoc);
  ILE->setType(baseExpr->getType());
  BaseAndUpdaterExprs[1] = ILE;
}

// The update expression's extent is that of its base expression.
SourceLocation DesignatedInitUpdateExpr::getLocStart() const {
  return getBase()->getLocStart();
}

SourceLocation DesignatedInitUpdateExpr::getLocEnd() const {
  return getBase()->getLocEnd();
}
/// Build a parenthesized expression list "(a, b, c)".  The node itself has
/// no type; dependence bits are the union of the sub-expressions' bits.
ParenListExpr::ParenListExpr(const ASTContext& C, SourceLocation lparenloc,
                             ArrayRef<Expr*> exprs,
                             SourceLocation rparenloc)
  : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary,
         false, false, false, false),
    NumExprs(exprs.size()), LParenLoc(lparenloc), RParenLoc(rparenloc) {
  Exprs = new (C) Stmt*[exprs.size()];
  for (unsigned i = 0; i != exprs.size(); ++i) {
    // Any dependent element makes the whole list dependent.
    if (exprs[i]->isTypeDependent())
      ExprBits.TypeDependent = true;
    if (exprs[i]->isValueDependent())
      ExprBits.ValueDependent = true;
    if (exprs[i]->isInstantiationDependent())
      ExprBits.InstantiationDependent = true;
    if (exprs[i]->containsUnexpandedParameterPack())
      ExprBits.ContainsUnexpandedParameterPack = true;

    Exprs[i] = exprs[i];
  }
}
/// Given a copy-construction expression, dig out the OpaqueValueExpr that
/// is being copied: strip an ExprWithCleanups and a
/// MaterializeTemporaryExpr if present, take the first argument of the
/// CXXConstructExpr (which must be there — the cast asserts it), then
/// strip implicit casts.
const OpaqueValueExpr *OpaqueValueExpr::findInCopyConstruct(const Expr *e) {
  if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(e))
    e = ewc->getSubExpr();
  if (const MaterializeTemporaryExpr *m = dyn_cast<MaterializeTemporaryExpr>(e))
    e = m->GetTemporaryExpr();
  e = cast<CXXConstructExpr>(e)->getArg(0);
  while (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(e))
    e = ice->getSubExpr();
  return cast<OpaqueValueExpr>(e);
}
/// Create an uninitialized pseudo-object expression (for deserialization).
/// Trailing storage holds the syntactic expression plus the semantic
/// expressions (hence "1 + numSemanticExprs").
PseudoObjectExpr *PseudoObjectExpr::Create(const ASTContext &Context,
                                           EmptyShell sh,
                                           unsigned numSemanticExprs) {
  void *buffer = Context.Allocate(sizeof(PseudoObjectExpr) +
                                    (1 + numSemanticExprs) * sizeof(Expr*),
                                  llvm::alignOf<PseudoObjectExpr>());
  return new(buffer) PseudoObjectExpr(sh, numSemanticExprs);
}

PseudoObjectExpr::PseudoObjectExpr(EmptyShell shell, unsigned numSemanticExprs)
  : Expr(PseudoObjectExprClass, shell) {
  // Slot 0 is reserved for the syntactic form.
  PseudoObjectExprBits.NumSubExprs = numSemanticExprs + 1;
}
/// Create a pseudo-object expression.  When resultIndex is NoResult the
/// expression has void type; otherwise the designated semantic expression
/// provides the type and value kind.
PseudoObjectExpr *PseudoObjectExpr::Create(const ASTContext &C, Expr *syntax,
                                           ArrayRef<Expr*> semantics,
                                           unsigned resultIndex) {
  assert(syntax && "no syntactic expression!");
  assert(semantics.size() && "no semantic expressions!");

  QualType type;
  ExprValueKind VK;
  if (resultIndex == NoResult) {
    type = C.VoidTy;
    VK = VK_RValue;
  } else {
    assert(resultIndex < semantics.size());
    type = semantics[resultIndex]->getType();
    VK = semantics[resultIndex]->getValueKind();
    assert(semantics[resultIndex]->getObjectKind() == OK_Ordinary);
  }

  void *buffer = C.Allocate(sizeof(PseudoObjectExpr) +
                              (1 + semantics.size()) * sizeof(Expr*),
                            llvm::alignOf<PseudoObjectExpr>());
  return new(buffer) PseudoObjectExpr(type, VK, syntax, semantics,
                                      resultIndex);
}

/// Sub-expression slot 0 holds the syntactic form; the semantic forms
/// follow.  ResultIndex is stored biased by one so that 0 can mean
/// "no result".  Dependence bits are the union over all sub-expressions.
PseudoObjectExpr::PseudoObjectExpr(QualType type, ExprValueKind VK,
                                   Expr *syntax, ArrayRef<Expr*> semantics,
                                   unsigned resultIndex)
  : Expr(PseudoObjectExprClass, type, VK, OK_Ordinary,
         /*filled in at end of ctor*/ false, false, false, false) {
  PseudoObjectExprBits.NumSubExprs = semantics.size() + 1;
  PseudoObjectExprBits.ResultIndex = resultIndex + 1;

  for (unsigned i = 0, e = semantics.size() + 1; i != e; ++i) {
    Expr *E = (i == 0 ? syntax : semantics[i-1]);
    getSubExprsBuffer()[i] = E;

    if (E->isTypeDependent())
      ExprBits.TypeDependent = true;
    if (E->isValueDependent())
      ExprBits.ValueDependent = true;
    if (E->isInstantiationDependent())
      ExprBits.InstantiationDependent = true;
    if (E->containsUnexpandedParameterPack())
      ExprBits.ContainsUnexpandedParameterPack = true;

    if (isa<OpaqueValueExpr>(E))
      assert(cast<OpaqueValueExpr>(E)->getSourceExpr() != nullptr &&
             "opaque-value semantic expressions for pseudo-object "
             "operations must have sources");
  }
}
//===----------------------------------------------------------------------===//
// ExprIterator.
//===----------------------------------------------------------------------===//
// These iterators adapt Stmt** storage to yield Expr*; the casts assert
// that every stored statement really is an expression.
Expr* ExprIterator::operator[](size_t idx) { return cast<Expr>(I[idx]); }
Expr* ExprIterator::operator*() const { return cast<Expr>(*I); }
Expr* ExprIterator::operator->() const { return cast<Expr>(*I); }

const Expr* ConstExprIterator::operator[](size_t idx) const {
  return cast<Expr>(I[idx]);
}
const Expr* ConstExprIterator::operator*() const { return cast<Expr>(*I); }
const Expr* ConstExprIterator::operator->() const { return cast<Expr>(*I); }
//===----------------------------------------------------------------------===//
// Child Iterators for iterating over subexpressions/substatements
//===----------------------------------------------------------------------===//
// UnaryExprOrTypeTraitExpr
Stmt::child_range UnaryExprOrTypeTraitExpr::children() {
  // If this is of a type and the type is a VLA type (and not a typedef), the
  // size expression of the VLA needs to be treated as an executable expression.
  // Why isn't this weirdness documented better in StmtIterator?
  if (isArgumentType()) {
    if (const VariableArrayType* T = dyn_cast<VariableArrayType>(
                                   getArgumentType().getTypePtr()))
      return child_range(child_iterator(T), child_iterator());
    // Non-VLA type argument: no executable children.
    return child_range();
  }
  // Expression argument: the single child is that expression.
  return child_range(&Argument.Ex, &Argument.Ex + 1);
}
// ObjCMessageExpr
Stmt::child_range ObjCMessageExpr::children() {
  Stmt **begin;
  // The receiver slot sits immediately after the object (see alloc());
  // for instance sends it holds an expression and is part of the child
  // range, otherwise the children start at the arguments.
  if (getReceiverKind() == Instance)
    begin = reinterpret_cast<Stmt **>(this + 1);
  else
    begin = reinterpret_cast<Stmt **>(getArgs());
  return child_range(begin,
                     reinterpret_cast<Stmt **>(getArgs() + getNumArgs()));
}
/// Build an Objective-C array literal.  Elements are copied into trailing
/// storage and their dependence is OR'ed into the literal's bits.
/// NOTE(review): a type-dependent element only sets ValueDependent here,
/// not TypeDependent — confirm this asymmetry is intentional.
ObjCArrayLiteral::ObjCArrayLiteral(ArrayRef<Expr *> Elements,
                                   QualType T, ObjCMethodDecl *Method,
                                   SourceRange SR)
  : Expr(ObjCArrayLiteralClass, T, VK_RValue, OK_Ordinary,
         false, false, false, false),
    NumElements(Elements.size()), Range(SR), ArrayWithObjectsMethod(Method)
{
  Expr **SaveElements = getElements();
  for (unsigned I = 0, N = Elements.size(); I != N; ++I) {
    if (Elements[I]->isTypeDependent() || Elements[I]->isValueDependent())
      ExprBits.ValueDependent = true;
    if (Elements[I]->isInstantiationDependent())
      ExprBits.InstantiationDependent = true;
    if (Elements[I]->containsUnexpandedParameterPack())
      ExprBits.ContainsUnexpandedParameterPack = true;

    SaveElements[I] = Elements[I];
  }
}

/// Create an array literal; element pointers live in trailing storage.
ObjCArrayLiteral *ObjCArrayLiteral::Create(const ASTContext &C,
                                           ArrayRef<Expr *> Elements,
                                           QualType T, ObjCMethodDecl * Method,
                                           SourceRange SR) {
  void *Mem = C.Allocate(sizeof(ObjCArrayLiteral)
                         + Elements.size() * sizeof(Expr *));
  return new (Mem) ObjCArrayLiteral(Elements, T, Method, SR);
}

/// Create an uninitialized array literal (for deserialization).
ObjCArrayLiteral *ObjCArrayLiteral::CreateEmpty(const ASTContext &C,
                                                unsigned NumElements) {
  void *Mem = C.Allocate(sizeof(ObjCArrayLiteral)
                         + NumElements * sizeof(Expr *));
  return new (Mem) ObjCArrayLiteral(EmptyShell(), NumElements);
}
/// Build an Objective-C dictionary literal.  Key/value pairs are copied
/// into trailing storage; when any element is a pack expansion, parallel
/// expansion data is stored as well, with the expansion count biased by
/// one so that 0 means "unknown".
ObjCDictionaryLiteral::ObjCDictionaryLiteral(
                                             ArrayRef<ObjCDictionaryElement> VK,
                                             bool HasPackExpansions,
                                             QualType T, ObjCMethodDecl *method,
                                             SourceRange SR)
  : Expr(ObjCDictionaryLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
         false, false),
    NumElements(VK.size()), HasPackExpansions(HasPackExpansions), Range(SR),
    DictWithObjectsMethod(method)
{
  KeyValuePair *KeyValues = getKeyValues();
  ExpansionData *Expansions = getExpansionData();
  for (unsigned I = 0; I < NumElements; I++) {
    // Dependence of either the key or the value taints the whole literal.
    if (VK[I].Key->isTypeDependent() || VK[I].Key->isValueDependent() ||
        VK[I].Value->isTypeDependent() || VK[I].Value->isValueDependent())
      ExprBits.ValueDependent = true;
    if (VK[I].Key->isInstantiationDependent() ||
        VK[I].Value->isInstantiationDependent())
      ExprBits.InstantiationDependent = true;
    // An element already expanded (valid ellipsis loc) does not propagate
    // its pack; only unexpanded elements do.
    if (VK[I].EllipsisLoc.isInvalid() &&
        (VK[I].Key->containsUnexpandedParameterPack() ||
         VK[I].Value->containsUnexpandedParameterPack()))
      ExprBits.ContainsUnexpandedParameterPack = true;

    KeyValues[I].Key = VK[I].Key;
    KeyValues[I].Value = VK[I].Value;
    if (Expansions) {
      Expansions[I].EllipsisLoc = VK[I].EllipsisLoc;
      if (VK[I].NumExpansions)
        Expansions[I].NumExpansionsPlusOne = *VK[I].NumExpansions + 1;
      else
        Expansions[I].NumExpansionsPlusOne = 0;
    }
  }
}
/// Allocate and construct a dictionary literal. The trailing storage holds
/// NumElements KeyValuePairs, followed by NumElements ExpansionData records
/// only when the literal actually contains pack expansions.
ObjCDictionaryLiteral *
ObjCDictionaryLiteral::Create(const ASTContext &C,
                              ArrayRef<ObjCDictionaryElement> VK,
                              bool HasPackExpansions,
                              QualType T, ObjCMethodDecl *method,
                              SourceRange SR) {
  unsigned ExpansionsSize = 0;
  if (HasPackExpansions)
    ExpansionsSize = sizeof(ExpansionData) * VK.size();

  void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
                         sizeof(KeyValuePair) * VK.size() + ExpansionsSize);
  return new (Mem) ObjCDictionaryLiteral(VK, HasPackExpansions, T, method, SR);
}
/// Allocate an empty dictionary literal with room for \p NumElements
/// key/value pairs (plus expansion data when requested), to be filled in
/// later (e.g. by the AST reader during deserialization).
ObjCDictionaryLiteral *
ObjCDictionaryLiteral::CreateEmpty(const ASTContext &C, unsigned NumElements,
                                   bool HasPackExpansions) {
  unsigned ExpansionsSize = 0;
  if (HasPackExpansions)
    ExpansionsSize = sizeof(ExpansionData) * NumElements;
  void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
                         sizeof(KeyValuePair) * NumElements + ExpansionsSize);
  return new (Mem) ObjCDictionaryLiteral(EmptyShell(), NumElements,
                                         HasPackExpansions);
}
/// Allocate and construct an ObjC subscript reference (base[key]). The
/// result is an lvalue of object kind OK_ObjCSubscript, carrying the
/// getter/setter methods Sema resolved for reads and writes.
ObjCSubscriptRefExpr *ObjCSubscriptRefExpr::Create(const ASTContext &C,
                                                   Expr *base,
                                                   Expr *key, QualType T,
                                                   ObjCMethodDecl *getMethod,
                                                   ObjCMethodDecl *setMethod,
                                                   SourceLocation RB) {
  void *Mem = C.Allocate(sizeof(ObjCSubscriptRefExpr));
  return new (Mem) ObjCSubscriptRefExpr(base, key, T, VK_LValue,
                                        OK_ObjCSubscript,
                                        getMethod, setMethod, RB);
}
/// Construct an atomic builtin call (__c11_atomic_* / __atomic_*). The
/// number of operands in \p args must equal getNumSubExprs(op); they are
/// copied into the fixed SubExprs array.
AtomicExpr::AtomicExpr(SourceLocation BLoc, ArrayRef<Expr*> args,
                       QualType t, AtomicOp op, SourceLocation RP)
  : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary,
         false, false, false, false),
    NumSubExprs(args.size()), BuiltinLoc(BLoc), RParenLoc(RP), Op(op)
{
  assert(args.size() == getNumSubExprs(op) && "wrong number of subexpressions");
  for (unsigned i = 0; i != args.size(); i++) {
    // Fold each operand's dependence bits into the expression's ExprBits.
    if (args[i]->isTypeDependent())
      ExprBits.TypeDependent = true;
    if (args[i]->isValueDependent())
      ExprBits.ValueDependent = true;
    if (args[i]->isInstantiationDependent())
      ExprBits.InstantiationDependent = true;
    if (args[i]->containsUnexpandedParameterPack())
      ExprBits.ContainsUnexpandedParameterPack = true;

    SubExprs[i] = args[i];
  }
}
/// Return the number of subexpressions each atomic builtin carries
/// (pointer operand, value operand(s), memory-order operand(s), etc.).
/// Must stay in sync with the operands Sema passes to the constructor,
/// which asserts against this count.
unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
  switch (Op) {
  // Pointer plus one more operand (init value, or memory order for loads).
  case AO__c11_atomic_init:
  case AO__c11_atomic_load:
  case AO__atomic_load_n:
    return 2;

  // Pointer, value, and memory order.
  case AO__c11_atomic_store:
  case AO__c11_atomic_exchange:
  case AO__atomic_load:
  case AO__atomic_store:
  case AO__atomic_store_n:
  case AO__atomic_exchange_n:
  case AO__c11_atomic_fetch_add:
  case AO__c11_atomic_fetch_sub:
  case AO__c11_atomic_fetch_and:
  case AO__c11_atomic_fetch_or:
  case AO__c11_atomic_fetch_xor:
  case AO__atomic_fetch_add:
  case AO__atomic_fetch_sub:
  case AO__atomic_fetch_and:
  case AO__atomic_fetch_or:
  case AO__atomic_fetch_xor:
  case AO__atomic_fetch_nand:
  case AO__atomic_add_fetch:
  case AO__atomic_sub_fetch:
  case AO__atomic_and_fetch:
  case AO__atomic_or_fetch:
  case AO__atomic_xor_fetch:
  case AO__atomic_nand_fetch:
    return 3;

  case AO__atomic_exchange:
    return 4;

  // Compare-exchange forms add the expected-value pointer and the failure
  // memory order (and, for GNU __atomic forms, the weak flag).
  case AO__c11_atomic_compare_exchange_strong:
  case AO__c11_atomic_compare_exchange_weak:
    return 5;

  case AO__atomic_compare_exchange:
  case AO__atomic_compare_exchange_n:
    return 6;
  }
  llvm_unreachable("unknown atomic op");
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/ASTConsumer.cpp | //===--- ASTConsumer.cpp - Abstract interface for reading ASTs --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ASTConsumer class.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTConsumer.h"
#include "llvm/Bitcode/BitstreamReader.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclGroup.h"
using namespace clang;
/// Default handler for a group of top-level declarations: does nothing.
/// Returning true indicates parsing should continue (per the ASTConsumer
/// interface contract).
bool ASTConsumer::HandleTopLevelDecl(DeclGroupRef D) {
  return true;
}
/// By default, "interesting" declarations (e.g. ones surfaced eagerly by an
/// external AST source) are treated exactly like ordinary top-level decls.
void ASTConsumer::HandleInterestingDecl(DeclGroupRef D) {
  HandleTopLevelDecl(D);
}
void ASTConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {}
/// Default: route an implicitly generated module import through the normal
/// top-level declaration path.
void ASTConsumer::HandleImplicitImportDecl(ImportDecl *D) {
  HandleTopLevelDecl(DeclGroupRef(D));
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/ASTDumper.cpp | //===--- ASTDumper.cpp - Dumping implementation for ASTs ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AST dump methods, which dump out the
// AST in a form that exposes type details and other fields.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CommentVisitor.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclLookups.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclVisitor.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace clang::comments;
//===----------------------------------------------------------------------===//
// ASTDumper Visitor
//===----------------------------------------------------------------------===//
namespace {
// Colors used for various parts of the AST dump
// Do not use bold yellow for any text. It is hard to read on white screens.
struct TerminalColor {
raw_ostream::Colors Color;
bool Bold;
};
// Red - CastColor
// Green - TypeColor
// Bold Green - DeclKindNameColor, UndeserializedColor
// Yellow - AddressColor, LocationColor
// Blue - CommentColor, NullColor, IndentColor
// Bold Blue - AttrColor
// Bold Magenta - StmtColor
// Cyan - ValueKindColor, ObjectKindColor
// Bold Cyan - ValueColor, DeclNameColor
// Decl kind names (VarDecl, FunctionDecl, etc)
static const TerminalColor DeclKindNameColor = { raw_ostream::GREEN, true };
// Attr names (CleanupAttr, GuardedByAttr, etc)
static const TerminalColor AttrColor = { raw_ostream::BLUE, true };
// Statement names (DeclStmt, ImplicitCastExpr, etc)
static const TerminalColor StmtColor = { raw_ostream::MAGENTA, true };
// Comment names (FullComment, ParagraphComment, TextComment, etc)
static const TerminalColor CommentColor = { raw_ostream::BLUE, false };
// Type names (int, float, etc, plus user defined types)
static const TerminalColor TypeColor = { raw_ostream::GREEN, false };
// Pointer address
static const TerminalColor AddressColor = { raw_ostream::YELLOW, false };
// Source locations
static const TerminalColor LocationColor = { raw_ostream::YELLOW, false };
// lvalue/xvalue
static const TerminalColor ValueKindColor = { raw_ostream::CYAN, false };
// bitfield/objcproperty/objcsubscript/vectorcomponent
static const TerminalColor ObjectKindColor = { raw_ostream::CYAN, false };
// Null statements
static const TerminalColor NullColor = { raw_ostream::BLUE, false };
// Undeserialized entities
static const TerminalColor UndeserializedColor = { raw_ostream::GREEN, true };
// CastKind from CastExpr's
static const TerminalColor CastColor = { raw_ostream::RED, false };
// Value of the statement
static const TerminalColor ValueColor = { raw_ostream::CYAN, true };
// Decl names
static const TerminalColor DeclNameColor = { raw_ostream::CYAN, true };
// Indents ( `, -. | )
static const TerminalColor IndentColor = { raw_ostream::BLUE, false };
/// ASTDumper - Dumps declarations, statements, types and comments as an
/// indented (optionally colored) tree. Visitation is driven through the
/// Const*Visitor bases; tree structure is maintained lazily via dumpChild()
/// so that the last child at each level can be drawn with a closing "`-".
class ASTDumper
    : public ConstDeclVisitor<ASTDumper>, public ConstStmtVisitor<ASTDumper>,
      public ConstCommentVisitor<ASTDumper>, public TypeVisitor<ASTDumper> {
  raw_ostream &OS;
  const CommandTraits *Traits;
  const SourceManager *SM;
/// Pending[i] is an action to dump an entity at level i.
llvm::SmallVector<std::function<void(bool isLastChild)>, 32> Pending;
/// Indicates whether we're at the top level.
bool TopLevel;
/// Indicates if we're handling the first child after entering a new depth.
bool FirstChild;
/// Prefix for currently-being-dumped entity.
std::string Prefix;
/// Keep track of the last location we print out so that we can
/// print out deltas from then on out.
const char *LastLocFilename;
unsigned LastLocLine;
/// The \c FullComment parent of the comment being dumped.
const FullComment *FC;
bool ShowColors;
bool DumpImplicitTopLevelDecls = true; // HLSL Change
/// Dump a child of the current node.
template<typename Fn> void dumpChild(Fn doDumpChild) {
// If we're at the top level, there's nothing interesting to do; just
// run the dumper.
if (TopLevel) {
TopLevel = false;
doDumpChild();
while (!Pending.empty()) {
Pending.back()(true);
Pending.pop_back();
}
Prefix.clear();
OS << "\n";
TopLevel = true;
return;
}
const FullComment *OrigFC = FC;
auto dumpWithIndent = [this, doDumpChild, OrigFC](bool isLastChild) {
// Print out the appropriate tree structure and work out the prefix for
// children of this node. For instance:
//
//   A        Prefix = ""
//   |-B      Prefix = "| "
//   | `-C    Prefix = "|   "
//   `-D      Prefix = "  "
//     |-E    Prefix = "  | "
//     `-F    Prefix = "    "
//   G        Prefix = ""
//
// Note that the first level gets no prefix.
{
OS << '\n';
ColorScope Color(*this, IndentColor);
OS << Prefix << (isLastChild ? '`' : '|') << '-';
this->Prefix.push_back(isLastChild ? ' ' : '|');
this->Prefix.push_back(' ');
}
FirstChild = true;
unsigned Depth = Pending.size();
FC = OrigFC;
doDumpChild();
// If any children are left, they're the last at their nesting level.
// Dump those ones out now.
while (Depth < Pending.size()) {
Pending.back()(true);
this->Pending.pop_back();
}
// Restore the old prefix.
this->Prefix.resize(Prefix.size() - 2);
};
if (FirstChild) {
Pending.push_back(std::move(dumpWithIndent));
} else {
Pending.back()(false);
Pending.back() = std::move(dumpWithIndent);
}
FirstChild = false;
}
/// RAII helper: switches OS to the given color on entry and resets it on
/// exit, but only when color output is enabled.
class ColorScope {
ASTDumper &Dumper;
public:
ColorScope(ASTDumper &Dumper, TerminalColor Color)
: Dumper(Dumper) {
if (Dumper.ShowColors)
Dumper.OS.changeColor(Color.Color, Color.Bold);
}
~ColorScope() {
if (Dumper.ShowColors)
Dumper.OS.resetColor();
}
};
public:
ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
const SourceManager *SM)
: OS(OS), Traits(Traits), SM(SM), TopLevel(true), FirstChild(true),
LastLocFilename(""), LastLocLine(~0U), FC(nullptr),
ShowColors(SM && SM->getDiagnostics().getShowColors()) { }
ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
const SourceManager *SM, bool ShowColors)
: OS(OS), Traits(Traits), SM(SM), TopLevel(true), FirstChild(true),
// FC(nullptr) was missing here, leaving the member uninitialized in
// this constructor (the other constructor initializes it).
LastLocFilename(""), LastLocLine(~0U), FC(nullptr),
ShowColors(ShowColors) { }
// HLSL Change Start - filter implicit decls.
void setDumpImplicitTopLevelDecls(bool B) { DumpImplicitTopLevelDecls = B; }
// HLSL Change End - filter implicit decls.
void dumpDecl(const Decl *D);
void dumpStmt(const Stmt *S);
void dumpFullComment(const FullComment *C);
// Utilities
void dumpPointer(const void *Ptr);
void dumpSourceRange(SourceRange R);
void dumpLocation(SourceLocation Loc);
void dumpBareType(QualType T, bool Desugar = true);
void dumpType(QualType T);
void dumpTypeAsChild(QualType T);
void dumpTypeAsChild(const Type *T);
void dumpBareDeclRef(const Decl *Node);
void dumpDeclRef(const Decl *Node, const char *Label = nullptr);
void dumpName(const NamedDecl *D);
bool hasNodes(const DeclContext *DC);
void dumpDeclContext(const DeclContext *DC);
void dumpLookups(const DeclContext *DC, bool DumpDecls);
void dumpAttr(const Attr *A);
void dumpHLSLUnusualAnnotations(const llvm::ArrayRef<hlsl::UnusualAnnotation*> UA);
// C++ Utilities
void dumpAccessSpecifier(AccessSpecifier AS);
void dumpCXXCtorInitializer(const CXXCtorInitializer *Init);
void dumpTemplateParameters(const TemplateParameterList *TPL);
void dumpTemplateArgumentListInfo(const TemplateArgumentListInfo &TALI);
void dumpTemplateArgumentLoc(const TemplateArgumentLoc &A);
void dumpTemplateArgumentList(const TemplateArgumentList &TAL);
void dumpTemplateArgument(const TemplateArgument &A,
SourceRange R = SourceRange());
// Objective-C utilities.
void dumpObjCTypeParamList(const ObjCTypeParamList *typeParams);
// Types
void VisitComplexType(const ComplexType *T) {
dumpTypeAsChild(T->getElementType());
}
void VisitPointerType(const PointerType *T) {
dumpTypeAsChild(T->getPointeeType());
}
void VisitBlockPointerType(const BlockPointerType *T) {
dumpTypeAsChild(T->getPointeeType());
}
void VisitReferenceType(const ReferenceType *T) {
dumpTypeAsChild(T->getPointeeType());
}
void VisitRValueReferenceType(const ReferenceType *T) {
if (T->isSpelledAsLValue())
OS << " written as lvalue reference";
VisitReferenceType(T);
}
void VisitMemberPointerType(const MemberPointerType *T) {
dumpTypeAsChild(T->getClass());
dumpTypeAsChild(T->getPointeeType());
}
void VisitArrayType(const ArrayType *T) {
switch (T->getSizeModifier()) {
case ArrayType::Normal: break;
case ArrayType::Static: OS << " static"; break;
case ArrayType::Star: OS << " *"; break;
}
OS << " " << T->getIndexTypeQualifiers().getAsString();
dumpTypeAsChild(T->getElementType());
}
void VisitConstantArrayType(const ConstantArrayType *T) {
OS << " " << T->getSize();
VisitArrayType(T);
}
void VisitVariableArrayType(const VariableArrayType *T) {
OS << " ";
dumpSourceRange(T->getBracketsRange());
VisitArrayType(T);
dumpStmt(T->getSizeExpr());
}
void VisitDependentSizedArrayType(const DependentSizedArrayType *T) {
VisitArrayType(T);
OS << " ";
dumpSourceRange(T->getBracketsRange());
dumpStmt(T->getSizeExpr());
}
void VisitDependentSizedExtVectorType(
const DependentSizedExtVectorType *T) {
OS << " ";
dumpLocation(T->getAttributeLoc());
dumpTypeAsChild(T->getElementType());
dumpStmt(T->getSizeExpr());
}
void VisitVectorType(const VectorType *T) {
switch (T->getVectorKind()) {
case VectorType::GenericVector: break;
case VectorType::AltiVecVector: OS << " altivec"; break;
case VectorType::AltiVecPixel: OS << " altivec pixel"; break;
case VectorType::AltiVecBool: OS << " altivec bool"; break;
case VectorType::NeonVector: OS << " neon"; break;
case VectorType::NeonPolyVector: OS << " neon poly"; break;
}
OS << " " << T->getNumElements();
dumpTypeAsChild(T->getElementType());
}
void VisitFunctionType(const FunctionType *T) {
auto EI = T->getExtInfo();
if (EI.getNoReturn()) OS << " noreturn";
if (EI.getProducesResult()) OS << " produces_result";
if (EI.getHasRegParm()) OS << " regparm " << EI.getRegParm();
OS << " " << FunctionType::getNameForCallConv(EI.getCC());
dumpTypeAsChild(T->getReturnType());
}
void VisitFunctionProtoType(const FunctionProtoType *T) {
auto EPI = T->getExtProtoInfo();
if (EPI.HasTrailingReturn) OS << " trailing_return";
if (T->isConst()) OS << " const";
if (T->isVolatile()) OS << " volatile";
if (T->isRestrict()) OS << " restrict";
switch (EPI.RefQualifier) {
case RQ_None: break;
case RQ_LValue: OS << " &"; break;
case RQ_RValue: OS << " &&"; break;
}
// FIXME: Exception specification.
// FIXME: Consumed parameters.
VisitFunctionType(T);
for (QualType PT : T->getParamTypes())
dumpTypeAsChild(PT);
if (EPI.Variadic)
dumpChild([this] { OS << "..."; });
}
void VisitUnresolvedUsingType(const UnresolvedUsingType *T) {
dumpDeclRef(T->getDecl());
}
void VisitTypedefType(const TypedefType *T) {
dumpDeclRef(T->getDecl());
}
void VisitTypeOfExprType(const TypeOfExprType *T) {
dumpStmt(T->getUnderlyingExpr());
}
void VisitDecltypeType(const DecltypeType *T) {
dumpStmt(T->getUnderlyingExpr());
}
void VisitUnaryTransformType(const UnaryTransformType *T) {
switch (T->getUTTKind()) {
case UnaryTransformType::EnumUnderlyingType:
OS << " underlying_type";
break;
}
dumpTypeAsChild(T->getBaseType());
}
void VisitTagType(const TagType *T) {
dumpDeclRef(T->getDecl());
}
void VisitAttributedType(const AttributedType *T) {
// FIXME: AttrKind
dumpTypeAsChild(T->getModifiedType());
}
void VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
OS << " depth " << T->getDepth() << " index " << T->getIndex();
if (T->isParameterPack()) OS << " pack";
dumpDeclRef(T->getDecl());
}
void VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
dumpTypeAsChild(T->getReplacedParameter());
}
void VisitSubstTemplateTypeParmPackType(
const SubstTemplateTypeParmPackType *T) {
dumpTypeAsChild(T->getReplacedParameter());
dumpTemplateArgument(T->getArgumentPack());
}
void VisitAutoType(const AutoType *T) {
if (T->isDecltypeAuto()) OS << " decltype(auto)";
if (!T->isDeduced())
OS << " undeduced";
}
void VisitTemplateSpecializationType(const TemplateSpecializationType *T) {
if (T->isTypeAlias()) OS << " alias";
OS << " "; T->getTemplateName().dump(OS);
for (auto &Arg : *T)
dumpTemplateArgument(Arg);
if (T->isTypeAlias())
dumpTypeAsChild(T->getAliasedType());
}
void VisitInjectedClassNameType(const InjectedClassNameType *T) {
dumpDeclRef(T->getDecl());
}
void VisitObjCInterfaceType(const ObjCInterfaceType *T) {
dumpDeclRef(T->getDecl());
}
void VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
dumpTypeAsChild(T->getPointeeType());
}
void VisitAtomicType(const AtomicType *T) {
dumpTypeAsChild(T->getValueType());
}
void VisitAdjustedType(const AdjustedType *T) {
dumpTypeAsChild(T->getOriginalType());
}
void VisitPackExpansionType(const PackExpansionType *T) {
if (auto N = T->getNumExpansions()) OS << " expansions " << *N;
if (!T->isSugared())
dumpTypeAsChild(T->getPattern());
}
// FIXME: ElaboratedType, DependentNameType,
// DependentTemplateSpecializationType, ObjCObjectType
// Decls
void VisitLabelDecl(const LabelDecl *D);
void VisitTypedefDecl(const TypedefDecl *D);
void VisitEnumDecl(const EnumDecl *D);
void VisitRecordDecl(const RecordDecl *D);
void VisitEnumConstantDecl(const EnumConstantDecl *D);
void VisitIndirectFieldDecl(const IndirectFieldDecl *D);
void VisitFunctionDecl(const FunctionDecl *D);
void VisitFieldDecl(const FieldDecl *D);
void VisitVarDecl(const VarDecl *D);
void VisitFileScopeAsmDecl(const FileScopeAsmDecl *D);
void VisitImportDecl(const ImportDecl *D);
void VisitHLSLBufferDecl(const HLSLBufferDecl* D); // HLSL Change
// C++ Decls
void VisitNamespaceDecl(const NamespaceDecl *D);
void VisitUsingDirectiveDecl(const UsingDirectiveDecl *D);
void VisitNamespaceAliasDecl(const NamespaceAliasDecl *D);
void VisitTypeAliasDecl(const TypeAliasDecl *D);
void VisitTypeAliasTemplateDecl(const TypeAliasTemplateDecl *D);
void VisitCXXRecordDecl(const CXXRecordDecl *D);
void VisitStaticAssertDecl(const StaticAssertDecl *D);
template<typename SpecializationDecl>
void VisitTemplateDeclSpecialization(const SpecializationDecl *D,
bool DumpExplicitInst,
bool DumpRefOnly);
template<typename TemplateDecl>
void VisitTemplateDecl(const TemplateDecl *D, bool DumpExplicitInst);
void VisitFunctionTemplateDecl(const FunctionTemplateDecl *D);
void VisitClassTemplateDecl(const ClassTemplateDecl *D);
void VisitClassTemplateSpecializationDecl(
const ClassTemplateSpecializationDecl *D);
void VisitClassTemplatePartialSpecializationDecl(
const ClassTemplatePartialSpecializationDecl *D);
void VisitClassScopeFunctionSpecializationDecl(
const ClassScopeFunctionSpecializationDecl *D);
void VisitVarTemplateDecl(const VarTemplateDecl *D);
void VisitVarTemplateSpecializationDecl(
const VarTemplateSpecializationDecl *D);
void VisitVarTemplatePartialSpecializationDecl(
const VarTemplatePartialSpecializationDecl *D);
void VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D);
void VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D);
void VisitTemplateTemplateParmDecl(const TemplateTemplateParmDecl *D);
void VisitUsingDecl(const UsingDecl *D);
void VisitUnresolvedUsingTypenameDecl(const UnresolvedUsingTypenameDecl *D);
void VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D);
void VisitUsingShadowDecl(const UsingShadowDecl *D);
void VisitLinkageSpecDecl(const LinkageSpecDecl *D);
void VisitAccessSpecDecl(const AccessSpecDecl *D);
void VisitFriendDecl(const FriendDecl *D);
// ObjC Decls
void VisitObjCIvarDecl(const ObjCIvarDecl *D);
void VisitObjCMethodDecl(const ObjCMethodDecl *D);
void VisitObjCTypeParamDecl(const ObjCTypeParamDecl *D);
void VisitObjCCategoryDecl(const ObjCCategoryDecl *D);
void VisitObjCCategoryImplDecl(const ObjCCategoryImplDecl *D);
void VisitObjCProtocolDecl(const ObjCProtocolDecl *D);
void VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D);
void VisitObjCImplementationDecl(const ObjCImplementationDecl *D);
void VisitObjCCompatibleAliasDecl(const ObjCCompatibleAliasDecl *D);
void VisitObjCPropertyDecl(const ObjCPropertyDecl *D);
void VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D);
void VisitBlockDecl(const BlockDecl *D);
// Stmts.
void VisitStmt(const Stmt *Node);
void VisitDeclStmt(const DeclStmt *Node);
void VisitAttributedStmt(const AttributedStmt *Node);
void VisitLabelStmt(const LabelStmt *Node);
void VisitGotoStmt(const GotoStmt *Node);
void VisitCXXCatchStmt(const CXXCatchStmt *Node);
// Exprs
void VisitExpr(const Expr *Node);
void VisitCastExpr(const CastExpr *Node);
void VisitDeclRefExpr(const DeclRefExpr *Node);
void VisitPredefinedExpr(const PredefinedExpr *Node);
void VisitCharacterLiteral(const CharacterLiteral *Node);
void VisitIntegerLiteral(const IntegerLiteral *Node);
void VisitFloatingLiteral(const FloatingLiteral *Node);
void VisitStringLiteral(const StringLiteral *Str);
void VisitInitListExpr(const InitListExpr *ILE);
void VisitUnaryOperator(const UnaryOperator *Node);
void VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Node);
void VisitMemberExpr(const MemberExpr *Node);
void VisitExtVectorElementExpr(const ExtVectorElementExpr *Node);
void VisitExtMatrixElementExpr(const ExtMatrixElementExpr *Node); // HLSL Change
void VisitHLSLVectorElementExpr(const HLSLVectorElementExpr *Node); // HLSL Change
void VisitBinaryOperator(const BinaryOperator *Node);
void VisitCompoundAssignOperator(const CompoundAssignOperator *Node);
void VisitAddrLabelExpr(const AddrLabelExpr *Node);
void VisitBlockExpr(const BlockExpr *Node);
void VisitOpaqueValueExpr(const OpaqueValueExpr *Node);
// C++
void VisitCXXNamedCastExpr(const CXXNamedCastExpr *Node);
void VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *Node);
void VisitCXXThisExpr(const CXXThisExpr *Node);
void VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *Node);
void VisitCXXConstructExpr(const CXXConstructExpr *Node);
void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *Node);
void VisitCXXNewExpr(const CXXNewExpr *Node);
void VisitCXXDeleteExpr(const CXXDeleteExpr *Node);
void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Node);
void VisitExprWithCleanups(const ExprWithCleanups *Node);
void VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *Node);
void dumpCXXTemporary(const CXXTemporary *Temporary);
void VisitLambdaExpr(const LambdaExpr *Node) {
VisitExpr(Node);
dumpDecl(Node->getLambdaClass());
}
void VisitSizeOfPackExpr(const SizeOfPackExpr *Node);
// ObjC
void VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node);
void VisitObjCEncodeExpr(const ObjCEncodeExpr *Node);
void VisitObjCMessageExpr(const ObjCMessageExpr *Node);
void VisitObjCBoxedExpr(const ObjCBoxedExpr *Node);
void VisitObjCSelectorExpr(const ObjCSelectorExpr *Node);
void VisitObjCProtocolExpr(const ObjCProtocolExpr *Node);
void VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *Node);
void VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *Node);
void VisitObjCIvarRefExpr(const ObjCIvarRefExpr *Node);
void VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node);
// Comments.
const char *getCommandName(unsigned CommandID);
void dumpComment(const Comment *C);
// Inline comments.
void visitTextComment(const TextComment *C);
void visitInlineCommandComment(const InlineCommandComment *C);
void visitHTMLStartTagComment(const HTMLStartTagComment *C);
void visitHTMLEndTagComment(const HTMLEndTagComment *C);
// Block comments.
void visitBlockCommandComment(const BlockCommandComment *C);
void visitParamCommandComment(const ParamCommandComment *C);
void visitTParamCommandComment(const TParamCommandComment *C);
void visitVerbatimBlockComment(const VerbatimBlockComment *C);
void visitVerbatimBlockLineComment(const VerbatimBlockLineComment *C);
void visitVerbatimLineComment(const VerbatimLineComment *C);
};
}
//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//
// Print the node's address in the address color, preceded by a space.
void ASTDumper::dumpPointer(const void *Ptr) {
  ColorScope Color(*this, AddressColor);
  OS << ' ' << Ptr;
}
/// Print a source location, abbreviating parts that are unchanged since the
/// last location printed. Requires a SourceManager; without one this is a
/// no-op.
void ASTDumper::dumpLocation(SourceLocation Loc) {
  if (!SM)
    return;

  ColorScope Color(*this, LocationColor);
  SourceLocation SpellingLoc = SM->getSpellingLoc(Loc);

  // The general format we print out is filename:line:col, but we drop pieces
  // that haven't changed since the last loc printed.
  PresumedLoc PLoc = SM->getPresumedLoc(SpellingLoc);

  if (PLoc.isInvalid()) {
    OS << "<invalid sloc>";
    return;
  }

  if (strcmp(PLoc.getFilename(), LastLocFilename) != 0) {
    // New file: print the full filename:line:col and remember both parts.
    OS << PLoc.getFilename() << ':' << PLoc.getLine()
       << ':' << PLoc.getColumn();
    LastLocFilename = PLoc.getFilename();
    LastLocLine = PLoc.getLine();
  } else if (PLoc.getLine() != LastLocLine) {
    // Same file, new line: print "line:L:C" and remember the line.
    OS << "line" << ':' << PLoc.getLine()
       << ':' << PLoc.getColumn();
    LastLocLine = PLoc.getLine();
  } else {
    // Same file and line: only the column changed.
    OS << "col" << ':' << PLoc.getColumn();
  }
}
/// Print a source range as "<begin>" or "<begin, end>", collapsing the pair
/// when both endpoints are the same location.
void ASTDumper::dumpSourceRange(SourceRange R) {
  // Can't translate locations if a SourceManager isn't available.
  if (!SM)
    return;

  OS << " <";
  dumpLocation(R.getBegin());
  if (R.getBegin() != R.getEnd()) {
    OS << ", ";
    dumpLocation(R.getEnd());
  }
  OS << ">";

  // <t2.c:123:421[blah], t2.c:412:321>
}
/// Print a type in quotes; when \p Desugar is set and desugaring changes
/// the spelling, the (shallowly) desugared form is appended after a colon.
void ASTDumper::dumpBareType(QualType T, bool Desugar) {
  ColorScope Color(*this, TypeColor);

  SplitQualType T_split = T.split();
  OS << "'" << QualType::getAsString(T_split) << "'";

  if (Desugar && !T.isNull()) {
    // If the type is sugared, also dump a (shallow) desugared type.
    SplitQualType D_split = T.getSplitDesugaredType();
    if (T_split != D_split)
      OS << ":'" << QualType::getAsString(D_split) << "'";
  }
}
// Print a leading space followed by the (possibly desugared) type.
void ASTDumper::dumpType(QualType T) {
  OS << ' ';
  dumpBareType(T);
}
/// Dump a QualType as a child node. Unqualified types are forwarded
/// directly to the Type* overload; qualified types get an extra "QualType"
/// wrapper node showing the qualifiers, with the underlying type beneath.
void ASTDumper::dumpTypeAsChild(QualType T) {
  SplitQualType SQT = T.split();
  if (!SQT.Quals.hasQualifiers())
    return dumpTypeAsChild(SQT.Ty);

  dumpChild([this, T] {
    OS << "QualType";
    dumpPointer(T.getAsOpaquePtr());
    OS << " ";
    dumpBareType(T, false);
    OS << " " << T.split().Quals.getAsString();
    dumpTypeAsChild(T.split().Ty);
  });
}
/// Dump a Type node as a child: kind name, address, printed form, property
/// flags, then the kind-specific details via TypeVisitor, and finally the
/// single-step-desugared type as a child when desugaring changes anything.
void ASTDumper::dumpTypeAsChild(const Type *T) {
  dumpChild([this, T] {
    if (!T) {
      ColorScope Color(*this, NullColor);
      OS << "<<<NULL>>>";
      return;
    }

    {
      ColorScope Color(*this, TypeColor);
      OS << T->getTypeClassName() << "Type";
    }
    dumpPointer(T);
    OS << " ";
    dumpBareType(QualType(T, 0), false);

    QualType SingleStepDesugar =
        T->getLocallyUnqualifiedSingleStepDesugaredType();
    if (SingleStepDesugar != QualType(T, 0))
      OS << " sugar";
    if (T->isDependentType())
      OS << " dependent";
    else if (T->isInstantiationDependentType())
      OS << " instantiation_dependent";
    if (T->isVariablyModifiedType())
      OS << " variably_modified";
    if (T->containsUnexpandedParameterPack())
      OS << " contains_unexpanded_pack";
    if (T->isFromAST())
      OS << " imported";

    // Kind-specific extra output (element types, parameters, etc.).
    TypeVisitor<ASTDumper>::Visit(T);

    if (SingleStepDesugar != QualType(T, 0))
      dumpTypeAsChild(SingleStepDesugar);
  });
}
/// Print a short inline reference to a declaration: its kind name and
/// address, plus its name and type when it has them.
void ASTDumper::dumpBareDeclRef(const Decl *D) {
  {
    ColorScope Color(*this, DeclKindNameColor);
    OS << D->getDeclKindName();
  }
  dumpPointer(D);

  if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
    ColorScope Color(*this, DeclNameColor);
    OS << " '" << ND->getDeclName() << '\'';
  }

  if (const ValueDecl *VD = dyn_cast<ValueDecl>(D))
    dumpType(VD->getType());
}
/// Dump a reference to a declaration as a child node, optionally prefixed
/// by \p Label. Null declarations are silently skipped.
void ASTDumper::dumpDeclRef(const Decl *D, const char *Label) {
  if (!D)
    return;

  dumpChild([this, Label, D] {
    if (Label)
      OS << Label << ' ';
    dumpBareDeclRef(D);
  });
}
// Print the declaration's name (in the name color) when it has one;
// unnamed declarations produce no output.
void ASTDumper::dumpName(const NamedDecl *ND) {
  if (!ND->getDeclName())
    return;
  ColorScope Color(*this, DeclNameColor);
  OS << ' ' << ND->getNameAsString();
}
// A DeclContext has dumpable children if it already contains loaded decls
// or may still hold more in external (unloaded) storage.
bool ASTDumper::hasNodes(const DeclContext *DC) {
  if (!DC)
    return false;
  if (DC->hasExternalLexicalStorage())
    return true;
  return DC->noload_decls_begin() != DC->noload_decls_end();
}
/// Dump every already-loaded declaration in a context (skipping implicit
/// top-level decls when so configured), then a marker node if more decls
/// may exist in external storage.
void ASTDumper::dumpDeclContext(const DeclContext *DC) {
  if (!DC)
    return;

  // HLSL Change Starts: Don't dump implicit decls unless requested.
  // Filtering only applies at translation-unit scope; nested contexts
  // always dump their implicit members.
  bool ShouldDumpImplicit = DumpImplicitTopLevelDecls || !DC->isTranslationUnit();
  for (auto *D : DC->noload_decls())
    if (ShouldDumpImplicit || !D->isImplicit())
      dumpDecl(D);
  // HLSL Change Ends

  if (DC->hasExternalLexicalStorage()) {
    dumpChild([this] {
      ColorScope Color(*this, UndeserializedColor);
      OS << "<undeserialized declarations>";
    });
  }
}
/// Dump the name-lookup table of a DeclContext: one child per lookup name,
/// each listing the decls it resolves to. With \p DumpDecls set, the full
/// redeclaration chain of every result is dumped as well (earliest first).
void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
  dumpChild([this, DC, DumpDecls] {
    OS << "StoredDeclsMap ";
    dumpBareDeclRef(cast<Decl>(DC));

    // Lookups live on the primary context; note it when it differs.
    const DeclContext *Primary = DC->getPrimaryContext();
    if (Primary != DC) {
      OS << " primary";
      dumpPointer(cast<Decl>(Primary));
    }

    bool HasUndeserializedLookups = Primary->hasExternalVisibleStorage();

    DeclContext::all_lookups_iterator I = Primary->noload_lookups_begin(),
                                      E = Primary->noload_lookups_end();
    while (I != E) {
      DeclarationName Name = I.getLookupName();
      DeclContextLookupResult R = *I++;

      dumpChild([this, Name, R, DumpDecls] {
        OS << "DeclarationName ";
        {
          ColorScope Color(*this, DeclNameColor);
          OS << '\'' << Name << '\'';
        }

        for (DeclContextLookupResult::iterator RI = R.begin(), RE = R.end();
             RI != RE; ++RI) {
          dumpChild([this, RI, DumpDecls] {
            dumpBareDeclRef(*RI);

            if ((*RI)->isHidden())
              OS << " hidden";

            // If requested, dump the redecl chain for this lookup.
            if (DumpDecls) {
              // Dump earliest decl first.
              std::function<void(Decl *)> DumpWithPrev = [&](Decl *D) {
                if (Decl *Prev = D->getPreviousDecl())
                  DumpWithPrev(Prev);
                dumpDecl(D);
              };
              DumpWithPrev(*RI);
            }
          });
        }
      });
    }

    if (HasUndeserializedLookups) {
      dumpChild([this] {
        ColorScope Color(*this, UndeserializedColor);
        OS << "<undeserialized lookups>";
      });
    }
  });
}
/// Dump an attribute as a child node: "<Kind>Attr", address, source range,
/// inheritance/implicitness flags, then the attribute-specific fields
/// emitted by the tablegen-generated AttrDump.inc code.
void ASTDumper::dumpAttr(const Attr *A) {
  dumpChild([this, A] {
    {
      ColorScope Color(*this, AttrColor);

      // Expands to one case per attribute kind, printing its name.
      switch (A->getKind()) {
#define ATTR(X) case attr::X: OS << #X; break;
#include "clang/Basic/AttrList.inc"
      default:
        llvm_unreachable("unexpected attribute kind");
      }
      OS << "Attr";
    }
    dumpPointer(A);
    dumpSourceRange(A->getRange());
    if (A->isInherited())
      OS << " Inherited";
    if (A->isImplicit())
      OS << " Implicit";
    // Attribute-specific argument dumping, generated by tablegen.
#include "clang/AST/AttrDump.inc"
  });
}
// Overload set used by dumpPreviousDecl: the variadic overload is the
// fallback for decl classes that are neither Mergeable nor Redeclarable
// and prints nothing.
static void dumpPreviousDeclImpl(raw_ostream &OS, ...) {}

// Mergeable decls: print the first declaration in the chain, if distinct.
template<typename T>
static void dumpPreviousDeclImpl(raw_ostream &OS, const Mergeable<T> *D) {
  const T *First = D->getFirstDecl();
  if (First != D)
    OS << " first " << First;
}

// Redeclarable decls: print the immediately previous declaration, if any.
template<typename T>
static void dumpPreviousDeclImpl(raw_ostream &OS, const Redeclarable<T> *D) {
  const T *Prev = D->getPreviousDecl();
  if (Prev)
    OS << " prev " << Prev;
}
/// Dump the previous declaration in the redeclaration chain for a declaration,
/// if any.
///
/// Casts to the most-derived decl class so that overload resolution picks
/// the Mergeable/Redeclarable dumpPreviousDeclImpl overload (or the no-op
/// variadic fallback) appropriate for that class.
static void dumpPreviousDecl(raw_ostream &OS, const Decl *D) {
  switch (D->getKind()) {
#define DECL(DERIVED, BASE) \
  case Decl::DERIVED: \
    return dumpPreviousDeclImpl(OS, cast<DERIVED##Decl>(D));
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
  }
  llvm_unreachable("Decl that isn't part of DeclNodes.inc!");
}
//===----------------------------------------------------------------------===//
// C++ Utilities
//===----------------------------------------------------------------------===//
// Print the textual form of an access specifier; AS_none prints nothing.
void ASTDumper::dumpAccessSpecifier(AccessSpecifier AS) {
  if (AS == AS_public)
    OS << "public";
  else if (AS == AS_protected)
    OS << "protected";
  else if (AS == AS_private)
    OS << "private";
}
// Dump a constructor initializer as a child node: the initialized entity
// (member, base class, or delegated-to type) followed by the initializing
// expression.
void ASTDumper::dumpCXXCtorInitializer(const CXXCtorInitializer *Init) {
  dumpChild([this, Init] {
    OS << "CXXCtorInitializer";
    if (Init->isAnyMemberInitializer()) {
      OS << ' ';
      dumpBareDeclRef(Init->getAnyMember());
    } else if (Init->isBaseInitializer()) {
      dumpType(QualType(Init->getBaseClass(), 0));
    } else if (Init->isDelegatingInitializer()) {
      dumpType(Init->getTypeSourceInfo()->getType());
    } else {
      llvm_unreachable("Unknown initializer type");
    }
    dumpStmt(Init->getInit());
  });
}
void ASTDumper::dumpTemplateParameters(const TemplateParameterList *TPL) {
if (!TPL)
return;
for (TemplateParameterList::const_iterator I = TPL->begin(), E = TPL->end();
I != E; ++I)
dumpDecl(*I);
}
void ASTDumper::dumpTemplateArgumentListInfo(
const TemplateArgumentListInfo &TALI) {
for (unsigned i = 0, e = TALI.size(); i < e; ++i)
dumpTemplateArgumentLoc(TALI[i]);
}
void ASTDumper::dumpTemplateArgumentLoc(const TemplateArgumentLoc &A) {
dumpTemplateArgument(A.getArgument(), A.getSourceRange());
}
// Dump each argument of a resolved template argument list, in order.
void ASTDumper::dumpTemplateArgumentList(const TemplateArgumentList &TAL) {
  const unsigned NumArgs = TAL.size();
  for (unsigned Idx = 0; Idx != NumArgs; ++Idx)
    dumpTemplateArgument(TAL[Idx]);
}
// Dump one template argument as a child node; R, when valid, is the
// as-written source range for the argument.
void ASTDumper::dumpTemplateArgument(const TemplateArgument &A, SourceRange R) {
  dumpChild([this, A, R] {
    OS << "TemplateArgument";
    if (R.isValid())
      dumpSourceRange(R);
    switch (A.getKind()) {
    case TemplateArgument::Null:
      OS << " null";
      break;
    case TemplateArgument::Type:
      OS << " type";
      dumpType(A.getAsType());
      break;
    case TemplateArgument::Declaration:
      OS << " decl";
      dumpDeclRef(A.getAsDecl());
      break;
    case TemplateArgument::NullPtr:
      OS << " nullptr";
      break;
    case TemplateArgument::Integral:
      OS << " integral " << A.getAsIntegral();
      break;
    case TemplateArgument::Template:
      OS << " template ";
      A.getAsTemplate().dump(OS);
      break;
    case TemplateArgument::TemplateExpansion:
      OS << " template expansion";
      A.getAsTemplateOrTemplatePattern().dump(OS);
      break;
    case TemplateArgument::Expression:
      OS << " expr";
      dumpStmt(A.getAsExpr());
      break;
    case TemplateArgument::Pack:
      OS << " pack";
      // Recursively dump each element of the pack (no source range).
      for (TemplateArgument::pack_iterator I = A.pack_begin(), E = A.pack_end();
           I != E; ++I)
        dumpTemplateArgument(*I);
      break;
    }
  });
}
//===----------------------------------------------------------------------===//
// Objective-C Utilities
//===----------------------------------------------------------------------===//
// Dump every parameter of an Objective-C type parameter list, if present.
void ASTDumper::dumpObjCTypeParamList(const ObjCTypeParamList *typeParams) {
  if (!typeParams)
    return;
  for (auto *Param : *typeParams)
    dumpDecl(Param);
}
//===----------------------------------------------------------------------===//
// Decl dumping methods.
//===----------------------------------------------------------------------===//
// HLSL Change Begins
void ASTDumper::dumpHLSLUnusualAnnotations(const ArrayRef<hlsl::UnusualAnnotation*> UA)
{
for (auto It = UA.begin(), E = UA.end(); It != E; ++It)
{
dumpChild([this, It] {
{
ColorScope Color(*this, AttrColor);
switch ((*It)->getKind())
{
case hlsl::UnusualAnnotation::UA_ConstantPacking:
OS << "ConstantPacking";
break;
case hlsl::UnusualAnnotation::UA_RegisterAssignment:
OS << "RegisterAssignment";
break;
case hlsl::UnusualAnnotation::UA_SemanticDecl:
OS << "SemanticDecl";
break;
case hlsl::UnusualAnnotation::UA_PayloadAccessQualifier:
OS << "PayloadAccessQualifier";
break;
}
}
dumpPointer(It);
dumpSourceRange(SourceRange((*It)->Loc));
switch ((*It)->getKind())
{
case hlsl::UnusualAnnotation::UA_ConstantPacking: {
const hlsl::ConstantPacking *constantPacking =
cast<hlsl::ConstantPacking>(*It);
OS << " packoffset(c";
OS << constantPacking->Subcomponent;
OS << ".";
const char *xyzw[4] = {"x", "y", "z", "w"};
if (constantPacking->ComponentOffset < 4)
OS << xyzw[constantPacking->ComponentOffset];
else
OS << "<invalid>";
OS << ")";
if (!constantPacking->IsValid)
OS << " invalid";
break;
}
case hlsl::UnusualAnnotation::UA_RegisterAssignment: {
const hlsl::RegisterAssignment *registerAssignment =
cast<hlsl::RegisterAssignment>(*It);
OS << " register(";
if (!registerAssignment->ShaderProfile.empty())
OS << registerAssignment->ShaderProfile << ", ";
bool needsComma = false;
if (!registerAssignment->isSpaceOnly()) {
if (!registerAssignment->RegisterType)
OS << "invalid";
else
OS << StringRef(®isterAssignment->RegisterType, 1);
OS << registerAssignment->RegisterNumber +
registerAssignment->RegisterOffset;
needsComma = true;
}
if (registerAssignment->RegisterSpace.hasValue()) {
if (needsComma)
OS << ", ";
OS << "space" << registerAssignment->RegisterSpace.getValue();
}
OS << ")";
if (!registerAssignment->IsValid)
OS << " invalid";
break;
}
case hlsl::UnusualAnnotation::UA_SemanticDecl: {
const hlsl::SemanticDecl *semanticDecl = cast<hlsl::SemanticDecl>(*It);
OS << " \"" << semanticDecl->SemanticName << "\"";
break;
}
case hlsl::UnusualAnnotation::UA_PayloadAccessQualifier: {
const hlsl::PayloadAccessAnnotation *annotation =
cast<hlsl::PayloadAccessAnnotation>(*It);
OS << " "
<< (annotation->qualifier == hlsl::DXIL::PayloadAccessQualifier::Read
? "read"
: "write")
<< "(";
StringRef shaderStageNames[] = {"caller", "closesthit", "miss", "anyhit"};
for (unsigned i = 0; i < annotation->ShaderStages.size(); ++i) {
OS << shaderStageNames[static_cast<unsigned>(
annotation->ShaderStages[i])];
if (i < annotation->ShaderStages.size() - 1)
OS << ", ";
}
OS << ")";
break;
}
}
});
}
}
// HLSL Change Ends
// Dump a declaration node: kind name, pointer, parent context when it differs
// from the lexical one, owning module, visibility/usage flags, kind-specific
// details (via ConstDeclVisitor), attributes, HLSL unusual annotations, the
// attached doc comment, and — for non-function contexts — child declarations.
void ASTDumper::dumpDecl(const Decl *D) {
  dumpChild([this, D] {
    if (!D) {
      ColorScope Color(*this, NullColor);
      OS << "<<<NULL>>>";
      return;
    }
    {
      ColorScope Color(*this, DeclKindNameColor);
      OS << D->getDeclKindName() << "Decl";
    }
    dumpPointer(D);
    // Show the semantic parent when it differs from the lexical parent.
    if (D->getLexicalDeclContext() != D->getDeclContext())
      OS << " parent " << cast<Decl>(D->getDeclContext());
    // HLSL Change Begin - dump HLSLBufferDecl
    if (const HLSLBufferDecl *bufDecl =
            dyn_cast_or_null<HLSLBufferDecl>(D->getDeclContext())) {
      OS << " parent " << bufDecl->getDeclKindName();
      dumpPointer(bufDecl);
    }
    // HLSL Change End
    dumpPreviousDecl(OS, D);
    dumpSourceRange(D->getSourceRange());
    OS << ' ';
    dumpLocation(D->getLocation());
    // Owning module, for decls imported from or local to a module.
    if (Module *M = D->getImportedOwningModule())
      OS << " in " << M->getFullModuleName();
    else if (Module *M = D->getLocalOwningModule())
      OS << " in (local) " << M->getFullModuleName();
    if (auto *ND = dyn_cast<NamedDecl>(D))
      for (Module *M : D->getASTContext().getModulesWithMergedDefinition(
               const_cast<NamedDecl *>(ND)))
        dumpChild([this, M] { OS << "also in " << M->getFullModuleName(); });
    if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
      if (ND->isHidden())
        OS << " hidden";
    if (D->isImplicit())
      OS << " implicit";
    if (D->isUsed())
      OS << " used";
    else if (D->isThisDeclarationReferenced())
      OS << " referenced";
    if (D->isInvalidDecl())
      OS << " invalid";
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
      if (FD->isConstexpr())
        OS << " constexpr";
    // Kind-specific details: dispatches to the VisitFooDecl overloads.
    ConstDeclVisitor<ASTDumper>::Visit(D);
    for (Decl::attr_iterator I = D->attr_begin(), E = D->attr_end(); I != E;
         ++I)
      dumpAttr(*I);
    // HLSL Change Begins
    if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
      dumpHLSLUnusualAnnotations(const_cast<NamedDecl*>(ND)->getUnusualAnnotations());
    // HLSL Change Ends
    if (const FullComment *Comment =
            D->getASTContext().getLocalCommentForDeclUncached(D))
      dumpFullComment(Comment);
    // Decls within functions are visited by the body.
    if (!isa<FunctionDecl>(*D) && !isa<ObjCMethodDecl>(*D) &&
        hasNodes(dyn_cast<DeclContext>(D)))
      dumpDeclContext(cast<DeclContext>(D));
  });
}
void ASTDumper::VisitLabelDecl(const LabelDecl *D) {
dumpName(D);
}
void ASTDumper::VisitTypedefDecl(const TypedefDecl *D) {
dumpName(D);
dumpType(D->getUnderlyingType());
if (D->isModulePrivate())
OS << " __module_private__";
}
// HLSL Change Starts
// HLSL: print whether this buffer is a cbuffer or tbuffer, then its name.
void ASTDumper::VisitHLSLBufferDecl(const HLSLBufferDecl* D) {
  OS << (D->isCBuffer() ? " cbuffer" : " tbuffer");
  dumpName(D);
}
// HLSL Change Ends
// Dump an enum: scoped-ness keyword, name, module-private flag, and the
// fixed underlying type when one was written.
void ASTDumper::VisitEnumDecl(const EnumDecl *D) {
  if (D->isScoped())
    OS << (D->isScopedUsingClassTag() ? " class" : " struct");
  dumpName(D);
  if (D->isModulePrivate())
    OS << " __module_private__";
  if (D->isFixed())
    dumpType(D->getIntegerType());
}
// Dump a record: tag kind (struct/class/union), name, and flags. Streaming
// an empty string prints nothing, so the ternaries match the original ifs.
void ASTDumper::VisitRecordDecl(const RecordDecl *D) {
  OS << ' ' << D->getKindName();
  dumpName(D);
  OS << (D->isModulePrivate() ? " __module_private__" : "")
     << (D->isCompleteDefinition() ? " definition" : "");
}
void ASTDumper::VisitEnumConstantDecl(const EnumConstantDecl *D) {
dumpName(D);
dumpType(D->getType());
if (const Expr *Init = D->getInitExpr())
dumpStmt(Init);
}
void ASTDumper::VisitIndirectFieldDecl(const IndirectFieldDecl *D) {
dumpName(D);
dumpType(D->getType());
for (auto *Child : D->chain())
dumpDeclRef(Child);
}
// Dump a function: name, type, specifiers, unresolved exception-spec state,
// template specialization arguments, prototype-scope declarations,
// parameters, constructor initializers, and the body when this declaration
// provides one.
void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
  dumpName(D);
  dumpType(D->getType());
  StorageClass SC = D->getStorageClass();
  if (SC != SC_None)
    OS << ' ' << VarDecl::getStorageClassSpecifierString(SC);
  if (D->isInlineSpecified())
    OS << " inline";
  if (D->isVirtualAsWritten())
    OS << " virtual";
  if (D->isModulePrivate())
    OS << " __module_private__";
  if (D->isPure())
    OS << " pure";
  else if (D->isDeletedAsWritten())
    OS << " delete";
  // Note exception specifications that have not been resolved yet
  // (deferred parse or pending template instantiation).
  if (const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>()) {
    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    switch (EPI.ExceptionSpec.Type) {
    default: break;
    case EST_Unevaluated:
      OS << " noexcept-unevaluated " << EPI.ExceptionSpec.SourceDecl;
      break;
    case EST_Uninstantiated:
      OS << " noexcept-uninstantiated " << EPI.ExceptionSpec.SourceTemplate;
      break;
    }
  }
  if (const FunctionTemplateSpecializationInfo *FTSI =
          D->getTemplateSpecializationInfo())
    dumpTemplateArgumentList(*FTSI->TemplateArguments);
  for (ArrayRef<NamedDecl *>::iterator
       I = D->getDeclsInPrototypeScope().begin(),
       E = D->getDeclsInPrototypeScope().end(); I != E; ++I)
    dumpDecl(*I);
  // A declaration may report parameters while its ParmVarDecl array is null;
  // flag that case explicitly instead of crashing on iteration.
  if (!D->param_begin() && D->getNumParams())
    dumpChild(
        [this, D] { OS << "<<NULL params x " << D->getNumParams() << ">>"; });
  else
    for (FunctionDecl::param_const_iterator I = D->param_begin(),
                                            E = D->param_end();
         I != E; ++I)
      dumpDecl(*I);
  if (const CXXConstructorDecl *C = dyn_cast<CXXConstructorDecl>(D))
    for (CXXConstructorDecl::init_const_iterator I = C->init_begin(),
                                                 E = C->init_end();
         I != E; ++I)
      dumpCXXCtorInitializer(*I);
  if (D->doesThisDeclarationHaveABody())
    dumpStmt(D->getBody());
}
void ASTDumper::VisitFieldDecl(const FieldDecl *D) {
dumpName(D);
dumpType(D->getType());
if (D->isMutable())
OS << " mutable";
if (D->isModulePrivate())
OS << " __module_private__";
if (D->isBitField())
dumpStmt(D->getBitWidth());
if (Expr *Init = D->getInClassInitializer())
dumpStmt(Init);
}
// Dump a variable: name, type, storage class, TLS kind, module/NRVO flags,
// and the initializer — tagged with how it was written (=, (), or {}).
void ASTDumper::VisitVarDecl(const VarDecl *D) {
  dumpName(D);
  dumpType(D->getType());
  StorageClass SC = D->getStorageClass();
  if (SC != SC_None)
    OS << ' ' << VarDecl::getStorageClassSpecifierString(SC);
  switch (D->getTLSKind()) {
  case VarDecl::TLS_None: break;
  case VarDecl::TLS_Static: OS << " tls"; break;
  case VarDecl::TLS_Dynamic: OS << " tls_dynamic"; break;
  }
  if (D->isModulePrivate())
    OS << " __module_private__";
  if (D->isNRVOVariable())
    OS << " nrvo";
  if (D->hasInit()) {
    switch (D->getInitStyle()) {
    case VarDecl::CInit: OS << " cinit"; break;
    case VarDecl::CallInit: OS << " callinit"; break;
    case VarDecl::ListInit: OS << " listinit"; break;
    }
    dumpStmt(D->getInit());
  }
}
void ASTDumper::VisitFileScopeAsmDecl(const FileScopeAsmDecl *D) {
dumpStmt(D->getAsmString());
}
void ASTDumper::VisitImportDecl(const ImportDecl *D) {
OS << ' ' << D->getImportedModule()->getFullModuleName();
}
//===----------------------------------------------------------------------===//
// C++ Declarations
//===----------------------------------------------------------------------===//
void ASTDumper::VisitNamespaceDecl(const NamespaceDecl *D) {
dumpName(D);
if (D->isInline())
OS << " inline";
if (!D->isOriginalNamespace())
dumpDeclRef(D->getOriginalNamespace(), "original");
}
void ASTDumper::VisitUsingDirectiveDecl(const UsingDirectiveDecl *D) {
OS << ' ';
dumpBareDeclRef(D->getNominatedNamespace());
}
void ASTDumper::VisitNamespaceAliasDecl(const NamespaceAliasDecl *D) {
dumpName(D);
dumpDeclRef(D->getAliasedNamespace());
}
void ASTDumper::VisitTypeAliasDecl(const TypeAliasDecl *D) {
dumpName(D);
dumpType(D->getUnderlyingType());
}
void ASTDumper::VisitTypeAliasTemplateDecl(const TypeAliasTemplateDecl *D) {
dumpName(D);
dumpTemplateParameters(D->getTemplateParameters());
dumpDecl(D->getTemplatedDecl());
}
void ASTDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
VisitRecordDecl(D);
if (!D->isCompleteDefinition())
return;
for (const auto &I : D->bases()) {
dumpChild([this, I] {
if (I.isVirtual())
OS << "virtual ";
dumpAccessSpecifier(I.getAccessSpecifier());
dumpType(I.getType());
if (I.isPackExpansion())
OS << "...";
});
}
}
void ASTDumper::VisitStaticAssertDecl(const StaticAssertDecl *D) {
dumpStmt(D->getAssertExpr());
dumpStmt(D->getMessage());
}
template<typename SpecializationDecl>
void ASTDumper::VisitTemplateDeclSpecialization(const SpecializationDecl *D,
bool DumpExplicitInst,
bool DumpRefOnly) {
bool DumpedAny = false;
for (auto *RedeclWithBadType : D->redecls()) {
// FIXME: The redecls() range sometimes has elements of a less-specific
// type. (In particular, ClassTemplateSpecializationDecl::redecls() gives
// us TagDecls, and should give CXXRecordDecls).
auto *Redecl = dyn_cast<SpecializationDecl>(RedeclWithBadType);
if (!Redecl) {
// Found the injected-class-name for a class template. This will be dumped
// as part of its surrounding class so we don't need to dump it here.
assert(isa<CXXRecordDecl>(RedeclWithBadType) &&
"expected an injected-class-name");
continue;
}
switch (Redecl->getTemplateSpecializationKind()) {
case TSK_ExplicitInstantiationDeclaration:
case TSK_ExplicitInstantiationDefinition:
if (!DumpExplicitInst)
break;
LLVM_FALLTHROUGH; // HLSL Change
case TSK_Undeclared:
case TSK_ImplicitInstantiation:
if (DumpRefOnly)
dumpDeclRef(Redecl);
else
dumpDecl(Redecl);
DumpedAny = true;
break;
case TSK_ExplicitSpecialization:
break;
}
}
// Ensure we dump at least one decl for each specialization.
if (!DumpedAny)
dumpDeclRef(D);
}
template<typename TemplateDecl>
void ASTDumper::VisitTemplateDecl(const TemplateDecl *D,
bool DumpExplicitInst) {
dumpName(D);
dumpTemplateParameters(D->getTemplateParameters());
dumpDecl(D->getTemplatedDecl());
for (auto *Child : D->specializations())
VisitTemplateDeclSpecialization(Child, DumpExplicitInst,
!D->isCanonicalDecl());
}
void ASTDumper::VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) {
// FIXME: We don't add a declaration of a function template specialization
// to its context when it's explicitly instantiated, so dump explicit
// instantiations when we dump the template itself.
VisitTemplateDecl(D, true);
}
void ASTDumper::VisitClassTemplateDecl(const ClassTemplateDecl *D) {
VisitTemplateDecl(D, false);
}
void ASTDumper::VisitClassTemplateSpecializationDecl(
const ClassTemplateSpecializationDecl *D) {
VisitCXXRecordDecl(D);
dumpTemplateArgumentList(D->getTemplateArgs());
}
void ASTDumper::VisitClassTemplatePartialSpecializationDecl(
const ClassTemplatePartialSpecializationDecl *D) {
VisitClassTemplateSpecializationDecl(D);
dumpTemplateParameters(D->getTemplateParameters());
}
void ASTDumper::VisitClassScopeFunctionSpecializationDecl(
const ClassScopeFunctionSpecializationDecl *D) {
dumpDeclRef(D->getSpecialization());
if (D->hasExplicitTemplateArgs())
dumpTemplateArgumentListInfo(D->templateArgs());
}
void ASTDumper::VisitVarTemplateDecl(const VarTemplateDecl *D) {
VisitTemplateDecl(D, false);
}
void ASTDumper::VisitVarTemplateSpecializationDecl(
const VarTemplateSpecializationDecl *D) {
dumpTemplateArgumentList(D->getTemplateArgs());
VisitVarDecl(D);
}
void ASTDumper::VisitVarTemplatePartialSpecializationDecl(
const VarTemplatePartialSpecializationDecl *D) {
dumpTemplateParameters(D->getTemplateParameters());
VisitVarTemplateSpecializationDecl(D);
}
void ASTDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
if (D->wasDeclaredWithTypename())
OS << " typename";
else
OS << " class";
if (D->isParameterPack())
OS << " ...";
dumpName(D);
if (D->hasDefaultArgument())
dumpTemplateArgument(D->getDefaultArgument());
}
void ASTDumper::VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D) {
dumpType(D->getType());
if (D->isParameterPack())
OS << " ...";
dumpName(D);
if (D->hasDefaultArgument())
dumpTemplateArgument(D->getDefaultArgument());
}
void ASTDumper::VisitTemplateTemplateParmDecl(
const TemplateTemplateParmDecl *D) {
if (D->isParameterPack())
OS << " ...";
dumpName(D);
dumpTemplateParameters(D->getTemplateParameters());
if (D->hasDefaultArgument())
dumpTemplateArgumentLoc(D->getDefaultArgument());
}
void ASTDumper::VisitUsingDecl(const UsingDecl *D) {
OS << ' ';
D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
OS << D->getNameAsString();
}
void ASTDumper::VisitUnresolvedUsingTypenameDecl(
const UnresolvedUsingTypenameDecl *D) {
OS << ' ';
D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
OS << D->getNameAsString();
}
void ASTDumper::VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D) {
OS << ' ';
D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
OS << D->getNameAsString();
dumpType(D->getType());
}
void ASTDumper::VisitUsingShadowDecl(const UsingShadowDecl *D) {
OS << ' ';
dumpBareDeclRef(D->getTargetDecl());
}
void ASTDumper::VisitLinkageSpecDecl(const LinkageSpecDecl *D) {
switch (D->getLanguage()) {
case LinkageSpecDecl::lang_c: OS << " C"; break;
case LinkageSpecDecl::lang_cxx: OS << " C++"; break;
}
}
void ASTDumper::VisitAccessSpecDecl(const AccessSpecDecl *D) {
OS << ' ';
dumpAccessSpecifier(D->getAccess());
}
void ASTDumper::VisitFriendDecl(const FriendDecl *D) {
if (TypeSourceInfo *T = D->getFriendType())
dumpType(T->getType());
else
dumpDecl(D->getFriendDecl());
}
//===----------------------------------------------------------------------===//
// Obj-C Declarations
//===----------------------------------------------------------------------===//
void ASTDumper::VisitObjCIvarDecl(const ObjCIvarDecl *D) {
dumpName(D);
dumpType(D->getType());
if (D->getSynthesize())
OS << " synthesize";
switch (D->getAccessControl()) {
case ObjCIvarDecl::None:
OS << " none";
break;
case ObjCIvarDecl::Private:
OS << " private";
break;
case ObjCIvarDecl::Protected:
OS << " protected";
break;
case ObjCIvarDecl::Public:
OS << " public";
break;
case ObjCIvarDecl::Package:
OS << " package";
break;
}
}
void ASTDumper::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
if (D->isInstanceMethod())
OS << " -";
else
OS << " +";
dumpName(D);
dumpType(D->getReturnType());
if (D->isThisDeclarationADefinition()) {
dumpDeclContext(D);
} else {
for (ObjCMethodDecl::param_const_iterator I = D->param_begin(),
E = D->param_end();
I != E; ++I)
dumpDecl(*I);
}
if (D->isVariadic())
dumpChild([this] { OS << "..."; });
if (D->hasBody())
dumpStmt(D->getBody());
}
void ASTDumper::VisitObjCTypeParamDecl(const ObjCTypeParamDecl *D) {
dumpName(D);
switch (D->getVariance()) {
case ObjCTypeParamVariance::Invariant:
break;
case ObjCTypeParamVariance::Covariant:
OS << " covariant";
break;
case ObjCTypeParamVariance::Contravariant:
OS << " contravariant";
break;
}
if (D->hasExplicitBound())
OS << " bounded";
dumpType(D->getUnderlyingType());
}
void ASTDumper::VisitObjCCategoryDecl(const ObjCCategoryDecl *D) {
dumpName(D);
dumpDeclRef(D->getClassInterface());
dumpObjCTypeParamList(D->getTypeParamList());
dumpDeclRef(D->getImplementation());
for (ObjCCategoryDecl::protocol_iterator I = D->protocol_begin(),
E = D->protocol_end();
I != E; ++I)
dumpDeclRef(*I);
}
void ASTDumper::VisitObjCCategoryImplDecl(const ObjCCategoryImplDecl *D) {
dumpName(D);
dumpDeclRef(D->getClassInterface());
dumpDeclRef(D->getCategoryDecl());
}
void ASTDumper::VisitObjCProtocolDecl(const ObjCProtocolDecl *D) {
dumpName(D);
for (auto *Child : D->protocols())
dumpDeclRef(Child);
}
void ASTDumper::VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) {
dumpName(D);
dumpObjCTypeParamList(D->getTypeParamListAsWritten());
dumpDeclRef(D->getSuperClass(), "super");
dumpDeclRef(D->getImplementation());
for (auto *Child : D->protocols())
dumpDeclRef(Child);
}
void ASTDumper::VisitObjCImplementationDecl(const ObjCImplementationDecl *D) {
dumpName(D);
dumpDeclRef(D->getSuperClass(), "super");
dumpDeclRef(D->getClassInterface());
for (ObjCImplementationDecl::init_const_iterator I = D->init_begin(),
E = D->init_end();
I != E; ++I)
dumpCXXCtorInitializer(*I);
}
void ASTDumper::VisitObjCCompatibleAliasDecl(const ObjCCompatibleAliasDecl *D) {
dumpName(D);
dumpDeclRef(D->getClassInterface());
}
void ASTDumper::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
dumpName(D);
dumpType(D->getType());
if (D->getPropertyImplementation() == ObjCPropertyDecl::Required)
OS << " required";
else if (D->getPropertyImplementation() == ObjCPropertyDecl::Optional)
OS << " optional";
ObjCPropertyDecl::PropertyAttributeKind Attrs = D->getPropertyAttributes();
if (Attrs != ObjCPropertyDecl::OBJC_PR_noattr) {
if (Attrs & ObjCPropertyDecl::OBJC_PR_readonly)
OS << " readonly";
if (Attrs & ObjCPropertyDecl::OBJC_PR_assign)
OS << " assign";
if (Attrs & ObjCPropertyDecl::OBJC_PR_readwrite)
OS << " readwrite";
if (Attrs & ObjCPropertyDecl::OBJC_PR_retain)
OS << " retain";
if (Attrs & ObjCPropertyDecl::OBJC_PR_copy)
OS << " copy";
if (Attrs & ObjCPropertyDecl::OBJC_PR_nonatomic)
OS << " nonatomic";
if (Attrs & ObjCPropertyDecl::OBJC_PR_atomic)
OS << " atomic";
if (Attrs & ObjCPropertyDecl::OBJC_PR_weak)
OS << " weak";
if (Attrs & ObjCPropertyDecl::OBJC_PR_strong)
OS << " strong";
if (Attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained)
OS << " unsafe_unretained";
if (Attrs & ObjCPropertyDecl::OBJC_PR_getter)
dumpDeclRef(D->getGetterMethodDecl(), "getter");
if (Attrs & ObjCPropertyDecl::OBJC_PR_setter)
dumpDeclRef(D->getSetterMethodDecl(), "setter");
}
}
void ASTDumper::VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D) {
dumpName(D->getPropertyDecl());
if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize)
OS << " synthesize";
else
OS << " dynamic";
dumpDeclRef(D->getPropertyDecl());
dumpDeclRef(D->getPropertyIvarDecl());
}
void ASTDumper::VisitBlockDecl(const BlockDecl *D) {
for (auto I : D->params())
dumpDecl(I);
if (D->isVariadic())
dumpChild([this] { OS << "..."; });
if (D->capturesCXXThis())
dumpChild([this] { OS << "capture this"; });
for (const auto &I : D->captures()) {
dumpChild([this, I] {
OS << "capture";
if (I.isByRef())
OS << " byref";
if (I.isNested())
OS << " nested";
if (I.getVariable()) {
OS << ' ';
dumpBareDeclRef(I.getVariable());
}
if (I.hasCopyExpr())
dumpStmt(I.getCopyExpr());
});
}
dumpStmt(D->getBody());
}
//===----------------------------------------------------------------------===//
// Stmt dumping methods.
//===----------------------------------------------------------------------===//
// Dump a statement subtree. DeclStmt is special-cased because its children
// are dumped through the contained declarations rather than Stmt::children().
void ASTDumper::dumpStmt(const Stmt *S) {
  dumpChild([this, S] {
    if (!S) {
      ColorScope Color(*this, NullColor);
      OS << "<<<NULL>>>";
      return;
    }
    if (const DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
      VisitDeclStmt(DS);
      return;
    }
    // Node-specific details, then recurse into sub-statements.
    ConstStmtVisitor<ASTDumper>::Visit(S);
    for (const Stmt *SubStmt : S->children())
      dumpStmt(SubStmt);
  });
}
void ASTDumper::VisitStmt(const Stmt *Node) {
{
ColorScope Color(*this, StmtColor);
OS << Node->getStmtClassName();
}
dumpPointer(Node);
dumpSourceRange(Node->getSourceRange());
}
void ASTDumper::VisitDeclStmt(const DeclStmt *Node) {
VisitStmt(Node);
for (DeclStmt::const_decl_iterator I = Node->decl_begin(),
E = Node->decl_end();
I != E; ++I)
dumpDecl(*I);
}
void ASTDumper::VisitAttributedStmt(const AttributedStmt *Node) {
VisitStmt(Node);
for (ArrayRef<const Attr *>::iterator I = Node->getAttrs().begin(),
E = Node->getAttrs().end();
I != E; ++I)
dumpAttr(*I);
}
void ASTDumper::VisitLabelStmt(const LabelStmt *Node) {
VisitStmt(Node);
OS << " '" << Node->getName() << "'";
}
void ASTDumper::VisitGotoStmt(const GotoStmt *Node) {
VisitStmt(Node);
OS << " '" << Node->getLabel()->getName() << "'";
dumpPointer(Node->getLabel());
}
void ASTDumper::VisitCXXCatchStmt(const CXXCatchStmt *Node) {
VisitStmt(Node);
dumpDecl(Node->getExceptionDecl());
}
//===----------------------------------------------------------------------===//
// Expr dumping methods.
//===----------------------------------------------------------------------===//
void ASTDumper::VisitExpr(const Expr *Node) {
VisitStmt(Node);
dumpType(Node->getType());
{
ColorScope Color(*this, ValueKindColor);
switch (Node->getValueKind()) {
case VK_RValue:
break;
case VK_LValue:
OS << " lvalue";
break;
case VK_XValue:
OS << " xvalue";
break;
}
}
{
ColorScope Color(*this, ObjectKindColor);
switch (Node->getObjectKind()) {
case OK_Ordinary:
break;
case OK_BitField:
OS << " bitfield";
break;
case OK_ObjCProperty:
OS << " objcproperty";
break;
case OK_ObjCSubscript:
OS << " objcsubscript";
break;
case OK_VectorComponent:
OS << " vectorcomponent";
break;
}
}
}
// Print the base-class path of a cast, e.g. " (Base -> virtual Derived)".
// Prints nothing when the cast has an empty path.
static void dumpBasePath(raw_ostream &OS, const CastExpr *Node) {
  if (Node->path_empty())
    return;
  OS << " (";
  const char *Separator = "";
  for (CastExpr::path_const_iterator I = Node->path_begin(),
                                     E = Node->path_end();
       I != E; ++I) {
    const CXXBaseSpecifier *Base = *I;
    OS << Separator;
    if (Base->isVirtual())
      OS << "virtual ";
    OS << cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl())
              ->getName();
    Separator = " -> ";
  }
  OS << ')';
}
// Dump a cast expression as " <CastKind (base path)>".
void ASTDumper::VisitCastExpr(const CastExpr *Node) {
  VisitExpr(Node);
  OS << " <";
  {
    ColorScope Color(*this, CastColor);
    OS << Node->getCastKindName();
  }
  dumpBasePath(OS, Node);
  OS << ">";
}
void ASTDumper::VisitDeclRefExpr(const DeclRefExpr *Node) {
VisitExpr(Node);
OS << " ";
dumpBareDeclRef(Node->getDecl());
if (Node->getDecl() != Node->getFoundDecl()) {
OS << " (";
dumpBareDeclRef(Node->getFoundDecl());
OS << ")";
}
}
void ASTDumper::VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *Node) {
VisitExpr(Node);
OS << " (";
if (!Node->requiresADL())
OS << "no ";
OS << "ADL) = '" << Node->getName() << '\'';
UnresolvedLookupExpr::decls_iterator
I = Node->decls_begin(), E = Node->decls_end();
if (I == E)
OS << " empty";
for (; I != E; ++I)
dumpPointer(*I);
}
void ASTDumper::VisitObjCIvarRefExpr(const ObjCIvarRefExpr *Node) {
VisitExpr(Node);
{
ColorScope Color(*this, DeclKindNameColor);
OS << " " << Node->getDecl()->getDeclKindName() << "Decl";
}
OS << "='" << *Node->getDecl() << "'";
dumpPointer(Node->getDecl());
if (Node->isFreeIvar())
OS << " isFreeIvar";
}
void ASTDumper::VisitPredefinedExpr(const PredefinedExpr *Node) {
VisitExpr(Node);
OS << " " << PredefinedExpr::getIdentTypeName(Node->getIdentType());
}
void ASTDumper::VisitCharacterLiteral(const CharacterLiteral *Node) {
VisitExpr(Node);
ColorScope Color(*this, ValueColor);
OS << " " << Node->getValue();
}
void ASTDumper::VisitIntegerLiteral(const IntegerLiteral *Node) {
VisitExpr(Node);
bool isSigned = Node->getType()->isSignedIntegerType();
ColorScope Color(*this, ValueColor);
OS << " " << Node->getValue().toString(10, isSigned);
}
void ASTDumper::VisitFloatingLiteral(const FloatingLiteral *Node) {
VisitExpr(Node);
ColorScope Color(*this, ValueColor);
OS << " " << Node->getValueAsApproximateDouble();
}
void ASTDumper::VisitStringLiteral(const StringLiteral *Str) {
VisitExpr(Str);
ColorScope Color(*this, ValueColor);
OS << " ";
Str->outputString(OS);
}
void ASTDumper::VisitInitListExpr(const InitListExpr *ILE) {
VisitExpr(ILE);
if (auto *Filler = ILE->getArrayFiller()) {
dumpChild([this, Filler] {
OS << "array filler";
dumpStmt(Filler);
});
}
if (auto *Field = ILE->getInitializedFieldInUnion()) {
OS << " field ";
dumpBareDeclRef(Field);
}
}
void ASTDumper::VisitUnaryOperator(const UnaryOperator *Node) {
VisitExpr(Node);
OS << " " << (Node->isPostfix() ? "postfix" : "prefix")
<< " '" << UnaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
}
// Dump the operator name for sizeof/alignof/vec_step-style trait expressions,
// followed by the argument type when the operand is a type (not an expr).
void ASTDumper::VisitUnaryExprOrTypeTraitExpr(
    const UnaryExprOrTypeTraitExpr *Node) {
  VisitExpr(Node);
  switch(Node->getKind()) {
  case UETT_SizeOf:
    OS << " sizeof";
    break;
  case UETT_AlignOf:
    OS << " alignof";
    break;
  case UETT_VecStep:
    OS << " vec_step";
    break;
  case UETT_OpenMPRequiredSimdAlign:
    OS << " __builtin_omp_required_simd_align";
    break;
  // HLSL Change Begins
  case UETT_ArrayLength:
    OS << " Length";
    break; // Fix: break was missing; behavior relied on this being last.
  // HLSL Change Ends
  }
  if (Node->isArgumentType())
    dumpType(Node->getArgumentType());
}
void ASTDumper::VisitMemberExpr(const MemberExpr *Node) {
VisitExpr(Node);
OS << " " << (Node->isArrow() ? "->" : ".") << *Node->getMemberDecl();
dumpPointer(Node->getMemberDecl());
}
void ASTDumper::VisitExtVectorElementExpr(const ExtVectorElementExpr *Node) {
VisitExpr(Node);
OS << " " << Node->getAccessor().getNameStart();
}
// HLSL Change Starts
void ASTDumper::VisitExtMatrixElementExpr(const ExtMatrixElementExpr *Node) {
VisitExpr(Node);
OS << " " << Node->getAccessor().getNameStart();
}
void ASTDumper::VisitHLSLVectorElementExpr(const HLSLVectorElementExpr *Node) {
VisitExpr(Node);
OS << " " << Node->getAccessor().getNameStart();
}
// HLSL Change Ends
void ASTDumper::VisitBinaryOperator(const BinaryOperator *Node) {
VisitExpr(Node);
OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
}
void ASTDumper::VisitCompoundAssignOperator(
const CompoundAssignOperator *Node) {
VisitExpr(Node);
OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode())
<< "' ComputeLHSTy=";
dumpBareType(Node->getComputationLHSType());
OS << " ComputeResultTy=";
dumpBareType(Node->getComputationResultType());
}
void ASTDumper::VisitBlockExpr(const BlockExpr *Node) {
VisitExpr(Node);
dumpDecl(Node->getBlockDecl());
}
void ASTDumper::VisitOpaqueValueExpr(const OpaqueValueExpr *Node) {
VisitExpr(Node);
if (Expr *Source = Node->getSourceExpr())
dumpStmt(Source);
}
// GNU extensions.
void ASTDumper::VisitAddrLabelExpr(const AddrLabelExpr *Node) {
VisitExpr(Node);
OS << " " << Node->getLabel()->getName();
dumpPointer(Node->getLabel());
}
//===----------------------------------------------------------------------===//
// C++ Expressions
//===----------------------------------------------------------------------===//

// Named cast (static_cast etc.): the cast keyword, the written target type,
// the cast kind, and any base-class conversion path.
void ASTDumper::VisitCXXNamedCastExpr(const CXXNamedCastExpr *Node) {
  VisitExpr(Node);
  OS << " " << Node->getCastName()
     << "<" << Node->getTypeAsWritten().getAsString() << ">"
     << " <" << Node->getCastKindName();
  dumpBasePath(OS, Node);
  OS << ">";
}

void ASTDumper::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *Node) {
  VisitExpr(Node);
  OS << " " << (Node->getValue() ? "true" : "false");
}

void ASTDumper::VisitCXXThisExpr(const CXXThisExpr *Node) {
  VisitExpr(Node);
  OS << " this";
}

// Functional-style cast T(expr): the written type plus the cast kind.
void ASTDumper::VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *Node) {
  VisitExpr(Node);
  OS << " functional cast to " << Node->getTypeAsWritten().getAsString()
     << " <" << Node->getCastKindName() << ">";
}

// Constructor call: the constructor's type plus whether the construction is
// elidable and whether zero-initialization is required first.
void ASTDumper::VisitCXXConstructExpr(const CXXConstructExpr *Node) {
  VisitExpr(Node);
  CXXConstructorDecl *Ctor = Node->getConstructor();
  dumpType(Ctor->getType());
  if (Node->isElidable())
    OS << " elidable";
  if (Node->requiresZeroInitialization())
    OS << " zeroing";
}

void ASTDumper::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *Node) {
  VisitExpr(Node);
  OS << " ";
  dumpCXXTemporary(Node->getTemporary());
}

// new-expression: note ::new, array form, and the operator new being called.
void ASTDumper::VisitCXXNewExpr(const CXXNewExpr *Node) {
  VisitExpr(Node);
  if (Node->isGlobalNew())
    OS << " global";
  if (Node->isArray())
    OS << " array";
  if (Node->getOperatorNew()) {
    OS << ' ';
    dumpBareDeclRef(Node->getOperatorNew());
  }
  // We could dump the deallocation function used in case of error, but it's
  // usually not that interesting.
}
// delete-expression: note ::delete, array form, and the operator delete used.
void ASTDumper::VisitCXXDeleteExpr(const CXXDeleteExpr *Node) {
  VisitExpr(Node);
  if (Node->isGlobalDelete())
    OS << " global";
  if (Node->isArrayForm())
    OS << " array";
  if (Node->getOperatorDelete()) {
    OS << ' ';
    dumpBareDeclRef(Node->getOperatorDelete());
  }
}

// Materialized temporary: if a declaration extends the temporary's lifetime,
// print a reference to it.
void
ASTDumper::VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Node) {
  VisitExpr(Node);
  if (const ValueDecl *VD = Node->getExtendingDecl()) {
    OS << " extended by ";
    dumpBareDeclRef(VD);
  }
}

// List every cleanup object attached to the expression.
void ASTDumper::VisitExprWithCleanups(const ExprWithCleanups *Node) {
  VisitExpr(Node);
  for (unsigned i = 0, e = Node->getNumObjects(); i != e; ++i)
    dumpDeclRef(Node->getObject(i), "cleanup");
}

// Helper: renders a CXXTemporary as "(CXXTemporary <ptr>)".
void ASTDumper::dumpCXXTemporary(const CXXTemporary *Temporary) {
  OS << "(CXXTemporary";
  dumpPointer(Temporary);
  OS << ")";
}

// sizeof...(pack): identify the pack parameter by pointer and name.
void ASTDumper::VisitSizeOfPackExpr(const SizeOfPackExpr *Node) {
  VisitExpr(Node);
  dumpPointer(Node->getPack());
  dumpName(Node->getPack());
}
//===----------------------------------------------------------------------===//
// Obj-C Expressions
//===----------------------------------------------------------------------===//

// Message send: print the selector, then describe the receiver when it is
// not an ordinary instance expression.
void ASTDumper::VisitObjCMessageExpr(const ObjCMessageExpr *Node) {
  VisitExpr(Node);
  OS << " selector=";
  Node->getSelector().print(OS);
  switch (Node->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    break;
  case ObjCMessageExpr::Class:
    OS << " class=";
    dumpBareType(Node->getClassReceiver());
    break;
  case ObjCMessageExpr::SuperInstance:
    OS << " super (instance)";
    break;
  case ObjCMessageExpr::SuperClass:
    OS << " super (class)";
    break;
  }
}

// Boxed expression @(...): print the selector of the boxing method used.
void ASTDumper::VisitObjCBoxedExpr(const ObjCBoxedExpr *Node) {
  VisitExpr(Node);
  OS << " selector=";
  Node->getBoxingMethod()->getSelector().print(OS);
}

// @catch: dump the catch parameter, or mark a catch-all when there is none.
void ASTDumper::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node) {
  VisitStmt(Node);
  if (const VarDecl *CatchParam = Node->getCatchParamDecl())
    dumpDecl(CatchParam);
  else
    OS << " catch all";
}

void ASTDumper::VisitObjCEncodeExpr(const ObjCEncodeExpr *Node) {
  VisitExpr(Node);
  dumpType(Node->getEncodedType());
}

void ASTDumper::VisitObjCSelectorExpr(const ObjCSelectorExpr *Node) {
  VisitExpr(Node);
  OS << " ";
  Node->getSelector().print(OS);
}

void ASTDumper::VisitObjCProtocolExpr(const ObjCProtocolExpr *Node) {
  VisitExpr(Node);
  OS << ' ' << *Node->getProtocol();
}

// Property reference: implicit properties are shown via their getter/setter
// selectors, explicit ones by name; then report which accessors are messaged.
void ASTDumper::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *Node) {
  VisitExpr(Node);
  if (Node->isImplicitProperty()) {
    OS << " Kind=MethodRef Getter=\"";
    if (Node->getImplicitPropertyGetter())
      Node->getImplicitPropertyGetter()->getSelector().print(OS);
    else
      OS << "(null)";
    OS << "\" Setter=\"";
    if (ObjCMethodDecl *Setter = Node->getImplicitPropertySetter())
      Setter->getSelector().print(OS);
    else
      OS << "(null)";
    OS << "\"";
  } else {
    OS << " Kind=PropertyRef Property=\"" << *Node->getExplicitProperty() <<'"';
  }
  if (Node->isSuperReceiver())
    OS << " super";
  OS << " Messaging=";
  if (Node->isMessagingGetter() && Node->isMessagingSetter())
    OS << "Getter&Setter";
  else if (Node->isMessagingGetter())
    OS << "Getter";
  else if (Node->isMessagingSetter())
    OS << "Setter";
}

// Subscript reference: distinguish array vs dictionary subscripting and show
// the selectors of the at-index getter and setter methods (if resolved).
void ASTDumper::VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *Node) {
  VisitExpr(Node);
  if (Node->isArraySubscriptRefExpr())
    OS << " Kind=ArraySubscript GetterForArray=\"";
  else
    OS << " Kind=DictionarySubscript GetterForDictionary=\"";
  if (Node->getAtIndexMethodDecl())
    Node->getAtIndexMethodDecl()->getSelector().print(OS);
  else
    OS << "(null)";
  if (Node->isArraySubscriptRefExpr())
    OS << "\" SetterForArray=\"";
  else
    OS << "\" SetterForDictionary=\"";
  if (Node->setAtIndexMethodDecl())
    Node->setAtIndexMethodDecl()->getSelector().print(OS);
  else
    OS << "(null)";
}

void ASTDumper::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node) {
  VisitExpr(Node);
  OS << " " << (Node->getValue() ? "__objc_yes" : "__objc_no");
}
//===----------------------------------------------------------------------===//
// Comments
//===----------------------------------------------------------------------===//

// Maps a documentation-comment command ID back to its spelling. Prefers the
// context's CommandTraits when available (covers registered commands), then
// falls back to the builtin command table.
const char *ASTDumper::getCommandName(unsigned CommandID) {
  if (Traits)
    return Traits->getCommandInfo(CommandID)->Name;
  const CommandInfo *Info = CommandTraits::getBuiltinCommandInfo(CommandID);
  if (Info)
    return Info->Name;
  return "<not a builtin command>";
}

// Entry point for dumping a comment tree. The FullComment is stashed in the
// FC member while dumping so visitors below can resolve parameter names.
void ASTDumper::dumpFullComment(const FullComment *C) {
  if (!C)
    return;

  FC = C;
  dumpComment(C);
  FC = nullptr;
}
// Dumps one comment node: kind name (colorized), pointer, source range, the
// node-specific details via ConstCommentVisitor, then all children
// recursively. A null comment is rendered as <<<NULL>>>.
void ASTDumper::dumpComment(const Comment *C) {
  dumpChild([this, C] {
    if (!C) {
      ColorScope Color(*this, NullColor);
      OS << "<<<NULL>>>";
      return;
    }

    {
      ColorScope Color(*this, CommentColor);
      OS << C->getCommentKindName();
    }
    dumpPointer(C);
    dumpSourceRange(C->getSourceRange());
    ConstCommentVisitor<ASTDumper>::visit(C);
    for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
         I != E; ++I)
      dumpComment(*I);
  });
}
void ASTDumper::visitTextComment(const TextComment *C) {
  OS << " Text=\"" << C->getText() << "\"";
}

// Inline command (\c, \b, ...): name, render kind, and each argument.
void ASTDumper::visitInlineCommandComment(const InlineCommandComment *C) {
  OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"";
  switch (C->getRenderKind()) {
  case InlineCommandComment::RenderNormal:
    OS << " RenderNormal";
    break;
  case InlineCommandComment::RenderBold:
    OS << " RenderBold";
    break;
  case InlineCommandComment::RenderMonospaced:
    OS << " RenderMonospaced";
    break;
  case InlineCommandComment::RenderEmphasized:
    OS << " RenderEmphasized";
    break;
  }

  for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i)
    OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\"";
}

// HTML start tag: tag name, attributes, and self-closing marker.
void ASTDumper::visitHTMLStartTagComment(const HTMLStartTagComment *C) {
  OS << " Name=\"" << C->getTagName() << "\"";
  if (C->getNumAttrs() != 0) {
    OS << " Attrs: ";
    for (unsigned i = 0, e = C->getNumAttrs(); i != e; ++i) {
      const HTMLStartTagComment::Attribute &Attr = C->getAttr(i);
      OS << " \"" << Attr.Name << "=\"" << Attr.Value << "\"";
    }
  }
  if (C->isSelfClosing())
    OS << " SelfClosing";
}

void ASTDumper::visitHTMLEndTagComment(const HTMLEndTagComment *C) {
  OS << " Name=\"" << C->getTagName() << "\"";
}

// Block command (\brief, \returns, ...): name plus arguments.
void ASTDumper::visitBlockCommandComment(const BlockCommandComment *C) {
  OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"";
  for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i)
    OS << " Arg[" << i << "]=\"" << C->getArgText(i) << "\"";
}

// \param: direction (explicit or inferred), the parameter name (resolved
// through FC when the index is valid), and its index when known.
void ASTDumper::visitParamCommandComment(const ParamCommandComment *C) {
  OS << " " << ParamCommandComment::getDirectionAsString(C->getDirection());

  if (C->isDirectionExplicit())
    OS << " explicitly";
  else
    OS << " implicitly";

  if (C->hasParamName()) {
    if (C->isParamIndexValid())
      OS << " Param=\"" << C->getParamName(FC) << "\"";
    else
      OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
  }

  if (C->isParamIndexValid() && !C->isVarArgParam())
    OS << " ParamIndex=" << C->getParamIndex();
}

// \tparam: template parameter name and, when valid, its nested position.
void ASTDumper::visitTParamCommandComment(const TParamCommandComment *C) {
  if (C->hasParamName()) {
    if (C->isPositionValid())
      OS << " Param=\"" << C->getParamName(FC) << "\"";
    else
      OS << " Param=\"" << C->getParamNameAsWritten() << "\"";
  }

  if (C->isPositionValid()) {
    OS << " Position=<";
    for (unsigned i = 0, e = C->getDepth(); i != e; ++i) {
      OS << C->getIndex(i);
      if (i != e - 1)
        OS << ", ";
    }
    OS << ">";
  }
}

void ASTDumper::visitVerbatimBlockComment(const VerbatimBlockComment *C) {
  OS << " Name=\"" << getCommandName(C->getCommandID()) << "\""
        " CloseName=\"" << C->getCloseName() << "\"";
}

void ASTDumper::visitVerbatimBlockLineComment(
    const VerbatimBlockLineComment *C) {
  OS << " Text=\"" << C->getText() << "\"";
}

void ASTDumper::visitVerbatimLineComment(const VerbatimLineComment *C) {
  OS << " Text=\"" << C->getText() << "\"";
}
//===----------------------------------------------------------------------===//
// Type method implementations
//===----------------------------------------------------------------------===//

// Debug helper: prints an optional message prefix, then the regular dump.
void QualType::dump(const char *msg) const {
  if (msg)
    llvm::errs() << msg << ": ";
  dump();
}

// Dumps this (possibly qualified) type to stderr without comment traits or
// source locations.
LLVM_DUMP_METHOD void QualType::dump() const {
  ASTDumper Dumper(llvm::errs(), nullptr, nullptr);
  Dumper.dumpTypeAsChild(*this);
}

LLVM_DUMP_METHOD void Type::dump() const { QualType(this, 0).dump(); }
//===----------------------------------------------------------------------===//
// Decl method implementations
//===----------------------------------------------------------------------===//

LLVM_DUMP_METHOD void Decl::dump() const { dump(llvm::errs()); }

// Dumps this declaration with comment traits and source locations taken
// from its ASTContext.
LLVM_DUMP_METHOD void Decl::dump(raw_ostream &OS) const {
  ASTDumper P(OS, &getASTContext().getCommentCommandTraits(),
              &getASTContext().getSourceManager());
  // HLSL Change - Support suppressing dumping implicit decls.
  P.setDumpImplicitTopLevelDecls(getASTContext().getLangOpts().DumpImplicitTopLevelDecls);
  P.dumpDecl(this);
}

// Same as dump(), but to stderr with terminal colors enabled.
LLVM_DUMP_METHOD void Decl::dumpColor() const {
  ASTDumper P(llvm::errs(), &getASTContext().getCommentCommandTraits(),
              &getASTContext().getSourceManager(), /*ShowColors*/true);
  P.dumpDecl(this);
}

LLVM_DUMP_METHOD void DeclContext::dumpLookups() const {
  dumpLookups(llvm::errs());
}

// Dumps this context's lookup table. Walks up to the translation unit to
// obtain the ASTContext (for comment traits and the source manager).
LLVM_DUMP_METHOD void DeclContext::dumpLookups(raw_ostream &OS,
                                               bool DumpDecls) const {
  const DeclContext *DC = this;
  while (!DC->isTranslationUnit())
    DC = DC->getParent();
  ASTContext &Ctx = cast<TranslationUnitDecl>(DC)->getASTContext();
  ASTDumper P(OS, &Ctx.getCommentCommandTraits(), &Ctx.getSourceManager());
  P.dumpLookups(this, DumpDecls);
}
//===----------------------------------------------------------------------===//
// Stmt method implementations
//===----------------------------------------------------------------------===//

LLVM_DUMP_METHOD void Stmt::dump(SourceManager &SM) const {
  dump(llvm::errs(), SM);
}

// Dump with source locations available via SM (no comment traits).
LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS, SourceManager &SM) const {
  ASTDumper P(OS, nullptr, &SM);
  P.dumpStmt(this);
}

// Dump without a source manager: no usable source locations.
LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS) const {
  ASTDumper P(OS, nullptr, nullptr);
  P.dumpStmt(this);
}

LLVM_DUMP_METHOD void Stmt::dump() const {
  ASTDumper P(llvm::errs(), nullptr, nullptr);
  P.dumpStmt(this);
}

LLVM_DUMP_METHOD void Stmt::dumpColor() const {
  ASTDumper P(llvm::errs(), nullptr, nullptr, /*ShowColors*/true);
  P.dumpStmt(this);
}
//===----------------------------------------------------------------------===//
// Comment method implementations
//===----------------------------------------------------------------------===//

LLVM_DUMP_METHOD void Comment::dump() const {
  dump(llvm::errs(), nullptr, nullptr);
}

// Dump with command traits and source locations from the given ASTContext.
LLVM_DUMP_METHOD void Comment::dump(const ASTContext &Context) const {
  dump(llvm::errs(), &Context.getCommentCommandTraits(),
       &Context.getSourceManager());
}

// Core implementation: only FullComments are dumped; any other comment kind
// yields a null FC and dumpFullComment() then prints nothing.
void Comment::dump(raw_ostream &OS, const CommandTraits *Traits,
                   const SourceManager *SM) const {
  const FullComment *FC = dyn_cast<FullComment>(this);
  ASTDumper D(OS, Traits, SM);
  D.dumpFullComment(FC);
}

LLVM_DUMP_METHOD void Comment::dumpColor() const {
  const FullComment *FC = dyn_cast<FullComment>(this);
  ASTDumper D(llvm::errs(), nullptr, nullptr, /*ShowColors*/true);
  D.dumpFullComment(FC);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/Mangle.cpp | //===--- Mangle.cpp - Mangle C++ Names --------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implements generic name mangling support for blocks and Objective-C.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/Attr.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Mangle.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
// //
///////////////////////////////////////////////////////////////////////////////
#define MANGLE_CHECKER 0
#if MANGLE_CHECKER
#include <cxxabi.h>
#endif
using namespace clang;
// FIXME: For blocks we currently mimic GCC's mangling scheme, which leaves
// much to be desired. Come up with a better mangling scheme.
// Appends the block-invoke suffix for a block nested in the function whose
// mangled name is Outer. The first block in a function (discriminator 0)
// gets the bare suffix; subsequent blocks carry a 1-based ordinal.
static void mangleFunctionBlock(MangleContext &Context, StringRef Outer,
                                const BlockDecl *BD, raw_ostream &Out) {
  const unsigned Discriminator = Context.getBlockId(BD, true);
  if (Discriminator != 0)
    Out << "__" << Outer << "_block_invoke_" << Discriminator + 1;
  else
    Out << "__" << Outer << "_block_invoke";
}
// Out-of-line anchor to pin MangleContext's vtable to this file.
void MangleContext::anchor() { }

// Classification of how a calling convention decorates a Windows name.
enum CCMangling {
  CCM_Other,
  CCM_Fast,
  CCM_Vector,
  CCM_Std
};

// Returns true if ND (a function or variable) has C language linkage.
static bool isExternC(const NamedDecl *ND) {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
    return FD->isExternC();
  return cast<VarDecl>(ND)->isExternC();
}
// Determines whether ND's calling convention contributes to its mangled
// name. Only x86/x86-64 Windows targets decorate names this way, and C++
// declarations under the Microsoft ABI already encode the convention in the
// normal C++ mangling, so both of those cases classify as CCM_Other.
static CCMangling getCallingConvMangling(const ASTContext &Context,
                                         const NamedDecl *ND) {
  const TargetInfo &TI = Context.getTargetInfo();
  const llvm::Triple &Triple = TI.getTriple();

  // CC-based decoration only exists on Windows x86/x86-64.
  const bool IsX86 = Triple.getArch() == llvm::Triple::x86 ||
                     Triple.getArch() == llvm::Triple::x86_64;
  if (!Triple.isOSWindows() || !IsX86)
    return CCM_Other;

  // The MS C++ mangler handles the calling convention on its own.
  if (Context.getLangOpts().CPlusPlus && !isExternC(ND) &&
      TI.getCXXABI() == TargetCXXABI::Microsoft)
    return CCM_Other;

  const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND);
  if (!FD)
    return CCM_Other;

  switch (FD->getType()->castAs<FunctionType>()->getCallConv()) {
  case CC_X86FastCall:
    return CCM_Fast;
  case CC_X86StdCall:
    return CCM_Std;
  case CC_X86VectorCall:
    return CCM_Vector;
  default:
    return CCM_Other;
  }
}
// Decides whether D's name must be mangled at all: calling-convention
// decoration, an explicit __asm label, or C++ rules each force mangling.
bool MangleContext::shouldMangleDeclName(const NamedDecl *D) {
  const ASTContext &ASTContext = getASTContext();
  CCMangling CC = getCallingConvMangling(ASTContext, D);
  if (CC != CCM_Other)
    return true;

  // In C, functions with no attributes never need to be mangled. Fastpath them.
  if (!getASTContext().getLangOpts().CPlusPlus && !D->hasAttrs())
    return false;

  // Any decl can be declared with __asm("foo") on it, and this takes precedence
  // over all other naming in the .o file.
  if (D->hasAttr<AsmLabelAttr>())
    return true;

  return shouldMangleCXXName(D);
}
// Emits the mangled name for D. Precedence: an explicit __asm label wins
// outright; otherwise the regular C++/Obj-C mangling is used, except that on
// Windows x86 targets stdcall/fastcall/vectorcall names get the MS-style
// '_'/'@' prefix and an '@<bytes>' argument-size suffix appended here.
void MangleContext::mangleName(const NamedDecl *D, raw_ostream &Out) {
  // Any decl can be declared with __asm("foo") on it, and this takes precedence
  // over all other naming in the .o file.
  if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
    // If we have an asm name, then we use it as the mangling.

    // Adding the prefix can cause problems when one file has a "foo" and
    // another has a "\01foo". That is known to happen on ELF with the
    // tricks normally used for producing aliases (PR9177). Fortunately the
    // llvm mangler on ELF is a nop, so we can just avoid adding the \01
    // marker. We also avoid adding the marker if this is an alias for an
    // LLVM intrinsic.
    StringRef UserLabelPrefix =
        getASTContext().getTargetInfo().getUserLabelPrefix();
    if (!UserLabelPrefix.empty() && !ALA->getLabel().startswith("llvm."))
      Out << '\01'; // LLVM IR Marker for __asm("foo")

    Out << ALA->getLabel();
    return;
  }

  const ASTContext &ASTContext = getASTContext();
  CCMangling CC = getCallingConvMangling(ASTContext, D);
  bool MCXX = shouldMangleCXXName(D);
  const TargetInfo &TI = Context.getTargetInfo();
  // No CC decoration needed (or the MS C++ mangler already encodes it):
  // delegate to the Obj-C or C++ mangler directly.
  if (CC == CCM_Other || (MCXX && TI.getCXXABI() == TargetCXXABI::Microsoft)) {
    if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D))
      mangleObjCMethodName(OMD, Out);
    else
      mangleCXXName(D, Out);
    return;
  }

  // MS-style decoration: \01 suppresses further LLVM mangling; stdcall gets
  // a '_' prefix, fastcall '@'.
  Out << '\01';
  if (CC == CCM_Std)
    Out << '_';
  else if (CC == CCM_Fast)
    Out << '@';

  if (!MCXX)
    Out << D->getIdentifier()->getName();
  else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D))
    mangleObjCMethodName(OMD, Out);
  else
    mangleCXXName(D, Out);

  const FunctionDecl *FD = cast<FunctionDecl>(D);
  const FunctionType *FT = FD->getType()->castAs<FunctionType>();
  const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT);
  if (CC == CCM_Vector)
    Out << '@';
  Out << '@';
  // Without a prototype the argument byte count is unknown; emit 0.
  if (!Proto) {
    Out << '0';
    return;
  }
  assert(!Proto->isVariadic());
  // Count pointer-size words occupied by the arguments (plus 'this' for
  // non-static methods), then convert to bytes for the '@N' suffix.
  unsigned ArgWords = 0;
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      ++ArgWords;
  for (const auto &AT : Proto->param_types())
    // Size should be aligned to pointer size.
    ArgWords += llvm::RoundUpToAlignment(ASTContext.getTypeSize(AT),
                                         TI.getPointerWidth(0)) /
                TI.getPointerWidth(0);
  Out << ((TI.getPointerWidth(0) / 8) * ArgWords);
}
// Mangles a block at global scope. When the block initializes a named
// declaration, that declaration's (possibly mangled) name is emitted first,
// followed by the "_block_invoke" suffix; blocks after the first carry a
// 1-based ordinal.
void MangleContext::mangleGlobalBlock(const BlockDecl *BD,
                                      const NamedDecl *ID,
                                      raw_ostream &Out) {
  const unsigned Discriminator = getBlockId(BD, false);
  if (ID) {
    if (!shouldMangleDeclName(ID))
      Out << ID->getIdentifier()->getName();
    else
      mangleName(ID, Out);
  }
  if (Discriminator != 0)
    Out << "_block_invoke_" << Discriminator + 1;
  else
    Out << "_block_invoke";
}
// Mangles a block nested in a constructor: mangle the constructor (with the
// given ctor type) into a buffer, then append the block-invoke suffix.
void MangleContext::mangleCtorBlock(const CXXConstructorDecl *CD,
                                    CXXCtorType CT, const BlockDecl *BD,
                                    raw_ostream &ResStream) {
  SmallString<64> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  mangleCXXCtor(CD, CT, Out);
  Out.flush();
  mangleFunctionBlock(*this, Buffer, BD, ResStream);
}

// Same scheme as mangleCtorBlock, for blocks nested in a destructor.
void MangleContext::mangleDtorBlock(const CXXDestructorDecl *DD,
                                    CXXDtorType DT, const BlockDecl *BD,
                                    raw_ostream &ResStream) {
  SmallString<64> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  mangleCXXDtor(DD, DT, Out);
  Out.flush();
  mangleFunctionBlock(*this, Buffer, BD, ResStream);
}
// Mangles a block by first producing the name of its enclosing context DC
// (Obj-C method, named decl, or another block's enclosing decl), then
// appending the block-invoke suffix. Ctor/dtor contexts must go through
// mangleCtorBlock/mangleDtorBlock instead.
void MangleContext::mangleBlock(const DeclContext *DC, const BlockDecl *BD,
                                raw_ostream &Out) {
  assert(!isa<CXXConstructorDecl>(DC) && !isa<CXXDestructorDecl>(DC));

  SmallString<64> Buffer;
  llvm::raw_svector_ostream Stream(Buffer);
  if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) {
    mangleObjCMethodName(Method, Stream);
  } else {
    assert((isa<NamedDecl>(DC) || isa<BlockDecl>(DC)) &&
           "expected a NamedDecl or BlockDecl");
    // For nested blocks, assign IDs to every enclosing block on the way up
    // so discriminators are stable, then mangle the outermost non-block DC.
    if (isa<BlockDecl>(DC))
      for (; DC && isa<BlockDecl>(DC); DC = DC->getParent())
        (void) getBlockId(cast<BlockDecl>(DC), true);
    assert((isa<TranslationUnitDecl>(DC) || isa<NamedDecl>(DC)) &&
           "expected a TranslationUnitDecl or a NamedDecl");
    if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
      mangleCtorBlock(CD, /*CT*/ Ctor_Complete, BD, Out);
    else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
      mangleDtorBlock(DD, /*DT*/ Dtor_Complete, BD, Out);
    else if (auto ND = dyn_cast<NamedDecl>(DC)) {
      if (!shouldMangleDeclName(ND) && ND->getIdentifier())
        Stream << ND->getIdentifier()->getName();
      else {
        // FIXME: We were doing a mangleUnqualifiedName() before, but that's
        // a private member of a class that will soon itself be private to the
        // Itanium C++ ABI object. What should we do now? Right now, I'm just
        // calling the mangleName() method on the MangleContext; is there a
        // better way?
        mangleName(ND, Stream);
      }
    }
  }
  Stream.flush();
  mangleFunctionBlock(*this, Buffer, BD, Out);
}
// Emits an Objective-C method name of the form "±[Class(Category) selector]"
// ('-' for instance methods, '+' for class methods), preceded by its length
// in characters.
void MangleContext::mangleObjCMethodName(const ObjCMethodDecl *MD,
                                         raw_ostream &Out) {
  SmallString<64> Name;
  llvm::raw_svector_ostream OS(Name);

  const ObjCContainerDecl *CD =
  dyn_cast<ObjCContainerDecl>(MD->getDeclContext());
  assert (CD && "Missing container decl in GetNameForMethod");
  OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
  if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
    OS << '(' << *CID << ')';
  OS << ' ';
  MD->getSelector().print(OS);
  OS << ']';

  Out << OS.str().size() << OS.str();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/DeclOpenMP.cpp | //===--- DeclOpenMP.cpp - Declaration OpenMP AST Node Implementation ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file implements OMPThreadPrivateDecl class.
///
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/Expr.h"
using namespace clang;
//===----------------------------------------------------------------------===//
// OMPThreadPrivateDecl Implementation.
//===----------------------------------------------------------------------===//
// Out-of-line anchor to pin OMPThreadPrivateDecl's vtable to this file.
void OMPThreadPrivateDecl::anchor() { }

// Creates a threadprivate declaration. The variable-reference expressions
// are stored in trailing storage right after the object, so the allocation
// reserves VL.size() extra Expr* slots.
OMPThreadPrivateDecl *OMPThreadPrivateDecl::Create(ASTContext &C,
                                                   DeclContext *DC,
                                                   SourceLocation L,
                                                   ArrayRef<Expr *> VL) {
  OMPThreadPrivateDecl *D = new (C, DC, VL.size() * sizeof(Expr *))
      OMPThreadPrivateDecl(OMPThreadPrivate, DC, L);
  D->NumVars = VL.size();
  D->setVars(VL);
  return D;
}

// Creates an empty declaration for AST deserialization; the reader fills in
// the N variable expressions later via setVars().
OMPThreadPrivateDecl *OMPThreadPrivateDecl::CreateDeserialized(ASTContext &C,
                                                               unsigned ID,
                                                               unsigned N) {
  OMPThreadPrivateDecl *D = new (C, ID, N * sizeof(Expr *))
      OMPThreadPrivateDecl(OMPThreadPrivate, nullptr, SourceLocation());
  D->NumVars = N;
  return D;
}

// Copies the variable expressions into the trailing Expr* array that was
// allocated immediately after this object.
void OMPThreadPrivateDecl::setVars(ArrayRef<Expr *> VL) {
  assert(VL.size() == NumVars &&
         "Number of variables is not the same as the preallocated buffer");
  Expr **Vars = reinterpret_cast<Expr **>(this + 1);
  std::copy(VL.begin(), VL.end(), Vars);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/ParentMap.cpp | //===--- ParentMap.cpp - Mappings from Stmts to their Parents ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ParentMap class.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ParentMap.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "llvm/ADT/DenseMap.h"
using namespace clang;
// Child statement -> parent statement.
typedef llvm::DenseMap<Stmt*, Stmt*> MapTy;

// Controls how OpaqueValueExprs are treated while building the map: in
// OV_Transparent position an OVE's source expression is (re)parented to the
// OVE; in OV_Opaque position an existing parent entry is left alone.
enum OpaqueValueMode {
  OV_Transparent,
  OV_Opaque
};

// Recursively records the parent of every reachable sub-statement of S.
// PseudoObjectExpr and BinaryConditionalOperator are special-cased so that
// a shared OpaqueValueExpr's source expression keeps its syntactic parent
// rather than being reassigned while walking the semantic forms.
static void BuildParentMap(MapTy& M, Stmt* S,
                           OpaqueValueMode OVMode = OV_Transparent) {
  switch (S->getStmtClass()) {
  case Stmt::PseudoObjectExprClass: {
    assert(OVMode == OV_Transparent && "Should not appear alongside OVEs");
    PseudoObjectExpr *POE = cast<PseudoObjectExpr>(S);

    // If we are rebuilding the map, clear out any existing state.
    if (M[POE->getSyntacticForm()])
      for (Stmt *SubStmt : S->children())
        M[SubStmt] = nullptr;

    M[POE->getSyntacticForm()] = S;
    BuildParentMap(M, POE->getSyntacticForm(), OV_Transparent);

    for (PseudoObjectExpr::semantics_iterator I = POE->semantics_begin(),
                                              E = POE->semantics_end();
         I != E; ++I) {
      M[*I] = S;
      BuildParentMap(M, *I, OV_Opaque);
    }
    break;
  }
  case Stmt::BinaryConditionalOperatorClass: {
    assert(OVMode == OV_Transparent && "Should not appear alongside OVEs");
    BinaryConditionalOperator *BCO = cast<BinaryConditionalOperator>(S);

    M[BCO->getCommon()] = S;
    BuildParentMap(M, BCO->getCommon(), OV_Transparent);

    M[BCO->getCond()] = S;
    BuildParentMap(M, BCO->getCond(), OV_Opaque);

    M[BCO->getTrueExpr()] = S;
    BuildParentMap(M, BCO->getTrueExpr(), OV_Opaque);

    M[BCO->getFalseExpr()] = S;
    BuildParentMap(M, BCO->getFalseExpr(), OV_Transparent);

    break;
  }
  case Stmt::OpaqueValueExprClass: {
    // FIXME: This isn't correct; it assumes that multiple OpaqueValueExprs
    // share a single source expression, but in the AST a single
    // OpaqueValueExpr is shared among multiple parent expressions.
    // The right thing to do is to give the OpaqueValueExpr its syntactic
    // parent, then not reassign that when traversing the semantic expressions.
    OpaqueValueExpr *OVE = cast<OpaqueValueExpr>(S);
    if (OVMode == OV_Transparent || !M[OVE->getSourceExpr()]) {
      M[OVE->getSourceExpr()] = S;
      BuildParentMap(M, OVE->getSourceExpr(), OV_Transparent);
    }
    break;
  }
  default:
    for (Stmt *SubStmt : S->children()) {
      if (SubStmt) {
        M[SubStmt] = S;
        BuildParentMap(M, SubStmt, OVMode);
      }
    }
    break;
  }
}
// Builds the full parent map for the statement tree rooted at S. A null
// root yields an empty (null-Impl) map.
ParentMap::ParentMap(Stmt *S) : Impl(nullptr) {
  if (S) {
    MapTy *M = new MapTy();
    BuildParentMap(*M, S);
    Impl = M;
  }
}

ParentMap::~ParentMap() {
  delete (MapTy*) Impl;
}

// Incrementally adds the subtree rooted at S to an existing map.
void ParentMap::addStmt(Stmt* S) {
  if (S) {
    BuildParentMap(*(MapTy*) Impl, S);
  }
}

// Records a single parent link explicitly (no recursion into S's children).
void ParentMap::setParent(const Stmt *S, const Stmt *Parent) {
  assert(S);
  assert(Parent);
  MapTy *M = reinterpret_cast<MapTy *>(Impl);
  M->insert(std::make_pair(const_cast<Stmt *>(S), const_cast<Stmt *>(Parent)));
}
// Returns S's recorded parent, or null when S is the root / unknown.
Stmt* ParentMap::getParent(Stmt* S) const {
  MapTy* M = (MapTy*) Impl;
  MapTy::iterator I = M->find(S);
  return I == M->end() ? nullptr : I->second;
}

// Nearest ancestor that is not a ParenExpr.
Stmt *ParentMap::getParentIgnoreParens(Stmt *S) const {
  do { S = getParent(S); } while (S && isa<ParenExpr>(S));
  return S;
}

// Nearest ancestor that is neither a ParenExpr nor any CastExpr.
Stmt *ParentMap::getParentIgnoreParenCasts(Stmt *S) const {
  do {
    S = getParent(S);
  }
  while (S && (isa<ParenExpr>(S) || isa<CastExpr>(S)));

  return S;
}

// Nearest ancestor that is not skipped by IgnoreParenImpCasts() (i.e. not a
// paren or implicit cast wrapper).
Stmt *ParentMap::getParentIgnoreParenImpCasts(Stmt *S) const {
  do {
    S = getParent(S);
  } while (S && isa<Expr>(S) && cast<Expr>(S)->IgnoreParenImpCasts() != S);

  return S;
}
// Walks upward through enclosing ParenExprs and returns the outermost one,
// or null when S itself is not a ParenExpr.
Stmt *ParentMap::getOuterParenParent(Stmt *S) const {
  Stmt *Outermost = nullptr;
  for (; isa<ParenExpr>(S); S = getParent(S))
    Outermost = S;
  return Outermost;
}
// Returns true if E's value is actually used by its enclosing construct.
// Parens, casts, and ExprWithCleanups are transparent; for the remaining
// parent the answer depends on which child position E occupies.
bool ParentMap::isConsumedExpr(Expr* E) const {
  Stmt *P = getParent(E);
  Stmt *DirectChild = E;

  // Ignore parents that don't guarantee consumption.
  while (P && (isa<ParenExpr>(P) || isa<CastExpr>(P) ||
               isa<ExprWithCleanups>(P))) {
    DirectChild = P;
    P = getParent(P);
  }

  if (!P)
    return false;

  switch (P->getStmtClass()) {
    default:
      return isa<Expr>(P);
    case Stmt::DeclStmtClass:
      return true;
    case Stmt::BinaryOperatorClass: {
      BinaryOperator *BE = cast<BinaryOperator>(P);
      // If it is a comma, only the right side is consumed.
      // If it isn't a comma, both sides are consumed.
      return BE->getOpcode()!=BO_Comma ||DirectChild==BE->getRHS();
    }
    case Stmt::ForStmtClass:
      return DirectChild == cast<ForStmt>(P)->getCond();
    case Stmt::WhileStmtClass:
      return DirectChild == cast<WhileStmt>(P)->getCond();
    case Stmt::DoStmtClass:
      return DirectChild == cast<DoStmt>(P)->getCond();
    case Stmt::IfStmtClass:
      return DirectChild == cast<IfStmt>(P)->getCond();
    case Stmt::IndirectGotoStmtClass:
      return DirectChild == cast<IndirectGotoStmt>(P)->getTarget();
    case Stmt::SwitchStmtClass:
      return DirectChild == cast<SwitchStmt>(P)->getCond();
    case Stmt::ReturnStmtClass:
      return true;
  }
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/DeclBase.cpp | //===--- DeclBase.cpp - Declaration AST Node Implementation ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Decl and DeclContext classes.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/DeclBase.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DependentDiagnostic.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/Type.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - options change visibility rules
using namespace clang;
//===----------------------------------------------------------------------===//
// Statistics
//===----------------------------------------------------------------------===//
#define DECL(DERIVED, BASE) static int n##DERIVED##s = 0;
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
// Asks the external AST source to refresh a possibly stale identifier.
void Decl::updateOutOfDate(IdentifierInfo &II) const {
  getASTContext().getExternalSource()->updateOutOfDateIdentifier(II);
}

// Allocation for deserialized declarations: an 8-byte prefix is reserved in
// front of the object — 4 bytes for the owning module ID (zeroed here) and
// 4 bytes for the global declaration ID.
void *Decl::operator new(std::size_t Size, const ASTContext &Context,
                         unsigned ID, std::size_t Extra) {
  // Allocate an extra 8 bytes worth of storage, which ensures that the
  // resulting pointer will still be 8-byte aligned.
  void *Start = Context.Allocate(Size + Extra + 8);
  void *Result = (char*)Start + 8;

  unsigned *PrefixPtr = (unsigned *)Result - 2;

  // Zero out the first 4 bytes; this is used to store the owning module ID.
  PrefixPtr[0] = 0;

  // Store the global declaration ID in the second 4 bytes.
  PrefixPtr[1] = ID;

  return Result;
}
// Allocation for parsed (non-deserialized) declarations. Under local module
// visibility a Module* slot is reserved in front of the object to track the
// owning module even for local declarations.
void *Decl::operator new(std::size_t Size, const ASTContext &Ctx,
                         DeclContext *Parent, std::size_t Extra) {
  assert(!Parent || &Parent->getParentASTContext() == &Ctx);
  // With local visibility enabled, we track the owning module even for local
  // declarations.
  if (Ctx.getLangOpts().ModulesLocalVisibility) {
    void *Buffer = ::operator new(sizeof(Module *) + Size + Extra, Ctx);
    return new (Buffer) Module*(nullptr) + 1;
  }
  return ::operator new(Size + Extra, Ctx);
}

// Resolves the owning module through the external AST source; only valid
// for declarations loaded from an AST file.
Module *Decl::getOwningModuleSlow() const {
  assert(isFromASTFile() && "Not from AST file?");
  return getASTContext().getExternalSource()->getModule(getOwningModuleID());
}

// True when operator new above reserved the local Module* slot.
bool Decl::hasLocalOwningModuleStorage() const {
  return getASTContext().getLangOpts().ModulesLocalVisibility;
}
// Returns the spelling of this declaration's kind, generated from
// DeclNodes.inc.
const char *Decl::getDeclKindName() const {
  switch (DeclKind) {
  default: llvm_unreachable("Declaration not in DeclNodes.inc!");
#define DECL(DERIVED, BASE) case DERIVED: return #DERIVED;
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
  }
}

// Marks the declaration (in)valid.
void Decl::setInvalidDecl(bool Invalid) {
  InvalidDecl = Invalid;
  assert(!isa<TagDecl>(this) || !cast<TagDecl>(this)->isCompleteDefinition());
  if (Invalid && !isa<ParmVarDecl>(this)) {
    // Defensive maneuver for ill-formed code: we're likely not to make it to
    // a point where we set the access specifier, so default it to "public"
    // to avoid triggering asserts elsewhere in the front end.
    setAccess(AS_public);
  }
}

// Returns the spelling of this declaration context's kind.
const char *DeclContext::getDeclKindName() const {
  switch (DeclKind) {
  default: llvm_unreachable("Declaration context not in DeclNodes.inc!");
#define DECL(DERIVED, BASE) case Decl::DERIVED: return #DERIVED;
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
  }
}
// Global switch for the per-kind declaration counters below.
bool Decl::StatisticsEnabled = false;
void Decl::EnableStatistics() {
  StatisticsEnabled = true;
}

// Prints the per-kind declaration counts and their byte footprint to stderr,
// expanding one accumulation/print step per kind from DeclNodes.inc.
void Decl::PrintStats() {
  llvm::errs() << "\n*** Decl Stats:\n";

  int totalDecls = 0;
#define DECL(DERIVED, BASE) totalDecls += n##DERIVED##s;
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
  llvm::errs() << "  " << totalDecls << " decls total.\n";

  int totalBytes = 0;
#define DECL(DERIVED, BASE)                                             \
  if (n##DERIVED##s > 0) {                                              \
    totalBytes += (int)(n##DERIVED##s * sizeof(DERIVED##Decl));         \
    llvm::errs() << "    " << n##DERIVED##s << " " #DERIVED " decls, "  \
                 << sizeof(DERIVED##Decl) << " each ("                  \
                 << n##DERIVED##s * sizeof(DERIVED##Decl)               \
                 << " bytes)\n";                                        \
  }
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"

  llvm::errs() << "Total bytes = " << totalBytes << "\n";
}

// Bumps the counter for declaration kind k.
void Decl::add(Kind k) {
  switch (k) {
#define DECL(DERIVED, BASE) case DERIVED: ++n##DERIVED##s; break;
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
  }
}
bool Decl::isTemplateParameterPack() const {
if (const TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(this))
return TTP->isParameterPack();
if (const NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(this))
return NTTP->isParameterPack();
if (const TemplateTemplateParmDecl *TTP
= dyn_cast<TemplateTemplateParmDecl>(this))
return TTP->isParameterPack();
return false;
}
bool Decl::isParameterPack() const {
if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(this))
return Parm->isParameterPack();
return isTemplateParameterPack();
}
FunctionDecl *Decl::getAsFunction() {
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(this))
return FD;
if (const FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(this))
return FTD->getTemplatedDecl();
return nullptr;
}
bool Decl::isTemplateDecl() const {
return isa<TemplateDecl>(this);
}
/// Walk the semantic context chain and return the innermost enclosing
/// function, method, or block; null when a namespace or the translation
/// unit is reached first.
const DeclContext *Decl::getParentFunctionOrMethod() const {
  const DeclContext *Ctx = getDeclContext();
  while (Ctx && !Ctx->isTranslationUnit() && !Ctx->isNamespace()) {
    if (Ctx->isFunctionOrMethod())
      return Ctx;
    Ctx = Ctx->getParent();
  }
  return nullptr;
}
//===----------------------------------------------------------------------===//
// PrettyStackTraceDecl Implementation
//===----------------------------------------------------------------------===//
/// Emit "<loc>: <message> '<qualified-name>'" for crash backtraces, using
/// the declaration's own location when none was supplied explicitly.
void PrettyStackTraceDecl::print(raw_ostream &OS) const {
  SourceLocation PrintLoc = Loc;
  if (PrintLoc.isInvalid() && TheDecl)
    PrintLoc = TheDecl->getLocation();
  if (PrintLoc.isValid()) {
    PrintLoc.print(OS, SM);
    OS << ": ";
  }
  OS << Message;
  if (const auto *ND = dyn_cast_or_null<NamedDecl>(TheDecl)) {
    OS << " '";
    ND->printQualifiedName(OS);
    OS << '\'';
  }
  OS << '\n';
}
//===----------------------------------------------------------------------===//
// Decl Implementation
//===----------------------------------------------------------------------===//
// Out-of-line virtual method providing a home for Decl (anchors the vtable
// to this translation unit).
Decl::~Decl() { }
/// Set the semantic DeclContext. This stores directly into the context
/// pointer union, replacing any separate lexical-context record.
void Decl::setDeclContext(DeclContext *DC) {
  DeclCtx = DC;
}
/// Set the lexical DeclContext. The first time the lexical context diverges
/// from the semantic one, the compact single-pointer representation is
/// upgraded to an out-of-line MultipleDC pair.
void Decl::setLexicalDeclContext(DeclContext *DC) {
  if (DC == getLexicalDeclContext())
    return;
  if (isInSemaDC()) {
    // Still in the single-context form; allocate the two-pointer form.
    setDeclContextsImpl(getDeclContext(), DC, getASTContext());
  } else {
    getMultipleDC()->LexicalDC = DC;
  }
  // Inherit the module-visibility (Hidden) bit from the new lexical owner.
  Hidden = cast<Decl>(DC)->Hidden;
}
/// Install both the semantic and lexical contexts, using the compact
/// single-pointer form when they coincide and an ASTContext-allocated
/// MultipleDC when they differ.
void Decl::setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
                               ASTContext &Ctx) {
  if (SemaDC == LexicalDC) {
    DeclCtx = SemaDC;
    return;
  }
  auto *Both = new (Ctx) Decl::MultipleDC();
  Both->SemanticDC = SemaDC;
  Both->LexicalDC = LexicalDC;
  DeclCtx = Both;
}
/// HLSL Change Begin - back port from llvm-project/73c6a2448f24 & f721e0582b15.
/// Determine whether this declaration lexically lives in a function-local
/// scope for template-instantiation purposes: directly in a function or
/// method body, or inside a tag (including a lambda closure type) that is
/// itself nested in one — all within a dependent context.
bool Decl::isInLocalScopeForInstantiation() const {
  const DeclContext *LDC = getLexicalDeclContext();
  if (!LDC->isDependentContext())
    return false;
  while (true) {
    if (LDC->isFunctionOrMethod())
      return true;
    if (!isa<TagDecl>(LDC))
      return false;
    // A lambda closure type counts as a local scope even though it is a
    // TagDecl.
    if (const auto *CRD = dyn_cast<CXXRecordDecl>(LDC))
      if (CRD->isLambda())
        return true;
    LDC = LDC->getLexicalParent();
  }
  return false; // Unreachable: the loop above always returns.
}
/// HLSL Change End - back port from llvm-project/73c6a2448f24 & f721e0582b15.
bool Decl::isInAnonymousNamespace() const {
const DeclContext *DC = getDeclContext();
do {
if (const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC))
if (ND->isAnonymousNamespace())
return true;
} while ((DC = DC->getParent()));
return false;
}
/// Whether this declaration's semantic context is namespace std (see
/// DeclContext::isStdNamespace for the exact rules).
bool Decl::isInStdNamespace() const {
  return getDeclContext()->isStdNamespace();
}
/// Walk up the semantic context chain to the enclosing translation unit.
/// Asserts if the chain is broken before reaching one.
TranslationUnitDecl *Decl::getTranslationUnitDecl() {
  if (auto *TU = dyn_cast<TranslationUnitDecl>(this))
    return TU;
  DeclContext *Ctx = getDeclContext();
  assert(Ctx && "This decl is not contained in a translation unit!");
  while (!Ctx->isTranslationUnit()) {
    Ctx = Ctx->getParent();
    assert(Ctx && "This decl is not contained in a translation unit!");
  }
  return cast<TranslationUnitDecl>(Ctx);
}
/// Retrieve the ASTContext owning this declaration, via the enclosing
/// translation unit.
ASTContext &Decl::getASTContext() const {
  return getTranslationUnitDecl()->getASTContext();
}
/// Retrieve the AST mutation listener registered on the ASTContext, if any.
ASTMutationListener *Decl::getASTMutationListener() const {
  return getASTContext().getASTMutationListener();
}
unsigned Decl::getMaxAlignment() const {
if (!hasAttrs())
return 0;
unsigned Align = 0;
const AttrVec &V = getAttrs();
ASTContext &Ctx = getASTContext();
specific_attr_iterator<AlignedAttr> I(V.begin()), E(V.end());
for (; I != E; ++I)
Align = std::max(Align, I->getAlignment(Ctx));
return Align;
}
/// Whether this declaration has been marked used, or (when \p CheckUsedAttr)
/// carries an explicit "used" attribute.
bool Decl::isUsed(bool CheckUsedAttr) const {
  return Used || (CheckUsedAttr && hasAttr<UsedAttr>());
}
/// Mark this declaration as used, notifying the AST mutation listener only
/// on the first transition from unused to used.
void Decl::markUsed(ASTContext &C) {
  if (Used)
    return;
  if (ASTMutationListener *Listener = C.getASTMutationListener())
    Listener->DeclarationMarkedUsed(this);
  Used = true;
}
bool Decl::isReferenced() const {
if (Referenced)
return true;
// Check redeclarations.
for (auto I : redecls())
if (I->Referenced)
return true;
return false;
}
/// \brief Determine the availability of the given declaration based on
/// the target platform.
///
/// When it returns an availability result other than \c AR_Available,
/// if the \p Message parameter is non-NULL, it will be set to a
/// string describing why the entity is unavailable.
///
/// FIXME: Make these strings localizable, since they end up in
/// diagnostics.
static AvailabilityResult CheckAvailability(ASTContext &Context,
                                            const AvailabilityAttr *A,
                                            std::string *Message) {
  // Without a deployment target there is nothing to compare against.
  VersionTuple TargetMinVersion =
    Context.getTargetInfo().getPlatformMinVersion();
  if (TargetMinVersion.empty())
    return AR_Available;
  // Check if this is an App Extension "platform", and if so chop off
  // the suffix for matching with the actual platform.
  StringRef ActualPlatform = A->getPlatform()->getName();
  StringRef RealizedPlatform = ActualPlatform;
  if (Context.getLangOpts().AppExt) {
    size_t suffix = RealizedPlatform.rfind("_app_extension");
    if (suffix != StringRef::npos)
      RealizedPlatform = RealizedPlatform.slice(0, suffix);
  }
  StringRef TargetPlatform = Context.getTargetInfo().getPlatformName();
  // Match the platform name; attributes for other platforms are irrelevant.
  if (RealizedPlatform != TargetPlatform)
    return AR_Available;
  // Prefer the human-readable platform name for diagnostics, falling back
  // to the raw identifier.
  StringRef PrettyPlatformName
    = AvailabilityAttr::getPrettyPlatformName(ActualPlatform);
  if (PrettyPlatformName.empty())
    PrettyPlatformName = ActualPlatform;
  std::string HintMessage;
  if (!A->getMessage().empty()) {
    HintMessage = " - ";
    HintMessage += A->getMessage();
  }
  // Make sure that this declaration has not been marked 'unavailable'.
  if (A->getUnavailable()) {
    if (Message) {
      Message->clear();
      llvm::raw_string_ostream Out(*Message);
      Out << "not available on " << PrettyPlatformName
          << HintMessage;
    }
    return AR_Unavailable;
  }
  // Make sure that this declaration has already been introduced.
  if (!A->getIntroduced().empty() &&
      TargetMinVersion < A->getIntroduced()) {
    if (Message) {
      Message->clear();
      llvm::raw_string_ostream Out(*Message);
      VersionTuple VTI(A->getIntroduced());
      VTI.UseDotAsSeparator();
      Out << "introduced in " << PrettyPlatformName << ' '
          << VTI << HintMessage;
    }
    return AR_NotYetIntroduced;
  }
  // Make sure that this declaration hasn't been obsoleted.
  if (!A->getObsoleted().empty() && TargetMinVersion >= A->getObsoleted()) {
    if (Message) {
      Message->clear();
      llvm::raw_string_ostream Out(*Message);
      VersionTuple VTO(A->getObsoleted());
      VTO.UseDotAsSeparator();
      Out << "obsoleted in " << PrettyPlatformName << ' '
          << VTO << HintMessage;
    }
    return AR_Unavailable;
  }
  // Make sure that this declaration hasn't been deprecated.
  if (!A->getDeprecated().empty() && TargetMinVersion >= A->getDeprecated()) {
    if (Message) {
      Message->clear();
      llvm::raw_string_ostream Out(*Message);
      VersionTuple VTD(A->getDeprecated());
      VTD.UseDotAsSeparator();
      Out << "first deprecated in " << PrettyPlatformName << ' '
          << VTD << HintMessage;
    }
    return AR_Deprecated;
  }
  return AR_Available;
}
/// Compute the most restrictive availability implied by this declaration's
/// attributes, optionally returning the message attached to the winning
/// attribute via \p Message.
AvailabilityResult Decl::getAvailability(std::string *Message) const {
  AvailabilityResult Result = AR_Available;
  std::string ResultMessage;
  for (const auto *A : attrs()) {
    if (const auto *Deprecated = dyn_cast<DeprecatedAttr>(A)) {
      // Deprecation is the weakest non-available result; keep any stronger
      // result already found.
      if (Result >= AR_Deprecated)
        continue;
      if (Message)
        ResultMessage = Deprecated->getMessage();
      Result = AR_Deprecated;
      continue;
    }
    if (const auto *Unavailable = dyn_cast<UnavailableAttr>(A)) {
      // Unconditionally unavailable wins immediately.
      if (Message)
        *Message = Unavailable->getMessage();
      return AR_Unavailable;
    }
    if (const auto *Availability = dyn_cast<AvailabilityAttr>(A)) {
      AvailabilityResult AR = CheckAvailability(getASTContext(), Availability,
                                                Message);
      if (AR == AR_Unavailable)
        return AR_Unavailable;
      if (AR > Result) {
        Result = AR;
        // CheckAvailability wrote its message into *Message; stash it as
        // the current best so a later, weaker attribute can't clobber it.
        if (Message)
          ResultMessage.swap(*Message);
      }
      continue;
    }
  }
  if (Message)
    Message->swap(ResultMessage);
  return Result;
}
bool Decl::canBeWeakImported(bool &IsDefinition) const {
IsDefinition = false;
// Variables, if they aren't definitions.
if (const VarDecl *Var = dyn_cast<VarDecl>(this)) {
if (Var->isThisDeclarationADefinition()) {
IsDefinition = true;
return false;
}
return true;
// Functions, if they aren't definitions.
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) {
if (FD->hasBody()) {
IsDefinition = true;
return false;
}
return true;
// Objective-C classes, if this is the non-fragile runtime.
} else if (isa<ObjCInterfaceDecl>(this) &&
getASTContext().getLangOpts().ObjCRuntime.hasWeakClassImport()) {
return true;
// Nothing else.
} else {
return false;
}
}
bool Decl::isWeakImported() const {
bool IsDefinition;
if (!canBeWeakImported(IsDefinition))
return false;
for (const auto *A : attrs()) {
if (isa<WeakImportAttr>(A))
return true;
if (const auto *Availability = dyn_cast<AvailabilityAttr>(A)) {
if (CheckAvailability(getASTContext(), Availability,
nullptr) == AR_NotYetIntroduced)
return true;
}
}
return false;
}
/// Map a declaration kind to the set of identifier namespaces (IDNS_* bits)
/// its name lives in, which controls what kinds of name lookup can find it.
unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
  switch (DeclKind) {
    case Function:
    case CXXMethod:
    case CXXConstructor:
    case CXXDestructor:
    case CXXConversion:
    case EnumConstant:
    case Var:
    case ImplicitParam:
    case ParmVar:
    case NonTypeTemplateParm:
    case ObjCMethod:
    case ObjCProperty:
    case MSProperty:
      return IDNS_Ordinary;
    case Label:
      return IDNS_Label;
    case IndirectField:
      return IDNS_Ordinary | IDNS_Member;
    case ObjCCompatibleAlias:
    case ObjCInterface:
      return IDNS_Ordinary | IDNS_Type;
    case Typedef:
    case TypeAlias:
    case TypeAliasTemplate:
    case UnresolvedUsingTypename:
    case TemplateTypeParm:
    case ObjCTypeParam:
      return IDNS_Ordinary | IDNS_Type;
    case UsingShadow:
      return 0; // we'll actually overwrite this later
    case UnresolvedUsingValue:
      return IDNS_Ordinary | IDNS_Using;
    case Using:
      return IDNS_Using;
    case ObjCProtocol:
      return IDNS_ObjCProtocol;
    case Field:
    case ObjCAtDefsField:
    case ObjCIvar:
      return IDNS_Member;
    case Record:
    case CXXRecord:
    case Enum:
      return IDNS_Tag | IDNS_Type;
    case Namespace:
    case NamespaceAlias:
      return IDNS_Namespace;
    case FunctionTemplate:
    case VarTemplate:
      return IDNS_Ordinary;
    case ClassTemplate:
    case TemplateTemplateParm:
      return IDNS_Ordinary | IDNS_Tag | IDNS_Type;
    // Never have names.
    case Friend:
    case FriendTemplate:
    case AccessSpec:
    case LinkageSpec:
    case FileScopeAsm:
    case StaticAssert:
    case ObjCPropertyImpl:
    case Block:
    case Captured:
    case TranslationUnit:
    case ExternCContext:
    case UsingDirective:
    case ClassTemplateSpecialization:
    case ClassTemplatePartialSpecialization:
    case ClassScopeFunctionSpecialization:
    case VarTemplateSpecialization:
    case VarTemplatePartialSpecialization:
    case ObjCImplementation:
    case ObjCCategory:
    case ObjCCategoryImpl:
    case Import:
    case OMPThreadPrivate:
    case Empty:
    case HLSLBuffer: // HLSL Change
      // Never looked up by name.
      return 0;
  }
  llvm_unreachable("Invalid DeclKind!");
}
void Decl::setAttrsImpl(const AttrVec &attrs, ASTContext &Ctx) {
assert(!HasAttrs && "Decl already contains attrs.");
AttrVec &AttrBlank = Ctx.getDeclAttrs(this);
assert(AttrBlank.empty() && "HasAttrs was wrong?");
AttrBlank = attrs;
HasAttrs = true;
}
/// Detach and erase this declaration's attribute list, if it has one.
void Decl::dropAttrs() {
  if (!HasAttrs)
    return;
  HasAttrs = false;
  getASTContext().eraseDeclAttrs(this);
}
/// Retrieve this declaration's attributes from the ASTContext side table;
/// asserts that HasAttrs is set.
const AttrVec &Decl::getAttrs() const {
  assert(HasAttrs && "No attrs to get!");
  return getASTContext().getDeclAttrs(this);
}
/// Convert a DeclContext back to the Decl it is embedded in, dispatching on
/// the dynamic kind so the correct pointer adjustment is applied.
Decl *Decl::castFromDeclContext (const DeclContext *D) {
  Decl::Kind DK = D->getDeclKind();
  switch(DK) {
    // Concrete kinds that directly derive from DeclContext.
#define DECL(NAME, BASE)
#define DECL_CONTEXT(NAME) \
    case Decl::NAME:       \
      return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D));
#define DECL_CONTEXT_BASE(NAME)
#include "clang/AST/DeclNodes.inc"
    default:
      // Abstract bases deriving from DeclContext: match by kind range.
#define DECL(NAME, BASE)
#define DECL_CONTEXT_BASE(NAME)                  \
      if (DK >= first##NAME && DK <= last##NAME) \
        return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D));
#include "clang/AST/DeclNodes.inc"
      llvm_unreachable("a decl that inherits DeclContext isn't handled");
  }
}
/// Convert a Decl to its embedded DeclContext, dispatching on the dynamic
/// kind so the correct pointer adjustment is applied (mirror of
/// castFromDeclContext).
DeclContext *Decl::castToDeclContext(const Decl *D) {
  Decl::Kind DK = D->getKind();
  switch(DK) {
    // Concrete kinds that directly derive from DeclContext.
#define DECL(NAME, BASE)
#define DECL_CONTEXT(NAME) \
    case Decl::NAME:       \
      return static_cast<NAME##Decl*>(const_cast<Decl*>(D));
#define DECL_CONTEXT_BASE(NAME)
#include "clang/AST/DeclNodes.inc"
    default:
      // Abstract bases deriving from DeclContext: match by kind range.
#define DECL(NAME, BASE)
#define DECL_CONTEXT_BASE(NAME)                  \
      if (DK >= first##NAME && DK <= last##NAME) \
        return static_cast<NAME##Decl*>(const_cast<Decl*>(D));
#include "clang/AST/DeclNodes.inc"
      llvm_unreachable("a decl that inherits DeclContext isn't handled");
  }
}
/// Return the location of the closing brace of this declaration's body, or
/// an invalid location when there is no body.
SourceLocation Decl::getBodyRBrace() const {
  // FunctionDecl caches its end location (EndRangeLoc), so ask the defining
  // declaration directly instead of de-serializing the body from a PCH.
  if (const auto *Fn = dyn_cast<FunctionDecl>(this)) {
    const FunctionDecl *Def = nullptr;
    return Fn->hasBody(Def) ? Def->getSourceRange().getEnd()
                            : SourceLocation();
  }
  Stmt *Body = getBody();
  return Body ? Body->getSourceRange().getEnd() : SourceLocation();
}
/// Debug-build consistency check: a member declared inside a C++ record
/// must carry an access specifier other than AS_none, modulo the listed
/// exemptions. Always returns true; the check itself is an assert.
bool Decl::AccessDeclContextSanity() const {
#ifndef NDEBUG
  // Suppress this check if any of the following hold:
  // 1. this is the translation unit (and thus has no parent)
  // 2. this is a template parameter (and thus doesn't belong to its context)
  // 3. this is a non-type template parameter
  // 4. the context is not a record
  // 5. it's invalid
  // 6. it's a C++0x static_assert.
  if (isa<TranslationUnitDecl>(this) ||
      isa<TemplateTypeParmDecl>(this) ||
      isa<NonTypeTemplateParmDecl>(this) ||
      !isa<CXXRecordDecl>(getDeclContext()) ||
      isInvalidDecl() ||
      isa<StaticAssertDecl>(this) ||
      // FIXME: a ParmVarDecl can have ClassTemplateSpecialization
      // as DeclContext (?).
      isa<ParmVarDecl>(this) ||
      // FIXME: a ClassTemplateSpecialization or CXXRecordDecl can have
      // AS_none as access specifier.
      isa<CXXRecordDecl>(this) ||
      isa<ClassScopeFunctionSpecializationDecl>(this))
    return true;
  assert(Access != AS_none &&
         "Access specifier is AS_none inside a record decl");
#endif
  return true;
}
// Uniform kind accessors so the getNonClosureContext template below can be
// instantiated over both Decl and DeclContext.
static Decl::Kind getKind(const Decl *D) { return D->getKind(); }
static Decl::Kind getKind(const DeclContext *DC) { return DC->getDeclKind(); }
/// Return the FunctionType underlying this declaration's type, looking
/// through function pointers and, when \p BlocksToo, block pointers.
/// Returns null for declarations without a usable type.
const FunctionType *Decl::getFunctionType(bool BlocksToo) const {
  QualType T;
  if (const auto *VD = dyn_cast<ValueDecl>(this))
    T = VD->getType();
  else if (const auto *TD = dyn_cast<TypedefNameDecl>(this))
    T = TD->getUnderlyingType();
  else
    return nullptr;
  if (T->isFunctionPointerType())
    T = T->getAs<PointerType>()->getPointeeType();
  else if (BlocksToo && T->isBlockPointerType())
    T = T->getAs<BlockPointerType>()->getPointeeType();
  return T->getAs<FunctionType>();
}
/// Starting at a given context (a Decl or DeclContext), look for a
/// code context that is not a closure (a lambda, block, etc.).
template <class T> static Decl *getNonClosureContext(T *D) {
  if (getKind(D) == Decl::CXXMethod) {
    CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    // A lambda's operator() is itself a closure: keep climbing from the
    // closure type's enclosing context.
    if (MD->getOverloadedOperator() == OO_Call &&
        MD->getParent()->isLambda())
      return getNonClosureContext(MD->getParent()->getParent());
    return MD;
  } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    return FD;
  } else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
    return MD;
  } else if (BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
    // Blocks and captured regions are closures: recurse into the parent.
    return getNonClosureContext(BD->getParent());
  } else if (CapturedDecl *CD = dyn_cast<CapturedDecl>(D)) {
    return getNonClosureContext(CD->getParent());
  } else {
    return nullptr;
  }
}
/// Find the innermost enclosing code context that is not a closure.
Decl *Decl::getNonClosureContext() {
  return ::getNonClosureContext(this);
}
/// Find the innermost enclosing code context that is not a closure,
/// starting from a DeclContext.
Decl *DeclContext::getNonClosureAncestor() {
  return ::getNonClosureContext(this);
}
//===----------------------------------------------------------------------===//
// DeclContext Implementation
//===----------------------------------------------------------------------===//
/// LLVM-style RTTI predicate: whether \p D is one of the Decl kinds that
/// also derives from DeclContext (generated from DeclNodes.inc).
bool DeclContext::classof(const Decl *D) {
  switch (D->getKind()) {
    // Concrete kinds deriving from DeclContext.
#define DECL(NAME, BASE)
#define DECL_CONTEXT(NAME) case Decl::NAME:
#define DECL_CONTEXT_BASE(NAME)
#include "clang/AST/DeclNodes.inc"
      return true;
    default:
      // Abstract bases deriving from DeclContext: match by kind range.
#define DECL(NAME, BASE)
#define DECL_CONTEXT_BASE(NAME)                 \
      if (D->getKind() >= Decl::first##NAME &&  \
          D->getKind() <= Decl::last##NAME)     \
        return true;
#include "clang/AST/DeclNodes.inc"
      return false;
  }
}
DeclContext::~DeclContext() { }
/// \brief Find the parent context of this context that will be
/// used for unqualified name lookup.
///
/// Generally, the parent lookup context is the semantic context. However, for
/// a friend function the parent lookup context is the lexical context, which
/// is the class in which the friend is declared.
DeclContext *DeclContext::getLookupParent() {
  // FIXME: Find a better way to identify friends.
  if (isa<FunctionDecl>(this) &&
      getParent()->getRedeclContext()->isFileContext() &&
      getLexicalParent()->getRedeclContext()->isRecord())
    return getLexicalParent();
  return getParent();
}
/// Whether this context is an inline namespace.
bool DeclContext::isInlineNamespace() const {
  if (!isNamespace())
    return false;
  return cast<NamespaceDecl>(this)->isInline();
}
bool DeclContext::isStdNamespace() const {
if (!isNamespace())
return false;
const NamespaceDecl *ND = cast<NamespaceDecl>(this);
if (ND->isInline()) {
return ND->getParent()->isStdNamespace();
}
if (!getParent()->getRedeclContext()->isTranslationUnit())
return false;
const IdentifierInfo *II = ND->getIdentifier();
return II && II->isStr("std");
}
bool DeclContext::isDependentContext() const {
if (isFileContext())
return false;
if (isa<ClassTemplatePartialSpecializationDecl>(this))
return true;
if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this)) {
if (Record->getDescribedClassTemplate())
return true;
if (Record->isDependentLambda())
return true;
}
if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(this)) {
if (Function->getDescribedFunctionTemplate())
return true;
// Friend function declarations are dependent if their *lexical*
// context is dependent.
if (cast<Decl>(this)->getFriendObjectKind())
return getLexicalParent()->isDependentContext();
}
// FIXME: A variable template is a dependent context, but is not a
// DeclContext. A context within it (such as a lambda-expression)
// should be considered dependent.
return getParent() && getParent()->isDependentContext();
}
/// Whether declarations in this context are visible as if declared in its
/// parent (unscoped enums, linkage specs, and HLSL buffers).
bool DeclContext::isTransparentContext() const {
  switch (DeclKind) {
  case Decl::Enum:
    // Unscoped enumerations expose their enumerators to the parent scope.
    return !cast<EnumDecl>(this)->isScoped();
  case Decl::LinkageSpec:
    return true;
  // HLSL Change Starts
  case Decl::HLSLBuffer:
    return true;
  // HLSL Change Ends
  default:
    return false;
  }
}
/// Whether \p DC is lexically nested inside a linkage specification of
/// language \p ID (the innermost linkage spec wins).
static bool isLinkageSpecContext(const DeclContext *DC,
                                 LinkageSpecDecl::LanguageIDs ID) {
  for (; DC->getDeclKind() != Decl::TranslationUnit;
       DC = DC->getLexicalParent())
    if (DC->getDeclKind() == Decl::LinkageSpec)
      return cast<LinkageSpecDecl>(DC)->getLanguage() == ID;
  return false;
}
/// Whether this context is lexically inside an extern "C" linkage spec.
bool DeclContext::isExternCContext() const {
  return isLinkageSpecContext(this, clang::LinkageSpecDecl::lang_c);
}
/// Whether this context is lexically inside an extern "C++" linkage spec.
bool DeclContext::isExternCXXContext() const {
  return isLinkageSpecContext(this, clang::LinkageSpecDecl::lang_cxx);
}
/// Whether this context encloses \p DC: some ancestor of \p DC (including
/// itself) has this context as its primary context.
bool DeclContext::Encloses(const DeclContext *DC) const {
  if (getPrimaryContext() != this)
    return getPrimaryContext()->Encloses(DC);
  while (DC) {
    if (DC->getPrimaryContext() == this)
      return true;
    DC = DC->getParent();
  }
  return false;
}
/// Return the context that holds the authoritative lookup table for this
/// context: the definition for tags and ObjC interfaces/protocols, the
/// original namespace for namespaces, and the context itself otherwise.
DeclContext *DeclContext::getPrimaryContext() {
  switch (DeclKind) {
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::LinkageSpec:
  case Decl::Block:
  case Decl::Captured:
    // There is only one DeclContext for these entities.
    return this;
  // HLSL Change Starts
  case Decl::HLSLBuffer:
    // Each buffer, even with the same name, is a distinct construct.
    return this;
  // HLSL Change Ends
  case Decl::Namespace:
    // The original namespace is our primary context.
    return static_cast<NamespaceDecl*>(this)->getOriginalNamespace();
  case Decl::ObjCMethod:
    return this;
  case Decl::ObjCInterface:
    // Prefer the definition, if one exists.
    if (ObjCInterfaceDecl *Def = cast<ObjCInterfaceDecl>(this)->getDefinition())
      return Def;
    return this;
  case Decl::ObjCProtocol:
    if (ObjCProtocolDecl *Def = cast<ObjCProtocolDecl>(this)->getDefinition())
      return Def;
    return this;
  case Decl::ObjCCategory:
    return this;
  case Decl::ObjCImplementation:
  case Decl::ObjCCategoryImpl:
    return this;
  default:
    if (DeclKind >= Decl::firstTag && DeclKind <= Decl::lastTag) {
      // If this is a tag type that has a definition or is currently
      // being defined, that definition is our primary context.
      TagDecl *Tag = cast<TagDecl>(this);
      if (TagDecl *Def = Tag->getDefinition())
        return Def;
      if (const TagType *TagTy = dyn_cast<TagType>(Tag->getTypeForDecl())) {
        // Note, TagType::getDecl returns the (partial) definition one exists.
        TagDecl *PossiblePartialDef = TagTy->getDecl();
        if (PossiblePartialDef->isBeingDefined())
          return PossiblePartialDef;
      } else {
        assert(isa<InjectedClassNameType>(Tag->getTypeForDecl()));
      }
      return Tag;
    }
    assert(DeclKind >= Decl::firstFunction && DeclKind <= Decl::lastFunction &&
           "Unknown DeclContext kind");
    return this;
  }
}
/// Collect every (re)declaration context that contributes declarations to
/// this context, oldest first. Only namespaces can have more than one.
void
DeclContext::collectAllContexts(SmallVectorImpl<DeclContext *> &Contexts){
  Contexts.clear();
  if (DeclKind != Decl::Namespace) {
    Contexts.push_back(this);
    return;
  }
  // Walk the redeclaration chain from newest to oldest, then reverse.
  auto *NS = static_cast<NamespaceDecl *>(this);
  for (NamespaceDecl *Cur = NS->getMostRecentDecl(); Cur;
       Cur = Cur->getPreviousDecl())
    Contexts.push_back(Cur);
  std::reverse(Contexts.begin(), Contexts.end());
}
/// Chain \p Decls together via Decl::NextInContextAndBits, skipping
/// FieldDecls when \p FieldsAlreadyLoaded. Returns the (first, last)
/// declarations of the new chain (null, null when empty).
std::pair<Decl *, Decl *>
DeclContext::BuildDeclChain(ArrayRef<Decl*> Decls,
                            bool FieldsAlreadyLoaded) {
  Decl *Head = nullptr;
  Decl *Tail = nullptr;
  for (Decl *D : Decls) {
    if (FieldsAlreadyLoaded && isa<FieldDecl>(D))
      continue;
    if (Tail)
      Tail->NextInContextAndBits.setPointer(D);
    else
      Head = D;
    Tail = D;
  }
  return std::make_pair(Head, Tail);
}
/// \brief We have just acquired external visible storage, and we already have
/// built a lookup map. For every name in the map, pull in the new names from
/// the external storage.
void DeclContext::reconcileExternalVisibleStorage() const {
  assert(NeedToReconcileExternalVisibleStorage && LookupPtr);
  NeedToReconcileExternalVisibleStorage = false;
  // Flag every existing entry so a later lookup re-queries the external
  // source for that name.
  for (auto &Lookup : *LookupPtr)
    Lookup.second.setHasExternalDecls();
}
/// \brief Load the declarations within this lexical storage from an
/// external source.
/// \return \c true if any declarations were added.
bool
DeclContext::LoadLexicalDeclsFromExternalStorage() const {
  ExternalASTSource *Source = getParentASTContext().getExternalSource();
  assert(hasExternalLexicalStorage() && Source && "No external storage?");
  // Notify that we have a DeclContext that is initializing.
  ExternalASTSource::Deserializing ADeclContext(Source);
  // Load the external declarations, if any.
  SmallVector<Decl*, 64> Decls;
  // Clear the flag first so a recursive load does not re-enter.
  ExternalLexicalStorage = false;
  switch (Source->FindExternalLexicalDecls(this, Decls)) {
  case ELR_Success:
    break;
  case ELR_Failure:
  case ELR_AlreadyLoaded:
    return false;
  }
  if (Decls.empty())
    return false;
  // We may have already loaded just the fields of this record, in which case
  // we need to ignore them.
  bool FieldsAlreadyLoaded = false;
  if (const RecordDecl *RD = dyn_cast<RecordDecl>(this))
    FieldsAlreadyLoaded = RD->LoadedFieldsFromExternalStorage;
  // Splice the newly-read declarations into the beginning of the list
  // of declarations.
  Decl *ExternalFirst, *ExternalLast;
  std::tie(ExternalFirst, ExternalLast) =
      BuildDeclChain(Decls, FieldsAlreadyLoaded);
  ExternalLast->NextInContextAndBits.setPointer(FirstDecl);
  FirstDecl = ExternalFirst;
  if (!LastDecl)
    LastDecl = ExternalLast;
  return true;
}
/// Record that the external source has no visible declarations for \p Name
/// in \p DC, so future lookups will not re-query it; returns an empty
/// lookup result.
DeclContext::lookup_result
ExternalASTSource::SetNoExternalVisibleDeclsForName(const DeclContext *DC,
                                                    DeclarationName Name) {
  ASTContext &Context = DC->getParentASTContext();
  StoredDeclsMap *Map = DC->LookupPtr;
  if (!Map)
    Map = DC->CreateStoredDeclsMap(Context);
  if (DC->NeedToReconcileExternalVisibleStorage)
    DC->reconcileExternalVisibleStorage();
  // Creates (or clears) the entry for Name with no external decls.
  (*Map)[Name].removeExternalDecls();
  return DeclContext::lookup_result();
}
/// Merge externally-provided visible declarations for \p Name into \p DC's
/// lookup table, replacing prior external entries and dropping those that
/// merely redeclare existing entries. Returns the merged lookup result.
DeclContext::lookup_result
ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
                                                  DeclarationName Name,
                                                  ArrayRef<NamedDecl*> Decls) {
  ASTContext &Context = DC->getParentASTContext();
  StoredDeclsMap *Map;
  if (!(Map = DC->LookupPtr))
    Map = DC->CreateStoredDeclsMap(Context);
  if (DC->NeedToReconcileExternalVisibleStorage)
    DC->reconcileExternalVisibleStorage();
  StoredDeclsList &List = (*Map)[Name];
  // Clear out any old external visible declarations, to avoid quadratic
  // performance in the redeclaration checks below.
  List.removeExternalDecls();
  if (!List.isNull()) {
    // We have both existing declarations and new declarations for this name.
    // Some of the declarations may simply replace existing ones. Handle those
    // first.
    llvm::SmallVector<unsigned, 8> Skip;
    for (unsigned I = 0, N = Decls.size(); I != N; ++I)
      if (List.HandleRedeclaration(Decls[I], /*IsKnownNewer*/false))
        Skip.push_back(I);
    // Sentinel so the SkipPos scan below never runs off the end.
    Skip.push_back(Decls.size());
    // Add in any new declarations.
    unsigned SkipPos = 0;
    for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
      if (I == Skip[SkipPos])
        ++SkipPos;
      else
        List.AddSubsequentDecl(Decls[I]);
    }
  } else {
    // Convert the array to a StoredDeclsList.
    for (ArrayRef<NamedDecl*>::iterator
           I = Decls.begin(), E = Decls.end(); I != E; ++I) {
      if (List.isNull())
        List.setOnlyValue(*I);
      else
        List.AddSubsequentDecl(*I);
    }
  }
  return List.getLookupResult();
}
/// Begin iterator over lexically-contained declarations, first faulting in
/// any declarations held by an external AST source.
DeclContext::decl_iterator DeclContext::decls_begin() const {
  if (hasExternalLexicalStorage())
    LoadLexicalDeclsFromExternalStorage();
  return decl_iterator(FirstDecl);
}
/// Whether this context lexically contains no declarations, after loading
/// any pending external declarations.
bool DeclContext::decls_empty() const {
  if (hasExternalLexicalStorage())
    LoadLexicalDeclsFromExternalStorage();
  return !FirstDecl;
}
/// Whether \p D is currently linked into this context's lexical decl chain
/// (it either has a successor or is the chain's tail).
bool DeclContext::containsDecl(Decl *D) const {
  return (D->getLexicalDeclContext() == this &&
          (D->NextInContextAndBits.getPointer() || D == LastDecl));
}
/// Unlink \p D from this context's singly-linked lexical decl chain and,
/// for named declarations, from the primary context's lookup table.
void DeclContext::removeDecl(Decl *D) {
  assert(D->getLexicalDeclContext() == this &&
         "decl being removed from non-lexical context");
  assert((D->NextInContextAndBits.getPointer() || D == LastDecl) &&
         "decl is not in decls list");
  // Remove D from the decl chain. This is O(n) but hopefully rare.
  if (D == FirstDecl) {
    if (D == LastDecl)
      FirstDecl = LastDecl = nullptr;
    else
      FirstDecl = D->NextInContextAndBits.getPointer();
  } else {
    // Scan for D's predecessor so we can splice around it.
    for (Decl *I = FirstDecl; true; I = I->NextInContextAndBits.getPointer()) {
      assert(I && "decl not found in linked list");
      if (I->NextInContextAndBits.getPointer() == D) {
        I->NextInContextAndBits.setPointer(D->NextInContextAndBits.getPointer());
        if (D == LastDecl) LastDecl = I;
        break;
      }
    }
  }
  // Mark that D is no longer in the decl chain.
  D->NextInContextAndBits.setPointer(nullptr);
  // Remove D from the lookup table if necessary.
  if (isa<NamedDecl>(D)) {
    NamedDecl *ND = cast<NamedDecl>(D);
    // Remove only decls that have a name
    if (!ND->getDeclName()) return;
    StoredDeclsMap *Map = getPrimaryContext()->LookupPtr;
    if (!Map) return;
    StoredDeclsMap::iterator Pos = Map->find(ND->getDeclName());
    assert(Pos != Map->end() && "no lookup entry for decl");
    // Only remove when the entry actually refers to ND (a vector may hold
    // several decls; a single-decl entry must match exactly).
    if (Pos->second.getAsVector() || Pos->second.getAsDecl() == ND)
      Pos->second.remove(ND);
  }
}
/// Append \p D to this context's lexical decl chain WITHOUT making it
/// visible to name lookup (callers wanting lookup visibility use addDecl).
void DeclContext::addHiddenDecl(Decl *D) {
  assert(D->getLexicalDeclContext() == this &&
         "Decl inserted into wrong lexical context");
  assert(!D->getNextDeclInContext() && D != LastDecl &&
         "Decl already inserted into a DeclContext");
  if (FirstDecl) {
    LastDecl->NextInContextAndBits.setPointer(D);
    LastDecl = D;
  } else {
    FirstDecl = LastDecl = D;
  }
  // Notify a C++ record declaration that we've added a member, so it can
  // update its class-specific state.
  if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this))
    Record->addedMember(D);
  // If this is a newly-created (not de-serialized) import declaration, wire
  // it in to the list of local import declarations.
  if (!D->isFromASTFile()) {
    if (ImportDecl *Import = dyn_cast<ImportDecl>(D))
      D->getASTContext().addedLocalImportDecl(Import);
  }
}
/// Append \p D to this context and, for named declarations, register it in
/// the lookup table of its semantic context's primary context.
void DeclContext::addDecl(Decl *D) {
  addHiddenDecl(D);
  auto *ND = dyn_cast<NamedDecl>(D);
  if (ND)
    ND->getDeclContext()->getPrimaryContext()
        ->makeDeclVisibleInContextWithFlags(ND, false, true);
}
/// Like addDecl, but flags the visibility update as internal (used during
/// deserialization so the mutation is not broadcast as new).
void DeclContext::addDeclInternal(Decl *D) {
  addHiddenDecl(D);
  auto *ND = dyn_cast<NamedDecl>(D);
  if (ND)
    ND->getDeclContext()->getPrimaryContext()
        ->makeDeclVisibleInContextWithFlags(ND, true, true);
}
/// shouldBeHidden - Determine whether a declaration which was declared
/// within its semantic context should be invisible to qualified name lookup.
static bool shouldBeHidden(NamedDecl *D) {
  // Unnamed declarations can never be found by name.
  if (!D->getDeclName())
    return true;
  // Template parameters, and entities in no identifier namespace (other
  // than using-directives, which lookup handles specially), are skipped.
  if (D->isTemplateParameter())
    return true;
  if (D->getIdentifierNamespace() == 0 && !isa<UsingDirectiveDecl>(D))
    return true;
  // Skip template specializations.
  // FIXME: This feels like a hack. Should DeclarationName support
  // template-ids, or is there a better way to keep specializations
  // from being visible?
  if (isa<ClassTemplateSpecializationDecl>(D))
    return true;
  const auto *FD = dyn_cast<FunctionDecl>(D);
  return FD && FD->isFunctionTemplateSpecialization();
}
/// buildLookup - Build the lookup data structure with all of the
/// declarations in this DeclContext (and any other contexts linked
/// to it or transparent contexts nested within it) and return it.
///
/// Note that the produced map may miss out declarations from an
/// external source. If it does, those entries will be marked with
/// the 'hasExternalDecls' flag.
StoredDeclsMap *DeclContext::buildLookup() {
  assert(this == getPrimaryContext() && "buildLookup called on non-primary DC");
  // Fast path: nothing is pending, the existing map is complete.
  if (!HasLazyLocalLexicalLookups && !HasLazyExternalLexicalLookups)
    return LookupPtr;
  SmallVector<DeclContext *, 2> Contexts;
  collectAllContexts(Contexts);
  if (HasLazyExternalLexicalLookups) {
    HasLazyExternalLexicalLookups = false;
    // Pull external lexical decls in first; any that arrive become lazy
    // local lookups to be folded in below.
    for (auto *DC : Contexts) {
      if (DC->hasExternalLexicalStorage())
        HasLazyLocalLexicalLookups |=
            DC->LoadLexicalDeclsFromExternalStorage();
    }
    if (!HasLazyLocalLexicalLookups)
      return LookupPtr;
  }
  for (auto *DC : Contexts)
    buildLookupImpl(DC, hasExternalVisibleStorage());
  // We no longer have any lazy decls.
  HasLazyLocalLexicalLookups = false;
  return LookupPtr;
}
/// buildLookupImpl - Build part of the lookup data structure for the
/// declarations contained within DCtx, which will either be this
/// DeclContext, a DeclContext linked to it, or a transparent context
/// nested within it.
void DeclContext::buildLookupImpl(DeclContext *DCtx, bool Internal) {
  for (Decl *D : DCtx->noload_decls()) {
    // Insert this declaration into the lookup structure, but only if
    // it's semantically within its decl context. Any other decls which
    // should be found in this context are added eagerly.
    //
    // If it's from an AST file, don't add it now. It'll get handled by
    // FindExternalVisibleDeclsByName if needed. Exception: if we're not
    // in C++, we do not track external visible decls for the TU, so in
    // that case we need to collect them all here.
    if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
      if (ND->getDeclContext() == DCtx && !shouldBeHidden(ND) &&
          (!ND->isFromASTFile() ||
           (isTranslationUnit() &&
            !getParentASTContext().getLangOpts().CPlusPlus)))
        makeDeclVisibleInContextImpl(ND, Internal);
    // If this declaration is itself a transparent declaration context
    // or inline namespace, add the members of this declaration of that
    // context (recursively).
    if (DeclContext *InnerCtx = dyn_cast<DeclContext>(D))
      if (InnerCtx->isTransparentContext() || InnerCtx->isInlineNamespace())
        buildLookupImpl(InnerCtx, Internal);
  }
}
NamedDecl *const DeclContextLookupResult::SingleElementDummyList = nullptr;
/// Perform qualified name lookup for \p Name in this context, consulting
/// the external AST source when this context has external visible storage.
DeclContext::lookup_result
DeclContext::lookup(DeclarationName Name) const {
  assert(DeclKind != Decl::LinkageSpec &&
         "Should not perform lookups into linkage specs!");
  // All lookup state lives on the primary context.
  const DeclContext *PrimaryContext = getPrimaryContext();
  if (PrimaryContext != this)
    return PrimaryContext->lookup(Name);
  // If we have an external source, ensure that any later redeclarations of this
  // context have been loaded, since they may add names to the result of this
  // lookup (or add external visible storage).
  ExternalASTSource *Source = getParentASTContext().getExternalSource();
  if (Source)
    (void)cast<Decl>(this)->getMostRecentDecl();
  if (hasExternalVisibleStorage()) {
    assert(Source && "external visible storage but no external source?");
    if (NeedToReconcileExternalVisibleStorage)
      reconcileExternalVisibleStorage();
    StoredDeclsMap *Map = LookupPtr;
    if (HasLazyLocalLexicalLookups || HasLazyExternalLexicalLookups)
      // FIXME: Make buildLookup const?
      Map = const_cast<DeclContext*>(this)->buildLookup();
    if (!Map)
      Map = CreateStoredDeclsMap(getParentASTContext());
    // If we have a lookup result with no external decls, we are done.
    std::pair<StoredDeclsMap::iterator, bool> R =
        Map->insert(std::make_pair(Name, StoredDeclsList()));
    if (!R.second && !R.first->second.hasExternalDecls())
      return R.first->second.getLookupResult();
    // Ask the external source; re-read from LookupPtr afterwards since the
    // source may have created or grown the map (invalidating R).
    if (Source->FindExternalVisibleDeclsByName(this, Name) || !R.second) {
      if (StoredDeclsMap *Map = LookupPtr) {
        StoredDeclsMap::iterator I = Map->find(Name);
        if (I != Map->end())
          return I->second.getLookupResult();
      }
    }
    return lookup_result();
  }
  // Purely local case: build the map lazily if needed and look up.
  StoredDeclsMap *Map = LookupPtr;
  if (HasLazyLocalLexicalLookups || HasLazyExternalLexicalLookups)
    Map = const_cast<DeclContext*>(this)->buildLookup();
  if (!Map)
    return lookup_result();
  StoredDeclsMap::iterator I = Map->find(Name);
  if (I == Map->end())
    return lookup_result();
  return I->second.getLookupResult();
}
/// Perform qualified lookup for \p Name without triggering any loads from
/// an external AST source: only declarations already parsed or already
/// deserialized are considered.
DeclContext::lookup_result
DeclContext::noload_lookup(DeclarationName Name) {
assert(DeclKind != Decl::LinkageSpec &&
"Should not perform lookups into linkage specs!");
// All lookup state lives on the primary context; delegate if we aren't it.
DeclContext *Primary = getPrimaryContext();
if (Primary != this)
return Primary->noload_lookup(Name);
// Flush any lazily-registered local (non-imported) declarations into the
// lookup table. External declarations are deliberately left alone.
if (HasLazyLocalLexicalLookups) {
SmallVector<DeclContext *, 2> AllContexts;
collectAllContexts(AllContexts);
for (DeclContext *Ctx : AllContexts)
buildLookupImpl(Ctx, hasExternalVisibleStorage());
HasLazyLocalLexicalLookups = false;
}
if (StoredDeclsMap *Table = LookupPtr) {
StoredDeclsMap::iterator Pos = Table->find(Name);
if (Pos != Table->end())
return Pos->second.getLookupResult();
}
return lookup_result();
}
/// Find all declarations named \p Name lexically within this context,
/// without loading anything from an external source and without building
/// lookup tables unless they already exist. Results are appended to
/// \p Results (which is cleared first).
void DeclContext::localUncachedLookup(DeclarationName Name,
                                      SmallVectorImpl<NamedDecl *> &Results) {
Results.clear();
// Fast path: with no external storage a normal lookup is exact, so just
// copy its results.
if (!hasExternalVisibleStorage() && !hasExternalLexicalStorage() && Name) {
lookup_result Found = lookup(Name);
Results.insert(Results.end(), Found.begin(), Found.end());
return;
}
// Next best: a fully built lookup table may already hold the answer.
// FIXME: Should we be checking these flags on the primary context?
if (Name && !HasLazyLocalLexicalLookups && !HasLazyExternalLexicalLookups) {
if (StoredDeclsMap *Table = LookupPtr) {
StoredDeclsMap::iterator Entry = Table->find(Name);
if (Entry != Table->end()) {
lookup_result Found = Entry->second.getLookupResult();
Results.insert(Results.end(), Found.begin(), Found.end());
return;
}
}
}
// Slow path: walk the lexical declaration chain directly.
// FIXME: If we have lazy external declarations, this will not find them!
// FIXME: Should we CollectAllContexts and walk them all here?
for (Decl *Cur = FirstDecl; Cur; Cur = Cur->getNextDeclInContext())
if (NamedDecl *ND = dyn_cast<NamedDecl>(Cur))
if (ND->getDeclName() == Name)
Results.push_back(ND);
}
/// Return the innermost non-transparent context enclosing (or equal to)
/// this one — the context in which redeclaration matching happens.
DeclContext *DeclContext::getRedeclContext() {
DeclContext *Result = this;
// Walk outward past every transparent context.
for (; Result->isTransparentContext(); Result = Result->getParent())
;
return Result;
}
/// Return the primary context of the nearest enclosing file-level context
/// (a namespace or the translation unit).
DeclContext *DeclContext::getEnclosingNamespaceContext() {
DeclContext *Cur = this;
// Climb until we reach a file context.
for (; !Cur->isFileContext(); Cur = Cur->getParent())
;
return Cur->getPrimaryContext();
}
/// Return the outermost record this context is lexically nested inside,
/// or null if this context is not within a record at all.
RecordDecl *DeclContext::getOuterLexicalRecordContext() {
RecordDecl *Outermost = nullptr;
// Remember each record we pass through; the last one wins.
for (DeclContext *Cur = this; Cur->isRecord(); Cur = Cur->getLexicalParent())
Outermost = cast<RecordDecl>(Cur);
return Outermost;
}
/// Determine whether this context is a member of the enclosing-namespace
/// set of \p O: i.e. whether \p O, or one of the namespaces it is a
/// transitively inline member of, equals this context.
bool DeclContext::InEnclosingNamespaceSetOf(const DeclContext *O) const {
// For non-file contexts the enclosing-namespace set is just the context
// itself, so this degenerates to Equals.
if (!isFileContext())
return O->Equals(this);
// Otherwise climb out of O through inline namespaces, testing each level.
while (O) {
if (O->Equals(this))
return true;
const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(O);
if (!NS || !NS->isInline())
return false;
O = NS->getParent();
}
return false;
}
/// Make \p D visible to qualified name lookup in this context.
void DeclContext::makeDeclVisibleInContext(NamedDecl *D) {
// Lookup tables are owned by primary contexts, so normalize both the
// insertion target and the decl's semantic context before comparing.
DeclContext *Target = this->getPrimaryContext();
DeclContext *Semantic = D->getDeclContext()->getPrimaryContext();
// When the decl is added outside its semantic context, a lazily-built
// lookup table would not rediscover it, so it must be inserted eagerly
// (Recoverable is false in that case).
Target->makeDeclVisibleInContextWithFlags(D, false, Target == Semantic);
}
/// Add \p D to this (primary) context's name-lookup data. \p Internal marks
/// additions performed while loading external (AST-file) declarations.
/// \p Recoverable is true when a later lazy lookup build would rediscover
/// \p D on its own (it lives in its semantic context), allowing the
/// insertion to be deferred.
void DeclContext::makeDeclVisibleInContextWithFlags(NamedDecl *D, bool Internal,
                                                    bool Recoverable) {
assert(this == getPrimaryContext() && "expected a primary DC");
// Skip declarations within functions.
if (isFunctionOrMethod())
return;
// Skip declarations which should be invisible to name lookup.
if (shouldBeHidden(D))
return;
// If we already have a lookup data structure, perform the insertion into
// it. If we might have externally-stored decls with this name, look them
// up and perform the insertion. If this decl was declared outside its
// semantic context, buildLookup won't add it, so add it now.
//
// FIXME: As a performance hack, don't add such decls into the translation
// unit unless we're in C++, since qualified lookup into the TU is never
// performed.
if (LookupPtr || hasExternalVisibleStorage() ||
((!Recoverable || D->getDeclContext() != D->getLexicalDeclContext()) &&
(getParentASTContext().getLangOpts().CPlusPlus ||
!isTranslationUnit()))) {
// If we have lazily omitted any decls, they might have the same name as
// the decl which we are adding, so build a full lookup table before adding
// this decl.
buildLookup();
makeDeclVisibleInContextImpl(D, Internal);
} else {
// Defer: mark that the table is missing local decls so it is rebuilt
// before it is next consulted.
HasLazyLocalLexicalLookups = true;
}
// If we are a transparent context or inline namespace, insert into our
// parent context, too. This operation is recursive.
if (isTransparentContext() || isInlineNamespace())
getParent()->getPrimaryContext()->
makeDeclVisibleInContextWithFlags(D, Internal, Recoverable);
Decl *DCAsDecl = cast<Decl>(this);
// Notify that a decl was made visible unless we are a Tag being defined.
if (!(isa<TagDecl>(DCAsDecl) && cast<TagDecl>(DCAsDecl)->isBeingDefined()))
if (ASTMutationListener *L = DCAsDecl->getASTMutationListener())
L->AddedVisibleDecl(this, D);
}
/// Insert \p D into this context's lookup table, creating the table on
/// demand. For non-internal additions, the external source is consulted
/// first so that imported declarations with the same name are present
/// before redeclaration handling runs.
void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D, bool Internal) {
// Find or create the stored declaration map.
StoredDeclsMap *Map = LookupPtr;
if (!Map) {
ASTContext *C = &getParentASTContext();
Map = CreateStoredDeclsMap(*C);
}
// If there is an external AST source, load any declarations it knows about
// with this declaration's name.
// If the lookup table contains an entry about this name it means that we
// have already checked the external source.
if (!Internal)
if (ExternalASTSource *Source = getParentASTContext().getExternalSource())
if (hasExternalVisibleStorage() &&
Map->find(D->getDeclName()) == Map->end())
Source->FindExternalVisibleDeclsByName(this, D->getDeclName());
// Insert this declaration into the map.
StoredDeclsList &DeclNameEntries = (*Map)[D->getDeclName()];
if (Internal) {
// If this is being added as part of loading an external declaration,
// this may not be the only external declaration with this name.
// In this case, we never try to replace an existing declaration; we'll
// handle that when we finalize the list of declarations for this name.
DeclNameEntries.setHasExternalDecls();
DeclNameEntries.AddSubsequentDecl(D);
return;
}
// First declaration with this name: store it directly.
if (DeclNameEntries.isNull()) {
DeclNameEntries.setOnlyValue(D);
return;
}
if (DeclNameEntries.HandleRedeclaration(D, /*IsKnownNewer*/!Internal)) {
// This declaration has replaced an existing one for which
// declarationReplaces returns true.
return;
}
// Put this declaration into the appropriate slot.
DeclNameEntries.AddSubsequentDecl(D);
}
/// Dereference: decode the current lookup entry as a using-directive.
UsingDirectiveDecl *DeclContext::udir_iterator::operator*() const {
return cast<UsingDirectiveDecl>(*I);
}
/// Returns the range of UsingDirectiveDecls stored within this context,
/// found by looking up the reserved using-directive name.
DeclContext::udir_range DeclContext::using_directives() const {
// FIXME: Ordinary lookup is a heavyweight way to fetch using directives;
// in C++ they are looked up more often than any other name.
lookup_result Dirs = lookup(UsingDirectiveDecl::getName());
return udir_range(Dirs.begin(), Dirs.end());
}
//===----------------------------------------------------------------------===//
// Creation and Destruction of StoredDeclsMaps. //
//===----------------------------------------------------------------------===//
/// Allocate this (primary) context's lookup table. Dependent contexts get
/// the DependentStoredDeclsMap variant, which can also hold dependent
/// diagnostics; the flavor is recorded in the ASTContext's chain so the
/// maps can later be deleted through the correct type.
StoredDeclsMap *DeclContext::CreateStoredDeclsMap(ASTContext &C) const {
assert(!LookupPtr && "context already has a decls map");
assert(getPrimaryContext() == this &&
"creating decls map on non-primary context");
bool Dependent = isDependentContext();
StoredDeclsMap *M = Dependent
? static_cast<StoredDeclsMap *>(new DependentStoredDeclsMap())
: new StoredDeclsMap();
// Thread the new map onto the context-wide list so ReleaseDeclContextMaps
// can destroy every map at once.
M->Previous = C.LastSDM;
C.LastSDM = llvm::PointerIntPair<StoredDeclsMap*,1>(M, Dependent);
LookupPtr = M;
return M;
}
/// Destroy every StoredDeclsMap ever created for this ASTContext, following
/// the Previous chain threaded through the maps by CreateStoredDeclsMap.
void ASTContext::ReleaseDeclContextMaps() {
// It's okay to delete DependentStoredDeclsMaps via a StoredDeclsMap
// pointer because the subclass doesn't add anything that needs to
// be deleted.
StoredDeclsMap::DestroyAll(LastSDM.getPointer(), LastSDM.getInt());
}
/// Walk the singly-linked chain of lookup maps and delete each one through
/// its correct dynamic type; the int half of each Previous link records
/// whether the next map in the chain is a DependentStoredDeclsMap.
void StoredDeclsMap::DestroyAll(StoredDeclsMap *Map, bool Dependent) {
StoredDeclsMap *Cur = Map;
bool CurDependent = Dependent;
while (Cur) {
// Capture the link before freeing the node that stores it.
llvm::PointerIntPair<StoredDeclsMap*,1> Link = Cur->Previous;
if (CurDependent)
delete static_cast<DependentStoredDeclsMap*>(Cur);
else
delete Cur;
Cur = Link.getPointer();
CurDependent = Link.getInt();
}
}
/// Record a diagnostic whose outcome can only be decided when the dependent
/// context \p Parent is instantiated; it is chained (LIFO) onto the
/// context's DependentStoredDeclsMap.
DependentDiagnostic *DependentDiagnostic::Create(ASTContext &C,
                                                 DeclContext *Parent,
                                                 const PartialDiagnostic &PDiag) {
assert(Parent->isDependentContext()
&& "cannot iterate dependent diagnostics of non-dependent context");
Parent = Parent->getPrimaryContext();
if (!Parent->LookupPtr)
Parent->CreateStoredDeclsMap(C);
// A dependent context always allocates a DependentStoredDeclsMap (see
// CreateStoredDeclsMap), so this downcast is safe.
DependentStoredDeclsMap *Map =
static_cast<DependentStoredDeclsMap *>(Parent->LookupPtr);
// Allocate the copy of the PartialDiagnostic via the ASTContext's
// BumpPtrAllocator, rather than the ASTContext itself.
PartialDiagnostic::Storage *DiagStorage = nullptr;
if (PDiag.hasStorage())
DiagStorage = new (C) PartialDiagnostic::Storage;
DependentDiagnostic *DD = new (C) DependentDiagnostic(PDiag, DiagStorage);
// TODO: Maybe we shouldn't reverse the order during insertion.
DD->NextDiagnostic = Map->FirstDiagnostic;
Map->FirstDiagnostic = DD;
return DD;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/StmtIterator.cpp | //===--- StmtIterator.cpp - Iterators for Statements ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines internal methods for StmtIterator.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/StmtIterator.h"
#include "clang/AST/Decl.h"
using namespace clang;
// FIXME: Add support for dependent-sized array types in C++?
// Does it even make sense to build a CFG for an uninstantiated template?
//
// Peel array types layer by layer until we find a variable-length array
// with an actual size expression, or run out of array layers.
static inline const VariableArrayType *FindVA(const Type* t) {
for (const ArrayType *AT = dyn_cast<ArrayType>(t); AT;
AT = dyn_cast<ArrayType>(t)) {
if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT))
if (VAT->getSizeExpr())
return VAT;
t = AT->getElementType().getTypePtr();
}
return nullptr;
}
/// Advance past the current variable-length array type: descend to the next
/// VLA nested in its element type, or, when none remains, resume the
/// surrounding iteration mode (decl group or sizeof-type).
void StmtIteratorBase::NextVA() {
assert (getVAPtr());
const VariableArrayType *p = getVAPtr();
p = FindVA(p->getElementType().getTypePtr());
setVAPtr(p);
if (p)
return;
if (inDeclGroup()) {
// Stay on the current decl if its initializer still needs visiting;
// otherwise advance to the next decl in the group.
if (VarDecl* VD = dyn_cast<VarDecl>(*DGI))
if (VD->Init)
return;
NextDecl();
}
else {
assert(inSizeOfTypeVA());
// A sizeof(VLA) iterator has nothing after its size expressions: end.
RawVAPtr = 0;
}
}
/// Move to the next declaration in the group that contributes statements,
/// optionally stepping past the current one first. Exhausting the group
/// puts the iterator into the end state.
void StmtIteratorBase::NextDecl(bool ImmediateAdvance) {
assert(getVAPtr() == nullptr);
assert(inDeclGroup());
if (ImmediateAdvance)
++DGI;
while (DGI != DGE) {
if (HandleDecl(*DGI))
return;
++DGI;
}
RawVAPtr = 0;
}
/// Decide whether \p D contributes statements to the iteration (a VLA size
/// expression or an initializer); when it carries a VLA, position the VLA
/// cursor on it. Returns true if the iterator should stop on this decl.
bool StmtIteratorBase::HandleDecl(Decl* D) {
if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
if (const VariableArrayType *VAT = FindVA(Var->getType().getTypePtr())) {
setVAPtr(VAT);
return true;
}
return Var->getInit() != nullptr;
}
if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(D)) {
if (const VariableArrayType *VAT =
FindVA(Typedef->getUnderlyingType().getTypePtr())) {
setVAPtr(VAT);
return true;
}
return false;
}
if (EnumConstantDecl *EC = dyn_cast<EnumConstantDecl>(D))
return EC->getInitExpr() != nullptr;
return false;
}
/// Iterate over a declaration group [dgi, dge): prime the iterator on the
/// first decl that actually contributes statements.
StmtIteratorBase::StmtIteratorBase(Decl** dgi, Decl** dge)
  : DGI(dgi), RawVAPtr(DeclGroupMode), DGE(dge) {
NextDecl(false);
}
/// Iterate over the size expressions of a variable-length array type
/// (as found under sizeof/alignof, per SizeOfTypeVAMode); the type pointer
/// is packed into RawVAPtr alongside the mode tag bits.
StmtIteratorBase::StmtIteratorBase(const VariableArrayType* t)
  : DGI(nullptr), RawVAPtr(SizeOfTypeVAMode) {
RawVAPtr |= reinterpret_cast<uintptr_t>(t);
}
/// Return a mutable reference to the statement the iterator currently
/// designates: either the size expression of the current VLA type, or the
/// initializer of the current variable in the decl group.
Stmt*& StmtIteratorBase::GetDeclExpr() const {
if (const VariableArrayType* VAPtr = getVAPtr()) {
assert (VAPtr->SizeExpr);
// const_cast: the type node owns the expression, but callers may need
// to rewrite it in place.
return const_cast<Stmt*&>(VAPtr->SizeExpr);
}
assert (inDeclGroup());
VarDecl* VD = cast<VarDecl>(*DGI);
return *VD->getInitAddress();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/DeclCXX.cpp | //===--- DeclCXX.cpp - C++ Declaration AST Node Implementation ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the C++ related Decl classes.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace clang;
// //
///////////////////////////////////////////////////////////////////////////////
//===----------------------------------------------------------------------===//
// Decl Allocation/Deallocation Method Implementations
//===----------------------------------------------------------------------===//
// Out-of-line anchor so AccessSpecDecl's vtable is emitted in this TU
// (standard LLVM anchor idiom).
void AccessSpecDecl::anchor() { }
/// Create an empty AccessSpecDecl shell, to be filled in by the AST reader
/// during deserialization.
AccessSpecDecl *AccessSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) AccessSpecDecl(EmptyShell());
}
/// Resolve a lazily-deserialized unresolved set: each entry currently holds
/// an external declaration ID encoded in the pointer bits, which is decoded
/// and replaced with the real NamedDecl from the external source.
void LazyASTUnresolvedSet::getFromExternalSource(ASTContext &C) const {
ExternalASTSource *Source = C.getExternalSource();
assert(Impl.Decls.isLazy() && "getFromExternalSource for non-lazy set");
assert(Source && "getFromExternalSource with no external source");
// The low two bits of each stored value are reserved, hence the >> 2.
// NOTE(review): they appear to carry per-entry flags (likely the access
// specifier) — confirm against the ASTUnresolvedSet encoding.
for (ASTUnresolvedSet::iterator I = Impl.begin(); I != Impl.end(); ++I)
I.setDecl(cast<NamedDecl>(Source->GetExternalDecl(
reinterpret_cast<uintptr_t>(I.getDecl()) >> 2)));
Impl.Decls.setLazy(false);
}
/// Start a fresh class definition with the most optimistic assumptions
/// (aggregate, POD, empty, trivial, standard-layout, constexpr-defaultable);
/// these bits are progressively cleared as bases and members are added.
CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
  : UserDeclaredConstructor(false), UserDeclaredSpecialMembers(0),
    Aggregate(true), PlainOldData(true), Empty(true), Polymorphic(false),
    Abstract(false), IsStandardLayout(true), HasNoNonEmptyBases(true),
    HasPrivateFields(false), HasProtectedFields(false), HasPublicFields(false),
    HasMutableFields(false), HasVariantMembers(false), HasOnlyCMembers(true),
    HasInClassInitializer(false), HasUninitializedReferenceMember(false),
    NeedOverloadResolutionForMoveConstructor(false),
    NeedOverloadResolutionForMoveAssignment(false),
    NeedOverloadResolutionForDestructor(false),
    DefaultedMoveConstructorIsDeleted(false),
    DefaultedMoveAssignmentIsDeleted(false),
    DefaultedDestructorIsDeleted(false),
    HasTrivialSpecialMembers(SMF_All),
    DeclaredNonTrivialSpecialMembers(0),
    HasIrrelevantDestructor(true),
    HasConstexprNonCopyMoveConstructor(false),
    DefaultedDefaultConstructorIsConstexpr(true),
    HasConstexprDefaultConstructor(false),
    HasNonLiteralTypeFieldsOrBases(false), ComputedVisibleConversions(false),
    UserProvidedDefaultConstructor(false), DeclaredSpecialMembers(0),
    ImplicitCopyConstructorHasConstParam(true),
    ImplicitCopyAssignmentHasConstParam(true),
    HasDeclaredCopyConstructorWithConstParam(false),
    HasDeclaredCopyAssignmentWithConstParam(false),
    IsLambda(false), IsParsingBaseSpecifiers(false), NumBases(0), NumVBases(0),
    Bases(), VBases(),
    Definition(D), FirstFriend() {
}
/// Materialize the base-specifier array when it is still stored as a lazy
/// offset, resolving it through the external AST source.
CXXBaseSpecifier *CXXRecordDecl::DefinitionData::getBasesSlowCase() const {
return Bases.get(Definition->getASTContext().getExternalSource());
}
/// Materialize the virtual-base array when it is still stored as a lazy
/// offset, resolving it through the external AST source.
CXXBaseSpecifier *CXXRecordDecl::DefinitionData::getVBasesSlowCase() const {
return VBases.get(Definition->getASTContext().getExternalSource());
}
/// Construct a C++ class declaration. A redeclaration shares the
/// DefinitionData of its previous declaration so that every declaration of
/// the class observes the single definition.
CXXRecordDecl::CXXRecordDecl(Kind K, TagKind TK, const ASTContext &C,
                             DeclContext *DC, SourceLocation StartLoc,
                             SourceLocation IdLoc, IdentifierInfo *Id,
                             CXXRecordDecl *PrevDecl)
  : RecordDecl(K, TK, C, DC, StartLoc, IdLoc, Id, PrevDecl),
    DefinitionData(PrevDecl ? PrevDecl->DefinitionData
                            : DefinitionDataPtr(this)),
    TemplateOrInstantiation() {}
/// Create a C++ class declaration in context \p DC. Unless
/// \p DelayTypeCreation is set, the corresponding RecordType is created
/// immediately in the ASTContext.
CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, TagKind TK,
                                     DeclContext *DC, SourceLocation StartLoc,
                                     SourceLocation IdLoc, IdentifierInfo *Id,
                                     CXXRecordDecl* PrevDecl,
                                     bool DelayTypeCreation) {
CXXRecordDecl *Result = new (C, DC) CXXRecordDecl(CXXRecord, TK, C, DC,
                                                  StartLoc, IdLoc, Id,
                                                  PrevDecl);
// With modules enabled, the visible definition may lag the imported one.
Result->MayHaveOutOfDateDef = C.getLangOpts().Modules;
// FIXME: DelayTypeCreation seems like such a hack
if (!DelayTypeCreation)
C.getTypeDeclType(Result, PrevDecl);
return Result;
}
/// Create the closure class for a lambda expression: an implicit, unnamed
/// class whose definition data is a LambdaDefinitionData carrying the
/// capture default, genericity and dependence of the lambda.
CXXRecordDecl *
CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC,
                            TypeSourceInfo *Info, SourceLocation Loc,
                            bool Dependent, bool IsGeneric,
                            LambdaCaptureDefault CaptureDefault) {
CXXRecordDecl *R =
new (C, DC) CXXRecordDecl(CXXRecord, TTK_Class, C, DC, Loc, Loc,
nullptr, nullptr);
// The closure type counts as being defined until the lambda is complete.
R->IsBeingDefined = true;
R->DefinitionData =
new (C) struct LambdaDefinitionData(R, Info, Dependent, IsGeneric,
CaptureDefault);
R->MayHaveOutOfDateDef = false;
R->setImplicit(true);
C.getTypeDeclType(R, /*PrevDecl=*/nullptr);
return R;
}
/// Create an empty CXXRecordDecl shell for the AST reader; all fields are
/// filled in during deserialization, so the definition cannot be out of
/// date.
CXXRecordDecl *
CXXRecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
CXXRecordDecl *R = new (C, ID) CXXRecordDecl(
CXXRecord, TTK_Struct, C, nullptr, SourceLocation(), SourceLocation(),
nullptr, nullptr);
R->MayHaveOutOfDateDef = false;
return R;
}
/// Set the direct base classes of this class, updating every property that
/// depends on the base hierarchy (aggregate/POD/empty/standard-layout and
/// triviality bits) and computing the flattened list of direct and indirect
/// virtual bases.
void
CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
                        unsigned NumBases) {
ASTContext &C = getASTContext();
// Release any previously-allocated (non-lazy) base array.
if (!data().Bases.isOffset() && data().NumBases > 0)
C.Deallocate(data().getBases());
if (NumBases) {
// C++ [dcl.init.aggr]p1:
//   An aggregate is [...] a class with [...] no base classes [...].
data().Aggregate = false;
// C++ [class]p4:
//   A POD-struct is an aggregate class...
data().PlainOldData = false;
}
// The set of seen virtual base types.
llvm::SmallPtrSet<CanQualType, 8> SeenVBaseTypes;
// The virtual bases of this class.
SmallVector<const CXXBaseSpecifier *, 8> VBases;
data().Bases = new(C) CXXBaseSpecifier [NumBases];
data().NumBases = NumBases;
for (unsigned i = 0; i < NumBases; ++i) {
data().getBases()[i] = *Bases[i];
// Keep track of inherited vbases for this base class.
const CXXBaseSpecifier *Base = Bases[i];
QualType BaseType = Base->getType();
// Skip dependent types; we can't do any checking on them now.
if (BaseType->isDependentType())
continue;
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
// A class with a non-empty base class is not empty.
// FIXME: Standard ref?
if (!BaseClassDecl->isEmpty()) {
if (!data().Empty) {
// C++0x [class]p7:
//   A standard-layout class is a class that:
//    [...]
//    -- either has no non-static data members in the most derived
//       class and at most one base class with non-static data members,
//       or has no base classes with non-static data members, and
// If this is the second non-empty base, then neither of these two
// clauses can be true.
data().IsStandardLayout = false;
}
data().Empty = false;
data().HasNoNonEmptyBases = false;
}
// C++ [class.virtual]p1:
//   A class that declares or inherits a virtual function is called a
//   polymorphic class.
if (BaseClassDecl->isPolymorphic())
data().Polymorphic = true;
// C++0x [class]p7:
//   A standard-layout class is a class that: [...]
//    -- has no non-standard-layout base classes
if (!BaseClassDecl->isStandardLayout())
data().IsStandardLayout = false;
// Record if this base is the first non-literal field or base.
if (!hasNonLiteralTypeFieldsOrBases() && !BaseType->isLiteralType(C))
data().HasNonLiteralTypeFieldsOrBases = true;
// Now go through all virtual bases of this base and add them.
for (const auto &VBase : BaseClassDecl->vbases()) {
// Add this base if it's not already in the list.
if (SeenVBaseTypes.insert(C.getCanonicalType(VBase.getType())).second) {
VBases.push_back(&VBase);
// C++11 [class.copy]p8:
//   The implicitly-declared copy constructor for a class X will have
//   the form 'X::X(const X&)' if each [...] virtual base class B of X
//   has a copy constructor whose first parameter is of type
//   'const B&' or 'const volatile B&' [...]
if (CXXRecordDecl *VBaseDecl = VBase.getType()->getAsCXXRecordDecl())
if (!VBaseDecl->hasCopyConstructorWithConstParam())
data().ImplicitCopyConstructorHasConstParam = false;
}
}
if (Base->isVirtual()) {
// Add this base if it's not already in the list.
if (SeenVBaseTypes.insert(C.getCanonicalType(BaseType)).second)
VBases.push_back(Base);
// C++0x [meta.unary.prop] is_empty:
//   T is a class type, but not a union type, with ... no virtual base
//   classes
data().Empty = false;
// C++11 [class.ctor]p5, C++11 [class.copy]p12, C++11 [class.copy]p25:
//   A [default constructor, copy/move constructor, or copy/move
//   assignment operator for a class X] is trivial [...] if:
//    -- class X has [...] no virtual base classes
data().HasTrivialSpecialMembers &= SMF_Destructor;
// C++0x [class]p7:
//   A standard-layout class is a class that: [...]
//    -- has [...] no virtual base classes
data().IsStandardLayout = false;
// C++11 [dcl.constexpr]p4:
//   In the definition of a constexpr constructor [...]
//    -- the class shall not have any virtual base classes
data().DefaultedDefaultConstructorIsConstexpr = false;
} else {
// C++ [class.ctor]p5:
//   A default constructor is trivial [...] if:
//    -- all the direct base classes of its class have trivial default
//       constructors.
if (!BaseClassDecl->hasTrivialDefaultConstructor())
data().HasTrivialSpecialMembers &= ~SMF_DefaultConstructor;
// C++0x [class.copy]p13:
//   A copy/move constructor for class X is trivial if [...]
//    [...]
//    -- the constructor selected to copy/move each direct base class
//       subobject is trivial, and
if (!BaseClassDecl->hasTrivialCopyConstructor())
data().HasTrivialSpecialMembers &= ~SMF_CopyConstructor;
// If the base class doesn't have a simple move constructor, we'll eagerly
// declare it and perform overload resolution to determine which function
// it actually calls. If it does have a simple move constructor, this
// check is correct.
if (!BaseClassDecl->hasTrivialMoveConstructor())
data().HasTrivialSpecialMembers &= ~SMF_MoveConstructor;
// C++0x [class.copy]p27:
//   A copy/move assignment operator for class X is trivial if [...]
//    [...]
//    -- the assignment operator selected to copy/move each direct base
//       class subobject is trivial, and
if (!BaseClassDecl->hasTrivialCopyAssignment())
data().HasTrivialSpecialMembers &= ~SMF_CopyAssignment;
// If the base class doesn't have a simple move assignment, we'll eagerly
// declare it and perform overload resolution to determine which function
// it actually calls. If it does have a simple move assignment, this
// check is correct.
if (!BaseClassDecl->hasTrivialMoveAssignment())
data().HasTrivialSpecialMembers &= ~SMF_MoveAssignment;
// C++11 [class.ctor]p6:
//   If that user-written default constructor would satisfy the
//   requirements of a constexpr constructor, the implicitly-defined
//   default constructor is constexpr.
if (!BaseClassDecl->hasConstexprDefaultConstructor())
data().DefaultedDefaultConstructorIsConstexpr = false;
}
// C++ [class.ctor]p3:
//   A destructor is trivial if all the direct base classes of its class
//   have trivial destructors.
if (!BaseClassDecl->hasTrivialDestructor())
data().HasTrivialSpecialMembers &= ~SMF_Destructor;
if (!BaseClassDecl->hasIrrelevantDestructor())
data().HasIrrelevantDestructor = false;
// C++11 [class.copy]p18:
//   The implicitly-declared copy assignment operator for a class X will
//   have the form 'X& X::operator=(const X&)' if each direct base class B
//   of X has a copy assignment operator whose parameter is of type 'const
//   B&', 'const volatile B&', or 'B' [...]
if (!BaseClassDecl->hasCopyAssignmentWithConstParam())
data().ImplicitCopyAssignmentHasConstParam = false;
// C++11 [class.copy]p8:
//   The implicitly-declared copy constructor for a class X will have
//   the form 'X::X(const X&)' if each direct [...] base class B of X
//   has a copy constructor whose first parameter is of type
//   'const B&' or 'const volatile B&' [...]
if (!BaseClassDecl->hasCopyConstructorWithConstParam())
data().ImplicitCopyConstructorHasConstParam = false;
// A class has an Objective-C object member if... or any of its bases
// has an Objective-C object member.
if (BaseClassDecl->hasObjectMember())
setHasObjectMember(true);
if (BaseClassDecl->hasVolatileMember())
setHasVolatileMember(true);
// Keep track of the presence of mutable fields.
if (BaseClassDecl->hasMutableFields())
data().HasMutableFields = true;
if (BaseClassDecl->hasUninitializedReferenceMember())
data().HasUninitializedReferenceMember = true;
addedClassSubobject(BaseClassDecl);
}
if (VBases.empty()) {
data().IsParsingBaseSpecifiers = false;
return;
}
// Create base specifier for any direct or indirect virtual bases.
data().VBases = new (C) CXXBaseSpecifier[VBases.size()];
data().NumVBases = VBases.size();
for (int I = 0, E = VBases.size(); I != E; ++I) {
QualType Type = VBases[I]->getType();
if (!Type->isDependentType())
addedClassSubobject(Type->getAsCXXRecordDecl());
data().getVBases()[I] = *VBases[I];
}
data().IsParsingBaseSpecifiers = false;
}
/// Propagate the consequences of gaining a base class or a member of class
/// type \p Subobj: if the subobject lacks "simple" move operations or a
/// simple destructor, the corresponding implicitly-defaulted members of
/// this class will require explicit overload resolution.
void CXXRecordDecl::addedClassSubobject(CXXRecordDecl *Subobj) {
// C++11 [class.copy]p11:
//   A defaulted copy/move constructor for a class X is defined as
//   deleted if X has:
//    -- a direct or virtual base class B that cannot be copied/moved [...]
//    -- a non-static data member of class type M (or array thereof)
//       that cannot be copied or moved [...]
if (!Subobj->hasSimpleMoveConstructor())
data().NeedOverloadResolutionForMoveConstructor = true;
// C++11 [class.copy]p23:
//   A defaulted copy/move assignment operator for a class X is defined as
//   deleted if X has:
//    -- a direct or virtual base class B that cannot be copied/moved [...]
//    -- a non-static data member of class type M (or array thereof)
//       that cannot be copied or moved [...]
if (!Subobj->hasSimpleMoveAssignment())
data().NeedOverloadResolutionForMoveAssignment = true;
// C++11 [class.ctor]p5, C++11 [class.copy]p11, C++11 [class.dtor]p5:
//   A defaulted [ctor or dtor] for a class X is defined as
//   deleted if X has:
//    -- any direct or virtual base class [...] has a type with a destructor
//       that is deleted or inaccessible from the defaulted [ctor or dtor].
//    -- any non-static data member has a type with a destructor
//       that is deleted or inaccessible from the defaulted [ctor or dtor].
if (!Subobj->hasSimpleDestructor()) {
data().NeedOverloadResolutionForMoveConstructor = true;
data().NeedOverloadResolutionForDestructor = true;
}
}
/// Callback function for CXXRecordDecl::forallBases that acknowledges
/// that it saw a base class. Used by hasAnyDependentBases, which only
/// cares whether the traversal itself succeeds.
static bool SawBase(const CXXRecordDecl *, void *) {
return true;
}
/// Determine whether any base (direct or indirect) of this class is
/// dependent: only possible in a dependent context, and detected by
/// forallBases failing to enumerate the full base hierarchy.
bool CXXRecordDecl::hasAnyDependentBases() const {
return isDependentContext() && !forallBases(SawBase, nullptr);
}
/// C++0x [class]p5: a trivially copyable class has no non-trivial copy or
/// move constructors, no non-trivial copy or move assignment operators,
/// and a trivial destructor.
bool CXXRecordDecl::isTriviallyCopyable() const {
return !hasNonTrivialCopyConstructor() &&
       !hasNonTrivialMoveConstructor() &&
       !hasNonTrivialCopyAssignment() &&
       !hasNonTrivialMoveAssignment() &&
       hasTrivialDestructor();
}
/// Note that one of this class's virtual functions was marked pure.
void CXXRecordDecl::markedVirtualFunctionPure() {
// C++ [class.abstract]p2:
//   A class is abstract if it has at least one pure virtual function.
data().Abstract = true;
}
void CXXRecordDecl::addedMember(Decl *D) {
if (!D->isImplicit() &&
!isa<FieldDecl>(D) &&
!isa<IndirectFieldDecl>(D) &&
(!isa<TagDecl>(D) || cast<TagDecl>(D)->getTagKind() == TTK_Class ||
cast<TagDecl>(D)->getTagKind() == TTK_Interface))
data().HasOnlyCMembers = false;
// Ignore friends and invalid declarations.
if (D->getFriendObjectKind() || D->isInvalidDecl())
return;
FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D);
if (FunTmpl)
D = FunTmpl->getTemplatedDecl();
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
if (Method->isVirtual()) {
// C++ [dcl.init.aggr]p1:
// An aggregate is an array or a class with [...] no virtual functions.
data().Aggregate = false;
// C++ [class]p4:
// A POD-struct is an aggregate class...
data().PlainOldData = false;
// Virtual functions make the class non-empty.
// FIXME: Standard ref?
data().Empty = false;
// C++ [class.virtual]p1:
// A class that declares or inherits a virtual function is called a
// polymorphic class.
data().Polymorphic = true;
// C++11 [class.ctor]p5, C++11 [class.copy]p12, C++11 [class.copy]p25:
// A [default constructor, copy/move constructor, or copy/move
// assignment operator for a class X] is trivial [...] if:
// -- class X has no virtual functions [...]
data().HasTrivialSpecialMembers &= SMF_Destructor;
// C++0x [class]p7:
// A standard-layout class is a class that: [...]
// -- has no virtual functions
data().IsStandardLayout = false;
}
}
// Notify the listener if an implicit member was added after the definition
// was completed.
if (!isBeingDefined() && D->isImplicit())
if (ASTMutationListener *L = getASTMutationListener())
L->AddedCXXImplicitMember(data().Definition, D);
// The kind of special member this declaration is, if any.
unsigned SMKind = 0;
// Handle constructors.
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
if (!Constructor->isImplicit()) {
// Note that we have a user-declared constructor.
data().UserDeclaredConstructor = true;
// C++ [class]p4:
// A POD-struct is an aggregate class [...]
// Since the POD bit is meant to be C++03 POD-ness, clear it even if the
// type is technically an aggregate in C++0x since it wouldn't be in 03.
data().PlainOldData = false;
}
// Technically, "user-provided" is only defined for special member
// functions, but the intent of the standard is clearly that it should apply
// to all functions.
bool UserProvided = Constructor->isUserProvided();
if (Constructor->isDefaultConstructor()) {
SMKind |= SMF_DefaultConstructor;
if (UserProvided)
data().UserProvidedDefaultConstructor = true;
if (Constructor->isConstexpr())
data().HasConstexprDefaultConstructor = true;
}
if (!FunTmpl) {
unsigned Quals;
if (Constructor->isCopyConstructor(Quals)) {
SMKind |= SMF_CopyConstructor;
if (Quals & Qualifiers::Const)
data().HasDeclaredCopyConstructorWithConstParam = true;
} else if (Constructor->isMoveConstructor())
SMKind |= SMF_MoveConstructor;
}
// Record if we see any constexpr constructors which are neither copy
// nor move constructors.
if (Constructor->isConstexpr() && !Constructor->isCopyOrMoveConstructor())
data().HasConstexprNonCopyMoveConstructor = true;
// C++ [dcl.init.aggr]p1:
// An aggregate is an array or a class with no user-declared
// constructors [...].
// C++11 [dcl.init.aggr]p1:
// An aggregate is an array or a class with no user-provided
// constructors [...].
if (getASTContext().getLangOpts().CPlusPlus11
? UserProvided : !Constructor->isImplicit())
data().Aggregate = false;
}
// Handle destructors.
if (CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D)) {
SMKind |= SMF_Destructor;
if (DD->isUserProvided())
data().HasIrrelevantDestructor = false;
// If the destructor is explicitly defaulted and not trivial or not public
// or if the destructor is deleted, we clear HasIrrelevantDestructor in
// finishedDefaultedOrDeletedMember.
// C++11 [class.dtor]p5:
// A destructor is trivial if [...] the destructor is not virtual.
if (DD->isVirtual())
data().HasTrivialSpecialMembers &= ~SMF_Destructor;
}
// Handle member functions.
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
if (Method->isCopyAssignmentOperator()) {
SMKind |= SMF_CopyAssignment;
const ReferenceType *ParamTy =
Method->getParamDecl(0)->getType()->getAs<ReferenceType>();
if (!ParamTy || ParamTy->getPointeeType().isConstQualified())
data().HasDeclaredCopyAssignmentWithConstParam = true;
}
if (Method->isMoveAssignmentOperator())
SMKind |= SMF_MoveAssignment;
// Keep the list of conversion functions up-to-date.
if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
// FIXME: We use the 'unsafe' accessor for the access specifier here,
// because Sema may not have set it yet. That's really just a misdesign
// in Sema. However, LLDB *will* have set the access specifier correctly,
// and adds declarations after the class is technically completed,
// so completeDefinition()'s overriding of the access specifiers doesn't
// work.
AccessSpecifier AS = Conversion->getAccessUnsafe();
if (Conversion->getPrimaryTemplate()) {
// We don't record specializations.
} else {
ASTContext &Ctx = getASTContext();
ASTUnresolvedSet &Conversions = data().Conversions.get(Ctx);
NamedDecl *Primary =
FunTmpl ? cast<NamedDecl>(FunTmpl) : cast<NamedDecl>(Conversion);
if (Primary->getPreviousDecl())
Conversions.replace(cast<NamedDecl>(Primary->getPreviousDecl()),
Primary, AS);
else
Conversions.addDecl(Ctx, Primary, AS);
}
}
if (SMKind) {
// If this is the first declaration of a special member, we no longer have
// an implicit trivial special member.
data().HasTrivialSpecialMembers &=
data().DeclaredSpecialMembers | ~SMKind;
if (!Method->isImplicit() && !Method->isUserProvided()) {
// This method is user-declared but not user-provided. We can't work out
// whether it's trivial yet (not until we get to the end of the class).
// We'll handle this method in finishedDefaultedOrDeletedMember.
} else if (Method->isTrivial())
data().HasTrivialSpecialMembers |= SMKind;
else
data().DeclaredNonTrivialSpecialMembers |= SMKind;
// Note when we have declared a declared special member, and suppress the
// implicit declaration of this special member.
data().DeclaredSpecialMembers |= SMKind;
if (!Method->isImplicit()) {
data().UserDeclaredSpecialMembers |= SMKind;
// C++03 [class]p4:
// A POD-struct is an aggregate class that has [...] no user-defined
// copy assignment operator and no user-defined destructor.
//
// Since the POD bit is meant to be C++03 POD-ness, and in C++03,
// aggregates could not have any constructors, clear it even for an
// explicitly defaulted or deleted constructor.
// type is technically an aggregate in C++0x since it wouldn't be in 03.
//
// Also, a user-declared move assignment operator makes a class non-POD.
// This is an extension in C++03.
data().PlainOldData = false;
}
}
return;
}
// Handle non-static data members.
if (FieldDecl *Field = dyn_cast<FieldDecl>(D)) {
// C++ [class.bit]p2:
// A declaration for a bit-field that omits the identifier declares an
// unnamed bit-field. Unnamed bit-fields are not members and cannot be
// initialized.
if (Field->isUnnamedBitfield())
return;
// C++ [dcl.init.aggr]p1:
// An aggregate is an array or a class (clause 9) with [...] no
// private or protected non-static data members (clause 11).
//
// A POD must be an aggregate.
if (D->getAccess() == AS_private || D->getAccess() == AS_protected) {
data().Aggregate = false;
data().PlainOldData = false;
}
// C++0x [class]p7:
// A standard-layout class is a class that:
// [...]
// -- has the same access control for all non-static data members,
switch (D->getAccess()) {
case AS_private: data().HasPrivateFields = true; break;
case AS_protected: data().HasProtectedFields = true; break;
case AS_public: data().HasPublicFields = true; break;
case AS_none: llvm_unreachable("Invalid access specifier");
};
if ((data().HasPrivateFields + data().HasProtectedFields +
data().HasPublicFields) > 1)
data().IsStandardLayout = false;
// Keep track of the presence of mutable fields.
if (Field->isMutable())
data().HasMutableFields = true;
// C++11 [class.union]p8, DR1460:
// If X is a union, a non-static data member of X that is not an anonymous
// union is a variant member of X.
if (isUnion() && !Field->isAnonymousStructOrUnion())
data().HasVariantMembers = true;
// C++0x [class]p9:
// A POD struct is a class that is both a trivial class and a
// standard-layout class, and has no non-static data members of type
// non-POD struct, non-POD union (or array of such types).
//
// Automatic Reference Counting: the presence of a member of Objective-C pointer type
// that does not explicitly have no lifetime makes the class a non-POD.
ASTContext &Context = getASTContext();
QualType T = Context.getBaseElementType(Field->getType());
if (T->isObjCRetainableType() || T.isObjCGCStrong()) {
if (!Context.getLangOpts().ObjCAutoRefCount) {
setHasObjectMember(true);
} else if (T.getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
// Objective-C Automatic Reference Counting:
// If a class has a non-static data member of Objective-C pointer
// type (or array thereof), it is a non-POD type and its
// default constructor (if any), copy constructor, move constructor,
// copy assignment operator, move assignment operator, and destructor are
// non-trivial.
setHasObjectMember(true);
struct DefinitionData &Data = data();
Data.PlainOldData = false;
Data.HasTrivialSpecialMembers = 0;
Data.HasIrrelevantDestructor = false;
}
} else if (!T.isCXX98PODType(Context))
data().PlainOldData = false;
if (T->isReferenceType()) {
if (!Field->hasInClassInitializer())
data().HasUninitializedReferenceMember = true;
// C++0x [class]p7:
// A standard-layout class is a class that:
// -- has no non-static data members of type [...] reference,
data().IsStandardLayout = false;
}
// Record if this field is the first non-literal or volatile field or base.
if (!T->isLiteralType(Context) || T.isVolatileQualified())
data().HasNonLiteralTypeFieldsOrBases = true;
if (Field->hasInClassInitializer() ||
(Field->isAnonymousStructOrUnion() &&
Field->getType()->getAsCXXRecordDecl()->hasInClassInitializer())) {
data().HasInClassInitializer = true;
// C++11 [class]p5:
// A default constructor is trivial if [...] no non-static data member
// of its class has a brace-or-equal-initializer.
data().HasTrivialSpecialMembers &= ~SMF_DefaultConstructor;
// C++11 [dcl.init.aggr]p1:
// An aggregate is a [...] class with [...] no
// brace-or-equal-initializers for non-static data members.
//
// This rule was removed in C++1y.
if (!getASTContext().getLangOpts().CPlusPlus14)
data().Aggregate = false;
// C++11 [class]p10:
// A POD struct is [...] a trivial class.
data().PlainOldData = false;
}
// C++11 [class.copy]p23:
// A defaulted copy/move assignment operator for a class X is defined
// as deleted if X has:
// -- a non-static data member of reference type
if (T->isReferenceType())
data().DefaultedMoveAssignmentIsDeleted = true;
if (const RecordType *RecordTy = T->getAs<RecordType>()) {
CXXRecordDecl* FieldRec = cast<CXXRecordDecl>(RecordTy->getDecl());
if (FieldRec->getDefinition()) {
addedClassSubobject(FieldRec);
// We may need to perform overload resolution to determine whether a
// field can be moved if it's const or volatile qualified.
if (T.getCVRQualifiers() & (Qualifiers::Const | Qualifiers::Volatile)) {
data().NeedOverloadResolutionForMoveConstructor = true;
data().NeedOverloadResolutionForMoveAssignment = true;
}
// C++11 [class.ctor]p5, C++11 [class.copy]p11:
// A defaulted [special member] for a class X is defined as
// deleted if:
// -- X is a union-like class that has a variant member with a
// non-trivial [corresponding special member]
if (isUnion()) {
if (FieldRec->hasNonTrivialMoveConstructor())
data().DefaultedMoveConstructorIsDeleted = true;
if (FieldRec->hasNonTrivialMoveAssignment())
data().DefaultedMoveAssignmentIsDeleted = true;
if (FieldRec->hasNonTrivialDestructor())
data().DefaultedDestructorIsDeleted = true;
}
// C++0x [class.ctor]p5:
// A default constructor is trivial [...] if:
// -- for all the non-static data members of its class that are of
// class type (or array thereof), each such class has a trivial
// default constructor.
if (!FieldRec->hasTrivialDefaultConstructor())
data().HasTrivialSpecialMembers &= ~SMF_DefaultConstructor;
// C++0x [class.copy]p13:
// A copy/move constructor for class X is trivial if [...]
// [...]
// -- for each non-static data member of X that is of class type (or
// an array thereof), the constructor selected to copy/move that
// member is trivial;
if (!FieldRec->hasTrivialCopyConstructor())
data().HasTrivialSpecialMembers &= ~SMF_CopyConstructor;
// If the field doesn't have a simple move constructor, we'll eagerly
// declare the move constructor for this class and we'll decide whether
// it's trivial then.
if (!FieldRec->hasTrivialMoveConstructor())
data().HasTrivialSpecialMembers &= ~SMF_MoveConstructor;
// C++0x [class.copy]p27:
// A copy/move assignment operator for class X is trivial if [...]
// [...]
// -- for each non-static data member of X that is of class type (or
// an array thereof), the assignment operator selected to
// copy/move that member is trivial;
if (!FieldRec->hasTrivialCopyAssignment())
data().HasTrivialSpecialMembers &= ~SMF_CopyAssignment;
// If the field doesn't have a simple move assignment, we'll eagerly
// declare the move assignment for this class and we'll decide whether
// it's trivial then.
if (!FieldRec->hasTrivialMoveAssignment())
data().HasTrivialSpecialMembers &= ~SMF_MoveAssignment;
if (!FieldRec->hasTrivialDestructor())
data().HasTrivialSpecialMembers &= ~SMF_Destructor;
if (!FieldRec->hasIrrelevantDestructor())
data().HasIrrelevantDestructor = false;
if (FieldRec->hasObjectMember())
setHasObjectMember(true);
if (FieldRec->hasVolatileMember())
setHasVolatileMember(true);
// C++0x [class]p7:
// A standard-layout class is a class that:
// -- has no non-static data members of type non-standard-layout
// class (or array of such types) [...]
if (!FieldRec->isStandardLayout())
data().IsStandardLayout = false;
// C++0x [class]p7:
// A standard-layout class is a class that:
// [...]
// -- has no base classes of the same type as the first non-static
// data member.
// We don't want to expend bits in the state of the record decl
// tracking whether this is the first non-static data member so we
// cheat a bit and use some of the existing state: the empty bit.
// Virtual bases and virtual methods make a class non-empty, but they
// also make it non-standard-layout so we needn't check here.
// A non-empty base class may leave the class standard-layout, but not
// if we have arrived here, and have at least one non-static data
// member. If IsStandardLayout remains true, then the first non-static
// data member must come through here with Empty still true, and Empty
// will subsequently be set to false below.
if (data().IsStandardLayout && data().Empty) {
for (const auto &BI : bases()) {
if (Context.hasSameUnqualifiedType(BI.getType(), T)) {
data().IsStandardLayout = false;
break;
}
}
}
// Keep track of the presence of mutable fields.
if (FieldRec->hasMutableFields())
data().HasMutableFields = true;
// C++11 [class.copy]p13:
// If the implicitly-defined constructor would satisfy the
// requirements of a constexpr constructor, the implicitly-defined
// constructor is constexpr.
// C++11 [dcl.constexpr]p4:
// -- every constructor involved in initializing non-static data
// members [...] shall be a constexpr constructor
if (!Field->hasInClassInitializer() &&
!FieldRec->hasConstexprDefaultConstructor() && !isUnion())
// The standard requires any in-class initializer to be a constant
// expression. We consider this to be a defect.
data().DefaultedDefaultConstructorIsConstexpr = false;
// C++11 [class.copy]p8:
// The implicitly-declared copy constructor for a class X will have
// the form 'X::X(const X&)' if [...] for all the non-static data
// members of X that are of a class type M (or array thereof), each
// such class type has a copy constructor whose first parameter is
// of type 'const M&' or 'const volatile M&'.
if (!FieldRec->hasCopyConstructorWithConstParam())
data().ImplicitCopyConstructorHasConstParam = false;
// C++11 [class.copy]p18:
        //   The implicitly-declared copy assignment operator for a class X will
// have the form 'X& X::operator=(const X&)' if [...] for all the
// non-static data members of X that are of a class type M (or array
// thereof), each such class type has a copy assignment operator whose
// parameter is of type 'const M&', 'const volatile M&' or 'M'.
if (!FieldRec->hasCopyAssignmentWithConstParam())
data().ImplicitCopyAssignmentHasConstParam = false;
if (FieldRec->hasUninitializedReferenceMember() &&
!Field->hasInClassInitializer())
data().HasUninitializedReferenceMember = true;
// C++11 [class.union]p8, DR1460:
// a non-static data member of an anonymous union that is a member of
// X is also a variant member of X.
if (FieldRec->hasVariantMembers() &&
Field->isAnonymousStructOrUnion())
data().HasVariantMembers = true;
}
} else {
// Base element type of field is a non-class type.
if (!T->isLiteralType(Context) ||
(!Field->hasInClassInitializer() && !isUnion()))
data().DefaultedDefaultConstructorIsConstexpr = false;
// C++11 [class.copy]p23:
// A defaulted copy/move assignment operator for a class X is defined
// as deleted if X has:
// -- a non-static data member of const non-class type (or array
// thereof)
if (T.isConstQualified())
data().DefaultedMoveAssignmentIsDeleted = true;
}
// C++0x [class]p7:
// A standard-layout class is a class that:
// [...]
// -- either has no non-static data members in the most derived
// class and at most one base class with non-static data members,
// or has no base classes with non-static data members, and
// At this point we know that we have a non-static data member, so the last
// clause holds.
if (!data().HasNoNonEmptyBases)
data().IsStandardLayout = false;
// If this is not a zero-length bit-field, then the class is not empty.
if (data().Empty) {
if (!Field->isBitField() ||
(!Field->getBitWidth()->isTypeDependent() &&
!Field->getBitWidth()->isValueDependent() &&
Field->getBitWidthValue(Context) != 0))
data().Empty = false;
}
}
// Handle using declarations of conversion functions.
if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(D)) {
if (Shadow->getDeclName().getNameKind()
== DeclarationName::CXXConversionFunctionName) {
ASTContext &Ctx = getASTContext();
data().Conversions.get(Ctx).addDecl(Ctx, Shadow, Shadow->getAccess());
}
}
}
void CXXRecordDecl::finishedDefaultedOrDeletedMember(CXXMethodDecl *D) {
  assert(!D->isImplicit() && !D->isUserProvided());

  // Work out which kind of special member (if any) was just finalized.
  unsigned SMKind = 0;

  if (CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(D)) {
    if (Ctor->isDefaultConstructor()) {
      SMKind |= SMF_DefaultConstructor;
      if (Ctor->isConstexpr())
        data().HasConstexprDefaultConstructor = true;
    }
    if (Ctor->isCopyConstructor()) {
      SMKind |= SMF_CopyConstructor;
    } else if (Ctor->isMoveConstructor()) {
      SMKind |= SMF_MoveConstructor;
    } else if (Ctor->isConstexpr()) {
      // We may now know that the constructor is constexpr.
      data().HasConstexprNonCopyMoveConstructor = true;
    }
  } else if (isa<CXXDestructorDecl>(D)) {
    SMKind |= SMF_Destructor;
    // A destructor that is non-trivial, non-public, or deleted matters to
    // clients, so it is no longer "irrelevant".
    if (!D->isTrivial() || D->getAccess() != AS_public || D->isDeleted())
      data().HasIrrelevantDestructor = false;
  } else if (D->isCopyAssignmentOperator()) {
    SMKind |= SMF_CopyAssignment;
  } else if (D->isMoveAssignmentOperator()) {
    SMKind |= SMF_MoveAssignment;
  }

  // Update which trivial / non-trivial special members we have.
  // addedMember will have skipped this step for this member.
  if (D->isTrivial())
    data().HasTrivialSpecialMembers |= SMKind;
  else
    data().DeclaredNonTrivialSpecialMembers |= SMKind;
}
bool CXXRecordDecl::isCLike() const {
  // Anything declared with 'class' or '__interface', and anything that is a
  // template (or an instantiation of one), is never C-like.
  const bool ClassOrInterface =
      getTagKind() == TTK_Class || getTagKind() == TTK_Interface;
  if (ClassOrInterface || !TemplateOrInstantiation.isNull())
    return false;

  // A bare forward declaration contains nothing C++-specific yet.
  if (!hasDefinition())
    return true;

  // Otherwise: C-like means POD with only C-compatible members.
  return isPOD() && data().HasOnlyCMembers;
}
bool CXXRecordDecl::isGenericLambda() const {
  // Only lambda closure types can be generic lambdas; the flag lives in the
  // lambda-specific definition data.
  return isLambda() && getLambdaData().IsGenericLambda;
}
CXXMethodDecl* CXXRecordDecl::getLambdaCallOperator() const {
  if (!isLambda())
    return nullptr;

  // A lambda closure type has exactly one member named 'operator()'.
  DeclContext::lookup_result Calls =
      lookup(getASTContext().DeclarationNames.getCXXOperatorName(OO_Call));
  assert(!Calls.empty() && "Missing lambda call operator!");
  assert(Calls.size() == 1 && "More than one lambda call operator!");

  // For a generic lambda the lookup yields a function template; unwrap it to
  // the templated method in that case.
  NamedDecl *CallOp = Calls.front();
  if (FunctionTemplateDecl *Tmpl = dyn_cast<FunctionTemplateDecl>(CallOp))
    return cast<CXXMethodDecl>(Tmpl->getTemplatedDecl());
  return cast<CXXMethodDecl>(CallOp);
}
CXXMethodDecl* CXXRecordDecl::getLambdaStaticInvoker() const {
  if (!isLambda())
    return nullptr;

  // Captureless lambdas have a static invoker member with a well-known
  // internal identifier; look it up by that name.
  DeclarationName Name =
      &getASTContext().Idents.get(getLambdaStaticInvokerName());
  DeclContext::lookup_result Result = lookup(Name);
  if (Result.empty())
    return nullptr;
  assert(Result.size() == 1 && "More than one static invoker operator!");

  // Generic lambdas wrap the invoker in a function template; unwrap it.
  NamedDecl *Invoker = Result.front();
  if (FunctionTemplateDecl *Tmpl = dyn_cast<FunctionTemplateDecl>(Invoker))
    return cast<CXXMethodDecl>(Tmpl->getTemplatedDecl());
  return cast<CXXMethodDecl>(Invoker);
}
void CXXRecordDecl::getCaptureFields(
       llvm::DenseMap<const VarDecl *, FieldDecl *> &Captures,
       FieldDecl *&ThisCapture) const {
  Captures.clear();
  ThisCapture = nullptr;

  // The closure type's fields are laid out in the same order as the lambda's
  // captures, so walk the two sequences in lock-step.
  LambdaDefinitionData &Lambda = getLambdaData();
  RecordDecl::field_iterator Field = field_begin();
  for (unsigned I = 0; I != Lambda.NumCaptures; ++I, ++Field) {
    const LambdaCapture &C = Lambda.Captures[I];
    if (C.capturesThis())
      ThisCapture = *Field;
    else if (C.capturesVariable())
      Captures[C.getCapturedVar()] = *Field;
  }
  assert(Field == field_end());
}
TemplateParameterList *
CXXRecordDecl::getGenericLambdaTemplateParameterList() const {
  if (!isLambda())
    return nullptr;

  // A generic lambda's call operator is a function template whose template
  // parameters are the invented ones for its 'auto' parameters.
  if (FunctionTemplateDecl *Tmpl =
          getLambdaCallOperator()->getDescribedFunctionTemplate())
    return Tmpl->getTemplateParameters();
  return nullptr;
}
// Strip any using-declaration / template wrapper from \p Conv down to the
// conversion function itself and return the canonical form of the type it
// converts to.
static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) {
  CXXConversionDecl *ConvFn =
      cast<CXXConversionDecl>(Conv->getUnderlyingDecl()->getAsFunction());
  return Context.getCanonicalType(ConvFn->getConversionType());
}
/// Collect the visible conversions of a base class.
///
/// \param Context the AST context owning the conversion sets
/// \param Record a base class of the class we're considering
/// \param InVirtual whether this base class is a virtual base (or a base
/// of a virtual base)
/// \param Access the access along the inheritance path to this base
/// \param ParentHiddenTypes the conversions provided by the inheritors
/// of this base
/// \param Output the set to which to add conversions from non-virtual bases
/// \param VOutput the set to which to add conversions from virtual bases
/// \param HiddenVBaseCs the set of conversions which were hidden in a
/// virtual base along some inheritance path
static void CollectVisibleConversions(ASTContext &Context,
                                      CXXRecordDecl *Record,
                                      bool InVirtual,
                                      AccessSpecifier Access,
                  const llvm::SmallPtrSet<CanQualType, 8> &ParentHiddenTypes,
                                      ASTUnresolvedSet &Output,
                                      UnresolvedSetImpl &VOutput,
                                      llvm::SmallPtrSet<NamedDecl*, 8> &HiddenVBaseCs) {
  // The set of types which have conversions in this class or its
  // subclasses. As an optimization, we don't copy the derived set
  // unless it might change.
  const llvm::SmallPtrSet<CanQualType, 8> *HiddenTypes = &ParentHiddenTypes;
  llvm::SmallPtrSet<CanQualType, 8> HiddenTypesBuffer;

  // Collect the direct conversions and figure out which conversions
  // will be hidden in the subclasses.
  CXXRecordDecl::conversion_iterator ConvI = Record->conversion_begin();
  CXXRecordDecl::conversion_iterator ConvE = Record->conversion_end();
  if (ConvI != ConvE) {
    // This class declares conversions of its own; those may hide conversions
    // to the same type coming from its bases, so extend the hidden-types set
    // before recursing below.
    HiddenTypesBuffer = ParentHiddenTypes;
    HiddenTypes = &HiddenTypesBuffer;

    for (CXXRecordDecl::conversion_iterator I = ConvI; I != ConvE; ++I) {
      CanQualType ConvType(GetConversionType(Context, I.getDecl()));
      bool Hidden = ParentHiddenTypes.count(ConvType);
      if (!Hidden)
        HiddenTypesBuffer.insert(ConvType);

      // If this conversion is hidden and we're in a virtual base,
      // remember that it's hidden along some inheritance path.
      if (Hidden && InVirtual)
        HiddenVBaseCs.insert(cast<NamedDecl>(I.getDecl()->getCanonicalDecl()));

      // If this conversion isn't hidden, add it to the appropriate output.
      else if (!Hidden) {
        // Access along the path is the most restrictive of the base-path
        // access and the member's own access.
        AccessSpecifier IAccess
          = CXXRecordDecl::MergeAccess(Access, I.getAccess());

        if (InVirtual)
          VOutput.addDecl(I.getDecl(), IAccess);
        else
          Output.addDecl(Context, I.getDecl(), IAccess);
      }
    }
  }

  // Collect information recursively from any base classes.
  for (const auto &I : Record->bases()) {
    const RecordType *RT = I.getType()->getAs<RecordType>();
    if (!RT) continue;

    AccessSpecifier BaseAccess
      = CXXRecordDecl::MergeAccess(Access, I.getAccessSpecifier());
    // Once we pass through a virtual base, everything beneath it is "in a
    // virtual base" for hiding purposes.
    bool BaseInVirtual = InVirtual || I.isVirtual();

    CXXRecordDecl *Base = cast<CXXRecordDecl>(RT->getDecl());
    CollectVisibleConversions(Context, Base, BaseInVirtual, BaseAccess,
                              *HiddenTypes, Output, VOutput, HiddenVBaseCs);
  }
}
/// Collect the visible conversions of a class.
///
/// This would be extremely straightforward if it weren't for virtual
/// bases. It might be worth special-casing that, really.
static void CollectVisibleConversions(ASTContext &Context,
                                      CXXRecordDecl *Record,
                                      ASTUnresolvedSet &Output) {
  // The collection of all conversions in virtual bases that we've
  // found. These will be added to the output as long as they don't
  // appear in the hidden-conversions set.
  UnresolvedSet<8> VBaseCs;

  // The set of conversions in virtual bases that we've determined to
  // be hidden.
  llvm::SmallPtrSet<NamedDecl*, 8> HiddenVBaseCs;

  // The set of types hidden by classes derived from this one.
  llvm::SmallPtrSet<CanQualType, 8> HiddenTypes;

  // Go ahead and collect the direct conversions and add them to the
  // hidden-types set. Direct conversions are always visible.
  CXXRecordDecl::conversion_iterator ConvI = Record->conversion_begin();
  CXXRecordDecl::conversion_iterator ConvE = Record->conversion_end();
  Output.append(Context, ConvI, ConvE);
  for (; ConvI != ConvE; ++ConvI)
    HiddenTypes.insert(GetConversionType(Context, ConvI.getDecl()));

  // Recursively collect conversions from base classes. Conversions found in
  // non-virtual bases are added to Output directly; conversions from virtual
  // bases accumulate in VBaseCs / HiddenVBaseCs and are filtered below.
  for (const auto &I : Record->bases()) {
    const RecordType *RT = I.getType()->getAs<RecordType>();
    if (!RT) continue;

    CollectVisibleConversions(Context, cast<CXXRecordDecl>(RT->getDecl()),
                              I.isVirtual(), I.getAccessSpecifier(),
                              HiddenTypes, Output, VBaseCs, HiddenVBaseCs);
  }

  // Add any unhidden conversions provided by virtual bases.
  for (UnresolvedSetIterator I = VBaseCs.begin(), E = VBaseCs.end();
         I != E; ++I) {
    if (!HiddenVBaseCs.count(cast<NamedDecl>(I.getDecl()->getCanonicalDecl())))
      Output.addDecl(Context, I.getDecl(), I.getAccess());
  }
}
/// getVisibleConversionFunctions - get all conversion functions visible
/// in current class; including conversion function templates.
llvm::iterator_range<CXXRecordDecl::conversion_iterator>
CXXRecordDecl::getVisibleConversionFunctions() {
  ASTContext &Ctx = getASTContext();

  ASTUnresolvedSet *Set;
  if (bases_begin() != bases_end()) {
    // Base classes may hide or contribute conversions, so visibility must be
    // computed; do it lazily and cache the result.
    Set = &data().VisibleConversions.get(Ctx);
    if (!data().ComputedVisibleConversions) {
      CollectVisibleConversions(Ctx, this, *Set);
      data().ComputedVisibleConversions = true;
    }
  } else {
    // If root class, all conversions are visible.
    Set = &data().Conversions.get(Ctx);
  }
  return llvm::make_range(Set->begin(), Set->end());
}
void CXXRecordDecl::removeConversion(const NamedDecl *ConvDecl) {
  // This operation is O(N) but extremely rare. Sema only uses it to
  // remove UsingShadowDecls in a class that were followed by a direct
  // declaration, e.g.:
  //   class A : B {
  //     using B::operator int;
  //     operator int();
  //   };
  // This is uncommon by itself and even more uncommon in conjunction
  // with sufficiently large numbers of directly-declared conversions
  // that asymptotic behavior matters.
  ASTUnresolvedSet &Convs = data().Conversions.get(getASTContext());
  for (unsigned I = 0, E = Convs.size(); I != E; ++I) {
    if (Convs[I].getDecl() != ConvDecl)
      continue;
    Convs.erase(I);
    assert(std::find(Convs.begin(), Convs.end(), ConvDecl) == Convs.end()
           && "conversion was found multiple times in unresolved set");
    return;
  }

  llvm_unreachable("conversion not found in set!");
}
CXXRecordDecl *CXXRecordDecl::getInstantiatedFromMemberClass() const {
  // Only member classes instantiated from a class template member carry
  // member-specialization info; everything else has no pattern.
  MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo();
  if (!MSInfo)
    return nullptr;
  return cast<CXXRecordDecl>(MSInfo->getInstantiatedFrom());
}
void
CXXRecordDecl::setInstantiationOfMemberClass(CXXRecordDecl *RD,
                                             TemplateSpecializationKind TSK) {
  assert(TemplateOrInstantiation.isNull() &&
         "Previous template or instantiation?");
  assert(!isa<ClassTemplatePartialSpecializationDecl>(this));

  // Record which member class we were instantiated from, and how.
  MemberSpecializationInfo *Info =
      new (getASTContext()) MemberSpecializationInfo(RD, TSK);
  TemplateOrInstantiation = Info;
}
TemplateSpecializationKind CXXRecordDecl::getTemplateSpecializationKind() const{
  // Class template specializations record the kind directly.
  if (const ClassTemplateSpecializationDecl *Spec =
          dyn_cast<ClassTemplateSpecializationDecl>(this))
    return Spec->getSpecializationKind();

  // Member classes of class templates record it in their member
  // specialization info.
  if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo())
    return MSInfo->getTemplateSpecializationKind();

  // Anything else was never a specialization of any kind.
  return TSK_Undeclared;
}
void
CXXRecordDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK) {
  // Class template specializations store the kind directly.
  if (ClassTemplateSpecializationDecl *Spec =
          dyn_cast<ClassTemplateSpecializationDecl>(this)) {
    Spec->setSpecializationKind(TSK);
    return;
  }

  // Member classes of class templates store it in their member
  // specialization info.
  if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo()) {
    MSInfo->setTemplateSpecializationKind(TSK);
    return;
  }

  llvm_unreachable("Not a class template or member class specialization");
}
const CXXRecordDecl *CXXRecordDecl::getTemplateInstantiationPattern() const {
  // If it's a class template specialization, find the template or partial
  // specialization from which it was instantiated.
  if (auto *TD = dyn_cast<ClassTemplateSpecializationDecl>(this)) {
    auto From = TD->getInstantiatedFrom();
    if (auto *CTD = From.dyn_cast<ClassTemplateDecl *>()) {
      // Walk back through the chain of instantiated-from member templates to
      // the ultimate pattern, stopping at an explicit member specialization
      // (which is itself the pattern to use).
      while (auto *NewCTD = CTD->getInstantiatedFromMemberTemplate()) {
        if (NewCTD->isMemberSpecialization())
          break;
        CTD = NewCTD;
      }
      return CTD->getTemplatedDecl()->getDefinition();
    }
    if (auto *CTPSD =
            From.dyn_cast<ClassTemplatePartialSpecializationDecl *>()) {
      // Same walk, for a partial specialization that is a member of a
      // class template.
      while (auto *NewCTPSD = CTPSD->getInstantiatedFromMember()) {
        if (NewCTPSD->isMemberSpecialization())
          break;
        CTPSD = NewCTPSD;
      }
      return CTPSD->getDefinition();
    }
  }

  if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo()) {
    if (isTemplateInstantiation(MSInfo->getTemplateSpecializationKind())) {
      // This is a member class instantiated from a class template; follow
      // the instantiated-from chain back to the member class pattern.
      const CXXRecordDecl *RD = this;
      while (auto *NewRD = RD->getInstantiatedFromMemberClass())
        RD = NewRD;
      return RD->getDefinition();
    }
  }

  assert(!isTemplateInstantiation(this->getTemplateSpecializationKind()) &&
         "couldn't find pattern for class template instantiation");
  return nullptr;
}
CXXDestructorDecl *CXXRecordDecl::getDestructor() const {
  // Destructors are found by looking up the special destructor name formed
  // from the canonical type of this class.
  ASTContext &Context = getASTContext();
  CanQualType ClassType =
      Context.getCanonicalType(Context.getTypeDeclType(this));
  DeclarationName Name =
      Context.DeclarationNames.getCXXDestructorName(ClassType);

  DeclContext::lookup_result R = lookup(Name);
  if (R.empty())
    return nullptr;
  return cast<CXXDestructorDecl>(R.front());
}
/// Returns true if this class's destructor, or the destructor of any base
/// class or field (recursively), is declared noreturn.
bool CXXRecordDecl::isAnyDestructorNoReturn() const {
  // Destructor is noreturn.
  if (const CXXDestructorDecl *Destructor = getDestructor())
    if (Destructor->isNoReturn())
      return true;

  // Check base classes destructor for noreturn.
  // Guard against bases whose type has no CXXRecordDecl (e.g. dependent
  // types), mirroring the guard already used for fields below; the original
  // code dereferenced getAsCXXRecordDecl() unconditionally here.
  for (const auto &Base : bases())
    if (const CXXRecordDecl *RD = Base.getType()->getAsCXXRecordDecl())
      if (RD->isAnyDestructorNoReturn())
        return true;

  // Check fields for noreturn.
  for (const auto *Field : fields())
    if (const CXXRecordDecl *RD =
            Field->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl())
      if (RD->isAnyDestructorNoReturn())
        return true;

  // All destructors are not noreturn.
  return false;
}
// Complete the definition without a pre-computed final-overrider map; the
// two-argument overload computes one itself if it needs it.
void CXXRecordDecl::completeDefinition() {
  completeDefinition(nullptr);
}
void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
  RecordDecl::completeDefinition();

  // If the class may be abstract (but hasn't been marked as such), check for
  // any pure final overriders.
  if (mayBeAbstract()) {
    // Compute the final overriders ourselves unless the caller supplied a
    // precomputed map.
    CXXFinalOverriderMap MyFinalOverriders;
    if (!FinalOverriders) {
      getFinalOverriders(MyFinalOverriders);
      FinalOverriders = &MyFinalOverriders;
    }

    bool Done = false;
    for (CXXFinalOverriderMap::iterator M = FinalOverriders->begin(),
                                     MEnd = FinalOverriders->end();
         M != MEnd && !Done; ++M) {
      for (OverridingMethods::iterator SO = M->second.begin(),
                                    SOEnd = M->second.end();
           SO != SOEnd && !Done; ++SO) {
        assert(SO->second.size() > 0 &&
               "All virtual functions have overridding virtual functions");

        // C++ [class.abstract]p4:
        //   A class is abstract if it contains or inherits at least one
        //   pure virtual function for which the final overrider is pure
        //   virtual.
        if (SO->second.front().Method->isPure()) {
          data().Abstract = true;
          Done = true;
          break;
        }
      }
    }
  }

  // Set access bits correctly on the directly-declared conversions.
  for (conversion_iterator I = conversion_begin(), E = conversion_end();
       I != E; ++I)
    I.setAccess((*I)->getAccess());
}
bool CXXRecordDecl::mayBeAbstract() const {
if (data().Abstract || isInvalidDecl() || !data().Polymorphic ||
isDependentContext())
return false;
for (const auto &B : bases()) {
CXXRecordDecl *BaseDecl
= cast<CXXRecordDecl>(B.getType()->getAs<RecordType>()->getDecl());
if (BaseDecl->isAbstract())
return true;
}
return false;
}
// Out-of-line anchor() pins CXXMethodDecl's vtable to this translation unit
// (standard LLVM idiom to avoid emitting the vtable in every TU).
void CXXMethodDecl::anchor() { }
bool CXXMethodDecl::isStatic() const {
const CXXMethodDecl *MD = getCanonicalDecl();
if (MD->getStorageClass() == SC_Static)
return true;
OverloadedOperatorKind OOK = getDeclName().getCXXOverloadedOperator();
return isStaticOverloadedOperator(OOK);
}
static bool recursivelyOverrides(const CXXMethodDecl *DerivedMD,
const CXXMethodDecl *BaseMD) {
for (CXXMethodDecl::method_iterator I = DerivedMD->begin_overridden_methods(),
E = DerivedMD->end_overridden_methods(); I != E; ++I) {
const CXXMethodDecl *MD = *I;
if (MD->getCanonicalDecl() == BaseMD->getCanonicalDecl())
return true;
if (recursivelyOverrides(MD, BaseMD))
return true;
}
return false;
}
CXXMethodDecl *
CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD,
                                             bool MayBeBase) {
  // Trivial case: RD is the very class this method belongs to.
  if (this->getParent()->getCanonicalDecl() == RD->getCanonicalDecl())
    return this;

  // Lookup doesn't work for destructors, so handle them separately.
  if (isa<CXXDestructorDecl>(this)) {
    CXXMethodDecl *MD = RD->getDestructor();
    if (MD) {
      if (recursivelyOverrides(MD, this))
        return MD;
      // With MayBeBase, also accept RD's destructor when *this* overrides
      // it, i.e. RD may be a base class of this method's class.
      if (MayBeBase && recursivelyOverrides(this, MD))
        return MD;
    }
    return nullptr;
  }

  // Look in RD for a method with the same name that overrides this one
  // (or, with MayBeBase, that this one overrides).
  for (auto *ND : RD->lookup(getDeclName())) {
    CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND);
    if (!MD)
      continue;
    if (recursivelyOverrides(MD, this))
      return MD;
    if (MayBeBase && recursivelyOverrides(this, MD))
      return MD;
  }

  // No direct match in RD; recurse into its base classes.
  for (const auto &I : RD->bases()) {
    const RecordType *RT = I.getType()->getAs<RecordType>();
    if (!RT)
      continue;
    const CXXRecordDecl *Base = cast<CXXRecordDecl>(RT->getDecl());
    CXXMethodDecl *T = this->getCorrespondingMethodInClass(Base);
    if (T)
      return T;
  }

  return nullptr;
}
CXXMethodDecl *
CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD,
                      SourceLocation StartLoc,
                      const DeclarationNameInfo &NameInfo,
                      QualType T, TypeSourceInfo *TInfo,
                      StorageClass SC, bool isInline,
                      bool isConstexpr, SourceLocation EndLocation) {
  // Allocate the method in the context's arena, attached to its class.
  CXXMethodDecl *MD =
      new (C, RD) CXXMethodDecl(CXXMethod, C, RD, StartLoc, NameInfo, T,
                                TInfo, SC, isInline, isConstexpr, EndLocation);
  return MD;
}
CXXMethodDecl *CXXMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) CXXMethodDecl(CXXMethod, C, nullptr, SourceLocation(),
DeclarationNameInfo(), QualType(), nullptr,
SC_None, false, false, SourceLocation());
}
bool CXXMethodDecl::isUsualDeallocationFunction() const {
if (getOverloadedOperator() != OO_Delete &&
getOverloadedOperator() != OO_Array_Delete)
return false;
// C++ [basic.stc.dynamic.deallocation]p2:
// A template instance is never a usual deallocation function,
// regardless of its signature.
if (getPrimaryTemplate())
return false;
// C++ [basic.stc.dynamic.deallocation]p2:
// If a class T has a member deallocation function named operator delete
// with exactly one parameter, then that function is a usual (non-placement)
// deallocation function. [...]
if (getNumParams() == 1)
return true;
// C++ [basic.stc.dynamic.deallocation]p2:
// [...] If class T does not declare such an operator delete but does
// declare a member deallocation function named operator delete with
// exactly two parameters, the second of which has type std::size_t (18.1),
// then this function is a usual deallocation function.
ASTContext &Context = getASTContext();
if (getNumParams() != 2 ||
!Context.hasSameUnqualifiedType(getParamDecl(1)->getType(),
Context.getSizeType()))
return false;
// This function is a usual deallocation function if there are no
// single-parameter deallocation functions of the same kind.
DeclContext::lookup_result R = getDeclContext()->lookup(getDeclName());
for (DeclContext::lookup_result::iterator I = R.begin(), E = R.end();
I != E; ++I) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I))
if (FD->getNumParams() == 1)
return false;
}
return true;
}
/// Determine whether this is a copy-assignment operator per
/// C++0x [class.copy]p17.
bool CXXMethodDecl::isCopyAssignmentOperator() const {
  // C++0x [class.copy]p17:
  //  A user-declared copy assignment operator X::operator= is a non-static
  //  non-template member function of class X with exactly one parameter of
  //  type X, X&, const X&, volatile X& or const volatile X&.
  if (/*operator=*/getOverloadedOperator() != OO_Equal ||
      /*non-static*/ isStatic() ||
      /*non-template*/getPrimaryTemplate() || getDescribedFunctionTemplate() ||
      getNumParams() != 1)
    return false;
  // Strip an lvalue reference (X& and cv-qualified variants); by-value X is
  // also accepted.
  QualType ParamType = getParamDecl(0)->getType();
  if (const LValueReferenceType *Ref = ParamType->getAs<LValueReferenceType>())
    ParamType = Ref->getPointeeType();
  // The (possibly cv-qualified) parameter must be our own class type.
  ASTContext &Context = getASTContext();
  QualType ClassType
    = Context.getCanonicalType(Context.getTypeDeclType(getParent()));
  return Context.hasSameUnqualifiedType(ClassType, ParamType);
}
/// Determine whether this is a move-assignment operator per
/// C++0x [class.copy]p19.
bool CXXMethodDecl::isMoveAssignmentOperator() const {
  // C++0x [class.copy]p19:
  //  A user-declared move assignment operator X::operator= is a non-static
  //  non-template member function of class X with exactly one parameter of type
  //  X&&, const X&&, volatile X&&, or const volatile X&&.
  if (getOverloadedOperator() != OO_Equal || isStatic() ||
      getPrimaryTemplate() || getDescribedFunctionTemplate() ||
      getNumParams() != 1)
    return false;
  // Unlike copy assignment, the parameter must be an rvalue reference.
  QualType ParamType = getParamDecl(0)->getType();
  if (!isa<RValueReferenceType>(ParamType))
    return false;
  ParamType = ParamType->getPointeeType();
  // The (possibly cv-qualified) referent must be our own class type.
  ASTContext &Context = getASTContext();
  QualType ClassType
    = Context.getCanonicalType(Context.getTypeDeclType(getParent()));
  return Context.hasSameUnqualifiedType(ClassType, ParamType);
}
/// Record that this virtual method overrides \p MD.  The override table is
/// owned by the ASTContext, not the method itself.
void CXXMethodDecl::addOverriddenMethod(const CXXMethodDecl *MD) {
  assert(MD->isCanonicalDecl() && "Method is not canonical!");
  assert(!MD->getParent()->isDependentContext() &&
         "Can't add an overridden method to a class template!");
  assert(MD->isVirtual() && "Method is not virtual!");
  getASTContext().addOverriddenMethod(this, MD);
}
/// Iterate the methods this one overrides.  Constructors report an empty
/// range: their slot in the overridden-methods table is reused to store an
/// inherited constructor (see CXXConstructorDecl::getInheritedConstructor).
CXXMethodDecl::method_iterator CXXMethodDecl::begin_overridden_methods() const {
  if (isa<CXXConstructorDecl>(this)) return nullptr;
  return getASTContext().overridden_methods_begin(this);
}
CXXMethodDecl::method_iterator CXXMethodDecl::end_overridden_methods() const {
  if (isa<CXXConstructorDecl>(this)) return nullptr;
  return getASTContext().overridden_methods_end(this);
}
/// Number of methods this one overrides; always 0 for constructors (see
/// the inherited-constructor note above begin_overridden_methods).
unsigned CXXMethodDecl::size_overridden_methods() const {
  if (isa<CXXConstructorDecl>(this)) return 0;
  return getASTContext().overridden_methods_size(this);
}
/// Compute the type of 'this' inside this method.
QualType CXXMethodDecl::getThisType(ASTContext &C) const {
  // C++ 9.3.2p1: The type of this in a member function of a class X is X*.
  // If the member function is declared const, the type of this is const X*,
  // if the member function is declared volatile, the type of this is
  // volatile X*, and if the member function is declared const volatile,
  // the type of this is const volatile X*.
  assert(isInstance() && "No 'this' for static methods!");
  QualType ClassTy = C.getTypeDeclType(getParent());
  // Apply the method's cv-qualifiers to the class type.
  ClassTy = C.getQualifiedType(ClassTy,
                               Qualifiers::fromCVRMask(getTypeQualifiers()));
  // HLSL has no pointers; 'this' is modeled as an lvalue reference instead.
  return C.getLangOpts().HLSL ? C.getLValueReferenceType(ClassTy) : C.getPointerType(ClassTy);
}
// HLSL Change Begin - This is a reference.
/// Compute the cv-qualified class type of the object 'this' refers to
/// (i.e. getThisType without the pointer/reference wrapper).
QualType CXXMethodDecl::getThisObjectType(ASTContext &C) const {
  QualType ClassTy = C.getTypeDeclType(getParent());
  ClassTy = C.getQualifiedType(ClassTy,
                               Qualifiers::fromCVRMask(getTypeQualifiers()));
  return ClassTy;
}
// HLSL Change End - This is a reference.
/// Determine whether this method has a body written inside its class
/// (i.e. not defined out-of-line), looking through template instantiation.
bool CXXMethodDecl::hasInlineBody() const {
  // If this function is a template instantiation, look at the template from
  // which it was instantiated.
  const FunctionDecl *CheckFn = getTemplateInstantiationPattern();
  if (!CheckFn)
    CheckFn = this;
  const FunctionDecl *fn;
  return CheckFn->hasBody(fn) && !fn->isOutOfLine();
}
bool CXXMethodDecl::isLambdaStaticInvoker() const {
const CXXRecordDecl *P = getParent();
if (P->isLambda()) {
if (const CXXMethodDecl *StaticInvoker = P->getLambdaStaticInvoker()) {
if (StaticInvoker == this) return true;
if (P->isGenericLambda() && this->isFunctionTemplateSpecialization())
return StaticInvoker == this->getPrimaryTemplate()->getTemplatedDecl();
}
}
return false;
}
/// Base-class initializer (possibly a pack-expansion, when EllipsisLoc is
/// valid).  The ellipsis location shares storage with the member location.
CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
                                       TypeSourceInfo *TInfo, bool IsVirtual,
                                       SourceLocation L, Expr *Init,
                                       SourceLocation R,
                                       SourceLocation EllipsisLoc)
  : Initializee(TInfo), MemberOrEllipsisLocation(EllipsisLoc), Init(Init),
    LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(IsVirtual),
    IsWritten(false), SourceOrderOrNumArrayIndices(0)
{
}
/// Direct (non-anonymous) member initializer.
CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
                                       FieldDecl *Member,
                                       SourceLocation MemberLoc,
                                       SourceLocation L, Expr *Init,
                                       SourceLocation R)
  : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
    LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
    IsWritten(false), SourceOrderOrNumArrayIndices(0)
{
}
/// Initializer for a member of an anonymous struct/union, named via an
/// indirect field.
CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
                                       IndirectFieldDecl *Member,
                                       SourceLocation MemberLoc,
                                       SourceLocation L, Expr *Init,
                                       SourceLocation R)
  : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
    LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
    IsWritten(false), SourceOrderOrNumArrayIndices(0)
{
}
/// Delegating-constructor initializer (C++11): the initializee is the
/// constructor's own class type.
CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
                                       TypeSourceInfo *TInfo,
                                       SourceLocation L, Expr *Init,
                                       SourceLocation R)
  : Initializee(TInfo), MemberOrEllipsisLocation(), Init(Init),
    LParenLoc(L), RParenLoc(R), IsDelegating(true), IsVirtual(false),
    IsWritten(false), SourceOrderOrNumArrayIndices(0)
{
}
/// Member initializer for an array member, carrying the implicit index
/// variables used to initialize each element.  The index pointers are
/// copied into trailing storage allocated right after this object (see
/// CXXCtorInitializer::Create).
CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
                                       FieldDecl *Member,
                                       SourceLocation MemberLoc,
                                       SourceLocation L, Expr *Init,
                                       SourceLocation R,
                                       VarDecl **Indices,
                                       unsigned NumIndices)
  : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
    LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
    IsWritten(false), SourceOrderOrNumArrayIndices(NumIndices)
{
  // Trailing-object idiom: the VarDecl* array lives immediately after this.
  VarDecl **MyIndices = reinterpret_cast<VarDecl **> (this + 1);
  memcpy(MyIndices, Indices, NumIndices * sizeof(VarDecl *));
}
/// Allocate a member initializer with trailing storage for \p NumIndices
/// array-index variables (see the matching constructor above).
CXXCtorInitializer *CXXCtorInitializer::Create(ASTContext &Context,
                                               FieldDecl *Member,
                                               SourceLocation MemberLoc,
                                               SourceLocation L, Expr *Init,
                                               SourceLocation R,
                                               VarDecl **Indices,
                                               unsigned NumIndices) {
  // Over-allocate so the VarDecl* array fits right after the object.
  void *Mem = Context.Allocate(sizeof(CXXCtorInitializer) +
                               sizeof(VarDecl *) * NumIndices,
                               llvm::alignOf<CXXCtorInitializer>());
  return new (Mem) CXXCtorInitializer(Context, Member, MemberLoc, L, Init, R,
                                      Indices, NumIndices);
}
/// Source type location of the initialized base class, or an empty TypeLoc
/// when this is not a base-class initializer.
TypeLoc CXXCtorInitializer::getBaseClassLoc() const {
  if (!isBaseInitializer())
    return TypeLoc();
  return Initializee.get<TypeSourceInfo*>()->getTypeLoc();
}
/// Type of the initialized base class, or null when this is not a
/// base-class initializer.
const Type *CXXCtorInitializer::getBaseClass() const {
  if (!isBaseInitializer())
    return nullptr;
  return Initializee.get<TypeSourceInfo*>()->getType().getTypePtr();
}
/// Representative source location for this initializer: the member name for
/// member initializers, otherwise the start of the base/delegated type.
SourceLocation CXXCtorInitializer::getSourceLocation() const {
  if (isInClassMemberInitializer())
    return getAnyMember()->getLocation();
  if (isAnyMemberInitializer())
    return getMemberLocation();
  if (TypeSourceInfo *TSInfo = Initializee.get<TypeSourceInfo*>())
    return TSInfo->getTypeLoc().getLocalSourceRange().getBegin();
  return SourceLocation();
}
/// Full source range of this initializer.  For an in-class (NSDMI) case the
/// range is that of the written initializer expression, if any.
SourceRange CXXCtorInitializer::getSourceRange() const {
  if (isInClassMemberInitializer()) {
    FieldDecl *D = getAnyMember();
    if (Expr *I = D->getInClassInitializer())
      return I->getSourceRange();
    return SourceRange();
  }
  return SourceRange(getSourceLocation(), getRParenLoc());
}
void CXXConstructorDecl::anchor() { }
/// Create an empty constructor declaration for AST deserialization.
CXXConstructorDecl *
CXXConstructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
  return new (C, ID) CXXConstructorDecl(C, nullptr, SourceLocation(),
                                        DeclarationNameInfo(), QualType(),
                                        nullptr, false, false, false, false);
}
/// Create a constructor declaration attached to record \p RD.
CXXConstructorDecl *
CXXConstructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
                           SourceLocation StartLoc,
                           const DeclarationNameInfo &NameInfo,
                           QualType T, TypeSourceInfo *TInfo,
                           bool isExplicit, bool isInline,
                           bool isImplicitlyDeclared, bool isConstexpr) {
  assert(NameInfo.getName().getNameKind()
         == DeclarationName::CXXConstructorName &&
         "Name must refer to a constructor");
  return new (C, RD) CXXConstructorDecl(C, RD, StartLoc, NameInfo, T, TInfo,
                                        isExplicit, isInline,
                                        isImplicitlyDeclared, isConstexpr);
}
/// Begin iterator over member/base initializers, materializing them from an
/// external AST source if necessary.
CXXConstructorDecl::init_const_iterator CXXConstructorDecl::init_begin() const {
  return CtorInitializers.get(getASTContext().getExternalSource());
}
/// For a delegating constructor, return the constructor it delegates to, or
/// null if the delegated-to constructor cannot be extracted from the
/// initializer expression.
CXXConstructorDecl *CXXConstructorDecl::getTargetConstructor() const {
  assert(isDelegatingConstructor() && "Not a delegating constructor!");
  // A delegating ctor has exactly one initializer; its init expression is
  // the construction of the target.
  Expr *E = (*init_begin())->getInit()->IgnoreImplicit();
  if (CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(E))
    return Construct->getConstructor();
  return nullptr;
}
/// Determine whether this is a default constructor (C++ [class.ctor]p5):
/// one that can be called without any arguments.
bool CXXConstructorDecl::isDefaultConstructor() const {
  // C++ [class.ctor]p5:
  //   A default constructor for a class X is a constructor of class
  //   X that can be called without an argument.
  if (getNumParams() == 0)
    return true;
  // Also callable without arguments when the first parameter is defaulted.
  return getParamDecl(0)->hasDefaultArg();
}
/// Determine whether this is a copy constructor; on success \p TypeQuals
/// receives the cv-qualifiers of the reference parameter's pointee.
bool
CXXConstructorDecl::isCopyConstructor(unsigned &TypeQuals) const {
  return isCopyOrMoveConstructor(TypeQuals) &&
         getParamDecl(0)->getType()->isLValueReferenceType();
}
/// Determine whether this is a move constructor; on success \p TypeQuals
/// receives the cv-qualifiers of the reference parameter's pointee.
bool CXXConstructorDecl::isMoveConstructor(unsigned &TypeQuals) const {
  return isCopyOrMoveConstructor(TypeQuals) &&
         getParamDecl(0)->getType()->isRValueReferenceType();
}
/// \brief Determine whether this is a copy or move constructor.  On success,
/// \p TypeQuals receives the cv-qualifiers of the reference's pointee type.
bool CXXConstructorDecl::isCopyOrMoveConstructor(unsigned &TypeQuals) const {
  // C++ [class.copy]p2:
  //   A non-template constructor for class X is a copy constructor
  //   if its first parameter is of type X&, const X&, volatile X& or
  //   const volatile X&, and either there are no other parameters
  //   or else all other parameters have default arguments (8.3.6).
  // C++0x [class.copy]p3:
  //   A non-template constructor for class X is a move constructor if its
  //   first parameter is of type X&&, const X&&, volatile X&&, or
  //   const volatile X&&, and either there are no other parameters or else
  //   all other parameters have default arguments.
  if ((getNumParams() < 1) ||
      (getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) ||
      (getPrimaryTemplate() != nullptr) ||
      (getDescribedFunctionTemplate() != nullptr))
    return false;
  const ParmVarDecl *Param = getParamDecl(0);
  // Do we have a reference type?
  const ReferenceType *ParamRefType = Param->getType()->getAs<ReferenceType>();
  if (!ParamRefType)
    return false;
  // Is it a reference to our class type?
  ASTContext &Context = getASTContext();
  CanQualType PointeeType
    = Context.getCanonicalType(ParamRefType->getPointeeType());
  CanQualType ClassTy
    = Context.getCanonicalType(Context.getTagDeclType(getParent()));
  if (PointeeType.getUnqualifiedType() != ClassTy)
    return false;
  // FIXME: other qualifiers?
  // We have a copy or move constructor.
  TypeQuals = PointeeType.getCVRQualifiers();
  return true;
}
/// Determine whether this is a converting constructor (C++
/// [class.conv.ctor]p1), i.e. one callable with a single argument.
/// \p AllowExplicit permits 'explicit' constructors to qualify.
bool CXXConstructorDecl::isConvertingConstructor(bool AllowExplicit) const {
  // C++ [class.conv.ctor]p1:
  //   A constructor declared without the function-specifier explicit
  //   that can be called with a single parameter specifies a
  //   conversion from the type of its first parameter to the type of
  //   its class. Such a constructor is called a converting
  //   constructor.
  if (isExplicit() && !AllowExplicit)
    return false;
  // Callable with one argument: a variadic nullary ctor, exactly one
  // parameter, or extra parameters that are defaulted or a pack.
  return (getNumParams() == 0 &&
          getType()->getAs<FunctionProtoType>()->isVariadic()) ||
         (getNumParams() == 1) ||
         (getNumParams() > 1 &&
          (getParamDecl(1)->hasDefaultArg() ||
           getParamDecl(1)->isParameterPack()));
}
bool CXXConstructorDecl::isSpecializationCopyingObject() const {
if ((getNumParams() < 1) ||
(getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) ||
(getDescribedFunctionTemplate() != nullptr))
return false;
const ParmVarDecl *Param = getParamDecl(0);
ASTContext &Context = getASTContext();
CanQualType ParamType = Context.getCanonicalType(Param->getType());
// Is it the same as our our class type?
CanQualType ClassTy
= Context.getCanonicalType(Context.getTagDeclType(getParent()));
if (ParamType.getUnqualifiedType() != ClassTy)
return false;
return true;
}
/// Return the base-class constructor this one inherits from (C++11
/// inheriting constructors), or null if this is not an inheriting ctor.
const CXXConstructorDecl *CXXConstructorDecl::getInheritedConstructor() const {
  // Hack: we store the inherited constructor in the overridden method table
  method_iterator It = getASTContext().overridden_methods_begin(this);
  if (It == getASTContext().overridden_methods_end(this))
    return nullptr;
  return cast<CXXConstructorDecl>(*It);
}
/// Record the base-class constructor this inheriting constructor is
/// derived from; may only be set once.
void
CXXConstructorDecl::setInheritedConstructor(const CXXConstructorDecl *BaseCtor){
  // Hack: we store the inherited constructor in the overridden method table
  assert(getASTContext().overridden_methods_size(this) == 0 &&
         "Base ctor already set.");
  getASTContext().addOverriddenMethod(this, BaseCtor);
}
void CXXDestructorDecl::anchor() { }
/// Create an empty destructor declaration for AST deserialization.
CXXDestructorDecl *
CXXDestructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
  return new (C, ID)
      CXXDestructorDecl(C, nullptr, SourceLocation(), DeclarationNameInfo(),
                        QualType(), nullptr, false, false);
}
/// Create a destructor declaration attached to record \p RD.
CXXDestructorDecl *
CXXDestructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
                          SourceLocation StartLoc,
                          const DeclarationNameInfo &NameInfo,
                          QualType T, TypeSourceInfo *TInfo,
                          bool isInline, bool isImplicitlyDeclared) {
  assert(NameInfo.getName().getNameKind()
         == DeclarationName::CXXDestructorName &&
         "Name must refer to a destructor");
  return new (C, RD) CXXDestructorDecl(C, RD, StartLoc, NameInfo, T, TInfo,
                                       isInline, isImplicitlyDeclared);
}
/// Record the operator delete this destructor will call.  The value is
/// stored on the first declaration only, and never overwritten once set;
/// the AST mutation listener is notified for serialization.
void CXXDestructorDecl::setOperatorDelete(FunctionDecl *OD) {
  auto *First = cast<CXXDestructorDecl>(getFirstDecl());
  if (OD && !First->OperatorDelete) {
    First->OperatorDelete = OD;
    if (auto *L = getASTMutationListener())
      L->ResolvedOperatorDelete(First, OD);
  }
}
void CXXConversionDecl::anchor() { }
/// Create an empty conversion-function declaration for AST deserialization.
CXXConversionDecl *
CXXConversionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
  return new (C, ID) CXXConversionDecl(C, nullptr, SourceLocation(),
                                       DeclarationNameInfo(), QualType(),
                                       nullptr, false, false, false,
                                       SourceLocation());
}
/// Create a conversion-function declaration attached to record \p RD.
CXXConversionDecl *
CXXConversionDecl::Create(ASTContext &C, CXXRecordDecl *RD,
                          SourceLocation StartLoc,
                          const DeclarationNameInfo &NameInfo,
                          QualType T, TypeSourceInfo *TInfo,
                          bool isInline, bool isExplicit,
                          bool isConstexpr, SourceLocation EndLocation) {
  assert(NameInfo.getName().getNameKind()
         == DeclarationName::CXXConversionFunctionName &&
         "Name must refer to a conversion function");
  return new (C, RD) CXXConversionDecl(C, RD, StartLoc, NameInfo, T, TInfo,
                                       isInline, isExplicit, isConstexpr,
                                       EndLocation);
}
/// Determine whether this is the implicit conversion from a lambda closure
/// type to a block pointer.
bool CXXConversionDecl::isLambdaToBlockPointerConversion() const {
  return isImplicit() && getParent()->isLambda() &&
         getConversionType()->isBlockPointerType();
}
void LinkageSpecDecl::anchor() { }
/// Create a linkage specification (extern "C" / extern "C++") declaration.
LinkageSpecDecl *LinkageSpecDecl::Create(ASTContext &C,
                                         DeclContext *DC,
                                         SourceLocation ExternLoc,
                                         SourceLocation LangLoc,
                                         LanguageIDs Lang,
                                         bool HasBraces) {
  return new (C, DC) LinkageSpecDecl(DC, ExternLoc, LangLoc, Lang, HasBraces);
}
/// Create an empty linkage specification for AST deserialization.
LinkageSpecDecl *LinkageSpecDecl::CreateDeserialized(ASTContext &C,
                                                     unsigned ID) {
  return new (C, ID) LinkageSpecDecl(nullptr, SourceLocation(),
                                     SourceLocation(), lang_c, false);
}
void UsingDirectiveDecl::anchor() { }
/// Create a using-directive declaration.  A nominated namespace is
/// normalized to its original (first) declaration.
UsingDirectiveDecl *UsingDirectiveDecl::Create(ASTContext &C, DeclContext *DC,
                                               SourceLocation L,
                                               SourceLocation NamespaceLoc,
                                               NestedNameSpecifierLoc QualifierLoc,
                                               SourceLocation IdentLoc,
                                               NamedDecl *Used,
                                               DeclContext *CommonAncestor) {
  if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Used))
    Used = NS->getOriginalNamespace();
  return new (C, DC) UsingDirectiveDecl(DC, L, NamespaceLoc, QualifierLoc,
                                        IdentLoc, Used, CommonAncestor);
}
/// Create an empty using-directive for AST deserialization.
UsingDirectiveDecl *UsingDirectiveDecl::CreateDeserialized(ASTContext &C,
                                                           unsigned ID) {
  return new (C, ID) UsingDirectiveDecl(nullptr, SourceLocation(),
                                        SourceLocation(),
                                        NestedNameSpecifierLoc(),
                                        SourceLocation(), nullptr, nullptr);
}
/// Return the namespace this directive nominates, looking through a
/// namespace alias if one was written.
NamespaceDecl *UsingDirectiveDecl::getNominatedNamespace() {
  if (NamespaceAliasDecl *NA =
        dyn_cast_or_null<NamespaceAliasDecl>(NominatedNamespace))
    return NA->getNamespace();
  return cast_or_null<NamespaceDecl>(NominatedNamespace);
}
/// Construct a (possibly inline) namespace declaration, linking it into the
/// redeclaration chain of \p PrevDecl when given.
NamespaceDecl::NamespaceDecl(ASTContext &C, DeclContext *DC, bool Inline,
                             SourceLocation StartLoc, SourceLocation IdLoc,
                             IdentifierInfo *Id, NamespaceDecl *PrevDecl)
  : NamedDecl(Namespace, DC, IdLoc, Id), DeclContext(Namespace),
    redeclarable_base(C), LocStart(StartLoc), RBraceLoc(),
    AnonOrFirstNamespaceAndInline(nullptr, Inline)
{
  setPreviousDecl(PrevDecl);
  // All redeclarations share the original namespace as their canonical one.
  if (PrevDecl)
    AnonOrFirstNamespaceAndInline.setPointer(PrevDecl->getOriginalNamespace());
}
NamespaceDecl *NamespaceDecl::Create(ASTContext &C, DeclContext *DC,
                                     bool Inline, SourceLocation StartLoc,
                                     SourceLocation IdLoc, IdentifierInfo *Id,
                                     NamespaceDecl *PrevDecl) {
  return new (C, DC) NamespaceDecl(C, DC, Inline, StartLoc, IdLoc, Id,
                                   PrevDecl);
}
/// Create an empty namespace declaration for AST deserialization.
NamespaceDecl *NamespaceDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
  return new (C, ID) NamespaceDecl(C, nullptr, false, SourceLocation(),
                                   SourceLocation(), nullptr, nullptr);
}
// Redeclarable boilerplate: bridge the Redeclarable<> hooks to the
// covariantly-typed accessors.
NamespaceDecl *NamespaceDecl::getNextRedeclarationImpl() {
  return getNextRedeclaration();
}
NamespaceDecl *NamespaceDecl::getPreviousDeclImpl() {
  return getPreviousDecl();
}
NamespaceDecl *NamespaceDecl::getMostRecentDeclImpl() {
  return getMostRecentDecl();
}
void NamespaceAliasDecl::anchor() { }
// Redeclarable boilerplate: bridge the Redeclarable<> hooks to the
// covariantly-typed accessors.
NamespaceAliasDecl *NamespaceAliasDecl::getNextRedeclarationImpl() {
  return getNextRedeclaration();
}
NamespaceAliasDecl *NamespaceAliasDecl::getPreviousDeclImpl() {
  return getPreviousDecl();
}
NamespaceAliasDecl *NamespaceAliasDecl::getMostRecentDeclImpl() {
  return getMostRecentDecl();
}
/// Create a namespace-alias declaration.  The aliased namespace is
/// normalized to its original (first) declaration.
NamespaceAliasDecl *NamespaceAliasDecl::Create(ASTContext &C, DeclContext *DC,
                                               SourceLocation UsingLoc,
                                               SourceLocation AliasLoc,
                                               IdentifierInfo *Alias,
                                               NestedNameSpecifierLoc QualifierLoc,
                                               SourceLocation IdentLoc,
                                               NamedDecl *Namespace) {
  // FIXME: Preserve the aliased namespace as written.
  if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Namespace))
    Namespace = NS->getOriginalNamespace();
  return new (C, DC) NamespaceAliasDecl(C, DC, UsingLoc, AliasLoc, Alias,
                                        QualifierLoc, IdentLoc, Namespace);
}
/// Create an empty namespace-alias declaration for AST deserialization.
NamespaceAliasDecl *
NamespaceAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
  return new (C, ID) NamespaceAliasDecl(C, nullptr, SourceLocation(),
                                        SourceLocation(), nullptr,
                                        NestedNameSpecifierLoc(),
                                        SourceLocation(), nullptr);
}
void UsingShadowDecl::anchor() { }
/// Create an empty using-shadow declaration for AST deserialization.
UsingShadowDecl *
UsingShadowDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
  return new (C, ID) UsingShadowDecl(C, nullptr, SourceLocation(),
                                     nullptr, nullptr);
}
/// Return the using-declaration that introduced this shadow declaration by
/// walking the singly-linked shadow chain to its end, which holds the
/// UsingDecl itself.
UsingDecl *UsingShadowDecl::getUsingDecl() const {
  const UsingShadowDecl *Shadow = this;
  while (const UsingShadowDecl *NextShadow =
           dyn_cast<UsingShadowDecl>(Shadow->UsingOrNextShadow))
    Shadow = NextShadow;
  return cast<UsingDecl>(Shadow->UsingOrNextShadow);
}
void UsingDecl::anchor() { }
/// Prepend shadow declaration \p S to this using-declaration's
/// singly-linked shadow chain.
void UsingDecl::addShadowDecl(UsingShadowDecl *S) {
  assert(std::find(shadow_begin(), shadow_end(), S) == shadow_end() &&
         "declaration already in set");
  assert(S->getUsingDecl() == this);
  // Link S ahead of the current head of the chain.
  if (FirstUsingShadow.getPointer())
    S->UsingOrNextShadow = FirstUsingShadow.getPointer();
  FirstUsingShadow.setPointer(S);
}
/// Unlink shadow declaration \p S from the chain; S's next-pointer is reset
/// to this UsingDecl, leaving it a valid single-element chain.
void UsingDecl::removeShadowDecl(UsingShadowDecl *S) {
  assert(std::find(shadow_begin(), shadow_end(), S) != shadow_end() &&
         "declaration not in set");
  assert(S->getUsingDecl() == this);
  // Remove S from the shadow decl chain. This is O(n) but hopefully rare.
  if (FirstUsingShadow.getPointer() == S) {
    // dyn_cast yields null when S was the last shadow (next is the UsingDecl).
    FirstUsingShadow.setPointer(
      dyn_cast<UsingShadowDecl>(S->UsingOrNextShadow));
    S->UsingOrNextShadow = this;
    return;
  }
  // Walk to S's predecessor and splice S out.
  UsingShadowDecl *Prev = FirstUsingShadow.getPointer();
  while (Prev->UsingOrNextShadow != S)
    Prev = cast<UsingShadowDecl>(Prev->UsingOrNextShadow);
  Prev->UsingOrNextShadow = S->UsingOrNextShadow;
  S->UsingOrNextShadow = this;
}
/// Create a using-declaration.
UsingDecl *UsingDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation UL,
                             NestedNameSpecifierLoc QualifierLoc,
                             const DeclarationNameInfo &NameInfo,
                             bool HasTypename) {
  return new (C, DC) UsingDecl(DC, UL, QualifierLoc, NameInfo, HasTypename);
}
/// Create an empty using-declaration for AST deserialization.
UsingDecl *UsingDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
  return new (C, ID) UsingDecl(nullptr, SourceLocation(),
                               NestedNameSpecifierLoc(), DeclarationNameInfo(),
                               false);
}
/// Full source range; access declarations (no 'using' keyword) start at the
/// qualifier instead.
SourceRange UsingDecl::getSourceRange() const {
  SourceLocation Begin = isAccessDeclaration()
    ? getQualifierLoc().getBeginLoc() : UsingLocation;
  return SourceRange(Begin, getNameInfo().getEndLoc());
}
void UnresolvedUsingValueDecl::anchor() { }
/// Create a dependent using-declaration naming a value; its type is the
/// placeholder DependentTy until instantiation resolves it.
UnresolvedUsingValueDecl *
UnresolvedUsingValueDecl::Create(ASTContext &C, DeclContext *DC,
                                 SourceLocation UsingLoc,
                                 NestedNameSpecifierLoc QualifierLoc,
                                 const DeclarationNameInfo &NameInfo) {
  return new (C, DC) UnresolvedUsingValueDecl(DC, C.DependentTy, UsingLoc,
                                              QualifierLoc, NameInfo);
}
/// Create an empty declaration for AST deserialization.
UnresolvedUsingValueDecl *
UnresolvedUsingValueDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
  return new (C, ID) UnresolvedUsingValueDecl(nullptr, QualType(),
                                              SourceLocation(),
                                              NestedNameSpecifierLoc(),
                                              DeclarationNameInfo());
}
/// Full source range; access declarations (no 'using' keyword) start at the
/// qualifier instead.
SourceRange UnresolvedUsingValueDecl::getSourceRange() const {
  SourceLocation Begin = isAccessDeclaration()
    ? getQualifierLoc().getBeginLoc() : UsingLocation;
  return SourceRange(Begin, getNameInfo().getEndLoc());
}
void UnresolvedUsingTypenameDecl::anchor() { }
/// Create a dependent 'using typename' declaration.
UnresolvedUsingTypenameDecl *
UnresolvedUsingTypenameDecl::Create(ASTContext &C, DeclContext *DC,
                                    SourceLocation UsingLoc,
                                    SourceLocation TypenameLoc,
                                    NestedNameSpecifierLoc QualifierLoc,
                                    SourceLocation TargetNameLoc,
                                    DeclarationName TargetName) {
  return new (C, DC) UnresolvedUsingTypenameDecl(
      DC, UsingLoc, TypenameLoc, QualifierLoc, TargetNameLoc,
      TargetName.getAsIdentifierInfo());
}
/// Create an empty declaration for AST deserialization.
UnresolvedUsingTypenameDecl *
UnresolvedUsingTypenameDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
  return new (C, ID) UnresolvedUsingTypenameDecl(
      nullptr, SourceLocation(), SourceLocation(), NestedNameSpecifierLoc(),
      SourceLocation(), nullptr);
}
void StaticAssertDecl::anchor() { }
/// Create a static_assert declaration; \p Failed records whether the
/// condition was evaluated as false.
StaticAssertDecl *StaticAssertDecl::Create(ASTContext &C, DeclContext *DC,
                                           SourceLocation StaticAssertLoc,
                                           Expr *AssertExpr,
                                           StringLiteral *Message,
                                           SourceLocation RParenLoc,
                                           bool Failed) {
  return new (C, DC) StaticAssertDecl(DC, StaticAssertLoc, AssertExpr, Message,
                                      RParenLoc, Failed);
}
/// Create an empty static_assert declaration for AST deserialization.
StaticAssertDecl *StaticAssertDecl::CreateDeserialized(ASTContext &C,
                                                       unsigned ID) {
  return new (C, ID) StaticAssertDecl(nullptr, SourceLocation(), nullptr,
                                      nullptr, SourceLocation(), false);
}
/// Create a Microsoft __declspec(property) declaration with its optional
/// getter/setter method names.
MSPropertyDecl *MSPropertyDecl::Create(ASTContext &C, DeclContext *DC,
                                       SourceLocation L, DeclarationName N,
                                       QualType T, TypeSourceInfo *TInfo,
                                       SourceLocation StartL,
                                       IdentifierInfo *Getter,
                                       IdentifierInfo *Setter) {
  return new (C, DC) MSPropertyDecl(DC, L, N, T, TInfo, StartL, Getter, Setter);
}
/// Create an empty property declaration for AST deserialization.
MSPropertyDecl *MSPropertyDecl::CreateDeserialized(ASTContext &C,
                                                   unsigned ID) {
  return new (C, ID) MSPropertyDecl(nullptr, SourceLocation(),
                                    DeclarationName(), QualType(), nullptr,
                                    SourceLocation(), nullptr, nullptr);
}
/// Map an access specifier to its keyword spelling for diagnostics.
/// AS_none has no spelling and must not reach here.
static const char *getAccessName(AccessSpecifier AS) {
  switch (AS) {
  case AS_public:    return "public";
  case AS_protected: return "protected";
  case AS_private:   return "private";
  case AS_none:
    llvm_unreachable("Invalid access specifier!");
  }
  llvm_unreachable("Invalid access specifier!");
}
/// Stream an access specifier's keyword into a diagnostic.
const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
                                           AccessSpecifier AS) {
  return DB << getAccessName(AS);
}
/// Stream an access specifier's keyword into a partial diagnostic.
const PartialDiagnostic &clang::operator<<(const PartialDiagnostic &DB,
                                           AccessSpecifier AS) {
  return DB << getAccessName(AS);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/CMakeLists.txt | set(LLVM_LINK_COMPONENTS hlsl support)
# Upstream sources excluded from the HLSL build of the AST library.
set (HLSL_IGNORE_SOURCES
NSAPI.cpp
)
# Build the clangAST library from the sources below; entries marked
# "HLSL Change" are additions relative to upstream Clang.
add_clang_library(clangAST
APValue.cpp
ASTConsumer.cpp
ASTContext.cpp
ASTContextHLSL.cpp # HLSL Change
ASTDiagnostic.cpp
ASTDumper.cpp
ASTImporter.cpp
ASTTypeTraits.cpp
AttrImpl.cpp
CXXInheritance.cpp
Comment.cpp
CommentBriefParser.cpp
CommentCommandTraits.cpp
CommentLexer.cpp
CommentParser.cpp
CommentSema.cpp
Decl.cpp
DeclarationName.cpp
DeclBase.cpp
DeclCXX.cpp
DeclFriend.cpp
DeclGroup.cpp
DeclObjC.cpp
DeclOpenMP.cpp
DeclPrinter.cpp
DeclTemplate.cpp
Expr.cpp
ExprClassification.cpp
ExprConstant.cpp
ExprCXX.cpp
ExternalASTSource.cpp
HlslBuiltinTypeDeclBuilder.cpp # HLSL Change
HlslTypes.cpp # HLSL Change
InheritViz.cpp
ItaniumCXXABI.cpp
ItaniumMangle.cpp
Mangle.cpp
MicrosoftCXXABI.cpp
MicrosoftMangle.cpp
NestedNameSpecifier.cpp
ParentMap.cpp
RawCommentList.cpp
RecordLayout.cpp
RecordLayoutBuilder.cpp
SelectorLocationsKind.cpp
Stmt.cpp
StmtIterator.cpp
StmtPrinter.cpp
StmtProfile.cpp
StmtViz.cpp
TemplateBase.cpp
TemplateName.cpp
Type.cpp
TypeLoc.cpp
TypePrinter.cpp
VTableBuilder.cpp
VTTBuilder.cpp
LINK_LIBS
clangCodeGen
clangBasic
clangLex
clangSema
)
# MERGE REVIEW:
#   ClangAttrParsedAttrList      # MS Change - AST will refer to Sema headers
#   ClangAttrParsedAttrKinds     # MS Change - AST will refer to Sema headers
#   ClangAttrParsedAttrImpl      # MS Change - AST will refer to Sema headers
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/CXXInheritance.cpp | //===------ CXXInheritance.cpp - C++ Inheritance ----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides routines that help analyzing C++ inheritance hierarchies.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/ADT/SetVector.h"
#include <algorithm>
#include <set>
// //
///////////////////////////////////////////////////////////////////////////////
using namespace clang;
/// \brief Computes the set of declarations referenced by these base
/// paths.
void CXXBasePaths::ComputeDeclsFound() {
  assert(NumDeclsFound == 0 && !DeclsFound &&
         "Already computed the set of declarations");
  // SetVector keeps discovery order while de-duplicating decls reached via
  // multiple paths.
  llvm::SetVector<NamedDecl *, SmallVector<NamedDecl *, 8> > Decls;
  for (paths_iterator Path = begin(), PathEnd = end(); Path != PathEnd; ++Path)
    Decls.insert(Path->Decls.front());
  NumDeclsFound = Decls.size();
  DeclsFound = new NamedDecl * [NumDeclsFound];
  std::copy(Decls.begin(), Decls.end(), DeclsFound);
}
/// Return the (lazily computed) range of declarations found along these
/// base paths.
/// NOTE(review): if a search legitimately found zero decls, NumDeclsFound
/// stays 0 and ComputeDeclsFound() would be re-entered on a later call,
/// tripping its assert -- presumably callers only use this after a
/// successful lookup; verify against callers.
CXXBasePaths::decl_range CXXBasePaths::found_decls() {
  if (NumDeclsFound == 0)
    ComputeDeclsFound();
  return decl_range(decl_iterator(DeclsFound),
                    decl_iterator(DeclsFound + NumDeclsFound));
}
/// isAmbiguous - Determines whether the set of paths provided is
/// ambiguous, i.e., there are two or more paths that refer to
/// different base class subobjects of the same type. BaseType must be
/// an unqualified, canonical class type.
bool CXXBasePaths::isAmbiguous(CanQualType BaseType) {
  BaseType = BaseType.getUnqualifiedType();
  // Subobjects.first: seen as a virtual base (counts once at most);
  // Subobjects.second: number of distinct non-virtual subobjects.
  std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType];
  return Subobjects.second + (Subobjects.first? 1 : 0) > 1;
}
/// clear - Clear out all prior path information.
void CXXBasePaths::clear() {
  Paths.clear();
  ClassSubobjects.clear();
  ScratchPath.clear();
  DetectedVirtual = nullptr;
}
/// @brief Swaps the contents of this CXXBasePaths structure with the
/// contents of Other.
void CXXBasePaths::swap(CXXBasePaths &Other) {
  std::swap(Origin, Other.Origin);
  Paths.swap(Other.Paths);
  ClassSubobjects.swap(Other.ClassSubobjects);
  std::swap(FindAmbiguities, Other.FindAmbiguities);
  std::swap(RecordPaths, Other.RecordPaths);
  std::swap(DetectVirtual, Other.DetectVirtual);
  std::swap(DetectedVirtual, Other.DetectedVirtual);
}
/// Determine whether this class is derived from \p Base, without recording
/// paths or detecting ambiguity/virtual bases.
bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base) const {
  CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false,
                     /*DetectVirtual=*/false);
  return isDerivedFrom(Base, Paths);
}
/// Determine whether this class is derived from \p Base, populating
/// \p Paths with the inheritance paths found.  A class is not considered
/// derived from itself.
bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base,
                                  CXXBasePaths &Paths) const {
  if (getCanonicalDecl() == Base->getCanonicalDecl())
    return false;
  Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
  return lookupInBases(&FindBaseClass,
                       const_cast<CXXRecordDecl*>(Base->getCanonicalDecl()),
                       Paths);
}
/// Determine whether this class is virtually derived from \p Base.
bool CXXRecordDecl::isVirtuallyDerivedFrom(const CXXRecordDecl *Base) const {
  // Fast path: without virtual bases there can be no virtual derivation.
  if (!getNumVBases())
    return false;
  CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false,
                     /*DetectVirtual=*/false);
  // A class is not virtually derived from itself.
  if (getCanonicalDecl() == Base->getCanonicalDecl())
    return false;
  Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
  const void *BasePtr = static_cast<const void*>(Base->getCanonicalDecl());
  return lookupInBases(&FindVirtualBaseClass,
                       const_cast<void *>(BasePtr),
                       Paths);
}
/// forallBases callback: true while the visited base is NOT the target.
static bool BaseIsNot(const CXXRecordDecl *Base, void *OpaqueTarget) {
  // OpaqueTarget is a CXXRecordDecl*.
  return Base->getCanonicalDecl() != (const CXXRecordDecl*) OpaqueTarget;
}
/// Determine whether it can be proven that this class is never derived from
/// \p Base (every reachable, fully-known base differs from it).
bool CXXRecordDecl::isProvablyNotDerivedFrom(const CXXRecordDecl *Base) const {
  return forallBases(BaseIsNot,
                     const_cast<CXXRecordDecl *>(Base->getCanonicalDecl()));
}
/// Determine whether this dependent class is the "current instantiation"
/// relative to \p CurContext, i.e. whether some enclosing context of
/// CurContext is this very class.
bool
CXXRecordDecl::isCurrentInstantiation(const DeclContext *CurContext) const {
  assert(isDependentContext());
  for (; !CurContext->isFileContext(); CurContext = CurContext->getParent())
    if (CurContext->Equals(this))
      return true;
  return false;
}
/// Walk all (transitive) base classes and test \p BaseMatches on each.
/// Returns true only if every base is a fully-known, non-dependent record
/// for which the callback returns true.  With \p AllowShortCircuit the walk
/// stops at the first failure; otherwise all bases are still visited.
bool CXXRecordDecl::forallBases(ForallBasesCallback *BaseMatches,
                                void *OpaqueData,
                                bool AllowShortCircuit) const {
  SmallVector<const CXXRecordDecl*, 8> Queue;
  const CXXRecordDecl *Record = this;
  bool AllMatches = true;
  while (true) {
    for (const auto &I : Record->bases()) {
      const RecordType *Ty = I.getType()->getAs<RecordType>();
      // A non-record base (e.g. dependent type) cannot be proven to match.
      if (!Ty) {
        if (AllowShortCircuit) return false;
        AllMatches = false;
        continue;
      }
      // The base must have a definition and must not be a dependent context
      // (unless it is the current instantiation of Record itself).
      CXXRecordDecl *Base =
            cast_or_null<CXXRecordDecl>(Ty->getDecl()->getDefinition());
      if (!Base ||
          (Base->isDependentContext() &&
           !Base->isCurrentInstantiation(Record))) {
        if (AllowShortCircuit) return false;
        AllMatches = false;
        continue;
      }
      Queue.push_back(Base);
      if (!BaseMatches(Base, OpaqueData)) {
        if (AllowShortCircuit) return false;
        AllMatches = false;
        continue;
      }
    }
    if (Queue.empty())
      break;
    Record = Queue.pop_back_val(); // not actually a queue.
  }
  return AllMatches;
}
/// Perform one level of lookup into the bases of \p Record, calling
/// \p BaseMatches on each base specifier and recursing into bases that do
/// not themselves match. Returns true if any path to a matching base was
/// found. Maintains ScratchPath/Paths when path recording is enabled,
/// ClassSubobjects counts for ambiguity detection, and DetectedVirtual
/// when virtual-base detection is enabled.
bool CXXBasePaths::lookupInBases(ASTContext &Context,
                                 const CXXRecordDecl *Record,
                                 CXXRecordDecl::BaseMatchesCallback *BaseMatches,
                                 void *UserData) {
  bool FoundPath = false;
  // The access of the path down to this record.
  AccessSpecifier AccessToHere = ScratchPath.Access;
  bool IsFirstStep = ScratchPath.empty();
  for (const auto &BaseSpec : Record->bases()) {
    // Find the record of the base class subobjects for this type.
    QualType BaseType =
        Context.getCanonicalType(BaseSpec.getType()).getUnqualifiedType();
    // C++ [temp.dep]p3:
    //   In the definition of a class template or a member of a class template,
    //   if a base class of the class template depends on a template-parameter,
    //   the base class scope is not examined during unqualified name lookup
    //   either at the point of definition of the class template or member or
    //   during an instantiation of the class template or member.
    if (BaseType->isDependentType())
      continue;
    // Determine whether we need to visit this base class at all,
    // updating the count of subobjects appropriately.
    std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType];
    bool VisitBase = true;
    bool SetVirtual = false;
    if (BaseSpec.isVirtual()) {
      // A virtual base is a single shared subobject: only visit it the
      // first time it is seen.
      VisitBase = !Subobjects.first;
      Subobjects.first = true;
      if (isDetectingVirtual() && DetectedVirtual == nullptr) {
        // If this is the first virtual we find, remember it. If it turns out
        // there is no base path here, we'll reset it later.
        DetectedVirtual = BaseType->getAs<RecordType>();
        SetVirtual = true;
      }
    } else
      ++Subobjects.second;
    if (isRecordingPaths()) {
      // Add this base specifier to the current path.
      CXXBasePathElement Element;
      Element.Base = &BaseSpec;
      Element.Class = Record;
      if (BaseSpec.isVirtual())
        Element.SubobjectNumber = 0;
      else
        Element.SubobjectNumber = Subobjects.second;
      ScratchPath.push_back(Element);
      // Calculate the "top-down" access to this base class.
      // The spec actually describes this bottom-up, but top-down is
      // equivalent because the definition works out as follows:
      // 1. Write down the access along each step in the inheritance
      //    chain, followed by the access of the decl itself.
      //    For example, in
      //      class A { public: int foo; };
      //      class B : protected A {};
      //      class C : public B {};
      //      class D : private C {};
      //    we would write:
      //      private public protected public
      // 2. If 'private' appears anywhere except far-left, access is denied.
      // 3. Otherwise, overall access is determined by the most restrictive
      //    access in the sequence.
      if (IsFirstStep)
        ScratchPath.Access = BaseSpec.getAccessSpecifier();
      else
        ScratchPath.Access = CXXRecordDecl::MergeAccess(AccessToHere,
                                                 BaseSpec.getAccessSpecifier());
    }
    // Track whether there's a path involving this specific base.
    bool FoundPathThroughBase = false;
    if (BaseMatches(&BaseSpec, ScratchPath, UserData)) {
      // We've found a path that terminates at this base.
      FoundPath = FoundPathThroughBase = true;
      if (isRecordingPaths()) {
        // We have a path. Make a copy of it before moving on.
        Paths.push_back(ScratchPath);
      } else if (!isFindingAmbiguities()) {
        // We found a path and we don't care about ambiguities;
        // return immediately.
        return FoundPath;
      }
    } else if (VisitBase) {
      CXXRecordDecl *BaseRecord
        = cast<CXXRecordDecl>(BaseSpec.getType()->castAs<RecordType>()
                                ->getDecl());
      if (lookupInBases(Context, BaseRecord, BaseMatches, UserData)) {
        // C++ [class.member.lookup]p2:
        //   A member name f in one sub-object B hides a member name f in
        //   a sub-object A if A is a base class sub-object of B. Any
        //   declarations that are so hidden are eliminated from
        //   consideration.
        // There is a path to a base class that meets the criteria. If we're
        // not collecting paths or finding ambiguities, we're done.
        FoundPath = FoundPathThroughBase = true;
        if (!isFindingAmbiguities())
          return FoundPath;
      }
    }
    // Pop this base specifier off the current path (if we're
    // collecting paths).
    if (isRecordingPaths()) {
      ScratchPath.pop_back();
    }
    // If we set a virtual earlier, and this isn't a path, forget it again.
    if (SetVirtual && !FoundPathThroughBase) {
      DetectedVirtual = nullptr;
    }
  }
  // Reset the scratch path access.
  ScratchPath.Access = AccessToHere;
  return FoundPath;
}
/// Look up names or base classes in the base classes of this class,
/// delegating to CXXBasePaths::lookupInBases and then pruning paths whose
/// results are hidden via the virtual-base subobject lattice.
bool CXXRecordDecl::lookupInBases(BaseMatchesCallback *BaseMatches,
                                  void *UserData,
                                  CXXBasePaths &Paths) const {
  // If we didn't find anything, report that.
  if (!Paths.lookupInBases(getASTContext(), this, BaseMatches, UserData))
    return false;
  // If we're not recording paths or we won't ever find ambiguities,
  // we're done.
  if (!Paths.isRecordingPaths() || !Paths.isFindingAmbiguities())
    return true;
  // C++ [class.member.lookup]p6:
  //   When virtual base classes are used, a hidden declaration can be
  //   reached along a path through the sub-object lattice that does
  //   not pass through the hiding declaration. This is not an
  //   ambiguity. The identical use with nonvirtual base classes is an
  //   ambiguity; in that case there is no unique instance of the name
  //   that hides all the others.
  //
  // FIXME: This is an O(N^2) algorithm, but DPG doesn't see an easy
  // way to make it any faster.
  Paths.Paths.remove_if([&Paths](const CXXBasePath &Path) {
    for (const CXXBasePathElement &PE : Path) {
      if (!PE.Base->isVirtual())
        continue;
      CXXRecordDecl *VBase = nullptr;
      if (const RecordType *Record = PE.Base->getType()->getAs<RecordType>())
        VBase = cast<CXXRecordDecl>(Record->getDecl());
      if (!VBase)
        break;
      // The declaration(s) we found along this path were found in a
      // subobject of a virtual base. Check whether this virtual
      // base is a subobject of any other path; if so, then the
      // declarations in this path are hidden by that path.
      for (const CXXBasePath &HidingP : Paths) {
        CXXRecordDecl *HidingClass = nullptr;
        if (const RecordType *Record =
                HidingP.back().Base->getType()->getAs<RecordType>())
          HidingClass = cast<CXXRecordDecl>(Record->getDecl());
        if (!HidingClass)
          break;
        if (HidingClass->isVirtuallyDerivedFrom(VBase))
          return true;
      }
    }
    return false;
  });
  return true;
}
bool CXXRecordDecl::FindBaseClass(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path,
void *BaseRecord) {
assert(((Decl *)BaseRecord)->getCanonicalDecl() == BaseRecord &&
"User data for FindBaseClass is not canonical!");
return Specifier->getType()->castAs<RecordType>()->getDecl()
->getCanonicalDecl() == BaseRecord;
}
bool CXXRecordDecl::FindVirtualBaseClass(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path,
void *BaseRecord) {
assert(((Decl *)BaseRecord)->getCanonicalDecl() == BaseRecord &&
"User data for FindBaseClass is not canonical!");
return Specifier->isVirtual() &&
Specifier->getType()->castAs<RecordType>()->getDecl()
->getCanonicalDecl() == BaseRecord;
}
/// lookupInBases callback that matches a base containing a tag (struct/
/// class/enum) member with the given name. On success, Path.Decls is left
/// as the lookup-result slice starting at the matching declaration.
bool CXXRecordDecl::FindTagMember(const CXXBaseSpecifier *Specifier,
                                  CXXBasePath &Path,
                                  void *Name) {
  RecordDecl *BaseRecord =
    Specifier->getType()->castAs<RecordType>()->getDecl();
  DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
  // Advance Path.Decls past non-tag results; stop on the first tag.
  for (Path.Decls = BaseRecord->lookup(N);
       !Path.Decls.empty();
       Path.Decls = Path.Decls.slice(1)) {
    if (Path.Decls.front()->isInIdentifierNamespace(IDNS_Tag))
      return true;
  }
  return false;
}
/// lookupInBases callback that matches a base containing an ordinary
/// member (ordinary, tag, or member namespace) with the given name. On
/// success, Path.Decls is left pointing at the matching declaration.
bool CXXRecordDecl::FindOrdinaryMember(const CXXBaseSpecifier *Specifier,
                                       CXXBasePath &Path,
                                       void *Name) {
  RecordDecl *BaseRecord =
    Specifier->getType()->castAs<RecordType>()->getDecl();
  const unsigned IDNS = IDNS_Ordinary | IDNS_Tag | IDNS_Member;
  DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
  for (Path.Decls = BaseRecord->lookup(N);
       !Path.Decls.empty();
       Path.Decls = Path.Decls.slice(1)) {
    if (Path.Decls.front()->isInIdentifierNamespace(IDNS))
      return true;
  }
  return false;
}
/// lookupInBases callback that matches a base containing a member that
/// could serve as a nested-name-specifier (a typedef/alias or a tag).
/// On success, Path.Decls is left pointing at the matching declaration.
bool CXXRecordDecl::
FindNestedNameSpecifierMember(const CXXBaseSpecifier *Specifier,
                              CXXBasePath &Path,
                              void *Name) {
  RecordDecl *BaseRecord =
    Specifier->getType()->castAs<RecordType>()->getDecl();
  DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
  for (Path.Decls = BaseRecord->lookup(N);
       !Path.Decls.empty();
       Path.Decls = Path.Decls.slice(1)) {
    // FIXME: Refactor the "is it a nested-name-specifier?" check
    if (isa<TypedefNameDecl>(Path.Decls.front()) ||
        Path.Decls.front()->isInIdentifierNamespace(IDNS_Tag))
      return true;
  }
  return false;
}
void OverridingMethods::add(unsigned OverriddenSubobject,
UniqueVirtualMethod Overriding) {
SmallVectorImpl<UniqueVirtualMethod> &SubobjectOverrides
= Overrides[OverriddenSubobject];
if (std::find(SubobjectOverrides.begin(), SubobjectOverrides.end(),
Overriding) == SubobjectOverrides.end())
SubobjectOverrides.push_back(Overriding);
}
void OverridingMethods::add(const OverridingMethods &Other) {
for (const_iterator I = Other.begin(), IE = Other.end(); I != IE; ++I) {
for (overriding_const_iterator M = I->second.begin(),
MEnd = I->second.end();
M != MEnd;
++M)
add(I->first, *M);
}
}
/// Make \p Overriding the sole recorded overrider for every subobject.
void OverridingMethods::replaceAll(UniqueVirtualMethod Overriding) {
  for (iterator I = begin(), IEnd = end(); I != IEnd; ++I)
    I->second.assign(1, Overriding);
}
namespace {
  /// Helper that walks a class hierarchy accumulating, for each virtual
  /// method, its final overriders per subobject. Caches results for
  /// virtual bases so each virtual base is walked only once.
  class FinalOverriderCollector {
    /// \brief The number of subobjects of a given class type that
    /// occur within the class hierarchy.
    llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCount;
    /// \brief Overriders for each virtual base subobject.
    llvm::DenseMap<const CXXRecordDecl *, CXXFinalOverriderMap *> VirtualOverriders;
    // Unused accumulator retained for layout/ABI stability of this helper.
    CXXFinalOverriderMap FinalOverriders;
  public:
    // Deletes the cached per-virtual-base overrider maps.
    ~FinalOverriderCollector();
    // Populate \p Overriders for \p RD; \p VirtualBase states whether RD
    // is being visited as a virtual base, and \p InVirtualSubobject names
    // the enclosing virtual base subobject (or null).
    void Collect(const CXXRecordDecl *RD, bool VirtualBase,
                 const CXXRecordDecl *InVirtualSubobject,
                 CXXFinalOverriderMap &Overriders);
  };
}
/// Collect the final overriders for every virtual method visible in
/// \p RD's hierarchy into \p Overriders. Merges (and caches, for virtual
/// bases) the overrider sets of all polymorphic bases, then applies RD's
/// own virtual methods: new roots add themselves; overriding methods
/// replace the overriders of everything they transitively override.
void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
                                      bool VirtualBase,
                                      const CXXRecordDecl *InVirtualSubobject,
                                      CXXFinalOverriderMap &Overriders) {
  unsigned SubobjectNumber = 0;
  if (!VirtualBase)
    SubobjectNumber
      = ++SubobjectCount[cast<CXXRecordDecl>(RD->getCanonicalDecl())];
  for (const auto &Base : RD->bases()) {
    if (const RecordType *RT = Base.getType()->getAs<RecordType>()) {
      const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(RT->getDecl());
      if (!BaseDecl->isPolymorphic())
        continue;
      if (Overriders.empty() && !Base.isVirtual()) {
        // There are no other overriders of virtual member functions,
        // so let the base class fill in our overriders for us.
        Collect(BaseDecl, false, InVirtualSubobject, Overriders);
        continue;
      }
      // Collect all of the overridders from the base class subobject
      // and merge them into the set of overridders for this class.
      // For virtual base classes, populate or use the cached virtual
      // overrides so that we do not walk the virtual base class (and
      // its base classes) more than once.
      CXXFinalOverriderMap ComputedBaseOverriders;
      CXXFinalOverriderMap *BaseOverriders = &ComputedBaseOverriders;
      if (Base.isVirtual()) {
        CXXFinalOverriderMap *&MyVirtualOverriders = VirtualOverriders[BaseDecl];
        BaseOverriders = MyVirtualOverriders;
        if (!MyVirtualOverriders) {
          MyVirtualOverriders = new CXXFinalOverriderMap;
          // Collect may cause VirtualOverriders to reallocate, invalidating the
          // MyVirtualOverriders reference. Set BaseOverriders to the right
          // value now.
          BaseOverriders = MyVirtualOverriders;
          Collect(BaseDecl, true, BaseDecl, *MyVirtualOverriders);
        }
      } else
        Collect(BaseDecl, false, InVirtualSubobject, ComputedBaseOverriders);
      // Merge the overriders from this base class into our own set of
      // overriders.
      for (CXXFinalOverriderMap::iterator OM = BaseOverriders->begin(),
                               OMEnd = BaseOverriders->end();
           OM != OMEnd;
           ++OM) {
        const CXXMethodDecl *CanonOM
          = cast<CXXMethodDecl>(OM->first->getCanonicalDecl());
        Overriders[CanonOM].add(OM->second);
      }
    }
  }
  for (auto *M : RD->methods()) {
    // We only care about virtual methods.
    if (!M->isVirtual())
      continue;
    CXXMethodDecl *CanonM = cast<CXXMethodDecl>(M->getCanonicalDecl());
    if (CanonM->begin_overridden_methods()
                                      == CanonM->end_overridden_methods()) {
      // This is a new virtual function that does not override any
      // other virtual function. Add it to the map of virtual
      // functions for which we are tracking overridders.
      // C++ [class.virtual]p2:
      //   For convenience we say that any virtual function overrides itself.
      Overriders[CanonM].add(SubobjectNumber,
                             UniqueVirtualMethod(CanonM, SubobjectNumber,
                                                 InVirtualSubobject));
      continue;
    }
    // This virtual method overrides other virtual methods, so it does
    // not add any new slots into the set of overriders. Instead, we
    // replace entries in the set of overriders with the new
    // overrider. To do so, we dig down to the original virtual
    // functions using data recursion and update all of the methods it
    // overrides.
    typedef llvm::iterator_range<CXXMethodDecl::method_iterator>
        OverriddenMethods;
    SmallVector<OverriddenMethods, 4> Stack;
    Stack.push_back(llvm::make_range(CanonM->begin_overridden_methods(),
                                     CanonM->end_overridden_methods()));
    while (!Stack.empty()) {
      for (const CXXMethodDecl *OM : Stack.pop_back_val()) {
        const CXXMethodDecl *CanonOM = OM->getCanonicalDecl();
        // C++ [class.virtual]p2:
        //   A virtual member function C::vf of a class object S is
        //   a final overrider unless the most derived class (1.8)
        //   of which S is a base class subobject (if any) declares
        //   or inherits another member function that overrides vf.
        //
        // Treating this object like the most derived class, we
        // replace any overrides from base classes with this
        // overriding virtual function.
        Overriders[CanonOM].replaceAll(
                               UniqueVirtualMethod(CanonM, SubobjectNumber,
                                                   InVirtualSubobject));
        if (CanonOM->begin_overridden_methods()
                                       == CanonOM->end_overridden_methods())
          continue;
        // Continue recursion to the methods that this virtual method
        // overrides.
        Stack.push_back(llvm::make_range(CanonOM->begin_overridden_methods(),
                                         CanonOM->end_overridden_methods()));
      }
    }
    // C++ [class.virtual]p2:
    //   For convenience we say that any virtual function overrides itself.
    Overriders[CanonM].add(SubobjectNumber,
                           UniqueVirtualMethod(CanonM, SubobjectNumber,
                                               InVirtualSubobject));
  }
}
/// Release the per-virtual-base overrider maps allocated in Collect().
FinalOverriderCollector::~FinalOverriderCollector() {
  for (auto &Entry : VirtualOverriders)
    delete Entry.second;
}
/// Compute the final overriders of every virtual member function in this
/// class's hierarchy, then prune overriders from virtual base subobjects
/// that are hidden by overriders along other paths.
void
CXXRecordDecl::getFinalOverriders(CXXFinalOverriderMap &FinalOverriders) const {
  FinalOverriderCollector Collector;
  Collector.Collect(this, false, nullptr, FinalOverriders);
  // Weed out any final overriders that come from virtual base class
  // subobjects that were hidden by other subobjects along any path.
  // This is the final-overrider variant of C++ [class.member.lookup]p10.
  for (auto &OM : FinalOverriders) {
    for (auto &SO : OM.second) {
      SmallVectorImpl<UniqueVirtualMethod> &Overriding = SO.second;
      // A single overrider can never be hidden.
      if (Overriding.size() < 2)
        continue;
      auto IsHidden = [&Overriding](const UniqueVirtualMethod &M) {
        if (!M.InVirtualSubobject)
          return false;
        // We have an overriding method in a virtual base class
        // subobject (or non-virtual base class subobject thereof);
        // determine whether there exists an other overriding method
        // in a base class subobject that hides the virtual base class
        // subobject.
        for (const UniqueVirtualMethod &OP : Overriding)
          if (&M != &OP &&
              OP.Method->getParent()->isVirtuallyDerivedFrom(
                  M.InVirtualSubobject))
            return true;
        return false;
      };
      Overriding.erase(
          std::remove_if(Overriding.begin(), Overriding.end(), IsHidden),
          Overriding.end());
    }
  }
}
/// Recursively record every virtual primary base reachable from \p RD
/// into \p Bases.
static void
AddIndirectPrimaryBases(const CXXRecordDecl *RD, ASTContext &Context,
                        CXXIndirectPrimaryBaseSet& Bases) {
  // A virtual primary base of RD is an indirect primary base of the
  // class this walk started from; record it.
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
  if (Layout.isPrimaryBaseVirtual())
    Bases.insert(Layout.getPrimaryBase());
  // Recurse into every direct base; only bases that themselves have
  // virtual bases can contribute further virtual primary bases.
  for (const auto &Base : RD->bases()) {
    assert(!Base.getType()->isDependentType() &&
           "Cannot get indirect primary bases for class with dependent bases.");
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
    if (BaseDecl->getNumVBases())
      AddIndirectPrimaryBases(BaseDecl, Context, Bases);
  }
}
/// Collect every class that acts as a virtual primary base somewhere in
/// this class's base hierarchy.
void
CXXRecordDecl::getIndirectPrimaryBases(CXXIndirectPrimaryBaseSet& Bases) const {
  ASTContext &Context = getASTContext();
  // Without virtual bases there can be no virtual primary bases below us.
  if (!getNumVBases())
    return;
  for (const auto &Base : bases()) {
    assert(!Base.getType()->isDependentType() &&
           "Cannot get indirect primary bases for class with dependent bases.");
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
    // Only bases that themselves have virtual bases can contribute
    // indirect primary virtual base classes.
    if (BaseDecl->getNumVBases())
      AddIndirectPrimaryBases(BaseDecl, Context, Bases);
  }
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/CommentCommandTraits.cpp | //===--- CommentCommandTraits.cpp - Comment command properties --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/CommentCommandTraits.h"
#include "llvm/ADT/STLExtras.h"
namespace clang {
namespace comments {
#include "clang/AST/CommentCommandInfo.inc"
/// Construct the traits table. IDs below array_lengthof(Commands) are
/// builtin; NextID starts just past them so user-registered commands get
/// fresh IDs. Any command names from \p CommentOptions are registered
/// immediately as block commands.
CommandTraits::CommandTraits(llvm::BumpPtrAllocator &Allocator,
                             const CommentOptions &CommentOptions) :
    NextID(llvm::array_lengthof(Commands)), Allocator(Allocator) {
  registerCommentOptions(CommentOptions);
}
void CommandTraits::registerCommentOptions(
const CommentOptions &CommentOptions) {
for (CommentOptions::BlockCommandNamesTy::const_iterator
I = CommentOptions.BlockCommandNames.begin(),
E = CommentOptions.BlockCommandNames.end();
I != E; I++) {
registerBlockCommand(*I);
}
}
/// Look up a command by name: builtin commands take precedence, then
/// user-registered ones; returns null if neither exists.
const CommandInfo *CommandTraits::getCommandInfoOrNULL(StringRef Name) const {
  const CommandInfo *Builtin = getBuiltinCommandInfo(Name);
  return Builtin ? Builtin : getRegisteredCommandInfo(Name);
}
/// Look up a command by ID: builtin IDs resolve to the static table,
/// anything else to the registered-command list.
const CommandInfo *CommandTraits::getCommandInfo(unsigned CommandID) const {
  const CommandInfo *Builtin = getBuiltinCommandInfo(CommandID);
  return Builtin ? Builtin : getRegisteredCommandInfo(CommandID);
}
/// Find the unique command whose name is within edit distance 1 of
/// \p Typo, considering both builtin and (known) registered commands.
/// Returns null when no candidate or more than one equally-good
/// candidate exists.
const CommandInfo *
CommandTraits::getTypoCorrectCommandInfo(StringRef Typo) const {
  // Single-character command impostures, such as \t or \n, should not go
  // through the fixit logic.
  if (Typo.size() <= 1)
    return nullptr;
  // The maximum edit distance we're prepared to accept.
  const unsigned MaxEditDistance = 1;
  unsigned BestEditDistance = MaxEditDistance;
  SmallVector<const CommandInfo *, 2> BestCommand;
  auto ConsiderCorrection = [&](const CommandInfo *Command) {
    StringRef Name = Command->Name;
    // Length difference is a cheap lower bound on edit distance; skip
    // candidates that cannot possibly beat the current best.
    unsigned MinPossibleEditDistance = abs((int)Name.size() - (int)Typo.size());
    if (MinPossibleEditDistance <= BestEditDistance) {
      unsigned EditDistance = Typo.edit_distance(Name, true, BestEditDistance);
      if (EditDistance < BestEditDistance) {
        BestEditDistance = EditDistance;
        BestCommand.clear();
      }
      if (EditDistance == BestEditDistance)
        BestCommand.push_back(Command);
    }
  };
  for (const auto &Command : Commands)
    ConsiderCorrection(&Command);
  for (const auto *Command : RegisteredCommands)
    if (!Command->IsUnknownCommand)
      ConsiderCorrection(Command);
  // Only a unique best candidate is a usable correction.
  return BestCommand.size() == 1 ? BestCommand[0] : nullptr;
}
/// Allocate a fresh CommandInfo (and a NUL-terminated copy of its name)
/// from the BumpPtrAllocator, assign it the next free ID, and record it
/// in RegisteredCommands. All flags start zero-initialized.
CommandInfo *CommandTraits::createCommandInfoWithName(StringRef CommandName) {
  // StringRef is not NUL-terminated; make an owned, terminated copy.
  char *Name = Allocator.Allocate<char>(CommandName.size() + 1);
  memcpy(Name, CommandName.data(), CommandName.size());
  Name[CommandName.size()] = '\0';
  // Value-initialize (=zero-initialize in this case) a new CommandInfo.
  CommandInfo *Info = new (Allocator) CommandInfo();
  Info->Name = Name;
  // We only have a limited number of bits to encode command IDs in the
  // CommandInfo structure, so the ID numbers can potentially wrap around.
  assert((NextID < (1 << CommandInfo::NumCommandIDBits))
         && "Too many commands. We have limited bits for the command ID.");
  Info->ID = NextID++;
  RegisteredCommands.push_back(Info);
  return Info;
}
/// Register \p CommandName as an unknown command so later uses resolve
/// to the same CommandInfo/ID.
const CommandInfo *CommandTraits::registerUnknownCommand(
                                                  StringRef CommandName) {
  CommandInfo *Result = createCommandInfoWithName(CommandName);
  Result->IsUnknownCommand = true;
  return Result;
}
/// Register \p CommandName as a user-defined block command.
const CommandInfo *CommandTraits::registerBlockCommand(StringRef CommandName) {
  CommandInfo *Result = createCommandInfoWithName(CommandName);
  Result->IsBlockCommand = true;
  return Result;
}
/// Return the builtin command for \p CommandID, or null if the ID lies
/// beyond the static Commands table (i.e. it is a registered command).
const CommandInfo *CommandTraits::getBuiltinCommandInfo(
                                                  unsigned CommandID) {
  return CommandID < llvm::array_lengthof(Commands) ? &Commands[CommandID]
                                                    : nullptr;
}
/// Find a user-registered command by name; null if absent. A linear scan
/// suffices because the registered-command list is expected to be small.
const CommandInfo *CommandTraits::getRegisteredCommandInfo(
                                                  StringRef Name) const {
  for (const CommandInfo *Info : RegisteredCommands)
    if (Name == Info->Name)
      return Info;
  return nullptr;
}
/// Return the registered command with the given ID. Registered IDs start
/// right after the builtin table, so subtract its length to index the
/// RegisteredCommands vector. The caller must pass a valid registered ID.
const CommandInfo *CommandTraits::getRegisteredCommandInfo(
                                                  unsigned CommandID) const {
  return RegisteredCommands[CommandID - llvm::array_lengthof(Commands)];
}
} // end namespace comments
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/Type.cpp | //===--- Type.cpp - Type representation and manipulation ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements type-related functionality.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace clang;
/// Determine whether this qualifier set is a strict (proper) superset of
/// \p Other: unequal overall, CVR bits include Other's, and for each of
/// the ObjC GC attribute, address space, and ObjC lifetime, either the
/// values match or this has one where Other has none.
bool Qualifiers::isStrictSupersetOf(Qualifiers Other) const {
  return (*this != Other) &&
    // CVR qualifiers superset
    (((Mask & CVRMask) | (Other.Mask & CVRMask)) == (Mask & CVRMask)) &&
    // ObjC GC qualifiers superset
    ((getObjCGCAttr() == Other.getObjCGCAttr()) ||
     (hasObjCGCAttr() && !Other.hasObjCGCAttr())) &&
    // Address space superset.
    ((getAddressSpace() == Other.getAddressSpace()) ||
     (hasAddressSpace()&& !Other.hasAddressSpace())) &&
    // Lifetime qualifier superset.
    ((getObjCLifetime() == Other.getObjCLifetime()) ||
     (hasObjCLifetime() && !Other.hasObjCLifetime()));
}
/// Return the identifier of the "base" named type underlying this type,
/// looking through pointers, references, and arrays; null when the
/// underlying type has no associated named declaration.
const IdentifierInfo* QualType::getBaseTypeIdentifier() const {
  const Type* ty = getTypePtr();
  NamedDecl *ND = nullptr;
  // Recurse through pointers/references/arrays to the element type.
  if (ty->isPointerType() || ty->isReferenceType())
    return ty->getPointeeType().getBaseTypeIdentifier();
  else if (ty->isRecordType())
    ND = ty->getAs<RecordType>()->getDecl();
  else if (ty->isEnumeralType())
    ND = ty->getAs<EnumType>()->getDecl();
  else if (ty->getTypeClass() == Type::Typedef)
    ND = ty->getAs<TypedefType>()->getDecl();
  else if (ty->isArrayType())
    return ty->castAsArrayTypeUnsafe()->
        getElementType().getBaseTypeIdentifier();
  if (ND)
    return ND->getIdentifier();
  return nullptr;
}
/// Determine whether \p T is a constant type: const-qualified, an array
/// of constant elements, or in OpenCL's __constant address space.
bool QualType::isConstant(QualType T, ASTContext &Ctx) {
  if (T.isConstQualified())
    return true;
  // Array constness is determined by the element type.
  if (const ArrayType *Arr = Ctx.getAsArrayType(T))
    return Arr->getElementType().isConstant(Ctx);
  return T.getAddressSpace() == LangAS::opencl_constant;
}
/// Compute the number of bits required to address any byte of an array
/// with \p NumElements elements of \p ElementType, without overflow.
unsigned ConstantArrayType::getNumAddressingBits(ASTContext &Context,
                                                 QualType ElementType,
                                                 const llvm::APInt &NumElements) {
  uint64_t ElementSize = Context.getTypeSizeInChars(ElementType).getQuantity();
  // Fast path the common cases so we can avoid the conservative computation
  // below, which in common cases allocates "large" APSInt values, which are
  // slow.
  // If the element size is a power of 2, we can directly compute the additional
  // number of addressing bits beyond those required for the element count.
  if (llvm::isPowerOf2_64(ElementSize)) {
    return NumElements.getActiveBits() + llvm::Log2_64(ElementSize);
  }
  // If both the element count and element size fit in 32-bits, we can do the
  // computation directly in 64-bits.
  if ((ElementSize >> 32) == 0 && NumElements.getBitWidth() <= 64 &&
      (NumElements.getZExtValue() >> 32) == 0) {
    // 64-bit product cannot overflow since both factors fit in 32 bits.
    uint64_t TotalSize = NumElements.getZExtValue() * ElementSize;
    return 64 - llvm::countLeadingZeros(TotalSize);
  }
  // Otherwise, use APSInt to handle arbitrary sized values.
  llvm::APSInt SizeExtended(NumElements, true);
  unsigned SizeTypeBits = Context.getTypeSize(Context.getSizeType());
  // Double the width so the multiplication below cannot wrap.
  SizeExtended = SizeExtended.extend(std::max(SizeTypeBits,
                                              SizeExtended.getBitWidth()) * 2);
  llvm::APSInt TotalSize(llvm::APInt(SizeExtended.getBitWidth(), ElementSize));
  TotalSize *= SizeExtended;
  return TotalSize.getActiveBits();
}
/// Return the maximum number of bits usable for a constant array size
/// expression: the width of size_t, capped at 61 so that the maximal
/// bit-size of an array still fits a 64-bit integer (see PR8256) — no
/// current hardware supports a full 64-bit virtual address space anyway.
unsigned ConstantArrayType::getMaxSizeBits(ASTContext &Context) {
  const unsigned SizeTypeBits = Context.getTypeSize(Context.getSizeType());
  return std::min(SizeTypeBits, 61u);
}
/// Construct an array type whose size is a value-dependent expression
/// \p e (possibly null for a dependent incomplete array). The type
/// contains an unexpanded parameter pack if either the element type or
/// the size expression does.
DependentSizedArrayType::DependentSizedArrayType(const ASTContext &Context,
                                                 QualType et, QualType can,
                                                 Expr *e, ArraySizeModifier sm,
                                                 unsigned tq,
                                                 SourceRange brackets)
    : ArrayType(DependentSizedArray, et, can, sm, tq,
                (et->containsUnexpandedParameterPack() ||
                 (e && e->containsUnexpandedParameterPack()))),
      Context(Context), SizeExpr((Stmt*) e), Brackets(brackets)
{
}
/// Fold the identity of a dependent-sized array type into \p ID for
/// uniquing: element type, size modifier, qualifiers, and the canonical
/// profile of the size expression.
void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID,
                                      const ASTContext &Context,
                                      QualType ET,
                                      ArraySizeModifier SizeMod,
                                      unsigned TypeQuals,
                                      Expr *E) {
  ID.AddPointer(ET.getAsOpaquePtr());
  ID.AddInteger(SizeMod);
  ID.AddInteger(TypeQuals);
  E->Profile(ID, Context, true);
}
/// Construct an ext_vector type whose element count is the value-
/// dependent expression \p SizeExpr. The type is always dependent and
/// instantiation-dependent by construction.
DependentSizedExtVectorType::DependentSizedExtVectorType(const
                                                         ASTContext &Context,
                                                         QualType ElementType,
                                                         QualType can,
                                                         Expr *SizeExpr,
                                                         SourceLocation loc)
    : Type(DependentSizedExtVector, can, /*Dependent=*/true,
           /*InstantiationDependent=*/true,
           ElementType->isVariablyModifiedType(),
           (ElementType->containsUnexpandedParameterPack() ||
            (SizeExpr && SizeExpr->containsUnexpandedParameterPack()))),
      Context(Context), SizeExpr(SizeExpr), ElementType(ElementType),
      loc(loc)
{
}
/// Fold the identity of a dependent-sized ext_vector into \p ID for
/// uniquing: element type plus the canonical profile of the size
/// expression.
void
DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID,
                                     const ASTContext &Context,
                                     QualType ElementType, Expr *SizeExpr) {
  ID.AddPointer(ElementType.getAsOpaquePtr());
  SizeExpr->Profile(ID, Context, true);
}
/// Construct a plain (GCC-style) vector type; delegates to the TypeClass
/// overload with the Vector class tag.
VectorType::VectorType(QualType vecType, unsigned nElements, QualType canonType,
                       VectorKind vecKind)
  : VectorType(Vector, vecType, nElements, canonType, vecKind) {}

/// Construct a vector type of \p nElements elements of \p vecType.
/// Dependence/variability properties are inherited from the element type;
/// the kind and count are packed into the type's bitfields.
VectorType::VectorType(TypeClass tc, QualType vecType, unsigned nElements,
                       QualType canonType, VectorKind vecKind)
  : Type(tc, canonType, vecType->isDependentType(),
         vecType->isInstantiationDependentType(),
         vecType->isVariablyModifiedType(),
         vecType->containsUnexpandedParameterPack()),
    ElementType(vecType)
{
  VectorTypeBits.VecKind = vecKind;
  VectorTypeBits.NumElements = nElements;
}
/// getArrayElementTypeNoTypeQual - If this is an array type, return the
/// element type of the array, potentially with type qualifiers missing.
/// This method should never be used when type qualifiers are meaningful.
/// Returns null if this type is not (canonically) an array type.
const Type *Type::getArrayElementTypeNoTypeQual() const {
  // If this is directly an array type, return it.
  if (const ArrayType *ATy = dyn_cast<ArrayType>(this))
    return ATy->getElementType().getTypePtr();
  // If the canonical form of this type isn't the right kind, reject it.
  if (!isa<ArrayType>(CanonicalType))
    return nullptr;
  // If this is a typedef for an array type, strip the typedef off without
  // losing all typedef information.
  return cast<ArrayType>(getUnqualifiedDesugaredType())
    ->getElementType().getTypePtr();
}
/// getDesugaredType - Return the specified type with any "sugar" removed from
/// the type. This takes off typedefs, typeof's etc. If the outer level of
/// the type is already concrete, it returns it unmodified. This is similar
/// to getting the canonical type, but it doesn't remove *all* typedefs. For
/// example, it returns "T*" as "T*", (not as "int*"), because the pointer is
/// concrete. Qualifiers collected while desugaring are re-applied.
QualType QualType::getDesugaredType(QualType T, const ASTContext &Context) {
  SplitQualType split = getSplitDesugaredType(T);
  return Context.getQualifiedType(split.Ty, split.Quals);
}
/// Peel exactly one layer of sugar off \p type, re-applying the
/// qualifiers written at this level.
QualType QualType::getSingleStepDesugaredTypeImpl(QualType type,
                                                  const ASTContext &Context) {
  SplitQualType Split = type.split();
  QualType Stripped =
      Split.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
  return Context.getQualifiedType(Stripped, Split.Quals);
}
/// Remove one level of sugar from this type node (ignoring qualifiers at
/// this level); returns the type unchanged when it carries no sugar. The
/// switch is generated over every concrete type class via TypeNodes.def.
QualType Type::getLocallyUnqualifiedSingleStepDesugaredType() const {
  switch (getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
  case Type::Class: { \
    const Class##Type *ty = cast<Class##Type>(this); \
    if (!ty->isSugared()) return QualType(ty, 0); \
    return ty->desugar(); \
  }
#include "clang/AST/TypeNodes.def"
  }
  llvm_unreachable("bad type kind!");
}
/// Fully desugar \p T, accumulating the qualifiers stripped at every
/// level into the returned SplitQualType alongside the first non-sugared
/// type node reached.
SplitQualType QualType::getSplitDesugaredType(QualType T) {
  QualifierCollector Qs;
  QualType Cur = T;
  while (true) {
    const Type *CurTy = Qs.strip(Cur);
    switch (CurTy->getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
    case Type::Class: { \
      const Class##Type *Ty = cast<Class##Type>(CurTy); \
      if (!Ty->isSugared()) \
        return SplitQualType(Ty, Qs); \
      Cur = Ty->desugar(); \
      break; \
    }
#include "clang/AST/TypeNodes.def"
    }
  }
}
/// Strip qualifiers from \p type, desugaring only as far as needed to
/// expose all qualifiers. Returns the last sugared node that introduced
/// qualifiers (not necessarily the fully desugared type) together with
/// the accumulated qualifier set.
SplitQualType QualType::getSplitUnqualifiedTypeImpl(QualType type) {
  SplitQualType split = type.split();
  // All the qualifiers we've seen so far.
  Qualifiers quals = split.Quals;
  // The last type node we saw with any nodes inside it.
  const Type *lastTypeWithQuals = split.Ty;
  while (true) {
    QualType next;
    // Do a single-step desugar, aborting the loop if the type isn't
    // sugared.
    switch (split.Ty->getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
    case Type::Class: { \
      const Class##Type *ty = cast<Class##Type>(split.Ty); \
      if (!ty->isSugared()) goto done; \
      next = ty->desugar(); \
      break; \
    }
#include "clang/AST/TypeNodes.def"
    }
    // Otherwise, split the underlying type. If that yields qualifiers,
    // update the information.
    split = next.split();
    if (!split.Quals.empty()) {
      lastTypeWithQuals = split.Ty;
      quals.addConsistentQualifiers(split.Quals);
    }
  }
 done:
  return SplitQualType(lastTypeWithQuals, quals);
}
/// Strip all nested ParenType sugar nodes from \p T.
QualType QualType::IgnoreParens(QualType T) {
  // FIXME: this seems inherently un-qualifiers-safe.
  for (const ParenType *Paren = T->getAs<ParenType>(); Paren;
       Paren = T->getAs<ParenType>())
    T = Paren->getInnerType();
  return T;
}
/// \brief This will check for a T (which should be a Type which can act as
/// sugar, such as a TypedefType) by removing any existing sugar until it
/// reaches a T or a non-sugared type. Returns null if no T is found
/// before the sugar runs out.
template<typename T> static const T *getAsSugar(const Type *Cur) {
  while (true) {
    if (const T *Sugar = dyn_cast<T>(Cur))
      return Sugar;
    switch (Cur->getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
    case Type::Class: { \
      const Class##Type *Ty = cast<Class##Type>(Cur); \
      if (!Ty->isSugared()) return nullptr; \
      Cur = Ty->desugar().getTypePtr(); \
      break; \
    }
#include "clang/AST/TypeNodes.def"
    }
  }
}
// Explicit getAs<> specializations for sugar-only type nodes: these can
// never be found via the canonical type, so search through the sugar
// chain instead.
template <> const TypedefType *Type::getAs() const {
  return getAsSugar<TypedefType>(this);
}

template <> const TemplateSpecializationType *Type::getAs() const {
  return getAsSugar<TemplateSpecializationType>(this);
}

template <> const AttributedType *Type::getAs() const {
  return getAsSugar<AttributedType>(this);
}
/// getUnqualifiedDesugaredType - Pull any qualifiers and syntactic
/// sugar off the given type. This should produce an object of the
/// same dynamic type as the canonical type.
const Type *Type::getUnqualifiedDesugaredType() const {
  const Type *Cur = this;
  while (true) {
    switch (Cur->getTypeClass()) {
#define ABSTRACT_TYPE(Class, Parent)
#define TYPE(Class, Parent) \
    case Class: { \
      const Class##Type *Ty = cast<Class##Type>(Cur); \
      if (!Ty->isSugared()) return Cur; \
      Cur = Ty->desugar().getTypePtr(); \
      break; \
    }
#include "clang/AST/TypeNodes.def"
    }
  }
}
bool Type::isClassType() const {
if (const RecordType *RT = getAs<RecordType>())
return RT->getDecl()->isClass();
return false;
}
bool Type::isStructureType() const {
if (const RecordType *RT = getAs<RecordType>())
return RT->getDecl()->isStruct();
return false;
}
bool Type::isObjCBoxableRecordType() const {
if (const RecordType *RT = getAs<RecordType>())
return RT->getDecl()->hasAttr<ObjCBoxableAttr>();
return false;
}
bool Type::isInterfaceType() const {
if (const RecordType *RT = getAs<RecordType>())
return RT->getDecl()->isInterface();
return false;
}
bool Type::isStructureOrClassType() const {
if (const RecordType *RT = getAs<RecordType>()) {
RecordDecl *RD = RT->getDecl();
return RD->isStruct() || RD->isClass() || RD->isInterface();
}
return false;
}
bool Type::isVoidPointerType() const {
if (const PointerType *PT = getAs<PointerType>())
return PT->getPointeeType()->isVoidType();
return false;
}
bool Type::isUnionType() const {
if (const RecordType *RT = getAs<RecordType>())
return RT->getDecl()->isUnion();
return false;
}
bool Type::isComplexType() const {
if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
return CT->getElementType()->isFloatingType();
return false;
}
bool Type::isComplexIntegerType() const {
  // Check for GCC complex integer extension.
  // Non-null result from getAsComplexIntegerType() converts to true.
  return getAsComplexIntegerType();
}
const ComplexType *Type::getAsComplexIntegerType() const {
  // GCC extension: _Complex of an integer element type.
  const ComplexType *Complex = getAs<ComplexType>();
  if (Complex && Complex->getElementType()->isIntegerType())
    return Complex;
  return nullptr;
}
QualType Type::getPointeeType() const {
  // Return the pointee for any pointer-like type (object pointers, block
  // pointers, references, member pointers, decayed types); a null QualType
  // if this type does not point at anything.
  if (const PointerType *Ptr = getAs<PointerType>())
    return Ptr->getPointeeType();
  if (const ObjCObjectPointerType *ObjCPtr = getAs<ObjCObjectPointerType>())
    return ObjCPtr->getPointeeType();
  if (const BlockPointerType *BlockPtr = getAs<BlockPointerType>())
    return BlockPtr->getPointeeType();
  if (const ReferenceType *Ref = getAs<ReferenceType>())
    return Ref->getPointeeType();
  if (const MemberPointerType *MemPtr = getAs<MemberPointerType>())
    return MemPtr->getPointeeType();
  if (const DecayedType *Decayed = getAs<DecayedType>())
    return Decayed->getPointeeType();
  return QualType();
}
/// Return this type as a RecordType if it is (possibly sugared) 'struct',
/// preserving as much typedef sugar as possible; nullptr otherwise.
const RecordType *Type::getAsStructureType() const {
  // If this is directly a structure type, return it.
  if (const RecordType *RT = dyn_cast<RecordType>(this)) {
    if (RT->getDecl()->isStruct())
      return RT;
  }

  // If the canonical form of this type isn't the right kind, reject it.
  if (const RecordType *RT = dyn_cast<RecordType>(CanonicalType)) {
    if (!RT->getDecl()->isStruct())
      return nullptr;

    // If this is a typedef for a structure type, strip the typedef off without
    // losing all typedef information.
    return cast<RecordType>(getUnqualifiedDesugaredType());
  }
  return nullptr;
}
/// Return this type as a RecordType if it is a (possibly sugared) 'union',
/// preserving as much typedef sugar as possible; nullptr otherwise.
const RecordType *Type::getAsUnionType() const {
  // If this is directly a union type, return it.
  if (const RecordType *RT = dyn_cast<RecordType>(this)) {
    if (RT->getDecl()->isUnion())
      return RT;
  }

  // If the canonical form of this type isn't the right kind, reject it.
  if (const RecordType *RT = dyn_cast<RecordType>(CanonicalType)) {
    if (!RT->getDecl()->isUnion())
      return nullptr;

    // If this is a typedef for a union type, strip the typedef off without
    // losing all typedef information.
    return cast<RecordType>(getUnqualifiedDesugaredType());
  }
  return nullptr;
}
/// Determine whether this is 'id' or a '__kindof' object pointer type.
/// On success, \p bound is set to the type bound of a __kindof type
/// (nullptr for plain 'id').
bool Type::isObjCIdOrObjectKindOfType(const ASTContext &ctx,
                                      const ObjCObjectType *&bound) const {
  bound = nullptr;

  const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>();
  if (!OPT)
    return false;

  // Easy case: id.
  if (OPT->isObjCIdType())
    return true;

  // If it's not a __kindof type, reject it now.
  if (!OPT->isKindOfType())
    return false;

  // If it's Class or qualified Class, it's not an object type.
  if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType())
    return false;

  // Figure out the type bound for the __kindof type.
  bound = OPT->getObjectType()->stripObjCKindOfTypeAndQuals(ctx)
            ->getAs<ObjCObjectType>();
  return true;
}
/// Determine whether this is 'Class' or a '__kindof Class'-style
/// object pointer type.
bool Type::isObjCClassOrClassKindOfType() const {
  const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>();
  if (!OPT)
    return false;

  // Easy case: Class.
  if (OPT->isObjCClassType())
    return true;

  // If it's not a __kindof type, reject it now.
  if (!OPT->isKindOfType())
    return false;

  // If it's Class or qualified Class, it's a class __kindof type.
  return OPT->isObjCClassType() || OPT->isObjCQualifiedClassType();
}
/// Construct an Objective-C object type with the given base type, written
/// type arguments, and protocol qualifiers; dependence bits are seeded from
/// the base and then widened by the type arguments below.
ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
                               ArrayRef<QualType> typeArgs,
                               ArrayRef<ObjCProtocolDecl *> protocols,
                               bool isKindOf)
  : Type(ObjCObject, Canonical, Base->isDependentType(),
         Base->isInstantiationDependentType(),
         Base->isVariablyModifiedType(),
         Base->containsUnexpandedParameterPack()),
    BaseType(Base)
{
  ObjCObjectTypeBits.IsKindOf = isKindOf;

  // Counts live in bitfields; assert they actually fit.
  ObjCObjectTypeBits.NumTypeArgs = typeArgs.size();
  assert(getTypeArgsAsWritten().size() == typeArgs.size() &&
         "bitfield overflow in type argument count");
  ObjCObjectTypeBits.NumProtocols = protocols.size();
  assert(getNumProtocols() == protocols.size() &&
         "bitfield overflow in protocol count");
  // Copy type arguments and protocols into the storage returned by
  // getTypeArgStorage()/getProtocolStorage().
  if (!typeArgs.empty())
    memcpy(getTypeArgStorage(), typeArgs.data(),
           typeArgs.size() * sizeof(QualType));
  if (!protocols.empty())
    memcpy(getProtocolStorage(), protocols.data(),
           protocols.size() * sizeof(ObjCProtocolDecl*));

  // Propagate dependence from the type arguments into this type.
  for (auto typeArg : typeArgs) {
    if (typeArg->isDependentType())
      setDependent();
    else if (typeArg->isInstantiationDependentType())
      setInstantiationDependent();

    if (typeArg->containsUnexpandedParameterPack())
      setContainsUnexpandedParameterPack();
  }
}
/// Whether this type, or any base type it is built on, carries type
/// arguments (i.e. is a specialization of a parameterized class).
bool ObjCObjectType::isSpecialized() const {
  // If we have type arguments written here, the type is specialized.
  if (ObjCObjectTypeBits.NumTypeArgs > 0)
    return true;

  // Otherwise, check whether the base type is specialized.
  if (auto objcObject = getBaseType()->getAs<ObjCObjectType>()) {
    // Terminate when we reach an interface type.
    if (isa<ObjCInterfaceType>(objcObject))
      return false;

    return objcObject->isSpecialized();
  }

  // Not specialized.
  return false;
}
/// Retrieve the effective type arguments: those written on this node if any,
/// otherwise those found on a (non-interface) base type.
ArrayRef<QualType> ObjCObjectType::getTypeArgs() const {
  // We have type arguments written on this type.
  if (isSpecializedAsWritten())
    return getTypeArgsAsWritten();

  // Look at the base type, which might have type arguments.
  if (auto objcObject = getBaseType()->getAs<ObjCObjectType>()) {
    // Terminate when we reach an interface type.
    if (isa<ObjCInterfaceType>(objcObject))
      return { };

    return objcObject->getTypeArgs();
  }

  // No type arguments.
  return { };
}
/// Whether this type, or any base type it is built on, is a __kindof type.
bool ObjCObjectType::isKindOfType() const {
  if (isKindOfTypeAsWritten())
    return true;

  // Look at the base type, which might have type arguments.
  if (auto objcObject = getBaseType()->getAs<ObjCObjectType>()) {
    // Terminate when we reach an interface type.
    if (isa<ObjCInterfaceType>(objcObject))
      return false;

    return objcObject->isKindOfType();
  }

  // Not a "__kindof" type.
  return false;
}
/// Rebuild this object type without any __kindof qualifier (recursively)
/// and without protocol qualifiers, preserving the written type arguments.
QualType ObjCObjectType::stripObjCKindOfTypeAndQuals(
           const ASTContext &ctx) const {
  // Fast path: nothing to strip.
  if (!isKindOfType() && qual_empty())
    return QualType(this, 0);

  // Recursively strip __kindof.
  SplitQualType splitBaseType = getBaseType().split();
  QualType baseType(splitBaseType.Ty, 0);
  if (const ObjCObjectType *baseObj
        = splitBaseType.Ty->getAs<ObjCObjectType>()) {
    baseType = baseObj->stripObjCKindOfTypeAndQuals(ctx);
  }

  // Rebuild with /*protocols=*/{ } (protocol qualifiers dropped) and
  // isKindOf=false.
  return ctx.getObjCObjectType(ctx.getQualifiedType(baseType,
                                                    splitBaseType.Quals),
                               getTypeArgsAsWritten(),
                               /*protocols=*/{ },
                               /*isKindOf=*/false);
}
/// Pointer-level counterpart: strip __kindof (and protocol qualifiers) from
/// the pointee object type, returning a new object pointer type.
const ObjCObjectPointerType *ObjCObjectPointerType::stripObjCKindOfTypeAndQuals(
           const ASTContext &ctx) const {
  // Fast path: nothing to strip.
  if (!isKindOfType() && qual_empty())
    return this;

  QualType obj = getObjectType()->stripObjCKindOfTypeAndQuals(ctx);
  return ctx.getObjCObjectPointerType(obj)->castAs<ObjCObjectPointerType>();
}
namespace {

template<typename F>
QualType simpleTransform(ASTContext &ctx, QualType type, F &&f);

/// Visitor used by simpleTransform() to perform the transformation.
/// Each Visit* method recurses into component types and rebuilds the node
/// only when a component actually changed (pointer-identity check), so
/// unchanged subtrees are returned as-is.
template<typename F>
struct SimpleTransformVisitor
         : public TypeVisitor<SimpleTransformVisitor<F>, QualType> {
  ASTContext &Ctx;
  F &&TheFunc;

  // Re-enter the top-level transform so qualifiers and the user's function
  // are applied at every level of the type.
  QualType recurse(QualType type) {
    return simpleTransform(Ctx, type, std::move(TheFunc));
  }

public:
  SimpleTransformVisitor(ASTContext &ctx, F &&f) : Ctx(ctx), TheFunc(std::move(f)) { }

  // None of the clients of this transformation can occur where
  // there are dependent types, so skip dependent types.
#define TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) \
  QualType Visit##Class##Type(const Class##Type *T) { return QualType(T, 0); }
#include "clang/AST/TypeNodes.def"

#define TRIVIAL_TYPE_CLASS(Class) \
  QualType Visit##Class##Type(const Class##Type *T) { return QualType(T, 0); }

  TRIVIAL_TYPE_CLASS(Builtin)

  QualType VisitComplexType(const ComplexType *T) {
    QualType elementType = recurse(T->getElementType());
    if (elementType.isNull())
      return QualType();

    if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getComplexType(elementType);
  }

  QualType VisitPointerType(const PointerType *T) {
    QualType pointeeType = recurse(T->getPointeeType());
    if (pointeeType.isNull())
      return QualType();

    if (pointeeType.getAsOpaquePtr() == T->getPointeeType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getPointerType(pointeeType);
  }

  QualType VisitBlockPointerType(const BlockPointerType *T) {
    QualType pointeeType = recurse(T->getPointeeType());
    if (pointeeType.isNull())
      return QualType();

    if (pointeeType.getAsOpaquePtr() == T->getPointeeType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getBlockPointerType(pointeeType);
  }

  QualType VisitLValueReferenceType(const LValueReferenceType *T) {
    QualType pointeeType = recurse(T->getPointeeTypeAsWritten());
    if (pointeeType.isNull())
      return QualType();

    if (pointeeType.getAsOpaquePtr()
          == T->getPointeeTypeAsWritten().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getLValueReferenceType(pointeeType, T->isSpelledAsLValue());
  }

  QualType VisitRValueReferenceType(const RValueReferenceType *T) {
    QualType pointeeType = recurse(T->getPointeeTypeAsWritten());
    if (pointeeType.isNull())
      return QualType();

    if (pointeeType.getAsOpaquePtr()
          == T->getPointeeTypeAsWritten().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getRValueReferenceType(pointeeType);
  }

  QualType VisitMemberPointerType(const MemberPointerType *T) {
    QualType pointeeType = recurse(T->getPointeeType());
    if (pointeeType.isNull())
      return QualType();

    if (pointeeType.getAsOpaquePtr() == T->getPointeeType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getMemberPointerType(pointeeType, T->getClass());
  }

  QualType VisitConstantArrayType(const ConstantArrayType *T) {
    QualType elementType = recurse(T->getElementType());
    if (elementType.isNull())
      return QualType();

    if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getConstantArrayType(elementType, T->getSize(),
                                    T->getSizeModifier(),
                                    T->getIndexTypeCVRQualifiers());
  }

  QualType VisitVariableArrayType(const VariableArrayType *T) {
    QualType elementType = recurse(T->getElementType());
    if (elementType.isNull())
      return QualType();

    if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getVariableArrayType(elementType, T->getSizeExpr(),
                                    T->getSizeModifier(),
                                    T->getIndexTypeCVRQualifiers(),
                                    T->getBracketsRange());
  }

  QualType VisitIncompleteArrayType(const IncompleteArrayType *T) {
    QualType elementType = recurse(T->getElementType());
    if (elementType.isNull())
      return QualType();

    if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getIncompleteArrayType(elementType, T->getSizeModifier(),
                                      T->getIndexTypeCVRQualifiers());
  }

  QualType VisitVectorType(const VectorType *T) {
    QualType elementType = recurse(T->getElementType());
    if (elementType.isNull())
      return QualType();

    if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getVectorType(elementType, T->getNumElements(),
                             T->getVectorKind());
  }

  QualType VisitExtVectorType(const ExtVectorType *T) {
    QualType elementType = recurse(T->getElementType());
    if (elementType.isNull())
      return QualType();

    if (elementType.getAsOpaquePtr() == T->getElementType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getExtVectorType(elementType, T->getNumElements());
  }

  QualType VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
    QualType returnType = recurse(T->getReturnType());
    if (returnType.isNull())
      return QualType();

    if (returnType.getAsOpaquePtr() == T->getReturnType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getFunctionNoProtoType(returnType, T->getExtInfo());
  }

  QualType VisitFunctionProtoType(const FunctionProtoType *T) {
    QualType returnType = recurse(T->getReturnType());
    if (returnType.isNull())
      return QualType();

    // Transform parameter types.
    SmallVector<QualType, 4> paramTypes;
    bool paramChanged = false;
    for (auto paramType : T->getParamTypes()) {
      QualType newParamType = recurse(paramType);
      if (newParamType.isNull())
        return QualType();

      if (newParamType.getAsOpaquePtr() != paramType.getAsOpaquePtr())
        paramChanged = true;

      paramTypes.push_back(newParamType);
    }

    // Transform extended info.
    FunctionProtoType::ExtProtoInfo info = T->getExtProtoInfo();
    bool exceptionChanged = false;
    if (info.ExceptionSpec.Type == EST_Dynamic) {
      SmallVector<QualType, 4> exceptionTypes;
      for (auto exceptionType : info.ExceptionSpec.Exceptions) {
        QualType newExceptionType = recurse(exceptionType);
        if (newExceptionType.isNull())
          return QualType();

        if (newExceptionType.getAsOpaquePtr()
              != exceptionType.getAsOpaquePtr())
          exceptionChanged = true;

        exceptionTypes.push_back(newExceptionType);
      }

      if (exceptionChanged) {
        // The new exception list must outlive this call; copy it into the
        // ASTContext's allocator.
        unsigned size = sizeof(QualType) * exceptionTypes.size();
        void *mem = Ctx.Allocate(size, llvm::alignOf<QualType>());
        memcpy(mem, exceptionTypes.data(), size);
        info.ExceptionSpec.Exceptions
          = llvm::makeArrayRef((QualType *)mem, exceptionTypes.size());
      }
    }

    if (returnType.getAsOpaquePtr() == T->getReturnType().getAsOpaquePtr() &&
        !paramChanged && !exceptionChanged)
      return QualType(T, 0);

    // HLSL Change - the following is incorrect w.r.t param modifiers
    return Ctx.getFunctionType(returnType, paramTypes, info, T->getParamMods());
  }

  QualType VisitParenType(const ParenType *T) {
    QualType innerType = recurse(T->getInnerType());
    if (innerType.isNull())
      return QualType();

    if (innerType.getAsOpaquePtr() == T->getInnerType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getParenType(innerType);
  }

  TRIVIAL_TYPE_CLASS(Typedef)

  QualType VisitAdjustedType(const AdjustedType *T) {
    QualType originalType = recurse(T->getOriginalType());
    if (originalType.isNull())
      return QualType();

    QualType adjustedType = recurse(T->getAdjustedType());
    if (adjustedType.isNull())
      return QualType();

    if (originalType.getAsOpaquePtr()
          == T->getOriginalType().getAsOpaquePtr() &&
        adjustedType.getAsOpaquePtr() == T->getAdjustedType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getAdjustedType(originalType, adjustedType);
  }

  QualType VisitDecayedType(const DecayedType *T) {
    QualType originalType = recurse(T->getOriginalType());
    if (originalType.isNull())
      return QualType();

    if (originalType.getAsOpaquePtr()
          == T->getOriginalType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getDecayedType(originalType);
  }

  TRIVIAL_TYPE_CLASS(TypeOfExpr)
  TRIVIAL_TYPE_CLASS(TypeOf)
  TRIVIAL_TYPE_CLASS(Decltype)
  TRIVIAL_TYPE_CLASS(UnaryTransform)
  TRIVIAL_TYPE_CLASS(Record)
  TRIVIAL_TYPE_CLASS(Enum)

  // FIXME: Non-trivial to implement, but important for C++
  TRIVIAL_TYPE_CLASS(Elaborated)

  QualType VisitAttributedType(const AttributedType *T) {
    QualType modifiedType = recurse(T->getModifiedType());
    if (modifiedType.isNull())
      return QualType();

    QualType equivalentType = recurse(T->getEquivalentType());
    if (equivalentType.isNull())
      return QualType();

    if (modifiedType.getAsOpaquePtr()
          == T->getModifiedType().getAsOpaquePtr() &&
        equivalentType.getAsOpaquePtr()
          == T->getEquivalentType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getAttributedType(T->getAttrKind(), modifiedType,
                                 equivalentType);
  }

  QualType VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
    QualType replacementType = recurse(T->getReplacementType());
    if (replacementType.isNull())
      return QualType();

    if (replacementType.getAsOpaquePtr()
          == T->getReplacementType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getSubstTemplateTypeParmType(T->getReplacedParameter(),
                                            replacementType);
  }

  // FIXME: Non-trivial to implement, but important for C++
  TRIVIAL_TYPE_CLASS(TemplateSpecialization)

  QualType VisitAutoType(const AutoType *T) {
    if (!T->isDeduced())
      return QualType(T, 0);

    QualType deducedType = recurse(T->getDeducedType());
    if (deducedType.isNull())
      return QualType();

    if (deducedType.getAsOpaquePtr()
          == T->getDeducedType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getAutoType(deducedType, T->isDecltypeAuto(),
                           T->isDependentType());
  }

  // FIXME: Non-trivial to implement, but important for C++
  TRIVIAL_TYPE_CLASS(PackExpansion)

  QualType VisitObjCObjectType(const ObjCObjectType *T) {
    QualType baseType = recurse(T->getBaseType());
    if (baseType.isNull())
      return QualType();

    // Transform type arguments.
    bool typeArgChanged = false;
    SmallVector<QualType, 4> typeArgs;
    for (auto typeArg : T->getTypeArgsAsWritten()) {
      QualType newTypeArg = recurse(typeArg);
      if (newTypeArg.isNull())
        return QualType();

      if (newTypeArg.getAsOpaquePtr() != typeArg.getAsOpaquePtr())
        typeArgChanged = true;

      typeArgs.push_back(newTypeArg);
    }

    if (baseType.getAsOpaquePtr() == T->getBaseType().getAsOpaquePtr() &&
        !typeArgChanged)
      return QualType(T, 0);

    return Ctx.getObjCObjectType(baseType, typeArgs,
                                 llvm::makeArrayRef(T->qual_begin(),
                                                    T->getNumProtocols()),
                                 T->isKindOfTypeAsWritten());
  }

  TRIVIAL_TYPE_CLASS(ObjCInterface)

  QualType VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
    QualType pointeeType = recurse(T->getPointeeType());
    if (pointeeType.isNull())
      return QualType();

    if (pointeeType.getAsOpaquePtr()
          == T->getPointeeType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getObjCObjectPointerType(pointeeType);
  }

  QualType VisitAtomicType(const AtomicType *T) {
    QualType valueType = recurse(T->getValueType());
    if (valueType.isNull())
      return QualType();

    if (valueType.getAsOpaquePtr()
          == T->getValueType().getAsOpaquePtr())
      return QualType(T, 0);

    return Ctx.getAtomicType(valueType);
  }

#undef TRIVIAL_TYPE_CLASS
};

/// Perform a simple type transformation that does not change the
/// semantics of the type.
template<typename F>
QualType simpleTransform(ASTContext &ctx, QualType type, F &&f) {
  // Transform the type. If it changed, return the transformed result.
  QualType transformed = f(type);
  if (transformed.getAsOpaquePtr() != type.getAsOpaquePtr())
    return transformed;

  // Split out the qualifiers from the type.
  SplitQualType splitType = type.split();

  // Visit the type itself.
  SimpleTransformVisitor<F> visitor(ctx, std::move(f));
  QualType result = visitor.Visit(splitType.Ty);
  if (result.isNull())
    return result;

  // Reconstruct the transformed type by applying the local qualifiers
  // from the split type.
  return ctx.getQualifiedType(result, splitType.Quals);
}

} // end anonymous namespace
/// Substitute the given type arguments for Objective-C type
/// parameters within the given type, recursively.
///
/// \param ctx the ASTContext used to build substituted types.
/// \param typeArgs the concrete type arguments; when empty, the type
///        parameter's bound (underlying type) is substituted instead.
/// \param context where the substitution occurs (result, parameter, ...),
///        which decides whether __kindof is layered on.
QualType QualType::substObjCTypeArgs(
           ASTContext &ctx,
           ArrayRef<QualType> typeArgs,
           ObjCSubstitutionContext context) const {
  return simpleTransform(ctx, *this,
                         [&](QualType type) -> QualType {
    SplitQualType splitType = type.split();

    // Replace an Objective-C type parameter reference with the corresponding
    // type argument.
    if (const auto *typedefTy = dyn_cast<TypedefType>(splitType.Ty)) {
      if (auto *typeParam = dyn_cast<ObjCTypeParamDecl>(typedefTy->getDecl())) {
        // If we have type arguments, use them.
        if (!typeArgs.empty()) {
          // FIXME: Introduce SubstObjCTypeParamType ?
          QualType argType = typeArgs[typeParam->getIndex()];
          return ctx.getQualifiedType(argType, splitType.Quals);
        }

        switch (context) {
        case ObjCSubstitutionContext::Ordinary:
        case ObjCSubstitutionContext::Parameter:
        case ObjCSubstitutionContext::Superclass:
          // Substitute the bound.
          return ctx.getQualifiedType(typeParam->getUnderlyingType(),
                                      splitType.Quals);

        case ObjCSubstitutionContext::Result:
        case ObjCSubstitutionContext::Property: {
          // Substitute the __kindof form of the underlying type.
          const auto *objPtr = typeParam->getUnderlyingType()
            ->castAs<ObjCObjectPointerType>();

          // __kindof types, id, and Class don't need an additional
          // __kindof.
          if (objPtr->isKindOfType() || objPtr->isObjCIdOrClassType())
            return ctx.getQualifiedType(typeParam->getUnderlyingType(),
                                        splitType.Quals);

          // Add __kindof.
          const auto *obj = objPtr->getObjectType();
          QualType resultTy = ctx.getObjCObjectType(obj->getBaseType(),
                                                    obj->getTypeArgsAsWritten(),
                                                    obj->getProtocols(),
                                                    /*isKindOf=*/true);

          // Rebuild object pointer type.
          resultTy = ctx.getObjCObjectPointerType(resultTy);
          return ctx.getQualifiedType(resultTy, splitType.Quals);
        }
        }
      }
    }

    // If we have a function type, update the context appropriately.
    if (const auto *funcType = dyn_cast<FunctionType>(splitType.Ty)) {
      // Substitute result type.
      QualType returnType = funcType->getReturnType().substObjCTypeArgs(
                              ctx,
                              typeArgs,
                              ObjCSubstitutionContext::Result);
      if (returnType.isNull())
        return QualType();

      // Handle non-prototyped functions, which only substitute into the result
      // type.
      if (isa<FunctionNoProtoType>(funcType)) {
        // If the return type was unchanged, do nothing.
        if (returnType.getAsOpaquePtr()
              == funcType->getReturnType().getAsOpaquePtr())
          return type;

        // Otherwise, build a new type.
        return ctx.getFunctionNoProtoType(returnType, funcType->getExtInfo());
      }

      const auto *funcProtoType = cast<FunctionProtoType>(funcType);

      // Transform parameter types.
      SmallVector<QualType, 4> paramTypes;
      bool paramChanged = false;
      for (auto paramType : funcProtoType->getParamTypes()) {
        QualType newParamType = paramType.substObjCTypeArgs(
                                  ctx,
                                  typeArgs,
                                  ObjCSubstitutionContext::Parameter);
        if (newParamType.isNull())
          return QualType();

        if (newParamType.getAsOpaquePtr() != paramType.getAsOpaquePtr())
          paramChanged = true;

        paramTypes.push_back(newParamType);
      }

      // Transform extended info.
      FunctionProtoType::ExtProtoInfo info = funcProtoType->getExtProtoInfo();
      bool exceptionChanged = false;
      if (info.ExceptionSpec.Type == EST_Dynamic) {
        SmallVector<QualType, 4> exceptionTypes;
        for (auto exceptionType : info.ExceptionSpec.Exceptions) {
          QualType newExceptionType = exceptionType.substObjCTypeArgs(
                                        ctx,
                                        typeArgs,
                                        ObjCSubstitutionContext::Ordinary);
          if (newExceptionType.isNull())
            return QualType();

          if (newExceptionType.getAsOpaquePtr()
                != exceptionType.getAsOpaquePtr())
            exceptionChanged = true;

          exceptionTypes.push_back(newExceptionType);
        }

        if (exceptionChanged) {
          // Copy the changed exception list into context-owned storage so it
          // outlives this lambda.
          unsigned size = sizeof(QualType) * exceptionTypes.size();
          void *mem = ctx.Allocate(size, llvm::alignOf<QualType>());
          memcpy(mem, exceptionTypes.data(), size);
          info.ExceptionSpec.Exceptions
            = llvm::makeArrayRef((QualType *)mem, exceptionTypes.size());
        }
      }

      if (returnType.getAsOpaquePtr()
            == funcProtoType->getReturnType().getAsOpaquePtr() &&
          !paramChanged && !exceptionChanged)
        return type;

      return ctx.getFunctionType(returnType, paramTypes, info, None); // HLSL Change - add param mods
    }

    // Substitute into the type arguments of a specialized Objective-C object
    // type.
    if (const auto *objcObjectType = dyn_cast<ObjCObjectType>(splitType.Ty)) {
      if (objcObjectType->isSpecializedAsWritten()) {
        SmallVector<QualType, 4> newTypeArgs;
        bool anyChanged = false;
        for (auto typeArg : objcObjectType->getTypeArgsAsWritten()) {
          QualType newTypeArg = typeArg.substObjCTypeArgs(
                                  ctx, typeArgs,
                                  ObjCSubstitutionContext::Ordinary);
          if (newTypeArg.isNull())
            return QualType();

          if (newTypeArg.getAsOpaquePtr() != typeArg.getAsOpaquePtr()) {
            // If we're substituting based on an unspecialized context type,
            // produce an unspecialized type.
            ArrayRef<ObjCProtocolDecl *> protocols(
                                           objcObjectType->qual_begin(),
                                           objcObjectType->getNumProtocols());
            if (typeArgs.empty() &&
                context != ObjCSubstitutionContext::Superclass) {
              return ctx.getObjCObjectType(
                       objcObjectType->getBaseType(), { },
                       protocols,
                       objcObjectType->isKindOfTypeAsWritten());
            }

            anyChanged = true;
          }

          newTypeArgs.push_back(newTypeArg);
        }

        if (anyChanged) {
          ArrayRef<ObjCProtocolDecl *> protocols(
                                         objcObjectType->qual_begin(),
                                         objcObjectType->getNumProtocols());
          return ctx.getObjCObjectType(objcObjectType->getBaseType(),
                                       newTypeArgs, protocols,
                                       objcObjectType->isKindOfTypeAsWritten());
        }
      }

      return type;
    }

    return type;
  });
}
QualType QualType::substObjCMemberType(QualType objectType,
                                       const DeclContext *dc,
                                       ObjCSubstitutionContext context) const {
  // Compute the substitutions implied by using a member of \p dc through a
  // receiver of type \p objectType; if there are none, this type is returned
  // unchanged.
  auto subs = objectType->getObjCSubstitutions(dc);
  if (!subs)
    return *this;
  return substObjCTypeArgs(dc->getParentASTContext(), *subs, context);
}
/// Strip __kindof from any Objective-C object type embedded in this type,
/// preserving qualifiers, type arguments, and protocol qualifiers.
QualType QualType::stripObjCKindOfType(const ASTContext &constCtx) const {
  // FIXME: Because ASTContext::getAttributedType() is non-const.
  auto &ctx = const_cast<ASTContext &>(constCtx);
  return simpleTransform(ctx, *this,
                         [&](QualType type) -> QualType {
    SplitQualType splitType = type.split();
    if (auto *objType = splitType.Ty->getAs<ObjCObjectType>()) {
      if (!objType->isKindOfType())
        return type;

      // Rebuild the object type with isKindOf=false, recursing into the base.
      QualType baseType
        = objType->getBaseType().stripObjCKindOfType(ctx);
      return ctx.getQualifiedType(
               ctx.getObjCObjectType(baseType,
                                     objType->getTypeArgsAsWritten(),
                                     objType->getProtocols(),
                                     /*isKindOf=*/false),
               splitType.Quals);
    }

    return type;
  });
}
/// Determine the type arguments that should replace the Objective-C type
/// parameters of the class/category enclosing \p dc when one of its members
/// is accessed through this (receiver) type.
///
/// \returns None when the enclosing context has no type parameters; an empty
/// array when the parameter bounds should be substituted; otherwise the
/// receiver's type arguments, remapped along the superclass chain.
Optional<ArrayRef<QualType>> Type::getObjCSubstitutions(
                               const DeclContext *dc) const {
  // Look through method scopes.
  if (auto method = dyn_cast<ObjCMethodDecl>(dc))
    dc = method->getDeclContext();

  // Find the class or category in which the type we're substituting
  // was declared.
  const ObjCInterfaceDecl *dcClassDecl = dyn_cast<ObjCInterfaceDecl>(dc);
  const ObjCCategoryDecl *dcCategoryDecl = nullptr;
  ObjCTypeParamList *dcTypeParams = nullptr;
  if (dcClassDecl) {
    // If the class does not have any type parameters, there's no
    // substitution to do.
    dcTypeParams = dcClassDecl->getTypeParamList();
    if (!dcTypeParams)
      return None;
  } else {
    // If we are in neither a class nor a category, there's no
    // substitution to perform.
    dcCategoryDecl = dyn_cast<ObjCCategoryDecl>(dc);
    if (!dcCategoryDecl)
      return None;

    // If the category does not have any type parameters, there's no
    // substitution to do.
    dcTypeParams = dcCategoryDecl->getTypeParamList();
    if (!dcTypeParams)
      return None;

    dcClassDecl = dcCategoryDecl->getClassInterface();
    if (!dcClassDecl)
      return None;
  }
  assert(dcTypeParams && "No substitutions to perform");
  assert(dcClassDecl && "No class context");

  // Find the underlying object type.
  const ObjCObjectType *objectType;
  if (const auto *objectPointerType = getAs<ObjCObjectPointerType>()) {
    objectType = objectPointerType->getObjectType();
  } else if (getAs<BlockPointerType>()) {
    // Treat a block-pointer receiver as 'id'.
    ASTContext &ctx = dc->getParentASTContext();
    objectType = ctx.getObjCObjectType(ctx.ObjCBuiltinIdTy, { }, { })
                   ->castAs<ObjCObjectType>();
  } else {
    objectType = getAs<ObjCObjectType>();
  }

  /// Extract the class from the receiver object type.
  ObjCInterfaceDecl *curClassDecl = objectType ? objectType->getInterface()
                                               : nullptr;
  if (!curClassDecl) {
    // If we don't have a context type (e.g., this is "id" or some
    // variant thereof), substitute the bounds.
    return llvm::ArrayRef<QualType>();
  }

  // Follow the superclass chain until we've mapped the receiver type
  // to the same class as the context.
  while (curClassDecl != dcClassDecl) {
    // Map to the superclass type.
    QualType superType = objectType->getSuperClassType();
    if (superType.isNull()) {
      objectType = nullptr;
      break;
    }

    objectType = superType->castAs<ObjCObjectType>();
    curClassDecl = objectType->getInterface();
  }

  // If we don't have a receiver type, or the receiver type does not
  // have type arguments, substitute in the defaults.
  if (!objectType || objectType->isUnspecialized()) {
    return llvm::ArrayRef<QualType>();
  }

  // The receiver type has the type arguments we want.
  return objectType->getTypeArgs();
}
bool Type::acceptsObjCTypeParams() const {
if (auto *IfaceT = getAsObjCInterfaceType()) {
if (auto *ID = IfaceT->getInterface()) {
if (ID->getTypeParamList())
return true;
}
}
return false;
}
/// Compute and cache (in CachedSuperClassType) the superclass type of this
/// Objective-C object type, applying this type's type arguments to the
/// superclass reference where needed. The "int" flag of the cache marks the
/// computation as done.
void ObjCObjectType::computeSuperClassTypeSlow() const {
  // Retrieve the class declaration for this type. If there isn't one
  // (e.g., this is some variant of "id" or "Class"), then there is no
  // superclass type.
  ObjCInterfaceDecl *classDecl = getInterface();
  if (!classDecl) {
    CachedSuperClassType.setInt(true);
    return;
  }

  // Extract the superclass type.
  const ObjCObjectType *superClassObjTy = classDecl->getSuperClassType();
  if (!superClassObjTy) {
    CachedSuperClassType.setInt(true);
    return;
  }

  ObjCInterfaceDecl *superClassDecl = superClassObjTy->getInterface();
  if (!superClassDecl) {
    CachedSuperClassType.setInt(true);
    return;
  }

  // If the superclass doesn't have type parameters, then there is no
  // substitution to perform.
  QualType superClassType(superClassObjTy, 0);
  ObjCTypeParamList *superClassTypeParams = superClassDecl->getTypeParamList();
  if (!superClassTypeParams) {
    CachedSuperClassType.setPointerAndInt(
      superClassType->castAs<ObjCObjectType>(), true);
    return;
  }

  // If the superclass reference is unspecialized, return it.
  if (superClassObjTy->isUnspecialized()) {
    CachedSuperClassType.setPointerAndInt(superClassObjTy, true);
    return;
  }

  // If the subclass is not parameterized, there aren't any type
  // parameters in the superclass reference to substitute.
  ObjCTypeParamList *typeParams = classDecl->getTypeParamList();
  if (!typeParams) {
    CachedSuperClassType.setPointerAndInt(
      superClassType->castAs<ObjCObjectType>(), true);
    return;
  }

  // If the subclass type isn't specialized, return the unspecialized
  // superclass.
  if (isUnspecialized()) {
    QualType unspecializedSuper
      = classDecl->getASTContext().getObjCInterfaceType(
          superClassObjTy->getInterface());
    CachedSuperClassType.setPointerAndInt(
      unspecializedSuper->castAs<ObjCObjectType>(),
      true);
    return;
  }

  // Substitute the provided type arguments into the superclass type.
  ArrayRef<QualType> typeArgs = getTypeArgs();
  assert(typeArgs.size() == typeParams->size());
  CachedSuperClassType.setPointerAndInt(
    superClassType.substObjCTypeArgs(classDecl->getASTContext(), typeArgs,
                                     ObjCSubstitutionContext::Superclass)
      ->castAs<ObjCObjectType>(),
    true);
}
/// If the pointee names an interface, return the bare interface type for that
/// declaration; nullptr for 'id', 'Class', and qualified-id types.
const ObjCInterfaceType *ObjCObjectPointerType::getInterfaceType() const {
  if (auto interfaceDecl = getObjectType()->getInterface()) {
    return interfaceDecl->getASTContext().getObjCInterfaceType(interfaceDecl)
             ->castAs<ObjCInterfaceType>();
  }

  return nullptr;
}
/// Return a pointer type to the pointee's superclass type, or a null
/// QualType if there is no superclass.
/// NOTE(review): getInterfaceDecl() is dereferenced unchecked here —
/// presumably a non-null super object type implies an interface decl exists;
/// confirm before relying on this with 'id'-like pointees.
QualType ObjCObjectPointerType::getSuperClassType() const {
  QualType superObjectType = getObjectType()->getSuperClassType();
  if (superObjectType.isNull())
    return superObjectType;

  ASTContext &ctx = getInterfaceDecl()->getASTContext();
  return ctx.getObjCObjectPointerType(superObjectType);
}
const ObjCObjectType *Type::getAsObjCQualifiedInterfaceType() const {
  // There is no sugar for ObjCObjectType's, just return the canonical
  // type pointer if it is the right class. There is no typedef information to
  // return and these cannot be Address-space qualified.
  const ObjCObjectType *T = getAs<ObjCObjectType>();
  if (!T || !T->getNumProtocols() || !T->getInterface())
    return nullptr;
  return T;
}
// Predicate form of getAsObjCQualifiedInterfaceType().
bool Type::isObjCQualifiedInterfaceType() const {
  return getAsObjCQualifiedInterfaceType() != nullptr;
}
const ObjCObjectPointerType *Type::getAsObjCQualifiedIdType() const {
  // There is no sugar for ObjCQualifiedIdType's, just return the canonical
  // type pointer if it is the right class.
  const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>();
  return (OPT && OPT->isObjCQualifiedIdType()) ? OPT : nullptr;
}
const ObjCObjectPointerType *Type::getAsObjCQualifiedClassType() const {
  // There is no sugar for ObjCQualifiedClassType's, just return the canonical
  // type pointer if it is the right class.
  const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>();
  return (OPT && OPT->isObjCQualifiedClassType()) ? OPT : nullptr;
}
const ObjCObjectType *Type::getAsObjCInterfaceType() const {
  // An ObjC object type counts only if it names a concrete interface.
  if (const ObjCObjectType *OT = getAs<ObjCObjectType>())
    return OT->getInterface() ? OT : nullptr;
  return nullptr;
}
const ObjCObjectPointerType *Type::getAsObjCInterfacePointerType() const {
  // An ObjC object pointer counts only if its pointee names an interface.
  if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>())
    return OPT->getInterfaceType() ? OPT : nullptr;
  return nullptr;
}
const CXXRecordDecl *Type::getPointeeCXXRecordDecl() const {
  // Look through exactly one level of pointer or reference, then require
  // the pointee to be a C++ class.
  QualType Pointee;
  if (const PointerType *Ptr = getAs<PointerType>())
    Pointee = Ptr->getPointeeType();
  else if (const ReferenceType *Ref = getAs<ReferenceType>())
    Pointee = Ref->getPointeeType();
  else
    return nullptr;

  const RecordType *Rec = Pointee->getAs<RecordType>();
  return Rec ? dyn_cast<CXXRecordDecl>(Rec->getDecl()) : nullptr;
}
CXXRecordDecl *Type::getAsCXXRecordDecl() const {
  // Reuse the generic tag lookup and narrow the result to C++ classes.
  if (TagDecl *TD = getAsTagDecl())
    return dyn_cast<CXXRecordDecl>(TD);
  return nullptr;
}
TagDecl *Type::getAsTagDecl() const {
  // A tag declaration is reachable either directly through a TagType or
  // through the injected-class-name inside a class template.
  if (const TagType *TT = getAs<TagType>())
    return cast<TagDecl>(TT->getDecl());
  if (const InjectedClassNameType *ICNT = getAs<InjectedClassNameType>())
    return ICNT->getDecl();
  return nullptr;
}
namespace {
  /// Type visitor that searches a type, and the component types it is built
  /// from, for a contained AutoType.  Returns the AutoType when found and
  /// null otherwise.  Only type forms that can syntactically carry a
  /// deduced 'auto' are traversed; anything else yields the default (null).
  class GetContainedAutoVisitor :
    public TypeVisitor<GetContainedAutoVisitor, AutoType*> {
  public:
    using TypeVisitor<GetContainedAutoVisitor, AutoType*>::Visit;
    // QualType entry point: unwrap to the bare Type*, treating a null
    // QualType as "no 'auto' found".
    AutoType *Visit(QualType T) {
      if (T.isNull())
        return nullptr;
      return Visit(T.getTypePtr());
    }

    // The 'auto' type itself.
    AutoType *VisitAutoType(const AutoType *AT) {
      return const_cast<AutoType*>(AT);
    }

    // Only these types can contain the desired 'auto' type.
    AutoType *VisitPointerType(const PointerType *T) {
      return Visit(T->getPointeeType());
    }
    AutoType *VisitBlockPointerType(const BlockPointerType *T) {
      return Visit(T->getPointeeType());
    }
    AutoType *VisitReferenceType(const ReferenceType *T) {
      // Use the as-written pointee so sugar is not skipped.
      return Visit(T->getPointeeTypeAsWritten());
    }
    AutoType *VisitMemberPointerType(const MemberPointerType *T) {
      return Visit(T->getPointeeType());
    }
    AutoType *VisitArrayType(const ArrayType *T) {
      return Visit(T->getElementType());
    }
    AutoType *VisitDependentSizedExtVectorType(
      const DependentSizedExtVectorType *T) {
      return Visit(T->getElementType());
    }
    AutoType *VisitVectorType(const VectorType *T) {
      return Visit(T->getElementType());
    }
    // For functions, only the return type can contain 'auto'.
    AutoType *VisitFunctionType(const FunctionType *T) {
      return Visit(T->getReturnType());
    }
    AutoType *VisitParenType(const ParenType *T) {
      return Visit(T->getInnerType());
    }
    AutoType *VisitAttributedType(const AttributedType *T) {
      return Visit(T->getModifiedType());
    }
    AutoType *VisitAdjustedType(const AdjustedType *T) {
      return Visit(T->getOriginalType());
    }
  };
}
AutoType *Type::getContainedAutoType() const {
  // Walk the type structure looking for a contained 'auto'.
  GetContainedAutoVisitor Visitor;
  return Visitor.Visit(this);
}
/// Whether this type is, or is a vector whose elements are, an integer type.
bool Type::hasIntegerRepresentation() const {
  // Vectors answer for their element type; everything else answers for
  // itself.
  if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
    return VT->getElementType()->isIntegerType();
  // HLSL Change Begins
  else if (hlsl::IsHLSLVecType(CanonicalType))
    return hlsl::GetHLSLVecElementType(CanonicalType)->isIntegerType();
  // HLSL Change Ends
  else
    return isIntegerType();
}
/// \brief Determine whether this type is an integral type.
///
/// This routine determines whether the given type is an integral type per
/// C++ [basic.fundamental]p7. Although the C standard does not define the
/// term "integral type", it has a similar term "integer type", and in C++
/// the two terms are equivalent. However, C's "integer type" includes
/// enumeration types, while C++'s "integer type" does not. The \c ASTContext
/// parameter is used to determine whether we should be following the C or
/// C++ rules when determining whether this type is an integral/integer type.
///
/// For cases where C permits "an integer type" and C++ permits "an integral
/// type", use this routine.
///
/// For cases where C permits "an integer type" and C++ permits "an integral
/// or enumeration type", use \c isIntegralOrEnumerationType() instead.
///
/// \param Ctx The context in which this type occurs.
///
/// \returns true if the type is considered an integral type, false otherwise.
bool Type::isIntegralType(ASTContext &Ctx) const {
  // Builtin integral kinds occupy a contiguous range of the Kind enum.
  if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
    return BT->getKind() >= BuiltinType::Bool &&
           BT->getKind() <= BuiltinType::LitInt; // HLSL Change - LitInt is the last integral, not Int128
  // Complete enum types are integral in C.
  if (!Ctx.getLangOpts().CPlusPlus)
    if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
      return ET->getDecl()->isComplete();
  return false;
}
/// Whether this type is an integral type or a complete unscoped enumeration.
bool Type::isIntegralOrUnscopedEnumerationType() const {
  // Builtin integral kinds occupy a contiguous range of the Kind enum.
  if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
    return BT->getKind() >= BuiltinType::Bool &&
           BT->getKind() <= BuiltinType::LitInt; // HLSL Change - LitInt is the last integral, not Int128
  // Check for a complete enum type; incomplete enum types are not properly an
  // enumeration type in the sense required here.
  // C++0x: However, if the underlying type of the enum is fixed, it is
  // considered complete.
  if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
    return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
  return false;
}
bool Type::isCharType() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() == BuiltinType::Char_U ||
BT->getKind() == BuiltinType::UChar ||
BT->getKind() == BuiltinType::Char_S ||
BT->getKind() == BuiltinType::SChar;
return false;
}
bool Type::isWideCharType() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() == BuiltinType::WChar_S ||
BT->getKind() == BuiltinType::WChar_U;
return false;
}
bool Type::isChar16Type() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() == BuiltinType::Char16;
return false;
}
bool Type::isChar32Type() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() == BuiltinType::Char32;
return false;
}
/// \brief Determine whether this type is any of the built-in character
/// types.
bool Type::isAnyCharacterType() const {
const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType);
if (!BT) return false;
switch (BT->getKind()) {
default: return false;
case BuiltinType::Char_U:
case BuiltinType::UChar:
case BuiltinType::WChar_U:
case BuiltinType::Char16:
case BuiltinType::Char32:
case BuiltinType::Char_S:
case BuiltinType::SChar:
case BuiltinType::WChar_S:
return true;
}
}
/// isSignedIntegerType - Return true if this is an integer type that is
/// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..],
/// an enum decl which has a signed representation
bool Type::isSignedIntegerType() const {
  // Signed builtin kinds occupy the contiguous [Char_S, LitInt] range.
  if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
    return BT->getKind() >= BuiltinType::Char_S &&
           BT->getKind() <= BuiltinType::LitInt; // HLSL Change - LitInt is the last integral
  }

  if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
    // Incomplete enum types are not treated as integer types.
    // FIXME: In C++, enum types are never integer types.
    if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
      return ET->getDecl()->getIntegerType()->isSignedIntegerType();
  }

  return false;
}
/// Like isSignedIntegerType(), but also accepts complete scoped enums whose
/// underlying type is signed.
bool Type::isSignedIntegerOrEnumerationType() const {
  if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
    return BT->getKind() >= BuiltinType::Char_S &&
           BT->getKind() <= BuiltinType::LitInt; // HLSL Change - LitInt is the last integral
  }

  // Unlike isSignedIntegerType(), scoped enums are not excluded here.
  if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
    if (ET->getDecl()->isComplete())
      return ET->getDecl()->getIntegerType()->isSignedIntegerType();
  }

  return false;
}
/// Whether this type is, or is a vector whose elements are, a signed
/// integer or enumeration type.
bool Type::hasSignedIntegerRepresentation() const {
  if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
    return VT->getElementType()->isSignedIntegerOrEnumerationType();
  // HLSL Change Begins
  else if (hlsl::IsHLSLVecType(CanonicalType))
    return hlsl::GetHLSLVecElementType(CanonicalType)->isSignedIntegerOrEnumerationType();
  // HLSL Change Ends
  else
    return isSignedIntegerOrEnumerationType();
}
/// isUnsignedIntegerType - Return true if this is an integer type that is
/// unsigned, according to C99 6.2.5p6 [which returns true for _Bool], an enum
/// decl which has an unsigned representation
bool Type::isUnsignedIntegerType() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::UInt128;
}
if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
// Incomplete enum types are not treated as integer types.
// FIXME: In C++, enum types are never integer types.
if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
}
return false;
}
bool Type::isUnsignedIntegerOrEnumerationType() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::UInt128;
}
if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
if (ET->getDecl()->isComplete())
return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
}
return false;
}
/// Whether this type is, or is a vector whose elements are, an unsigned
/// integer or enumeration type.
bool Type::hasUnsignedIntegerRepresentation() const {
  if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
    return VT->getElementType()->isUnsignedIntegerOrEnumerationType();
  // HLSL Change Begins
  else if (hlsl::IsHLSLVecType(CanonicalType))
    return hlsl::GetHLSLVecElementType(CanonicalType)->isUnsignedIntegerOrEnumerationType();
  // HLSL Change Ends
  else
    return isUnsignedIntegerOrEnumerationType();
}
/// Whether this is a floating-point builtin (half through literal float in
/// the kind ordering) or a complex type with a floating element.
bool Type::isFloatingType() const {
  if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
    return BT->getKind() >= BuiltinType::Half &&
           BT->getKind() <= BuiltinType::LitFloat; // HLSL Change
  if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
    return CT->getElementType()->isFloatingType();
  return false;
}
/// Whether this type is, or is a vector whose elements are, a
/// floating-point type.
bool Type::hasFloatingRepresentation() const {
  if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
    return VT->getElementType()->isFloatingType();
  // HLSL Change Begins
  else if (hlsl::IsHLSLVecType(CanonicalType))
    return hlsl::GetHLSLVecElementType(CanonicalType)->isFloatingType();
  // HLSL Change Ends
  else
    return isFloatingType();
}
bool Type::isRealFloatingType() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->isFloatingPoint();
return false;
}
/// Whether this is a real type: an integral or floating builtin, or a
/// complete unscoped enumeration.
bool Type::isRealType() const {
  if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
    return BT->getKind() >= BuiltinType::Bool &&
           BT->getKind() <= BuiltinType::LitFloat; // HLSL Change
  if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
    return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
  return false;
}
/// Whether this is an arithmetic type: integral/floating builtins, complete
/// unscoped enumerations, and complex types.
bool Type::isArithmeticType() const {
  if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
    return BT->getKind() >= BuiltinType::Bool &&
           BT->getKind() <= BuiltinType::LitFloat; // HLSL Change
  if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
    // GCC allows forward declaration of enum types (forbid by C99 6.7.2.3p2).
    // If a body isn't seen by the time we get here, return false.
    //
    // C++0x: Enumerations are not arithmetic types. For now, just return
    // false for scoped enumerations since that will disable any
    // unwanted implicit conversions.
    return !ET->getDecl()->isScoped() && ET->getDecl()->isComplete();
  return isa<ComplexType>(CanonicalType);
}
/// Classify a scalar type into one of the ScalarTypeKind buckets.
/// Precondition: isScalarType() holds; hitting an unclassified type is a
/// programming error (llvm_unreachable).
Type::ScalarTypeKind Type::getScalarTypeKind() const {
  assert(isScalarType());

  const Type *T = CanonicalType.getTypePtr();
  if (const BuiltinType *BT = dyn_cast<BuiltinType>(T)) {
    // Bool and nullptr_t get special buckets before the generic
    // integer/floating classification.
    if (BT->getKind() == BuiltinType::Bool) return STK_Bool;
    if (BT->getKind() == BuiltinType::NullPtr) return STK_CPointer;
    if (BT->isInteger()) return STK_Integral;
    if (BT->isFloatingPoint()) return STK_Floating;
    llvm_unreachable("unknown scalar builtin type");
  } else if (isa<PointerType>(T)) {
    return STK_CPointer;
  } else if (isa<BlockPointerType>(T)) {
    return STK_BlockPointer;
  } else if (isa<ObjCObjectPointerType>(T)) {
    return STK_ObjCObjectPointer;
  } else if (isa<MemberPointerType>(T)) {
    return STK_MemberPointer;
  } else if (isa<EnumType>(T)) {
    // Only complete enums are scalar, and they classify as integral.
    assert(cast<EnumType>(T)->getDecl()->isComplete());
    return STK_Integral;
  } else if (const ComplexType *CT = dyn_cast<ComplexType>(T)) {
    if (CT->getElementType()->isRealFloatingType())
      return STK_FloatingComplex;
    return STK_IntegralComplex;
  }

  llvm_unreachable("unknown scalar type");
}
/// \brief Determines whether the type is a C++ aggregate type or C
/// aggregate or union type.
///
/// An aggregate type is an array or a class type (struct, union, or
/// class) that has no user-declared constructors, no private or
/// protected non-static data members, no base classes, and no virtual
/// functions (C++ [dcl.init.aggr]p1). The notion of an aggregate type
/// subsumes the notion of C aggregates (C99 6.2.5p21) because it also
/// includes union types.
bool Type::isAggregateType() const {
if (const RecordType *Record = dyn_cast<RecordType>(CanonicalType)) {
if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(Record->getDecl()))
return ClassDecl->isAggregate();
return true;
}
return isa<ArrayType>(CanonicalType);
}
/// isConstantSizeType - Return true if this is not a variable sized type,
/// according to the rules of C99 6.7.5p3. It is not legal to call this on
/// incomplete types or dependent types.
bool Type::isConstantSizeType() const {
  assert(!isIncompleteType() && "This doesn't make sense for incomplete types");
  assert(!isDependentType() && "This doesn't make sense for dependent types");
  // A complete VLA is the sole non-constant-size case; it must have a
  // (runtime) size, as it is known to be complete.
  if (isa<VariableArrayType>(CanonicalType))
    return false;
  return true;
}
/// isIncompleteType - Return true if this is an incomplete type (C99 6.2.5p1)
/// - a type that can describe objects, but which lacks information needed to
/// determine its size.
///
/// \param Def If non-null, receives the declaration that would have to be
/// completed (enum, record, or ObjC interface) when one exists; reset to
/// null otherwise.
bool Type::isIncompleteType(NamedDecl **Def) const {
  if (Def)
    *Def = nullptr;

  switch (CanonicalType->getTypeClass()) {
  default: return false;
  case Builtin:
    // Void is the only incomplete builtin type. Per C99 6.2.5p19, it can never
    // be completed.
    return isVoidType();
  case Enum: {
    EnumDecl *EnumD = cast<EnumType>(CanonicalType)->getDecl();
    if (Def)
      *Def = EnumD;

    // An enumeration with fixed underlying type is complete (C++0x 7.2p3).
    if (EnumD->isFixed())
      return false;

    return !EnumD->isCompleteDefinition();
  }
  case Record: {
    // A tagged type (struct/union/enum/class) is incomplete if the decl is a
    // forward declaration, but not a full definition (C99 6.2.5p22).
    RecordDecl *Rec = cast<RecordType>(CanonicalType)->getDecl();
    if (Def)
      *Def = Rec;
    return !Rec->isCompleteDefinition();
  }
  case ConstantArray:
    // An array is incomplete if its element type is incomplete
    // (C++ [dcl.array]p1).
    // We don't handle variable arrays (they're not allowed in C++) or
    // dependent-sized arrays (dependent types are never treated as incomplete).
    return cast<ArrayType>(CanonicalType)->getElementType()
             ->isIncompleteType(Def);
  case IncompleteArray:
    // An array of unknown size is an incomplete type (C99 6.2.5p22).
    return true;
  case ObjCObject:
    // An ObjC object type is as (in)complete as its base type.
    return cast<ObjCObjectType>(CanonicalType)->getBaseType()
             ->isIncompleteType(Def);
  case ObjCInterface: {
    // ObjC interfaces are incomplete if they are @class, not @interface.
    ObjCInterfaceDecl *Interface
      = cast<ObjCInterfaceType>(CanonicalType)->getDecl();
    if (Def)
      *Def = Interface;
    return !Interface->hasDefinition();
  }
  }
}
bool QualType::isPODType(ASTContext &Context) const {
  // C++11 relaxed the definition of POD; dispatch on the language mode.
  return Context.getLangOpts().CPlusPlus11 ? isCXX11PODType(Context)
                                           : isCXX98PODType(Context);
}
/// Determine whether this type is a POD type under the C++98 rules.
bool QualType::isCXX98PODType(ASTContext &Context) const {
  // The compiler shouldn't query this for incomplete types, but the user might.
  // We return false for that case. Except for incomplete arrays of PODs, which
  // are PODs according to the standard.
  if (isNull())
    return false; // was 'return 0' -- this function returns bool

  if ((*this)->isIncompleteArrayType())
    return Context.getBaseElementType(*this).isCXX98PODType(Context);

  if ((*this)->isIncompleteType())
    return false;

  // Under ARC, lifetime-qualified object pointers are never POD.
  if (Context.getLangOpts().ObjCAutoRefCount) {
    switch (getObjCLifetime()) {
    case Qualifiers::OCL_ExplicitNone:
      return true;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
    case Qualifiers::OCL_Autoreleasing:
      return false;

    case Qualifiers::OCL_None:
      break;
    }
  }

  QualType CanonicalType = getTypePtr()->CanonicalType;
  switch (CanonicalType->getTypeClass()) {
    // Everything not explicitly mentioned is not POD.
  default: return false;
  case Type::VariableArray:
  case Type::ConstantArray:
    // IncompleteArray is handled above.
    return Context.getBaseElementType(*this).isCXX98PODType(Context);

  case Type::ObjCObjectPointer:
  case Type::BlockPointer:
  case Type::Builtin:
  case Type::Complex:
  case Type::Pointer:
  case Type::MemberPointer:
  case Type::Vector:
  case Type::ExtVector:
    return true;

  case Type::Enum:
    return true;

  case Type::Record:
    // C++ classes carry their own POD computation; plain C struct/union is
    // always POD.
    if (CXXRecordDecl *ClassDecl
          = dyn_cast<CXXRecordDecl>(cast<RecordType>(CanonicalType)->getDecl()))
      return ClassDecl->isPOD();
    return true;
  }
}
/// Determine whether this type is a trivial type per C++0x [basic.types]p9.
bool QualType::isTrivialType(ASTContext &Context) const {
  // The compiler shouldn't query this for incomplete types, but the user might.
  // We return false for that case. Except for incomplete arrays of PODs, which
  // are PODs according to the standard.
  if (isNull())
    return false; // was 'return 0' -- this function returns bool

  if ((*this)->isArrayType())
    return Context.getBaseElementType(*this).isTrivialType(Context);

  // Return false for incomplete types after skipping any incomplete array
  // types which are expressly allowed by the standard and thus our API.
  if ((*this)->isIncompleteType())
    return false;

  // Under ARC, lifetime-qualified object pointers are never trivial.
  if (Context.getLangOpts().ObjCAutoRefCount) {
    switch (getObjCLifetime()) {
    case Qualifiers::OCL_ExplicitNone:
      return true;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
    case Qualifiers::OCL_Autoreleasing:
      return false;

    case Qualifiers::OCL_None:
      if ((*this)->isObjCLifetimeType())
        return false;
      break;
    }
  }

  QualType CanonicalType = getTypePtr()->CanonicalType;
  if (CanonicalType->isDependentType())
    return false;

  // C++0x [basic.types]p9:
  //   Scalar types, trivial class types, arrays of such types, and
  //   cv-qualified versions of these types are collectively called trivial
  //   types.

  // As an extension, Clang treats vector types as Scalar types.
  if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
    return true;
  if (const RecordType *RT = CanonicalType->getAs<RecordType>()) {
    if (const CXXRecordDecl *ClassDecl =
        dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      // C++11 [class]p6:
      //   A trivial class is a class that has a default constructor,
      //   has no non-trivial default constructors, and is trivially
      //   copyable.
      return ClassDecl->hasDefaultConstructor() &&
             !ClassDecl->hasNonTrivialDefaultConstructor() &&
             ClassDecl->isTriviallyCopyable();
    }

    return true;
  }

  // No other types can match.
  return false;
}
/// Determine whether this type is trivially copyable per
/// C++11 [basic.types]p9.
bool QualType::isTriviallyCopyableType(ASTContext &Context) const {
  // Arrays are as trivially copyable as their element type.
  if ((*this)->isArrayType())
    return Context.getBaseElementType(*this).isTriviallyCopyableType(Context);

  // Under ARC, lifetime-qualified object pointers are not trivially
  // copyable (except for __unsafe_unretained).
  if (Context.getLangOpts().ObjCAutoRefCount) {
    switch (getObjCLifetime()) {
    case Qualifiers::OCL_ExplicitNone:
      return true;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
    case Qualifiers::OCL_Autoreleasing:
      return false;

    case Qualifiers::OCL_None:
      if ((*this)->isObjCLifetimeType())
        return false;
      break;
    }
  }

  // C++11 [basic.types]p9
  //   Scalar types, trivially copyable class types, arrays of such types, and
  //   non-volatile const-qualified versions of these types are collectively
  //   called trivially copyable types.

  QualType CanonicalType = getCanonicalType();
  if (CanonicalType->isDependentType())
    return false;

  if (CanonicalType.isVolatileQualified())
    return false;

  // Return false for incomplete types after skipping any incomplete array types
  // which are expressly allowed by the standard and thus our API.
  if (CanonicalType->isIncompleteType())
    return false;

  // As an extension, Clang treats vector types as Scalar types.
  if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
    return true;

  if (const RecordType *RT = CanonicalType->getAs<RecordType>()) {
    if (const CXXRecordDecl *ClassDecl =
          dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!ClassDecl->isTriviallyCopyable()) return false;
    }

    // Non-C++ records (plain C structs) are trivially copyable.
    return true;
  }

  // No other types can match.
  return false;
}
/// Determine whether this type is a literal type per C++11/C++1y
/// [basic.types]p10.
bool Type::isLiteralType(const ASTContext &Ctx) const {
  if (isDependentType())
    return false;

  // C++1y [basic.types]p10:
  //   A type is a literal type if it is:
  //   -- cv void; or
  if (Ctx.getLangOpts().CPlusPlus14 && isVoidType())
    return true;

  // C++11 [basic.types]p10:
  //   A type is a literal type if it is:
  //   [...]
  //   -- an array of literal type other than an array of runtime bound; or
  if (isVariableArrayType())
    return false;
  // Strip array bounds and test the ultimate element type below.
  const Type *BaseTy = getBaseElementTypeUnsafe();
  assert(BaseTy && "NULL element type");

  // Return false for incomplete types after skipping any incomplete array
  // types; those are expressly allowed by the standard and thus our API.
  if (BaseTy->isIncompleteType())
    return false;

  // C++11 [basic.types]p10:
  //   A type is a literal type if it is:
  //    -- a scalar type; or
  // As an extension, Clang treats vector types and complex types as
  // literal types.
  if (BaseTy->isScalarType() || BaseTy->isVectorType() ||
      BaseTy->isAnyComplexType())
    return true;
  //    -- a reference type; or
  if (BaseTy->isReferenceType())
    return true;
  //    -- a class type that has all of the following properties:
  if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
    //    -- a trivial destructor,
    //    -- every constructor call and full-expression in the
    //       brace-or-equal-initializers for non-static data members (if any)
    //       is a constant expression,
    //    -- it is an aggregate type or has at least one constexpr
    //       constructor or constructor template that is not a copy or move
    //       constructor, and
    //    -- all non-static data members and base classes of literal types
    //
    // We resolve DR1361 by ignoring the second bullet.
    if (const CXXRecordDecl *ClassDecl =
        dyn_cast<CXXRecordDecl>(RT->getDecl()))
      return ClassDecl->isLiteral();

    // Non-C++ records qualify.
    return true;
  }

  // We treat _Atomic T as a literal type if T is a literal type.
  if (const AtomicType *AT = BaseTy->getAs<AtomicType>())
    return AT->getValueType()->isLiteralType(Ctx);

  // If this type hasn't been deduced yet, then conservatively assume that
  // it'll work out to be a literal type.
  if (isa<AutoType>(BaseTy->getCanonicalTypeInternal()))
    return true;

  return false;
}
bool Type::isStandardLayoutType() const {
if (isDependentType())
return false;
// C++0x [basic.types]p9:
// Scalar types, standard-layout class types, arrays of such types, and
// cv-qualified versions of these types are collectively called
// standard-layout types.
const Type *BaseTy = getBaseElementTypeUnsafe();
assert(BaseTy && "NULL element type");
// Return false for incomplete types after skipping any incomplete array
// types which are expressly allowed by the standard and thus our API.
if (BaseTy->isIncompleteType())
return false;
// As an extension, Clang treats vector types as Scalar types.
if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true;
if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
if (const CXXRecordDecl *ClassDecl =
dyn_cast<CXXRecordDecl>(RT->getDecl()))
if (!ClassDecl->isStandardLayout())
return false;
// Default to 'true' for non-C++ class types.
// FIXME: This is a bit dubious, but plain C structs should trivially meet
// all the requirements of standard layout classes.
return true;
}
// No other types can match.
return false;
}
// This is effectively the intersection of isTrivialType and
// isStandardLayoutType. We implement it directly to avoid redundant
// conversions from a type to a CXXRecordDecl.
bool QualType::isCXX11PODType(ASTContext &Context) const {
  const Type *ty = getTypePtr();
  if (ty->isDependentType())
    return false;

  // Under ARC, lifetime-qualified object pointers are never POD.
  if (Context.getLangOpts().ObjCAutoRefCount) {
    switch (getObjCLifetime()) {
    case Qualifiers::OCL_ExplicitNone:
      return true;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
    case Qualifiers::OCL_Autoreleasing:
      return false;

    case Qualifiers::OCL_None:
      break;
    }
  }

  // C++11 [basic.types]p9:
  //   Scalar types, POD classes, arrays of such types, and cv-qualified
  //   versions of these types are collectively called trivial types.
  const Type *BaseTy = ty->getBaseElementTypeUnsafe();
  assert(BaseTy && "NULL element type");

  // Return false for incomplete types after skipping any incomplete array
  // types which are expressly allowed by the standard and thus our API.
  if (BaseTy->isIncompleteType())
    return false;

  // As an extension, Clang treats vector types as Scalar types.
  if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true;
  if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
    if (const CXXRecordDecl *ClassDecl =
        dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      // C++11 [class]p10:
      //   A POD struct is a non-union class that is both a trivial class [...]
      if (!ClassDecl->isTrivial()) return false;

      // C++11 [class]p10:
      //   A POD struct is a non-union class that is both a trivial class and
      //   a standard-layout class [...]
      if (!ClassDecl->isStandardLayout()) return false;

      // C++11 [class]p10:
      //   A POD struct is a non-union class that is both a trivial class and
      //   a standard-layout class, and has no non-static data members of type
      //   non-POD struct, non-POD union (or array of such types). [...]
      //
      // We don't directly query the recursive aspect as the requiremets for
      // both standard-layout classes and trivial classes apply recursively
      // already.
    }

    return true;
  }

  // No other types can match.
  return false;
}
/// Whether this type is promotable to a larger integer type under the
/// integer promotions (C99 6.3.1.1, C++ [conv.prom]).
bool Type::isPromotableIntegerType() const {
  if (const BuiltinType *BT = getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::Min12Int: // HLSL Change
    case BuiltinType::LitInt: // HLSL Change
      return true;
    default:
      return false;
    }

  // Enumerated types are promotable to their compatible integer types
  // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
  if (const EnumType *ET = getAs<EnumType>()){
    // Dependent, incomplete (no promotion type yet), and scoped enums do
    // not promote.
    if (this->isDependentType() || ET->getDecl()->getPromotionType().isNull()
        || ET->getDecl()->isScoped())
      return false;

    return true;
  }

  return false;
}
/// Whether this type can be written directly as a type-specifier (as opposed
/// to requiring declarator syntax such as pointers or arrays).
bool Type::isSpecifierType() const {
  // Note that this intentionally does not use the canonical type.
  switch (getTypeClass()) {
  case Builtin:
  case Record:
  case Enum:
  case Typedef:
  case Complex:
  case TypeOfExpr:
  case TypeOf:
  case TemplateTypeParm:
  case SubstTemplateTypeParm:
  case TemplateSpecialization:
  case Elaborated:
  case DependentName:
  case DependentTemplateSpecialization:
  case ObjCInterface:
  case ObjCObject:
  case ObjCObjectPointer: // FIXME: object pointers aren't really specifiers
    return true;
  default:
    return false;
  }
}
/// Map a declaration type-specifier (TST_*) to the corresponding
/// elaborated-type keyword; anything that is not a tag/typename
/// specifier maps to ETK_None.
ElaboratedTypeKeyword
TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) {
  switch (TypeSpec) {
  default: return ETK_None;
  case TST_typename: return ETK_Typename;
  case TST_class: return ETK_Class;
  case TST_struct: return ETK_Struct;
  case TST_interface: return ETK_Interface;
  case TST_union: return ETK_Union;
  case TST_enum: return ETK_Enum;
  }
}
/// Map a tag declaration type-specifier (TST_*) to the corresponding
/// TagTypeKind.  Non-tag specifiers are a programming error.
TagTypeKind
TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
  switch(TypeSpec) {
  case TST_class: return TTK_Class;
  case TST_struct: return TTK_Struct;
  case TST_interface: return TTK_Interface;
  case TST_union: return TTK_Union;
  case TST_enum: return TTK_Enum;
  }

  llvm_unreachable("Type specifier is not a tag type kind.");
}
/// Map a TagTypeKind to the corresponding elaborated-type keyword.
ElaboratedTypeKeyword
TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) {
  switch (Kind) {
  case TTK_Class: return ETK_Class;
  case TTK_Struct: return ETK_Struct;
  case TTK_Interface: return ETK_Interface;
  case TTK_Union: return ETK_Union;
  case TTK_Enum: return ETK_Enum;
  }
  llvm_unreachable("Unknown tag type kind.");
}
/// Map an elaborated-type keyword to the corresponding TagTypeKind.
/// ETK_None and ETK_Typename are not tag kinds and are a programming error.
TagTypeKind
TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) {
  switch (Keyword) {
  case ETK_Class: return TTK_Class;
  case ETK_Struct: return TTK_Struct;
  case ETK_Interface: return TTK_Interface;
  case ETK_Union: return TTK_Union;
  case ETK_Enum: return TTK_Enum;
  case ETK_None: // Fall through.
  case ETK_Typename:
    llvm_unreachable("Elaborated type keyword is not a tag type kind.");
  }
  llvm_unreachable("Unknown elaborated type keyword.");
}
/// Whether an elaborated-type keyword names a tag kind
/// (class/struct/__interface/union/enum), as opposed to 'typename' or none.
bool
TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
  switch (Keyword) {
  case ETK_None:
  case ETK_Typename:
    return false;
  case ETK_Class:
  case ETK_Struct:
  case ETK_Interface:
  case ETK_Union:
  case ETK_Enum:
    return true;
  }
  llvm_unreachable("Unknown elaborated type keyword.");
}
/// Return the source spelling for an elaborated-type keyword
/// (empty string for ETK_None).
StringRef TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
  switch (Keyword) {
  case ETK_None: return "";
  case ETK_Typename: return "typename";
  case ETK_Class:  return "class";
  case ETK_Struct: return "struct";
  case ETK_Interface: return "__interface";
  case ETK_Union:  return "union";
  case ETK_Enum:   return "enum";
  }

  llvm_unreachable("Unknown elaborated type keyword.");
}
/// Construct a dependent template specialization type.  The template
/// arguments are copied into the trailing argument buffer via placement
/// new; the unexpanded-parameter-pack bit is seeded from the qualifier and
/// then updated as arguments are copied.
DependentTemplateSpecializationType::DependentTemplateSpecializationType(
                                 ElaboratedTypeKeyword Keyword,
                                 NestedNameSpecifier *NNS, const IdentifierInfo *Name,
                                 unsigned NumArgs, const TemplateArgument *Args,
                                 QualType Canon)
  : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, true, true,
                    /*VariablyModified=*/false,
                    NNS && NNS->containsUnexpandedParameterPack()),
    NNS(NNS), Name(Name), NumArgs(NumArgs) {
  assert((!NNS || NNS->isDependent()) &&
         "DependentTemplateSpecializatonType requires dependent qualifier");
  for (unsigned I = 0; I != NumArgs; ++I) {
    if (Args[I].containsUnexpandedParameterPack())
      setContainsUnexpandedParameterPack();

    new (&getArgBuffer()[I]) TemplateArgument(Args[I]);
  }
}
/// Fold the identity of a dependent template specialization into \p ID:
/// the keyword, qualifier, and name, followed by each template argument in
/// order, so structurally identical specializations unify.
void
DependentTemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
                                             const ASTContext &Context,
                                             ElaboratedTypeKeyword Keyword,
                                             NestedNameSpecifier *Qualifier,
                                             const IdentifierInfo *Name,
                                             unsigned NumArgs,
                                             const TemplateArgument *Args) {
  ID.AddInteger(Keyword);
  ID.AddPointer(Qualifier);
  ID.AddPointer(Name);
  for (const TemplateArgument *Arg = Args, *End = Args + NumArgs; Arg != End;
       ++Arg)
    Arg->Profile(ID, Context);
}
bool Type::isElaboratedTypeSpecifier() const {
ElaboratedTypeKeyword Keyword;
if (const ElaboratedType *Elab = dyn_cast<ElaboratedType>(this))
Keyword = Elab->getKeyword();
else if (const DependentNameType *DepName = dyn_cast<DependentNameType>(this))
Keyword = DepName->getKeyword();
else if (const DependentTemplateSpecializationType *DepTST =
dyn_cast<DependentTemplateSpecializationType>(this))
Keyword = DepTST->getKeyword();
else
return false;
return TypeWithKeyword::KeywordIsTagTypeKind(Keyword);
}
/// Return the spelling of this type's TypeClass, generated from the
/// TypeNodes.def X-macro table.
const char *Type::getTypeClassName() const {
  switch (TypeBits.TC) {
#define ABSTRACT_TYPE(Derived, Base)
#define TYPE(Derived, Base) case Derived: return #Derived;
#include "clang/AST/TypeNodes.def"
  }

  llvm_unreachable("Invalid type class.");
}
/// Return the printed name of this builtin type, honoring the printing
/// policy where a kind has multiple spellings (bool/_Bool, wchar_t).
/// Several spellings are HLSL-specific overrides of the C++ names.
StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
  switch (getKind()) {
  case Void:              return "void";
  case Bool:              return Policy.Bool ? "bool" : "_Bool";
  case Char_S:            return "char";
  case Char_U:            return "char";
  case SChar:             return "signed char";
  case Short:             return /* "short" */ "int16_t" /* HLSL Change */;
  case Int:               return "int";
  case Long:              return "long";
  case LongLong:          return "long long";
  case Int128:            return "__int128";
  case UChar:             return "unsigned char";
  case UShort:            return /* "unsigned short" */ "uint16_t" /* HLSL Change */;
  case UInt:              return "unsigned int";
  case ULong:             return "unsigned long";
  case ULongLong:         return "unsigned long long";
  case UInt128:           return "unsigned __int128";
  // HLSL Changes begin
  case HalfFloat:
  case Half:              return /*Policy.Half*/ true ? "half" : "__fp16";
  // HLSL Changes end
  case Float:             return "float";
  case Double:            return "double";
  case LongDouble:        return "long double";
  case WChar_S:
  case WChar_U:           return Policy.MSWChar ? "__wchar_t" : "wchar_t";
  case Char16:            return "char16_t";
  case Char32:            return "char32_t";
  case NullPtr:           return "nullptr_t";
  // The remaining kinds are internal placeholder types, never written in
  // source.
  case Overload:          return "<overloaded function type>";
  case BoundMember:       return "<bound member function type>";
  case PseudoObject:      return "<pseudo-object type>";
  case Dependent:         return "<dependent type>";
  case UnknownAny:        return "<unknown type>";
  case ARCUnbridgedCast:  return "<ARC unbridged cast type>";
  case BuiltinFn:         return "<builtin fn type>";
  case ObjCId:            return "id";
  case ObjCClass:         return "Class";
  case ObjCSel:           return "SEL";
  case OCLImage1d:        return "image1d_t";
  case OCLImage1dArray:   return "image1d_array_t";
  case OCLImage1dBuffer:  return "image1d_buffer_t";
  case OCLImage2d:        return "image2d_t";
  case OCLImage2dArray:   return "image2d_array_t";
  case OCLImage3d:        return "image3d_t";
  case OCLSampler:        return "sampler_t";
  case OCLEvent:          return "event_t";
  // HLSL Change Starts
  case Min10Float:        return "min10float";
  case Min16Float:        return "min16float";
  case Min16Int:          return "min16int";
  case Min16UInt:         return "min16uint";
  case Min12Int:          return "min12int";
  case LitFloat:          return "literal float";
  case LitInt:            return "literal int";
  case Int8_4Packed:      return "int8_t4_packed";
  case UInt8_4Packed:     return "uint8_t4_packed";
  // HLSL Change Ends
  }

  llvm_unreachable("Invalid builtin type.");
}
QualType QualType::getNonLValueExprType(const ASTContext &Context) const {
  // An expression of reference type denotes a glvalue of the referenced
  // type; strip the reference to get the type of the expression.
  if (const ReferenceType *Ref = getTypePtr()->getAs<ReferenceType>())
    return Ref->getPointeeType();

  // C++0x [basic.lval]:
  //   Class prvalues can have cv-qualified types; non-class prvalues always
  //   have cv-unqualified types.
  //
  // See also C99 6.3.2.1p2.
  if (Context.getLangOpts().CPlusPlus &&
      (getTypePtr()->isDependentType() || getTypePtr()->isRecordType()))
    return *this;

  return getUnqualifiedType();
}
/// Map a calling-convention enumerator to the identifier used in its
/// source-level attribute spelling (e.g. __attribute__((stdcall))).
StringRef FunctionType::getNameForCallConv(CallingConv CC) {
  switch (CC) {
  case CC_C: return "cdecl";
  case CC_X86StdCall: return "stdcall";
  case CC_X86FastCall: return "fastcall";
  case CC_X86ThisCall: return "thiscall";
  case CC_X86Pascal: return "pascal";
  case CC_X86VectorCall: return "vectorcall";
  case CC_X86_64Win64: return "ms_abi";
  case CC_X86_64SysV: return "sysv_abi";
  case CC_AAPCS: return "aapcs";
  case CC_AAPCS_VFP: return "aapcs-vfp";
  case CC_IntelOclBicc: return "intel_ocl_bicc";
  case CC_SpirFunction: return "spir_function";
  case CC_SpirKernel: return "spir_kernel";
  }
  llvm_unreachable("Invalid calling convention.");
}
/// Construct a function prototype type.  The parameter types, the HLSL
/// parameter modifiers, and (in upstream clang) the exception-specification
/// payload all live in trailing storage allocated immediately after this
/// object, so the layout here must match the size computation done by the
/// allocating code in ASTContext.
FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
                                     QualType canonical,
                                     const ExtProtoInfo &epi,
                                     ArrayRef<hlsl::ParameterModifier> paramModifiers) // HLSL Change
  : FunctionType(FunctionProto, result, canonical,
                 result->isDependentType(),
                 result->isInstantiationDependentType(),
                 result->isVariablyModifiedType(),
                 result->containsUnexpandedParameterPack(), epi.ExtInfo),
    NumParams(params.size()),
    NumExceptions(epi.ExceptionSpec.Exceptions.size()),
    ExceptionSpecType(epi.ExceptionSpec.Type),
    HasAnyConsumedParams(epi.ConsumedParameters != nullptr),
    Variadic(epi.Variadic), HasTrailingReturn(epi.HasTrailingReturn) {
  // NumParams is a bitfield; verify the parameter count did not get
  // truncated when it was copied from params.size().
  assert(NumParams == params.size() && "function has too many parameters");
  FunctionTypeBits.TypeQuals = epi.TypeQuals;
  FunctionTypeBits.RefQualifier = epi.RefQualifier;
  // Fill in the trailing argument array.
  QualType *argSlot = reinterpret_cast<QualType*>(this+1);
  for (unsigned i = 0; i != NumParams; ++i) {
    // Propagate dependence bits from each parameter type onto the
    // function type itself.
    if (params[i]->isDependentType())
      setDependent();
    else if (params[i]->isInstantiationDependentType())
      setInstantiationDependent();
    if (params[i]->containsUnexpandedParameterPack())
      setContainsUnexpandedParameterPack();
    argSlot[i] = params[i];
  }
  // HLSL Change Starts
  // One hlsl::ParameterModifier per parameter is stored right after the
  // parameter types.  An empty modifier list means every parameter
  // defaults to 'in'.
  hlsl::ParameterModifier *paramModifierSlot =
    reinterpret_cast<hlsl::ParameterModifier *>(argSlot + NumParams);
  if (paramModifiers.size() == 0) {
    std::fill(paramModifierSlot, paramModifierSlot + NumParams,
              hlsl::ParameterModifier(hlsl::ParameterModifier::Kind::In));
  }
  else {
    std::copy(paramModifiers.begin(), paramModifiers.end(), paramModifierSlot);
  }
  // NOTE(review): this early return skips all of the exception-specification
  // storage below, leaving that code unreachable in this fork (HLSL has no
  // exception specifications).
  return;
  // HLSL Change Ends
  if (getExceptionSpecType() == EST_Dynamic) {
    // Fill in the exception array.
    QualType *exnSlot = argSlot + NumParams;
    unsigned I = 0;
    for (QualType ExceptionType : epi.ExceptionSpec.Exceptions) {
      // Note that a dependent exception specification does *not* make
      // a type dependent; it's not even part of the C++ type system.
      if (ExceptionType->isInstantiationDependentType())
        setInstantiationDependent();
      if (ExceptionType->containsUnexpandedParameterPack())
        setContainsUnexpandedParameterPack();
      exnSlot[I++] = ExceptionType;
    }
    paramModifierSlot = reinterpret_cast<hlsl::ParameterModifier *>(// HLSL Change
      exnSlot + epi.ExceptionSpec.Exceptions.size());
  } else if (getExceptionSpecType() == EST_ComputedNoexcept) {
    // Store the noexcept expression and context.
    Expr **noexSlot = reinterpret_cast<Expr **>(argSlot + NumParams);
    *noexSlot = epi.ExceptionSpec.NoexceptExpr;
    if (epi.ExceptionSpec.NoexceptExpr) {
      if (epi.ExceptionSpec.NoexceptExpr->isValueDependent() ||
          epi.ExceptionSpec.NoexceptExpr->isInstantiationDependent())
        setInstantiationDependent();
      if (epi.ExceptionSpec.NoexceptExpr->containsUnexpandedParameterPack())
        setContainsUnexpandedParameterPack();
    }
    paramModifierSlot = // HLSL Change
      reinterpret_cast<hlsl::ParameterModifier *>(noexSlot + 1);
  } else if (getExceptionSpecType() == EST_Uninstantiated) {
    // Store the function decl from which we will resolve our
    // exception specification.
    FunctionDecl **slot =
      reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
    slot[0] = epi.ExceptionSpec.SourceDecl;
    slot[1] = epi.ExceptionSpec.SourceTemplate;
    // This exception specification doesn't make the type dependent, because
    // it's not instantiated as part of instantiating the type.
    // NOTE(review): this casts the *value* of the never-written slot[2]
    // rather than the address (slot + 2); looks like a bug, but the branch
    // is unreachable because of the early return above — confirm before
    // re-enabling this path.
    paramModifierSlot = // HLSL Change
      reinterpret_cast<hlsl::ParameterModifier *>(slot[2]);
  } else if (getExceptionSpecType() == EST_Unevaluated) {
    // Store the function decl from which we will resolve our
    // exception specification.
    FunctionDecl **slot =
      reinterpret_cast<FunctionDecl **>(argSlot + NumParams);
    slot[0] = epi.ExceptionSpec.SourceDecl;
  }
  if (epi.ConsumedParameters) {
    bool *consumedParams = const_cast<bool *>(getConsumedParamsBuffer());
    for (unsigned i = 0; i != NumParams; ++i)
      consumedParams[i] = epi.ConsumedParameters[i];
  }
}
/// Whether this function type's exception specification depends on a
/// template parameter: a value-dependent noexcept expression, or any
/// dependent (or pack-expansion) type in a dynamic exception spec.
bool FunctionProtoType::hasDependentExceptionSpec() const {
  if (Expr *NE = getNoexceptExpr())
    return NE->isValueDependent();
  for (QualType ET : exceptions())
    // A pack expansion with a non-dependent pattern is still dependent,
    // because we don't know whether the pattern is in the exception spec
    // or not (that depends on whether the pack has 0 expansions).
    if (ET->isDependentType() || ET->getAs<PackExpansionType>())
      return true;
  return false;
}
/// Classify this type's noexcept specification.  Plain `noexcept` is
/// unconditionally non-throwing; only noexcept(expr) needs its expression
/// evaluated, which must be an ICE unless it is value-dependent.
FunctionProtoType::NoexceptResult
FunctionProtoType::getNoexceptSpec(const ASTContext &ctx) const {
  ExceptionSpecificationType est = getExceptionSpecType();
  if (est == EST_BasicNoexcept)
    return NR_Nothrow;
  if (est != EST_ComputedNoexcept)
    return NR_NoNoexcept;
  Expr *noexceptExpr = getNoexceptExpr();
  if (!noexceptExpr)
    return NR_BadNoexcept;
  if (noexceptExpr->isValueDependent())
    return NR_Dependent;
  llvm::APSInt value;
  bool isICE = noexceptExpr->isIntegerConstantExpr(value, ctx, nullptr,
                                                   /*evaluated*/false);
  (void)isICE;
  assert(isICE && "AST should not contain bad noexcept expressions.");
  return value.getBoolValue() ? NR_Nothrow : NR_Throw;
}
/// Whether this function type is known not to throw.  ResultIfDependent is
/// returned when the answer depends on template parameters (e.g. a
/// dependent noexcept expression).
bool FunctionProtoType::isNothrow(const ASTContext &Ctx,
                                  bool ResultIfDependent) const {
  ExceptionSpecificationType EST = getExceptionSpecType();
  assert(EST != EST_Unevaluated && EST != EST_Uninstantiated);
  if (EST == EST_DynamicNone || EST == EST_BasicNoexcept)
    return true;
  if (EST == EST_Dynamic && ResultIfDependent) {
    // A dynamic exception specification is throwing unless every exception
    // type is an (unexpanded) pack expansion type.
    for (unsigned I = 0, N = NumExceptions; I != N; ++I)
      if (!getExceptionType(I)->getAs<PackExpansionType>())
        return false;
    return ResultIfDependent;
  }
  if (EST != EST_ComputedNoexcept)
    return false;
  NoexceptResult NR = getNoexceptSpec(Ctx);
  if (NR == NR_Dependent)
    return ResultIfDependent;
  return NR == NR_Nothrow;
}
/// Whether this prototype ends in an unexpanded parameter pack (scans the
/// parameter list back to front, since packs are typically trailing).
bool FunctionProtoType::isTemplateVariadic() const {
  for (unsigned ArgIdx = getNumParams(); ArgIdx; --ArgIdx)
    if (isa<PackExpansionType>(getParamType(ArgIdx - 1)))
      return true;
  return false;
}
/// Fold the complete identity of a function prototype into ID so that
/// structurally identical prototypes unique to the same type node.  The
/// encoding order below is load-bearing and must stay ambiguity-free.
void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
                                const QualType *ArgTys, unsigned NumParams,
                                const ExtProtoInfo &epi,
                                ArrayRef<hlsl::ParameterModifier> ParamMods, // HLSL Change
                                const ASTContext &Context) {
  // We have to be careful not to get ambiguous profile encodings.
  // Note that valid type pointers are never ambiguous with anything else.
  //
  // The encoding grammar begins:
  //   type type* bool int bool
  // If that final bool is true, then there is a section for the EH spec:
  //   bool type*
  // This is followed by an optional "consumed argument" section of the
  // same length as the first type sequence:
  //   bool*
  // Finally, we have the ext info and trailing return type flag:
  //   int bool
  //
  // There is no ambiguity between the consumed arguments and an empty EH
  // spec because of the leading 'bool' which unambiguously indicates
  // whether the following bool is the EH spec or part of the arguments.
  ID.AddPointer(Result.getAsOpaquePtr());
  for (unsigned i = 0; i != NumParams; ++i) {
    ID.AddPointer(ArgTys[i].getAsOpaquePtr());
    // HLSL Change Starts
    // Interleave each parameter's modifier with its type so prototypes
    // differing only in in/out/inout modifiers profile differently.  An
    // empty ParamMods list encodes the default modifier for every slot.
    if (ParamMods.size() != 0)
      ID.AddInteger(ParamMods[i].getAsUnsigned());
    else
      ID.AddInteger(hlsl::ParameterModifier().getAsUnsigned());
    // HLSL Change Ends
  }
  // This method is relatively performance sensitive, so as a performance
  // shortcut, use one AddInteger call instead of four for the next four
  // fields.
  assert(!(unsigned(epi.Variadic) & ~1) &&
         !(unsigned(epi.TypeQuals) & ~255) &&
         !(unsigned(epi.RefQualifier) & ~3) &&
         !(unsigned(epi.ExceptionSpec.Type) & ~15) &&
         "Values larger than expected.");
  ID.AddInteger(unsigned(epi.Variadic) +
                (epi.TypeQuals << 1) +
                (epi.RefQualifier << 9) +
                (epi.ExceptionSpec.Type << 11));
  if (epi.ExceptionSpec.Type == EST_Dynamic) {
    for (QualType Ex : epi.ExceptionSpec.Exceptions)
      ID.AddPointer(Ex.getAsOpaquePtr());
  } else if (epi.ExceptionSpec.Type == EST_ComputedNoexcept &&
             epi.ExceptionSpec.NoexceptExpr) {
    epi.ExceptionSpec.NoexceptExpr->Profile(ID, Context, false);
  } else if (epi.ExceptionSpec.Type == EST_Uninstantiated ||
             epi.ExceptionSpec.Type == EST_Unevaluated) {
    ID.AddPointer(epi.ExceptionSpec.SourceDecl->getCanonicalDecl());
  }
  if (epi.ConsumedParameters) {
    for (unsigned i = 0; i != NumParams; ++i)
      ID.AddBoolean(epi.ConsumedParameters[i]);
  }
  epi.ExtInfo.Profile(ID);
  ID.AddBoolean(epi.HasTrailingReturn);
}
/// Profile this instance by forwarding its own components to the static
/// overload above.
void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID,
                                const ASTContext &Ctx) {
  Profile(ID, getReturnType(), param_type_begin(), NumParams, getExtProtoInfo(), getParamMods(), // HLSL Change
          Ctx);
}
/// A typedef is sugar for its underlying type.
QualType TypedefType::desugar() const {
  return getDecl()->getUnderlyingType();
}
/// typeof(expr): dependence bits are inherited from the operand expression.
TypeOfExprType::TypeOfExprType(Expr *E, QualType can)
  : Type(TypeOfExpr, can, E->isTypeDependent(),
         E->isInstantiationDependent(),
         E->getType()->isVariablyModifiedType(),
         E->containsUnexpandedParameterPack()),
    TOExpr(E) {
}
/// typeof(expr) is sugar only once the operand's type is known.
bool TypeOfExprType::isSugared() const {
  return !TOExpr->isTypeDependent();
}
QualType TypeOfExprType::desugar() const {
  if (isSugared())
    return getUnderlyingExpr()->getType();
  // Still dependent: the type desugars to itself.
  return QualType(this, 0);
}
/// Dependent typeof(expr) nodes are uniqued by the canonical profile of
/// their operand expression.
void DependentTypeOfExprType::Profile(llvm::FoldingSetNodeID &ID,
                                      const ASTContext &Context, Expr *E) {
  E->Profile(ID, Context, true);
}
DecltypeType::DecltypeType(Expr *E, QualType underlyingType, QualType can)
  // C++11 [temp.type]p2: "If an expression e involves a template parameter,
  // decltype(e) denotes a unique dependent type." Hence a decltype type is
  // type-dependent even if its expression is only instantiation-dependent.
  : Type(Decltype, can, E->isInstantiationDependent(),
         E->isInstantiationDependent(),
         E->getType()->isVariablyModifiedType(),
         E->containsUnexpandedParameterPack()),
    E(E),
  UnderlyingType(underlyingType) {
}
// decltype(e) is sugar for the deduced type unless e is
// instantiation-dependent (see the [temp.type]p2 note above).
bool DecltypeType::isSugared() const { return !E->isInstantiationDependent(); }
QualType DecltypeType::desugar() const {
  if (isSugared())
    return getUnderlyingType();
  // Dependent decltype: desugars to itself.
  return QualType(this, 0);
}
/// Dependent decltype nodes keep the ASTContext so they can be re-profiled.
DependentDecltypeType::DependentDecltypeType(const ASTContext &Context, Expr *E)
  : DecltypeType(E, Context.DependentTy), Context(Context) { }
void DependentDecltypeType::Profile(llvm::FoldingSetNodeID &ID,
                                    const ASTContext &Context, Expr *E) {
  E->Profile(ID, Context, true);
}
/// A tag (struct/union/class/enum) type; dependence comes solely from the
/// declaration itself.
TagType::TagType(TypeClass TC, const TagDecl *D, QualType can)
  : Type(TC, can, D->isDependentType(),
         /*InstantiationDependent=*/D->isDependentType(),
         /*VariablyModified=*/false,
         /*ContainsUnexpandedParameterPack=*/false),
    decl(const_cast<TagDecl*>(D)) {}
/// Prefer the redeclaration that is (or is becoming) the complete
/// definition; otherwise fall back to whichever declaration we stored.
static TagDecl *getInterestingTagDecl(TagDecl *decl) {
  for (auto I : decl->redecls()) {
    if (I->isCompleteDefinition() || I->isBeingDefined())
      return I;
  }
  // If there's no definition (not even in progress), return what we have.
  return decl;
}
/// __underlying_type(T) and friends; note the unexpanded-pack bit comes
/// from the *base* type while the other bits come from the underlying type.
UnaryTransformType::UnaryTransformType(QualType BaseType,
                                       QualType UnderlyingType,
                                       UTTKind UKind,
                                       QualType CanonicalType)
  : Type(UnaryTransform, CanonicalType, UnderlyingType->isDependentType(),
         UnderlyingType->isInstantiationDependentType(),
         UnderlyingType->isVariablyModifiedType(),
         BaseType->containsUnexpandedParameterPack())
  , BaseType(BaseType), UnderlyingType(UnderlyingType), UKind(UKind)
{}
TagDecl *TagType::getDecl() const {
  return getInterestingTagDecl(decl);
}
bool TagType::isBeingDefined() const {
  return getDecl()->isBeingDefined();
}
bool AttributedType::isMSTypeSpec() const {
switch (getAttrKind()) {
default: return false;
case attr_ptr32:
case attr_ptr64:
case attr_sptr:
case attr_uptr:
return true;
}
llvm_unreachable("invalid attr kind");
}
// HLSL Change Starts
bool AttributedType::isHLSLTypeSpec() const {
  // Matrix-orientation and norm/coherence qualifiers are the attribute
  // kinds treated as HLSL type specifiers.
  switch (getAttrKind()) {
  case attr_hlsl_row_major:
  case attr_hlsl_column_major:
  case attr_hlsl_snorm:
  case attr_hlsl_unorm:
  case attr_hlsl_globallycoherent:
    return true;
  default:
    return false;
  }
}
// HLSL Change Ends
/// Whether this attribute selects a calling convention.  The switch is
/// fully enumerated (no default) so adding a new attribute kind forces a
/// decision here at compile time.
bool AttributedType::isCallingConv() const {
  switch (getAttrKind()) {
  // Non-calling-convention attributes: pointer size, address space,
  // vector shapes, ObjC ownership/nullability, and the HLSL qualifiers.
  case attr_ptr32:
  case attr_ptr64:
  case attr_sptr:
  case attr_uptr:
  case attr_address_space:
  case attr_regparm:
  case attr_vector_size:
  case attr_neon_vector_type:
  case attr_neon_polyvector_type:
  case attr_objc_gc:
  case attr_objc_ownership:
  case attr_noreturn:
  case attr_nonnull:
  case attr_nullable:
  case attr_null_unspecified:
  case attr_objc_kindof:
  // HLSL Change Starts
  case attr_hlsl_row_major:
  case attr_hlsl_column_major:
  case attr_hlsl_snorm:
  case attr_hlsl_unorm:
  case attr_hlsl_globallycoherent:
  // HLSL Change Ends
    return false;
  // Calling-convention attributes.
  case attr_pcs:
  case attr_pcs_vfp:
  case attr_cdecl:
  case attr_fastcall:
  case attr_stdcall:
  case attr_thiscall:
  case attr_vectorcall:
  case attr_pascal:
  case attr_ms_abi:
  case attr_sysv_abi:
  case attr_inteloclbicc:
    return true;
  }
  llvm_unreachable("invalid attr kind");
}
CXXRecordDecl *InjectedClassNameType::getDecl() const {
  return cast<CXXRecordDecl>(getInterestingTagDecl(Decl));
}
/// Canonical template type parameters have no declaration, hence no name.
IdentifierInfo *TemplateTypeParmType::getIdentifier() const {
  return isCanonicalUnqualified() ? nullptr : getDecl()->getIdentifier();
}
/// A pack substitution is always dependent and contains an unexpanded pack
/// (hence the hard-coded true/true/false/true dependence bits).
SubstTemplateTypeParmPackType::
SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
                              QualType Canon,
                              const TemplateArgument &ArgPack)
  : Type(SubstTemplateTypeParmPack, Canon, true, true, false, true),
    Replaced(Param),
    Arguments(ArgPack.pack_begin()), NumArguments(ArgPack.pack_size())
{
}
TemplateArgument SubstTemplateTypeParmPackType::getArgumentPack() const {
  return TemplateArgument(Arguments, NumArguments);
}
void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID) {
  Profile(ID, getReplacedParameter(), getArgumentPack());
}
/// Unique by replaced parameter plus the identity of each pack element.
void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID,
                                           const TemplateTypeParmType *Replaced,
                                            const TemplateArgument &ArgPack) {
  ID.AddPointer(Replaced);
  ID.AddInteger(ArgPack.pack_size());
  for (const auto &P : ArgPack.pack_elements())
    ID.AddPointer(P.getAsType().getAsOpaquePtr());
}
/// Whether any template argument is dependent.  InstantiationDependent is
/// also set if any argument is merely instantiation-dependent, even when
/// the function returns false.
bool TemplateSpecializationType::
anyDependentTemplateArguments(const TemplateArgumentListInfo &Args,
                              bool &InstantiationDependent) {
  return anyDependentTemplateArguments(Args.getArgumentArray(), Args.size(),
                                       InstantiationDependent);
}
bool TemplateSpecializationType::
anyDependentTemplateArguments(const TemplateArgumentLoc *Args, unsigned N,
                              bool &InstantiationDependent) {
  for (unsigned i = 0; i != N; ++i) {
    // A dependent argument is also instantiation-dependent; set both and
    // stop scanning.
    if (Args[i].getArgument().isDependent()) {
      InstantiationDependent = true;
      return true;
    }
    if (Args[i].getArgument().isInstantiationDependent())
      InstantiationDependent = true;
  }
  return false;
}
/// Construct a template specialization type.  The template arguments are
/// stored in trailing storage right after this object; for alias-template
/// specializations, the aliased type follows the argument array.
TemplateSpecializationType::
TemplateSpecializationType(TemplateName T,
                           const TemplateArgument *Args, unsigned NumArgs,
                           QualType Canon, QualType AliasedType)
  : Type(TemplateSpecialization,
         Canon.isNull()? QualType(this, 0) : Canon,
         Canon.isNull()? true : Canon->isDependentType(),
         Canon.isNull()? true : Canon->isInstantiationDependentType(),
         false,
         T.containsUnexpandedParameterPack()),
    Template(T), NumArgs(NumArgs), TypeAlias(!AliasedType.isNull()) {
  assert(!T.getAsDependentTemplateName() &&
         "Use DependentTemplateSpecializationType for dependent template-name");
  assert((T.getKind() == TemplateName::Template ||
          T.getKind() == TemplateName::SubstTemplateTemplateParm ||
          T.getKind() == TemplateName::SubstTemplateTemplateParmPack) &&
         "Unexpected template name for TemplateSpecializationType");
  TemplateArgument *TemplateArgs
    = reinterpret_cast<TemplateArgument *>(this + 1);
  for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
    // Update instantiation-dependent and variably-modified bits.
    // If the canonical type exists and is non-dependent, the template
    // specialization type can be non-dependent even if one of the type
    // arguments is. Given:
    //   template<typename T> using U = int;
    // U<T> is always non-dependent, irrespective of the type T.
    // However, U<Ts> contains an unexpanded parameter pack, even though
    // its expansion (and thus its desugared type) doesn't.
    if (Args[Arg].isInstantiationDependent())
      setInstantiationDependent();
    if (Args[Arg].getKind() == TemplateArgument::Type &&
        Args[Arg].getAsType()->isVariablyModifiedType())
      setVariablyModified();
    if (Args[Arg].containsUnexpandedParameterPack())
      setContainsUnexpandedParameterPack();
    // Placement-construct a copy of the argument into trailing storage.
    new (&TemplateArgs[Arg]) TemplateArgument(Args[Arg]);
  }
  // Store the aliased type if this is a type alias template specialization.
  if (TypeAlias) {
    TemplateArgument *Begin = reinterpret_cast<TemplateArgument *>(this + 1);
    *reinterpret_cast<QualType*>(Begin + getNumArgs()) = AliasedType;
  }
}
/// Unique a template specialization by its template name plus each
/// argument's profile.
void
TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
                                    TemplateName T,
                                    const TemplateArgument *Args,
                                    unsigned NumArgs,
                                    const ASTContext &Context) {
  T.Profile(ID);
  for (unsigned Idx = 0; Idx < NumArgs; ++Idx)
    Args[Idx].Profile(ID, Context);
}
/// Apply collected qualifiers to a type.  Fast (CVR) qualifiers fit in the
/// QualType pointer itself; anything else requires an ExtQuals node from
/// the context.
QualType
QualifierCollector::apply(const ASTContext &Context, QualType QT) const {
  if (!hasNonFastQualifiers())
    return QT.withFastQualifiers(getFastQualifiers());
  return Context.getQualifiedType(QT, *this);
}
QualType
QualifierCollector::apply(const ASTContext &Context, const Type *T) const {
  if (!hasNonFastQualifiers())
    return QualType(T, getFastQualifiers());
  return Context.getQualifiedType(T, *this);
}
/// Unique an ObjC object type by base type, written type arguments,
/// protocol list, and whether it was spelled with __kindof.
void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID,
                                 QualType BaseType,
                                 ArrayRef<QualType> typeArgs,
                                 ArrayRef<ObjCProtocolDecl *> protocols,
                                 bool isKindOf) {
  ID.AddPointer(BaseType.getAsOpaquePtr());
  // Counts are folded in before the elements so e.g. (1 arg, 1 protocol)
  // cannot collide with (2 args, 0 protocols).
  ID.AddInteger(typeArgs.size());
  for (auto typeArg : typeArgs)
    ID.AddPointer(typeArg.getAsOpaquePtr());
  ID.AddInteger(protocols.size());
  for (auto proto : protocols)
    ID.AddPointer(proto);
  ID.AddBoolean(isKindOf);
}
void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID) {
  Profile(ID, getBaseType(), getTypeArgsAsWritten(),
          llvm::makeArrayRef(qual_begin(), getNumProtocols()),
          isKindOfTypeAsWritten());
}
namespace {
  /// \brief The cached properties of a type.
  ///
  /// Bundles the two bits of information the type-property cache tracks:
  /// the type's linkage and whether it involves a local or unnamed type.
  class CachedProperties {
    Linkage L;
    bool local;
  public:
    CachedProperties(Linkage L, bool local) : L(L), local(local) {}
    Linkage getLinkage() const { return L; }
    bool hasLocalOrUnnamedType() const { return local; }
    /// Combine two property sets: linkage is the weaker of the two, and
    /// the local/unnamed flag is sticky (true if either side has it).
    friend CachedProperties merge(CachedProperties L, CachedProperties R) {
      Linkage MergedLinkage = minLinkage(L.L, R.L);
      return CachedProperties(MergedLinkage,
                              L.hasLocalOrUnnamedType() || R.hasLocalOrUnnamedType());
    }
  };
}
static CachedProperties computeCachedProperties(const Type *T);
namespace clang {
/// The type-property cache. This is templated so as to be
/// instantiated at an internal type to prevent unnecessary symbol
/// leakage.
template <class Private> class TypePropertyCache {
public:
  static CachedProperties get(QualType T) {
    return get(T.getTypePtr());
  }
  static CachedProperties get(const Type *T) {
    ensure(T);
    return CachedProperties(T->TypeBits.getLinkage(),
                            T->TypeBits.hasLocalOrUnnamedType());
  }
  /// Populate T's cached linkage/local bits if they are not yet valid.
  static void ensure(const Type *T) {
    // If the cache is valid, we're okay.
    if (T->TypeBits.isCacheValid()) return;
    // If this type is non-canonical, ask its canonical type for the
    // relevant information.
    if (!T->isCanonicalUnqualified()) {
      const Type *CT = T->getCanonicalTypeInternal().getTypePtr();
      ensure(CT);
      T->TypeBits.CacheValid = true;
      T->TypeBits.CachedLinkage = CT->TypeBits.CachedLinkage;
      T->TypeBits.CachedLocalOrUnnamed = CT->TypeBits.CachedLocalOrUnnamed;
      return;
    }
    // Compute the cached properties and then set the cache.
    CachedProperties Result = computeCachedProperties(T);
    T->TypeBits.CacheValid = true;
    T->TypeBits.CachedLinkage = Result.getLinkage();
    T->TypeBits.CachedLocalOrUnnamed = Result.hasLocalOrUnnamedType();
  }
};
}
// Instantiate the friend template at a private class. In a
// reasonable implementation, these symbols will be internal.
// It is terrible that this is the best way to accomplish this.
namespace { class Private {}; }
typedef TypePropertyCache<Private> Cache;
/// Compute the linkage and local/unnamed-type properties for a canonical
/// type by structural recursion over its components (via Cache::get).
static CachedProperties computeCachedProperties(const Type *T) {
  switch (T->getTypeClass()) {
#define TYPE(Class,Base)
#define NON_CANONICAL_TYPE(Class,Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("didn't expect a non-canonical type here");
#define TYPE(Class,Base)
#define DEPENDENT_TYPE(Class,Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    // Treat instantiation-dependent types as external.
    assert(T->isInstantiationDependentType());
    return CachedProperties(ExternalLinkage, false);
  case Type::Auto:
    // Give non-deduced 'auto' types external linkage. We should only see them
    // here in error recovery.
    return CachedProperties(ExternalLinkage, false);
  case Type::Builtin:
    // C++ [basic.link]p8:
    //   A type is said to have linkage if and only if:
    //     - it is a fundamental type (3.9.1); or
    return CachedProperties(ExternalLinkage, false);
  case Type::Record:
  case Type::Enum: {
    const TagDecl *Tag = cast<TagType>(T)->getDecl();
    // C++ [basic.link]p8:
    //     - it is a class or enumeration type that is named (or has a name
    //       for linkage purposes (7.1.3)) and the name has linkage; or
    //     -  it is a specialization of a class template (14); or
    Linkage L = Tag->getLinkageInternal();
    bool IsLocalOrUnnamed =
      Tag->getDeclContext()->isFunctionOrMethod() ||
      !Tag->hasNameForLinkage();
    return CachedProperties(L, IsLocalOrUnnamed);
  }
    // C++ [basic.link]p8:
    //   - it is a compound type (3.9.2) other than a class or enumeration,
    //     compounded exclusively from types that have linkage; or
  case Type::Complex:
    return Cache::get(cast<ComplexType>(T)->getElementType());
  case Type::Pointer:
    return Cache::get(cast<PointerType>(T)->getPointeeType());
  case Type::BlockPointer:
    return Cache::get(cast<BlockPointerType>(T)->getPointeeType());
  case Type::LValueReference:
  case Type::RValueReference:
    return Cache::get(cast<ReferenceType>(T)->getPointeeType());
  case Type::MemberPointer: {
    // A member pointer involves both the class and the pointee; merge them.
    const MemberPointerType *MPT = cast<MemberPointerType>(T);
    return merge(Cache::get(MPT->getClass()),
                 Cache::get(MPT->getPointeeType()));
  }
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    return Cache::get(cast<ArrayType>(T)->getElementType());
  case Type::Vector:
  case Type::ExtVector:
    return Cache::get(cast<VectorType>(T)->getElementType());
  case Type::FunctionNoProto:
    return Cache::get(cast<FunctionType>(T)->getReturnType());
  case Type::FunctionProto: {
    // A prototype's properties merge the return type with every parameter.
    const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
    CachedProperties result = Cache::get(FPT->getReturnType());
    for (const auto &ai : FPT->param_types())
      result = merge(result, Cache::get(ai));
    return result;
  }
  case Type::ObjCInterface: {
    Linkage L = cast<ObjCInterfaceType>(T)->getDecl()->getLinkageInternal();
    return CachedProperties(L, false);
  }
  case Type::ObjCObject:
    return Cache::get(cast<ObjCObjectType>(T)->getBaseType());
  case Type::ObjCObjectPointer:
    return Cache::get(cast<ObjCObjectPointerType>(T)->getPointeeType());
  case Type::Atomic:
    return Cache::get(cast<AtomicType>(T)->getValueType());
  }
  llvm_unreachable("unhandled type class");
}
/// \brief Determine the linkage of this type.
Linkage Type::getLinkage() const {
  // Populate the per-type cache on first use, then read the cached bit.
  Cache::ensure(this);
  return TypeBits.getLinkage();
}
/// Whether this type involves a local or unnamed type (cached alongside
/// linkage).
bool Type::hasUnnamedOrLocalType() const {
  Cache::ensure(this);
  return TypeBits.hasLocalOrUnnamedType();
}
static LinkageInfo computeLinkageInfo(QualType T);
/// Compute combined linkage *and visibility* for a canonical type.  The
/// recursion mirrors computeCachedProperties above; keep the two switches
/// in sync when adding type classes.
static LinkageInfo computeLinkageInfo(const Type *T) {
  switch (T->getTypeClass()) {
#define TYPE(Class,Base)
#define NON_CANONICAL_TYPE(Class,Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("didn't expect a non-canonical type here");
#define TYPE(Class,Base)
#define DEPENDENT_TYPE(Class,Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    // Treat instantiation-dependent types as external.
    assert(T->isInstantiationDependentType());
    return LinkageInfo::external();
  case Type::Builtin:
    return LinkageInfo::external();
  case Type::Auto:
    return LinkageInfo::external();
  case Type::Record:
  case Type::Enum:
    return cast<TagType>(T)->getDecl()->getLinkageAndVisibility();
  case Type::Complex:
    return computeLinkageInfo(cast<ComplexType>(T)->getElementType());
  case Type::Pointer:
    return computeLinkageInfo(cast<PointerType>(T)->getPointeeType());
  case Type::BlockPointer:
    return computeLinkageInfo(cast<BlockPointerType>(T)->getPointeeType());
  case Type::LValueReference:
  case Type::RValueReference:
    return computeLinkageInfo(cast<ReferenceType>(T)->getPointeeType());
  case Type::MemberPointer: {
    const MemberPointerType *MPT = cast<MemberPointerType>(T);
    LinkageInfo LV = computeLinkageInfo(MPT->getClass());
    LV.merge(computeLinkageInfo(MPT->getPointeeType()));
    return LV;
  }
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    return computeLinkageInfo(cast<ArrayType>(T)->getElementType());
  case Type::Vector:
  case Type::ExtVector:
    return computeLinkageInfo(cast<VectorType>(T)->getElementType());
  case Type::FunctionNoProto:
    return computeLinkageInfo(cast<FunctionType>(T)->getReturnType());
  case Type::FunctionProto: {
    // Merge the return type's linkage with every parameter's.
    const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
    LinkageInfo LV = computeLinkageInfo(FPT->getReturnType());
    for (const auto &ai : FPT->param_types())
      LV.merge(computeLinkageInfo(ai));
    return LV;
  }
  case Type::ObjCInterface:
    return cast<ObjCInterfaceType>(T)->getDecl()->getLinkageAndVisibility();
  case Type::ObjCObject:
    return computeLinkageInfo(cast<ObjCObjectType>(T)->getBaseType());
  case Type::ObjCObjectPointer:
    return computeLinkageInfo(cast<ObjCObjectPointerType>(T)->getPointeeType());
  case Type::Atomic:
    return computeLinkageInfo(cast<AtomicType>(T)->getValueType());
  }
  llvm_unreachable("unhandled type class");
}
static LinkageInfo computeLinkageInfo(QualType T) {
  return computeLinkageInfo(T.getTypePtr());
}
/// Sanity check: the cached linkage (if any) must agree with a fresh
/// computation on the canonical type.
bool Type::isLinkageValid() const {
  if (!TypeBits.isCacheValid())
    return true;
  return computeLinkageInfo(getCanonicalTypeInternal()).getLinkage() ==
    TypeBits.getLinkage();
}
LinkageInfo Type::getLinkageAndVisibility() const {
  // Visibility is not cached, so always compute; non-canonical types defer
  // to their canonical type.
  if (!isCanonicalUnqualified())
    return computeLinkageInfo(getCanonicalTypeInternal());
  LinkageInfo LV = computeLinkageInfo(this);
  assert(LV.getLinkage() == getLinkage());
  return LV;
}
Optional<NullabilityKind> Type::getNullability(const ASTContext &context) const {
  // Walk the sugar from the outside in, looking for an AttributedType
  // carrying nullability; stop once single-step desugaring reaches a
  // fixed point.
  QualType cur(this, 0);
  for (;;) {
    if (const auto *attr = dyn_cast<AttributedType>(cur.getTypePtr()))
      if (auto nullability = attr->getImmediateNullability())
        return nullability;

    QualType next = cur.getSingleStepDesugaredType(context);
    if (next.getTypePtr() == cur.getTypePtr())
      return None;
    cur = next;
  }
}
/// Whether a nullability qualifier may be applied to this type: true for
/// pointer-like types and for dependent types that could instantiate to
/// pointers, false for everything else.
bool Type::canHaveNullability() const {
  QualType type = getCanonicalTypeInternal();
  switch (type->getTypeClass()) {
  // We'll only see canonical types here.
#define NON_CANONICAL_TYPE(Class, Parent)       \
  case Type::Class:                             \
    llvm_unreachable("non-canonical type");
#define TYPE(Class, Parent)
#include "clang/AST/TypeNodes.def"
  // Pointer types.
  case Type::Pointer:
  case Type::BlockPointer:
  case Type::MemberPointer:
  case Type::ObjCObjectPointer:
    return true;
  // Dependent types that could instantiate to pointer types.
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::DependentName:
  case Type::DependentTemplateSpecialization:
    return true;
  // Dependent template specializations can instantiate to pointer
  // types unless they're known to be specializations of a class
  // template.
  case Type::TemplateSpecialization:
    if (TemplateDecl *templateDecl
          = cast<TemplateSpecializationType>(type.getTypePtr())
            ->getTemplateName().getAsTemplateDecl()) {
      if (isa<ClassTemplateDecl>(templateDecl))
        return false;
    }
    return true;
  // auto is considered dependent when it isn't deduced.
  case Type::Auto:
    return !cast<AutoType>(type.getTypePtr())->isDeduced();
  case Type::Builtin:
    switch (cast<BuiltinType>(type.getTypePtr())->getKind()) {
      // Signed, unsigned, and floating-point types cannot have nullability.
#define SIGNED_TYPE(Id, SingletonId) case BuiltinType::Id: LLVM_C_FALLTHROUGH;
#define UNSIGNED_TYPE(Id, SingletonId) case BuiltinType::Id: LLVM_C_FALLTHROUGH;
#define FLOATING_TYPE(Id, SingletonId) case BuiltinType::Id:
#define BUILTIN_TYPE(Id, SingletonId)
#include "clang/AST/BuiltinTypes.def"
      return false;
    // Dependent types that could instantiate to a pointer type.
    case BuiltinType::Dependent:
    case BuiltinType::Overload:
    case BuiltinType::BoundMember:
    case BuiltinType::PseudoObject:
    case BuiltinType::UnknownAny:
    case BuiltinType::ARCUnbridgedCast:
      return true;
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
    case BuiltinType::OCLImage1d:
    case BuiltinType::OCLImage1dArray:
    case BuiltinType::OCLImage1dBuffer:
    case BuiltinType::OCLImage2d:
    case BuiltinType::OCLImage2dArray:
    case BuiltinType::OCLImage3d:
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::BuiltinFn:
    case BuiltinType::NullPtr:
      return false;
    }
  // Non-pointer types.
  case Type::Complex:
  case Type::LValueReference:
  case Type::RValueReference:
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::DependentSizedArray:
  case Type::DependentSizedExtVector:
  case Type::Vector:
  case Type::ExtVector:
  case Type::FunctionProto:
  case Type::FunctionNoProto:
  case Type::Record:
  case Type::Enum:
  case Type::InjectedClassName:
  case Type::PackExpansion:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::Atomic:
    return false;
  }
  llvm_unreachable("bad type kind!");
}
llvm::Optional<NullabilityKind> AttributedType::getImmediateNullability() const {
  // Only the three nullability attributes map to a NullabilityKind; all
  // other attribute kinds carry no nullability information.
  switch (getAttrKind()) {
  case AttributedType::attr_nonnull:
    return NullabilityKind::NonNull;
  case AttributedType::attr_nullable:
    return NullabilityKind::Nullable;
  case AttributedType::attr_null_unspecified:
    return NullabilityKind::Unspecified;
  default:
    return None;
  }
}
Optional<NullabilityKind> AttributedType::stripOuterNullability(QualType &T) {
  // If the outermost sugar is a nullability attribute, peel it off and
  // report the nullability it carried; otherwise leave T untouched.
  const auto *attr = dyn_cast<AttributedType>(T.getTypePtr());
  if (!attr)
    return None;
  auto nullability = attr->getImmediateNullability();
  if (!nullability)
    return None;
  T = attr->getModifiedType();
  return nullability;
}
bool Type::isBlockCompatibleObjCPointerType(ASTContext &ctx) const {
  // A block may be stored in 'id', in an NSObject pointer, or in a
  // qualified-id whose protocols are limited to NSObject and NSCopying.
  const ObjCObjectPointerType *ptrTy = getAs<ObjCObjectPointerType>();
  if (!ptrTy)
    return false;
  // Plain 'id' always accepts a block.
  if (ptrTy->isObjCIdType())
    return true;
  if (ObjCInterfaceDecl *iface = ptrTy->getInterfaceDecl()) {
    // Blocks are NSObjects, so only the NSObject interface qualifies;
    // protocol qualifiers are still checked below.
    if (iface->getIdentifier() != ctx.getNSObjectName())
      return false;
  } else if (!ptrTy->isObjCQualifiedIdType()) {
    return false;
  }
  // Every listed protocol must be one that blocks conform to.
  for (ObjCProtocolDecl *proto : ptrTy->quals()) {
    if (proto->getIdentifier() != ctx.getNSObjectName() &&
        proto->getIdentifier() != ctx.getNSCopyingName())
      return false;
  }
  return true;
}
Qualifiers::ObjCLifetime Type::getObjCARCImplicitLifetime() const {
  // Implicitly-unretained types get no implicit ownership; everything else
  // defaults to __strong.
  return isObjCARCImplicitlyUnretainedType() ? Qualifiers::OCL_ExplicitNone
                                             : Qualifiers::OCL_Strong;
}
bool Type::isObjCARCImplicitlyUnretainedType() const {
assert(isObjCLifetimeType() &&
"cannot query implicit lifetime for non-inferrable type");
const Type *canon = getCanonicalTypeInternal().getTypePtr();
// Walk down to the base type. We don't care about qualifiers for this.
while (const ArrayType *array = dyn_cast<ArrayType>(canon))
canon = array->getElementType().getTypePtr();
if (const ObjCObjectPointerType *opt
= dyn_cast<ObjCObjectPointerType>(canon)) {
// Class and Class<Protocol> don't require retension.
if (opt->getObjectType()->isObjCClass())
return true;
}
return false;
}
bool Type::isObjCNSObjectType() const {
if (const TypedefType *typedefType = dyn_cast<TypedefType>(this))
return typedefType->getDecl()->hasAttr<ObjCNSObjectAttr>();
return false;
}
bool Type::isObjCIndependentClassType() const {
if (const TypedefType *typedefType = dyn_cast<TypedefType>(this))
return typedefType->getDecl()->hasAttr<ObjCIndependentClassAttr>();
return false;
}
bool Type::isObjCRetainableType() const {
  // Retainable types: ObjC object pointers, block pointers, and typedefs
  // marked with the objc_nsobject attribute.
  if (isObjCObjectPointerType())
    return true;
  if (isBlockPointerType())
    return true;
  return isObjCNSObjectType();
}
bool Type::isObjCIndirectLifetimeType() const {
if (isObjCLifetimeType())
return true;
if (const PointerType *OPT = getAs<PointerType>())
return OPT->getPointeeType()->isObjCIndirectLifetimeType();
if (const ReferenceType *Ref = getAs<ReferenceType>())
return Ref->getPointeeType()->isObjCIndirectLifetimeType();
if (const MemberPointerType *MemPtr = getAs<MemberPointerType>())
return MemPtr->getPointeeType()->isObjCIndirectLifetimeType();
return false;
}
/// Returns true if objects of this type have lifetime semantics under
/// ARC.
bool Type::isObjCLifetimeType() const {
const Type *type = this;
while (const ArrayType *array = type->getAsArrayTypeUnsafe())
type = array->getElementType().getTypePtr();
return type->isObjCRetainableType();
}
/// \brief Determine whether the given type T is a "bridgable" Objective-C type,
/// which is either an Objective-C object pointer type or a block pointer type.
bool Type::isObjCARCBridgableType() const {
  return isObjCObjectPointerType() || isBlockPointerType();
}
/// \brief Determine whether the given type T is a "bridgeable" C type.
bool Type::isCARCBridgableType() const {
const PointerType *Pointer = getAs<PointerType>();
if (!Pointer)
return false;
QualType Pointee = Pointer->getPointeeType();
return Pointee->isVoidType() || Pointee->isRecordType();
}
bool Type::hasSizedVLAType() const {
if (!isVariablyModifiedType()) return false;
if (const PointerType *ptr = getAs<PointerType>())
return ptr->getPointeeType()->hasSizedVLAType();
if (const ReferenceType *ref = getAs<ReferenceType>())
return ref->getPointeeType()->hasSizedVLAType();
if (const ArrayType *arr = getAsArrayTypeUnsafe()) {
if (isa<VariableArrayType>(arr) &&
cast<VariableArrayType>(arr)->getSizeExpr())
return true;
return arr->getElementType()->hasSizedVLAType();
}
return false;
}
QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) {
  // ARC ownership qualifiers can impose destruction semantics of their own.
  switch (type.getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    return DK_objc_strong_lifetime;
  case Qualifiers::OCL_Weak:
    return DK_objc_weak_lifetime;
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
  case Qualifiers::OCL_Autoreleasing:
    break;
  }
  // Otherwise the only destruction kind we recognize is a C++ class type
  // (looking through arrays) with a non-trivial destructor.
  const CXXRecordDecl *record =
      type->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
  if (record && record->hasDefinition() && !record->hasTrivialDestructor())
    return DK_cxx_destructor;
  return DK_none;
}
CXXRecordDecl *MemberPointerType::getMostRecentCXXRecordDecl() const {
  // The class of a member pointer is a C++ record; hand back its most
  // recent redeclaration.
  CXXRecordDecl *record = getClass()->getAsCXXRecordDecl();
  return record->getMostRecentDecl();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/ASTContextHLSL.cpp | //===--- ASTContextHLSL.cpp - HLSL support for AST nodes and operations ---===//
///////////////////////////////////////////////////////////////////////////////
// //
// ASTContextHLSL.cpp //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
// This file implements the ASTContext interface for HLSL. //
// //
///////////////////////////////////////////////////////////////////////////////
#include "dxc/DXIL/DxilSemantic.h"
#include "dxc/HLSL/HLOperations.h"
#include "dxc/HlslIntrinsicOp.h"
#include "dxc/Support/Global.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/HlslBuiltinTypeDeclBuilder.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Sema/Overload.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaDiagnostic.h"
using namespace clang;
using namespace hlsl;
// Named constants used when constructing AST nodes so call sites read as
// self-describing arguments rather than bare literals.
static const int FirstTemplateDepth = 0;
static const int FirstParamPosition = 0;
static const bool ForConstFalse =
    false; // a construct is not targeting a const type
static const bool ForConstTrue =
    true; // a construct is targeting a const type
static const bool ParameterPackFalse =
    false; // template parameter is not an ellipsis.
static const bool TypenameFalse =
    false; // 'class' specified rather than 'typename' for a template argument.
static const bool DelayTypeCreationTrue =
    true; // delay type creation for a declaration
static const SourceLocation NoLoc; // no source location attribution available
static const bool InlineFalse = false; // namespace is not an inline namespace
static const bool InlineSpecifiedFalse =
    false; // function was not specified as inline
static const bool ExplicitFalse =
    false; // constructor was not specified as explicit
static const bool IsConstexprFalse = false; // function is not constexpr
static const bool VirtualFalse =
    false; // the base class is not declared 'virtual'
static const bool BaseClassFalse =
    false; // whether the base class is declared as 'class' (vs. 'struct')
/// <summary>Names of HLSLScalarType enumeration values, in matching order to
/// HLSLScalarType.</summary>
const char *HLSLScalarTypeNames[] = {
    "<unknown>", "bool", "int", "uint",
    "dword", "half", "float", "double",
    "min10float", "min16float", "min12int", "min16int",
    "min16uint", "literal float", "literal int", "int16_t",
    "int32_t", "int64_t", "uint16_t", "uint32_t",
    "uint64_t", "float16_t", "float32_t", "float64_t",
    "int8_t4_packed", "uint8_t4_packed"};
// Keep the table above in lockstep with the HLSLScalarType enum; any drift
// is caught at compile time here.
static_assert(HLSLScalarTypeCount == _countof(HLSLScalarTypeNames),
              "otherwise scalar constants are not aligned");
// Maps a scalar type-name spelling (not necessarily null-terminated; typeLen
// characters) to its HLSLScalarType, or returns HLSLScalarType_unknown when
// no scalar type matches. Dispatch is on length first, then on one or two
// leading/interior characters as a cheap prefilter; strncmp always validates
// the full spelling before a match is returned.
static HLSLScalarType FindScalarTypeByName(const char *typeName,
                                           const size_t typeLen,
                                           const LangOptions &langOptions) {
  // skipped HLSLScalarType: unknown, literal int, literal float
  switch (typeLen) {
  case 3: // int
    if (typeName[0] == 'i') {
      if (strncmp(typeName, "int", 3))
        break;
      return HLSLScalarType_int;
    }
    break;
  case 4: // bool, uint, half
    if (typeName[0] == 'b') {
      if (strncmp(typeName, "bool", 4))
        break;
      return HLSLScalarType_bool;
    } else if (typeName[0] == 'u') {
      if (strncmp(typeName, "uint", 4))
        break;
      return HLSLScalarType_uint;
    } else if (typeName[0] == 'h') {
      if (strncmp(typeName, "half", 4))
        break;
      return HLSLScalarType_half;
    }
    break;
  case 5: // dword, float
    if (typeName[0] == 'd') {
      if (strncmp(typeName, "dword", 5))
        break;
      return HLSLScalarType_dword;
    } else if (typeName[0] == 'f') {
      if (strncmp(typeName, "float", 5))
        break;
      return HLSLScalarType_float;
    }
    break;
  case 6: // double
    if (typeName[0] == 'd') {
      if (strncmp(typeName, "double", 6))
        break;
      return HLSLScalarType_double;
    }
    break;
  case 7: // int64_t
    if (typeName[0] == 'i' && typeName[1] == 'n') {
      if (typeName[3] == '6') {
        if (strncmp(typeName, "int64_t", 7))
          break;
        return HLSLScalarType_int64;
      }
    }
    break;
  case 8: // min12int, min16int, uint64_t
    if (typeName[0] == 'm' && typeName[1] == 'i') {
      // Disambiguate min12int / min16int on the digit after "min1".
      if (typeName[4] == '2') {
        if (strncmp(typeName, "min12int", 8))
          break;
        return HLSLScalarType_int_min12;
      } else if (typeName[4] == '6') {
        if (strncmp(typeName, "min16int", 8))
          break;
        return HLSLScalarType_int_min16;
      }
    } else if (typeName[0] == 'u' && typeName[1] == 'i') {
      if (typeName[4] == '6') {
        if (strncmp(typeName, "uint64_t", 8))
          break;
        return HLSLScalarType_uint64;
      }
    }
    break;
  case 9: // min16uint
    if (typeName[0] == 'm' && typeName[1] == 'i') {
      if (strncmp(typeName, "min16uint", 9))
        break;
      return HLSLScalarType_uint_min16;
    }
    break;
  case 10: // min10float, min16float
    if (typeName[0] == 'm' && typeName[1] == 'i') {
      if (typeName[4] == '0') {
        if (strncmp(typeName, "min10float", 10))
          break;
        return HLSLScalarType_float_min10;
      }
      if (typeName[4] == '6') {
        if (strncmp(typeName, "min16float", 10))
          break;
        return HLSLScalarType_float_min16;
      }
    }
    break;
  case 14: // int8_t4_packed
    if (typeName[0] == 'i' && typeName[1] == 'n') {
      if (strncmp(typeName, "int8_t4_packed", 14))
        break;
      return HLSLScalarType_int8_4packed;
    }
    break;
  case 15: // uint8_t4_packed
    if (typeName[0] == 'u' && typeName[1] == 'i') {
      if (strncmp(typeName, "uint8_t4_packed", 15))
        break;
      return HLSLScalarType_uint8_4packed;
    }
    break;
  default:
    break;
  }
  // fixed width types (int16_t, uint16_t, int32_t, uint32_t, float16_t,
  // float32_t, float64_t) are only supported in HLSL 2018
  // Note: the 16-bit spellings are additionally gated on !UseMinPrecision;
  // the 32/64-bit spellings are available either way.
  if (langOptions.HLSLVersion >= hlsl::LangStd::v2018) {
    switch (typeLen) {
    case 7: // int16_t, int32_t
      if (typeName[0] == 'i' && typeName[1] == 'n') {
        if (!langOptions.UseMinPrecision) {
          if (typeName[3] == '1') {
            if (strncmp(typeName, "int16_t", 7))
              break;
            return HLSLScalarType_int16;
          }
        }
        if (typeName[3] == '3') {
          if (strncmp(typeName, "int32_t", 7))
            break;
          return HLSLScalarType_int32;
        }
      }
      break;
    case 8: // uint16_t, uint32_t
      if (!langOptions.UseMinPrecision) {
        if (typeName[0] == 'u' && typeName[1] == 'i') {
          if (typeName[4] == '1') {
            if (strncmp(typeName, "uint16_t", 8))
              break;
            return HLSLScalarType_uint16;
          }
        }
      }
      // NOTE(review): this check sits outside the "ui"-prefix guard above
      // (unlike the int32_t case); behavior is still correct because
      // strncmp validates the full name, and it keeps uint32_t available
      // under min-precision mode — confirm the asymmetry is intentional.
      if (typeName[4] == '3') {
        if (strncmp(typeName, "uint32_t", 8))
          break;
        return HLSLScalarType_uint32;
      }
      break;
    case 9: // float16_t, float32_t, float64_t
      if (typeName[0] == 'f' && typeName[1] == 'l') {
        if (!langOptions.UseMinPrecision) {
          if (typeName[5] == '1') {
            if (strncmp(typeName, "float16_t", 9))
              break;
            return HLSLScalarType_float16;
          }
        }
        if (typeName[5] == '3') {
          if (strncmp(typeName, "float32_t", 9))
            break;
          return HLSLScalarType_float32;
        } else if (typeName[5] == '6') {
          if (strncmp(typeName, "float64_t", 9))
            break;
          return HLSLScalarType_float64;
        }
      }
    }
  }
  return HLSLScalarType_unknown;
}
/// <summary>Provides the primitive type used for the placeholder handle
/// field ('h') of HLSL object record types.</summary>
/// Note: the handle is modeled as a plain int in the AST; lowering gives it
/// real meaning later.
static CanQualType GetHLSLObjectHandleType(ASTContext &context) {
  return context.IntTy;
}
// Declares an operator[] on the matrix template record that returns an
// lvalue reference to vector<element, col_count> (const-qualified when
// forConst). The return type is a dependent specialization of the vector
// template, resolved only at instantiation time.
static void
AddSubscriptOperator(ASTContext &context, unsigned int templateDepth,
                     TemplateTypeParmDecl *elementTemplateParamDecl,
                     NonTypeTemplateParmDecl *colCountTemplateParamDecl,
                     QualType intType, CXXRecordDecl *templateRecordDecl,
                     ClassTemplateDecl *vectorTemplateDecl, bool forConst) {
  // Dependent reference to the 'element' template parameter.
  QualType elementType = context.getTemplateTypeParmType(
      templateDepth, 0, ParameterPackFalse, elementTemplateParamDecl);
  // Dependent reference to the 'col_count' template parameter, used as the
  // vector size argument.
  Expr *sizeExpr = DeclRefExpr::Create(
      context, NestedNameSpecifierLoc(), NoLoc, colCountTemplateParamDecl,
      false,
      DeclarationNameInfo(colCountTemplateParamDecl->getDeclName(), NoLoc),
      intType, ExprValueKind::VK_RValue);
  CXXRecordDecl *vecTemplateRecordDecl = vectorTemplateDecl->getTemplatedDecl();
  const clang::Type *vecTy = vecTemplateRecordDecl->getTypeForDecl();
  // Build vector<element, col_count> as a template specialization type.
  TemplateArgument templateArgs[2] = {TemplateArgument(elementType),
                                      TemplateArgument(sizeExpr)};
  TemplateName canonName =
      context.getCanonicalTemplateName(TemplateName(vectorTemplateDecl));
  QualType vectorType = context.getTemplateSpecializationType(
      canonName, templateArgs, _countof(templateArgs), QualType(vecTy, 0));
  vectorType = context.getLValueReferenceType(vectorType);
  if (forConst)
    vectorType = context.getConstType(vectorType);
  QualType indexType = intType;
  // operator[](index) — registered as const/non-const per forConst.
  auto methodDecl = CreateObjectFunctionDeclarationWithParams(
      context, templateRecordDecl, vectorType, ArrayRef<QualType>(indexType),
      ArrayRef<StringRef>(StringRef("index")),
      context.DeclarationNames.getCXXOperatorName(OO_Subscript), forConst);
  methodDecl->addAttr(HLSLCXXOverloadAttr::CreateImplicit(context));
}
/// <summary>Adds up-front support for HLSL matrix types (just the template
/// declaration).</summary>
/// Requires the vector template (from AddHLSLVectorTemplate) so operator[]
/// can return a row vector; writes the new template to *matrixTemplateDecl.
void hlsl::AddHLSLMatrixTemplate(ASTContext &context,
                                 ClassTemplateDecl *vectorTemplateDecl,
                                 ClassTemplateDecl **matrixTemplateDecl) {
  DXASSERT_NOMSG(matrixTemplateDecl != nullptr);
  DXASSERT_NOMSG(vectorTemplateDecl != nullptr);
  // Create a matrix template declaration in translation unit scope.
  // template<typename element, int row_count, int col_count> matrix { ... }
  BuiltinTypeDeclBuilder typeDeclBuilder(context.getTranslationUnitDecl(),
                                         "matrix");
  TemplateTypeParmDecl *elementTemplateParamDecl =
      typeDeclBuilder.addTypeTemplateParam("element",
                                           (QualType)context.FloatTy);
  NonTypeTemplateParmDecl *rowCountTemplateParamDecl =
      typeDeclBuilder.addIntegerTemplateParam("row_count", context.IntTy, 4);
  NonTypeTemplateParmDecl *colCountTemplateParamDecl =
      typeDeclBuilder.addIntegerTemplateParam("col_count", context.IntTy, 4);
  typeDeclBuilder.startDefinition();
  CXXRecordDecl *templateRecordDecl = typeDeclBuilder.getRecordDecl();
  ClassTemplateDecl *classTemplateDecl = typeDeclBuilder.getTemplateDecl();
  // Add an 'h' field to hold the handle.
  // The type is vector<element, col>[row].
  QualType elementType = context.getTemplateTypeParmType(
      /*templateDepth*/ 0, 0, ParameterPackFalse, elementTemplateParamDecl);
  // sizeExpr references row_count: it becomes the array extent below.
  Expr *sizeExpr = DeclRefExpr::Create(
      context, NestedNameSpecifierLoc(), NoLoc, rowCountTemplateParamDecl,
      false,
      DeclarationNameInfo(rowCountTemplateParamDecl->getDeclName(), NoLoc),
      context.IntTy, ExprValueKind::VK_RValue);
  // rowSizeExpr references col_count: despite the name, it is the width of
  // each row vector.
  Expr *rowSizeExpr = DeclRefExpr::Create(
      context, NestedNameSpecifierLoc(), NoLoc, colCountTemplateParamDecl,
      false,
      DeclarationNameInfo(colCountTemplateParamDecl->getDeclName(), NoLoc),
      context.IntTy, ExprValueKind::VK_RValue);
  QualType vectorType = context.getDependentSizedExtVectorType(
      elementType, rowSizeExpr, SourceLocation());
  QualType vectorArrayType = context.getDependentSizedArrayType(
      vectorType, sizeExpr, ArrayType::Normal, 0, SourceRange());
  typeDeclBuilder.addField("h", vectorArrayType);
  // Add an operator[]. The operator ranges from zero to rowcount-1, and returns
  // a vector of colcount elements.
  const unsigned int templateDepth = 0;
  AddSubscriptOperator(context, templateDepth, elementTemplateParamDecl,
                       colCountTemplateParamDecl, context.UnsignedIntTy,
                       templateRecordDecl, vectorTemplateDecl, ForConstFalse);
  AddSubscriptOperator(context, templateDepth, elementTemplateParamDecl,
                       colCountTemplateParamDecl, context.UnsignedIntTy,
                       templateRecordDecl, vectorTemplateDecl, ForConstTrue);
  typeDeclBuilder.completeDefinition();
  *matrixTemplateDecl = classTemplateDecl;
}
// Tags 'D' (a vector operator[]) with the HL intrinsic attribute for the
// HLSubscript group's VectorSubscript opcode, plus the C++ overload marker.
static void AddHLSLVectorSubscriptAttr(Decl *D, ASTContext &context) {
  const unsigned opcode =
      static_cast<unsigned>(HLSubscriptOpcode::VectorSubscript);
  StringRef opcodeGroup = GetHLOpcodeGroupName(HLOpcodeGroup::HLSubscript);
  D->addAttr(
      HLSLIntrinsicAttr::CreateImplicit(context, opcodeGroup, "", opcode));
  D->addAttr(HLSLCXXOverloadAttr::CreateImplicit(context));
}
/// <summary>Adds up-front support for HLSL vector types (just the template
/// declaration).</summary>
/// Writes the new template to *vectorTemplateDecl.
void hlsl::AddHLSLVectorTemplate(ASTContext &context,
                                 ClassTemplateDecl **vectorTemplateDecl) {
  DXASSERT_NOMSG(vectorTemplateDecl != nullptr);
  // Create a vector template declaration in translation unit scope.
  // template<typename element, int element_count> vector { ... }
  BuiltinTypeDeclBuilder typeDeclBuilder(context.getTranslationUnitDecl(),
                                         "vector");
  TemplateTypeParmDecl *elementTemplateParamDecl =
      typeDeclBuilder.addTypeTemplateParam("element",
                                           (QualType)context.FloatTy);
  NonTypeTemplateParmDecl *colCountTemplateParamDecl =
      typeDeclBuilder.addIntegerTemplateParam("element_count", context.IntTy,
                                              4);
  typeDeclBuilder.startDefinition();
  CXXRecordDecl *templateRecordDecl = typeDeclBuilder.getRecordDecl();
  ClassTemplateDecl *classTemplateDecl = typeDeclBuilder.getTemplateDecl();
  // Dependent reference to 'element_count', used as the ext-vector size.
  Expr *vecSizeExpr = DeclRefExpr::Create(
      context, NestedNameSpecifierLoc(), NoLoc, colCountTemplateParamDecl,
      false,
      DeclarationNameInfo(colCountTemplateParamDecl->getDeclName(), NoLoc),
      context.IntTy, ExprValueKind::VK_RValue);
  const unsigned int templateDepth = 0;
  QualType resultType = context.getTemplateTypeParmType(
      templateDepth, 0, ParameterPackFalse, elementTemplateParamDecl);
  QualType vectorType = context.getDependentSizedExtVectorType(
      resultType, vecSizeExpr, SourceLocation());
  // Add an 'h' field to hold the handle.
  typeDeclBuilder.addField("h", vectorType);
  // Add an operator[]. The operator ranges from zero to colcount-1, and returns
  // a scalar.
  // ForConstTrue:
  // NOTE(review): const is applied to the reference type itself rather than
  // the referent — confirm this is the intended const-overload shape.
  QualType refResultType =
      context.getConstType(context.getLValueReferenceType(resultType));
  CXXMethodDecl *functionDecl = CreateObjectFunctionDeclarationWithParams(
      context, templateRecordDecl, refResultType,
      ArrayRef<QualType>(context.UnsignedIntTy),
      ArrayRef<StringRef>(StringRef("index")),
      context.DeclarationNames.getCXXOperatorName(OO_Subscript), ForConstTrue);
  AddHLSLVectorSubscriptAttr(functionDecl, context);
  // ForConstFalse:
  resultType = context.getLValueReferenceType(resultType);
  functionDecl = CreateObjectFunctionDeclarationWithParams(
      context, templateRecordDecl, resultType,
      ArrayRef<QualType>(context.UnsignedIntTy),
      ArrayRef<StringRef>(StringRef("index")),
      context.DeclarationNames.getCXXOperatorName(OO_Subscript), ForConstFalse);
  AddHLSLVectorSubscriptAttr(functionDecl, context);
  typeDeclBuilder.completeDefinition();
  *vectorTemplateDecl = classTemplateDecl;
}
// Declares an element accessor on RD: a 'Get' method when IsGetOrSubscript
// is true, otherwise an operator[]. The accessor returns an lvalue reference
// to ReturnTy (const-qualified when IsConst) and, when IsArray is set, takes
// a uint 'Index' parameter; Get's index additionally defaults to 0. The
// method is tagged as an HL DefaultSubscript intrinsic.
static void AddRecordAccessMethod(clang::ASTContext &Ctx,
                                  clang::CXXRecordDecl *RD,
                                  clang::QualType ReturnTy,
                                  bool IsGetOrSubscript, bool IsConst,
                                  bool IsArray) {
  DeclarationName DeclName =
      IsGetOrSubscript ? DeclarationName(&Ctx.Idents.get("Get"))
                       : Ctx.DeclarationNames.getCXXOperatorName(OO_Subscript);
  if (IsConst)
    ReturnTy.addConst();
  ReturnTy = Ctx.getLValueReferenceType(ReturnTy);
  // Only array-style accessors take an index parameter.
  QualType ArgTypes[] = {Ctx.UnsignedIntTy};
  ArrayRef<QualType> Types = IsArray ? ArgTypes : ArrayRef<QualType>();
  StringRef ArgNames[] = {"Index"};
  ArrayRef<StringRef> Names = IsArray ? ArgNames : ArrayRef<StringRef>();
  CXXMethodDecl *MethodDecl = CreateObjectFunctionDeclarationWithParams(
      Ctx, RD, ReturnTy, Types, Names, DeclName, IsConst);
  // Get(Index) defaults its index to 0 so Get() works on arrays too.
  if (IsGetOrSubscript && IsArray) {
    ParmVarDecl *IndexParam = MethodDecl->getParamDecl(0);
    Expr *ConstantZero = IntegerLiteral::Create(
        Ctx, llvm::APInt(Ctx.getIntWidth(Ctx.UnsignedIntTy), 0),
        Ctx.UnsignedIntTy, NoLoc);
    IndexParam->setDefaultArg(ConstantZero);
  }
  StringRef OpcodeGroup = GetHLOpcodeGroupName(HLOpcodeGroup::HLSubscript);
  unsigned Opcode = static_cast<unsigned>(HLSubscriptOpcode::DefaultSubscript);
  MethodDecl->addAttr(
      HLSLIntrinsicAttr::CreateImplicit(Ctx, OpcodeGroup, "", Opcode));
  MethodDecl->addAttr(HLSLCXXOverloadAttr::CreateImplicit(Ctx));
}
// Declares Get() accessors on RD returning a reference to ReturnTy; the
// non-const overload is omitted when IsConstOnly is set.
static void AddRecordGetMethods(clang::ASTContext &Ctx,
                                clang::CXXRecordDecl *RD,
                                clang::QualType ReturnTy, bool IsConstOnly,
                                bool IsArray) {
  const bool IsGet = true;
  if (!IsConstOnly)
    AddRecordAccessMethod(Ctx, RD, ReturnTy, IsGet, /*IsConst*/ false, IsArray);
  AddRecordAccessMethod(Ctx, RD, ReturnTy, IsGet, /*IsConst*/ true, IsArray);
}
// Declares operator[] accessors on RD returning a reference to ReturnTy; the
// non-const overload is omitted when IsConstOnly is set.
static void AddRecordSubscriptAccess(clang::ASTContext &Ctx,
                                     clang::CXXRecordDecl *RD,
                                     clang::QualType ReturnTy,
                                     bool IsConstOnly) {
  const bool IsSubscript = false; // i.e. not the 'Get' flavor
  if (!IsConstOnly)
    AddRecordAccessMethod(Ctx, RD, ReturnTy, IsSubscript, /*IsConst*/ false,
                          /*IsArray*/ true);
  AddRecordAccessMethod(Ctx, RD, ReturnTy, IsSubscript, /*IsConst*/ true,
                        /*IsArray*/ true);
}
/// <summary>Adds up-front support for HLSL *NodeOutputRecords template
/// types.</summary>
/// Declares template<typename recordType> <name> { uint h; ... } with
/// operator[] and Get() accessors; the definition is left open when
/// isCompleteType is false. Writes the template to *outputRecordTemplateDecl.
void hlsl::AddHLSLNodeOutputRecordTemplate(
    ASTContext &context, DXIL::NodeIOKind Type,
    ClassTemplateDecl **outputRecordTemplateDecl,
    bool isCompleteType /*= true*/) {
  DXASSERT_NOMSG(outputRecordTemplateDecl != nullptr);
  StringRef templateName = HLSLNodeObjectAttr::ConvertRecordTypeToStr(Type);
  // Create a *NodeOutputRecords template declaration in translation unit scope.
  BuiltinTypeDeclBuilder typeDeclBuilder(context.getTranslationUnitDecl(),
                                         templateName,
                                         TagDecl::TagKind::TTK_Struct);
  TemplateTypeParmDecl *outputTemplateParamDecl =
      typeDeclBuilder.addTypeTemplateParam("recordType");
  typeDeclBuilder.startDefinition();
  ClassTemplateDecl *classTemplateDecl = typeDeclBuilder.getTemplateDecl();
  // Add an 'h' field to hold the handle.
  typeDeclBuilder.addField("h", GetHLSLObjectHandleType(context));
  // Mark the record with its node I/O kind for later lowering.
  typeDeclBuilder.getRecordDecl()->addAttr(
      HLSLNodeObjectAttr::CreateImplicit(context, Type));
  QualType elementType = context.getTemplateTypeParmType(
      0, 0, ParameterPackFalse, outputTemplateParamDecl);
  CXXRecordDecl *record = typeDeclBuilder.getRecordDecl();
  // Subscript operator is required for Node Array Types.
  AddRecordSubscriptAccess(context, record, elementType, false);
  AddRecordGetMethods(context, record, elementType, false, true);
  if (isCompleteType)
    typeDeclBuilder.completeDefinition();
  *outputRecordTemplateDecl = classTemplateDecl;
}
/// <summary>
/// Adds a new record type in the specified context with the given name. The
/// record type will have a handle field.
/// </summary>
CXXRecordDecl *
hlsl::DeclareRecordTypeWithHandleAndNoMemberFunctions(ASTContext &context,
                                                      StringRef name) {
  // struct <name> { int h; };  — declared at translation-unit scope.
  BuiltinTypeDeclBuilder builder(context.getTranslationUnitDecl(), name,
                                 TagDecl::TagKind::TTK_Struct);
  builder.startDefinition();
  builder.addField("h", GetHLSLObjectHandleType(context));
  builder.completeDefinition();
  return builder.getRecordDecl();
}
/// <summary>
/// Adds a new record type in the specified context with the given name. The
/// record type will have a handle field.
/// </summary>
CXXRecordDecl *
hlsl::DeclareRecordTypeWithHandle(ASTContext &context, StringRef name,
                                  bool isCompleteType /*= true */) {
  // struct <name> { int h; };  — optionally left as an open definition.
  BuiltinTypeDeclBuilder builder(context.getTranslationUnitDecl(), name,
                                 TagDecl::TagKind::TTK_Struct);
  builder.startDefinition();
  builder.addField("h", GetHLSLObjectHandleType(context));
  if (!isCompleteType)
    return builder.getRecordDecl();
  return builder.completeDefinition();
}
// Declares a static constant unsigned integer inside DC with the given
// name and initializer. Equivalent to: static const uint name = val;
static void AddConstUInt(clang::ASTContext &context, DeclContext *DC,
                         StringRef name, unsigned val) {
  QualType constUIntTy = context.getConstType(context.UnsignedIntTy);
  IdentifierInfo &ident = context.Idents.get(name, tok::TokenKind::identifier);
  VarDecl *decl =
      VarDecl::Create(context, DC, NoLoc, NoLoc, &ident, constUIntTy,
                      context.getTrivialTypeSourceInfo(constUIntTy),
                      clang::StorageClass::SC_Static);
  // Initialize with the literal value and mark implicit (compiler-injected).
  decl->setInit(IntegerLiteral::Create(
      context, llvm::APInt(context.getIntWidth(constUIntTy), val), constUIntTy,
      NoLoc));
  decl->setImplicit(true);
  DC->addDecl(decl);
}
// Convenience overload: declares the constant at translation-unit scope.
static void AddConstUInt(clang::ASTContext &context, StringRef name,
                         unsigned val) {
  DeclContext *tuDC = context.getTranslationUnitDecl();
  AddConstUInt(context, tuDC, name, val);
}
// One name/value pair of a pseudo-enum (see AddTypedefPseudoEnum below).
struct Enumerant {
  StringRef name; // enumerant identifier, e.g. "RAY_FLAG_NONE"
  unsigned value; // unsigned integer value bound to the identifier
};
// Declares a "pseudo enum" at translation-unit scope: a typedef of uint
// named 'name' plus one 'static const uint' per enumerant. HLSL models flag
// sets this way rather than as real C++ enums.
static void AddTypedefPseudoEnum(ASTContext &context, StringRef name,
                                 ArrayRef<Enumerant> enumerants) {
  DeclContext *curDC = context.getTranslationUnitDecl();
  // typedef uint <name>;
  IdentifierInfo &enumId = context.Idents.get(name, tok::TokenKind::identifier);
  TypeSourceInfo *uintTypeSource =
      context.getTrivialTypeSourceInfo(context.UnsignedIntTy, NoLoc);
  TypedefDecl *enumDecl = TypedefDecl::Create(context, curDC, NoLoc, NoLoc,
                                              &enumId, uintTypeSource);
  curDC->addDecl(enumDecl);
  enumDecl->setImplicit(true);
  // static const uint <enumerant.name> = <enumerant.value>;
  for (const Enumerant &enumerant : enumerants) {
    AddConstUInt(context, curDC, enumerant.name, enumerant.value);
  }
}
/// <summary> Adds all constants and enums for ray tracing </summary>
/// Declares the RAY_FLAG / COMMITTED_STATUS / CANDIDATE_TYPE pseudo-enums
/// and the HIT_KIND_*, STATE_OBJECT_FLAG*_ and RAYTRACING_PIPELINE_FLAG_*
/// constants at translation-unit scope, with values taken from the DXIL
/// enumerations.
void hlsl::AddRaytracingConstants(ASTContext &context) {
  AddTypedefPseudoEnum(
      context, "RAY_FLAG",
      {
          {"RAY_FLAG_NONE", (unsigned)DXIL::RayFlag::None},
          {"RAY_FLAG_FORCE_OPAQUE", (unsigned)DXIL::RayFlag::ForceOpaque},
          {"RAY_FLAG_FORCE_NON_OPAQUE",
           (unsigned)DXIL::RayFlag::ForceNonOpaque},
          {"RAY_FLAG_ACCEPT_FIRST_HIT_AND_END_SEARCH",
           (unsigned)DXIL::RayFlag::AcceptFirstHitAndEndSearch},
          {"RAY_FLAG_SKIP_CLOSEST_HIT_SHADER",
           (unsigned)DXIL::RayFlag::SkipClosestHitShader},
          {"RAY_FLAG_CULL_BACK_FACING_TRIANGLES",
           (unsigned)DXIL::RayFlag::CullBackFacingTriangles},
          {"RAY_FLAG_CULL_FRONT_FACING_TRIANGLES",
           (unsigned)DXIL::RayFlag::CullFrontFacingTriangles},
          {"RAY_FLAG_CULL_OPAQUE", (unsigned)DXIL::RayFlag::CullOpaque},
          {"RAY_FLAG_CULL_NON_OPAQUE", (unsigned)DXIL::RayFlag::CullNonOpaque},
          {"RAY_FLAG_SKIP_TRIANGLES", (unsigned)DXIL::RayFlag::SkipTriangles},
          {"RAY_FLAG_SKIP_PROCEDURAL_PRIMITIVES",
           (unsigned)DXIL::RayFlag::SkipProceduralPrimitives},
      });
  AddTypedefPseudoEnum(
      context, "COMMITTED_STATUS",
      {{"COMMITTED_NOTHING", (unsigned)DXIL::CommittedStatus::CommittedNothing},
       {"COMMITTED_TRIANGLE_HIT",
        (unsigned)DXIL::CommittedStatus::CommittedTriangleHit},
       {"COMMITTED_PROCEDURAL_PRIMITIVE_HIT",
        (unsigned)DXIL::CommittedStatus::CommittedProceduralPrimitiveHit}});
  AddTypedefPseudoEnum(
      context, "CANDIDATE_TYPE",
      {{"CANDIDATE_NON_OPAQUE_TRIANGLE",
        (unsigned)DXIL::CandidateType::CandidateNonOpaqueTriangle},
       {"CANDIDATE_PROCEDURAL_PRIMITIVE",
        (unsigned)DXIL::CandidateType::CandidateProceduralPrimitive}});
  // static const uint HIT_KIND_* = *;
  AddConstUInt(context, StringRef("HIT_KIND_NONE"),
               (unsigned)DXIL::HitKind::None);
  AddConstUInt(context, StringRef("HIT_KIND_TRIANGLE_FRONT_FACE"),
               (unsigned)DXIL::HitKind::TriangleFrontFace);
  AddConstUInt(context, StringRef("HIT_KIND_TRIANGLE_BACK_FACE"),
               (unsigned)DXIL::HitKind::TriangleBackFace);
  // NOTE(review): "DEFINITONS" (missing 'I') appears in both the legacy and
  // the corrected spellings below; these are public identifiers, so they are
  // preserved as-is for source compatibility.
  AddConstUInt(
      context,
      StringRef(
          "STATE_OBJECT_FLAGS_ALLOW_LOCAL_DEPENDENCIES_ON_EXTERNAL_DEFINITONS"),
      (unsigned)
          DXIL::StateObjectFlags::AllowLocalDependenciesOnExternalDefinitions);
  AddConstUInt(
      context,
      StringRef("STATE_OBJECT_FLAGS_ALLOW_EXTERNAL_DEPENDENCIES_ON_LOCAL_"
                "DEFINITIONS"),
      (unsigned)
          DXIL::StateObjectFlags::AllowExternalDependenciesOnLocalDefinitions);
  // The above "_FLAGS_" was a typo, leaving in to avoid breaking anyone.
  // Supposed to be _FLAG_ below.
  AddConstUInt(
      context,
      StringRef(
          "STATE_OBJECT_FLAG_ALLOW_LOCAL_DEPENDENCIES_ON_EXTERNAL_DEFINITONS"),
      (unsigned)
          DXIL::StateObjectFlags::AllowLocalDependenciesOnExternalDefinitions);
  AddConstUInt(
      context,
      StringRef(
          "STATE_OBJECT_FLAG_ALLOW_EXTERNAL_DEPENDENCIES_ON_LOCAL_DEFINITIONS"),
      (unsigned)
          DXIL::StateObjectFlags::AllowExternalDependenciesOnLocalDefinitions);
  AddConstUInt(context,
               StringRef("STATE_OBJECT_FLAG_ALLOW_STATE_OBJECT_ADDITIONS"),
               (unsigned)DXIL::StateObjectFlags::AllowStateObjectAdditions);
  AddConstUInt(context, StringRef("RAYTRACING_PIPELINE_FLAG_NONE"),
               (unsigned)DXIL::RaytracingPipelineFlags::None);
  AddConstUInt(context, StringRef("RAYTRACING_PIPELINE_FLAG_SKIP_TRIANGLES"),
               (unsigned)DXIL::RaytracingPipelineFlags::SkipTriangles);
  AddConstUInt(
      context, StringRef("RAYTRACING_PIPELINE_FLAG_SKIP_PROCEDURAL_PRIMITIVES"),
      (unsigned)DXIL::RaytracingPipelineFlags::SkipProceduralPrimitives);
}
/// <summary> Adds all constants and enums for sampler feedback </summary>
/// Declares SAMPLER_FEEDBACK_* as translation-unit-scope uint constants.
void hlsl::AddSamplerFeedbackConstants(ASTContext &context) {
  const struct {
    StringRef name;
    DXIL::SamplerFeedbackType value;
  } kConstants[] = {
      {"SAMPLER_FEEDBACK_MIN_MIP", DXIL::SamplerFeedbackType::MinMip},
      {"SAMPLER_FEEDBACK_MIP_REGION_USED",
       DXIL::SamplerFeedbackType::MipRegionUsed},
  };
  for (const auto &c : kConstants)
    AddConstUInt(context, c.name, (unsigned)c.value);
}
/// <summary> Adds all enums for Barrier intrinsic</summary>
/// Declares the MEMORY_TYPE_FLAG and BARRIER_SEMANTIC_FLAG pseudo-enums used
/// by the Barrier() intrinsic, with values from the DXIL flag enumerations.
void hlsl::AddBarrierConstants(ASTContext &context) {
  AddTypedefPseudoEnum(
      context, "MEMORY_TYPE_FLAG",
      {{"UAV_MEMORY", (unsigned)DXIL::MemoryTypeFlag::UavMemory},
       {"GROUP_SHARED_MEMORY",
        (unsigned)DXIL::MemoryTypeFlag::GroupSharedMemory},
       {"NODE_INPUT_MEMORY", (unsigned)DXIL::MemoryTypeFlag::NodeInputMemory},
       {"NODE_OUTPUT_MEMORY", (unsigned)DXIL::MemoryTypeFlag::NodeOutputMemory},
       {"ALL_MEMORY", (unsigned)DXIL::MemoryTypeFlag::AllMemory}});
  AddTypedefPseudoEnum(
      context, "BARRIER_SEMANTIC_FLAG",
      {{"GROUP_SYNC", (unsigned)DXIL::BarrierSemanticFlag::GroupSync},
       {"GROUP_SCOPE", (unsigned)DXIL::BarrierSemanticFlag::GroupScope},
       {"DEVICE_SCOPE", (unsigned)DXIL::BarrierSemanticFlag::DeviceScope}});
}
// Builds a bool-typed expression: the integer constant 'value' wrapped in an
// implicit integral-to-boolean conversion.
static Expr *IntConstantAsBoolExpr(clang::Sema &sema, uint64_t value) {
  ExprResult literal = sema.ActOnIntegerConstant(NoLoc, value);
  ExprResult asBool = sema.ImpCastExprToType(
      literal.get(), sema.getASTContext().BoolTy, CK_IntegralToBoolean);
  return asBool.get();
}
// Declares 'struct <trueTypeId> { static const bool <valueId> = <expr>; }'
// inside the std namespace and returns the new record. Despite the "true"
// parameter names it is generic: the value expression is supplied by the
// caller, so it presumably also builds false_type — confirm at call sites.
static CXXRecordDecl *CreateStdStructWithStaticBool(clang::ASTContext &context,
                                                    NamespaceDecl *stdNamespace,
                                                    IdentifierInfo &trueTypeId,
                                                    IdentifierInfo &valueId,
                                                    Expr *trueExpression) {
  // struct true_type { static const bool value = true; }
  TypeSourceInfo *boolTypeSource =
      context.getTrivialTypeSourceInfo(context.BoolTy.withConst());
  CXXRecordDecl *trueTypeDecl = CXXRecordDecl::Create(
      context, TagTypeKind::TTK_Struct, stdNamespace, NoLoc, NoLoc, &trueTypeId,
      nullptr, DelayTypeCreationTrue);
  // static fields are variables in the AST
  VarDecl *trueValueDecl =
      VarDecl::Create(context, trueTypeDecl, NoLoc, NoLoc, &valueId,
                      context.BoolTy.withConst(), boolTypeSource, SC_Static);
  trueValueDecl->setInit(trueExpression);
  trueValueDecl->setConstexpr(true);
  trueValueDecl->setAccess(AS_public);
  trueTypeDecl->setLexicalDeclContext(stdNamespace);
  trueTypeDecl->startDefinition();
  trueTypeDecl->addDecl(trueValueDecl);
  trueTypeDecl->completeDefinition();
  stdNamespace->addDecl(trueTypeDecl);
  return trueTypeDecl;
}
// Completes 'decl' as a definition with the single base class 'base' and
// registers it in 'lexicalContext'.
static void DefineRecordWithBase(CXXRecordDecl *decl,
                                 DeclContext *lexicalContext,
                                 const CXXBaseSpecifier *base) {
  decl->setLexicalDeclContext(lexicalContext);
  decl->startDefinition();
  // setBases takes an array of base-specifier pointers; pass a single base.
  decl->setBases(&base, 1);
  decl->completeDefinition();
  lexicalContext->addDecl(decl);
}
// Marks 'specializationDecl' as an explicit specialization and registers it
// as a partial specialization of 'templateDecl'.
static void SetPartialExplicitSpecialization(
    ClassTemplateDecl *templateDecl,
    ClassTemplatePartialSpecializationDecl *specializationDecl) {
  specializationDecl->setSpecializationKind(TSK_ExplicitSpecialization);
  templateDecl->AddPartialSpecialization(specializationDecl, nullptr);
}
// Builds and registers a partial specialization of 'templateDecl' over the
// two (type) template arguments in 'templateArgs', derived from 'base' —
// e.g. the is_same<T, T> : true_type specialization.
static void CreateIsEqualSpecialization(
    ASTContext &context, ClassTemplateDecl *templateDecl,
    TemplateName &templateName, DeclContext *lexicalContext,
    const CXXBaseSpecifier *base, TemplateParameterList *templateParamList,
    TemplateArgument (&templateArgs)[2]) {
  QualType specializationCanonType = context.getTemplateSpecializationType(
      templateName, templateArgs, _countof(templateArgs));
  // Both arguments must be type arguments (getAsType is used below).
  TemplateArgumentListInfo templateArgsListInfo =
      TemplateArgumentListInfo(NoLoc, NoLoc);
  templateArgsListInfo.addArgument(TemplateArgumentLoc(
      templateArgs[0],
      context.getTrivialTypeSourceInfo(templateArgs[0].getAsType())));
  templateArgsListInfo.addArgument(TemplateArgumentLoc(
      templateArgs[1],
      context.getTrivialTypeSourceInfo(templateArgs[1].getAsType())));
  ClassTemplatePartialSpecializationDecl *specializationDecl =
      ClassTemplatePartialSpecializationDecl::Create(
          context, TTK_Struct, lexicalContext, NoLoc, NoLoc, templateParamList,
          templateDecl, templateArgs, _countof(templateArgs),
          templateArgsListInfo, specializationCanonType, nullptr);
  context.getTagDeclType(specializationDecl); // Fault this in now.
  DefineRecordWithBase(specializationDecl, lexicalContext, base);
  SetPartialExplicitSpecialization(templateDecl, specializationDecl);
}
/// <summary>Adds the implementation for std::is_same.</summary>
/// Synthesizes, directly in the AST, a minimal std namespace containing
/// true_type, false_type, the primary is_same template (deriving from
/// false_type) and partial specializations deriving from true_type.
void hlsl::AddStdIsEqualImplementation(clang::ASTContext &context,
                                       clang::Sema &sema) {
  // The goal is to support std::is_same<T, T>::value for testing purposes, in a
  // manner that can evolve into a compliant feature in the future.
  //
  // The definitions necessary are as follows (all in the std namespace).
  // template <class T, T v>
  // struct integral_constant {
  //   typedef T value_type;
  //   static const value_type value = v;
  //   operator value_type() { return value; }
  // };
  //
  // typedef integral_constant<bool, true> true_type;
  // typedef integral_constant<bool, false> false_type;
  //
  // template<typename T, typename U> struct is_same : public false_type {};
  // template<typename T> struct is_same<T, T> : public
  // true_type{};
  //
  // We instead use these simpler definitions for true_type and false_type.
  // struct false_type { static const bool value = false; };
  // struct true_type { static const bool value = true; };
  DeclContext *tuContext = context.getTranslationUnitDecl();
  // Identifiers used by the synthesized declarations.
  IdentifierInfo &stdId =
      context.Idents.get(StringRef("std"), tok::TokenKind::identifier);
  IdentifierInfo &trueTypeId =
      context.Idents.get(StringRef("true_type"), tok::TokenKind::identifier);
  IdentifierInfo &falseTypeId =
      context.Idents.get(StringRef("false_type"), tok::TokenKind::identifier);
  IdentifierInfo &valueId =
      context.Idents.get(StringRef("value"), tok::TokenKind::identifier);
  IdentifierInfo &isSameId =
      context.Idents.get(StringRef("is_same"), tok::TokenKind::identifier);
  IdentifierInfo &tId =
      context.Idents.get(StringRef("T"), tok::TokenKind::identifier);
  IdentifierInfo &vId =
      context.Idents.get(StringRef("V"), tok::TokenKind::identifier);
  // Initializers for the static 'value' members.
  Expr *trueExpression = IntConstantAsBoolExpr(sema, 1);
  Expr *falseExpression = IntConstantAsBoolExpr(sema, 0);
  // namespace std
  NamespaceDecl *stdNamespace = NamespaceDecl::Create(
      context, tuContext, InlineFalse, NoLoc, NoLoc, &stdId, nullptr);
  // struct true_type { static const bool value = true; };
  // struct false_type { static const bool value = false; };
  CXXRecordDecl *trueTypeDecl = CreateStdStructWithStaticBool(
      context, stdNamespace, trueTypeId, valueId, trueExpression);
  CXXRecordDecl *falseTypeDecl = CreateStdStructWithStaticBool(
      context, stdNamespace, falseTypeId, valueId, falseExpression);
  // template<typename T, typename U> struct is_same : public false_type {};
  CXXRecordDecl *isSameFalseRecordDecl =
      CXXRecordDecl::Create(context, TagTypeKind::TTK_Struct, stdNamespace,
                            NoLoc, NoLoc, &isSameId, nullptr, false);
  TemplateTypeParmDecl *tParam = TemplateTypeParmDecl::Create(
      context, stdNamespace, NoLoc, NoLoc, FirstTemplateDepth,
      FirstParamPosition, &tId, TypenameFalse, ParameterPackFalse);
  TemplateTypeParmDecl *uParam = TemplateTypeParmDecl::Create(
      context, stdNamespace, NoLoc, NoLoc, FirstTemplateDepth,
      FirstParamPosition + 1, &vId, TypenameFalse, ParameterPackFalse);
  NamedDecl *falseParams[] = {tParam, uParam};
  TemplateParameterList *falseParamList = TemplateParameterList::Create(
      context, NoLoc, NoLoc, falseParams, _countof(falseParams), NoLoc);
  ClassTemplateDecl *isSameFalseTemplateDecl = ClassTemplateDecl::Create(
      context, stdNamespace, NoLoc, DeclarationName(&isSameId), falseParamList,
      isSameFalseRecordDecl, nullptr);
  context.getTagDeclType(isSameFalseRecordDecl); // Fault this in now.
  // Base specifier ': public false_type' for the primary template.
  CXXBaseSpecifier *falseBase = new (context) CXXBaseSpecifier(
      SourceRange(), VirtualFalse, BaseClassFalse, AS_public,
      context.getTrivialTypeSourceInfo(context.getTypeDeclType(falseTypeDecl)),
      NoLoc);
  isSameFalseRecordDecl->setDescribedClassTemplate(isSameFalseTemplateDecl);
  isSameFalseTemplateDecl->setLexicalDeclContext(stdNamespace);
  DefineRecordWithBase(isSameFalseRecordDecl, stdNamespace, falseBase);
  // is_same for 'true' is a specialization of is_same for 'false', taking a
  // single T, where both T will match
  // template<typename T> struct is_same<T, T> : public true_type{};
  TemplateName tn = TemplateName(isSameFalseTemplateDecl);
  NamedDecl *trueParams[] = {tParam};
  TemplateParameterList *trueParamList = TemplateParameterList::Create(
      context, NoLoc, NoLoc, trueParams, _countof(trueParams), NoLoc);
  // Base specifier ': public true_type' shared by all 'matching'
  // specializations below.
  CXXBaseSpecifier *trueBase = new (context) CXXBaseSpecifier(
      SourceRange(), VirtualFalse, BaseClassFalse, AS_public,
      context.getTrivialTypeSourceInfo(context.getTypeDeclType(trueTypeDecl)),
      NoLoc);
  TemplateArgument ta = TemplateArgument(
      context.getCanonicalType(context.getTypeDeclType(tParam)));
  TemplateArgument isSameTrueTemplateArgs[] = {ta, ta};
  CreateIsEqualSpecialization(context, isSameFalseTemplateDecl, tn,
                              stdNamespace, trueBase, trueParamList,
                              isSameTrueTemplateArgs);
  stdNamespace->addDecl(isSameFalseTemplateDecl);
  stdNamespace->setImplicit(true);
  tuContext->addDecl(stdNamespace);
  // This could be a parameter if ever needed.
  const bool SupportExtensions = true;
  // Consider right-hand const and right-hand ref to be true for is_same:
  // template<typename T> struct is_same<T, const T> : public true_type{};
  // template<typename T> struct is_same<T, T&> : public true_type{};
  if (SupportExtensions) {
    TemplateArgument trueConstArg = TemplateArgument(
        context.getCanonicalType(context.getTypeDeclType(tParam)).withConst());
    TemplateArgument isSameTrueConstTemplateArgs[] = {ta, trueConstArg};
    CreateIsEqualSpecialization(context, isSameFalseTemplateDecl, tn,
                                stdNamespace, trueBase, trueParamList,
                                isSameTrueConstTemplateArgs);
    TemplateArgument trueRefArg =
        TemplateArgument(context.getLValueReferenceType(
            context.getCanonicalType(context.getTypeDeclType(tParam))));
    TemplateArgument isSameTrueRefTemplateArgs[] = {ta, trueRefArg};
    CreateIsEqualSpecialization(context, isSameFalseTemplateDecl, tn,
                                stdNamespace, trueBase, trueParamList,
                                isSameTrueRefTemplateArgs);
  }
}
/// <summary>
/// Adds a new template type in the translation-unit context with the given
/// name. The record type will have a handle field.
/// </summary>
/// <param name="context">AST context to which template will be added.</param>
/// <param name="name">Name of template to create.</param>
/// <param name="templateArgCount">Number of template arguments (one or
/// two).</param>
/// <param name="defaultTypeArgValue">If assigned, the default
/// argument for the element template.</param>
CXXRecordDecl *
hlsl::DeclareTemplateTypeWithHandle(ASTContext &context, StringRef name,
                                    uint8_t templateArgCount,
                                    TypeSourceInfo *defaultTypeArgValue) {
  // Convenience overload: declare in translation-unit scope.
  return DeclareTemplateTypeWithHandleInDeclContext(
      context, context.getTranslationUnitDecl(), name, templateArgCount,
      defaultTypeArgValue);
}
// Declares a builtin object template (e.g. Texture2D, InputPatch) in
// 'declContext' with a handle field 'h'; see DeclareTemplateTypeWithHandle
// for parameter documentation.
CXXRecordDecl *hlsl::DeclareTemplateTypeWithHandleInDeclContext(
    ASTContext &context, DeclContext *declContext, StringRef name,
    uint8_t templateArgCount, TypeSourceInfo *defaultTypeArgValue) {
  DXASSERT(templateArgCount != 0,
           "otherwise caller should be creating a class or struct");
  DXASSERT(templateArgCount <= 2, "otherwise the function needs to be updated "
                                  "for a different template pattern");
  // Create an object template declaration in translation unit scope.
  // templateArgCount=1: template<typename element> typeName { ... }
  // templateArgCount=2: template<typename element, int count> typeName { ... }
  BuiltinTypeDeclBuilder typeDeclBuilder(declContext, name);
  TemplateTypeParmDecl *elementTemplateParamDecl =
      typeDeclBuilder.addTypeTemplateParam("element", defaultTypeArgValue);
  NonTypeTemplateParmDecl *countTemplateParamDecl = nullptr;
  if (templateArgCount > 1)
    countTemplateParamDecl =
        typeDeclBuilder.addIntegerTemplateParam("count", context.IntTy, 0);
  typeDeclBuilder.startDefinition();
  CXXRecordDecl *templateRecordDecl = typeDeclBuilder.getRecordDecl();
  // Add an 'h' field to hold the handle.
  QualType elementType = context.getTemplateTypeParmType(
      /*templateDepth*/ 0, 0, ParameterPackFalse, elementTemplateParamDecl);
  if (templateArgCount > 1 &&
      // Only need array type for inputpatch and outputpatch.
      // Avoid Texture2DMS which may use 0 count.
      // TODO: use hlsl types to do the check.
      !name.startswith("Texture") && !name.startswith("RWTexture")) {
    // Dependent-size array of 'count' elements: element h[count];
    Expr *countExpr = DeclRefExpr::Create(
        context, NestedNameSpecifierLoc(), NoLoc, countTemplateParamDecl, false,
        DeclarationNameInfo(countTemplateParamDecl->getDeclName(), NoLoc),
        context.IntTy, ExprValueKind::VK_RValue);
    elementType = context.getDependentSizedArrayType(
        elementType, countExpr, ArrayType::ArraySizeModifier::Normal, 0,
        SourceRange());
    // InputPatch and OutputPatch also have a "Length" static const member for
    // the number of control points
    IdentifierInfo &lengthId =
        context.Idents.get(StringRef("Length"), tok::TokenKind::identifier);
    TypeSourceInfo *lengthTypeSource =
        context.getTrivialTypeSourceInfo(context.IntTy.withConst());
    VarDecl *lengthValueDecl =
        VarDecl::Create(context, templateRecordDecl, NoLoc, NoLoc, &lengthId,
                        context.IntTy.withConst(), lengthTypeSource, SC_Static);
    lengthValueDecl->setInit(countExpr);
    lengthValueDecl->setAccess(AS_public);
    templateRecordDecl->addDecl(lengthValueDecl);
  }
  typeDeclBuilder.addField("h", elementType);
  return typeDeclBuilder.getRecordDecl();
}
// Wraps 'functionDecl' in a FunctionTemplateDecl parameterized by the given
// named declarations and adds the template (not the raw function) to
// 'recordDecl'. Returns the created template declaration.
FunctionTemplateDecl *hlsl::CreateFunctionTemplateDecl(
    ASTContext &context, CXXRecordDecl *recordDecl, CXXMethodDecl *functionDecl,
    NamedDecl **templateParamNamedDecls, size_t templateParamNamedDeclsCount) {
  DXASSERT_NOMSG(recordDecl != nullptr);
  DXASSERT_NOMSG(templateParamNamedDecls != nullptr);
  DXASSERT(templateParamNamedDeclsCount > 0,
           "otherwise caller shouldn't invoke this function");
  TemplateParameterList *templateParams = TemplateParameterList::Create(
      context, NoLoc, NoLoc, &templateParamNamedDecls[0],
      templateParamNamedDeclsCount, NoLoc);
  FunctionTemplateDecl *functionTemplate = FunctionTemplateDecl::Create(
      context, recordDecl, NoLoc, functionDecl->getDeclName(), templateParams,
      functionDecl);
  functionTemplate->setAccess(AccessSpecifier::AS_public);
  functionTemplate->setLexicalDeclContext(recordDecl);
  // Link function -> template so Sema treats the method as templated.
  functionDecl->setDescribedFunctionTemplate(functionTemplate);
  recordDecl->addDecl(functionTemplate);
  return functionTemplate;
}
// Fills in the (currently null) ParmVarDecl slots of the FunctionProtoTypeLoc
// carried by 'tinfo' with the given parameter declarations.
static void AssociateParametersToFunctionPrototype(TypeSourceInfo *tinfo,
                                                   ParmVarDecl **paramVarDecls,
                                                   unsigned int numParams) {
  FunctionProtoTypeLoc protoLoc =
      tinfo->getTypeLoc().getAs<FunctionProtoTypeLoc>();
  DXASSERT(protoLoc.getNumParams() == numParams,
           "otherwise unexpected number of parameters available");
  for (unsigned i = 0; i < numParams; i++) {
    DXASSERT(protoLoc.getParam(i) == nullptr,
             "otherwise prototype parameters were already initialized");
    protoLoc.setParam(i, paramVarDecls[i]);
  }
}
// Creates a public, non-explicit constructor declaration on 'recordDecl' with
// the given signature. Outputs the constructor in *constructorDecl and its
// trivial type source info in *tinfo. The caller is responsible for adding
// the constructor to the record.
static void CreateConstructorDeclaration(
    ASTContext &context, CXXRecordDecl *recordDecl, QualType resultType,
    ArrayRef<QualType> args, DeclarationName declarationName, bool isConst,
    CXXConstructorDecl **constructorDecl, TypeSourceInfo **tinfo) {
  DXASSERT_NOMSG(recordDecl != nullptr);
  DXASSERT_NOMSG(constructorDecl != nullptr);
  FunctionProtoType::ExtProtoInfo functionExtInfo;
  // 'isConst' marks the member function itself const-qualified.
  functionExtInfo.TypeQuals = isConst ? Qualifiers::Const : 0;
  QualType functionQT = context.getFunctionType(
      resultType, args, functionExtInfo, ArrayRef<ParameterModifier>());
  DeclarationNameInfo declNameInfo(declarationName, NoLoc);
  *tinfo = context.getTrivialTypeSourceInfo(functionQT, NoLoc);
  DXASSERT_NOMSG(*tinfo != nullptr);
  *constructorDecl = CXXConstructorDecl::Create(
      context, recordDecl, NoLoc, declNameInfo, functionQT, *tinfo,
      StorageClass::SC_None, ExplicitFalse, InlineSpecifiedFalse,
      IsConstexprFalse);
  DXASSERT_NOMSG(*constructorDecl != nullptr);
  (*constructorDecl)->setLexicalDeclContext(recordDecl);
  (*constructorDecl)->setAccess(AccessSpecifier::AS_public);
}
// Creates a public method declaration on 'recordDecl' with the given result
// type and argument types. Outputs the method in *functionDecl and its
// trivial type source info in *tinfo. The caller adds it to the record.
static void CreateObjectFunctionDeclaration(
    ASTContext &context, CXXRecordDecl *recordDecl, QualType resultType,
    ArrayRef<QualType> args, DeclarationName declarationName, bool isConst,
    CXXMethodDecl **functionDecl, TypeSourceInfo **tinfo) {
  DXASSERT_NOMSG(recordDecl != nullptr);
  DXASSERT_NOMSG(functionDecl != nullptr);
  FunctionProtoType::ExtProtoInfo functionExtInfo;
  // 'isConst' marks the member function itself const-qualified.
  functionExtInfo.TypeQuals = isConst ? Qualifiers::Const : 0;
  QualType functionQT = context.getFunctionType(
      resultType, args, functionExtInfo, ArrayRef<ParameterModifier>());
  DeclarationNameInfo declNameInfo(declarationName, NoLoc);
  *tinfo = context.getTrivialTypeSourceInfo(functionQT, NoLoc);
  DXASSERT_NOMSG(*tinfo != nullptr);
  *functionDecl = CXXMethodDecl::Create(
      context, recordDecl, NoLoc, declNameInfo, functionQT, *tinfo,
      StorageClass::SC_None, InlineSpecifiedFalse, IsConstexprFalse, NoLoc);
  DXASSERT_NOMSG(*functionDecl != nullptr);
  (*functionDecl)->setLexicalDeclContext(recordDecl);
  (*functionDecl)->setAccess(AccessSpecifier::AS_public);
}
// Creates a public method on 'recordDecl' with named parameters, wiring the
// ParmVarDecls into both the method and its prototype type location. When
// 'isTemplateFunction' is true the method is not added to the record; the
// caller is expected to add the enclosing FunctionTemplateDecl instead
// (see CreateFunctionTemplateDecl).
CXXMethodDecl *hlsl::CreateObjectFunctionDeclarationWithParams(
    ASTContext &context, CXXRecordDecl *recordDecl, QualType resultType,
    ArrayRef<QualType> paramTypes, ArrayRef<StringRef> paramNames,
    DeclarationName declarationName, bool isConst, bool isTemplateFunction) {
  DXASSERT_NOMSG(recordDecl != nullptr);
  DXASSERT_NOMSG(!resultType.isNull());
  DXASSERT_NOMSG(paramTypes.size() == paramNames.size());
  TypeSourceInfo *tinfo;
  CXXMethodDecl *functionDecl;
  CreateObjectFunctionDeclaration(context, recordDecl, resultType, paramTypes,
                                  declarationName, isConst, &functionDecl,
                                  &tinfo);
  // Create and associate parameters to method.
  SmallVector<ParmVarDecl *, 2> parmVarDecls;
  // Guard keeps parmVarDecls.front() below from being called on an empty
  // vector.
  if (!paramTypes.empty()) {
    for (unsigned int i = 0; i < paramTypes.size(); ++i) {
      IdentifierInfo *argIi = &context.Idents.get(paramNames[i]);
      ParmVarDecl *parmVarDecl = ParmVarDecl::Create(
          context, functionDecl, NoLoc, NoLoc, argIi, paramTypes[i],
          context.getTrivialTypeSourceInfo(paramTypes[i], NoLoc),
          StorageClass::SC_None, nullptr);
      parmVarDecl->setScopeInfo(0, i);
      DXASSERT(parmVarDecl->getFunctionScopeIndex() == i,
               "otherwise failed to set correct index");
      parmVarDecls.push_back(parmVarDecl);
    }
    functionDecl->setParams(ArrayRef<ParmVarDecl *>(parmVarDecls));
    AssociateParametersToFunctionPrototype(tinfo, &parmVarDecls.front(),
                                           parmVarDecls.size());
  }
  // If this is going to be part of a template function decl, don't add it to
  // the record because the template function decl will be added instead.
  if (!isTemplateFunction)
    recordDecl->addDecl(functionDecl);
  return functionDecl;
}
// Convenience overload: declares the uint-templated handle type in
// translation-unit scope.
CXXRecordDecl *hlsl::DeclareUIntTemplatedTypeWithHandle(
    ASTContext &context, StringRef typeName, StringRef templateParamName,
    TagTypeKind tagKind) {
  return DeclareUIntTemplatedTypeWithHandleInDeclContext(
      context, context.getTranslationUnitDecl(), typeName, templateParamName,
      tagKind);
}
// Declares a record templated on a single unsigned-int parameter, holding an
// unsigned 'h' handle field (e.g. FeedbackTexture2D[Array]).
CXXRecordDecl *hlsl::DeclareUIntTemplatedTypeWithHandleInDeclContext(
    ASTContext &context, DeclContext *declContext, StringRef typeName,
    StringRef templateParamName, TagTypeKind tagKind) {
  // template<uint kind> FeedbackTexture2D[Array] { ... }
  BuiltinTypeDeclBuilder typeDeclBuilder(declContext, typeName, tagKind);
  typeDeclBuilder.addIntegerTemplateParam(templateParamName,
                                          context.UnsignedIntTy);
  typeDeclBuilder.startDefinition();
  typeDeclBuilder.addField(
      "h", context.UnsignedIntTy); // Add an 'h' field to hold the handle.
  return typeDeclBuilder.getRecordDecl();
}
// Declares the ConstantBuffer (or, when bTBuf is true, TextureBuffer)
// template in translation-unit scope, shaped like the other builtin resource
// types, and returns its record declaration.
clang::CXXRecordDecl *
hlsl::DeclareConstantBufferViewType(clang::ASTContext &context, bool bTBuf) {
  // Create ConstantBufferView template declaration in translation unit scope
  // like other resource.
  // template<typename T> ConstantBuffer { int h; }
  DeclContext *DC = context.getTranslationUnitDecl();
  BuiltinTypeDeclBuilder typeDeclBuilder(
      DC, bTBuf ? "TextureBuffer" : "ConstantBuffer",
      TagDecl::TagKind::TTK_Struct);
  (void)typeDeclBuilder.addTypeTemplateParam("T");
  typeDeclBuilder.startDefinition();
  typeDeclBuilder.addField(
      "h", context.UnsignedIntTy); // Add an 'h' field to hold the handle.
  // Note: a redundant, discarded getRecordDecl() call was removed here; a
  // single call suffices to fetch the declared record.
  return typeDeclBuilder.getRecordDecl();
}
// Declares the RayQuery builtin type, including its default constructor,
// which is later lowered to the intrinsic producing the RayQuery handle.
CXXRecordDecl *hlsl::DeclareRayQueryType(ASTContext &context) {
  // template<uint flags> RayQuery { ... }
  BuiltinTypeDeclBuilder typeDeclBuilder(context.getTranslationUnitDecl(),
                                         "RayQuery");
  typeDeclBuilder.addIntegerTemplateParam("flags", context.UnsignedIntTy);
  typeDeclBuilder.startDefinition();
  typeDeclBuilder.addField(
      "h", context.UnsignedIntTy); // Add an 'h' field to hold the handle.
  // Add constructor that will be lowered to the intrinsic that produces
  // the RayQuery handle for this object.
  CanQualType canQualType = typeDeclBuilder.getRecordDecl()
                                ->getTypeForDecl()
                                ->getCanonicalTypeUnqualified();
  CXXConstructorDecl *pConstructorDecl = nullptr;
  TypeSourceInfo *pTypeSourceInfo = nullptr;
  CreateConstructorDeclaration(
      context, typeDeclBuilder.getRecordDecl(), context.VoidTy, {},
      context.DeclarationNames.getCXXConstructorName(canQualType), false,
      &pConstructorDecl, &pTypeSourceInfo);
  typeDeclBuilder.getRecordDecl()->addDecl(pConstructorDecl);
  return typeDeclBuilder.getRecordDecl();
}
// Declares the internal '.Resource'/'.Sampler' heap type whose operator[]
// is lowered to the CreateResourceFromHeap intrinsic.
CXXRecordDecl *hlsl::DeclareResourceType(ASTContext &context, bool bSampler) {
  // struct ResourceDescriptor { uint8 desc; }
  // Leading '.' keeps the name out of the user-visible namespace.
  StringRef Name = bSampler ? ".Sampler" : ".Resource";
  BuiltinTypeDeclBuilder typeDeclBuilder(context.getTranslationUnitDecl(), Name,
                                         TagDecl::TagKind::TTK_Struct);
  typeDeclBuilder.startDefinition();
  typeDeclBuilder.addField("h", GetHLSLObjectHandleType(context));
  CXXRecordDecl *recordDecl = typeDeclBuilder.getRecordDecl();
  QualType indexType = context.UnsignedIntTy;
  QualType resultType = context.getRecordType(recordDecl);
  resultType = context.getConstType(resultType);
  // const <type> operator[](uint index) const;
  CXXMethodDecl *functionDecl = CreateObjectFunctionDeclarationWithParams(
      context, recordDecl, resultType, ArrayRef<QualType>(indexType),
      ArrayRef<StringRef>(StringRef("index")),
      context.DeclarationNames.getCXXOperatorName(OO_Subscript), true);
  // Mark function as createResourceFromHeap intrinsic.
  functionDecl->addAttr(HLSLIntrinsicAttr::CreateImplicit(
      context, "op", "",
      static_cast<int>(hlsl::IntrinsicOp::IOP_CreateResourceFromHeap)));
  functionDecl->addAttr(HLSLCXXOverloadAttr::CreateImplicit(context));
  return recordDecl;
}
// Declares a work-graph node object or record type (optionally templated on
// its record type), attaching the HLSLNodeObjectAttr and, when requested,
// the Get/subscript accessors for record access.
CXXRecordDecl *hlsl::DeclareNodeOrRecordType(
    clang::ASTContext &Ctx, DXIL::NodeIOKind Type, bool IsRecordTypeTemplate,
    bool IsConst, bool HasGetMethods, bool IsArray, bool IsCompleteType) {
  StringRef TypeName = HLSLNodeObjectAttr::ConvertRecordTypeToStr(Type);
  BuiltinTypeDeclBuilder Builder(Ctx.getTranslationUnitDecl(), TypeName,
                                 TagDecl::TagKind::TTK_Struct);
  TemplateTypeParmDecl *TyParamDecl = nullptr;
  if (IsRecordTypeTemplate)
    TyParamDecl = Builder.addTypeTemplateParam("recordtype");
  Builder.startDefinition();
  // Handle field common to all node object types.
  Builder.addField("h", GetHLSLObjectHandleType(Ctx));
  Builder.getRecordDecl()->addAttr(
      HLSLNodeObjectAttr::CreateImplicit(Ctx, Type));
  if (IsRecordTypeTemplate) {
    QualType ParamTy = QualType(TyParamDecl->getTypeForDecl(), 0);
    CXXRecordDecl *Record = Builder.getRecordDecl();
    if (HasGetMethods || IsArray)
      AddRecordGetMethods(Ctx, Record, ParamTy, IsConst, IsArray);
    if (IsArray)
      AddRecordSubscriptAccess(Ctx, Record, ParamTy, IsConst);
  }
  // Incomplete types are completed later (e.g. once the record type is
  // known).
  if (IsCompleteType)
    return Builder.completeDefinition();
  return Builder.getRecordDecl();
}
#ifdef ENABLE_SPIRV_CODEGEN
// Declares vk::SpirvType / vk::SpirvOpaqueType; opaque types omit the
// size/alignment template parameters.
CXXRecordDecl *hlsl::DeclareInlineSpirvType(clang::ASTContext &context,
                                            clang::DeclContext *declContext,
                                            llvm::StringRef typeName,
                                            bool opaque) {
  // template<uint opcode, int size, int alignment> vk::SpirvType { ... }
  // template<uint opcode> vk::SpirvOpaqueType { ... }
  BuiltinTypeDeclBuilder typeDeclBuilder(declContext, typeName,
                                         clang::TagTypeKind::TTK_Class);
  typeDeclBuilder.addIntegerTemplateParam("opcode", context.UnsignedIntTy);
  if (!opaque) {
    typeDeclBuilder.addIntegerTemplateParam("size", context.UnsignedIntTy);
    typeDeclBuilder.addIntegerTemplateParam("alignment", context.UnsignedIntTy);
  }
  // 'operands' is a parameter pack (third argument true).
  typeDeclBuilder.addTypeTemplateParam("operands", nullptr, true);
  typeDeclBuilder.startDefinition();
  typeDeclBuilder.addField(
      "h", context.UnsignedIntTy); // Add an 'h' field to hold the handle.
  return typeDeclBuilder.getRecordDecl();
}
// Declares vk::integral_constant<T, v>; also returns the class template
// declaration through 'templateDecl'.
CXXRecordDecl *hlsl::DeclareVkIntegralConstant(
    clang::ASTContext &context, clang::DeclContext *declContext,
    llvm::StringRef typeName, ClassTemplateDecl **templateDecl) {
  // template<typename T, T v> vk::integral_constant { ... }
  BuiltinTypeDeclBuilder typeDeclBuilder(declContext, typeName,
                                         clang::TagTypeKind::TTK_Class);
  typeDeclBuilder.addTypeTemplateParam("T");
  typeDeclBuilder.addIntegerTemplateParam("v", context.UnsignedIntTy);
  typeDeclBuilder.startDefinition();
  typeDeclBuilder.addField(
      "h", context.UnsignedIntTy); // Add an 'h' field to hold the handle.
  *templateDecl = typeDeclBuilder.getTemplateDecl();
  return typeDeclBuilder.getRecordDecl();
}
#endif
// Declares a node output array type whose operator[] yields the element
// output type 'OutputType' (a template specialization thereof when
// IsRecordTypeTemplate is set). The subscript is lowered via the
// HLIndexNodeHandle opcode group.
CXXRecordDecl *hlsl::DeclareNodeOutputArray(clang::ASTContext &Ctx,
                                            DXIL::NodeIOKind Type,
                                            CXXRecordDecl *OutputType,
                                            bool IsRecordTypeTemplate) {
  StringRef TypeName = HLSLNodeObjectAttr::ConvertRecordTypeToStr(Type);
  BuiltinTypeDeclBuilder Builder(Ctx.getTranslationUnitDecl(), TypeName,
                                 TagDecl::TagKind::TTK_Struct);
  TemplateTypeParmDecl *elementTemplateParamDecl = nullptr;
  if (IsRecordTypeTemplate)
    elementTemplateParamDecl = Builder.addTypeTemplateParam("recordtype");
  Builder.startDefinition();
  Builder.addField("h", GetHLSLObjectHandleType(Ctx));
  Builder.getRecordDecl()->addAttr(
      HLSLNodeObjectAttr::CreateImplicit(Ctx, Type));
  QualType ResultType;
  if (IsRecordTypeTemplate) {
    // operator[] returns OutputType<recordtype>.
    QualType elementType = Ctx.getTemplateTypeParmType(
        /*templateDepth*/ 0, /*index*/ 0, ParameterPackFalse,
        elementTemplateParamDecl);
    const clang::Type *nodeOutputTy = OutputType->getTypeForDecl();
    TemplateArgument templateArgs[1] = {TemplateArgument(elementType)};
    TemplateName canonName = Ctx.getCanonicalTemplateName(
        TemplateName(OutputType->getDescribedClassTemplate()));
    ResultType = Ctx.getTemplateSpecializationType(canonName, templateArgs,
                                                   _countof(templateArgs),
                                                   QualType(nodeOutputTy, 0));
  } else {
    // For Non Template types(like EmptyNodeOutput)
    ResultType = Ctx.getTypeDeclType(OutputType);
  }
  QualType indexType = Ctx.UnsignedIntTy;
  auto methodDecl = CreateObjectFunctionDeclarationWithParams(
      Ctx, Builder.getRecordDecl(), ResultType, ArrayRef<QualType>(indexType),
      ArrayRef<StringRef>(StringRef("index")),
      Ctx.DeclarationNames.getCXXOperatorName(OO_Subscript), false);
  StringRef OpcodeGroup =
      GetHLOpcodeGroupName(HLOpcodeGroup::HLIndexNodeHandle);
  unsigned Opcode = static_cast<unsigned>(HLOpcodeGroup::HLIndexNodeHandle);
  methodDecl->addAttr(
      HLSLIntrinsicAttr::CreateImplicit(Ctx, OpcodeGroup, "", Opcode));
  methodDecl->addAttr(HLSLCXXOverloadAttr::CreateImplicit(Ctx));
  return Builder.completeDefinition();
}
// Declares an implicit 'extern' global variable with the given name and type
// in translation-unit scope and returns it.
VarDecl *hlsl::DeclareBuiltinGlobal(llvm::StringRef name, clang::QualType Ty,
                                    clang::ASTContext &context) {
  IdentifierInfo &II = context.Idents.get(name);
  auto *curDeclCtx = context.getTranslationUnitDecl();
  VarDecl *varDecl = VarDecl::Create(
      context, curDeclCtx, SourceLocation(), SourceLocation(), &II, Ty,
      context.getTrivialTypeSourceInfo(Ty), StorageClass::SC_Extern);
  // Mark implicit so the declaration is not printed when rewriting.
  varDecl->setImplicit();
  curDeclCtx->addDecl(varDecl);
  return varDecl;
}
// Returns true when 'FD' is a (non-null) function declaration carrying the
// HLSLIntrinsicAttr, i.e. it represents an HLSL intrinsic operation.
bool hlsl::IsIntrinsicOp(const clang::FunctionDecl *FD) {
  if (FD == nullptr)
    return false;
  return FD->hasAttr<HLSLIntrinsicAttr>();
}
// Retrieves the intrinsic opcode and opcode group for 'FD'. Returns false
// (leaving the outputs untouched) when 'FD' is null or not an intrinsic.
bool hlsl::GetIntrinsicOp(const clang::FunctionDecl *FD, unsigned &opcode,
                          llvm::StringRef &group) {
  if (FD == nullptr)
    return false;
  // getAttr returns null exactly when hasAttr would return false.
  HLSLIntrinsicAttr *Attr = FD->getAttr<HLSLIntrinsicAttr>();
  if (Attr == nullptr)
    return false;
  opcode = Attr->getOpcode();
  group = Attr->getGroup();
  return true;
}
// Retrieves the lowering string for intrinsic 'FD'. Returns false (leaving
// 'S' untouched) when 'FD' is null or not an intrinsic.
bool hlsl::GetIntrinsicLowering(const clang::FunctionDecl *FD,
                                llvm::StringRef &S) {
  if (FD == nullptr)
    return false;
  HLSLIntrinsicAttr *Attr = FD->getAttr<HLSLIntrinsicAttr>();
  if (Attr == nullptr)
    return false;
  S = Attr->getLowering();
  return true;
}
/// <summary>Parses a column or row digit.</summary>
/// On success stores the digit's value (1-4) in *count and returns true;
/// otherwise stores 0 and returns false.
static bool TryParseColOrRowChar(const char digit, int *count) {
  const bool isValidDim = digit >= '1' && digit <= '4';
  *count = isValidDim ? digit - '0' : 0;
  return isValidDim;
}
/// <summary>Parses a matrix shorthand identifier (eg, float3x2).</summary>
/// On success stores the scalar type in *parsedType and the dimensions in
/// *rowCount/*colCount, returning true; otherwise returns false.
bool hlsl::TryParseMatrixShorthand(const char *typeName, size_t typeNameLen,
                                   HLSLScalarType *parsedType, int *rowCount,
                                   int *colCount,
                                   const clang::LangOptions &langOptions) {
  //
  // Matrix shorthand format is PrimitiveTypeRxC, where R is the row count and C
  // is the column count. R and C should be between 1 and 4 inclusive. x is a
  // literal 'x' character. PrimitiveType is one of the HLSLScalarTypeNames
  // values.
  //
  // Nonzero row AND column counts means a matrix suffix ("RxC") was found.
  if (TryParseMatrixOrVectorDimension(typeName, typeNameLen, rowCount, colCount,
                                      langOptions) &&
      *rowCount != 0 && *colCount != 0) {
    // compare scalar component; the "RxC" suffix is always 3 characters.
    HLSLScalarType type =
        FindScalarTypeByName(typeName, typeNameLen - 3, langOptions);
    if (type != HLSLScalarType_unknown) {
      *parsedType = type;
      return true;
    }
  }
  // Unable to parse.
  return false;
}
/// <summary>Parses a vector shorthand identifier (eg, float3).</summary>
/// On success stores the scalar type in *parsedType and the element count in
/// *elementCount, returning true; otherwise returns false.
bool hlsl::TryParseVectorShorthand(const char *typeName, size_t typeNameLen,
                                   HLSLScalarType *parsedType,
                                   int *elementCount,
                                   const clang::LangOptions &langOptions) {
  // At least *something*N characters necessary, where something is at least
  // 'int'. Guard the typeName[typeNameLen - 1] access: the original code
  // indexed out of bounds for typeNameLen == 0, and lengths 1-3 can never
  // match (the shortest scalar name, "int", is 3 characters plus the digit).
  const size_t MinValidLen = 4;
  if (typeNameLen < MinValidLen)
    return false;
  if (TryParseColOrRowChar(typeName[typeNameLen - 1], elementCount)) {
    // compare scalar component
    HLSLScalarType type =
        FindScalarTypeByName(typeName, typeNameLen - 1, langOptions);
    if (type != HLSLScalarType_unknown) {
      *parsedType = type;
      return true;
    }
  }
  // Unable to parse.
  return false;
}
/// <summary>Parses a hlsl scalar type name (e.g. min16float, uint).</summary>
/// On success stores the scalar type in *parsedType and returns true.
bool hlsl::TryParseScalar(const char *typeName, size_t typeNameLen,
                          HLSLScalarType *parsedType,
                          const clang::LangOptions &langOptions) {
  const HLSLScalarType candidate =
      FindScalarTypeByName(typeName, typeNameLen, langOptions);
  if (candidate == HLSLScalarType_unknown)
    return false; // unable to parse
  *parsedType = candidate;
  return true;
}
/// <summary>Parse any (scalar, vector, matrix) hlsl types (e.g float, int3x4,
/// uint2) </summary>
bool hlsl::TryParseAny(const char *typeName, size_t typeNameLen,
                       HLSLScalarType *parsedType, int *rowCount, int *colCount,
                       const clang::LangOptions &langOptions) {
  // at least 'int'
  const size_t MinValidLen = 3;
  if (typeNameLen >= MinValidLen) {
    TryParseMatrixOrVectorDimension(typeName, typeNameLen, rowCount, colCount,
                                    langOptions);
    // Suffix length: 0 for scalar, 1 for vector ("N"), 3 for matrix ("RxC").
    int suffixLen = *colCount == 0 ? 0 : *rowCount == 0 ? 1 : 3;
    HLSLScalarType type =
        FindScalarTypeByName(typeName, typeNameLen - suffixLen, langOptions);
    if (type != HLSLScalarType_unknown) {
      *parsedType = type;
      return true;
    }
  }
  return false;
}
/// <summary>Parses the HLSL 'string' type name.</summary>
/// Returns true exactly when typeName names the 6-character token "string".
bool hlsl::TryParseString(const char *typeName, size_t typeNameLen,
                          const clang::LangOptions &langOptions) {
  const size_t StringLen = 6; // strlen("string")
  return typeNameLen == StringLen &&
         strncmp(typeName, "string", StringLen) == 0;
}
/// <summary>Parse any kind of dimension for vector or matrix (e.g 4,3 in
/// int4x3). If it's a matrix type, rowCount and colCount will be nonzero. If
/// it's a vector type, colCount is 0. Otherwise both rowCount and colCount is
/// 0. Returns true if either matrix or vector dimensions detected. </summary>
bool hlsl::TryParseMatrixOrVectorDimension(
    const char *typeName, size_t typeNameLen, int *rowCount, int *colCount,
    const clang::LangOptions &langOptions) {
  *rowCount = 0;
  *colCount = 0;
  size_t MinValidLen = 3; // at least int
  if (typeNameLen > MinValidLen) {
    // NOTE: the last digit parses into colCount; for a vector ("float3")
    // that is the element count and rowCount stays 0.
    if (TryParseColOrRowChar(typeName[typeNameLen - 1], colCount)) {
      // Try parse matrix
      if (typeName[typeNameLen - 2] == 'x')
        TryParseColOrRowChar(typeName[typeNameLen - 3], rowCount);
      return true;
    }
  }
  return false;
}
/// <summary>Creates a typedef for a matrix shorthand (eg, float3x2).</summary>
/// The typedef is implicit and added to translation-unit scope.
TypedefDecl *hlsl::CreateMatrixSpecializationShorthand(
    ASTContext &context, QualType matrixSpecialization,
    HLSLScalarType scalarType, size_t rowCount, size_t colCount) {
  DXASSERT(rowCount <= 4, "else caller didn't validate rowCount");
  DXASSERT(colCount <= 4, "else caller didn't validate colCount");
  // Longest name is bounded well below 64 given the validated dimensions.
  char typeName[64];
  sprintf_s(typeName, _countof(typeName), "%s%ux%u",
            HLSLScalarTypeNames[scalarType], (unsigned)rowCount,
            (unsigned)colCount);
  IdentifierInfo &typedefId =
      context.Idents.get(StringRef(typeName), tok::TokenKind::identifier);
  DeclContext *currentDeclContext = context.getTranslationUnitDecl();
  TypedefDecl *decl = TypedefDecl::Create(
      context, currentDeclContext, NoLoc, NoLoc, &typedefId,
      context.getTrivialTypeSourceInfo(matrixSpecialization, NoLoc));
  decl->setImplicit(true);
  currentDeclContext->addDecl(decl);
  return decl;
}
/// <summary>Creates a typedef for a vector shorthand (eg, float3).</summary>
/// The typedef is implicit and added to translation-unit scope.
TypedefDecl *hlsl::CreateVectorSpecializationShorthand(
    ASTContext &context, QualType vectorSpecialization,
    HLSLScalarType scalarType, size_t colCount) {
  DXASSERT(colCount <= 4, "else caller didn't validate colCount");
  char typeName[64];
  sprintf_s(typeName, _countof(typeName), "%s%u",
            HLSLScalarTypeNames[scalarType], (unsigned)colCount);
  IdentifierInfo &typedefId =
      context.Idents.get(StringRef(typeName), tok::TokenKind::identifier);
  DeclContext *currentDeclContext = context.getTranslationUnitDecl();
  TypedefDecl *decl = TypedefDecl::Create(
      context, currentDeclContext, NoLoc, NoLoc, &typedefId,
      context.getTrivialTypeSourceInfo(vectorSpecialization, NoLoc));
  decl->setImplicit(true);
  currentDeclContext->addDecl(decl);
  return decl;
}
// Deep-copies an array of UnusualAnnotation pointers into storage owned by
// 'Context'; returns an ArrayRef over the new, context-allocated array.
llvm::ArrayRef<hlsl::UnusualAnnotation *>
hlsl::UnusualAnnotation::CopyToASTContextArray(clang::ASTContext &Context,
                                               hlsl::UnusualAnnotation **begin,
                                               size_t count) {
  if (count == 0) {
    return llvm::ArrayRef<hlsl::UnusualAnnotation *>();
  }
  // Pointer array and each element are allocated from the ASTContext, so
  // their lifetime matches the context (no explicit free).
  UnusualAnnotation **arr = ::new (Context) UnusualAnnotation *[count];
  for (size_t i = 0; i < count; ++i) {
    arr[i] = begin[i]->CopyToASTContext(Context);
  }
  return llvm::ArrayRef<hlsl::UnusualAnnotation *>(arr, count);
}
// Clones this annotation into ASTContext-owned storage by byte-copying the
// most-derived object; the size is chosen from the Kind discriminator.
UnusualAnnotation *
hlsl::UnusualAnnotation::CopyToASTContext(ASTContext &Context) {
  // All UnusualAnnotation instances can be blitted.
  // NOTE(review): this relies on every derived annotation type being
  // trivially copyable; a new non-trivial subclass would break this memcpy.
  size_t instanceSize;
  switch (Kind) {
  case UA_RegisterAssignment:
    instanceSize = sizeof(hlsl::RegisterAssignment);
    break;
  case UA_ConstantPacking:
    instanceSize = sizeof(hlsl::ConstantPacking);
    break;
  case UA_PayloadAccessQualifier:
    instanceSize = sizeof(hlsl::PayloadAccessAnnotation);
    break;
  default:
    DXASSERT(Kind == UA_SemanticDecl,
             "Kind == UA_SemanticDecl -- otherwise switch is incomplete");
    instanceSize = sizeof(hlsl::SemanticDecl);
    break;
  }
  void *result = Context.Allocate(instanceSize);
  memcpy(result, this, instanceSize);
  return (UnusualAnnotation *)result;
}
// Forwards to the hlsl helper so ASTContext clients can query whether 'FD'
// is a hull-shader patch-constant function.
bool ASTContext::IsPatchConstantFunctionDecl(const FunctionDecl *FD) const {
  return hlsl::IsPatchConstantFunctionDecl(FD);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/CommentLexer.cpp |
#include "clang/AST/CommentLexer.h"
#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/CommentDiagnostic.h"
#include "clang/Basic/CharInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
namespace clang {
namespace comments {
// Debug helper: prints the token kind, location, length, and spelling to
// stderr.
void Token::dump(const Lexer &L, const SourceManager &SM) const {
  llvm::errs() << "comments::Token Kind=" << Kind << " ";
  Loc.dump(SM);
  llvm::errs() << " " << Length << " \"" << L.getSpelling(*this, SM) << "\"\n";
}
// Character-class predicates for the bodies of HTML character references
// (&name;, &#123;, &#xAB; respectively).
static inline bool isHTMLNamedCharacterReferenceCharacter(char C) {
  return isLetter(C);
}
static inline bool isHTMLDecimalCharacterReferenceCharacter(char C) {
  return isDigit(C);
}
static inline bool isHTMLHexCharacterReferenceCharacter(char C) {
  return isHexDigit(C);
}
// Encodes 'CodePoint' as UTF-8 into allocator-owned storage; returns an
// empty StringRef when the code point cannot be encoded.
static inline StringRef convertCodePointToUTF8(
                                      llvm::BumpPtrAllocator &Allocator,
                                      unsigned CodePoint) {
  char *Resolved = Allocator.Allocate<char>(UNI_MAX_UTF8_BYTES_PER_CODE_POINT);
  char *ResolvedPtr = Resolved;
  // ConvertCodePointToUTF8 advances ResolvedPtr past the written bytes.
  if (llvm::ConvertCodePointToUTF8(CodePoint, ResolvedPtr))
    return StringRef(Resolved, ResolvedPtr - Resolved);
  else
    return StringRef();
}
namespace {
#include "clang/AST/CommentHTMLTags.inc"
#include "clang/AST/CommentHTMLNamedCharacterReferences.inc"
} // unnamed namespace
// Resolves a named HTML character reference (e.g. "amp") to its UTF-8
// spelling; returns an empty StringRef for unknown names.
StringRef Lexer::resolveHTMLNamedCharacterReference(StringRef Name) const {
  // Fast path, first check a few most widely used named character references.
  return llvm::StringSwitch<StringRef>(Name)
      .Case("amp", "&")
      .Case("lt", "<")
      .Case("gt", ">")
      .Case("quot", "\"")
      .Case("apos", "\'")
      // Slow path.
      .Default(translateHTMLNamedCharacterReferenceToUTF8(Name));
}
// Convert the digit string of a decimal character reference ("&#123;")
// into its code point and encode that code point as UTF-8.
StringRef Lexer::resolveHTMLDecimalCharacterReference(StringRef Name) const {
  unsigned CodePoint = 0;
  for (char Digit : Name) {
    assert(isHTMLDecimalCharacterReferenceCharacter(Digit));
    CodePoint = CodePoint * 10 + unsigned(Digit - '0');
  }
  return convertCodePointToUTF8(Allocator, CodePoint);
}
// Convert the digit string of a hexadecimal character reference
// ("&#x1b;") into its code point and encode that code point as UTF-8.
StringRef Lexer::resolveHTMLHexCharacterReference(StringRef Name) const {
  unsigned CodePoint = 0;
  for (char Digit : Name) {
    assert(isHTMLHexCharacterReferenceCharacter(Digit));
    CodePoint = CodePoint * 16 + llvm::hexDigitValue(Digit);
  }
  return convertCodePointToUTF8(Allocator, CodePoint);
}
// Skip the decoration at the start of a line inside a C comment: optional
// horizontal whitespace followed by a '*'.  BufferPtr is advanced only when
// a complete decoration is found, so plain text keeps its leading spaces.
void Lexer::skipLineStartingDecorations() {
  // This function should be called only for C comments
  assert(CommentState == LCS_InsideCComment);
  if (BufferPtr == CommentEnd)
    return;
  switch (*BufferPtr) {
  case ' ':
  case '\t':
  case '\f':
  case '\v': {
    // Look ahead with a scratch pointer; commit only if a '*' follows
    // the run of horizontal whitespace.
    const char *NewBufferPtr = BufferPtr;
    NewBufferPtr++;
    if (NewBufferPtr == CommentEnd)
      return;
    char C = *NewBufferPtr;
    while (isHorizontalWhitespace(C)) {
      NewBufferPtr++;
      if (NewBufferPtr == CommentEnd)
        return;
      C = *NewBufferPtr;
    }
    if (C == '*')
      BufferPtr = NewBufferPtr + 1;  // Skip past the '*' as well.
    break;
  }
  case '*':
    BufferPtr++;
    break;
  }
}
namespace {
/// Returns pointer to the first newline character in the string.
/// Returns a pointer to the first newline character in the string, or
/// BufferEnd if there is none.
const char *findNewline(const char *BufferPtr, const char *BufferEnd) {
  while (BufferPtr != BufferEnd && !isVerticalWhitespace(*BufferPtr))
    ++BufferPtr;
  return BufferPtr;
}
/// Skip one newline sequence: "\n", "\r" or "\r\n".  Returns a pointer
/// just past the newline, or BufferPtr unchanged when already at the end.
const char *skipNewline(const char *BufferPtr, const char *BufferEnd) {
  if (BufferPtr == BufferEnd)
    return BufferPtr;
  const char First = *BufferPtr++;
  if (First != '\n') {
    assert(First == '\r');
    // A '\n' immediately after '\r' belongs to the same newline.
    if (BufferPtr != BufferEnd && *BufferPtr == '\n')
      BufferPtr++;
  }
  return BufferPtr;
}
// Advance past the name part of a named character reference ("&amp;").
// Returns the first non-name position, or BufferEnd.
const char *skipNamedCharacterReference(const char *BufferPtr,
                                        const char *BufferEnd) {
  for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
    if (!isHTMLNamedCharacterReferenceCharacter(*BufferPtr))
      return BufferPtr;
  }
  return BufferEnd;
}
// Advance past the digits of a decimal character reference ("&#123;").
const char *skipDecimalCharacterReference(const char *BufferPtr,
                                          const char *BufferEnd) {
  for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
    if (!isHTMLDecimalCharacterReferenceCharacter(*BufferPtr))
      return BufferPtr;
  }
  return BufferEnd;
}
// Advance past the digits of a hex character reference ("&#x1b;").
const char *skipHexCharacterReference(const char *BufferPtr,
                                      const char *BufferEnd) {
  for ( ; BufferPtr != BufferEnd; ++BufferPtr) {
    if (!isHTMLHexCharacterReferenceCharacter(*BufferPtr))
      return BufferPtr;
  }
  return BufferEnd;
}
// First character of an HTML tag or attribute name: letters only.
bool isHTMLIdentifierStartingCharacter(char C) {
  return isLetter(C);
}
// Subsequent characters of an HTML tag or attribute name.
bool isHTMLIdentifierCharacter(char C) {
  return isAlphanumeric(C);
}
/// Advance past HTML identifier characters.  Returns the first
/// non-identifier position, or BufferEnd.
const char *skipHTMLIdentifier(const char *BufferPtr, const char *BufferEnd) {
  while (BufferPtr != BufferEnd && isHTMLIdentifierCharacter(*BufferPtr))
    ++BufferPtr;
  return BufferPtr;
}
/// Skip an HTML string quoted in single or double quotes.  Quotes escaped
/// with a backslash are allowed inside the string.
///
/// Returns a pointer to the closing quote, or BufferEnd if it is missing.
const char *skipHTMLQuotedString(const char *BufferPtr, const char *BufferEnd)
{
  const char Quote = *BufferPtr;
  assert(Quote == '\"' || Quote == '\'');
  // Scan past the opening quote for an unescaped matching quote.
  for (BufferPtr++; BufferPtr != BufferEnd; ++BufferPtr) {
    if (*BufferPtr == Quote && BufferPtr[-1] != '\\')
      return BufferPtr;
  }
  return BufferEnd;
}
/// Advance past whitespace.  Returns the first non-whitespace position,
/// or BufferEnd.
const char *skipWhitespace(const char *BufferPtr, const char *BufferEnd) {
  while (BufferPtr != BufferEnd && isWhitespace(*BufferPtr))
    ++BufferPtr;
  return BufferPtr;
}
// Returns true if the whole range [BufferPtr, BufferEnd) is whitespace.
bool isWhitespace(const char *BufferPtr, const char *BufferEnd) {
  return skipWhitespace(BufferPtr, BufferEnd) == BufferEnd;
}
// First character of a Doxygen command name: letters only.
bool isCommandNameStartCharacter(char C) {
  return isLetter(C);
}
// Subsequent characters of a Doxygen command name.
bool isCommandNameCharacter(char C) {
  return isAlphanumeric(C);
}
/// Advance past command name characters.  Returns the first position that
/// is not part of a command name, or BufferEnd.
const char *skipCommandName(const char *BufferPtr, const char *BufferEnd) {
  while (BufferPtr != BufferEnd && isCommandNameCharacter(*BufferPtr))
    ++BufferPtr;
  return BufferPtr;
}
/// Return the one past end pointer for BCPL comments.
/// Handles newlines escaped with backslash or trigraph for backslash.
const char *findBCPLCommentEnd(const char *BufferPtr, const char *BufferEnd) {
  const char *CurPtr = BufferPtr;
  while (CurPtr != BufferEnd) {
    // Advance to the next vertical-whitespace character.
    while (!isVerticalWhitespace(*CurPtr)) {
      CurPtr++;
      if (CurPtr == BufferEnd)
        return BufferEnd;
    }
    // We found a newline, check if it is escaped.
    // Walk backwards over trailing horizontal whitespace to find the last
    // visible character before the newline.
    // NOTE(review): this backward walk assumes a non-whitespace character
    // exists at or after BufferPtr on this line -- confirm callers never
    // pass an all-whitespace leading range.
    const char *EscapePtr = CurPtr - 1;
    while(isHorizontalWhitespace(*EscapePtr))
      EscapePtr--;
    // An escape is either a plain '\\' or the trigraph "??/".
    if (*EscapePtr == '\\' ||
        (EscapePtr - 2 >= BufferPtr && EscapePtr[0] == '/' &&
         EscapePtr[-1] == '?' && EscapePtr[-2] == '?')) {
      // We found an escaped newline.
      CurPtr = skipNewline(CurPtr, BufferEnd);
    } else
      return CurPtr; // Not an escaped newline.
  }
  return BufferEnd;
}
/// Return the one past end pointer for C comments.
/// Very dumb, does not handle escaped newlines or trigraphs.
const char *findCCommentEnd(const char *BufferPtr, const char *BufferEnd) {
  while (BufferPtr != BufferEnd) {
    if (*BufferPtr == '*') {
      // The buffer is guaranteed to contain a complete "*/" terminator.
      assert(BufferPtr + 1 != BufferEnd);
      if (BufferPtr[1] == '/')
        return BufferPtr;
    }
    ++BufferPtr;
  }
  llvm_unreachable("buffer end hit before '*/' was seen");
}
} // unnamed namespace
// Form a token of the given kind spanning [BufferPtr, TokEnd) and advance
// BufferPtr past it.
void Lexer::formTokenWithChars(Token &Result, const char *TokEnd,
                               tok::TokenKind Kind) {
  const unsigned TokLen = TokEnd - BufferPtr;
  Result.setLocation(getSourceLocation(BufferPtr));
  Result.setKind(Kind);
  Result.setLength(TokLen);
#ifndef NDEBUG
  // Poison the semantic payload so misuse is visible in debug builds.
  Result.TextPtr = "<UNSET>";
  Result.IntVal = 7;
#endif
  BufferPtr = TokEnd;
}
// Lex one token from the interior of a comment.  Dispatches to the
// specialized lexers when a previous token put us into a non-normal state
// (verbatim block/line, HTML tag); otherwise scans for commands, HTML
// constructs, newlines, or plain text.
void Lexer::lexCommentText(Token &T) {
  assert(CommentState == LCS_InsideBCPLComment ||
         CommentState == LCS_InsideCComment);
  switch (State) {
  case LS_Normal:
    break;
  case LS_VerbatimBlockFirstLine:
    lexVerbatimBlockFirstLine(T);
    return;
  case LS_VerbatimBlockBody:
    lexVerbatimBlockBody(T);
    return;
  case LS_VerbatimLineText:
    lexVerbatimLineText(T);
    return;
  case LS_HTMLStartTag:
    lexHTMLStartTag(T);
    return;
  case LS_HTMLEndTag:
    lexHTMLEndTag(T);
    return;
  }
  assert(State == LS_Normal);
  const char *TokenPtr = BufferPtr;
  assert(TokenPtr < CommentEnd);
  while (TokenPtr != CommentEnd) {
    switch(*TokenPtr) {
      case '\\':
      case '@': {
        // Commands that start with a backslash and commands that start with
        // 'at' have equivalent semantics. But we keep information about the
        // exact syntax in AST for comments.
        tok::TokenKind CommandKind =
            (*TokenPtr == '@') ? tok::at_command : tok::backslash_command;
        TokenPtr++;
        if (TokenPtr == CommentEnd) {
          // Trailing '\' or '@' is just text.
          formTextToken(T, TokenPtr);
          return;
        }
        char C = *TokenPtr;
        switch (C) {
        default:
          break;
        case '\\': case '@': case '&': case '$':
        case '#':  case '<': case '>': case '%':
        case '\"': case '.': case ':':
          // This is one of \\ \@ \& \$ etc escape sequences.
          TokenPtr++;
          if (C == ':' && TokenPtr != CommentEnd && *TokenPtr == ':') {
            // This is the \:: escape sequence.
            TokenPtr++;
          }
          // The token text is the escaped character(s) without the leading
          // backslash or at-sign.
          StringRef UnescapedText(BufferPtr + 1, TokenPtr - (BufferPtr + 1));
          formTokenWithChars(T, TokenPtr, tok::text);
          T.setText(UnescapedText);
          return;
        }
        // Don't make zero-length commands.
        if (!isCommandNameStartCharacter(*TokenPtr)) {
          formTextToken(T, TokenPtr);
          return;
        }
        TokenPtr = skipCommandName(TokenPtr, CommentEnd);
        unsigned Length = TokenPtr - (BufferPtr + 1);
        // Hardcoded support for lexing LaTeX formula commands
        // \f$ \f[ \f] \f{ \f} as a single command.
        if (Length == 1 && TokenPtr[-1] == 'f' && TokenPtr != CommentEnd) {
          C = *TokenPtr;
          if (C == '$' || C == '[' || C == ']' || C == '{' || C == '}') {
            TokenPtr++;
            Length++;
          }
        }
        StringRef CommandName(BufferPtr + 1, Length);
        const CommandInfo *Info = Traits.getCommandInfoOrNULL(CommandName);
        if (!Info) {
          // Unknown command: try typo correction first; otherwise emit an
          // unknown_command token and a diagnostic.
          if ((Info = Traits.getTypoCorrectCommandInfo(CommandName))) {
            StringRef CorrectedName = Info->Name;
            SourceLocation Loc = getSourceLocation(BufferPtr);
            SourceRange CommandRange(Loc.getLocWithOffset(1),
                                     getSourceLocation(TokenPtr));
            Diag(Loc, diag::warn_correct_comment_command_name)
              << CommandName << CorrectedName
              << FixItHint::CreateReplacement(CommandRange, CorrectedName);
          } else {
            formTokenWithChars(T, TokenPtr, tok::unknown_command);
            T.setUnknownCommandName(CommandName);
            Diag(T.getLocation(), diag::warn_unknown_comment_command_name);
            return;
          }
        }
        if (Info->IsVerbatimBlockCommand) {
          setupAndLexVerbatimBlock(T, TokenPtr, *BufferPtr, Info);
          return;
        }
        if (Info->IsVerbatimLineCommand) {
          setupAndLexVerbatimLine(T, TokenPtr, Info);
          return;
        }
        formTokenWithChars(T, TokenPtr, CommandKind);
        T.setCommandID(Info->getID());
        return;
      }
      case '&':
        lexHTMLCharacterReference(T);
        return;
      case '<': {
        TokenPtr++;
        if (TokenPtr == CommentEnd) {
          formTextToken(T, TokenPtr);
          return;
        }
        const char C = *TokenPtr;
        if (isHTMLIdentifierStartingCharacter(C))
          setupAndLexHTMLStartTag(T);
        else if (C == '/')
          setupAndLexHTMLEndTag(T);
        else
          formTextToken(T, TokenPtr);
        return;
      }
      case '\n':
      case '\r':
        TokenPtr = skipNewline(TokenPtr, CommentEnd);
        formTokenWithChars(T, TokenPtr, tok::newline);
        // Strip the "* " decoration at the start of the next C-comment line.
        if (CommentState == LCS_InsideCComment)
          skipLineStartingDecorations();
        return;
      default: {
        // Plain text: extend the token to the next special character or
        // newline.
        size_t End = StringRef(TokenPtr, CommentEnd - TokenPtr).
                         find_first_of("\n\r\\@&<");
        if (End != StringRef::npos)
          TokenPtr += End;
        else
          TokenPtr = CommentEnd;
        formTextToken(T, TokenPtr);
        return;
      }
    }
  }
}
// Emit the verbatim_block_begin token for a verbatim block command
// (e.g. \verbatim) and remember the end command name (with the same
// marker character) that terminates the block.
void Lexer::setupAndLexVerbatimBlock(Token &T,
                                     const char *TextBegin,
                                     char Marker, const CommandInfo *Info) {
  assert(Info->IsVerbatimBlockCommand);
  VerbatimBlockEndCommandName.clear();
  VerbatimBlockEndCommandName.append(Marker == '\\' ? "\\" : "@");
  VerbatimBlockEndCommandName.append(Info->EndCommandName);
  formTokenWithChars(T, TextBegin, tok::verbatim_block_begin);
  T.setVerbatimBlockID(Info->getID());
  // If there is a newline following the verbatim opening command, skip the
  // newline so that we don't create a tok::verbatim_block_line with empty
  // text content.
  if (BufferPtr != CommentEnd &&
      isVerticalWhitespace(*BufferPtr)) {
    BufferPtr = skipNewline(BufferPtr, CommentEnd);
    State = LS_VerbatimBlockBody;
    return;
  }
  State = LS_VerbatimBlockFirstLine;
}
// Lex one line of a verbatim block: either a verbatim_block_line token or,
// when the line starts with the end command, the verbatim_block_end token.
void Lexer::lexVerbatimBlockFirstLine(Token &T) {
again:
  assert(BufferPtr < CommentEnd);
  // FIXME: It would be better to scan the text once, finding either the block
  // end command or newline.
  //
  // Extract current line.
  const char *Newline = findNewline(BufferPtr, CommentEnd);
  StringRef Line(BufferPtr, Newline - BufferPtr);
  // Look for end command in current line.
  size_t Pos = Line.find(VerbatimBlockEndCommandName);
  const char *TextEnd;
  const char *NextLine;
  if (Pos == StringRef::npos) {
    // Current line is completely verbatim.
    TextEnd = Newline;
    NextLine = skipNewline(Newline, CommentEnd);
  } else if (Pos == 0) {
    // Current line contains just an end command.
    const char *End = BufferPtr + VerbatimBlockEndCommandName.size();
    // Name excludes the leading '\\' or '@' marker.
    StringRef Name(BufferPtr + 1, End - (BufferPtr + 1));
    formTokenWithChars(T, End, tok::verbatim_block_end);
    T.setVerbatimBlockID(Traits.getCommandInfo(Name)->getID());
    State = LS_Normal;
    return;
  } else {
    // There is some text, followed by end command.  Extract text first.
    TextEnd = BufferPtr + Pos;
    NextLine = TextEnd;
    // If there is only whitespace before end command, skip whitespace.
    if (isWhitespace(BufferPtr, TextEnd)) {
      BufferPtr = TextEnd;
      goto again;
    }
  }
  StringRef Text(BufferPtr, TextEnd - BufferPtr);
  formTokenWithChars(T, NextLine, tok::verbatim_block_line);
  T.setVerbatimBlockText(Text);
  State = LS_VerbatimBlockBody;
}
// Lex the remaining lines of a verbatim block.  Emits an empty
// verbatim_block_line when the comment ends inside the block; otherwise
// reuses the first-line logic.
void Lexer::lexVerbatimBlockBody(Token &T) {
  assert(State == LS_VerbatimBlockBody);
  if (CommentState == LCS_InsideCComment)
    skipLineStartingDecorations();
  if (BufferPtr == CommentEnd) {
    formTokenWithChars(T, BufferPtr, tok::verbatim_block_line);
    T.setVerbatimBlockText("");
    return;
  }
  lexVerbatimBlockFirstLine(T);
}
// Emit the verbatim_line_name token for a verbatim line command and switch
// to the state that lexes the rest of the line verbatim.
void Lexer::setupAndLexVerbatimLine(Token &T, const char *TextBegin,
                                    const CommandInfo *Info) {
  assert(Info->IsVerbatimLineCommand);
  formTokenWithChars(T, TextBegin, tok::verbatim_line_name);
  T.setVerbatimLineID(Info->getID());
  State = LS_VerbatimLineText;
}
// Lex everything up to the next newline as the text of a verbatim line
// command, then return to normal lexing.
void Lexer::lexVerbatimLineText(Token &T) {
  assert(State == LS_VerbatimLineText);
  const char *LineEnd = findNewline(BufferPtr, CommentEnd);
  StringRef VerbatimText(BufferPtr, LineEnd - BufferPtr);
  formTokenWithChars(T, LineEnd, tok::verbatim_line_text);
  T.setVerbatimLineText(VerbatimText);
  State = LS_Normal;
}
// Lex an HTML character reference starting at '&': named ("&amp;"),
// decimal ("&#123;") or hex ("&#x1b;").  A malformed or unresolvable
// reference falls back to a plain text token.
void Lexer::lexHTMLCharacterReference(Token &T) {
  const char *TokenPtr = BufferPtr;
  assert(*TokenPtr == '&');
  TokenPtr++;
  if (TokenPtr == CommentEnd) {
    formTextToken(T, TokenPtr);
    return;
  }
  const char *NamePtr;
  bool isNamed = false;
  bool isDecimal = false;
  char C = *TokenPtr;
  if (isHTMLNamedCharacterReferenceCharacter(C)) {
    NamePtr = TokenPtr;
    TokenPtr = skipNamedCharacterReference(TokenPtr, CommentEnd);
    isNamed = true;
  } else if (C == '#') {
    TokenPtr++;
    if (TokenPtr == CommentEnd) {
      formTextToken(T, TokenPtr);
      return;
    }
    C = *TokenPtr;
    if (isHTMLDecimalCharacterReferenceCharacter(C)) {
      NamePtr = TokenPtr;
      TokenPtr = skipDecimalCharacterReference(TokenPtr, CommentEnd);
      isDecimal = true;
    } else if (C == 'x' || C == 'X') {
      TokenPtr++;
      NamePtr = TokenPtr;
      TokenPtr = skipHexCharacterReference(TokenPtr, CommentEnd);
    } else {
      formTextToken(T, TokenPtr);
      return;
    }
  } else {
    formTextToken(T, TokenPtr);
    return;
  }
  // Require a non-empty body followed by a terminating ';'.
  if (NamePtr == TokenPtr || TokenPtr == CommentEnd ||
      *TokenPtr != ';') {
    formTextToken(T, TokenPtr);
    return;
  }
  StringRef Name(NamePtr, TokenPtr - NamePtr);
  TokenPtr++; // Skip semicolon.
  StringRef Resolved;
  if (isNamed)
    Resolved = resolveHTMLNamedCharacterReference(Name);
  else if (isDecimal)
    Resolved = resolveHTMLDecimalCharacterReference(Name);
  else
    Resolved = resolveHTMLHexCharacterReference(Name);
  if (Resolved.empty()) {
    formTextToken(T, TokenPtr);
    return;
  }
  formTokenWithChars(T, TokenPtr, tok::text);
  T.setText(Resolved);
  return;
}
// Lex the "<name" part of an HTML start tag.  Emits html_start_tag for a
// known tag name (and enters LS_HTMLStartTag when attributes or '>'/'/'
// follow); unknown names become plain text.
void Lexer::setupAndLexHTMLStartTag(Token &T) {
  assert(BufferPtr[0] == '<' &&
         isHTMLIdentifierStartingCharacter(BufferPtr[1]));
  const char *TagNameEnd = skipHTMLIdentifier(BufferPtr + 2, CommentEnd);
  StringRef Name(BufferPtr + 1, TagNameEnd - (BufferPtr + 1));
  if (!isHTMLTagName(Name)) {
    formTextToken(T, TagNameEnd);
    return;
  }
  formTokenWithChars(T, TagNameEnd, tok::html_start_tag);
  T.setHTMLTagStartName(Name);
  BufferPtr = skipWhitespace(BufferPtr, CommentEnd);
  // Check for end-of-comment BEFORE dereferencing BufferPtr.  The previous
  // code read *BufferPtr first, which is an out-of-bounds read when the tag
  // sits at the very end of the comment (for BCPL comments CommentEnd can
  // equal the end of the buffer).
  if (BufferPtr != CommentEnd) {
    const char C = *BufferPtr;
    if (C == '>' || C == '/' || isHTMLIdentifierStartingCharacter(C))
      State = LS_HTMLStartTag;
  }
}
// Lex the interior of an HTML start tag: attribute names, '=', quoted
// values, and the closing '>' or '/>'.  Leaves LS_HTMLStartTag when the
// tag is closed or no further HTML token can follow.
void Lexer::lexHTMLStartTag(Token &T) {
  assert(State == LS_HTMLStartTag);
  const char *TokenPtr = BufferPtr;
  char C = *TokenPtr;
  if (isHTMLIdentifierCharacter(C)) {
    // Attribute name.
    TokenPtr = skipHTMLIdentifier(TokenPtr, CommentEnd);
    StringRef Ident(BufferPtr, TokenPtr - BufferPtr);
    formTokenWithChars(T, TokenPtr, tok::html_ident);
    T.setHTMLIdent(Ident);
  } else {
    switch (C) {
    case '=':
      TokenPtr++;
      formTokenWithChars(T, TokenPtr, tok::html_equals);
      break;
    case '\"':
    case '\'': {
      // Quoted attribute value; the token text excludes the quotes.
      const char *OpenQuote = TokenPtr;
      TokenPtr = skipHTMLQuotedString(TokenPtr, CommentEnd);
      const char *ClosingQuote = TokenPtr;
      if (TokenPtr != CommentEnd) // Skip closing quote.
        TokenPtr++;
      formTokenWithChars(T, TokenPtr, tok::html_quoted_string);
      T.setHTMLQuotedString(StringRef(OpenQuote + 1,
                                      ClosingQuote - (OpenQuote + 1)));
      break;
    }
    case '>':
      TokenPtr++;
      formTokenWithChars(T, TokenPtr, tok::html_greater);
      State = LS_Normal;
      return;
    case '/':
      TokenPtr++;
      if (TokenPtr != CommentEnd && *TokenPtr == '>') {
        TokenPtr++;
        formTokenWithChars(T, TokenPtr, tok::html_slash_greater);
      } else
        formTextToken(T, TokenPtr);
      State = LS_Normal;
      return;
    }
  }
  // Now look ahead and return to normal state if we don't see any HTML tokens
  // ahead.
  BufferPtr = skipWhitespace(BufferPtr, CommentEnd);
  if (BufferPtr == CommentEnd) {
    State = LS_Normal;
    return;
  }
  C = *BufferPtr;
  if (!isHTMLIdentifierStartingCharacter(C) &&
      C != '=' && C != '\"' && C != '\'' && C != '>') {
    State = LS_Normal;
    return;
  }
}
// Lex an HTML end tag "</name".  Emits html_end_tag for a known tag name
// (and enters LS_HTMLEndTag when '>' follows); unknown names become text.
void Lexer::setupAndLexHTMLEndTag(Token &T) {
  assert(BufferPtr[0] == '<' && BufferPtr[1] == '/');
  const char *TagNameBegin = skipWhitespace(BufferPtr + 2, CommentEnd);
  const char *TagNameEnd = skipHTMLIdentifier(TagNameBegin, CommentEnd);
  StringRef Name(TagNameBegin, TagNameEnd - TagNameBegin);
  if (!isHTMLTagName(Name)) {
    formTextToken(T, TagNameEnd);
    return;
  }
  const char *End = skipWhitespace(TagNameEnd, CommentEnd);
  formTokenWithChars(T, End, tok::html_end_tag);
  T.setHTMLTagEndName(Name);
  // formTokenWithChars advanced BufferPtr to End, so this peeks at the
  // character after the tag name and any trailing whitespace.
  if (BufferPtr != CommentEnd && *BufferPtr == '>')
    State = LS_HTMLEndTag;
}
// Emit the '>' that closes an HTML end tag and return to normal lexing.
void Lexer::lexHTMLEndTag(Token &T) {
  assert(BufferPtr != CommentEnd && *BufferPtr == '>');
  formTokenWithChars(T, BufferPtr + 1, tok::html_greater);
  State = LS_Normal;
}
// Construct a comment lexer over [BufferStart, BufferEnd).  FileLoc is the
// location of BufferStart; lexing starts before the first comment.
Lexer::Lexer(llvm::BumpPtrAllocator &Allocator, DiagnosticsEngine &Diags,
             const CommandTraits &Traits,
             SourceLocation FileLoc,
             const char *BufferStart, const char *BufferEnd):
    Allocator(Allocator), Diags(Diags), Traits(Traits),
    BufferStart(BufferStart), BufferEnd(BufferEnd),
    FileLoc(FileLoc), BufferPtr(BufferStart),
    CommentState(LCS_BeforeComment), State(LS_Normal) {
}
// Top-level lexing entry point: drives the comment-level state machine
// (before/inside/between comments), strips comment markers and Doxygen
// decorations, and delegates interior lexing to lexCommentText.
void Lexer::lex(Token &T) {
again:
  switch (CommentState) {
  case LCS_BeforeComment:
    if (BufferPtr == BufferEnd) {
      formTokenWithChars(T, BufferPtr, tok::eof);
      return;
    }
    assert(*BufferPtr == '/');
    BufferPtr++; // Skip first slash.
    switch(*BufferPtr) {
    case '/': { // BCPL comment.
      BufferPtr++; // Skip second slash.
      if (BufferPtr != BufferEnd) {
        // Skip Doxygen magic marker, if it is present.
        // It might be missing because of a typo //< or /*<, or because we
        // merged this non-Doxygen comment into a bunch of Doxygen comments
        // around it: /** ... */ /* ... */ /** ... */
        const char C = *BufferPtr;
        if (C == '/' || C == '!')
          BufferPtr++;
      }
      // Skip less-than symbol that marks trailing comments.
      // Skip it even if the comment is not a Doxygen one, because //< and /*<
      // are frequent typos.
      if (BufferPtr != BufferEnd && *BufferPtr == '<')
        BufferPtr++;
      CommentState = LCS_InsideBCPLComment;
      // Preserve an in-progress verbatim block across adjacent BCPL lines.
      if (State != LS_VerbatimBlockBody && State != LS_VerbatimBlockFirstLine)
        State = LS_Normal;
      CommentEnd = findBCPLCommentEnd(BufferPtr, BufferEnd);
      goto again;
    }
    case '*': { // C comment.
      BufferPtr++; // Skip star.
      // Skip Doxygen magic marker.
      const char C = *BufferPtr;
      if ((C == '*' && *(BufferPtr + 1) != '/') || C == '!')
        BufferPtr++;
      // Skip less-than symbol that marks trailing comments.
      if (BufferPtr != BufferEnd && *BufferPtr == '<')
        BufferPtr++;
      CommentState = LCS_InsideCComment;
      State = LS_Normal;
      CommentEnd = findCCommentEnd(BufferPtr, BufferEnd);
      goto again;
    }
    default:
      llvm_unreachable("second character of comment should be '/' or '*'");
    }
  case LCS_BetweenComments: {
    // Consecutive comments are extracted only if there is only whitespace
    // between them. So we can search for the start of the next comment.
    const char *EndWhitespace = BufferPtr;
    while(EndWhitespace != BufferEnd && *EndWhitespace != '/')
      EndWhitespace++;
    // Turn any whitespace between comments (and there is only whitespace
    // between them -- guaranteed by comment extraction) into a newline. We
    // have two newlines between C comments in total (first one was synthesized
    // after a comment).
    formTokenWithChars(T, EndWhitespace, tok::newline);
    CommentState = LCS_BeforeComment;
    break;
  }
  case LCS_InsideBCPLComment:
  case LCS_InsideCComment:
    if (BufferPtr != CommentEnd) {
      lexCommentText(T);
      break;
    } else {
      // Skip C comment closing sequence.
      if (CommentState == LCS_InsideCComment) {
        assert(BufferPtr[0] == '*' && BufferPtr[1] == '/');
        BufferPtr += 2;
        assert(BufferPtr <= BufferEnd);
        // Synthesize newline just after the C comment, regardless if there is
        // actually a newline.
        formTokenWithChars(T, BufferPtr, tok::newline);
        CommentState = LCS_BetweenComments;
        break;
      } else {
        // Don't synthesize a newline after a BCPL comment.
        CommentState = LCS_BetweenComments;
        goto again;
      }
    }
  }
}
// Return the source text of Tok as a StringRef into the source buffer.
// On failure to retrieve the buffer, returns an empty StringRef and, when
// the optional Invalid out-parameter is provided, sets it to true.
StringRef Lexer::getSpelling(const Token &Tok,
                             const SourceManager &SourceMgr,
                             bool *Invalid) const {
  SourceLocation Loc = Tok.getLocation();
  std::pair<FileID, unsigned> LocInfo = SourceMgr.getDecomposedLoc(Loc);
  bool InvalidTemp = false;
  StringRef File = SourceMgr.getBufferData(LocInfo.first, &InvalidTemp);
  if (InvalidTemp) {
    // Guard the out-parameter: the previous code dereferenced Invalid
    // unconditionally, crashing callers that pass no Invalid pointer.
    if (Invalid)
      *Invalid = true;
    return StringRef();
  }
  const char *Begin = File.data() + LocInfo.second;
  return StringRef(Begin, Tok.getLength());
}
} // end namespace comments
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/TemplateBase.cpp | //===--- TemplateBase.cpp - Common template AST class implementation ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements common classes used throughout C++ template
// representations.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/TemplateBase.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace clang;
/// \brief Print a template integral argument value.
///
/// \param TemplArg the TemplateArgument instance to print.
///
/// \param Out the raw_ostream instance to use for printing.
///
/// \param Policy the printing policy for EnumConstantDecl printing.
static void printIntegral(const TemplateArgument &TemplArg,
                          raw_ostream &Out, const PrintingPolicy& Policy) {
  const ::clang::Type *T = TemplArg.getIntegralType().getTypePtr();
  const llvm::APSInt &Val = TemplArg.getAsIntegral();
  // For enum types, prefer printing the matching enumerator name.
  if (const EnumType *ET = T->getAs<EnumType>()) {
    for (const EnumConstantDecl* ECD : ET->getDecl()->enumerators()) {
      // In Sema::CheckTemplateArugment, enum template arguments value are
      // extended to the size of the integer underlying the enum type. This
      // may create a size difference between the enum value and template
      // argument value, requiring isSameValue here instead of operator==.
      if (llvm::APSInt::isSameValue(ECD->getInitVal(), Val)) {
        ECD->printQualifiedName(Out, Policy);
        return;
      }
    }
  }
  if (T->isBooleanType()) {
    Out << (Val.getBoolValue() ? "true" : "false");
  } else if (T->isCharType()) {
    // Print character arguments as escaped character literals.
    const char Ch = Val.getZExtValue();
    Out << ((Ch == '\'') ? "'\\" : "'");
    Out.write_escaped(StringRef(&Ch, 1), /*UseHexEscapes=*/ true);
    Out << "'";
  } else {
    Out << Val;
  }
}
//===----------------------------------------------------------------------===//
// TemplateArgument Implementation
//===----------------------------------------------------------------------===//
// Construct an integral template argument, storing the APSInt value in a
// decomposed form (inline for one word, ASTContext-allocated otherwise).
TemplateArgument::TemplateArgument(ASTContext &Ctx, const llvm::APSInt &Value,
                                   QualType Type) {
  Integer.Kind = Integral;
  // Copy the APSInt value into our decomposed form.
  Integer.BitWidth = Value.getBitWidth();
  Integer.IsUnsigned = Value.isUnsigned();
  // If the value is large, we have to get additional memory from the ASTContext
  unsigned NumWords = Value.getNumWords();
  if (NumWords > 1) {
    void *Mem = Ctx.Allocate(NumWords * sizeof(uint64_t));
    std::memcpy(Mem, Value.getRawData(), NumWords * sizeof(uint64_t));
    Integer.pVal = static_cast<uint64_t *>(Mem);
  } else {
    Integer.VAL = Value.getZExtValue();
  }
  Integer.Type = Type.getAsOpaquePtr();
}
// Create a pack argument whose elements are copied into
// ASTContext-allocated storage; an empty input yields the empty pack.
TemplateArgument TemplateArgument::CreatePackCopy(ASTContext &Context,
                                                  const TemplateArgument *Args,
                                                  unsigned NumArgs) {
  if (NumArgs == 0)
    return getEmptyPack();
  TemplateArgument *Storage = new (Context) TemplateArgument [NumArgs];
  std::copy(Args, Args + NumArgs, Storage);
  return TemplateArgument(Storage, NumArgs);
}
// Whether this template argument is dependent on a template parameter.
bool TemplateArgument::isDependent() const {
  switch (getKind()) {
  case Null:
    llvm_unreachable("Should not have a NULL template argument");
  case Type:
    return getAsType()->isDependentType() ||
           isa<PackExpansionType>(getAsType());
  case Template:
    return getAsTemplate().isDependent();
  case TemplateExpansion:
    return true;
  case Declaration:
    // A declaration is dependent when it (or its context) lives in a
    // dependent context.
    if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl()))
      return DC->isDependentContext();
    return getAsDecl()->getDeclContext()->isDependentContext();
  case NullPtr:
    return false;
  case Integral:
    // Never dependent
    return false;
  case Expression:
    return (getAsExpr()->isTypeDependent() || getAsExpr()->isValueDependent() ||
            isa<PackExpansionExpr>(getAsExpr()));
  case Pack:
    // A pack is dependent if any of its elements is.
    for (const auto &P : pack_elements())
      if (P.isDependent())
        return true;
    return false;
  }
  llvm_unreachable("Invalid TemplateArgument Kind!");
}
// Whether this argument is instantiation-dependent (its meaning can change
// between instantiations even if it is not strictly dependent).
bool TemplateArgument::isInstantiationDependent() const {
  switch (getKind()) {
  case Null:
    llvm_unreachable("Should not have a NULL template argument");
  case Type:
    return getAsType()->isInstantiationDependentType();
  case Template:
    return getAsTemplate().isInstantiationDependent();
  case TemplateExpansion:
    return true;
  case Declaration:
    if (DeclContext *DC = dyn_cast<DeclContext>(getAsDecl()))
      return DC->isDependentContext();
    return getAsDecl()->getDeclContext()->isDependentContext();
  case NullPtr:
    return false;
  case Integral:
    // Never dependent
    return false;
  case Expression:
    return getAsExpr()->isInstantiationDependent();
  case Pack:
    for (const auto &P : pack_elements())
      if (P.isInstantiationDependent())
        return true;
    return false;
  }
  llvm_unreachable("Invalid TemplateArgument Kind!");
}
// Whether this argument is a pack expansion (ends in an ellipsis).
bool TemplateArgument::isPackExpansion() const {
  switch (getKind()) {
  case Null:
  case Declaration:
  case Integral:
  case Pack:
  case Template:
  case NullPtr:
    return false;
  case TemplateExpansion:
    return true;
  case Type:
    return isa<PackExpansionType>(getAsType());
  case Expression:
    return isa<PackExpansionExpr>(getAsExpr());
  }
  llvm_unreachable("Invalid TemplateArgument Kind!");
}
// Whether this argument mentions an unexpanded parameter pack.  Only type,
// template, expression and pack arguments can; the other kinds never do.
bool TemplateArgument::containsUnexpandedParameterPack() const {
  switch (getKind()) {
  case Null:
  case Declaration:
  case Integral:
  case TemplateExpansion:
  case NullPtr:
    return false;
  case Type:
    return getAsType()->containsUnexpandedParameterPack();
  case Template:
    return getAsTemplate().containsUnexpandedParameterPack();
  case Expression:
    return getAsExpr()->containsUnexpandedParameterPack();
  case Pack:
    for (const auto &Elt : pack_elements())
      if (Elt.containsUnexpandedParameterPack())
        return true;
    return false;
  }
  return false;
}
// Number of expansions of a TemplateExpansion argument, if known.
// NumExpansions stores the count biased by one so that 0 means "unknown".
Optional<unsigned> TemplateArgument::getNumTemplateExpansions() const {
  assert(getKind() == TemplateExpansion);
  if (TemplateArg.NumExpansions)
    return TemplateArg.NumExpansions - 1;
  return None;
}
// Fold this argument into ID for use in FoldingSet-based uniquing.
void TemplateArgument::Profile(llvm::FoldingSetNodeID &ID,
                               const ASTContext &Context) const {
  ID.AddInteger(getKind());
  switch (getKind()) {
  case Null:
    break;
  case Type:
    getAsType().Profile(ID);
    break;
  case NullPtr:
    getNullPtrType().Profile(ID);
    break;
  case Declaration:
    ID.AddPointer(getAsDecl()? getAsDecl()->getCanonicalDecl() : nullptr);
    break;
  case Template:
  case TemplateExpansion: {
    TemplateName Template = getAsTemplateOrTemplatePattern();
    // Template template parameters are profiled by position, not pointer
    // identity, so equivalent parameters from different lists match.
    if (TemplateTemplateParmDecl *TTP
          = dyn_cast_or_null<TemplateTemplateParmDecl>(
                                                Template.getAsTemplateDecl())) {
      ID.AddBoolean(true);
      ID.AddInteger(TTP->getDepth());
      ID.AddInteger(TTP->getPosition());
      ID.AddBoolean(TTP->isParameterPack());
    } else {
      ID.AddBoolean(false);
      ID.AddPointer(Context.getCanonicalTemplateName(Template)
                      .getAsVoidPointer());
    }
    break;
  }
  case Integral:
    getAsIntegral().Profile(ID);
    getIntegralType().Profile(ID);
    break;
  case Expression:
    getAsExpr()->Profile(ID, Context, true);
    break;
  case Pack:
    ID.AddInteger(Args.NumArgs);
    for (unsigned I = 0; I != Args.NumArgs; ++I)
      Args.Args[I].Profile(ID, Context);
  }
}
// Structural (representation-level) equality of two template arguments.
bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const {
  if (getKind() != Other.getKind()) return false;
  switch (getKind()) {
  case Null:
  case Type:
  case Expression:
  case Template:
  case TemplateExpansion:
  case NullPtr:
    // These kinds store a single pointer-sized payload; compare it directly.
    return TypeOrValue.V == Other.TypeOrValue.V;
  case Declaration:
    return getAsDecl() == Other.getAsDecl();
  case Integral:
    return getIntegralType() == Other.getIntegralType() &&
           getAsIntegral() == Other.getAsIntegral();
  case Pack:
    // Packs are equal when they have equal elements, pairwise.
    if (Args.NumArgs != Other.Args.NumArgs) return false;
    for (unsigned I = 0, E = Args.NumArgs; I != E; ++I)
      if (!Args.Args[I].structurallyEquals(Other.Args.Args[I]))
        return false;
    return true;
  }
  llvm_unreachable("Invalid TemplateArgument Kind!");
}
// For a pack-expansion argument, return the pattern being expanded
// (i.e. the argument without the trailing ellipsis).
TemplateArgument TemplateArgument::getPackExpansionPattern() const {
  assert(isPackExpansion());
  switch (getKind()) {
  case Type:
    return getAsType()->getAs<PackExpansionType>()->getPattern();
  case Expression:
    return cast<PackExpansionExpr>(getAsExpr())->getPattern();
  case TemplateExpansion:
    return TemplateArgument(getAsTemplateOrTemplatePattern());
  case Declaration:
  case Integral:
  case Pack:
  case Null:
  case Template:
  case NullPtr:
    // These kinds are never pack expansions (guarded by the assert above).
    return TemplateArgument();
  }
  llvm_unreachable("Invalid TemplateArgument Kind!");
}
// Pretty-print this template argument to Out using the given policy.
void TemplateArgument::print(const PrintingPolicy &Policy,
                             raw_ostream &Out) const {
  switch (getKind()) {
  case Null:
    Out << "(no value)";
    break;
  case Type: {
    PrintingPolicy SubPolicy(Policy);
    SubPolicy.SuppressStrongLifetime = true;
    getAsType().print(Out, SubPolicy);
    break;
  }
  case Declaration: {
    NamedDecl *ND = cast<NamedDecl>(getAsDecl());
    Out << '&';
    if (ND->getDeclName()) {
      // FIXME: distinguish between pointer and reference args?
      ND->printQualifiedName(Out);
    } else {
      Out << "(anonymous)";
    }
    break;
  }
  case NullPtr:
    Out << "nullptr";
    break;
  case Template:
    getAsTemplate().print(Out, Policy);
    break;
  case TemplateExpansion:
    getAsTemplateOrTemplatePattern().print(Out, Policy);
    Out << "...";
    break;
  case Integral: {
    printIntegral(*this, Out, Policy);
    break;
  }
  case Expression:
    getAsExpr()->printPretty(Out, nullptr, Policy);
    break;
  case Pack:
    // Print pack elements as a comma-separated list in angle brackets.
    Out << "<";
    bool First = true;
    for (const auto &P : pack_elements()) {
      if (First)
        First = false;
      else
        Out << ", ";
      P.print(Policy, Out);
    }
    Out << ">";
    break;
  }
}
//===----------------------------------------------------------------------===//
// TemplateArgumentLoc Implementation
//===----------------------------------------------------------------------===//
// Zero-initialize the whole location-info union.
TemplateArgumentLocInfo::TemplateArgumentLocInfo() {
  memset((void*)this, 0, sizeof(TemplateArgumentLocInfo));
}
// Source range covered by this template argument, per argument kind.
SourceRange TemplateArgumentLoc::getSourceRange() const {
  switch (Argument.getKind()) {
  case TemplateArgument::Expression:
    return getSourceExpression()->getSourceRange();
  case TemplateArgument::Declaration:
    return getSourceDeclExpression()->getSourceRange();
  case TemplateArgument::NullPtr:
    return getSourceNullPtrExpression()->getSourceRange();
  case TemplateArgument::Type:
    if (TypeSourceInfo *TSI = getTypeSourceInfo())
      return TSI->getTypeLoc().getSourceRange();
    else
      return SourceRange();
  case TemplateArgument::Template:
    // Include the nested-name-specifier qualifier, if present.
    if (getTemplateQualifierLoc())
      return SourceRange(getTemplateQualifierLoc().getBeginLoc(),
                         getTemplateNameLoc());
    return SourceRange(getTemplateNameLoc());
  case TemplateArgument::TemplateExpansion:
    if (getTemplateQualifierLoc())
      return SourceRange(getTemplateQualifierLoc().getBeginLoc(),
                         getTemplateEllipsisLoc());
    return SourceRange(getTemplateNameLoc(), getTemplateEllipsisLoc());
  case TemplateArgument::Integral:
    return getSourceIntegralExpression()->getSourceRange();
  case TemplateArgument::Pack:
  case TemplateArgument::Null:
    return SourceRange();
  }
  llvm_unreachable("Invalid TemplateArgument Kind!");
}
// Stream a template argument into a diagnostic.
const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
                                           const TemplateArgument &Arg) {
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    // This is bad, but not as bad as crashing because of argument
    // count mismatches.
    return DB << "(null template argument)";
  case TemplateArgument::Type:
    return DB << Arg.getAsType();
  case TemplateArgument::Declaration:
    return DB << Arg.getAsDecl();
  case TemplateArgument::NullPtr:
    return DB << "nullptr";
  case TemplateArgument::Integral:
    return DB << Arg.getAsIntegral().toString(10);
  case TemplateArgument::Template:
    return DB << Arg.getAsTemplate();
  case TemplateArgument::TemplateExpansion:
    return DB << Arg.getAsTemplateOrTemplatePattern() << "...";
  case TemplateArgument::Expression: {
    // This shouldn't actually ever happen, so it's okay that we're
    // regurgitating an expression here.
    // FIXME: We're guessing at LangOptions!
    SmallString<32> Str;
    llvm::raw_svector_ostream OS(Str);
    LangOptions LangOpts;
#ifdef MS_SUPPORT_VARIABLE_LANGOPTS
    LangOpts.CPlusPlus = true;
#endif
    PrintingPolicy Policy(LangOpts);
    Arg.getAsExpr()->printPretty(OS, nullptr, Policy);
    return DB << OS.str();
  }
  case TemplateArgument::Pack: {
    // FIXME: We're guessing at LangOptions!
    SmallString<32> Str;
    llvm::raw_svector_ostream OS(Str);
    LangOptions LangOpts;
#ifdef MS_SUPPORT_VARIABLE_LANGOPTS
    LangOpts.CPlusPlus = true;
#endif
    PrintingPolicy Policy(LangOpts);
    Arg.print(Policy, OS);
    return DB << OS.str();
  }
  }
  llvm_unreachable("Invalid TemplateArgument Kind!");
}
/// Allocate an ASTTemplateArgumentListInfo (plus its trailing argument
/// array) in \p C and populate it from \p List.
const ASTTemplateArgumentListInfo *
ASTTemplateArgumentListInfo::Create(ASTContext &C,
                                    const TemplateArgumentListInfo &List) {
  assert(llvm::alignOf<ASTTemplateArgumentListInfo>() >=
         llvm::alignOf<TemplateArgumentLoc>());
  // The TemplateArgumentLoc array lives directly after the object itself.
  std::size_t Bytes = ASTTemplateArgumentListInfo::sizeFor(List.size());
  void *Buffer =
      C.Allocate(Bytes, llvm::alignOf<ASTTemplateArgumentListInfo>());
  ASTTemplateArgumentListInfo *Result =
      new (Buffer) ASTTemplateArgumentListInfo();
  Result->initializeFrom(List);
  return Result;
}
void ASTTemplateArgumentListInfo::initializeFrom(
const TemplateArgumentListInfo &Info) {
LAngleLoc = Info.getLAngleLoc();
RAngleLoc = Info.getRAngleLoc();
NumTemplateArgs = Info.size();
TemplateArgumentLoc *ArgBuffer = getTemplateArgs();
for (unsigned i = 0; i != NumTemplateArgs; ++i)
new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]);
}
void ASTTemplateArgumentListInfo::initializeFrom(
const TemplateArgumentListInfo &Info,
bool &Dependent,
bool &InstantiationDependent,
bool &ContainsUnexpandedParameterPack) {
LAngleLoc = Info.getLAngleLoc();
RAngleLoc = Info.getRAngleLoc();
NumTemplateArgs = Info.size();
TemplateArgumentLoc *ArgBuffer = getTemplateArgs();
for (unsigned i = 0; i != NumTemplateArgs; ++i) {
Dependent = Dependent || Info[i].getArgument().isDependent();
InstantiationDependent = InstantiationDependent ||
Info[i].getArgument().isInstantiationDependent();
ContainsUnexpandedParameterPack
= ContainsUnexpandedParameterPack ||
Info[i].getArgument().containsUnexpandedParameterPack();
new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]);
}
}
/// Rebuild an equivalent TemplateArgumentListInfo from this object.
void ASTTemplateArgumentListInfo::copyInto(
                                      TemplateArgumentListInfo &Info) const {
  Info.setLAngleLoc(LAngleLoc);
  Info.setRAngleLoc(RAngleLoc);
  for (unsigned I = 0, E = NumTemplateArgs; I != E; ++I)
    Info.addArgument(getTemplateArgs()[I]);
}
/// Bytes required for the header plus a trailing array of
/// \p NumTemplateArgs TemplateArgumentLoc objects.
std::size_t ASTTemplateArgumentListInfo::sizeFor(unsigned NumTemplateArgs) {
  return sizeof(ASTTemplateArgumentListInfo) +
         NumTemplateArgs * sizeof(TemplateArgumentLoc);
}
// Initialize from explicit template arguments plus the location of the
// 'template' keyword.
void
ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc,
                                         const TemplateArgumentListInfo &Info) {
  // Base::initializeFrom must run first: it sets NumTemplateArgs, which
  // determines where the template keyword location is stored (it lives
  // directly after the trailing argument array; see sizeFor()).
  Base::initializeFrom(Info);
  setTemplateKeywordLoc(TemplateKWLoc);
}
// As above, but also accumulates the dependence flags of the copied
// arguments into the caller-provided booleans.
void
ASTTemplateKWAndArgsInfo
::initializeFrom(SourceLocation TemplateKWLoc,
                 const TemplateArgumentListInfo &Info,
                 bool &Dependent,
                 bool &InstantiationDependent,
                 bool &ContainsUnexpandedParameterPack) {
  // Base::initializeFrom must run first: it sets NumTemplateArgs, which
  // setTemplateKeywordLoc uses to find the keyword slot after the args.
  Base::initializeFrom(Info, Dependent, InstantiationDependent,
                       ContainsUnexpandedParameterPack);
  setTemplateKeywordLoc(TemplateKWLoc);
}
void
ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc) {
  // No explicit template arguments, but template keyword loc is valid.
  assert(TemplateKWLoc.isValid());
  LAngleLoc = SourceLocation();
  RAngleLoc = SourceLocation();
  // NumTemplateArgs must be set before setTemplateKeywordLoc, which uses it
  // to locate the keyword slot after the (here empty) argument array.
  NumTemplateArgs = 0;
  setTemplateKeywordLoc(TemplateKWLoc);
}
/// Size of the base layout plus one extra SourceLocation slot, appended
/// after the argument array, for the 'template' keyword location.
std::size_t
ASTTemplateKWAndArgsInfo::sizeFor(unsigned NumTemplateArgs) {
  // FIXME: There's room for this in the padding before the template args in
  // 64-bit builds.
  return sizeof(SourceLocation) + Base::sizeFor(NumTemplateArgs);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/AttrImpl.cpp | //===--- AttrImpl.cpp - Classes for representing attributes -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains out-of-line methods for Attr classes.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/Attr.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
#include "clang/AST/AttrImpl.inc"
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/DeclPrinter.cpp | //===--- DeclPrinter.cpp - Printing implementation for Decl ASTs ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Decl::print method, which pretty prints the
// AST back out to C/Objective-C/C++/Objective-C++ code.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/Basic/Module.h"
#include "llvm/Support/raw_ostream.h"
#include "clang/Sema/SemaHLSL.h" // HLSL Change
using namespace clang;
// //
///////////////////////////////////////////////////////////////////////////////
namespace {
  /// Pretty-printer for declarations: walks a Decl (or DeclContext) with
  /// DeclVisitor and writes C/C++/Objective-C/HLSL source text to Out,
  /// honoring the supplied PrintingPolicy.
  class DeclPrinter : public DeclVisitor<DeclPrinter> {
    raw_ostream &Out;          // Destination stream.
    PrintingPolicy Policy;     // Formatting/dialect options.
    unsigned Indentation;      // Current indentation level (policy units).
    bool PrintInstantiation;   // Also print template specializations.
    // Emit the current (or an explicit) indentation and return the stream.
    raw_ostream& Indent() { return Indent(Indentation); }
    raw_ostream& Indent(unsigned Indentation);
    // Flush a pending group of declarations that share one declarator.
    void ProcessDeclGroup(SmallVectorImpl<Decl*>& Decls);
    // Print a C++ access specifier keyword.
    void Print(AccessSpecifier AS);
    /// Print an Objective-C method type in parentheses.
    ///
    /// \param Quals The Objective-C declaration qualifiers.
    /// \param T The type to print.
    void PrintObjCMethodType(ASTContext &Ctx, Decl::ObjCDeclQualifier Quals,
                             QualType T);
    void PrintObjCTypeParams(ObjCTypeParamList *Params);
  public:
    DeclPrinter(raw_ostream &Out, const PrintingPolicy &Policy,
                unsigned Indentation = 0, bool PrintInstantiation = false)
      : Out(Out), Policy(Policy), Indentation(Indentation),
        PrintInstantiation(PrintInstantiation) { }
    void VisitDeclContext(DeclContext *DC, bool Indent = true);
    void VisitTranslationUnitDecl(TranslationUnitDecl *D);
    void VisitTypedefDecl(TypedefDecl *D);
    void VisitTypeAliasDecl(TypeAliasDecl *D);
    void VisitEnumDecl(EnumDecl *D);
    void VisitRecordDecl(RecordDecl *D);
    void VisitEnumConstantDecl(EnumConstantDecl *D);
    void VisitEmptyDecl(EmptyDecl *D);
    void VisitFunctionDecl(FunctionDecl *D);
    void VisitFriendDecl(FriendDecl *D);
    void VisitFieldDecl(FieldDecl *D);
    void VisitVarDecl(VarDecl *D);
    void VisitLabelDecl(LabelDecl *D);
    void VisitParmVarDecl(ParmVarDecl *D);
    void VisitFileScopeAsmDecl(FileScopeAsmDecl *D);
    void VisitImportDecl(ImportDecl *D);
    void VisitStaticAssertDecl(StaticAssertDecl *D);
    void VisitNamespaceDecl(NamespaceDecl *D);
    void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
    void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
    void VisitCXXRecordDecl(CXXRecordDecl *D);
    void VisitLinkageSpecDecl(LinkageSpecDecl *D);
    void VisitTemplateDecl(const TemplateDecl *D);
    void VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
    void VisitClassTemplateDecl(ClassTemplateDecl *D);
    void VisitObjCMethodDecl(ObjCMethodDecl *D);
    void VisitObjCImplementationDecl(ObjCImplementationDecl *D);
    void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
    void VisitObjCProtocolDecl(ObjCProtocolDecl *D);
    void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
    void VisitObjCCategoryDecl(ObjCCategoryDecl *D);
    void VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D);
    void VisitObjCPropertyDecl(ObjCPropertyDecl *D);
    void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
    void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
    void VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
    void VisitUsingDecl(UsingDecl *D);
    void VisitUsingShadowDecl(UsingShadowDecl *D);
    void VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D);
    void PrintTemplateParameters(const TemplateParameterList *Params,
                                 const TemplateArgumentList *Args = nullptr);
    void prettyPrintAttributes(Decl *D);
    void printDeclType(QualType T, StringRef DeclName, bool Pack = false);
    // HLSL Change Begin
    void VisitHLSLBufferDecl(HLSLBufferDecl *D);
    void PrintUnusualAnnotations(NamedDecl *D);
    void VisitHLSLUnusualAnnotation(const hlsl::UnusualAnnotation *UA);
    void PrintHLSLPreAttr(NamedDecl *D);
    // HLSL Change End
  };
}
/// Pretty-print this declaration using the ASTContext's default policy.
void Decl::print(raw_ostream &Out, unsigned Indentation,
                 bool PrintInstantiation) const {
  const PrintingPolicy &Policy = getASTContext().getPrintingPolicy();
  print(Out, Policy, Indentation, PrintInstantiation);
}
/// Pretty-print this declaration with an explicit printing policy.
void Decl::print(raw_ostream &Out, const PrintingPolicy &Policy,
                 unsigned Indentation, bool PrintInstantiation) const {
  DeclPrinter(Out, Policy, Indentation, PrintInstantiation)
      .Visit(const_cast<Decl *>(this));
}
/// Strip declarator structure (pointers, references, arrays, functions,
/// vectors) off \p T until a specifier type or typedef is reached.
static QualType GetBaseType(QualType T) {
  // FIXME: This should be on the Type class!
  QualType Base = T;
  while (!Base->isSpecifierType()) {
    if (isa<TypedefType>(Base))
      break;
    if (const PointerType *Ptr = Base->getAs<PointerType>())
      Base = Ptr->getPointeeType();
    else if (const BlockPointerType *BlkPtr =
                 Base->getAs<BlockPointerType>())
      Base = BlkPtr->getPointeeType();
    else if (const ArrayType *Arr = dyn_cast<ArrayType>(Base))
      Base = Arr->getElementType();
    else if (const FunctionType *Fn = Base->getAs<FunctionType>())
      Base = Fn->getReturnType();
    else if (const VectorType *Vec = Base->getAs<VectorType>())
      Base = Vec->getElementType();
    else if (const ReferenceType *Ref = Base->getAs<ReferenceType>())
      Base = Ref->getPointeeType();
    else
      llvm_unreachable("Unknown declarator!");
  }
  return Base;
}
/// Type used for declaration-group merging: the underlying type of a
/// typedef, the declared type of a value, or a null QualType otherwise.
static QualType getDeclType(Decl *D) {
  if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(D))
    return Typedef->getUnderlyingType();
  if (ValueDecl *Value = dyn_cast<ValueDecl>(D))
    return Value->getType();
  return QualType();
}
/// Print a group of declarations that share one declarator, e.g.
/// "struct { int x; } a, b;".  The first entry may be the tag declaration
/// itself; the remaining declarators are printed with specifiers suppressed
/// so the shared type is only emitted once.
void Decl::printGroup(Decl** Begin, unsigned NumDecls,
                      raw_ostream &Out, const PrintingPolicy &Policy,
                      unsigned Indentation) {
  if (NumDecls == 1) {
    (*Begin)->print(Out, Policy, Indentation);
    return;
  }
  Decl** End = Begin + NumDecls;
  TagDecl* TD = dyn_cast<TagDecl>(*Begin);
  if (TD)
    ++Begin;
  // HLSL Change Begin - anonymous struct need to have static.
  //static const struct {
  //  float a;
  //  SamplerState s;
  //} A = {1.2, ss};
  // will be rewrite to
  // struct {
  //  float a;
  //  SamplerState s;
  //} static const A = {1.2, ss};
  // without this change.
  bool bAnonymous = false;
  if (TD && TD->getName().empty()) {
    bAnonymous = true;
  }
  if (bAnonymous && Begin) {
    if (VarDecl *VD = dyn_cast<VarDecl>(*Begin)) {
      if (!Policy.SuppressSpecifiers) {
        // Print storage class and qualifiers up front so they precede the
        // anonymous tag definition.
        StorageClass SC = VD->getStorageClass();
        if (SC != SC_None)
          Out << VarDecl::getStorageClassSpecifierString(SC) << " ";
        if (VD->getType().hasQualifiers())
          VD->getType().getQualifiers().print(Out, Policy,
                                              /*appendSpaceIfNonEmpty*/ true);
      }
    }
  }
  // HLSL Change End
  PrintingPolicy SubPolicy(Policy);
  if (TD && TD->isCompleteDefinition()) {
    TD->print(Out, Policy, Indentation);
    Out << " ";
    SubPolicy.SuppressTag = true;
  }
  bool isFirst = true;
  for ( ; Begin != End; ++Begin) {
    if (isFirst) {
      // HLSL Change: for an anonymous tag the specifiers were already
      // printed above, so suppress them on the first declarator too.
      SubPolicy.SuppressSpecifiers = bAnonymous;
      isFirst = false;
    } else {
      // Fixed: removed a redundant inner "if (!isFirst)" guard here — the
      // condition is always true in this branch.
      Out << ", ";
      SubPolicy.SuppressSpecifiers = true;
    }
    (*Begin)->print(Out, SubPolicy, Indentation);
  }
}
/// Debug helper: print this DeclContext's declarations to stderr.
LLVM_DUMP_METHOD void DeclContext::dumpDeclContext() const {
  // Walk up to the translation unit so we can grab the ASTContext.
  const DeclContext *TU = this;
  while (!TU->isTranslationUnit())
    TU = TU->getParent();
  ASTContext &Ctx = cast<TranslationUnitDecl>(TU)->getASTContext();
  DeclPrinter Printer(llvm::errs(), Ctx.getPrintingPolicy(), 0);
  Printer.VisitDeclContext(const_cast<DeclContext *>(this), /*Indent=*/false);
}
/// Emit two spaces per requested indentation level and return the stream.
raw_ostream& DeclPrinter::Indent(unsigned Indentation) {
  while (Indentation--)
    Out << "  ";
  return Out;
}
/// Print the declaration's attributes, skipping HLSL-specific attributes
/// (those are handled separately by the HLSL pre-attribute printer).
void DeclPrinter::prettyPrintAttributes(Decl *D) {
  if (Policy.PolishForDeclaration)
    return;
  if (!D->hasAttrs())
    return;
  for (Attr *A : D->getAttrs()) {
    if (!hlsl::IsHLSLAttr(A->getKind())) // HLSL Change
      A->printPretty(Out, Policy);
  }
}
/// Print type \p T declaring \p DeclName.  A pack-expansion type places
/// the ellipsis before the declared name ("T ...name") rather than after
/// the type, as required for declarations.
void DeclPrinter::printDeclType(QualType T, StringRef DeclName, bool Pack) {
  if (auto *PET = T->getAs<PackExpansionType>()) {
    Pack = true;
    T = PET->getPattern();
  }
  const char *Ellipsis = Pack ? "..." : "";
  T.print(Out, Policy, Ellipsis + DeclName);
}
/// Flush a pending group of declarations that share one type, then clear
/// the group for reuse.
void DeclPrinter::ProcessDeclGroup(SmallVectorImpl<Decl*>& Decls) {
  Indent();
  Decl::printGroup(Decls.data(), Decls.size(), Out, Policy, Indentation);
  Out << ";\n";
  Decls.clear();
}
/// Print a C++ access specifier keyword.
void DeclPrinter::Print(AccessSpecifier AS) {
  switch (AS) {
  case AS_none:
    llvm_unreachable("No access specifier!");
  case AS_public:
    Out << "public";
    break;
  case AS_protected:
    Out << "protected";
    break;
  case AS_private:
    Out << "private";
    break;
  }
}
//----------------------------------------------------------------------------
// Common C declarations
//----------------------------------------------------------------------------
// Print every declaration in DC, merging declarations that share an unnamed
// tag type (e.g. "struct { int x; } a, b;") into a single printed group, and
// choosing an appropriate terminator (";", ",", or none) per declaration.
void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
  if (Policy.TerseOutput)
    return;
  if (Indent)
    Indentation += Policy.Indentation;
  SmallVector<Decl*, 2> Decls;
  for (DeclContext::decl_iterator D = DC->decls_begin(), DEnd = DC->decls_end();
       D != DEnd; ++D) {
    // Don't print ObjCIvarDecls, as they are printed when visiting the
    // containing ObjCInterfaceDecl.
    if (isa<ObjCIvarDecl>(*D))
      continue;
    // Skip over implicit declarations in pretty-printing mode.
    if (D->isImplicit())
      continue;
    // The next bits of code handles stuff like "struct {int x;} a,b"; we're
    // forced to merge the declarations because there's no other way to
    // refer to the struct in question. This limited merging is safe without
    // a bunch of other checks because it only merges declarations directly
    // referring to the tag, not typedefs.
    //
    // Check whether the current declaration should be grouped with a previous
    // unnamed struct.
    QualType CurDeclType = getDeclType(*D);
    if (!Decls.empty() && !CurDeclType.isNull()) {
      QualType BaseType = GetBaseType(CurDeclType);
      if (!BaseType.isNull() && isa<ElaboratedType>(BaseType))
        BaseType = cast<ElaboratedType>(BaseType)->getNamedType();
      if (!BaseType.isNull() && isa<TagType>(BaseType) &&
          cast<TagType>(BaseType)->getDecl() == Decls[0]) {
        Decls.push_back(*D);
        continue;
      }
    }
    // If we have a merged group waiting to be handled, handle it now.
    if (!Decls.empty())
      ProcessDeclGroup(Decls);
    // If the current declaration is an unnamed tag type, save it
    // so we can merge it with the subsequent declaration(s) using it.
    if (isa<TagDecl>(*D) && !cast<TagDecl>(*D)->getIdentifier()) {
      Decls.push_back(*D);
      continue;
    }
    // Access specifiers are dedented one level relative to the members.
    if (isa<AccessSpecDecl>(*D))
      if (!Policy.LangOpts.HLSL) { // HLSL Change - no access specifier for hlsl.
        Indentation -= Policy.Indentation;
        this->Indent();
        Print(D->getAccess());
        Out << ":\n";
        Indentation += Policy.Indentation;
        continue;
      }
    // NOTE(review): when HLSL is enabled, an AccessSpecDecl falls through to
    // the generic Visit below (and thus receives a ";" terminator) instead
    // of being skipped entirely — confirm this is the intended behavior.
    this->Indent();
    Visit(*D);
    // Pick a terminator: definitions and scoped declarations supply their
    // own braces; enumerators take "," except for the last one.
    // FIXME: Need to be able to tell the DeclPrinter when
    const char *Terminator = nullptr;
    if (isa<OMPThreadPrivateDecl>(*D))
      Terminator = nullptr;
    else if (isa<FunctionDecl>(*D) &&
             cast<FunctionDecl>(*D)->isThisDeclarationADefinition())
      Terminator = nullptr;
    else if (isa<ObjCMethodDecl>(*D) && cast<ObjCMethodDecl>(*D)->getBody())
      Terminator = nullptr;
    else if (isa<NamespaceDecl>(*D) || isa<LinkageSpecDecl>(*D) ||
             isa<ObjCImplementationDecl>(*D) ||
             isa<ObjCInterfaceDecl>(*D) ||
             isa<ObjCProtocolDecl>(*D) ||
             isa<ObjCCategoryImplDecl>(*D) ||
             isa<ObjCCategoryDecl>(*D))
      Terminator = nullptr;
    else if (isa<HLSLBufferDecl>(*D)) // HLSL Change
      Terminator = nullptr;
    else if (isa<EnumConstantDecl>(*D)) {
      DeclContext::decl_iterator Next = D;
      ++Next;
      if (Next != DEnd)
        Terminator = ",";
    } else
      Terminator = ";";
    if (Terminator)
      Out << Terminator;
    Out << "\n";
  }
  if (!Decls.empty())
    ProcessDeclGroup(Decls);
  if (Indent)
    Indentation -= Policy.Indentation;
}
/// Print a whole translation unit; top-level declarations are not indented.
void DeclPrinter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
  VisitDeclContext(D, /*Indent=*/false);
}
/// Print "typedef <type> name" (the underlying type as written).
void DeclPrinter::VisitTypedefDecl(TypedefDecl *D) {
  if (!Policy.SuppressSpecifiers) {
    Out << "typedef ";
    if (D->isModulePrivate())
      Out << "__module_private__ ";
  }
  QualType Underlying = D->getTypeSourceInfo()->getType();
  Underlying.print(Out, Policy, D->getName());
  PrintUnusualAnnotations(D); // HLSL Change
  prettyPrintAttributes(D);
}
/// Print a C++11 alias declaration: "using Name = <type>".
void DeclPrinter::VisitTypeAliasDecl(TypeAliasDecl *D) {
  Out << "using " << *D;
  PrintUnusualAnnotations(D); // HLSL Change
  prettyPrintAttributes(D);
  QualType Aliased = D->getTypeSourceInfo()->getType();
  Out << " = " << Aliased.getAsString(Policy);
}
/// Print an enum: "enum [class|struct] Name [: type] [{ ... }]".
void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
  if (!Policy.SuppressSpecifiers && D->isModulePrivate())
    Out << "__module_private__ ";
  Out << "enum ";
  if (D->isScoped())
    Out << (D->isScopedUsingClassTag() ? "class " : "struct ");
  Out << *D;
  // Fixed underlying type, e.g. "enum E : unsigned".
  if (D->isFixed())
    Out << " : " << D->getIntegerType().stream(Policy);
  if (D->isCompleteDefinition()) {
    Out << " {\n";
    VisitDeclContext(D);
    Indent() << "}";
  }
  PrintUnusualAnnotations(D); // HLSL Change
  prettyPrintAttributes(D);
}
// Print a plain (C) record: "struct/union [attrs] [name] [{ ... }]".
void DeclPrinter::VisitRecordDecl(RecordDecl *D) {
  if (!Policy.SuppressSpecifiers && D->isModulePrivate())
    Out << "__module_private__ ";
  Out << D->getKindName();
  prettyPrintAttributes(D);
  if (D->getIdentifier())
    Out << ' ' << *D;
  // Only a complete definition gets a braced body.
  if (D->isCompleteDefinition()) {
    Out << " {\n";
    VisitDeclContext(D);
    Indent() << "}";
  }
}
// Print an enumerator, with its explicit initializer if one was written.
void DeclPrinter::VisitEnumConstantDecl(EnumConstantDecl *D) {
  Out << *D;
  if (Expr *Init = D->getInitExpr()) {
    Out << " = ";
    Init->printPretty(Out, nullptr, Policy, Indentation);
  }
}
// Print a function declaration or definition: storage class and specifiers,
// HLSL pre-attributes, the prototype (parameters, cv/ref qualifiers,
// exception specification), constructor initializer lists, trailing return
// types, and — unless suppressed — the body.  The prototype text is built
// up in the 'Proto' string so the return type can be wrapped around it.
void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
  CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(D);
  CXXConversionDecl *ConversionDecl = dyn_cast<CXXConversionDecl>(D);
  if (!Policy.SuppressSpecifiers) {
    switch (D->getStorageClass()) {
    case SC_None: break;
    case SC_Extern: Out << "extern "; break;
    case SC_Static: Out << "static "; break;
    case SC_PrivateExtern: Out << "__private_extern__ "; break;
    case SC_Auto: case SC_Register: case SC_OpenCLWorkGroupLocal:
      llvm_unreachable("invalid for functions");
    }
    if (D->isInlineSpecified())  Out << "inline ";
    if (D->isVirtualAsWritten()) Out << "virtual ";
    if (D->isModulePrivate())    Out << "__module_private__ ";
    if (D->isConstexpr() && !D->isExplicitlyDefaulted()) Out << "constexpr ";
    if ((CDecl && CDecl->isExplicitSpecified()) ||
        (ConversionDecl && ConversionDecl->isExplicit()))
      Out << "explicit ";
  }
  // HLSL Change Begin
  if (D->hasAttrs() && Policy.LangOpts.HLSL)
    PrintHLSLPreAttr(D);
  // HLSL Change End
  PrintingPolicy SubPolicy(Policy);
  SubPolicy.SuppressSpecifiers = false;
  std::string Proto = D->getNameInfo().getAsString();
  // HLSL Change Begin
  // Qualify with the enclosing namespace when the function is declared
  // lexically outside it (or always fully qualify for HLSLNoinlineMethod).
  DeclContext *Namespace = D->getEnclosingNamespaceContext();
  DeclContext *Enclosing = D->getLexicalParent();
  if (!Enclosing->isNamespace() && Namespace->isNamespace() &&
      !Policy.HLSLOnlyDecl) {
    NamespaceDecl* ns = (NamespaceDecl*)Namespace;
    Proto = ns->getName().str() + "::" + Proto;
  }
  if (Policy.HLSLNoinlineMethod) {
    Proto = D->getQualifiedNameAsString();
  }
  // HLSL Change End
  QualType Ty = D->getType();
  // Mirror parenthesized declarator types as parentheses around the name.
  while (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
    Proto = '(' + Proto + ')';
    Ty = PT->getInnerType();
  }
  if (const FunctionType *AFT = Ty->getAs<FunctionType>()) {
    const FunctionProtoType *FT = nullptr;
    if (D->hasWrittenPrototype())
      FT = dyn_cast<FunctionProtoType>(AFT);
    Proto += "(";
    if (FT) {
      // Full prototype: print each parameter declaration.
      llvm::raw_string_ostream POut(Proto);
      DeclPrinter ParamPrinter(POut, SubPolicy, Indentation);
      for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
        if (Policy.HLSLSuppressUniformParameters &&
            Policy.LangOpts.HLSL &&
            D->getParamDecl(i)->hasAttr<HLSLUniformAttr>()) // HLSL Change
          continue;
        if (i) POut << ", ";
        ParamPrinter.VisitParmVarDecl(D->getParamDecl(i));
      }
      if (FT->isVariadic()) {
        if (D->getNumParams()) POut << ", ";
        POut << "...";
      }
    } else if (D->doesThisDeclarationHaveABody() && !D->hasPrototype()) {
      // K&R-style definition: only the parameter names appear here.
      for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
        if (i)
          Proto += ", ";
        Proto += D->getParamDecl(i)->getNameAsString();
      }
    }
    Proto += ")";
    if (FT) {
      // cv- and ref-qualifiers on the function type.
      if (FT->isConst())
        Proto += " const";
      if (FT->isVolatile())
        Proto += " volatile";
      if (FT->isRestrict())
        Proto += " restrict";
      switch (FT->getRefQualifier()) {
      case RQ_None:
        break;
      case RQ_LValue:
        Proto += " &";
        break;
      case RQ_RValue:
        Proto += " &&";
        break;
      }
    }
    // Exception specification: dynamic throw(...) or noexcept[(expr)].
    if (FT && FT->hasDynamicExceptionSpec()) {
      Proto += " throw(";
      if (FT->getExceptionSpecType() == EST_MSAny)
        Proto += "...";
      else
        for (unsigned I = 0, N = FT->getNumExceptions(); I != N; ++I) {
          if (I)
            Proto += ", ";
          Proto += FT->getExceptionType(I).getAsString(SubPolicy);
        }
      Proto += ")";
    } else if (FT && isNoexceptExceptionSpec(FT->getExceptionSpecType())) {
      Proto += " noexcept";
      if (FT->getExceptionSpecType() == EST_ComputedNoexcept) {
        Proto += "(";
        llvm::raw_string_ostream EOut(Proto);
        FT->getNoexceptExpr()->printPretty(EOut, nullptr, SubPolicy,
                                           Indentation);
        EOut.flush();
        Proto += EOut.str();
        Proto += ")";
      }
    }
    if (CDecl) {
      // Constructor: print the member/base initializer list.
      bool HasInitializerList = false;
      for (const auto *BMInitializer : CDecl->inits()) {
        if (BMInitializer->isInClassMemberInitializer())
          continue;
        if (!HasInitializerList) {
          Proto += " : ";
          Out << Proto;
          Proto.clear();
          HasInitializerList = true;
        } else
          Out << ", ";
        if (BMInitializer->isAnyMemberInitializer()) {
          FieldDecl *FD = BMInitializer->getAnyMember();
          Out << *FD;
        } else {
          Out << QualType(BMInitializer->getBaseClass(), 0).getAsString(Policy);
        }
        Out << "(";
        if (!BMInitializer->getInit()) {
          // Nothing to print
        } else {
          Expr *Init = BMInitializer->getInit();
          if (ExprWithCleanups *Tmp = dyn_cast<ExprWithCleanups>(Init))
            Init = Tmp->getSubExpr();
          Init = Init->IgnoreParens();
          // Either a single simple initializer or an argument list.
          Expr *SimpleInit = nullptr;
          Expr **Args = nullptr;
          unsigned NumArgs = 0;
          if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
            Args = ParenList->getExprs();
            NumArgs = ParenList->getNumExprs();
          } else if (CXXConstructExpr *Construct
                                        = dyn_cast<CXXConstructExpr>(Init)) {
            Args = Construct->getArgs();
            NumArgs = Construct->getNumArgs();
          } else
            SimpleInit = Init;
          if (SimpleInit)
            SimpleInit->printPretty(Out, nullptr, Policy, Indentation);
          else {
            for (unsigned I = 0; I != NumArgs; ++I) {
              assert(Args[I] != nullptr && "Expected non-null Expr");
              // Default arguments and everything after them are implicit.
              if (isa<CXXDefaultArgExpr>(Args[I]))
                break;
              if (I)
                Out << ", ";
              Args[I]->printPretty(Out, nullptr, Policy, Indentation);
            }
          }
        }
        Out << ")";
        if (BMInitializer->isPackExpansion())
          Out << "...";
      }
    } else if (!ConversionDecl && !isa<CXXDestructorDecl>(D)) {
      // Ordinary function: print the return type (trailing form if written
      // that way), wrapping it around the prototype text.
      if (FT && FT->hasTrailingReturn()) {
        Out << "auto " << Proto << " -> ";
        Proto.clear();
      }
      AFT->getReturnType().print(Out, Policy, Proto);
      Proto.clear();
    }
    Out << Proto;
  } else {
    // Non-function type (e.g. typedef'd function type as written).
    Ty.print(Out, Policy, Proto);
  }
  PrintUnusualAnnotations(D); // HLSL Change
  prettyPrintAttributes(D);
  if (D->isPure())
    Out << " = 0";
  else if (D->isDeletedAsWritten())
    Out << " = delete";
  else if (D->isExplicitlyDefaulted())
    Out << " = default";
  else if (D->doesThisDeclarationHaveABody() && !Policy.TerseOutput) {
    if (!D->hasPrototype() && D->getNumParams()) {
      // This is a K&R function definition, so we need to print the
      // parameters.
      Out << '\n';
      DeclPrinter ParamPrinter(Out, SubPolicy, Indentation);
      Indentation += Policy.Indentation;
      for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
        Indent();
        ParamPrinter.VisitParmVarDecl(D->getParamDecl(i));
        Out << ";\n";
      }
      Indentation -= Policy.Indentation;
    } else
      Out << ' ';
    if (D->getBody()) {
      // HLSL Change Begin - only print decl.
      if (Policy.HLSLOnlyDecl) {
        Out << ";";
      } else {
        // HLSL Change end.
        D->getBody()->printPretty(Out, nullptr, SubPolicy, Indentation);
      }
    }
    Out << '\n';
  }
}
/// Print a friend declaration: either a friend type, or a friend
/// function / function template / class template declaration.
void DeclPrinter::VisitFriendDecl(FriendDecl *D) {
  if (TypeSourceInfo *TSI = D->getFriendType()) {
    // Friend type: print any written template parameter lists first.
    unsigned NumTPLists = D->getFriendTypeNumTemplateParameterLists();
    for (unsigned I = 0; I < NumTPLists; ++I)
      PrintTemplateParameters(D->getFriendTypeTemplateParameterList(I));
    Out << "friend ";
    Out << " " << TSI->getType().getAsString(Policy);
    return;
  }
  if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D->getFriendDecl())) {
    Out << "friend ";
    VisitFunctionDecl(FD);
    return;
  }
  if (FunctionTemplateDecl *FTD =
          dyn_cast<FunctionTemplateDecl>(D->getFriendDecl())) {
    Out << "friend ";
    VisitFunctionTemplateDecl(FTD);
    return;
  }
  if (ClassTemplateDecl *CTD =
          dyn_cast<ClassTemplateDecl>(D->getFriendDecl())) {
    Out << "friend ";
    VisitRedeclarableTemplateDecl(CTD);
  }
}
// Print a record field: specifiers, HLSL pre-attributes, declarator,
// optional bit-width, and optional in-class initializer.
void DeclPrinter::VisitFieldDecl(FieldDecl *D) {
  if (!Policy.SuppressSpecifiers && D->isMutable())
    Out << "mutable ";
  if (!Policy.SuppressSpecifiers && D->isModulePrivate())
    Out << "__module_private__ ";
  // HLSL Change Begin
  if (D->hasAttrs())
    PrintHLSLPreAttr(D);
  // HLSL Change End
  Out << D->getASTContext().getUnqualifiedObjCPointerType(D->getType()).
         stream(Policy, D->getName());
  if (D->isBitField()) {
    Out << " : ";
    D->getBitWidth()->printPretty(Out, nullptr, Policy, Indentation);
  }
  Expr *Init = D->getInClassInitializer();
  if (!Policy.SuppressInitializers && Init) {
    // Brace initialization ("{...}") takes no '='; copy-init does.
    if (D->getInClassInitStyle() == ICIS_ListInit)
      Out << " ";
    else
      Out << " = ";
    Init->printPretty(Out, nullptr, Policy, Indentation);
  }
  PrintUnusualAnnotations(D); // HLSL Change
  prettyPrintAttributes(D);
}
// Print a goto label as "name:".
void DeclPrinter::VisitLabelDecl(LabelDecl *D) {
  Out << *D << ":";
}
// Print a variable declaration: storage class, thread-storage specifier,
// HLSL pre-attributes, declarator, and (unless suppressed) the initializer.
void DeclPrinter::VisitVarDecl(VarDecl *D) {
  if (!Policy.SuppressSpecifiers) {
    StorageClass SC = D->getStorageClass();
    if (SC != SC_None)
      Out << VarDecl::getStorageClassSpecifierString(SC) << " ";
    switch (D->getTSCSpec()) {
    case TSCS_unspecified:
      break;
    case TSCS___thread:
      Out << "__thread ";
      break;
    case TSCS__Thread_local:
      Out << "_Thread_local ";
      break;
    case TSCS_thread_local:
      Out << "thread_local ";
      break;
    }
    if (D->isModulePrivate())
      Out << "__module_private__ ";
  }
  // HLSL Change Begin
  if (D->hasAttrs() && Policy.LangOpts.HLSL)
    PrintHLSLPreAttr(D);
  // HLSL Change End
  // Prefer the type as written; fall back to the semantic type.
  QualType T = D->getTypeSourceInfo()
    ? D->getTypeSourceInfo()->getType()
    : D->getASTContext().getUnqualifiedObjCPointerType(D->getType());
  // HLSL Change Begin
  if (D->hasAttrs() && Policy.LangOpts.HLSL) {
    printDeclType(T.getNonReferenceType(), D->getName());
  }
  else {
    printDeclType(T, D->getName());
  }
  // HLSL Change end
  Expr *Init = D->getInit();
  if (!Policy.SuppressInitializers && Init) {
    bool ImplicitInit = false;
    if (CXXConstructExpr *Construct =
            dyn_cast<CXXConstructExpr>(Init->IgnoreImplicit())) {
      if (D->getInitStyle() == VarDecl::CallInit &&
          !Construct->isListInitialization()) {
        // A default-constructor call (or one where every argument is the
        // default) is implicit and should not be printed.
        ImplicitInit = Construct->getNumArgs() == 0 ||
          Construct->getArg(0)->isDefaultArgument();
      }
    }
    if (!ImplicitInit) {
      // Call-style init gets parentheses (unless the expr already has
      // them); C-style init gets " = ".
      if ((D->getInitStyle() == VarDecl::CallInit) && !isa<ParenListExpr>(Init))
        Out << "(";
      else if (D->getInitStyle() == VarDecl::CInit) {
        Out << " = ";
      }
      Init->printPretty(Out, nullptr, Policy, Indentation);
      if ((D->getInitStyle() == VarDecl::CallInit) && !isa<ParenListExpr>(Init))
        Out << ")";
    }
  }
  PrintUnusualAnnotations(D); // HLSL Change
  prettyPrintAttributes(D);
}
// Parameters print exactly like ordinary variables.
void DeclPrinter::VisitParmVarDecl(ParmVarDecl *D) {
  VisitVarDecl(D);
}
// Print a file-scope "__asm (<string>)" declaration.
void DeclPrinter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
  Out << "__asm (";
  D->getAsmString()->printPretty(Out, nullptr, Policy, Indentation);
  Out << ")";
}
// Print a module import as "@import Full.Module.Name;".
void DeclPrinter::VisitImportDecl(ImportDecl *D) {
  Out << "@import " << D->getImportedModule()->getFullModuleName()
      << ";\n";
}
/// Print "static_assert(cond[, "message"])"; the message is optional.
void DeclPrinter::VisitStaticAssertDecl(StaticAssertDecl *D) {
  Out << "static_assert(";
  D->getAssertExpr()->printPretty(Out, nullptr, Policy, Indentation);
  StringLiteral *Message = D->getMessage();
  if (Message) {
    Out << ", ";
    Message->printPretty(Out, nullptr, Policy, Indentation);
  }
  Out << ")";
}
//----------------------------------------------------------------------------
// C++ declarations
//----------------------------------------------------------------------------
// Print "[inline] namespace Name { ... }".
void DeclPrinter::VisitNamespaceDecl(NamespaceDecl *D) {
  // HLSL Change Begin - Don't emit built-in "vk" namespace, it's implicitly
  // declared when compiling to SPIR-V and would otherwise cause parsing errors
  // due to unsupported HLSL 2021 features.
  if (D->getNameAsString() == "vk")
    return;
  // HLSL Change End
  if (D->isInline())
    Out << "inline ";
  Out << "namespace " << *D << " {\n";
  VisitDeclContext(D);
  Indent() << "}";
}
// Print "using namespace [qualifier::]name".
void DeclPrinter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
  Out << "using namespace ";
  if (D->getQualifier())
    D->getQualifier()->print(Out, Policy);
  Out << *D->getNominatedNamespaceAsWritten();
}
// Print "namespace Alias = [qualifier::]Namespace".
void DeclPrinter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
  Out << "namespace " << *D << " = ";
  if (D->getQualifier())
    D->getQualifier()->print(Out, Policy);
  Out << *D->getAliasedNamespace();
}
// An empty declaration (a stray ";"); only its attributes are printed.
void DeclPrinter::VisitEmptyDecl(EmptyDecl *D) {
  prettyPrintAttributes(D);
}
// Print a C++ class/struct/union (or HLSL "interface"), including its
// base-class list and, for a complete definition, its braced body.
void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
  if (!Policy.SuppressSpecifiers && D->isModulePrivate())
    Out << "__module_private__ ";
  // HLSL Change Begin
  if (!Policy.LangOpts.HLSL || !D->isInterface()) {
    Out << D->getKindName();
  }
  else {
    Out << "interface";
  }
  // HLSL Change End
  PrintUnusualAnnotations(D); // HLSL Change
  prettyPrintAttributes(D);
  if (D->getIdentifier())
    Out << ' ' << *D;
  if (D->isCompleteDefinition()) {
    // Print the base classes
    if (D->getNumBases()) {
      Out << " : ";
      for (CXXRecordDecl::base_class_iterator Base = D->bases_begin(),
             BaseEnd = D->bases_end(); Base != BaseEnd; ++Base) {
        if (Base != D->bases_begin())
          Out << ", ";
        if (Base->isVirtual())
          Out << "virtual ";
        AccessSpecifier AS = Base->getAccessSpecifierAsWritten();
        if (AS != AS_none
            && !Policy.LangOpts.HLSL // HLSL Change - no access specifier for hlsl.
            ) {
          Print(AS);
          Out << " ";
        }
        Out << Base->getType().getAsString(Policy);
        if (Base->isPackExpansion())
          Out << "...";
      }
    }
    // Print the class definition
    // FIXME: Doesn't print access specifiers, e.g., "public:"
    Out << " {\n";
    VisitDeclContext(D);
    Indent() << "}";
  }
}
/// Print an extern-"C"/"C++" linkage specification, braced or single-decl.
void DeclPrinter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
  // Only "C" and "C++" linkage exist.
  const char *Lang = "C++";
  if (D->getLanguage() == LinkageSpecDecl::lang_c)
    Lang = "C";
  else
    assert(D->getLanguage() == LinkageSpecDecl::lang_cxx &&
           "unknown language in linkage specification");
  Out << "extern \"" << Lang << "\" ";
  if (!D->hasBraces()) {
    Visit(*D->decls_begin());
    return;
  }
  Out << "{\n";
  VisitDeclContext(D);
  Indent() << "}";
}
/// Print "template <...>" for \p Params.  When \p Args is supplied (printing
/// a specialization), each parameter is shown with its corresponding
/// argument as "= arg"; otherwise default arguments are printed if present.
void DeclPrinter::PrintTemplateParameters(const TemplateParameterList *Params,
                                          const TemplateArgumentList *Args) {
  assert(Params);
  assert(!Args || Params->size() == Args->size());
  Out << "template <";
  for (unsigned i = 0, e = Params->size(); i != e; ++i) {
    if (i != 0)
      Out << ", ";
    const Decl *Param = Params->getParam(i);
    if (const TemplateTypeParmDecl *TTP =
          dyn_cast<TemplateTypeParmDecl>(Param)) {
      if (TTP->wasDeclaredWithTypename())
        Out << "typename ";
      else
        Out << "class ";
      if (TTP->isParameterPack())
        Out << "...";
      Out << *TTP;
      if (Args) {
        Out << " = ";
        Args->get(i).print(Policy, Out);
      } else if (TTP->hasDefaultArgument()) {
        Out << " = ";
        Out << TTP->getDefaultArgument().getAsString(Policy);
      } // Fixed: removed a stray ';' (empty statement) that followed here.
    } else if (const NonTypeTemplateParmDecl *NTTP =
                 dyn_cast<NonTypeTemplateParmDecl>(Param)) {
      StringRef Name;
      if (IdentifierInfo *II = NTTP->getIdentifier())
        Name = II->getName();
      printDeclType(NTTP->getType(), Name, NTTP->isParameterPack());
      if (Args) {
        Out << " = ";
        Args->get(i).print(Policy, Out);
      } else if (NTTP->hasDefaultArgument()) {
        Out << " = ";
        NTTP->getDefaultArgument()->printPretty(Out, nullptr, Policy,
                                                Indentation);
      }
    } else if (const TemplateTemplateParmDecl *TTPD =
                 dyn_cast<TemplateTemplateParmDecl>(Param)) {
      VisitTemplateDecl(TTPD);
      // FIXME: print the default argument, if present.
    }
  }
  Out << "> ";
}
// Print a template declaration: its parameter list followed by either the
// "class [...]Name" form (for a template template parameter) or the
// templated declaration itself.
void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) {
  PrintTemplateParameters(D->getTemplateParameters());

  if (const TemplateTemplateParmDecl *TTP =
        dyn_cast<TemplateTemplateParmDecl>(D)) {
    Out << "class ";
    if (TTP->isParameterPack())
      Out << "...";
    Out << D->getName();
  } else {
    // Ordinary template: delegate to the printer for the underlying decl.
    Visit(D->getTemplatedDecl());
  }
}
// Print a function template. When PrintInstantiation is set, each recorded
// specialization is printed (with its concrete template arguments) before
// the generic redeclarable-template form.
void DeclPrinter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
  if (PrintInstantiation) {
    TemplateParameterList *Params = D->getTemplateParameters();
    for (auto *I : D->specializations()) {
      PrintTemplateParameters(Params, I->getTemplateSpecializationArgs());
      Visit(I);
    }
  }

  return VisitRedeclarableTemplateDecl(D);
}
// Print a class template. When PrintInstantiation is set, each recorded
// specialization is printed first, separated by newlines, before the
// generic redeclarable-template form.
void DeclPrinter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
  if (PrintInstantiation) {
    TemplateParameterList *Params = D->getTemplateParameters();
    for (auto *I : D->specializations()) {
      PrintTemplateParameters(Params, &I->getTemplateArgs());
      Visit(I);
      Out << '\n';
    }
  }

  return VisitRedeclarableTemplateDecl(D);
}
//----------------------------------------------------------------------------
// Objective-C declarations
//----------------------------------------------------------------------------
void DeclPrinter::PrintObjCMethodType(ASTContext &Ctx,
Decl::ObjCDeclQualifier Quals,
QualType T) {
Out << '(';
if (Quals & Decl::ObjCDeclQualifier::OBJC_TQ_In)
Out << "in ";
if (Quals & Decl::ObjCDeclQualifier::OBJC_TQ_Inout)
Out << "inout ";
if (Quals & Decl::ObjCDeclQualifier::OBJC_TQ_Out)
Out << "out ";
if (Quals & Decl::ObjCDeclQualifier::OBJC_TQ_Bycopy)
Out << "bycopy ";
if (Quals & Decl::ObjCDeclQualifier::OBJC_TQ_Byref)
Out << "byref ";
if (Quals & Decl::ObjCDeclQualifier::OBJC_TQ_Oneway)
Out << "oneway ";
if (Quals & Decl::ObjCDeclQualifier::OBJC_TQ_CSNullability) {
if (auto nullability = AttributedType::stripOuterNullability(T))
Out << getNullabilitySpelling(*nullability, true) << ' ';
}
Out << Ctx.getUnqualifiedObjCPointerType(T).getAsString(Policy);
Out << ')';
}
/// Print an Objective-C type parameter list, e.g.
/// "<__covariant T : NSObject *, U>".
void DeclPrinter::PrintObjCTypeParams(ObjCTypeParamList *Params) {
  Out << "<";
  bool First = true; // was declared 'unsigned'; it is a boolean flag
  for (auto *Param : *Params) {
    if (First) {
      First = false;
    } else {
      Out << ", ";
    }

    // Print the variance annotation, if any.
    switch (Param->getVariance()) {
    case ObjCTypeParamVariance::Invariant:
      break;

    case ObjCTypeParamVariance::Covariant:
      Out << "__covariant ";
      break;

    case ObjCTypeParamVariance::Contravariant:
      Out << "__contravariant ";
      break;
    }

    Out << Param->getDeclName().getAsString();

    // Only an explicitly written bound is printed back.
    if (Param->hasExplicitBound()) {
      Out << " : " << Param->getUnderlyingType().getAsString(Policy);
    }
  }
  Out << ">";
}
// Print an Objective-C method declaration: "-"/"+" marker, return type,
// then the selector pieces interleaved with parameter types/names. The
// selector string is split on ':' so each keyword piece is emitted ahead
// of its corresponding parameter.
void DeclPrinter::VisitObjCMethodDecl(ObjCMethodDecl *OMD) {
  if (OMD->isInstanceMethod())
    Out << "- ";
  else
    Out << "+ ";
  if (!OMD->getReturnType().isNull()) {
    PrintObjCMethodType(OMD->getASTContext(), OMD->getObjCDeclQualifier(),
                        OMD->getReturnType());
  }

  std::string name = OMD->getSelector().getAsString();
  std::string::size_type pos, lastPos = 0;
  for (const auto *PI : OMD->params()) {
    // FIXME: selector is missing here!
    // Emit the next keyword piece of the selector, then the parameter.
    pos = name.find_first_of(':', lastPos);
    Out << " " << name.substr(lastPos, pos - lastPos) << ':';
    PrintObjCMethodType(OMD->getASTContext(),
                        PI->getObjCDeclQualifier(),
                        PI->getType());
    Out << *PI;
    lastPos = pos + 1;
  }

  // A zero-argument method prints its whole (colon-free) selector.
  if (OMD->param_begin() == OMD->param_end())
    Out << " " << name;

  if (OMD->isVariadic())
    Out << ", ...";

  prettyPrintAttributes(OMD);

  if (OMD->getBody() && !Policy.TerseOutput) {
    Out << ' ';
    OMD->getBody()->printPretty(Out, nullptr, Policy);
  }
  else if (Policy.PolishForDeclaration)
    Out << ';';
}
void DeclPrinter::VisitObjCImplementationDecl(ObjCImplementationDecl *OID) {
std::string I = OID->getNameAsString();
ObjCInterfaceDecl *SID = OID->getSuperClass();
bool eolnOut = false;
if (SID)
Out << "@implementation " << I << " : " << *SID;
else
Out << "@implementation " << I;
if (OID->ivar_size() > 0) {
Out << "{\n";
eolnOut = true;
Indentation += Policy.Indentation;
for (const auto *I : OID->ivars()) {
Indent() << I->getASTContext().getUnqualifiedObjCPointerType(I->getType()).
getAsString(Policy) << ' ' << *I << ";\n";
}
Indentation -= Policy.Indentation;
Out << "}\n";
}
else if (SID || (OID->decls_begin() != OID->decls_end())) {
Out << "\n";
eolnOut = true;
}
VisitDeclContext(OID, false);
if (!eolnOut)
Out << "\n";
Out << "@end";
}
void DeclPrinter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *OID) {
std::string I = OID->getNameAsString();
ObjCInterfaceDecl *SID = OID->getSuperClass();
if (!OID->isThisDeclarationADefinition()) {
Out << "@class " << I;
if (auto TypeParams = OID->getTypeParamListAsWritten()) {
PrintObjCTypeParams(TypeParams);
}
Out << ";";
return;
}
bool eolnOut = false;
Out << "@interface " << I;
if (auto TypeParams = OID->getTypeParamListAsWritten()) {
PrintObjCTypeParams(TypeParams);
}
if (SID)
Out << " : " << OID->getSuperClass()->getName();
// Protocols?
const ObjCList<ObjCProtocolDecl> &Protocols = OID->getReferencedProtocols();
if (!Protocols.empty()) {
for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
E = Protocols.end(); I != E; ++I)
Out << (I == Protocols.begin() ? '<' : ',') << **I;
Out << "> ";
}
if (OID->ivar_size() > 0) {
Out << "{\n";
eolnOut = true;
Indentation += Policy.Indentation;
for (const auto *I : OID->ivars()) {
Indent() << I->getASTContext()
.getUnqualifiedObjCPointerType(I->getType())
.getAsString(Policy) << ' ' << *I << ";\n";
}
Indentation -= Policy.Indentation;
Out << "}\n";
}
else if (SID || (OID->decls_begin() != OID->decls_end())) {
Out << "\n";
eolnOut = true;
}
VisitDeclContext(OID, false);
if (!eolnOut)
Out << "\n";
Out << "@end";
// FIXME: implement the rest...
}
// Print an Objective-C @protocol: either a forward declaration
// ("@protocol P;") or a full definition with its inherited protocol list
// and member declarations.
void DeclPrinter::VisitObjCProtocolDecl(ObjCProtocolDecl *PID) {
  if (!PID->isThisDeclarationADefinition()) {
    Out << "@protocol " << *PID << ";\n";
    return;
  }

  Out << "@protocol " << *PID;
  const ObjCList<ObjCProtocolDecl> &Protocols = PID->getReferencedProtocols();
  if (Protocols.empty()) {
    Out << '\n';
  } else {
    // Inherited protocols print as "<A,B,...>".
    bool IsFirst = true;
    for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
                                              E = Protocols.end();
         I != E; ++I) {
      Out << (IsFirst ? '<' : ',') << **I;
      IsFirst = false;
    }
    Out << ">\n";
  }

  VisitDeclContext(PID, false);
  Out << "@end";
}
// Print a category implementation: "@implementation Class(Category) ... @end".
void DeclPrinter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *PID) {
  Out << "@implementation " << *PID->getClassInterface() << '(' << *PID
      << ")\n";
  VisitDeclContext(PID, false);
  Out << "@end";
  // FIXME: implement the rest...
}
void DeclPrinter::VisitObjCCategoryDecl(ObjCCategoryDecl *PID) {
Out << "@interface " << *PID->getClassInterface();
if (auto TypeParams = PID->getTypeParamList()) {
PrintObjCTypeParams(TypeParams);
}
Out << "(" << *PID << ")\n";
if (PID->ivar_size() > 0) {
Out << "{\n";
Indentation += Policy.Indentation;
for (const auto *I : PID->ivars())
Indent() << I->getASTContext().getUnqualifiedObjCPointerType(I->getType()).
getAsString(Policy) << ' ' << *I << ";\n";
Indentation -= Policy.Indentation;
Out << "}\n";
}
VisitDeclContext(PID, false);
Out << "@end";
// FIXME: implement the rest...
}
// Print "@compatibility_alias AliasName OriginalClass;".
void DeclPrinter::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *AID) {
  Out << "@compatibility_alias " << *AID << ' ' << *AID->getClassInterface()
      << ";\n";
}
/// PrintObjCPropertyDecl - print a property declaration.
///
/// PrintObjCPropertyDecl - print a property declaration.
///
/// Prints the @required/@optional marker, the "@property ( attr,... )"
/// attribute list, and finally the property's type and name. The attribute
/// separator logic (space before the first attribute, comma thereafter)
/// was previously duplicated eleven times; it is factored into a lambda.
void DeclPrinter::VisitObjCPropertyDecl(ObjCPropertyDecl *PDecl) {
  if (PDecl->getPropertyImplementation() == ObjCPropertyDecl::Required)
    Out << "@required\n";
  else if (PDecl->getPropertyImplementation() == ObjCPropertyDecl::Optional)
    Out << "@optional\n";

  QualType T = PDecl->getType();
  unsigned Attrs = PDecl->getPropertyAttributes();

  Out << "@property";
  if (Attrs != ObjCPropertyDecl::OBJC_PR_noattr) {
    bool first = true;
    // Emit the separator preceding the next attribute: a space before the
    // first one (matching the historical output) and a comma thereafter.
    auto StartAttr = [&] {
      Out << (first ? ' ' : ',');
      first = false;
    };

    Out << " (";
    if (Attrs & ObjCPropertyDecl::OBJC_PR_readonly) {
      StartAttr();
      Out << "readonly";
    }
    if (Attrs & ObjCPropertyDecl::OBJC_PR_getter) {
      StartAttr();
      Out << "getter = ";
      PDecl->getGetterName().print(Out);
    }
    if (Attrs & ObjCPropertyDecl::OBJC_PR_setter) {
      StartAttr();
      Out << "setter = ";
      PDecl->getSetterName().print(Out);
    }
    if (Attrs & ObjCPropertyDecl::OBJC_PR_assign) {
      StartAttr();
      Out << "assign";
    }
    if (Attrs & ObjCPropertyDecl::OBJC_PR_readwrite) {
      StartAttr();
      Out << "readwrite";
    }
    if (Attrs & ObjCPropertyDecl::OBJC_PR_retain) {
      StartAttr();
      Out << "retain";
    }
    if (Attrs & ObjCPropertyDecl::OBJC_PR_strong) {
      StartAttr();
      Out << "strong";
    }
    if (Attrs & ObjCPropertyDecl::OBJC_PR_copy) {
      StartAttr();
      Out << "copy";
    }
    if (Attrs & ObjCPropertyDecl::OBJC_PR_nonatomic) {
      StartAttr();
      Out << "nonatomic";
    }
    if (Attrs & ObjCPropertyDecl::OBJC_PR_atomic) {
      StartAttr();
      Out << "atomic";
    }
    if (Attrs & ObjCPropertyDecl::OBJC_PR_nullability) {
      // Note: stripOuterNullability modifies T in place, so the type
      // printed below no longer carries the nullability attribute.
      if (auto nullability = AttributedType::stripOuterNullability(T)) {
        StartAttr();
        if (*nullability == NullabilityKind::Unspecified &&
            (Attrs & ObjCPropertyDecl::OBJC_PR_null_resettable))
          Out << "null_resettable";
        else
          Out << getNullabilitySpelling(*nullability, true);
      }
    }
    Out << " )";
  }

  Out << ' ' << PDecl->getASTContext().getUnqualifiedObjCPointerType(T).
                  getAsString(Policy) << ' ' << *PDecl;

  if (Policy.PolishForDeclaration)
    Out << ';';
}
// Print an @synthesize/@dynamic property implementation, including the
// backing ivar when one was written ("@synthesize prop=_ivar").
void DeclPrinter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *PID) {
  const bool IsSynthesized =
      PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize;
  Out << (IsSynthesized ? "@synthesize " : "@dynamic ");
  Out << *PID->getPropertyDecl();
  if (auto *Ivar = PID->getPropertyIvarDecl())
    Out << '=' << *Ivar;
}
// Print a using declaration, e.g. "using N::x" or an access declaration.
void DeclPrinter::VisitUsingDecl(UsingDecl *D) {
  // Access declarations ("Base::member;") omit the "using" keyword.
  if (!D->isAccessDeclaration())
    Out << "using ";
  // Dependent using declarations may carry "typename".
  if (D->hasTypename())
    Out << "typename ";
  NestedNameSpecifier *Qualifier = D->getQualifier();
  Qualifier->print(Out, Policy);
  Out << *D;
}
// Print an unresolved "using typename Qualifier::Name" declaration.
void
DeclPrinter::VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D) {
  Out << "using typename ";
  NestedNameSpecifier *Qualifier = D->getQualifier();
  Qualifier->print(Out, Policy);
  Out << D->getDeclName();
}
// Print an unresolved using-value declaration ("using Qualifier::name").
void DeclPrinter::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
  // Access declarations omit the "using" keyword.
  if (!D->isAccessDeclaration())
    Out << "using ";
  NestedNameSpecifier *Qualifier = D->getQualifier();
  Qualifier->print(Out, Policy);
  Out << D->getName();
}
// Shadow declarations are implicit artifacts of a using declaration and
// never print anything themselves.
void DeclPrinter::VisitUsingShadowDecl(UsingShadowDecl *D) {
  // ignore
}
// Print "#pragma omp threadprivate(a,b,...)" from the captured variable
// references; an empty list prints only the pragma keyword.
void DeclPrinter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
  Out << "#pragma omp threadprivate";
  if (D->varlist_empty())
    return;

  bool First = true;
  for (OMPThreadPrivateDecl::varlist_iterator I = D->varlist_begin(),
                                              E = D->varlist_end();
       I != E; ++I) {
    Out << (First ? '(' : ',');
    First = false;
    NamedDecl *VD = cast<NamedDecl>(cast<DeclRefExpr>(*I)->getDecl());
    VD->printQualifiedName(Out);
  }
  Out << ")";
}
// HLSL Change Begin
// HLSL: print a cbuffer/tbuffer declaration with its annotations,
// attributes, and member declarations.
void DeclPrinter::VisitHLSLBufferDecl(HLSLBufferDecl *D) {
  Out << (D->isCBuffer() ? "cbuffer " : "tbuffer ");
  Out << *D;
  PrintUnusualAnnotations(D);
  prettyPrintAttributes(D);
  Out << " {\n";
  VisitDeclContext(D);
  Indent() << "}";
}
// Print any HLSL "unusual annotations" (semantics, register assignments,
// packoffsets, payload access qualifiers) attached to the declaration.
// Invalid declarations are skipped entirely.
void DeclPrinter::PrintUnusualAnnotations(NamedDecl* D) {
  if (D->isInvalidDecl())
    return;
  // An empty list simply iterates zero times; the previous explicit
  // !empty() guard around the loop was redundant.
  for (const hlsl::UnusualAnnotation *UA : D->getUnusualAnnotations())
    VisitHLSLUnusualAnnotation(UA);
}
// Print a single HLSL unusual annotation (semantic, register assignment,
// packoffset, or payload access qualifier) as it appears in source.
void DeclPrinter::VisitHLSLUnusualAnnotation(const hlsl::UnusualAnnotation *UA) {
  switch (UA->getKind()) {
  case hlsl::UnusualAnnotation::UA_SemanticDecl: {
    // The kind is established by the switch, so use cast<> (which asserts)
    // instead of dyn_cast<> (whose null result was dereferenced unchecked).
    const hlsl::SemanticDecl *semdecl = cast<hlsl::SemanticDecl>(UA);
    Out << " : " << semdecl->SemanticName.str();
    break;
  }
  case hlsl::UnusualAnnotation::UA_RegisterAssignment: {
    const hlsl::RegisterAssignment *ra = cast<hlsl::RegisterAssignment>(UA);
    if (ra->RegisterType) {
      Out << " : register(";
      if (!ra->ShaderProfile.empty()) {
        Out << ra->ShaderProfile.str() << ", ";
      }
      Out << ra->RegisterType << ra->RegisterNumber;
      if (ra->RegisterOffset) {
        Out << "[" << ra->RegisterOffset << "]";
      }
      // hasValue() is already a bool; the previous "!= 0" comparison was
      // redundant.
      if (ra->RegisterSpace.hasValue()) {
        Out << ", space" << ra->RegisterSpace.getValue();
      }
      Out << ")";
    }
    break;
  }
  case hlsl::UnusualAnnotation::UA_ConstantPacking: {
    const hlsl::ConstantPacking *cp = cast<hlsl::ConstantPacking>(UA);
    Out << " : packoffset(c" << cp->Subcomponent; // packing applies to constant registers (c) only
    if (cp->ComponentOffset) {
      // Component 0 (".x") is the default and prints nothing.
      switch (cp->ComponentOffset) {
      case 1:
        Out << ".y";
        break;
      case 2:
        Out << ".z";
        break;
      case 3:
        Out << ".w";
        break;
      }
    }
    Out << ")";
    break;
  }
  case hlsl::UnusualAnnotation::UA_PayloadAccessQualifier: {
    const hlsl::PayloadAccessAnnotation *annotation =
        cast<hlsl::PayloadAccessAnnotation>(UA);
    // Prints e.g. " : read(caller, miss)".
    Out << " : "
        << (annotation->qualifier == hlsl::DXIL::PayloadAccessQualifier::Read
                ? "read"
                : "write")
        << "(";
    StringRef shaderStageNames[] = { "caller", "closesthit", "miss", "anyhit"};
    for (unsigned i = 0; i < annotation->ShaderStages.size(); ++i) {
      Out << shaderStageNames[static_cast<unsigned>(annotation->ShaderStages[i])];
      if (i < annotation->ShaderStages.size() - 1)
        Out << ", ";
    }
    Out << ")";
    break;
  }
  }
}
// Print attributes that precede an HLSL declaration; the stored attribute
// vector is walked in reverse so output matches original source order.
void DeclPrinter::PrintHLSLPreAttr(NamedDecl* D) {
  AttrVec &Attrs = D->getAttrs();
  // Removed an unused local 'std::vector<Attr*> tempVec' that was never read.
  for (AttrVec::const_reverse_iterator i = Attrs.rbegin(), e = Attrs.rend(); i != e; ++i) {
    Attr *A = *i;
    hlsl::CustomPrintHLSLAttr(A, Out, Policy, Indentation);
  }
}
// HLSL Change End
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/StmtViz.cpp | //===--- StmtViz.cpp - Graphviz visualization for Stmt ASTs -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements Stmt::viewAST, which generates a Graphviz DOT file
// that depicts the AST and then calls Graphviz/dot+gv on it.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/StmtGraphTraits.h"
#include "clang/AST/Decl.h"
#include "llvm/Support/GraphWriter.h"
using namespace clang;
// Display this statement's AST with Graphviz. Only functional in debug
// (assertions-enabled) builds; release builds emit a diagnostic instead.
void Stmt::viewAST() const {
#ifndef NDEBUG
  llvm::ViewGraph(this,"AST");
#else
  llvm::errs() << "Stmt::viewAST is only available in debug builds on "
               << "systems with Graphviz or gv!\n";
#endif
}
namespace llvm {
template<>
struct DOTGraphTraits<const Stmt*> : public DefaultDOTGraphTraits {
DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
static std::string getNodeLabel(const Stmt* Node, const Stmt* Graph) {
#ifndef NDEBUG
std::string OutSStr;
llvm::raw_string_ostream Out(OutSStr);
if (Node)
Out << Node->getStmtClassName();
else
Out << "<NULL>";
std::string OutStr = Out.str();
if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
// Process string output to make it nicer...
for (unsigned i = 0; i != OutStr.length(); ++i)
if (OutStr[i] == '\n') { // Left justify
OutStr[i] = '\\';
OutStr.insert(OutStr.begin()+i+1, 'l');
}
return OutStr;
#else
return "";
#endif
}
};
} // end namespace llvm
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/ExternalASTSource.cpp | //===- ExternalASTSource.cpp - Abstract External AST Interface --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides the default implementation of the ExternalASTSource
// interface, which enables construction of AST nodes from some external
// source.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclarationName.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
ExternalASTSource::~ExternalASTSource() { }
llvm::Optional<ExternalASTSource::ASTSourceDescriptor>
ExternalASTSource::getSourceDescriptor(unsigned ID) {
return None;
}
ExternalASTSource::ASTSourceDescriptor
ExternalASTSource::getSourceDescriptor(const Module &M) {
return ASTSourceDescriptor();
}
void ExternalASTSource::FindFileRegionDecls(FileID File, unsigned Offset,
unsigned Length,
SmallVectorImpl<Decl *> &Decls) {}
void ExternalASTSource::CompleteRedeclChain(const Decl *D) {}
void ExternalASTSource::CompleteType(TagDecl *Tag) {}
void ExternalASTSource::CompleteType(ObjCInterfaceDecl *Class) {}
void ExternalASTSource::ReadComments() {}
void ExternalASTSource::StartedDeserializing() {}
void ExternalASTSource::FinishedDeserializing() {}
void ExternalASTSource::StartTranslationUnit(ASTConsumer *Consumer) {}
void ExternalASTSource::PrintStats() { }
bool ExternalASTSource::layoutRecordType(
const RecordDecl *Record, uint64_t &Size, uint64_t &Alignment,
llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets,
llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets,
llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets) {
return false;
}
Decl *ExternalASTSource::GetExternalDecl(uint32_t ID) {
return nullptr;
}
Selector ExternalASTSource::GetExternalSelector(uint32_t ID) {
return Selector();
}
uint32_t ExternalASTSource::GetNumExternalSelectors() {
return 0;
}
Stmt *ExternalASTSource::GetExternalDeclStmt(uint64_t Offset) {
return nullptr;
}
CXXCtorInitializer **
ExternalASTSource::GetExternalCXXCtorInitializers(uint64_t Offset) {
return nullptr;
}
CXXBaseSpecifier *
ExternalASTSource::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
return nullptr;
}
bool
ExternalASTSource::FindExternalVisibleDeclsByName(const DeclContext *DC,
DeclarationName Name) {
return false;
}
void ExternalASTSource::completeVisibleDeclsMap(const DeclContext *DC) {
}
ExternalLoadResult
ExternalASTSource::FindExternalLexicalDecls(const DeclContext *DC,
bool (*isKindWeWant)(Decl::Kind),
SmallVectorImpl<Decl*> &Result) {
return ELR_AlreadyLoaded;
}
void ExternalASTSource::getMemoryBufferSizes(MemoryBufferSizes &sizes) const { }
// Bump the AST generation counter, delegating to the context's topmost
// external source when that is a different object, and return the
// generation number that was current before the increment.
uint32_t ExternalASTSource::incrementGeneration(ASTContext &C) {
  uint32_t OldGeneration = CurrentGeneration;

  // Make sure the generation of the topmost external source for the context is
  // incremented. That might not be us.
  auto *P = C.getExternalSource();
  if (P && P != this)
    CurrentGeneration = P->incrementGeneration(C);
  else {
    // FIXME: Only bump the generation counter if the current generation number
    // has been observed?
    // Wrapping back to zero would make stale generation numbers look
    // current, so treat 32-bit overflow as fatal.
    if (!++CurrentGeneration)
      llvm::report_fatal_error("generation counter overflowed", false);
  }

  return OldGeneration;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/RecordLayout.cpp | //===-- RecordLayout.cpp - Layout information for a struct/union -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RecordLayout interface.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/TargetInfo.h"
using namespace clang;
// Release all context-allocated storage for this layout (the field-offset
// array and, for C++ records, the extra CXXRecordLayoutInfo), then destroy
// and deallocate the layout object itself. Memory comes from the
// ASTContext allocator, hence explicit Deallocate calls instead of delete.
void ASTRecordLayout::Destroy(ASTContext &Ctx) {
  if (FieldOffsets)
    Ctx.Deallocate(FieldOffsets);
  if (CXXInfo) {
    CXXInfo->~CXXRecordLayoutInfo();
    Ctx.Deallocate(CXXInfo);
  }
  this->~ASTRecordLayout();
  Ctx.Deallocate(this);
}
ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx, CharUnits size,
CharUnits alignment,
CharUnits requiredAlignment,
CharUnits datasize,
const uint64_t *fieldoffsets,
unsigned fieldcount)
: Size(size), DataSize(datasize), Alignment(alignment),
RequiredAlignment(requiredAlignment), FieldOffsets(nullptr),
FieldCount(fieldcount), CXXInfo(nullptr) {
if (FieldCount > 0) {
FieldOffsets = new (Ctx) uint64_t[FieldCount];
memcpy(FieldOffsets, fieldoffsets, FieldCount * sizeof(*FieldOffsets));
}
}
// Constructor for C++ records.
ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
CharUnits size, CharUnits alignment,
CharUnits requiredAlignment,
bool hasOwnVFPtr, bool hasExtendableVFPtr,
CharUnits vbptroffset,
CharUnits datasize,
const uint64_t *fieldoffsets,
unsigned fieldcount,
CharUnits nonvirtualsize,
CharUnits nonvirtualalignment,
CharUnits SizeOfLargestEmptySubobject,
const CXXRecordDecl *PrimaryBase,
bool IsPrimaryBaseVirtual,
const CXXRecordDecl *BaseSharingVBPtr,
bool HasZeroSizedSubObject,
bool LeadsWithZeroSizedBase,
const BaseOffsetsMapTy& BaseOffsets,
const VBaseOffsetsMapTy& VBaseOffsets)
: Size(size), DataSize(datasize), Alignment(alignment),
RequiredAlignment(requiredAlignment), FieldOffsets(nullptr),
FieldCount(fieldcount), CXXInfo(new (Ctx) CXXRecordLayoutInfo)
{
if (FieldCount > 0) {
FieldOffsets = new (Ctx) uint64_t[FieldCount];
memcpy(FieldOffsets, fieldoffsets, FieldCount * sizeof(*FieldOffsets));
}
CXXInfo->PrimaryBase.setPointer(PrimaryBase);
CXXInfo->PrimaryBase.setInt(IsPrimaryBaseVirtual);
CXXInfo->NonVirtualSize = nonvirtualsize;
CXXInfo->NonVirtualAlignment = nonvirtualalignment;
CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject;
CXXInfo->BaseOffsets = BaseOffsets;
CXXInfo->VBaseOffsets = VBaseOffsets;
CXXInfo->HasOwnVFPtr = hasOwnVFPtr;
CXXInfo->VBPtrOffset = vbptroffset;
CXXInfo->HasExtendableVFPtr = hasExtendableVFPtr;
CXXInfo->BaseSharingVBPtr = BaseSharingVBPtr;
CXXInfo->HasZeroSizedSubObject = HasZeroSizedSubObject;
CXXInfo->LeadsWithZeroSizedBase = LeadsWithZeroSizedBase;
#ifndef NDEBUG
if (const CXXRecordDecl *PrimaryBase = getPrimaryBase()) {
if (isPrimaryBaseVirtual()) {
if (Ctx.getTargetInfo().getCXXABI().hasPrimaryVBases()) {
assert(getVBaseClassOffset(PrimaryBase).isZero() &&
"Primary virtual base must be at offset 0!");
}
} else {
assert(getBaseClassOffset(PrimaryBase).isZero() &&
"Primary base must be at offset 0!");
}
}
#endif
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/DeclObjC.cpp | //===--- DeclObjC.cpp - ObjC Declaration AST Node Implementation ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Objective-C related Decl classes.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Stmt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
using namespace clang;
//===----------------------------------------------------------------------===//
// ObjCListBase
//===----------------------------------------------------------------------===//
// Replace the list contents with a copy of InList, allocated from the
// ASTContext (never freed individually; the context owns the memory).
void ObjCListBase::set(void *const* InList, unsigned Elts, ASTContext &Ctx) {
  List = nullptr;
  // Keep the element count consistent with the (now empty) list; previously
  // a stale NumElts could survive alongside a null List.
  NumElts = 0;
  if (Elts == 0) return;  // Setting to an empty list is a noop.

  List = new (Ctx) void*[Elts];
  NumElts = Elts;
  memcpy(List, InList, sizeof(void*)*Elts);
}
void ObjCProtocolList::set(ObjCProtocolDecl* const* InList, unsigned Elts,
const SourceLocation *Locs, ASTContext &Ctx) {
if (Elts == 0)
return;
Locations = new (Ctx) SourceLocation[Elts];
memcpy(Locations, Locs, sizeof(SourceLocation) * Elts);
set(InList, Elts, Ctx);
}
//===----------------------------------------------------------------------===//
// ObjCInterfaceDecl
//===----------------------------------------------------------------------===//
void ObjCContainerDecl::anchor() { }
/// getIvarDecl - This method looks up an ivar in this ContextDecl.
///
/// getIvarDecl - This method looks up an ivar in this ContextDecl.
///
ObjCIvarDecl *
ObjCContainerDecl::getIvarDecl(IdentifierInfo *Id) const {
  // Scan every declaration with this name and return the first ivar found.
  lookup_result Found = lookup(Id);
  for (lookup_iterator It = Found.begin(), End = Found.end(); It != End;
       ++It) {
    if (ObjCIvarDecl *Ivar = dyn_cast<ObjCIvarDecl>(*It))
      return Ivar;
  }
  return nullptr;
}
// Get the local instance/class method declared in this interface.
ObjCMethodDecl *
ObjCContainerDecl::getMethod(Selector Sel, bool isInstance,
bool AllowHidden) const {
// If this context is a hidden protocol definition, don't find any
// methods there.
if (const ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(this)) {
if (const ObjCProtocolDecl *Def = Proto->getDefinition())
if (Def->isHidden() && !AllowHidden)
return nullptr;
}
// Since instance & class methods can have the same name, the loop below
// ensures we get the correct method.
//
// @interface Whatever
// - (int) class_method;
// + (float) class_method;
// @end
//
lookup_result R = lookup(Sel);
for (lookup_iterator Meth = R.begin(), MethEnd = R.end();
Meth != MethEnd; ++Meth) {
ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth);
if (MD && MD->isInstanceMethod() == isInstance)
return MD;
}
return nullptr;
}
/// \brief This routine returns 'true' if a user declared setter method was
/// found in the class, its protocols, its super classes or categories.
/// It also returns 'true' if one of its categories has declared a 'readwrite'
/// property. This is because, user must provide a setter method for the
/// category's 'readwrite' property.
bool ObjCContainerDecl::HasUserDeclaredSetterMethod(
const ObjCPropertyDecl *Property) const {
Selector Sel = Property->getSetterName();
lookup_result R = lookup(Sel);
for (lookup_iterator Meth = R.begin(), MethEnd = R.end();
Meth != MethEnd; ++Meth) {
ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth);
if (MD && MD->isInstanceMethod() && !MD->isImplicit())
return true;
}
if (const ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(this)) {
// Also look into categories, including class extensions, looking
// for a user declared instance method.
for (const auto *Cat : ID->visible_categories()) {
if (ObjCMethodDecl *MD = Cat->getInstanceMethod(Sel))
if (!MD->isImplicit())
return true;
if (Cat->IsClassExtension())
continue;
// Also search through the categories looking for a 'readwrite'
// declaration of this property. If one found, presumably a setter will
// be provided (properties declared in categories will not get
// auto-synthesized).
for (const auto *P : Cat->properties())
if (P->getIdentifier() == Property->getIdentifier()) {
if (P->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readwrite)
return true;
break;
}
}
// Also look into protocols, for a user declared instance method.
for (const auto *Proto : ID->all_referenced_protocols())
if (Proto->HasUserDeclaredSetterMethod(Property))
return true;
// And in its super class.
ObjCInterfaceDecl *OSC = ID->getSuperClass();
while (OSC) {
if (OSC->HasUserDeclaredSetterMethod(Property))
return true;
OSC = OSC->getSuperClass();
}
}
if (const ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(this))
for (const auto *PI : PD->protocols())
if (PI->HasUserDeclaredSetterMethod(Property))
return true;
return false;
}
ObjCPropertyDecl *
ObjCPropertyDecl::findPropertyDecl(const DeclContext *DC,
const IdentifierInfo *propertyID) {
// If this context is a hidden protocol definition, don't find any
// property.
if (const ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(DC)) {
if (const ObjCProtocolDecl *Def = Proto->getDefinition())
if (Def->isHidden())
return nullptr;
}
DeclContext::lookup_result R = DC->lookup(propertyID);
for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E;
++I)
if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(*I))
return PD;
return nullptr;
}
IdentifierInfo *
ObjCPropertyDecl::getDefaultSynthIvarName(ASTContext &Ctx) const {
SmallString<128> ivarName;
{
llvm::raw_svector_ostream os(ivarName);
os << '_' << getIdentifier()->getName();
}
return &Ctx.Idents.get(ivarName.str());
}
/// FindPropertyDeclaration - Finds declaration of the property given its name
/// in 'PropertyId' and returns it. It returns 0, if not found.
ObjCPropertyDecl *ObjCContainerDecl::FindPropertyDeclaration(
const IdentifierInfo *PropertyId) const {
// Don't find properties within hidden protocol definitions.
if (const ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(this)) {
if (const ObjCProtocolDecl *Def = Proto->getDefinition())
if (Def->isHidden())
return nullptr;
}
if (ObjCPropertyDecl *PD =
ObjCPropertyDecl::findPropertyDecl(cast<DeclContext>(this), PropertyId))
return PD;
switch (getKind()) {
default:
break;
case Decl::ObjCProtocol: {
const ObjCProtocolDecl *PID = cast<ObjCProtocolDecl>(this);
for (const auto *I : PID->protocols())
if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
return P;
break;
}
case Decl::ObjCInterface: {
const ObjCInterfaceDecl *OID = cast<ObjCInterfaceDecl>(this);
// Look through categories (but not extensions).
for (const auto *Cat : OID->visible_categories()) {
if (!Cat->IsClassExtension())
if (ObjCPropertyDecl *P = Cat->FindPropertyDeclaration(PropertyId))
return P;
}
// Look through protocols.
for (const auto *I : OID->all_referenced_protocols())
if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
return P;
// Finally, check the super class.
if (const ObjCInterfaceDecl *superClass = OID->getSuperClass())
return superClass->FindPropertyDeclaration(PropertyId);
break;
}
case Decl::ObjCCategory: {
const ObjCCategoryDecl *OCD = cast<ObjCCategoryDecl>(this);
// Look through protocols.
if (!OCD->IsClassExtension())
for (const auto *I : OCD->protocols())
if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
return P;
break;
}
}
return nullptr;
}
void ObjCInterfaceDecl::anchor() { }
ObjCTypeParamList *ObjCInterfaceDecl::getTypeParamList() const {
// If this particular declaration has a type parameter list, return it.
if (ObjCTypeParamList *written = getTypeParamListAsWritten())
return written;
// If there is a definition, return its type parameter list.
if (const ObjCInterfaceDecl *def = getDefinition())
return def->getTypeParamListAsWritten();
// Otherwise, look at previous declarations to determine whether any
// of them has a type parameter list, skipping over those
// declarations that do not.
for (auto decl = getMostRecentDecl(); decl; decl = decl->getPreviousDecl()) {
if (ObjCTypeParamList *written = decl->getTypeParamListAsWritten())
return written;
}
return nullptr;
}
// Attach a type parameter list to this interface, reparenting each
// parameter so its declaration context is this class.
void ObjCInterfaceDecl::setTypeParamList(ObjCTypeParamList *TPL) {
  TypeParamList = TPL;
  if (!TypeParamList)
    return;

  for (auto *TypeParam : *TypeParamList)
    TypeParam->setDeclContext(this);
}
ObjCInterfaceDecl *ObjCInterfaceDecl::getSuperClass() const {
// FIXME: Should make sure no callers ever do this.
if (!hasDefinition())
return nullptr;
if (data().ExternallyCompleted)
LoadExternalDefinition();
if (const ObjCObjectType *superType = getSuperClassType()) {
if (ObjCInterfaceDecl *superDecl = superType->getInterface()) {
if (ObjCInterfaceDecl *superDef = superDecl->getDefinition())
return superDef;
return superDecl;
}
}
return nullptr;
}
// Source location where the superclass was written, or an invalid
// location when no superclass type info is available.
SourceLocation ObjCInterfaceDecl::getSuperClassLoc() const {
  TypeSourceInfo *TInfo = getSuperClassTInfo();
  if (!TInfo)
    return SourceLocation();
  return TInfo->getTypeLoc().getLocStart();
}
/// FindPropertyVisibleInPrimaryClass - Finds declaration of the property
/// with name 'PropertyId' in the primary class; including those in protocols
/// (direct or indirect) used by the primary class.
///
ObjCPropertyDecl *
ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass(
                                            IdentifierInfo *PropertyId) const {
  // FIXME: Should make sure no callers ever do this.
  if (!hasDefinition())
    return nullptr;

  if (data().ExternallyCompleted)
    LoadExternalDefinition();

  // First check properties declared directly in the primary class.
  if (ObjCPropertyDecl *PD =
      ObjCPropertyDecl::findPropertyDecl(cast<DeclContext>(this), PropertyId))
    return PD;

  // Look through protocols.
  for (const auto *I : all_referenced_protocols())
    if (ObjCPropertyDecl *P = I->FindPropertyDeclaration(PropertyId))
      return P;

  return nullptr;
}

/// Collect the properties this class must implement: its own properties plus
/// those of every referenced protocol.  \p PM maps each property name to its
/// declaration; \p PO records declaration order.
void ObjCInterfaceDecl::collectPropertiesToImplement(PropertyMap &PM,
                                            PropertyDeclOrder &PO) const {
  for (auto *Prop : properties()) {
    PM[Prop->getIdentifier()] = Prop;
    PO.push_back(Prop);
  }
  for (const auto *PI : all_referenced_protocols())
    PI->collectPropertiesToImplement(PM, PO);
  // Note, the properties declared only in class extensions are still copied
  // into the main @interface's property list, and therefore we don't
  // explicitly, have to search class extension properties.
}

/// True if this class or any of its superclasses carries the
/// ArcWeakrefUnavailable attribute.
bool ObjCInterfaceDecl::isArcWeakrefUnavailable() const {
  const ObjCInterfaceDecl *Class = this;
  while (Class) {
    if (Class->hasAttr<ArcWeakrefUnavailableAttr>())
      return true;
    Class = Class->getSuperClass();
  }
  return false;
}

/// Return the first class in the superclass chain (this class included) that
/// carries the ObjCRequiresPropertyDefs attribute, or null if none does.
const ObjCInterfaceDecl *ObjCInterfaceDecl::isObjCRequiresPropertyDefs() const {
  const ObjCInterfaceDecl *Class = this;
  while (Class) {
    if (Class->hasAttr<ObjCRequiresPropertyDefsAttr>())
      return Class;
    Class = Class->getSuperClass();
  }
  return nullptr;
}
/// Merge the protocols referenced by a class extension into this class's
/// AllReferencedProtocols list, skipping any protocol already visible on the
/// class.
void ObjCInterfaceDecl::mergeClassExtensionProtocolList(
              ObjCProtocolDecl *const* ExtList, unsigned ExtNum,
              ASTContext &C)
{
  if (data().ExternallyCompleted)
    LoadExternalDefinition();

  // Fast path: the class referenced no protocols of its own, so the
  // extension's list becomes the whole list.
  if (data().AllReferencedProtocols.empty() &&
      data().ReferencedProtocols.empty()) {
    data().AllReferencedProtocols.set(ExtList, ExtNum, C);
    return;
  }

  // Check for duplicate protocol in class's protocol list.
  // This is O(n*m). But it is extremely rare and number of protocols in
  // class or its extension are very few.
  SmallVector<ObjCProtocolDecl*, 8> ProtocolRefs;
  for (unsigned i = 0; i < ExtNum; i++) {
    bool protocolExists = false;
    ObjCProtocolDecl *ProtoInExtension = ExtList[i];
    for (auto *Proto : all_referenced_protocols()) {
      if (C.ProtocolCompatibleWithProtocol(ProtoInExtension, Proto)) {
        protocolExists = true;
        break;
      }
    }
    // Do we want to warn on a protocol in extension class which
    // already exist in the class? Probably not.
    if (!protocolExists)
      ProtocolRefs.push_back(ProtoInExtension);
  }

  if (ProtocolRefs.empty())
    return;

  // Merge ProtocolRefs into class's protocol list;
  ProtocolRefs.append(all_referenced_protocol_begin(),
                      all_referenced_protocol_end());

  data().AllReferencedProtocols.set(ProtocolRefs.data(), ProtocolRefs.size(),C);
}
/// Walk up the superclass chain, starting at this interface, and return the
/// first interface that declares designated initializers of its own.  The
/// walk stops (returning null) as soon as a class does not inherit its
/// designated initializers from its superclass.
const ObjCInterfaceDecl *
ObjCInterfaceDecl::findInterfaceWithDesignatedInitializers() const {
  for (const ObjCInterfaceDecl *Cur = this; Cur;
       Cur = Cur->getSuperClass()) {
    if (Cur->hasDesignatedInitializers())
      return Cur;
    if (!Cur->inheritsDesignatedInitializers())
      return nullptr;
  }
  return nullptr;
}
static bool isIntroducingInitializers(const ObjCInterfaceDecl *D) {
for (const auto *MD : D->instance_methods()) {
if (MD->getMethodFamily() == OMF_init && !MD->isOverriding())
return true;
}
for (const auto *Ext : D->visible_extensions()) {
for (const auto *MD : Ext->instance_methods()) {
if (MD->getMethodFamily() == OMF_init && !MD->isOverriding())
return true;
}
}
if (const auto *ImplD = D->getImplementation()) {
for (const auto *MD : ImplD->instance_methods()) {
if (MD->getMethodFamily() == OMF_init && !MD->isOverriding())
return true;
}
}
return false;
}
/// Determine (and cache) whether this class inherits its designated
/// initializers from its superclass.  The cached value is a tri-state that
/// starts as IDI_Unknown and is resolved on the first query.
bool ObjCInterfaceDecl::inheritsDesignatedInitializers() const {
  switch (data().InheritedDesignatedInitializers) {
  case DefinitionData::IDI_Inherited:
    return true;
  case DefinitionData::IDI_NotInherited:
    return false;
  case DefinitionData::IDI_Unknown: {
    // If the class introduced initializers we conservatively assume that we
    // don't know if any of them is a designated initializer to avoid possible
    // misleading warnings.
    if (isIntroducingInitializers(this)) {
      data().InheritedDesignatedInitializers = DefinitionData::IDI_NotInherited;
    } else {
      // No initializers of its own: inherit whenever the superclass declares
      // or inherits designated initializers.
      if (auto SuperD = getSuperClass()) {
        data().InheritedDesignatedInitializers =
          SuperD->declaresOrInheritsDesignatedInitializers() ?
            DefinitionData::IDI_Inherited :
            DefinitionData::IDI_NotInherited;
      } else {
        data().InheritedDesignatedInitializers =
          DefinitionData::IDI_NotInherited;
      }
    }

    assert(data().InheritedDesignatedInitializers
             != DefinitionData::IDI_Unknown);
    return data().InheritedDesignatedInitializers ==
        DefinitionData::IDI_Inherited;
  }
  }

  llvm_unreachable("unexpected InheritedDesignatedInitializers value");
}
/// Collect this class's designated initializers into \p Methods.  The search
/// is delegated to the interface in the superclass chain that actually
/// declares designated initializers (if any), covering both its primary
/// @interface and its visible class extensions.
void ObjCInterfaceDecl::getDesignatedInitializers(
    llvm::SmallVectorImpl<const ObjCMethodDecl *> &Methods) const {
  // Check for a complete definition and recover if not so.
  if (!isThisDeclarationADefinition())
    return;
  if (data().ExternallyCompleted)
    LoadExternalDefinition();

  const ObjCInterfaceDecl *IFace = findInterfaceWithDesignatedInitializers();
  if (!IFace)
    return;

  for (const auto *MD : IFace->instance_methods())
    if (MD->isThisDeclarationADesignatedInitializer())
      Methods.push_back(MD);
  // Designated initializers may also live in visible class extensions.
  for (const auto *Ext : IFace->visible_extensions()) {
    for (const auto *MD : Ext->instance_methods())
      if (MD->isThisDeclarationADesignatedInitializer())
        Methods.push_back(MD);
  }
}

/// Returns true if \p Sel names a designated initializer of this class (or
/// of the ancestor that declares the designated initializers).  On success,
/// *InitMethod (when non-null) receives the method declaration.
bool ObjCInterfaceDecl::isDesignatedInitializer(Selector Sel,
                                      const ObjCMethodDecl **InitMethod) const {
  // Check for a complete definition and recover if not so.
  if (!isThisDeclarationADefinition())
    return false;
  if (data().ExternallyCompleted)
    LoadExternalDefinition();

  const ObjCInterfaceDecl *IFace = findInterfaceWithDesignatedInitializers();
  if (!IFace)
    return false;

  if (const ObjCMethodDecl *MD = IFace->getInstanceMethod(Sel)) {
    if (MD->isThisDeclarationADesignatedInitializer()) {
      if (InitMethod)
        *InitMethod = MD;
      return true;
    }
  }
  // Also check methods added by visible class extensions.
  for (const auto *Ext : IFace->visible_extensions()) {
    if (const ObjCMethodDecl *MD = Ext->getInstanceMethod(Sel)) {
      if (MD->isThisDeclarationADesignatedInitializer()) {
        if (InitMethod)
          *InitMethod = MD;
        return true;
      }
    }
  }
  return false;
}
/// Allocate the DefinitionData for this class and point the canonical type
/// at this declaration.  Must be called at most once per class.
void ObjCInterfaceDecl::allocateDefinitionData() {
  assert(!hasDefinition() && "ObjC class already has a definition");
  Data.setPointer(new (getASTContext()) DefinitionData());
  Data.getPointer()->Definition = this;

  // Make the type point at the definition, now that we have one.
  if (TypeForDecl)
    cast<ObjCInterfaceType>(TypeForDecl)->Decl = this;
}

/// Mark this declaration as the class's definition and share the definition
/// data with every other redeclaration.
void ObjCInterfaceDecl::startDefinition() {
  allocateDefinitionData();

  // Update all of the declarations with a pointer to the definition.
  for (auto RD : redecls()) {
    if (RD != this)
      RD->Data = Data;
  }
}
/// Look up the instance variable \p ID in this class, its visible class
/// extensions, and its superclasses.  On success, \p clsDeclared is set to
/// the class whose scope declares the ivar.
ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID,
                                              ObjCInterfaceDecl *&clsDeclared) {
  // FIXME: Should make sure no callers ever do this.
  if (!hasDefinition())
    return nullptr;

  if (data().ExternallyCompleted)
    LoadExternalDefinition();

  ObjCInterfaceDecl* ClassDecl = this;
  while (ClassDecl != nullptr) {
    if (ObjCIvarDecl *I = ClassDecl->getIvarDecl(ID)) {
      clsDeclared = ClassDecl;
      return I;
    }
    // Ivars may also be declared in a class extension of this class; they
    // are still reported as belonging to ClassDecl.
    for (const auto *Ext : ClassDecl->visible_extensions()) {
      if (ObjCIvarDecl *I = Ext->getIvarDecl(ID)) {
        clsDeclared = ClassDecl;
        return I;
      }
    }
    ClassDecl = ClassDecl->getSuperClass();
  }
  return nullptr;
}
/// lookupInheritedClass - Walk the superclass chain (this class included)
/// looking for a class named \p ICName.  Returns that class, or null when no
/// class in the chain has that name.
ObjCInterfaceDecl *ObjCInterfaceDecl::lookupInheritedClass(
                        const IdentifierInfo*ICName) {
  // FIXME: Should make sure no callers ever do this.
  if (!hasDefinition())
    return nullptr;

  // Pull in any externally-provided parts of the definition first.
  if (data().ExternallyCompleted)
    LoadExternalDefinition();

  for (ObjCInterfaceDecl *Cur = this; Cur; Cur = Cur->getSuperClass())
    if (Cur->getIdentifier() == ICName)
      return Cur;
  return nullptr;
}
/// Search the referenced protocols of this class and of each superclass for
/// one that is (or transitively inherits) a protocol named \p Name.  Returns
/// the directly-referenced protocol through which the name was found.
ObjCProtocolDecl *
ObjCInterfaceDecl::lookupNestedProtocol(IdentifierInfo *Name) {
  for (ObjCInterfaceDecl *Class = this; Class;
       Class = Class->getSuperClass())
    for (auto *P : Class->all_referenced_protocols())
      if (P->lookupProtocolNamed(Name))
        return P;
  return nullptr;
}
/// lookupMethod - This method returns an instance/class method by looking in
/// the class, its categories, and its super classes (using a linear search).
/// When argument category "C" is specified, any implicit method found
/// in this category is ignored.
ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel,
                                                bool isInstance,
                                                bool shallowCategoryLookup,
                                                bool followSuper,
                                                const ObjCCategoryDecl *C) const
{
  // FIXME: Should make sure no callers ever do this.
  if (!hasDefinition())
    return nullptr;

  const ObjCInterfaceDecl* ClassDecl = this;
  ObjCMethodDecl *MethodDecl = nullptr;

  if (data().ExternallyCompleted)
    LoadExternalDefinition();

  // The search order below (class, categories, class protocols, category
  // protocols, then superclass) is part of this function's contract.
  while (ClassDecl) {
    // 1. Look through primary class.
    if ((MethodDecl = ClassDecl->getMethod(Sel, isInstance)))
      return MethodDecl;

    // 2. Didn't find one yet - now look through categories.
    for (const auto *Cat : ClassDecl->visible_categories())
      if ((MethodDecl = Cat->getMethod(Sel, isInstance)))
        // Implicit methods found in the excluded category C are skipped.
        if (C != Cat || !MethodDecl->isImplicit())
          return MethodDecl;

    // 3. Didn't find one yet - look through primary class's protocols.
    for (const auto *I : ClassDecl->protocols())
      if ((MethodDecl = I->lookupMethod(Sel, isInstance)))
        return MethodDecl;

    // 4. Didn't find one yet - now look through categories' protocols
    if (!shallowCategoryLookup)
      for (const auto *Cat : ClassDecl->visible_categories()) {
        // Didn't find one yet - look through protocols.
        const ObjCList<ObjCProtocolDecl> &Protocols =
          Cat->getReferencedProtocols();
        for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
             E = Protocols.end(); I != E; ++I)
          if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
            if (C != Cat || !MethodDecl->isImplicit())
              return MethodDecl;
      }

    if (!followSuper)
      return nullptr;

    // 5. Get to the super class (if any).
    ClassDecl = ClassDecl->getSuperClass();
  }
  return nullptr;
}
// Will search "local" class/category implementations for a method decl.
// If failed, then we search in class's root for an instance method.
// Returns 0 if no method is found.
ObjCMethodDecl *ObjCInterfaceDecl::lookupPrivateMethod(
                                   const Selector &Sel,
                                   bool Instance) const {
  // FIXME: Should make sure no callers ever do this.
  if (!hasDefinition())
    return nullptr;

  if (data().ExternallyCompleted)
    LoadExternalDefinition();

  ObjCMethodDecl *Method = nullptr;
  // First check this class's own @implementation, if present.
  if (ObjCImplementationDecl *ImpDecl = getImplementation())
    Method = Instance ? ImpDecl->getInstanceMethod(Sel)
                      : ImpDecl->getClassMethod(Sel);

  // Look through local category implementations associated with the class.
  if (!Method)
    Method = getCategoryMethod(Sel, Instance);

  // Before we give up, check if the selector is an instance method.
  // But only in the root. This matches gcc's behavior and what the
  // runtime expects.
  if (!Instance && !Method && !getSuperClass()) {
    Method = lookupInstanceMethod(Sel);
    // Look through local category implementations associated
    // with the root class.
    if (!Method)
      Method = lookupPrivateMethod(Sel, true);
  }

  // Otherwise continue the search in the superclass chain.
  if (!Method && getSuperClass())
    return getSuperClass()->lookupPrivateMethod(Sel, Instance);
  return Method;
}
//===----------------------------------------------------------------------===//
// ObjCMethodDecl
//===----------------------------------------------------------------------===//
/// Create a new Objective-C method declaration with the given selector,
/// return type, and declaration context (interface, category, protocol, or
/// implementation).
ObjCMethodDecl *ObjCMethodDecl::Create(
    ASTContext &C, SourceLocation beginLoc, SourceLocation endLoc,
    Selector SelInfo, QualType T, TypeSourceInfo *ReturnTInfo,
    DeclContext *contextDecl, bool isInstance, bool isVariadic,
    bool isPropertyAccessor, bool isImplicitlyDeclared, bool isDefined,
    ImplementationControl impControl, bool HasRelatedResultType) {
  return new (C, contextDecl) ObjCMethodDecl(
      beginLoc, endLoc, SelInfo, T, ReturnTInfo, contextDecl, isInstance,
      isVariadic, isPropertyAccessor, isImplicitlyDeclared, isDefined,
      impControl, HasRelatedResultType);
}

/// Create an empty method declaration to be filled in by AST
/// deserialization.
ObjCMethodDecl *ObjCMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
  return new (C, ID) ObjCMethodDecl(SourceLocation(), SourceLocation(),
                                    Selector(), QualType(), nullptr, nullptr);
}

/// True if this particular declaration is explicitly marked as a designated
/// initializer (init family + ObjCDesignatedInitializer attribute).
bool ObjCMethodDecl::isThisDeclarationADesignatedInitializer() const {
  return getMethodFamily() == OMF_init &&
      hasAttr<ObjCDesignatedInitializerAttr>();
}

/// True if this init-family method is a designated initializer of its class
/// interface.  Protocol methods never qualify.
bool ObjCMethodDecl::isDesignatedInitializerForTheInterface(
    const ObjCMethodDecl **InitMethod) const {
  if (getMethodFamily() != OMF_init)
    return false;
  const DeclContext *DC = getDeclContext();
  if (isa<ObjCProtocolDecl>(DC))
    return false;
  if (const ObjCInterfaceDecl *ID = getClassInterface())
    return ID->isDesignatedInitializer(getSelector(), InitMethod);
  return false;
}

/// Return the method body, deserializing it from the external AST source if
/// necessary.
Stmt *ObjCMethodDecl::getBody() const {
  return Body.get(getASTContext().getExternalSource());
}

/// Link this method as a redeclaration of \p PrevMethod (recorded in the
/// ASTContext's redeclaration table).
void ObjCMethodDecl::setAsRedeclaration(const ObjCMethodDecl *PrevMethod) {
  assert(PrevMethod);
  getASTContext().setObjCMethodRedeclaration(PrevMethod, this);
  IsRedeclaration = true;
  PrevMethod->HasRedeclaration = true;
}

/// Store the parameters and selector-piece locations in one allocation: the
/// ParmVarDecl pointers followed by the SourceLocations.
void ObjCMethodDecl::setParamsAndSelLocs(ASTContext &C,
                                         ArrayRef<ParmVarDecl*> Params,
                                         ArrayRef<SourceLocation> SelLocs) {
  ParamsAndSelLocs = nullptr;
  NumParams = Params.size();
  if (Params.empty() && SelLocs.empty())
    return;

  unsigned Size = sizeof(ParmVarDecl *) * NumParams +
                  sizeof(SourceLocation) * SelLocs.size();
  ParamsAndSelLocs = C.Allocate(Size);
  std::copy(Params.begin(), Params.end(), getParams());
  std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs());
}

/// Copy every stored selector-piece location into \p SelLocs.
void ObjCMethodDecl::getSelectorLocs(
                               SmallVectorImpl<SourceLocation> &SelLocs) const {
  for (unsigned i = 0, e = getNumSelectorLocs(); i != e; ++i)
    SelLocs.push_back(getSelectorLoc(i));
}

/// Set the method's parameters and selector-piece locations.  When the
/// locations follow a standard layout they can be recomputed on demand, so
/// only the parameters are stored.
void ObjCMethodDecl::setMethodParams(ASTContext &C,
                                     ArrayRef<ParmVarDecl*> Params,
                                     ArrayRef<SourceLocation> SelLocs) {
  assert((!SelLocs.empty() || isImplicit()) &&
         "No selector locs for non-implicit method");
  if (isImplicit())
    return setParamsAndSelLocs(C, Params, llvm::None);

  SelLocsKind = hasStandardSelectorLocs(getSelector(), SelLocs, Params,
                                        DeclEndLoc);
  if (SelLocsKind != SelLoc_NonStandard)
    return setParamsAndSelLocs(C, Params, llvm::None);

  setParamsAndSelLocs(C, Params, SelLocs);
}
/// \brief A definition will return its interface declaration.
/// An interface declaration will return its definition.
/// Otherwise it will return itself.
ObjCMethodDecl *ObjCMethodDecl::getNextRedeclarationImpl() {
  ASTContext &Ctx = getASTContext();
  ObjCMethodDecl *Redecl = nullptr;
  // An explicitly recorded redeclaration link takes precedence.
  if (HasRedeclaration)
    Redecl = const_cast<ObjCMethodDecl*>(Ctx.getObjCMethodRedeclaration(this));
  if (Redecl)
    return Redecl;

  Decl *CtxD = cast<Decl>(getDeclContext());

  // Hop between a declaration context and its counterpart:
  // interface <-> implementation, category <-> category implementation.
  if (!CtxD->isInvalidDecl()) {
    if (ObjCInterfaceDecl *IFD = dyn_cast<ObjCInterfaceDecl>(CtxD)) {
      if (ObjCImplementationDecl *ImplD = Ctx.getObjCImplementation(IFD))
        if (!ImplD->isInvalidDecl())
          Redecl = ImplD->getMethod(getSelector(), isInstanceMethod());

    } else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(CtxD)) {
      if (ObjCCategoryImplDecl *ImplD = Ctx.getObjCImplementation(CD))
        if (!ImplD->isInvalidDecl())
          Redecl = ImplD->getMethod(getSelector(), isInstanceMethod());

    } else if (ObjCImplementationDecl *ImplD =
                 dyn_cast<ObjCImplementationDecl>(CtxD)) {
      if (ObjCInterfaceDecl *IFD = ImplD->getClassInterface())
        if (!IFD->isInvalidDecl())
          Redecl = IFD->getMethod(getSelector(), isInstanceMethod());

    } else if (ObjCCategoryImplDecl *CImplD =
                 dyn_cast<ObjCCategoryImplDecl>(CtxD)) {
      if (ObjCCategoryDecl *CatD = CImplD->getCategoryDecl())
        if (!CatD->isInvalidDecl())
          Redecl = CatD->getMethod(getSelector(), isInstanceMethod());
    }
  }

  if (!Redecl && isRedeclaration()) {
    // This is the last redeclaration, go back to the first method.
    return cast<ObjCContainerDecl>(CtxD)->getMethod(getSelector(),
                                                    isInstanceMethod());
  }

  return Redecl ? Redecl : this;
}

/// Return the canonical declaration of this method: the matching declaration
/// in the @interface / @category when this one lives in an @implementation,
/// or the first declaration when this is a redeclaration.
ObjCMethodDecl *ObjCMethodDecl::getCanonicalDecl() {
  Decl *CtxD = cast<Decl>(getDeclContext());

  if (ObjCImplementationDecl *ImplD = dyn_cast<ObjCImplementationDecl>(CtxD)) {
    if (ObjCInterfaceDecl *IFD = ImplD->getClassInterface())
      if (ObjCMethodDecl *MD = IFD->getMethod(getSelector(),
                                              isInstanceMethod()))
        return MD;

  } else if (ObjCCategoryImplDecl *CImplD =
               dyn_cast<ObjCCategoryImplDecl>(CtxD)) {
    if (ObjCCategoryDecl *CatD = CImplD->getCategoryDecl())
      if (ObjCMethodDecl *MD = CatD->getMethod(getSelector(),
                                               isInstanceMethod()))
        return MD;
  }

  if (isRedeclaration())
    return cast<ObjCContainerDecl>(CtxD)->getMethod(getSelector(),
                                                    isInstanceMethod());

  return this;
}
/// End location of the method: the end of its body when it has one,
/// otherwise the cached declaration end location.
SourceLocation ObjCMethodDecl::getLocEnd() const {
  Stmt *BodyStmt = getBody();
  return BodyStmt ? BodyStmt->getLocEnd() : DeclEndLoc;
}
/// Determine (and cache in the Family bitfield) this method's family
/// (init, alloc, copy, ...).  An explicit objc_method_family attribute wins;
/// otherwise the family implied by the selector is validated against the
/// method's shape (instance vs. class, return type, parameter types).
ObjCMethodFamily ObjCMethodDecl::getMethodFamily() const {
  ObjCMethodFamily family = static_cast<ObjCMethodFamily>(Family);
  if (family != static_cast<unsigned>(InvalidObjCMethodFamily))
    return family;

  // Check for an explicit attribute.
  if (const ObjCMethodFamilyAttr *attr = getAttr<ObjCMethodFamilyAttr>()) {
    // The unfortunate necessity of mapping between enums here is due
    // to the attributes framework.
    switch (attr->getFamily()) {
    case ObjCMethodFamilyAttr::OMF_None: family = OMF_None; break;
    case ObjCMethodFamilyAttr::OMF_alloc: family = OMF_alloc; break;
    case ObjCMethodFamilyAttr::OMF_copy: family = OMF_copy; break;
    case ObjCMethodFamilyAttr::OMF_init: family = OMF_init; break;
    case ObjCMethodFamilyAttr::OMF_mutableCopy: family = OMF_mutableCopy; break;
    case ObjCMethodFamilyAttr::OMF_new: family = OMF_new; break;
    }

    // Cache the result.
    Family = static_cast<unsigned>(family);
    return family;
  }

  family = getSelector().getMethodFamily();
  switch (family) {
  case OMF_None: break;

  // init only has a conventional meaning for an instance method, and
  // it has to return an object.
  case OMF_init:
    if (!isInstanceMethod() || !getReturnType()->isObjCObjectPointerType())
      family = OMF_None;
    break;

  // alloc/copy/new have a conventional meaning for both class and
  // instance methods, but they require an object return.
  case OMF_alloc:
  case OMF_copy:
  case OMF_mutableCopy:
  case OMF_new:
    if (!getReturnType()->isObjCObjectPointerType())
      family = OMF_None;
    break;

  // These selectors have a conventional meaning only for instance methods.
  case OMF_dealloc:
  case OMF_finalize:
  case OMF_retain:
  case OMF_release:
  case OMF_autorelease:
  case OMF_retainCount:
  case OMF_self:
    if (!isInstanceMethod())
      family = OMF_None;
    break;

  // initialize is recognized only as a class method returning void.
  case OMF_initialize:
    if (isInstanceMethod() || !getReturnType()->isVoidType())
      family = OMF_None;
    break;

  case OMF_performSelector:
    if (!isInstanceMethod() || !getReturnType()->isObjCIdType())
      family = OMF_None;
    else {
      // performSelector takes 1-3 arguments: a SEL followed by ids.
      unsigned noParams = param_size();
      if (noParams < 1 || noParams > 3)
        family = OMF_None;
      else {
        ObjCMethodDecl::param_type_iterator it = param_type_begin();
        QualType ArgT = (*it);
        if (!ArgT->isObjCSelType()) {
          family = OMF_None;
          break;
        }
        while (--noParams) {
          it++;
          ArgT = (*it);
          if (!ArgT->isObjCIdType()) {
            family = OMF_None;
            break;
          }
        }
      }
    }
    break;
  }

  // Cache the result.
  Family = static_cast<unsigned>(family);
  return family;
}
/// Compute the type of the implicit 'self' parameter for this method.
/// \param OID the method's interface; may be null after an erroneous
///        interface declaration, in which case 'self' falls back to 'id'.
/// \param[out] selfIsPseudoStrong set under ARC for const 'self' (non-init,
///             non-consuming instance methods and all class methods).
/// \param[out] selfIsConsumed set when the method has ns_consumes_self.
QualType ObjCMethodDecl::getSelfType(ASTContext &Context,
                                     const ObjCInterfaceDecl *OID,
                                     bool &selfIsPseudoStrong,
                                     bool &selfIsConsumed) {
  QualType selfTy;
  selfIsPseudoStrong = false;
  selfIsConsumed = false;
  if (isInstanceMethod()) {
    // There may be no interface context due to error in declaration
    // of the interface (which has been reported). Recover gracefully.
    if (OID) {
      selfTy = Context.getObjCInterfaceType(OID);
      selfTy = Context.getObjCObjectPointerType(selfTy);
    } else {
      selfTy = Context.getObjCIdType();
    }
  } else // we have a factory method.
    selfTy = Context.getObjCClassType();

  if (Context.getLangOpts().ObjCAutoRefCount) {
    if (isInstanceMethod()) {
      selfIsConsumed = hasAttr<NSConsumesSelfAttr>();

      // 'self' is always __strong. It's actually pseudo-strong except
      // in init methods (or methods labeled ns_consumes_self), though.
      Qualifiers qs;
      qs.setObjCLifetime(Qualifiers::OCL_Strong);
      selfTy = Context.getQualifiedType(selfTy, qs);

      // In addition, 'self' is const unless this is an init method.
      if (getMethodFamily() != OMF_init && !selfIsConsumed) {
        selfTy = selfTy.withConst();
        selfIsPseudoStrong = true;
      }
    }
    else {
      assert(isClassMethod());
      // 'self' is always const in class methods.
      selfTy = selfTy.withConst();
      selfIsPseudoStrong = true;
    }
  }
  return selfTy;
}

/// Synthesize the implicit 'self' and '_cmd' parameters for this method.
void ObjCMethodDecl::createImplicitParams(ASTContext &Context,
                                          const ObjCInterfaceDecl *OID) {
  bool selfIsPseudoStrong, selfIsConsumed;
  QualType selfTy =
    getSelfType(Context, OID, selfIsPseudoStrong, selfIsConsumed);
  ImplicitParamDecl *self
    = ImplicitParamDecl::Create(Context, this, SourceLocation(),
                                &Context.Idents.get("self"), selfTy);
  setSelfDecl(self);

  if (selfIsConsumed)
    self->addAttr(NSConsumedAttr::CreateImplicit(Context));

  if (selfIsPseudoStrong)
    self->setARCPseudoStrong(true);

  // '_cmd' has the Objective-C SEL type.
  setCmdDecl(ImplicitParamDecl::Create(Context, this, SourceLocation(),
                                       &Context.Idents.get("_cmd"),
                                       Context.getObjCSelType()));
}
/// Return the interface this method is associated with: its own interface,
/// the interface of its category, or the interface of its implementation.
/// Protocol methods have no associated interface (returns null).
ObjCInterfaceDecl *ObjCMethodDecl::getClassInterface() {
  if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(getDeclContext()))
    return ID;
  if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(getDeclContext()))
    return CD->getClassInterface();
  if (ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(getDeclContext()))
    return IMD->getClassInterface();
  if (isa<ObjCProtocolDecl>(getDeclContext()))
    return nullptr;
  llvm_unreachable("unknown method context");
}
/// Source range of the written return type, or an empty range when no
/// return type source info is available.
SourceRange ObjCMethodDecl::getReturnTypeSourceRange() const {
  if (const auto *TSI = getReturnTypeSourceInfo())
    return TSI->getTypeLoc().getSourceRange();
  return SourceRange();
}
/// Result type of a message send to this method, with lvalue-ness stripped
/// and Objective-C type arguments substituted for the result context.
QualType ObjCMethodDecl::getSendResultType() const {
  ASTContext &Ctx = getASTContext();
  return getReturnType().getNonLValueExprType(Ctx)
           .substObjCTypeArgs(Ctx, {}, ObjCSubstitutionContext::Result);
}

/// Result type of a message send to this method for a specific receiver
/// type, substituting member types from that receiver.
QualType ObjCMethodDecl::getSendResultType(QualType receiverType) const {
  // FIXME: Handle related result types here.
  return getReturnType().getNonLValueExprType(getASTContext())
           .substObjCMemberType(receiverType, getDeclContext(),
                                ObjCSubstitutionContext::Result);
}
/// Recursive worker that collects the methods that \p Method overrides,
/// walking protocols, categories, and the superclass chain of \p Container.
/// \p MovedToSuper is true once the walk has left Method's own class for a
/// superclass, where a same-selector method counts as overridden.
static void CollectOverriddenMethodsRecurse(const ObjCContainerDecl *Container,
                                            const ObjCMethodDecl *Method,
                               SmallVectorImpl<const ObjCMethodDecl *> &Methods,
                                            bool MovedToSuper) {
  if (!Container)
    return;

  // In categories look for overriden methods from protocols. A method from
  // category is not "overriden" since it is considered as the "same" method
  // (same USR) as the one from the interface.
  if (const ObjCCategoryDecl *
        Category = dyn_cast<ObjCCategoryDecl>(Container)) {
    // Check whether we have a matching method at this category but only if we
    // are at the super class level.
    if (MovedToSuper)
      if (ObjCMethodDecl *
            Overridden = Container->getMethod(Method->getSelector(),
                                              Method->isInstanceMethod(),
                                              /*AllowHidden=*/true))
        if (Method != Overridden) {
          // We found an override at this category; there is no need to look
          // into its protocols.
          Methods.push_back(Overridden);
          return;
        }

    for (const auto *P : Category->protocols())
      CollectOverriddenMethodsRecurse(P, Method, Methods, MovedToSuper);
    return;
  }

  // Check whether we have a matching method at this level.
  if (const ObjCMethodDecl *
        Overridden = Container->getMethod(Method->getSelector(),
                                          Method->isInstanceMethod(),
                                          /*AllowHidden=*/true))
    if (Method != Overridden) {
      // We found an override at this level; there is no need to look
      // into other protocols or categories.
      Methods.push_back(Overridden);
      return;
    }

  // Protocols: recurse into inherited protocols.
  if (const ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)){
    for (const auto *P : Protocol->protocols())
      CollectOverriddenMethodsRecurse(P, Method, Methods, MovedToSuper);
  }

  // Interfaces: recurse into protocols, categories, then the superclass.
  if (const ObjCInterfaceDecl *
        Interface = dyn_cast<ObjCInterfaceDecl>(Container)) {
    for (const auto *P : Interface->protocols())
      CollectOverriddenMethodsRecurse(P, Method, Methods, MovedToSuper);

    for (const auto *Cat : Interface->known_categories())
      CollectOverriddenMethodsRecurse(Cat, Method, Methods, MovedToSuper);

    if (const ObjCInterfaceDecl *Super = Interface->getSuperClass())
      return CollectOverriddenMethodsRecurse(Super, Method, Methods,
                                             /*MovedToSuper=*/true);
  }
}

/// Convenience entry point: start the recursive search at \p Container.
static inline void CollectOverriddenMethods(const ObjCContainerDecl *Container,
                                            const ObjCMethodDecl *Method,
                              SmallVectorImpl<const ObjCMethodDecl *> &Methods) {
  CollectOverriddenMethodsRecurse(Container, Method, Methods,
                                  /*MovedToSuper=*/false);
}

/// Collect overridden methods for a method already known to override
/// something.  Methods found in an @implementation or category are first
/// mapped to the corresponding declaration in the @interface so the search
/// starts from the interface.
static void collectOverriddenMethodsSlow(const ObjCMethodDecl *Method,
                          SmallVectorImpl<const ObjCMethodDecl *> &overridden) {
  assert(Method->isOverriding());

  if (const ObjCProtocolDecl *
        ProtD = dyn_cast<ObjCProtocolDecl>(Method->getDeclContext())) {
    CollectOverriddenMethods(ProtD, Method, overridden);

  } else if (const ObjCImplDecl *
               IMD = dyn_cast<ObjCImplDecl>(Method->getDeclContext())) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Start searching for overridden methods using the method from the
    // interface as starting point.
    if (const ObjCMethodDecl *IFaceMeth = ID->getMethod(Method->getSelector(),
                                                    Method->isInstanceMethod(),
                                                    /*AllowHidden=*/true))
      Method = IFaceMeth;
    CollectOverriddenMethods(ID, Method, overridden);

  } else if (const ObjCCategoryDecl *
               CatD = dyn_cast<ObjCCategoryDecl>(Method->getDeclContext())) {
    const ObjCInterfaceDecl *ID = CatD->getClassInterface();
    if (!ID)
      return;
    // Start searching for overridden methods using the method from the
    // interface as starting point.
    if (const ObjCMethodDecl *IFaceMeth = ID->getMethod(Method->getSelector(),
                                                    Method->isInstanceMethod(),
                                                    /*AllowHidden=*/true))
      Method = IFaceMeth;
    CollectOverriddenMethods(ID, Method, overridden);

  } else {
    CollectOverriddenMethods(
                  dyn_cast_or_null<ObjCContainerDecl>(Method->getDeclContext()),
                  Method, overridden);
  }
}

/// Fill \p Overridden with the methods this method overrides.  A
/// redeclaration is first mapped back to the container's primary method.
void ObjCMethodDecl::getOverriddenMethods(
                    SmallVectorImpl<const ObjCMethodDecl *> &Overridden) const {
  const ObjCMethodDecl *Method = this;

  if (Method->isRedeclaration()) {
    Method = cast<ObjCContainerDecl>(Method->getDeclContext())->
                   getMethod(Method->getSelector(), Method->isInstanceMethod());
  }

  if (Method->isOverriding()) {
    collectOverriddenMethodsSlow(Method, Overridden);
    assert(!Overridden.empty() &&
           "ObjCMethodDecl's overriding bit is not as expected");
  }
}
/// If this method is the getter or setter of a property, return that
/// property's declaration; otherwise return null.  With \p CheckOverrides
/// set, the methods this one overrides are also searched.
const ObjCPropertyDecl *
ObjCMethodDecl::findPropertyDecl(bool CheckOverrides) const {
  Selector Sel = getSelector();
  unsigned NumArgs = Sel.getNumArgs();
  // Accessors take at most one argument (the setter's new value).
  if (NumArgs > 1)
    return nullptr;

  if (!isInstanceMethod())
    return nullptr;

  if (isPropertyAccessor()) {
    const ObjCContainerDecl *Container = cast<ObjCContainerDecl>(getParent());
    // If container is class extension, find its primary class.
    if (const ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(Container))
      if (CatDecl->IsClassExtension())
        Container = CatDecl->getClassInterface();

    bool IsGetter = (NumArgs == 0);

    // Match the selector against each property's getter or setter name.
    for (const auto *I : Container->properties()) {
      Selector NextSel = IsGetter ? I->getGetterName()
                                  : I->getSetterName();
      if (NextSel == Sel)
        return I;
    }
    llvm_unreachable("Marked as a property accessor but no property found!");
  }

  if (!CheckOverrides)
    return nullptr;

  // Recurse (non-override mode) into each overridden method.
  typedef SmallVector<const ObjCMethodDecl *, 8> OverridesTy;
  OverridesTy Overrides;
  getOverriddenMethods(Overrides);
  for (OverridesTy::const_iterator I = Overrides.begin(), E = Overrides.end();
       I != E; ++I) {
    if (const ObjCPropertyDecl *Prop = (*I)->findPropertyDecl(false))
      return Prop;
  }

  return nullptr;
}
//===----------------------------------------------------------------------===//
// ObjCTypeParamDecl
//===----------------------------------------------------------------------===//
void ObjCTypeParamDecl::anchor() { }

/// Create a new Objective-C type parameter declaration with the given
/// variance, position (\p index), name, and optional explicit bound.
ObjCTypeParamDecl *ObjCTypeParamDecl::Create(ASTContext &ctx, DeclContext *dc,
                                             ObjCTypeParamVariance variance,
                                             SourceLocation varianceLoc,
                                             unsigned index,
                                             SourceLocation nameLoc,
                                             IdentifierInfo *name,
                                             SourceLocation colonLoc,
                                             TypeSourceInfo *boundInfo) {
  return new (ctx, dc) ObjCTypeParamDecl(ctx, dc, variance, varianceLoc, index,
                                         nameLoc, name, colonLoc, boundInfo);
}

/// Create an empty type parameter for AST deserialization.
ObjCTypeParamDecl *ObjCTypeParamDecl::CreateDeserialized(ASTContext &ctx,
                                                         unsigned ID) {
  return new (ctx, ID) ObjCTypeParamDecl(ctx, nullptr,
                                         ObjCTypeParamVariance::Invariant,
                                         SourceLocation(), 0, SourceLocation(),
                                         nullptr, SourceLocation(), nullptr);
}

/// Source range of the type parameter: from the variance keyword (when
/// written) or the name, through the explicit bound (when written).
SourceRange ObjCTypeParamDecl::getSourceRange() const {
  SourceLocation startLoc = VarianceLoc;
  if (startLoc.isInvalid())
    startLoc = getLocation();

  if (hasExplicitBound()) {
    return SourceRange(startLoc,
                       getTypeSourceInfo()->getTypeLoc().getEndLoc());
  }

  return SourceRange(startLoc);
}

//===----------------------------------------------------------------------===//
// ObjCTypeParamList
//===----------------------------------------------------------------------===//
/// Build a type parameter list; the parameter pointers are copied into the
/// trailing storage allocated by create().
ObjCTypeParamList::ObjCTypeParamList(SourceLocation lAngleLoc,
                                     ArrayRef<ObjCTypeParamDecl *> typeParams,
                                     SourceLocation rAngleLoc)
  : NumParams(typeParams.size())
{
  Brackets.Begin = lAngleLoc.getRawEncoding();
  Brackets.End = rAngleLoc.getRawEncoding();
  std::copy(typeParams.begin(), typeParams.end(), begin());
}

/// Allocate an ObjCTypeParamList with trailing storage for the parameter
/// pointers and construct it in place.
ObjCTypeParamList *ObjCTypeParamList::create(
                     ASTContext &ctx,
                     SourceLocation lAngleLoc,
                     ArrayRef<ObjCTypeParamDecl *> typeParams,
                     SourceLocation rAngleLoc) {
  unsigned size = sizeof(ObjCTypeParamList)
                + sizeof(ObjCTypeParamDecl *) * typeParams.size();
  // The trailing pointer array reuses the list's own alignment.
  static_assert(llvm::AlignOf<ObjCTypeParamList>::Alignment >=
                llvm::AlignOf<ObjCTypeParamDecl *>::Alignment,
                "type parameter list needs greater alignment");
  unsigned align = llvm::alignOf<ObjCTypeParamList>();
  void *mem = ctx.Allocate(size, align);
  return new (mem) ObjCTypeParamList(lAngleLoc, typeParams, rAngleLoc);
}

/// Append each parameter's underlying type, in declaration order, to
/// \p typeArgs.
void ObjCTypeParamList::gatherDefaultTypeArgs(
       SmallVectorImpl<QualType> &typeArgs) const {
  typeArgs.reserve(size());
  for (auto typeParam : *this)
    typeArgs.push_back(typeParam->getUnderlyingType());
}
//===----------------------------------------------------------------------===//
// ObjCInterfaceDecl
//===----------------------------------------------------------------------===//
/// Create a new @interface declaration.  \p PrevDecl links an earlier
/// declaration of the same class; \p isInternal marks the declaration as
/// implicit (see the constructor).
ObjCInterfaceDecl *ObjCInterfaceDecl::Create(const ASTContext &C,
                                             DeclContext *DC,
                                             SourceLocation atLoc,
                                             IdentifierInfo *Id,
                                             ObjCTypeParamList *typeParamList,
                                             ObjCInterfaceDecl *PrevDecl,
                                             SourceLocation ClassLoc,
                                             bool isInternal){
  ObjCInterfaceDecl *Result = new (C, DC)
      ObjCInterfaceDecl(C, DC, atLoc, Id, typeParamList, ClassLoc, PrevDecl,
                        isInternal);
  Result->Data.setInt(!C.getLangOpts().Modules);
  C.getObjCInterfaceType(Result, PrevDecl);
  return Result;
}

/// Create an empty @interface declaration for AST deserialization.
ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(const ASTContext &C,
                                                         unsigned ID) {
  ObjCInterfaceDecl *Result = new (C, ID) ObjCInterfaceDecl(C, nullptr,
                                                            SourceLocation(),
                                                            nullptr,
                                                            nullptr,
                                                            SourceLocation(),
                                                            nullptr, false);
  Result->Data.setInt(!C.getLangOpts().Modules);
  return Result;
}

ObjCInterfaceDecl::ObjCInterfaceDecl(const ASTContext &C, DeclContext *DC,
                                     SourceLocation AtLoc, IdentifierInfo *Id,
                                     ObjCTypeParamList *typeParamList,
                                     SourceLocation CLoc,
                                     ObjCInterfaceDecl *PrevDecl,
                                     bool IsInternal)
    : ObjCContainerDecl(ObjCInterface, DC, Id, CLoc, AtLoc),
      redeclarable_base(C), TypeForDecl(nullptr), TypeParamList(nullptr),
      Data() {
  setPreviousDecl(PrevDecl);

  // Copy the 'data' pointer over.
  if (PrevDecl)
    Data = PrevDecl->Data;

  // Internal interfaces are treated as implicitly declared.
  setImplicit(IsInternal);

  setTypeParamList(typeParamList);
}

/// Ask the external AST source to complete this class's definition.  The
/// ExternallyCompleted flag is cleared before calling out — NOTE(review):
/// presumably so queries made during CompleteType do not re-enter here;
/// confirm against the external source contract.
void ObjCInterfaceDecl::LoadExternalDefinition() const {
  assert(data().ExternallyCompleted && "Class is not externally completed");
  data().ExternallyCompleted = false;
  getASTContext().getExternalSource()->CompleteType(
                                        const_cast<ObjCInterfaceDecl *>(this));
}

/// Mark this class's definition as lazily completed by the external AST
/// source.  Requires an external source and an existing definition.
void ObjCInterfaceDecl::setExternallyCompleted() {
  assert(getASTContext().getExternalSource() &&
         "Class can't be externally completed without an external source");
  assert(hasDefinition() &&
         "Forward declarations can't be externally completed");
  data().ExternallyCompleted = true;
}
void ObjCInterfaceDecl::setHasDesignatedInitializers() {
// Check for a complete definition and recover if not so.
if (!isThisDeclarationADefinition())
return;
data().HasDesignatedInitializers = true;
}
bool ObjCInterfaceDecl::hasDesignatedInitializers() const {
// Check for a complete definition and recover if not so.
if (!isThisDeclarationADefinition())
return false;
if (data().ExternallyCompleted)
LoadExternalDefinition();
return data().HasDesignatedInitializers;
}
StringRef
ObjCInterfaceDecl::getObjCRuntimeNameAsString() const {
if (ObjCRuntimeNameAttr *ObjCRTName = getAttr<ObjCRuntimeNameAttr>())
return ObjCRTName->getMetadataName();
return getName();
}
StringRef
ObjCImplementationDecl::getObjCRuntimeNameAsString() const {
if (ObjCInterfaceDecl *ID =
const_cast<ObjCImplementationDecl*>(this)->getClassInterface())
return ID->getObjCRuntimeNameAsString();
return getName();
}
/// Look up the @implementation for this class, if any, via the ASTContext.
ObjCImplementationDecl *ObjCInterfaceDecl::getImplementation() const {
  const ObjCInterfaceDecl *Def = getDefinition();
  if (!Def) {
    // FIXME: Should make sure no callers ever do this.
    return nullptr;
  }
  // Make sure the definition is fully populated before the lookup.
  if (data().ExternallyCompleted)
    LoadExternalDefinition();
  return getASTContext().getObjCImplementation(
           const_cast<ObjCInterfaceDecl *>(Def));
}
/// Register \p ImplD as this class's @implementation in the ASTContext.
/// The mapping is always keyed by the defining declaration.
void ObjCInterfaceDecl::setImplementation(ObjCImplementationDecl *ImplD) {
  ASTContext &Ctx = getASTContext();
  Ctx.setObjCImplementation(getDefinition(), ImplD);
}
namespace {
  /// Pairs a synthesized ivar with the bit-size of its type so synthesized
  /// ivars can be ordered by size (see all_declared_ivar_begin below).
  struct SynthesizeIvarChunk {
    uint64_t Size;       // Bit-size from ASTContext::getTypeSize.
    ObjCIvarDecl *Ivar;  // The synthesized ivar itself.
    SynthesizeIvarChunk(uint64_t size, ObjCIvarDecl *ivar)
      : Size(size), Ivar(ivar) {}
  };
  // Order chunks by size only; stable_sort preserves declaration order
  // among equal sizes.
  bool operator<(const SynthesizeIvarChunk & LHS,
                 const SynthesizeIvarChunk &RHS) {
    return LHS.Size < RHS.Size;
  }
}
/// all_declared_ivar_begin - return first ivar declared in this class,
/// its extensions and its implementation. Lazily build the list on first
/// access.
///
/// Caveat: The list returned by this method reflects the current
/// state of the parser. The cache will be updated for every ivar
/// added by an extension or the implementation when they are
/// encountered.
/// See also ObjCIvarDecl::Create().
ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
  // FIXME: Should make sure no callers ever do this.
  if (!hasDefinition())
    return nullptr;
  // curIvar tracks the tail of the singly-linked list being built
  // (linked via ObjCIvarDecl::setNextIvar).
  ObjCIvarDecl *curIvar = nullptr;
  if (!data().IvarList) {
    // First, the ivars declared directly on the interface, in order.
    if (!ivar_empty()) {
      ObjCInterfaceDecl::ivar_iterator I = ivar_begin(), E = ivar_end();
      data().IvarList = *I; ++I;
      for (curIvar = data().IvarList; I != E; curIvar = *I, ++I)
        curIvar->setNextIvar(*I);
    }
    // Then ivars from class extensions, appended to the same list.
    for (const auto *Ext : known_extensions()) {
      if (!Ext->ivar_empty()) {
        ObjCCategoryDecl::ivar_iterator
          I = Ext->ivar_begin(),
          E = Ext->ivar_end();
        if (!data().IvarList) {
          data().IvarList = *I; ++I;
          curIvar = data().IvarList;
        }
        for ( ;I != E; curIvar = *I, ++I)
          curIvar->setNextIvar(*I);
      }
    }
    // The implementation's ivars haven't been folded in yet.
    data().IvarListMissingImplementation = true;
  }
  // cached and complete!
  if (!data().IvarListMissingImplementation)
      return data().IvarList;
  if (ObjCImplementationDecl *ImplDecl = getImplementation()) {
    data().IvarListMissingImplementation = false;
    if (!ImplDecl->ivar_empty()) {
      // Valid synthesized ivars are collected separately so they can be
      // appended in size order; everything else goes straight on the list.
      SmallVector<SynthesizeIvarChunk, 16> layout;
      for (auto *IV : ImplDecl->ivars()) {
        if (IV->getSynthesize() && !IV->isInvalidDecl()) {
          layout.push_back(SynthesizeIvarChunk(
                             IV->getASTContext().getTypeSize(IV->getType()), IV));
          continue;
        }
        if (!data().IvarList)
          data().IvarList = IV;
        else
          curIvar->setNextIvar(IV);
        curIvar = IV;
      }
      if (!layout.empty()) {
        // Order synthesized ivars by their size.
        std::stable_sort(layout.begin(), layout.end());
        unsigned Ix = 0, EIx = layout.size();
        if (!data().IvarList) {
          data().IvarList = layout[0].Ivar; Ix++;
          curIvar = data().IvarList;
        }
        for ( ; Ix != EIx; curIvar = layout[Ix].Ivar, Ix++)
          curIvar->setNextIvar(layout[Ix].Ivar);
      }
    }
  }
  return data().IvarList;
}
/// FindCategoryDeclaration - Finds category declaration in the list of
/// categories for this class and returns it. Name of the category is passed
/// in 'CategoryId'. If category not found, return 0;
///
ObjCCategoryDecl *
ObjCInterfaceDecl::FindCategoryDeclaration(IdentifierInfo *CategoryId) const {
  // FIXME: Should make sure no callers ever do this.
  if (!hasDefinition())
    return nullptr;
  // Categories hang off the definition; complete it first if necessary.
  if (data().ExternallyCompleted)
    LoadExternalDefinition();
  for (auto *Category : visible_categories())
    if (Category->getIdentifier() == CategoryId)
      return Category;
  return nullptr;
}
/// Search the implementations of this class's visible categories for an
/// instance method with the given selector.
ObjCMethodDecl *
ObjCInterfaceDecl::getCategoryInstanceMethod(Selector Sel) const {
  for (const auto *Category : visible_categories()) {
    ObjCCategoryImplDecl *Impl = Category->getImplementation();
    if (!Impl)
      continue;
    if (ObjCMethodDecl *MD = Impl->getInstanceMethod(Sel))
      return MD;
  }
  return nullptr;
}
/// Search the implementations of this class's visible categories for a
/// class method with the given selector.
ObjCMethodDecl *ObjCInterfaceDecl::getCategoryClassMethod(Selector Sel) const {
  for (const auto *Category : visible_categories()) {
    ObjCCategoryImplDecl *Impl = Category->getImplementation();
    if (!Impl)
      continue;
    if (ObjCMethodDecl *MD = Impl->getClassMethod(Sel))
      return MD;
  }
  return nullptr;
}
/// ClassImplementsProtocol - Checks that 'lProto' protocol
/// has been implemented in IDecl class, its super class or categories (if
/// lookupCategory is true).
bool ObjCInterfaceDecl::ClassImplementsProtocol(ObjCProtocolDecl *lProto,
                                    bool lookupCategory,
                                    bool RHSIsQualifiedID) {
  // Without a definition there is no protocol list to inspect.
  if (!hasDefinition())
    return false;
  ObjCInterfaceDecl *IDecl = this;
  // 1st, look up the class.
  for (auto *PI : IDecl->protocols()){
    if (getASTContext().ProtocolCompatibleWithProtocol(lProto, PI))
      return true;
    // This is dubious and is added to be compatible with gcc. In gcc, it is
    // also allowed assigning a protocol-qualified 'id' type to a LHS object
    // when protocol in qualified LHS is in list of protocols in the rhs 'id'
    // object. This IMO, should be a bug.
    // FIXME: Treat this as an extension, and flag this as an error when GCC
    // extensions are not enabled.
    if (RHSIsQualifiedID &&
        getASTContext().ProtocolCompatibleWithProtocol(PI, lProto))
      return true;
  }
  // 2nd, look up the category.
  if (lookupCategory)
    for (const auto *Cat : visible_categories()) {
      for (auto *PI : Cat->protocols())
        if (getASTContext().ProtocolCompatibleWithProtocol(lProto, PI))
          return true;
    }
  // 3rd, look up the super class(s) — recurse up the inheritance chain.
  if (IDecl->getSuperClass())
    return
  IDecl->getSuperClass()->ClassImplementsProtocol(lProto, lookupCategory,
                                                  RHSIsQualifiedID);
  return false;
}
//===----------------------------------------------------------------------===//
// ObjCIvarDecl
//===----------------------------------------------------------------------===//
// Out-of-line virtual method providing a home for the class (LLVM's anchor idiom).
void ObjCIvarDecl::anchor() { }
/// Create an ivar declaration in \p DC, invalidating any cached flattened
/// ivar list on the owning interface so it is lazily rebuilt.
ObjCIvarDecl *ObjCIvarDecl::Create(ASTContext &C, ObjCContainerDecl *DC,
                                   SourceLocation StartLoc,
                                   SourceLocation IdLoc, IdentifierInfo *Id,
                                   QualType T, TypeSourceInfo *TInfo,
                                   AccessControl ac, Expr *BW,
                                   bool synthesized) {
  if (DC) {
    // Ivar's can only appear in interfaces, implementations (via synthesized
    // properties), and class extensions (via direct declaration, or synthesized
    // properties).
    //
    // FIXME: This should really be asserting this:
    //   (isa<ObjCCategoryDecl>(DC) &&
    //    cast<ObjCCategoryDecl>(DC)->IsClassExtension()))
    // but unfortunately we sometimes place ivars into non-class extension
    // categories on error. This breaks an AST invariant, and should not be
    // fixed.
    assert((isa<ObjCInterfaceDecl>(DC) || isa<ObjCImplementationDecl>(DC) ||
            isa<ObjCCategoryDecl>(DC)) &&
           "Invalid ivar decl context!");
    // Once a new ivar is created in any of class/class-extension/implementation
    // decl contexts, the previously built IvarList must be rebuilt.
    ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(DC);
    if (!ID) {
      if (ObjCImplementationDecl *IM = dyn_cast<ObjCImplementationDecl>(DC))
        ID = IM->getClassInterface();
      else
        ID = cast<ObjCCategoryDecl>(DC)->getClassInterface();
    }
    // Drop the cache; all_declared_ivar_begin() rebuilds it on demand.
    ID->setIvarList(nullptr);
  }
  return new (C, DC) ObjCIvarDecl(DC, StartLoc, IdLoc, Id, T, TInfo, ac, BW,
                                  synthesized);
}
/// Create an empty shell for the AST reader; all fields are filled in
/// during deserialization.
ObjCIvarDecl *ObjCIvarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
  return new (C, ID) ObjCIvarDecl(/*DC=*/nullptr, SourceLocation(),
                                  SourceLocation(), /*Id=*/nullptr,
                                  QualType(), /*TInfo=*/nullptr,
                                  ObjCIvarDecl::None, /*BW=*/nullptr,
                                  /*synthesized=*/false);
}
/// Return the class interface this ivar logically belongs to, looking
/// through implementations and class-extension categories.
const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const {
  const ObjCContainerDecl *DC = cast<ObjCContainerDecl>(getDeclContext());
  switch (DC->getKind()) {
  default:
  case ObjCCategoryImpl:
  case ObjCProtocol:
    llvm_unreachable("invalid ivar container!");
    // Ivars can only appear in class extension categories.
  case ObjCCategory: {
    const ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(DC);
    assert(CD->IsClassExtension() && "invalid container for ivar!");
    return CD->getClassInterface();
  }
  case ObjCImplementation:
    return cast<ObjCImplementationDecl>(DC)->getClassInterface();
  case ObjCInterface:
    return cast<ObjCInterfaceDecl>(DC);
  }
}
/// Type of this ivar as seen through an object of the given type,
/// substituting any type parameters of the enclosing class.
QualType ObjCIvarDecl::getUsageType(QualType objectType) const {
  QualType Ty = getType();
  return Ty.substObjCMemberType(objectType, getDeclContext(),
                                ObjCSubstitutionContext::Property);
}
//===----------------------------------------------------------------------===//
// ObjCAtDefsFieldDecl
//===----------------------------------------------------------------------===//
// Out-of-line virtual method providing a home for the class (LLVM's anchor idiom).
void ObjCAtDefsFieldDecl::anchor() { }
/// Trivial factory: all state is forwarded straight to the constructor.
ObjCAtDefsFieldDecl
*ObjCAtDefsFieldDecl::Create(ASTContext &C, DeclContext *DC,
                             SourceLocation StartLoc,  SourceLocation IdLoc,
                             IdentifierInfo *Id, QualType T, Expr *BW) {
  ObjCAtDefsFieldDecl *Field =
      new (C, DC) ObjCAtDefsFieldDecl(DC, StartLoc, IdLoc, Id, T, BW);
  return Field;
}
/// Create an empty shell for the AST reader to populate.
ObjCAtDefsFieldDecl *ObjCAtDefsFieldDecl::CreateDeserialized(ASTContext &C,
                                                             unsigned ID) {
  return new (C, ID) ObjCAtDefsFieldDecl(/*DC=*/nullptr, SourceLocation(),
                                         SourceLocation(), /*Id=*/nullptr,
                                         QualType(), /*BW=*/nullptr);
}
//===----------------------------------------------------------------------===//
// ObjCProtocolDecl
//===----------------------------------------------------------------------===//
// Out-of-line virtual method providing a home for the class (LLVM's anchor idiom).
void ObjCProtocolDecl::anchor() { }
/// Construct a protocol declaration, linking it into the redeclaration
/// chain headed by \p PrevDecl (if any) and sharing its definition data.
ObjCProtocolDecl::ObjCProtocolDecl(ASTContext &C, DeclContext *DC,
                                   IdentifierInfo *Id, SourceLocation nameLoc,
                                   SourceLocation atStartLoc,
                                   ObjCProtocolDecl *PrevDecl)
    : ObjCContainerDecl(ObjCProtocol, DC, Id, nameLoc, atStartLoc),
      redeclarable_base(C), Data() {
  setPreviousDecl(PrevDecl);
  // All redeclarations share a single DefinitionData pointer.
  if (PrevDecl)
    Data = PrevDecl->Data;
}
/// Create a protocol declaration, chaining it to \p PrevDecl if given.
ObjCProtocolDecl *ObjCProtocolDecl::Create(ASTContext &C, DeclContext *DC,
                                           IdentifierInfo *Id,
                                           SourceLocation nameLoc,
                                           SourceLocation atStartLoc,
                                           ObjCProtocolDecl *PrevDecl) {
  ObjCProtocolDecl *PD =
      new (C, DC) ObjCProtocolDecl(C, DC, Id, nameLoc, atStartLoc, PrevDecl);
  // Record, in the tag bit of Data, whether this was created outside of a
  // modules build.
  PD->Data.setInt(!C.getLangOpts().Modules);
  return PD;
}
/// Create an empty shell for the AST reader to populate.
ObjCProtocolDecl *ObjCProtocolDecl::CreateDeserialized(ASTContext &C,
                                                       unsigned ID) {
  ObjCProtocolDecl *PD =
      new (C, ID) ObjCProtocolDecl(C, /*DC=*/nullptr, /*Id=*/nullptr,
                                   SourceLocation(), SourceLocation(),
                                   /*PrevDecl=*/nullptr);
  // Same tag-bit initialization as ObjCProtocolDecl::Create.
  PD->Data.setInt(!C.getLangOpts().Modules);
  return PD;
}
/// Depth-first search for a protocol named \p Name, starting from this
/// protocol and recursing through everything it inherits.
ObjCProtocolDecl *ObjCProtocolDecl::lookupProtocolNamed(IdentifierInfo *Name) {
  if (getIdentifier() == Name)
    return this;
  for (auto *Inherited : protocols())
    if (ObjCProtocolDecl *Found = Inherited->lookupProtocolNamed(Name))
      return Found;
  return nullptr;
}
// lookupMethod - Lookup a instance/class method in the protocol and protocols
// it inherited.
ObjCMethodDecl *ObjCProtocolDecl::lookupMethod(Selector Sel,
                                               bool isInstance) const {
  // If there is no definition or the definition is hidden, we don't find
  // anything.
  const ObjCProtocolDecl *Def = getDefinition();
  if (!Def || Def->isHidden())
    return nullptr;
  // Check this protocol itself before recursing into inherited protocols.
  if (ObjCMethodDecl *MD = getMethod(Sel, isInstance))
    return MD;
  for (const auto *Inherited : protocols())
    if (ObjCMethodDecl *MD = Inherited->lookupMethod(Sel, isInstance))
      return MD;
  return nullptr;
}
void ObjCProtocolDecl::allocateDefinitionData() {
assert(!Data.getPointer() && "Protocol already has a definition!");
Data.setPointer(new (getASTContext()) DefinitionData);
Data.getPointer()->Definition = this;
}
void ObjCProtocolDecl::startDefinition() {
allocateDefinitionData();
// Update all of the declarations with a pointer to the definition.
for (auto RD : redecls())
RD->Data = this->Data;
}
/// Collect all properties this protocol (and its inherited protocols)
/// requires, recording each identifier once in \p PM and the declaration
/// order in \p PO.
void ObjCProtocolDecl::collectPropertiesToImplement(PropertyMap &PM,
                                                    PropertyDeclOrder &PO) const {
  const ObjCProtocolDecl *PDecl = getDefinition();
  if (!PDecl)
    return;
  for (auto *Prop : PDecl->properties()) {
    // Insert into PM if not there already.
    PM.insert(std::make_pair(Prop->getIdentifier(), Prop));
    PO.push_back(Prop);
  }
  // Scan through protocol's protocols.
  for (const auto *PI : PDecl->protocols())
    PI->collectPropertiesToImplement(PM, PO);
}
void ObjCProtocolDecl::collectInheritedProtocolProperties(
const ObjCPropertyDecl *Property,
ProtocolPropertyMap &PM) const {
if (const ObjCProtocolDecl *PDecl = getDefinition()) {
bool MatchFound = false;
for (auto *Prop : PDecl->properties()) {
if (Prop == Property)
continue;
if (Prop->getIdentifier() == Property->getIdentifier()) {
PM[PDecl] = Prop;
MatchFound = true;
break;
}
}
// Scan through protocol's protocols which did not have a matching property.
if (!MatchFound)
for (const auto *PI : PDecl->protocols())
PI->collectInheritedProtocolProperties(Property, PM);
}
}
/// Name to use for this protocol in runtime metadata: an explicit
/// objc_runtime_name attribute overrides the source-level name.
StringRef
ObjCProtocolDecl::getObjCRuntimeNameAsString() const {
  const ObjCRuntimeNameAttr *RTName = getAttr<ObjCRuntimeNameAttr>();
  return RTName ? RTName->getMetadataName() : getName();
}
//===----------------------------------------------------------------------===//
// ObjCCategoryDecl
//===----------------------------------------------------------------------===//
// Out-of-line virtual method providing a home for the class (LLVM's anchor idiom).
void ObjCCategoryDecl::anchor() { }
/// Construct a category of \p IDecl. The type parameter list, if any, is
/// installed via setTypeParamList so the parameters are reparented here.
ObjCCategoryDecl::ObjCCategoryDecl(DeclContext *DC, SourceLocation AtLoc,
                                   SourceLocation ClassNameLoc,
                                   SourceLocation CategoryNameLoc,
                                   IdentifierInfo *Id, ObjCInterfaceDecl *IDecl,
                                   ObjCTypeParamList *typeParamList,
                                   SourceLocation IvarLBraceLoc,
                                   SourceLocation IvarRBraceLoc)
  : ObjCContainerDecl(ObjCCategory, DC, Id, ClassNameLoc, AtLoc),
    ClassInterface(IDecl), TypeParamList(nullptr),
    NextClassCategory(nullptr), CategoryNameLoc(CategoryNameLoc),
    IvarLBraceLoc(IvarLBraceLoc), IvarRBraceLoc(IvarRBraceLoc)
{
  // Not set in the init list: setTypeParamList also fixes up the decl
  // context of each type parameter.
  setTypeParamList(typeParamList);
}
/// Create a category declaration and, when the class interface is known and
/// defined, link it into that class's category list (notifying any AST
/// mutation listener).
ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, DeclContext *DC,
                                           SourceLocation AtLoc,
                                           SourceLocation ClassNameLoc,
                                           SourceLocation CategoryNameLoc,
                                           IdentifierInfo *Id,
                                           ObjCInterfaceDecl *IDecl,
                                           ObjCTypeParamList *typeParamList,
                                           SourceLocation IvarLBraceLoc,
                                           SourceLocation IvarRBraceLoc) {
  ObjCCategoryDecl *CatDecl =
      new (C, DC) ObjCCategoryDecl(DC, AtLoc, ClassNameLoc, CategoryNameLoc, Id,
                                   IDecl, typeParamList, IvarLBraceLoc,
                                   IvarRBraceLoc);
  if (IDecl) {
    // Link this category into its class's category list.
    CatDecl->NextClassCategory = IDecl->getCategoryListRaw();
    if (IDecl->hasDefinition()) {
      IDecl->setCategoryListRaw(CatDecl);
      if (ASTMutationListener *L = C.getASTMutationListener())
        L->AddedObjCCategoryToInterface(CatDecl, IDecl);
    }
  }
  return CatDecl;
}
/// Create an empty shell for the AST reader to populate.
ObjCCategoryDecl *ObjCCategoryDecl::CreateDeserialized(ASTContext &C,
                                                       unsigned ID) {
  return new (C, ID) ObjCCategoryDecl(/*DC=*/nullptr, SourceLocation(),
                                      SourceLocation(), SourceLocation(),
                                      /*Id=*/nullptr, /*IDecl=*/nullptr,
                                      /*typeParamList=*/nullptr);
}
/// Look up this category's @implementation, if any, via the ASTContext.
ObjCCategoryImplDecl *ObjCCategoryDecl::getImplementation() const {
  ASTContext &Ctx = getASTContext();
  return Ctx.getObjCImplementation(const_cast<ObjCCategoryDecl *>(this));
}
/// Register \p ImplD as this category's @implementation in the ASTContext.
void ObjCCategoryDecl::setImplementation(ObjCCategoryImplDecl *ImplD) {
  ASTContext &Ctx = getASTContext();
  Ctx.setObjCImplementation(this, ImplD);
}
/// Install the category's type parameter list, reparenting each parameter
/// into this category so name lookup finds it here.
void ObjCCategoryDecl::setTypeParamList(ObjCTypeParamList *TPL) {
  TypeParamList = TPL;
  if (!TPL)
    return;
  for (auto *typeParam : *TPL)
    typeParam->setDeclContext(this);
}
//===----------------------------------------------------------------------===//
// ObjCCategoryImplDecl
//===----------------------------------------------------------------------===//
// Out-of-line virtual method providing a home for the class (LLVM's anchor idiom).
void ObjCCategoryImplDecl::anchor() { }
/// Create a category implementation. The implementation is always anchored
/// to the *defining* declaration of the interface when one exists.
ObjCCategoryImplDecl *
ObjCCategoryImplDecl::Create(ASTContext &C, DeclContext *DC,
                             IdentifierInfo *Id,
                             ObjCInterfaceDecl *ClassInterface,
                             SourceLocation nameLoc,
                             SourceLocation atStartLoc,
                             SourceLocation CategoryNameLoc) {
  ObjCInterfaceDecl *IFace = ClassInterface;
  if (IFace && IFace->hasDefinition())
    IFace = IFace->getDefinition();
  return new (C, DC) ObjCCategoryImplDecl(DC, Id, IFace, nameLoc,
                                          atStartLoc, CategoryNameLoc);
}
/// Create an empty shell for the AST reader to populate.
ObjCCategoryImplDecl *ObjCCategoryImplDecl::CreateDeserialized(ASTContext &C,
                                                               unsigned ID) {
  return new (C, ID) ObjCCategoryImplDecl(/*DC=*/nullptr, /*Id=*/nullptr,
                                          /*ClassInterface=*/nullptr,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation());
}
/// Find the @interface category this block implements, by name.
ObjCCategoryDecl *ObjCCategoryImplDecl::getCategoryDecl() const {
  // The class interface might be NULL if we are working with invalid code.
  const ObjCInterfaceDecl *ID = getClassInterface();
  return ID ? ID->FindCategoryDeclaration(getIdentifier()) : nullptr;
}
void ObjCImplDecl::anchor() { }
/// Add a @synthesize/@dynamic declaration to this implementation block,
/// making this block its lexical context.
void ObjCImplDecl::addPropertyImplementation(ObjCPropertyImplDecl *property) {
  // FIXME: The context should be correct before we get here.
  property->setLexicalDeclContext(this);
  addDecl(property);
}
/// Associate this implementation with \p IFace, registering it in the
/// ASTContext as the implementation of the corresponding interface (for a
/// class @implementation) or category (for a category @implementation).
void ObjCImplDecl::setClassInterface(ObjCInterfaceDecl *IFace) {
  ASTContext &Ctx = getASTContext();
  if (ObjCImplementationDecl *ImplD
        = dyn_cast_or_null<ObjCImplementationDecl>(this)) {
    if (IFace)
      Ctx.setObjCImplementation(IFace, ImplD);
  } else if (ObjCCategoryImplDecl *ImplD =
             dyn_cast_or_null<ObjCCategoryImplDecl>(this)) {
    // IFace may be null when recovering from invalid code (see the comment
    // in getCategoryDecl); guard the lookup just like the interface branch
    // does, instead of dereferencing a possibly-null pointer.
    if (IFace)
      if (ObjCCategoryDecl *CD =
              IFace->FindCategoryDeclaration(getIdentifier()))
        Ctx.setObjCImplementation(CD, ImplD);
  }
  ClassInterface = IFace;
}
/// FindPropertyImplIvarDecl - This method lookup the ivar in the list of
/// properties implemented in this \@implementation block and returns
/// the implemented property that uses it.
///
ObjCPropertyImplDecl *ObjCImplDecl::
FindPropertyImplIvarDecl(IdentifierInfo *ivarId) const {
  for (auto *PID : property_impls()) {
    auto *Ivar = PID->getPropertyIvarDecl();
    if (Ivar && Ivar->getIdentifier() == ivarId)
      return PID;
  }
  return nullptr;
}
/// FindPropertyImplDecl - This method looks up a previous ObjCPropertyImplDecl
/// added to the list of those properties \@synthesized/\@dynamic in this
/// category \@implementation block.
///
ObjCPropertyImplDecl *ObjCImplDecl::
FindPropertyImplDecl(IdentifierInfo *Id) const {
  for (auto *Impl : property_impls()) {
    if (Impl->getPropertyDecl()->getIdentifier() == Id)
      return Impl;
  }
  return nullptr;
}
/// Print a category implementation by its name.
raw_ostream &clang::operator<<(raw_ostream &OS,
                               const ObjCCategoryImplDecl &CID) {
  return OS << CID.getName();
}
//===----------------------------------------------------------------------===//
// ObjCImplementationDecl
//===----------------------------------------------------------------------===//
// Out-of-line virtual method providing a home for the class (LLVM's anchor idiom).
void ObjCImplementationDecl::anchor() { }
/// Create a class implementation, always anchoring it to the *defining*
/// declaration of the interface when one exists.
ObjCImplementationDecl *
ObjCImplementationDecl::Create(ASTContext &C, DeclContext *DC,
                               ObjCInterfaceDecl *ClassInterface,
                               ObjCInterfaceDecl *SuperDecl,
                               SourceLocation nameLoc,
                               SourceLocation atStartLoc,
                               SourceLocation superLoc,
                               SourceLocation IvarLBraceLoc,
                               SourceLocation IvarRBraceLoc) {
  ObjCInterfaceDecl *IFace = ClassInterface;
  if (IFace && IFace->hasDefinition())
    IFace = IFace->getDefinition();
  return new (C, DC) ObjCImplementationDecl(DC, IFace, SuperDecl, nameLoc,
                                            atStartLoc, superLoc,
                                            IvarLBraceLoc, IvarRBraceLoc);
}
/// Create an empty shell for the AST reader to populate.
ObjCImplementationDecl *
ObjCImplementationDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
  return new (C, ID) ObjCImplementationDecl(/*DC=*/nullptr,
                                            /*classInterface=*/nullptr,
                                            /*superDecl=*/nullptr,
                                            SourceLocation(), SourceLocation());
}
/// Store the given ivar initializers, copying the pointer array into
/// ASTContext-owned storage so it outlives the caller's buffer.
void ObjCImplementationDecl::setIvarInitializers(ASTContext &C,
                                    CXXCtorInitializer ** initializers,
                                                 unsigned numInitializers) {
  if (numInitializers == 0)
    return;
  NumIvarInitializers = numInitializers;
  CXXCtorInitializer **ivarInitializers =
      new (C) CXXCtorInitializer*[numInitializers];
  memcpy(ivarInitializers, initializers,
         numInitializers * sizeof(CXXCtorInitializer*));
  IvarInitializers = ivarInitializers;
}
/// Begin-iterator over ivar initializers; IvarInitializers.get() lazily
/// deserializes them from the external AST source when needed.
ObjCImplementationDecl::init_const_iterator
ObjCImplementationDecl::init_begin() const {
  ExternalASTSource *Source = getASTContext().getExternalSource();
  return IvarInitializers.get(Source);
}
/// Print a class implementation by its name.
raw_ostream &clang::operator<<(raw_ostream &OS,
                               const ObjCImplementationDecl &ID) {
  return OS << ID.getName();
}
//===----------------------------------------------------------------------===//
// ObjCCompatibleAliasDecl
//===----------------------------------------------------------------------===//
// Out-of-line virtual method providing a home for the class (LLVM's anchor idiom).
void ObjCCompatibleAliasDecl::anchor() { }
/// Trivial factory for an @compatibility_alias declaration.
ObjCCompatibleAliasDecl *
ObjCCompatibleAliasDecl::Create(ASTContext &C, DeclContext *DC,
                                SourceLocation L,
                                IdentifierInfo *Id,
                                ObjCInterfaceDecl* AliasedClass) {
  ObjCCompatibleAliasDecl *Alias =
      new (C, DC) ObjCCompatibleAliasDecl(DC, L, Id, AliasedClass);
  return Alias;
}
/// Create an empty shell for the AST reader to populate.
ObjCCompatibleAliasDecl *
ObjCCompatibleAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
  return new (C, ID) ObjCCompatibleAliasDecl(/*DC=*/nullptr, SourceLocation(),
                                             /*Id=*/nullptr,
                                             /*AliasedClass=*/nullptr);
}
//===----------------------------------------------------------------------===//
// ObjCPropertyDecl
//===----------------------------------------------------------------------===//
// Out-of-line virtual method providing a home for the class (LLVM's anchor idiom).
void ObjCPropertyDecl::anchor() { }
/// Trivial factory for an @property declaration.
ObjCPropertyDecl *ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC,
                                           SourceLocation L,
                                           IdentifierInfo *Id,
                                           SourceLocation AtLoc,
                                           SourceLocation LParenLoc,
                                           QualType T,
                                           TypeSourceInfo *TSI,
                                           PropertyControl propControl) {
  ObjCPropertyDecl *PD = new (C, DC) ObjCPropertyDecl(DC, L, Id, AtLoc,
                                                      LParenLoc, T, TSI,
                                                      propControl);
  return PD;
}
/// Create an empty shell for the AST reader to populate.
ObjCPropertyDecl *ObjCPropertyDecl::CreateDeserialized(ASTContext &C,
                                                       unsigned ID) {
  return new (C, ID) ObjCPropertyDecl(/*DC=*/nullptr, SourceLocation(),
                                      /*Id=*/nullptr, SourceLocation(),
                                      SourceLocation(), QualType(),
                                      /*TSI=*/nullptr, None);
}
/// Type of this property as seen through an object of the given type,
/// substituting any type parameters of the enclosing class.
QualType ObjCPropertyDecl::getUsageType(QualType objectType) const {
  return DeclType.substObjCMemberType(objectType, getDeclContext(),
                                      ObjCSubstitutionContext::Property);
}
//===----------------------------------------------------------------------===//
// ObjCPropertyImplDecl
//===----------------------------------------------------------------------===//
/// Trivial factory for a @synthesize/@dynamic declaration.
ObjCPropertyImplDecl *ObjCPropertyImplDecl::Create(ASTContext &C,
                                                   DeclContext *DC,
                                                   SourceLocation atLoc,
                                                   SourceLocation L,
                                                   ObjCPropertyDecl *property,
                                                   Kind PK,
                                                   ObjCIvarDecl *ivar,
                                                   SourceLocation ivarLoc) {
  ObjCPropertyImplDecl *PID =
      new (C, DC) ObjCPropertyImplDecl(DC, atLoc, L, property, PK, ivar,
                                       ivarLoc);
  return PID;
}
/// Create an empty shell for the AST reader to populate.
ObjCPropertyImplDecl *ObjCPropertyImplDecl::CreateDeserialized(ASTContext &C,
                                                               unsigned ID) {
  return new (C, ID) ObjCPropertyImplDecl(/*DC=*/nullptr, SourceLocation(),
                                          SourceLocation(),
                                          /*property=*/nullptr, Dynamic,
                                          /*ivar=*/nullptr, SourceLocation());
}
/// Source range from the '@' through the ivar name (when one was written)
/// or the property name otherwise.
SourceRange ObjCPropertyImplDecl::getSourceRange() const {
  return SourceRange(AtLoc, IvarLoc.isValid() ? IvarLoc : getLocation());
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/DeclFriend.cpp | //===--- DeclFriend.cpp - C++ Friend Declaration AST Node Implementation --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AST classes related to C++ friend
// declarations.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclTemplate.h"
using namespace clang;
void FriendDecl::anchor() { }
/// Resolve the next friend in the chain; NextFriend.get() lazily
/// deserializes it from the external AST source when necessary.
FriendDecl *FriendDecl::getNextFriendSlowCase() {
  Decl *Next = NextFriend.get(getASTContext().getExternalSource());
  return cast_or_null<FriendDecl>(Next);
}
/// Create a friend declaration inside the class \p DC, allocating trailing
/// storage for any friend-type template parameter lists and registering the
/// new decl on the class's friend list.
FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC,
                               SourceLocation L,
                               FriendUnion Friend,
                               SourceLocation FriendL,
                        ArrayRef<TemplateParameterList*> FriendTypeTPLists) {
#ifndef NDEBUG
  if (Friend.is<NamedDecl*>()) {
    NamedDecl *D = Friend.get<NamedDecl*>();
    assert(isa<FunctionDecl>(D) ||
           isa<CXXRecordDecl>(D) ||
           isa<FunctionTemplateDecl>(D) ||
           isa<ClassTemplateDecl>(D));
    // As a temporary hack, we permit template instantiation to point
    // to the original declaration when instantiating members.
    assert(D->getFriendObjectKind() ||
           (cast<CXXRecordDecl>(DC)->getTemplateSpecializationKind()));
    // These template parameters are for friend types only.
    assert(FriendTypeTPLists.size() == 0);
  }
#endif
  // Trailing storage holds the template parameter lists.
  std::size_t Extra = FriendTypeTPLists.size() * sizeof(TemplateParameterList*);
  FriendDecl *FD = new (C, DC, Extra) FriendDecl(DC, L, Friend, FriendL,
                                                 FriendTypeTPLists);
  cast<CXXRecordDecl>(DC)->pushFriendDecl(FD);
  return FD;
}
/// Create an empty shell with room for \p FriendTypeNumTPLists trailing
/// template parameter lists; the AST reader fills in the rest.
FriendDecl *FriendDecl::CreateDeserialized(ASTContext &C, unsigned ID,
                                           unsigned FriendTypeNumTPLists) {
  const std::size_t Extra =
      FriendTypeNumTPLists * sizeof(TemplateParameterList *);
  return new (C, ID, Extra) FriendDecl(EmptyShell(), FriendTypeNumTPLists);
}
/// Head of this class's friend list; FirstFriend.get() lazily deserializes
/// it from the external AST source when necessary.
FriendDecl *CXXRecordDecl::getFirstFriend() const {
  ExternalASTSource *Source = getParentASTContext().getExternalSource();
  if (Decl *First = data().FirstFriend.get(Source))
    return cast<FriendDecl>(First);
  return nullptr;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/NSAPI.cpp | //===--- NSAPI.cpp - NSFoundation APIs ------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/NSAPI.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
/// All cached identifiers/selectors start out null and are interned lazily
/// on first use by the getNS* accessors below.
NSAPI::NSAPI(ASTContext &ctx)
  : Ctx(ctx), ClassIds(), BOOLId(nullptr), NSIntegerId(nullptr),
    NSUIntegerId(nullptr), NSASCIIStringEncodingId(nullptr),
    NSUTF8StringEncodingId(nullptr) {}
/// Identifier for one of the well-known Foundation classes, interned in the
/// ASTContext's identifier table on first request and cached thereafter.
IdentifierInfo *NSAPI::getNSClassId(NSClassIdKindKind K) const {
  static const char *ClassName[NumClassIds] = {
    "NSObject",
    "NSString",
    "NSArray",
    "NSMutableArray",
    "NSDictionary",
    "NSMutableDictionary",
    "NSNumber",
    "NSMutableSet",
    "NSMutableOrderedSet",
    "NSValue"
  };
  if (!ClassIds[K])
    ClassIds[K] = &Ctx.Idents.get(ClassName[K]);
  return ClassIds[K];
}
/// Selector for the given NSString factory/initializer method, built and
/// cached on first request.
Selector NSAPI::getNSStringSelector(NSStringMethodKind MK) const {
  if (NSStringSelectors[MK].isNull()) {
    Selector Sel;
    switch (MK) {
    case NSStr_stringWithString:
      Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("stringWithString"));
      break;
    case NSStr_stringWithUTF8String:
      Sel = Ctx.Selectors.getUnarySelector(
                                       &Ctx.Idents.get("stringWithUTF8String"));
      break;
    case NSStr_initWithUTF8String:
      Sel = Ctx.Selectors.getUnarySelector(
                                       &Ctx.Idents.get("initWithUTF8String"));
      break;
    case NSStr_stringWithCStringEncoding: {
      // Two-argument selector: stringWithCString:encoding:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("stringWithCString"),
        &Ctx.Idents.get("encoding")
      };
      Sel = Ctx.Selectors.getSelector(2, KeyIdents);
      break;
    }
    case NSStr_stringWithCString:
      Sel= Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("stringWithCString"));
      break;
    case NSStr_initWithString:
      Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithString"));
      break;
    }
    return (NSStringSelectors[MK] = Sel);
  }
  return NSStringSelectors[MK];
}
/// Map a selector back to the NSString method it names, if any.
Optional<NSAPI::NSStringMethodKind>
NSAPI::getNSStringMethodKind(Selector Sel) const {
  // Linear scan over the small, fixed set of known NSString selectors.
  for (unsigned i = 0; i != NumNSStringMethods; ++i) {
    NSStringMethodKind MK = NSStringMethodKind(i);
    if (getNSStringSelector(MK) == Sel)
      return MK;
  }
  return None;
}
/// Selector for the given NSArray/NSMutableArray method, built and cached
/// on first request.
Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const {
  if (NSArraySelectors[MK].isNull()) {
    Selector Sel;
    switch (MK) {
    case NSArr_array:
      Sel = Ctx.Selectors.getNullarySelector(&Ctx.Idents.get("array"));
      break;
    case NSArr_arrayWithArray:
      Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithArray"));
      break;
    case NSArr_arrayWithObject:
      Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithObject"));
      break;
    case NSArr_arrayWithObjects:
      Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithObjects"));
      break;
    case NSArr_arrayWithObjectsCount: {
      // arrayWithObjects:count:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("arrayWithObjects"),
        &Ctx.Idents.get("count")
      };
      Sel = Ctx.Selectors.getSelector(2, KeyIdents);
      break;
    }
    case NSArr_initWithArray:
      Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithArray"));
      break;
    case NSArr_initWithObjects:
      Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithObjects"));
      break;
    case NSArr_objectAtIndex:
      Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectAtIndex"));
      break;
    case NSMutableArr_replaceObjectAtIndex: {
      // replaceObjectAtIndex:withObject:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("replaceObjectAtIndex"),
        &Ctx.Idents.get("withObject")
      };
      Sel = Ctx.Selectors.getSelector(2, KeyIdents);
      break;
    }
    case NSMutableArr_addObject:
      Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("addObject"));
      break;
    case NSMutableArr_insertObjectAtIndex: {
      // insertObject:atIndex:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("insertObject"),
        &Ctx.Idents.get("atIndex")
      };
      Sel = Ctx.Selectors.getSelector(2, KeyIdents);
      break;
    }
    case NSMutableArr_setObjectAtIndexedSubscript: {
      // setObject:atIndexedSubscript:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("setObject"),
        &Ctx.Idents.get("atIndexedSubscript")
      };
      Sel = Ctx.Selectors.getSelector(2, KeyIdents);
      break;
    }
    }
    return (NSArraySelectors[MK] = Sel);
  }
  return NSArraySelectors[MK];
}
/// Map a selector back to the NSArray method it names, if any.
Optional<NSAPI::NSArrayMethodKind> NSAPI::getNSArrayMethodKind(Selector Sel) {
  // Linear scan over the small, fixed set of known NSArray selectors.
  for (unsigned i = 0; i != NumNSArrayMethods; ++i) {
    NSArrayMethodKind MK = NSArrayMethodKind(i);
    if (getNSArraySelector(MK) == Sel)
      return MK;
  }
  return None;
}
/// Selector for the given NSDictionary/NSMutableDictionary method, built
/// and cached on first request.
Selector NSAPI::getNSDictionarySelector(
                                       NSDictionaryMethodKind MK) const {
  if (NSDictionarySelectors[MK].isNull()) {
    Selector Sel;
    switch (MK) {
    case NSDict_dictionary:
      Sel = Ctx.Selectors.getNullarySelector(&Ctx.Idents.get("dictionary"));
      break;
    case NSDict_dictionaryWithDictionary:
      Sel = Ctx.Selectors.getUnarySelector(
                                   &Ctx.Idents.get("dictionaryWithDictionary"));
      break;
    case NSDict_dictionaryWithObjectForKey: {
      // dictionaryWithObject:forKey:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("dictionaryWithObject"),
        &Ctx.Idents.get("forKey")
      };
      Sel = Ctx.Selectors.getSelector(2, KeyIdents);
      break;
    }
    case NSDict_dictionaryWithObjectsForKeys: {
      // dictionaryWithObjects:forKeys:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("dictionaryWithObjects"),
        &Ctx.Idents.get("forKeys")
      };
      Sel = Ctx.Selectors.getSelector(2, KeyIdents);
      break;
    }
    case NSDict_dictionaryWithObjectsForKeysCount: {
      // dictionaryWithObjects:forKeys:count:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("dictionaryWithObjects"),
        &Ctx.Idents.get("forKeys"),
        &Ctx.Idents.get("count")
      };
      Sel = Ctx.Selectors.getSelector(3, KeyIdents);
      break;
    }
    case NSDict_dictionaryWithObjectsAndKeys:
      Sel = Ctx.Selectors.getUnarySelector(
                               &Ctx.Idents.get("dictionaryWithObjectsAndKeys"));
      break;
    case NSDict_initWithDictionary:
      Sel = Ctx.Selectors.getUnarySelector(
                                         &Ctx.Idents.get("initWithDictionary"));
      break;
    case NSDict_initWithObjectsAndKeys:
      Sel = Ctx.Selectors.getUnarySelector(
                                     &Ctx.Idents.get("initWithObjectsAndKeys"));
      break;
    case NSDict_initWithObjectsForKeys: {
      // initWithObjects:forKeys:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("initWithObjects"),
        &Ctx.Idents.get("forKeys")
      };
      Sel = Ctx.Selectors.getSelector(2, KeyIdents);
      break;
    }
    case NSDict_objectForKey:
      Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectForKey"));
      break;
    case NSMutableDict_setObjectForKey: {
      // setObject:forKey:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("setObject"),
        &Ctx.Idents.get("forKey")
      };
      Sel = Ctx.Selectors.getSelector(2, KeyIdents);
      break;
    }
    case NSMutableDict_setObjectForKeyedSubscript: {
      // setObject:forKeyedSubscript:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("setObject"),
        &Ctx.Idents.get("forKeyedSubscript")
      };
      Sel = Ctx.Selectors.getSelector(2, KeyIdents);
      break;
    }
    case NSMutableDict_setValueForKey: {
      // setValue:forKey:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("setValue"),
        &Ctx.Idents.get("forKey")
      };
      Sel = Ctx.Selectors.getSelector(2, KeyIdents);
      break;
    }
    }
    return (NSDictionarySelectors[MK] = Sel);
  }
  return NSDictionarySelectors[MK];
}
/// Map a selector back to the NSDictionary method it names, if any.
Optional<NSAPI::NSDictionaryMethodKind>
NSAPI::getNSDictionaryMethodKind(Selector Sel) {
  // Linear scan over the small, fixed set of known NSDictionary selectors.
  for (unsigned i = 0; i != NumNSDictionaryMethods; ++i) {
    NSDictionaryMethodKind MK = NSDictionaryMethodKind(i);
    if (getNSDictionarySelector(MK) == Sel)
      return MK;
  }
  return None;
}
/// Selector for the given NSMutableSet/NSMutableOrderedSet method, built
/// and cached on first request.
Selector NSAPI::getNSSetSelector(NSSetMethodKind MK) const {
  if (NSSetSelectors[MK].isNull()) {
    Selector Sel;
    switch (MK) {
    case NSMutableSet_addObject:
      Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("addObject"));
      break;
    case NSOrderedSet_insertObjectAtIndex: {
      // insertObject:atIndex:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("insertObject"),
        &Ctx.Idents.get("atIndex")
      };
      Sel = Ctx.Selectors.getSelector(2, KeyIdents);
      break;
    }
    case NSOrderedSet_setObjectAtIndex: {
      // setObject:atIndex:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("setObject"),
        &Ctx.Idents.get("atIndex")
      };
      Sel = Ctx.Selectors.getSelector(2, KeyIdents);
      break;
    }
    case NSOrderedSet_setObjectAtIndexedSubscript: {
      // setObject:atIndexedSubscript:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("setObject"),
        &Ctx.Idents.get("atIndexedSubscript")
      };
      Sel = Ctx.Selectors.getSelector(2, KeyIdents);
      break;
    }
    case NSOrderedSet_replaceObjectAtIndexWithObject: {
      // replaceObjectAtIndex:withObject:
      IdentifierInfo *KeyIdents[] = {
        &Ctx.Idents.get("replaceObjectAtIndex"),
        &Ctx.Idents.get("withObject")
      };
      Sel = Ctx.Selectors.getSelector(2, KeyIdents);
      break;
    }
    }
    return (NSSetSelectors[MK] = Sel);
  }
  return NSSetSelectors[MK];
}
/// Map a selector back to the NSSet method it names, if any.
Optional<NSAPI::NSSetMethodKind>
NSAPI::getNSSetMethodKind(Selector Sel) {
  // Linear scan over the small, fixed set of known NSSet selectors.
  for (unsigned i = 0; i != NumNSSetMethods; ++i) {
    NSSetMethodKind MK = NSSetMethodKind(i);
    if (getNSSetSelector(MK) == Sel)
      return MK;
  }
  return None;
}
/// Selector for the NSNumber literal method \p MK — the class factory
/// ("numberWith...") or instance initializer ("initWith...") form depending
/// on \p Instance — built and cached on first request. The two name tables
/// are indexed in parallel by NSNumberLiteralMethodKind.
Selector NSAPI::getNSNumberLiteralSelector(NSNumberLiteralMethodKind MK,
                                           bool Instance) const {
  static const char *ClassSelectorName[NumNSNumberLiteralMethods] = {
    "numberWithChar",
    "numberWithUnsignedChar",
    "numberWithShort",
    "numberWithUnsignedShort",
    "numberWithInt",
    "numberWithUnsignedInt",
    "numberWithLong",
    "numberWithUnsignedLong",
    "numberWithLongLong",
    "numberWithUnsignedLongLong",
    "numberWithFloat",
    "numberWithDouble",
    "numberWithBool",
    "numberWithInteger",
    "numberWithUnsignedInteger"
  };
  static const char *InstanceSelectorName[NumNSNumberLiteralMethods] = {
    "initWithChar",
    "initWithUnsignedChar",
    "initWithShort",
    "initWithUnsignedShort",
    "initWithInt",
    "initWithUnsignedInt",
    "initWithLong",
    "initWithUnsignedLong",
    "initWithLongLong",
    "initWithUnsignedLongLong",
    "initWithFloat",
    "initWithDouble",
    "initWithBool",
    "initWithInteger",
    "initWithUnsignedInteger"
  };
  // Select the matching cache and name table.
  Selector *Sels;
  const char **Names;
  if (Instance) {
    Sels = NSNumberInstanceSelectors;
    Names = InstanceSelectorName;
  } else {
    Sels = NSNumberClassSelectors;
    Names = ClassSelectorName;
  }
  if (Sels[MK].isNull())
    Sels[MK] = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get(Names[MK]));
  return Sels[MK];
}
/// Map \p Sel back to the NSNumber literal method kind it names (either the
/// class-factory or instance-init form), or None for an unknown selector.
Optional<NSAPI::NSNumberLiteralMethodKind>
NSAPI::getNSNumberLiteralMethodKind(Selector Sel) const {
  // Scan each kind; isNSNumberLiteralSelector checks both selector families.
  for (unsigned Idx = 0; Idx < NumNSNumberLiteralMethods; ++Idx) {
    NSNumberLiteralMethodKind Kind =
        static_cast<NSNumberLiteralMethodKind>(Idx);
    if (isNSNumberLiteralSelector(Kind, Sel))
      return Kind;
  }
  return None;
}
/// Determine which NSNumber factory method corresponds to the builtin
/// type \p T, or None when no factory produces this type.
Optional<NSAPI::NSNumberLiteralMethodKind>
NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
  const BuiltinType *BT = T->getAs<BuiltinType>();
  if (!BT)
    return None;
  // Typedef spellings take precedence over the underlying builtin so that
  // BOOL/NSInteger/NSUInteger map to their dedicated factory methods.
  const TypedefType *TDT = T->getAs<TypedefType>();
  if (TDT) {
    QualType TDTTy = QualType(TDT, 0);
    if (isObjCBOOLType(TDTTy))
      return NSAPI::NSNumberWithBool;
    if (isObjCNSIntegerType(TDTTy))
      return NSAPI::NSNumberWithInteger;
    if (isObjCNSUIntegerType(TDTTy))
      return NSAPI::NSNumberWithUnsignedInteger;
  }
  // Otherwise map the builtin kind directly to a factory method.
  switch (BT->getKind()) {
  case BuiltinType::Char_S: // plain 'char' on targets where it is signed
  case BuiltinType::SChar:
    return NSAPI::NSNumberWithChar;
  case BuiltinType::Char_U: // plain 'char' on targets where it is unsigned
  case BuiltinType::UChar:
    return NSAPI::NSNumberWithUnsignedChar;
  case BuiltinType::Short:
    return NSAPI::NSNumberWithShort;
  case BuiltinType::UShort:
    return NSAPI::NSNumberWithUnsignedShort;
  case BuiltinType::Int:
    return NSAPI::NSNumberWithInt;
  case BuiltinType::UInt:
    return NSAPI::NSNumberWithUnsignedInt;
  case BuiltinType::Long:
    return NSAPI::NSNumberWithLong;
  case BuiltinType::ULong:
    return NSAPI::NSNumberWithUnsignedLong;
  case BuiltinType::LongLong:
    return NSAPI::NSNumberWithLongLong;
  case BuiltinType::ULongLong:
    return NSAPI::NSNumberWithUnsignedLongLong;
  case BuiltinType::Float:
    return NSAPI::NSNumberWithFloat;
  case BuiltinType::Double:
    return NSAPI::NSNumberWithDouble;
  case BuiltinType::Bool:
    return NSAPI::NSNumberWithBool;
  // All remaining builtin kinds have no NSNumber factory; listed
  // explicitly (rather than 'default') so new kinds trigger a
  // -Wswitch warning here.
  case BuiltinType::Void:
  case BuiltinType::WChar_U:
  case BuiltinType::WChar_S:
  case BuiltinType::Char16:
  case BuiltinType::Char32:
  case BuiltinType::Int128:
  case BuiltinType::LongDouble:
  case BuiltinType::UInt128:
  case BuiltinType::NullPtr:
  case BuiltinType::ObjCClass:
  case BuiltinType::ObjCId:
  case BuiltinType::ObjCSel:
  case BuiltinType::OCLImage1d:
  case BuiltinType::OCLImage1dArray:
  case BuiltinType::OCLImage1dBuffer:
  case BuiltinType::OCLImage2d:
  case BuiltinType::OCLImage2dArray:
  case BuiltinType::OCLImage3d:
  case BuiltinType::OCLSampler:
  case BuiltinType::OCLEvent:
  case BuiltinType::BoundMember:
  case BuiltinType::Dependent:
  case BuiltinType::Overload:
  case BuiltinType::UnknownAny:
  case BuiltinType::ARCUnbridgedCast:
  case BuiltinType::Half:
  case BuiltinType::PseudoObject:
  case BuiltinType::BuiltinFn:
    break;
  }
  return None;
}
/// \brief Returns true if \param T is a typedef of "BOOL" in objective-c.
bool NSAPI::isObjCBOOLType(QualType T) const {
  // BOOLId caches the interned identifier across calls.
  return isObjCTypedef(T, "BOOL", BOOLId);
}
/// \brief Returns true if \param T is a typedef of "NSInteger" in objective-c.
bool NSAPI::isObjCNSIntegerType(QualType T) const {
  return isObjCTypedef(T, "NSInteger", NSIntegerId);
}
/// \brief Returns true if \param T is a typedef of "NSUInteger" in objective-c.
bool NSAPI::isObjCNSUIntegerType(QualType T) const {
  return isObjCTypedef(T, "NSUInteger", NSUIntegerId);
}
/// Return the name of the well-known integral typedef (int8_t..int64_t,
/// uint8_t..uint64_t, NSInteger, NSUInteger) that \p T is spelled as,
/// walking through intermediate typedefs. Returns an empty StringRef when
/// \p T is not such a typedef, is null, or ObjC support is disabled.
StringRef NSAPI::GetNSIntegralKind(QualType T) const {
  if (!Ctx.getLangOpts().ObjC1 || T.isNull())
    return StringRef();
  // Desugar one typedef level at a time; stop at the first recognized name.
  while (const TypedefType *TDT = T->getAs<TypedefType>()) {
    // Fix: local was previously misspelled "NSIntegralResust".
    StringRef NSIntegralResult =
      llvm::StringSwitch<StringRef>(
        TDT->getDecl()->getDeclName().getAsIdentifierInfo()->getName())
    .Case("int8_t", "int8_t")
    .Case("int16_t", "int16_t")
    .Case("int32_t", "int32_t")
    .Case("NSInteger", "NSInteger")
    .Case("int64_t", "int64_t")
    .Case("uint8_t", "uint8_t")
    .Case("uint16_t", "uint16_t")
    .Case("uint32_t", "uint32_t")
    .Case("NSUInteger", "NSUInteger")
    .Case("uint64_t", "uint64_t")
    .Default(StringRef());
    if (!NSIntegralResult.empty())
      return NSIntegralResult;
    T = TDT->desugar();
  }
  return StringRef();
}
/// Returns true if a macro with the given name has ever been defined,
/// as recorded on the interned identifier.
bool NSAPI::isMacroDefined(StringRef Id) const {
  // FIXME: Check whether the relevant module macros are visible.
  return Ctx.Idents.get(Id).hasMacroDefinition();
}
/// Returns true when \p InterfaceDecl is the named NS class or inherits
/// from it (the class itself counts as its own "subclass" here).
bool NSAPI::isSubclassOfNSClass(ObjCInterfaceDecl *InterfaceDecl,
                                NSClassIdKindKind NSClassKind) const {
  if (!InterfaceDecl)
    return false;
  IdentifierInfo *TargetId = getNSClassId(NSClassKind);
  // Walk up the superclass chain, starting at the class itself, comparing
  // interned identifiers (pointer equality).
  for (ObjCInterfaceDecl *Decl = InterfaceDecl; Decl;
       Decl = Decl->getSuperClass())
    if (Decl->getIdentifier() == TargetId)
      return true;
  return false;
}
/// Returns true if \p T is (or desugars through) a typedef named \p name.
/// \param II caches the interned identifier for \p name across calls; it is
///        filled in on first use.
bool NSAPI::isObjCTypedef(QualType T,
                          StringRef name, IdentifierInfo *&II) const {
  if (!Ctx.getLangOpts().ObjC1)
    return false;
  if (T.isNull())
    return false;
  // Intern the name once; subsequent calls reuse the cached identifier.
  if (!II)
    II = &Ctx.Idents.get(name);
  // Peel off typedef sugar one level at a time looking for the name.
  while (const TypedefType *TDT = T->getAs<TypedefType>()) {
    if (TDT->getDecl()->getDeclName().getAsIdentifierInfo() == II)
      return true;
    T = TDT->desugar();
  }
  return false;
}
/// Returns true if \p E is a reference to an enum constant named \p name.
/// \param II caches the interned identifier for \p name across calls.
bool NSAPI::isObjCEnumerator(const Expr *E,
                             StringRef name, IdentifierInfo *&II) const {
  if (!Ctx.getLangOpts().ObjC1)
    return false;
  if (!E)
    return false;
  if (!II)
    II = &Ctx.Idents.get(name);
  // Look through parens and implicit casts to the underlying decl ref.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
    if (const EnumConstantDecl *
          EnumD = dyn_cast_or_null<EnumConstantDecl>(DRE->getDecl()))
      return EnumD->getIdentifier() == II;
  return false;
}
/// Build the selector composed of the given identifier pieces, caching the
/// result in \p Sel so the construction happens at most once.
Selector NSAPI::getOrInitSelector(ArrayRef<StringRef> Ids,
                                  Selector &Sel) const {
  if (Sel.isNull()) {
    // Intern each piece, then form the (possibly multi-keyword) selector.
    SmallVector<IdentifierInfo *, 4> Pieces;
    for (StringRef Id : Ids)
      Pieces.push_back(&Ctx.Idents.get(Id));
    Sel = Ctx.Selectors.getSelector(Pieces.size(), Pieces.data());
  }
  return Sel;
}
//===--- TypePrinter.cpp - Pretty-Print Clang Types -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to print types from Clang's type system.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
namespace {
  /// \brief RAII object that enables printing of the ARC __strong lifetime
  /// qualifier.
  class IncludeStrongLifetimeRAII {
    PrintingPolicy &Policy;
    bool Old; // saved SuppressStrongLifetime, restored on destruction
  public:
    explicit IncludeStrongLifetimeRAII(PrintingPolicy &Policy)
      : Policy(Policy), Old(Policy.SuppressStrongLifetime) {
        if (!Policy.SuppressLifetimeQualifiers)
          Policy.SuppressStrongLifetime = false;
    }
    ~IncludeStrongLifetimeRAII() {
      Policy.SuppressStrongLifetime = Old;
    }
  };
  /// RAII object that clears SuppressSpecifiers while printing function
  /// parameters, restoring the previous setting on destruction.
  class ParamPolicyRAII {
    PrintingPolicy &Policy;
    bool Old; // saved SuppressSpecifiers
  public:
    explicit ParamPolicyRAII(PrintingPolicy &Policy)
      : Policy(Policy), Old(Policy.SuppressSpecifiers) {
        Policy.SuppressSpecifiers = false;
    }
    ~ParamPolicyRAII() {
      Policy.SuppressSpecifiers = Old;
    }
  };
  /// RAII object that suppresses tag keywords and scope qualifiers while
  /// printing the named type of an elaborated type.
  class ElaboratedTypePolicyRAII {
    PrintingPolicy &Policy;
    bool SuppressTagKeyword; // saved values, restored on destruction
    bool SuppressScope;
  public:
    explicit ElaboratedTypePolicyRAII(PrintingPolicy &Policy) : Policy(Policy) {
      SuppressTagKeyword = Policy.SuppressTagKeyword;
      SuppressScope = Policy.SuppressScope;
      Policy.SuppressTagKeyword = true;
      Policy.SuppressScope = true;
    }
    ~ElaboratedTypePolicyRAII() {
      Policy.SuppressTagKeyword = SuppressTagKeyword;
      Policy.SuppressScope = SuppressScope;
    }
  };
  /// Pretty-prints Clang types. Types are printed in two halves around the
  /// declarator name: a "before" part (e.g. "int ") and an "after" part
  /// (e.g. "[10]"), dispatched per type class via TypeNodes.def.
  class TypePrinter {
    PrintingPolicy Policy;
    // True while no declarator placeholder remains to be printed; controls
    // whether a separating space is emitted (see spaceBeforePlaceHolder).
    bool HasEmptyPlaceHolder;
    // True while printing inside a calling-convention attribute, to avoid
    // re-printing the convention.
    bool InsideCCAttribute;
  public:
    explicit TypePrinter(const PrintingPolicy &Policy)
      : Policy(Policy), HasEmptyPlaceHolder(false), InsideCCAttribute(false) { }
    void print(const Type *ty, Qualifiers qs, raw_ostream &OS,
               StringRef PlaceHolder);
    void print(QualType T, raw_ostream &OS, StringRef PlaceHolder);
    static bool canPrefixQualifiers(const Type *T, bool &NeedARCStrongQualifier);
    void spaceBeforePlaceHolder(raw_ostream &OS);
    void printTypeSpec(const NamedDecl *D, raw_ostream &OS);
    void printBefore(const Type *ty, Qualifiers qs, raw_ostream &OS);
    void printBefore(QualType T, raw_ostream &OS);
    void printAfter(const Type *ty, Qualifiers qs, raw_ostream &OS);
    void printAfter(QualType T, raw_ostream &OS);
    void AppendScope(DeclContext *DC, raw_ostream &OS);
    void printTag(TagDecl *T, raw_ostream &OS);
    // Declare a printXBefore/printXAfter pair for every concrete type class.
#define ABSTRACT_TYPE(CLASS, PARENT)
#define TYPE(CLASS, PARENT) \
    void print##CLASS##Before(const CLASS##Type *T, raw_ostream &OS); \
    void print##CLASS##After(const CLASS##Type *T, raw_ostream &OS);
#include "clang/AST/TypeNodes.def"
  };
}
/// Print the CVR qualifiers in \p TypeQuals to \p OS, separated by single
/// spaces. 'restrict' is spelled per C99 when \p C99 is set, and as the
/// '__restrict' extension keyword otherwise.
static void AppendTypeQualList(raw_ostream &OS, unsigned TypeQuals, bool C99) {
  bool NeedSpace = false;
  // Emit a separating space before every qualifier except the first.
  auto Separate = [&] {
    if (NeedSpace)
      OS << ' ';
    NeedSpace = true;
  };
  if (TypeQuals & Qualifiers::Const) {
    Separate();
    OS << "const";
  }
  if (TypeQuals & Qualifiers::Volatile) {
    Separate();
    OS << "volatile";
  }
  if (TypeQuals & Qualifiers::Restrict) {
    Separate();
    OS << (C99 ? "restrict" : "__restrict");
  }
}
// Emit the space that separates the type from a following declarator name,
// but only when a non-empty placeholder will actually be printed.
void TypePrinter::spaceBeforePlaceHolder(raw_ostream &OS) {
  if (!HasEmptyPlaceHolder)
    OS << ' ';
}
/// Print the qualified type with \p PlaceHolder in the declarator position,
/// splitting it into its unqualified type and its qualifiers first.
void TypePrinter::print(QualType t, raw_ostream &OS, StringRef PlaceHolder) {
  SplitQualType split = t.split();
  print(split.Ty, split.Quals, OS, PlaceHolder);
}
void TypePrinter::print(const Type *T, Qualifiers Quals, raw_ostream &OS,
                        StringRef PlaceHolder) {
  if (!T) {
    OS << "NULL TYPE";
    return;
  }
  SaveAndRestore<bool> PHVal(HasEmptyPlaceHolder, PlaceHolder.empty());
  // HLSL Change Starts
  // Print 'char *' as 'string' and 'const char *' as 'const string'
  if (Policy.LangOpts.HLSL) {
    if (T->isPointerType()) {
      QualType Pointee = T->getPointeeType();
      if (Pointee->isSpecificBuiltinType(BuiltinType::Char_S)) {
        // Qualifiers on the pointee (e.g. const) print before "string".
        Quals = Pointee.getQualifiers();
        Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/true);
        OS << "string";
        return;
      }
    }
    else if (T->isConstantArrayType()) {
      // char arrays print as "literal string" in HLSL.
      const Type *pElemType = T->getArrayElementTypeNoTypeQual();
      if (pElemType->isSpecificBuiltinType(BuiltinType::Char_S)) {
        OS << "literal string";
        return;
      }
    }
  }
  // HLSL Change Ends
  // Normal path: print the type in two halves around the placeholder.
  printBefore(T, Quals, OS);
  OS << PlaceHolder;
  printAfter(T, Quals, OS);
}
/// Decide whether qualifiers may be printed before the type rather than
/// after it. \param NeedARCStrongQualifier is set when the type is an array,
/// whose element qualifiers include the ARC __strong lifetime.
bool TypePrinter::canPrefixQualifiers(const Type *T,
                                      bool &NeedARCStrongQualifier) {
  // CanPrefixQualifiers - We prefer to print type qualifiers before the type,
  // so that we get "const int" instead of "int const", but we can't do this if
  // the type is complex. For example if the type is "int*", we *must* print
  // "int * const", printing "const int *" is different. Only do this when the
  // type expands to a simple string.
  bool CanPrefixQualifiers = false;
  NeedARCStrongQualifier = false;
  Type::TypeClass TC = T->getTypeClass();
  // Classify deduced 'auto' and substituted template parameters by the
  // underlying type they stand for.
  if (const AutoType *AT = dyn_cast<AutoType>(T))
    TC = AT->desugar()->getTypeClass();
  if (const SubstTemplateTypeParmType *Subst
        = dyn_cast<SubstTemplateTypeParmType>(T))
    TC = Subst->getReplacementType()->getTypeClass();
  switch (TC) {
  // These print as a single token or name, so "const X" reads correctly.
  case Type::Auto:
  case Type::Builtin:
  case Type::Complex:
  case Type::UnresolvedUsing:
  case Type::Typedef:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::Record:
  case Type::Enum:
  case Type::Elaborated:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::TemplateSpecialization:
  case Type::InjectedClassName:
  case Type::DependentName:
  case Type::DependentTemplateSpecialization:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::Atomic:
    CanPrefixQualifiers = true;
    break;
  case Type::ObjCObjectPointer:
    // Only the id/Class (possibly protocol-qualified) spellings are simple
    // enough to take prefixed qualifiers.
    CanPrefixQualifiers = T->isObjCIdType() || T->isObjCClassType() ||
      T->isObjCQualifiedIdType() || T->isObjCQualifiedClassType();
    break;
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::DependentSizedArray:
    // Array qualifiers apply to the elements, which may carry __strong.
    NeedARCStrongQualifier = true;
    LLVM_FALLTHROUGH; // HLSL Change
  // Compound declarator types: qualifiers must print after the type.
  case Type::Adjusted:
  case Type::Decayed:
  case Type::Pointer:
  case Type::BlockPointer:
  case Type::LValueReference:
  case Type::RValueReference:
  case Type::MemberPointer:
  case Type::DependentSizedExtVector:
  case Type::Vector:
  case Type::ExtVector:
  case Type::FunctionProto:
  case Type::FunctionNoProto:
  case Type::Paren:
  case Type::Attributed:
  case Type::PackExpansion:
  case Type::SubstTemplateTypeParm:
    CanPrefixQualifiers = false;
    break;
  }
  return CanPrefixQualifiers;
}
void TypePrinter::printBefore(QualType T, raw_ostream &OS) {
  SplitQualType Split = T.split();
  // If we have cv1 T, where T is substituted for cv2 U, only print cv1 - cv2
  // at this level.
  Qualifiers Quals = Split.Quals;
  if (const SubstTemplateTypeParmType *Subst =
        dyn_cast<SubstTemplateTypeParmType>(Split.Ty))
    Quals -= QualType(Subst, 0).getQualifiers();
  printBefore(Split.Ty, Quals, OS);
}
/// \brief Prints the part of the type string before an identifier, e.g. for
/// "int foo[10]" it prints "int ".
void TypePrinter::printBefore(const Type *T,Qualifiers Quals, raw_ostream &OS) {
  if (Policy.SuppressSpecifiers && T->isSpecifierType())
    return;
  SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder);
  // Print qualifiers as appropriate.
  bool CanPrefixQualifiers = false;
  bool NeedARCStrongQualifier = false;
  CanPrefixQualifiers = canPrefixQualifiers(T, NeedARCStrongQualifier);
  // Simple types: qualifiers go first ("const int").
  if (CanPrefixQualifiers && !Quals.empty()) {
    if (NeedARCStrongQualifier) {
      // Arrays: enable __strong so element lifetime is visible.
      IncludeStrongLifetimeRAII Strong(Policy);
      Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/true);
    } else {
      Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/true);
    }
  }
  // Compound types: qualifiers print after the type spelling below.
  bool hasAfterQuals = false;
  if (!CanPrefixQualifiers && !Quals.empty()) {
    hasAfterQuals = !Quals.isEmptyWhenPrinted(Policy);
    if (hasAfterQuals)
      HasEmptyPlaceHolder = false;
  }
  // Dispatch to the per-type-class printer (generated from TypeNodes.def).
  switch (T->getTypeClass()) {
#define ABSTRACT_TYPE(CLASS, PARENT)
#define TYPE(CLASS, PARENT) case Type::CLASS: \
    print##CLASS##Before(cast<CLASS##Type>(T), OS); \
    break;
#include "clang/AST/TypeNodes.def"
  }
  if (hasAfterQuals) {
    if (NeedARCStrongQualifier) {
      IncludeStrongLifetimeRAII Strong(Policy);
      Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/!PrevPHIsEmpty.get());
    } else {
      Quals.print(OS, Policy, /*appendSpaceIfNonEmpty=*/!PrevPHIsEmpty.get());
    }
  }
}
void TypePrinter::printAfter(QualType t, raw_ostream &OS) {
  SplitQualType split = t.split();
  printAfter(split.Ty, split.Quals, OS);
}
/// \brief Prints the part of the type string after an identifier, e.g. for
/// "int foo[10]" it prints "[10]".
void TypePrinter::printAfter(const Type *T, Qualifiers Quals, raw_ostream &OS) {
  // Dispatch to the per-type-class printer (generated from TypeNodes.def).
  switch (T->getTypeClass()) {
#define ABSTRACT_TYPE(CLASS, PARENT)
#define TYPE(CLASS, PARENT) case Type::CLASS: \
    print##CLASS##After(cast<CLASS##Type>(T), OS); \
    break;
#include "clang/AST/TypeNodes.def"
  }
}
// Builtin types print entirely before the declarator (e.g. "int ").
void TypePrinter::printBuiltinBefore(const BuiltinType *T, raw_ostream &OS) {
  OS << T->getName(Policy);
  spaceBeforePlaceHolder(OS);
}
void TypePrinter::printBuiltinAfter(const BuiltinType *T, raw_ostream &OS) { }
// _Complex T: keyword plus the element type's halves.
void TypePrinter::printComplexBefore(const ComplexType *T, raw_ostream &OS) {
  OS << "_Complex ";
  printBefore(T->getElementType(), OS);
}
void TypePrinter::printComplexAfter(const ComplexType *T, raw_ostream &OS) {
  printAfter(T->getElementType(), OS);
}
void TypePrinter::printPointerBefore(const PointerType *T, raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  printBefore(T->getPointeeType(), OS);
  // Handle things like 'int (*A)[4];' correctly.
  // FIXME: this should include vectors, but vectors use attributes I guess.
  if (isa<ArrayType>(T->getPointeeType()))
    OS << '(';
  OS << '*';
}
void TypePrinter::printPointerAfter(const PointerType *T, raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  // Handle things like 'int (*A)[4];' correctly.
  // FIXME: this should include vectors, but vectors use attributes I guess.
  if (isa<ArrayType>(T->getPointeeType()))
    OS << ')';
  printAfter(T->getPointeeType(), OS);
}
// Block pointers: like pointers but spelled with '^'.
void TypePrinter::printBlockPointerBefore(const BlockPointerType *T,
                                          raw_ostream &OS) {
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  printBefore(T->getPointeeType(), OS);
  OS << '^';
}
void TypePrinter::printBlockPointerAfter(const BlockPointerType *T,
                                          raw_ostream &OS) {
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  printAfter(T->getPointeeType(), OS);
}
// Lvalue references: print as written (before reference collapsing).
void TypePrinter::printLValueReferenceBefore(const LValueReferenceType *T,
                                             raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  printBefore(T->getPointeeTypeAsWritten(), OS);
  // Handle things like 'int (&A)[4];' correctly.
  // FIXME: this should include vectors, but vectors use attributes I guess.
  if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
    OS << '(';
  OS << '&';
}
void TypePrinter::printLValueReferenceAfter(const LValueReferenceType *T,
                                            raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  // Handle things like 'int (&A)[4];' correctly.
  // FIXME: this should include vectors, but vectors use attributes I guess.
  if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
    OS << ')';
  printAfter(T->getPointeeTypeAsWritten(), OS);
}
// Rvalue references: same shape, spelled '&&'.
void TypePrinter::printRValueReferenceBefore(const RValueReferenceType *T,
                                             raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  printBefore(T->getPointeeTypeAsWritten(), OS);
  // Handle things like 'int (&&A)[4];' correctly.
  // FIXME: this should include vectors, but vectors use attributes I guess.
  if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
    OS << '(';
  OS << "&&";
}
void TypePrinter::printRValueReferenceAfter(const RValueReferenceType *T,
                                            raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  // Handle things like 'int (&&A)[4];' correctly.
  // FIXME: this should include vectors, but vectors use attributes I guess.
  if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
    OS << ')';
  printAfter(T->getPointeeTypeAsWritten(), OS);
}
// Member pointers: "T Cls::*" — the class name is printed with a nested
// printer so tags are not suppressed.
void TypePrinter::printMemberPointerBefore(const MemberPointerType *T,
                                           raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  printBefore(T->getPointeeType(), OS);
  // Handle things like 'int (Cls::*A)[4];' correctly.
  // FIXME: this should include vectors, but vectors use attributes I guess.
  if (isa<ArrayType>(T->getPointeeType()))
    OS << '(';
  PrintingPolicy InnerPolicy(Policy);
  InnerPolicy.SuppressTag = false;
  TypePrinter(InnerPolicy).print(QualType(T->getClass(), 0), OS, StringRef());
  OS << "::*";
}
void TypePrinter::printMemberPointerAfter(const MemberPointerType *T,
                                          raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  // Handle things like 'int (Cls::*A)[4];' correctly.
  // FIXME: this should include vectors, but vectors use attributes I guess.
  if (isa<ArrayType>(T->getPointeeType()))
    OS << ')';
  printAfter(T->getPointeeType(), OS);
}
// Arrays: the element type prints before the name, "[...]" prints after.
void TypePrinter::printConstantArrayBefore(const ConstantArrayType *T,
                                           raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  printBefore(T->getElementType(), OS);
}
void TypePrinter::printConstantArrayAfter(const ConstantArrayType *T,
                                          raw_ostream &OS) {
  OS << '[';
  // C99 allows qualifiers and 'static' inside array parameter brackets.
  if (T->getIndexTypeQualifiers().hasQualifiers()) {
    AppendTypeQualList(OS, T->getIndexTypeCVRQualifiers(), Policy.LangOpts.C99);
    OS << ' ';
  }
  if (T->getSizeModifier() == ArrayType::Static)
    OS << "static ";
  OS << T->getSize().getZExtValue() << ']';
  printAfter(T->getElementType(), OS);
}
void TypePrinter::printIncompleteArrayBefore(const IncompleteArrayType *T,
                                             raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  printBefore(T->getElementType(), OS);
}
void TypePrinter::printIncompleteArrayAfter(const IncompleteArrayType *T,
                                            raw_ostream &OS) {
  OS << "[]";
  printAfter(T->getElementType(), OS);
}
void TypePrinter::printVariableArrayBefore(const VariableArrayType *T,
                                           raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  printBefore(T->getElementType(), OS);
}
void TypePrinter::printVariableArrayAfter(const VariableArrayType *T,
                                          raw_ostream &OS) {
  OS << '[';
  if (T->getIndexTypeQualifiers().hasQualifiers()) {
    AppendTypeQualList(OS, T->getIndexTypeCVRQualifiers(), Policy.LangOpts.C99);
    OS << ' ';
  }
  if (T->getSizeModifier() == VariableArrayType::Static)
    OS << "static ";
  else if (T->getSizeModifier() == VariableArrayType::Star)
    OS << '*'; // VLA of unspecified size: 'int a[*]'
  if (T->getSizeExpr())
    T->getSizeExpr()->printPretty(OS, nullptr, Policy);
  OS << ']';
  printAfter(T->getElementType(), OS);
}
void TypePrinter::printAdjustedBefore(const AdjustedType *T, raw_ostream &OS) {
  // Print the adjusted representation, otherwise the adjustment will be
  // invisible.
  printBefore(T->getAdjustedType(), OS);
}
void TypePrinter::printAdjustedAfter(const AdjustedType *T, raw_ostream &OS) {
  printAfter(T->getAdjustedType(), OS);
}
// Decayed array/function parameters delegate to the adjusted printers.
void TypePrinter::printDecayedBefore(const DecayedType *T, raw_ostream &OS) {
  // Print as though it's a pointer.
  printAdjustedBefore(T, OS);
}
void TypePrinter::printDecayedAfter(const DecayedType *T, raw_ostream &OS) {
  printAdjustedAfter(T, OS);
}
// Dependent-sized arrays: like other arrays, but the size is an
// unevaluated (dependent) expression printed verbatim.
void TypePrinter::printDependentSizedArrayBefore(
                                               const DependentSizedArrayType *T,
                                               raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  printBefore(T->getElementType(), OS);
}
void TypePrinter::printDependentSizedArrayAfter(
                                               const DependentSizedArrayType *T,
                                               raw_ostream &OS) {
  OS << '[';
  if (T->getSizeExpr())
    T->getSizeExpr()->printPretty(OS, nullptr, Policy);
  OS << ']';
  printAfter(T->getElementType(), OS);
}
// Dependent-sized ext vectors print as the GNU ext_vector_type attribute.
void TypePrinter::printDependentSizedExtVectorBefore(
                                          const DependentSizedExtVectorType *T,
                                          raw_ostream &OS) {
  printBefore(T->getElementType(), OS);
}
void TypePrinter::printDependentSizedExtVectorAfter(
                                          const DependentSizedExtVectorType *T,
                                          raw_ostream &OS) {
  OS << " __attribute__((ext_vector_type(";
  if (T->getSizeExpr())
    T->getSizeExpr()->printPretty(OS, nullptr, Policy);
  OS << ")))";
  printAfter(T->getElementType(), OS);
}
// Vector types are spelled per their source flavor: AltiVec keywords,
// NEON attributes, or the generic GNU __vector_size__ attribute.
void TypePrinter::printVectorBefore(const VectorType *T, raw_ostream &OS) {
  switch (T->getVectorKind()) {
  case VectorType::AltiVecPixel:
    // '__vector __pixel' has a fixed element type; nothing else to print.
    OS << "__vector __pixel ";
    break;
  case VectorType::AltiVecBool:
    OS << "__vector __bool ";
    printBefore(T->getElementType(), OS);
    break;
  case VectorType::AltiVecVector:
    OS << "__vector ";
    printBefore(T->getElementType(), OS);
    break;
  case VectorType::NeonVector:
    OS << "__attribute__((neon_vector_type("
       << T->getNumElements() << "))) ";
    printBefore(T->getElementType(), OS);
    break;
  case VectorType::NeonPolyVector:
    OS << "__attribute__((neon_polyvector_type(" <<
          T->getNumElements() << "))) ";
    printBefore(T->getElementType(), OS);
    break;
  case VectorType::GenericVector: {
    // FIXME: We prefer to print the size directly here, but have no way
    // to get the size of the type.
    OS << "__attribute__((__vector_size__("
       << T->getNumElements()
       << " * sizeof(";
    print(T->getElementType(), OS, StringRef());
    OS << ")))) ";
    printBefore(T->getElementType(), OS);
    break;
  }
  }
}
void TypePrinter::printVectorAfter(const VectorType *T, raw_ostream &OS) {
  printAfter(T->getElementType(), OS);
}
// OpenCL/ext vectors print the attribute after the element type.
void TypePrinter::printExtVectorBefore(const ExtVectorType *T,
                                       raw_ostream &OS) {
  printBefore(T->getElementType(), OS);
}
void TypePrinter::printExtVectorAfter(const ExtVectorType *T, raw_ostream &OS) {
  printAfter(T->getElementType(), OS);
  OS << " __attribute__((ext_vector_type(";
  OS << T->getNumElements();
  OS << ")))";
}
/// Print this function type's exception specification: either a dynamic
/// 'throw(...)' clause (with "..." for the MS any-exception form) or a
/// 'noexcept' / 'noexcept(expr)' clause. Prints nothing when there is none.
void
FunctionProtoType::printExceptionSpecification(raw_ostream &OS,
                                               const PrintingPolicy &Policy)
                                                                         const {
  if (hasDynamicExceptionSpec()) {
    OS << " throw(";
    if (getExceptionSpecType() == EST_MSAny)
      OS << "...";
    else
      // Comma-separated list of the declared exception types.
      for (unsigned I = 0, N = getNumExceptions(); I != N; ++I) {
        if (I)
          OS << ", ";
        OS << getExceptionType(I).stream(Policy);
      }
    OS << ')';
  } else if (isNoexceptExceptionSpec(getExceptionSpecType())) {
    OS << " noexcept";
    if (getExceptionSpecType() == EST_ComputedNoexcept) {
      OS << '(';
      if (getNoexceptExpr())
        getNoexceptExpr()->printPretty(OS, nullptr, Policy);
      OS << ')';
    }
  }
}
// Before the declarator of a prototyped function type: either the 'auto'
// keyword (trailing-return style) or the return type's "before" half.
void TypePrinter::printFunctionProtoBefore(const FunctionProtoType *T,
                                           raw_ostream &OS) {
  if (T->hasTrailingReturn()) {
    OS << "auto ";
    if (!HasEmptyPlaceHolder)
      OS << '(';
  } else {
    // If needed for precedence reasons, wrap the inner part in grouping parens.
    SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false);
    printBefore(T->getReturnType(), OS);
    if (!PrevPHIsEmpty.get())
      OS << '(';
  }
}
// After the declarator of a prototyped function type: parameter list,
// variadic "...", calling-convention and other attributes, cv/ref
// qualifiers, exception spec, and finally the (trailing) return type.
void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T,
                                          raw_ostream &OS) {
  // If needed for precedence reasons, wrap the inner part in grouping parens.
  if (!HasEmptyPlaceHolder)
    OS << ')';
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  OS << '(';
  {
    // Parameters always print their specifiers (ParamPolicyRAII).
    ParamPolicyRAII ParamPolicy(Policy);
    for (unsigned i = 0, e = T->getNumParams(); i != e; ++i) {
      if (i) OS << ", ";
      print(T->getParamType(i), OS, StringRef());
    }
  }
  if (T->isVariadic()) {
    if (T->getNumParams())
      OS << ", ";
    OS << "...";
  } else if (T->getNumParams() == 0 && !Policy.LangOpts.CPlusPlus) {
    // Do not emit int() if we have a proto, emit 'int(void)'.
    OS << "void";
  }
  OS << ')';
  FunctionType::ExtInfo Info = T->getExtInfo();
  // Skip the convention if we are already inside a cc attribute being
  // printed; it would be redundant.
  if (!InsideCCAttribute) {
    switch (Info.getCC()) {
    case CC_C:
      // The C calling convention is the default on the vast majority of platforms
      // we support. If the user wrote it explicitly, it will usually be printed
      // while traversing the AttributedType. If the type has been desugared, let
      // the canonical spelling be the implicit calling convention.
      // FIXME: It would be better to be explicit in certain contexts, such as a
      // cdecl function typedef used to declare a member function with the
      // Microsoft C++ ABI.
      break;
    case CC_X86StdCall:
      OS << " __attribute__((stdcall))";
      break;
    case CC_X86FastCall:
      OS << " __attribute__((fastcall))";
      break;
    case CC_X86ThisCall:
      OS << " __attribute__((thiscall))";
      break;
    case CC_X86VectorCall:
      OS << " __attribute__((vectorcall))";
      break;
    case CC_X86Pascal:
      OS << " __attribute__((pascal))";
      break;
    case CC_AAPCS:
      OS << " __attribute__((pcs(\"aapcs\")))";
      break;
    case CC_AAPCS_VFP:
      OS << " __attribute__((pcs(\"aapcs-vfp\")))";
      break;
    case CC_IntelOclBicc:
      OS << " __attribute__((intel_ocl_bicc))";
      break;
    case CC_X86_64Win64:
      OS << " __attribute__((ms_abi))";
      break;
    case CC_X86_64SysV:
      OS << " __attribute__((sysv_abi))";
      break;
    case CC_SpirFunction:
    case CC_SpirKernel:
      // Do nothing. These CCs are not available as attributes.
      break;
    }
  }
  if (Info.getNoReturn())
    OS << " __attribute__((noreturn))";
  if (Info.getRegParm())
    OS << " __attribute__((regparm ("
       << Info.getRegParm() << ")))";
  // Member-function cv-qualifiers ('const', 'volatile', ...).
  if (unsigned quals = T->getTypeQuals()) {
    OS << ' ';
    AppendTypeQualList(OS, quals, Policy.LangOpts.C99);
  }
  // Member-function ref-qualifier ('&' or '&&').
  switch (T->getRefQualifier()) {
  case RQ_None:
    break;
  case RQ_LValue:
    OS << " &";
    break;
  case RQ_RValue:
    OS << " &&";
    break;
  }
  T->printExceptionSpecification(OS, Policy);
  if (T->hasTrailingReturn()) {
    OS << " -> ";
    print(T->getReturnType(), OS, StringRef());
  } else
    printAfter(T->getReturnType(), OS);
}
// K&R-style functions without a prototype: printed as "T name()".
void TypePrinter::printFunctionNoProtoBefore(const FunctionNoProtoType *T,
                                             raw_ostream &OS) {
  // If needed for precedence reasons, wrap the inner part in grouping parens.
  SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false);
  printBefore(T->getReturnType(), OS);
  if (!PrevPHIsEmpty.get())
    OS << '(';
}
void TypePrinter::printFunctionNoProtoAfter(const FunctionNoProtoType *T,
                                            raw_ostream &OS) {
  // If needed for precedence reasons, wrap the inner part in grouping parens.
  if (!HasEmptyPlaceHolder)
    OS << ')';
  SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
  OS << "()";
  if (T->getNoReturnAttr())
    OS << " __attribute__((noreturn))";
  printAfter(T->getReturnType(), OS);
}
// Print a type that is just a declaration name (typedef, using-decl, ...).
// NOTE(review): II is dereferenced without a null check — presumably these
// decls always have an identifier; confirm for all callers.
void TypePrinter::printTypeSpec(const NamedDecl *D, raw_ostream &OS) {
  IdentifierInfo *II = D->getIdentifier();
  OS << II->getName();
  spaceBeforePlaceHolder(OS);
}
void TypePrinter::printUnresolvedUsingBefore(const UnresolvedUsingType *T,
                                             raw_ostream &OS) {
  printTypeSpec(T->getDecl(), OS);
}
void TypePrinter::printUnresolvedUsingAfter(const UnresolvedUsingType *T,
                                             raw_ostream &OS) { }
void TypePrinter::printTypedefBefore(const TypedefType *T, raw_ostream &OS) {
  printTypeSpec(T->getDecl(), OS);
}
void TypePrinter::printTypedefAfter(const TypedefType *T, raw_ostream &OS) { }
// GNU 'typeof expr': keyword followed by the pretty-printed expression.
void TypePrinter::printTypeOfExprBefore(const TypeOfExprType *T,
                                        raw_ostream &OS) {
  OS << "typeof ";
  if (T->getUnderlyingExpr())
    T->getUnderlyingExpr()->printPretty(OS, nullptr, Policy);
  spaceBeforePlaceHolder(OS);
}
void TypePrinter::printTypeOfExprAfter(const TypeOfExprType *T,
                                       raw_ostream &OS) { }
// GNU 'typeof(type)'.
void TypePrinter::printTypeOfBefore(const TypeOfType *T, raw_ostream &OS) {
  OS << "typeof(";
  print(T->getUnderlyingType(), OS, StringRef());
  OS << ')';
  spaceBeforePlaceHolder(OS);
}
void TypePrinter::printTypeOfAfter(const TypeOfType *T, raw_ostream &OS) { }
// C++11 'decltype(expr)'.
void TypePrinter::printDecltypeBefore(const DecltypeType *T, raw_ostream &OS) {
  OS << "decltype(";
  if (T->getUnderlyingExpr())
    T->getUnderlyingExpr()->printPretty(OS, nullptr, Policy);
  OS << ')';
  spaceBeforePlaceHolder(OS);
}
void TypePrinter::printDecltypeAfter(const DecltypeType *T, raw_ostream &OS) { }
// Unary type transforms, currently only __underlying_type(T). Unknown
// kinds fall through to printing the base type's halves.
void TypePrinter::printUnaryTransformBefore(const UnaryTransformType *T,
                                            raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  switch (T->getUTTKind()) {
    case UnaryTransformType::EnumUnderlyingType:
      OS << "__underlying_type(";
      print(T->getBaseType(), OS, StringRef());
      OS << ')';
      spaceBeforePlaceHolder(OS);
      return;
  }
  printBefore(T->getBaseType(), OS);
}
void TypePrinter::printUnaryTransformAfter(const UnaryTransformType *T,
                                           raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  switch (T->getUTTKind()) {
    case UnaryTransformType::EnumUnderlyingType:
      // Fully printed by the Before half; nothing after the name.
      return;
  }
  printAfter(T->getBaseType(), OS);
}
void TypePrinter::printAutoBefore(const AutoType *T, raw_ostream &OS) {
// If the type has been deduced, do not print 'auto'.
if (!T->getDeducedType().isNull()) {
printBefore(T->getDeducedType(), OS);
} else {
OS << (T->isDecltypeAuto() ? "decltype(auto)" : "auto");
spaceBeforePlaceHolder(OS);
}
}
void TypePrinter::printAutoAfter(const AutoType *T, raw_ostream &OS) {
// If the type has been deduced, do not print 'auto'.
if (!T->getDeducedType().isNull())
printAfter(T->getDeducedType(), OS);
}
// C11 '_Atomic(T)': fully printed before the declarator name.
void TypePrinter::printAtomicBefore(const AtomicType *T, raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  OS << "_Atomic(";
  print(T->getValueType(), OS, StringRef());
  OS << ')';
  spaceBeforePlaceHolder(OS);
}
void TypePrinter::printAtomicAfter(const AtomicType *T, raw_ostream &OS) { }
/// Appends the given scope to the end of a string, as a "::"-separated
/// nested-name-specifier (e.g. "ns::Outer::"). Recurses outward-in so the
/// outermost scope is printed first.
void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS) {
  // Translation unit and function/method scopes contribute nothing.
  if (DC->isTranslationUnit()) return;
  if (DC->isFunctionOrMethod()) return;
  AppendScope(DC->getParent(), OS);
  if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(DC)) {
    // Optionally skip scopes the user never wrote (anonymous/inline).
    if (Policy.SuppressUnwrittenScope &&
        (NS->isAnonymousNamespace() || NS->isInline()))
      return;
    if (NS->getIdentifier())
      OS << NS->getName() << "::";
    else
      OS << "(anonymous namespace)::";
  } else if (ClassTemplateSpecializationDecl *Spec
               = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
    // Specializations print their template arguments too: "Vec<int>::".
    IncludeStrongLifetimeRAII Strong(Policy);
    OS << Spec->getIdentifier()->getName();
    const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
    TemplateSpecializationType::PrintTemplateArgumentList(OS,
                                                          TemplateArgs.data(),
                                                          TemplateArgs.size(),
                                                          Policy);
    OS << "::";
  } else if (TagDecl *Tag = dyn_cast<TagDecl>(DC)) {
    // Anonymous tags borrow the name of a typedef naming them, if any;
    // otherwise the whole scope chain is dropped (early return).
    if (TypedefNameDecl *Typedef = Tag->getTypedefNameForAnonDecl())
      OS << Typedef->getIdentifier()->getName() << "::";
    else if (Tag->getIdentifier())
      OS << Tag->getIdentifier()->getName() << "::";
    else
      return;
  }
}
/// Print a tag (struct/class/union/enum) type's name: optional kind
/// keyword, enclosing scope, identifier (or an anonymous/lambda
/// placeholder), and, for class template specializations, the template
/// argument list.
void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
  if (Policy.SuppressTag)
    return;
  bool HasKindDecoration = false;
  // bool SuppressTagKeyword
  //   = Policy.LangOpts.CPlusPlus || Policy.SuppressTagKeyword;
  // We don't print tags unless this is an elaborated type.
  // In C, we just assume every RecordType is an elaborated type.
  // if (!(Policy.LangOpts.CPlusPlus || Policy.SuppressTagKeyword ||
  if (!(Policy.SuppressTagKeyword || // HLSL Change - let SuppressTagKeyword control this in every instance
        D->getTypedefNameForAnonDecl())) {
    HasKindDecoration = true;
    OS << D->getKindName();
    OS << ' ';
  }
  // Compute the full nested-name-specifier for this type.
  // In C, this will always be empty except when the type
  // being printed is anonymous within other Record.
  if (!Policy.SuppressScope)
    AppendScope(D->getDeclContext(), OS);
  if (const IdentifierInfo *II = D->getIdentifier())
    OS << II->getName();
  else if (TypedefNameDecl *Typedef = D->getTypedefNameForAnonDecl()) {
    assert(Typedef->getIdentifier() && "Typedef without identifier?");
    OS << Typedef->getIdentifier()->getName();
  } else {
    // Make an unambiguous representation for anonymous types, e.g.
    //   (anonymous enum at /usr/include/string.h:120:9)
    if (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda()) {
      OS << "(lambda";
      HasKindDecoration = true;
    } else {
      OS << "(anonymous";
    }
    if (Policy.AnonymousTagLocations) {
      // Suppress the redundant tag keyword if we just printed one.
      // We don't have to worry about ElaboratedTypes here because you can't
      // refer to an anonymous type with one.
      if (!HasKindDecoration)
        OS << " " << D->getKindName();
      PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc(
          D->getLocation());
      if (PLoc.isValid()) {
        OS << " at " << PLoc.getFilename()
           << ':' << PLoc.getLine()
           << ':' << PLoc.getColumn();
      }
    }
    OS << ')';
  }
  // If this is a class template specialization, print the template
  // arguments.
  if (ClassTemplateSpecializationDecl *Spec
        = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    const TemplateArgument *Args;
    unsigned NumArgs;
    if (TypeSourceInfo *TAW = Spec->getTypeAsWritten()) {
      // Prefer the arguments exactly as written in the source.
      const TemplateSpecializationType *TST =
        cast<TemplateSpecializationType>(TAW->getType());
      Args = TST->getArgs();
      NumArgs = TST->getNumArgs();
    } else {
      // HLSL Change Starts
      ClassTemplateDecl *TD = Spec->getSpecializedTemplate();
      TemplateParameterList *Params = TD->getTemplateParameters();
      // If this is an HLSL default template specialization, omit the template
      // argument list, unless this is a vector or matrix type.
      if (Policy.LangOpts.HLSL && Policy.HLSLOmitDefaultTemplateParams &&
          Params->getLAngleLoc() == Params->getRAngleLoc() &&
          (TD->getName() != "vector" && TD->getName() != "matrix")) {
        spaceBeforePlaceHolder(OS);
        return;
      }
      // HLSL Change Ends
      const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
      Args = TemplateArgs.data();
      NumArgs = TemplateArgs.size();
    }
    IncludeStrongLifetimeRAII Strong(Policy);
    TemplateSpecializationType::PrintTemplateArgumentList(OS,
                                                          Args, NumArgs,
                                                          Policy);
  }
  spaceBeforePlaceHolder(OS);
}
/// Record and enum types print via the shared tag printer.
void TypePrinter::printRecordBefore(const RecordType *T, raw_ostream &OS) {
  printTag(T->getDecl(), OS);
}
void TypePrinter::printRecordAfter(const RecordType *T, raw_ostream &OS) { }
void TypePrinter::printEnumBefore(const EnumType *T, raw_ostream &OS) {
  printTag(T->getDecl(), OS);
}
void TypePrinter::printEnumAfter(const EnumType *T, raw_ostream &OS) { }
/// Print a template type parameter by name, or a synthesized
/// 'type-parameter-D-I' placeholder when the parameter is unnamed.
void TypePrinter::printTemplateTypeParmBefore(const TemplateTypeParmType *T,
                                              raw_ostream &OS) {
  if (IdentifierInfo *Id = T->getIdentifier())
    OS << Id->getName();
  else
    OS << "type-parameter-" << T->getDepth() << '-' << T->getIndex();
  spaceBeforePlaceHolder(OS);
}
void TypePrinter::printTemplateTypeParmAfter(const TemplateTypeParmType *T,
                                             raw_ostream &OS) { }
/// A substituted template parameter prints as its replacement type.
void TypePrinter::printSubstTemplateTypeParmBefore(
                                             const SubstTemplateTypeParmType *T,
                                             raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  printBefore(T->getReplacementType(), OS);
}
void TypePrinter::printSubstTemplateTypeParmAfter(
                                             const SubstTemplateTypeParmType *T,
                                             raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  printAfter(T->getReplacementType(), OS);
}
/// An as-yet-unexpanded parameter pack prints as the parameter it replaces.
void TypePrinter::printSubstTemplateTypeParmPackBefore(
                                        const SubstTemplateTypeParmPackType *T,
                                        raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  printTemplateTypeParmBefore(T->getReplacedParameter(), OS);
}
void TypePrinter::printSubstTemplateTypeParmPackAfter(
                                        const SubstTemplateTypeParmPackType *T,
                                        raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  printTemplateTypeParmAfter(T->getReplacedParameter(), OS);
}
/// Print a template-id such as 'vector<int, 4>': the template name
/// followed by its argument list.
void TypePrinter::printTemplateSpecializationBefore(
                                            const TemplateSpecializationType *T,
                                            raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  T->getTemplateName().print(OS, Policy);
  TemplateSpecializationType::PrintTemplateArgumentList(OS,
                                                        T->getArgs(),
                                                        T->getNumArgs(),
                                                        Policy);
  spaceBeforePlaceHolder(OS);
}
void TypePrinter::printTemplateSpecializationAfter(
                                            const TemplateSpecializationType *T,
                                            raw_ostream &OS) { }
/// The injected class name prints as the equivalent template-id.
void TypePrinter::printInjectedClassNameBefore(const InjectedClassNameType *T,
                                               raw_ostream &OS) {
  printTemplateSpecializationBefore(T->getInjectedTST(), OS);
}
void TypePrinter::printInjectedClassNameAfter(const InjectedClassNameType *T,
                                               raw_ostream &OS) { }
/// Print an elaborated type: optional keyword ('struct', 'typename', ...),
/// optional nested-name-specifier, then the named type.
void TypePrinter::printElaboratedBefore(const ElaboratedType *T,
                                        raw_ostream &OS) {
  // With SuppressTag set, an elaborated tag type prints nothing at all.
  if (Policy.SuppressTag && isa<TagType>(T->getNamedType()))
    return;
  OS << TypeWithKeyword::getKeywordName(T->getKeyword());
  if (T->getKeyword() != ETK_None)
    OS << " ";
  NestedNameSpecifier* Qualifier = T->getQualifier();
  if (Qualifier)
    Qualifier->print(OS, Policy);
  ElaboratedTypePolicyRAII PolicyRAII(Policy);
  printBefore(T->getNamedType(), OS);
}
void TypePrinter::printElaboratedAfter(const ElaboratedType *T,
                                        raw_ostream &OS) {
  ElaboratedTypePolicyRAII PolicyRAII(Policy);
  printAfter(T->getNamedType(), OS);
}
/// Print the leading part of a parenthesized type. An opening paren is only
/// emitted when there is a declarator placeholder to wrap and the inner
/// type is not a function type.
void TypePrinter::printParenBefore(const ParenType *T, raw_ostream &OS) {
  bool NeedsParen =
      !HasEmptyPlaceHolder && !isa<FunctionType>(T->getInnerType());
  printBefore(T->getInnerType(), OS);
  if (NeedsParen)
    OS << '(';
}
/// Print the trailing part of a parenthesized type, closing the paren
/// opened by printParenBefore under the same condition.
void TypePrinter::printParenAfter(const ParenType *T, raw_ostream &OS) {
  bool NeedsParen =
      !HasEmptyPlaceHolder && !isa<FunctionType>(T->getInnerType());
  if (NeedsParen)
    OS << ')';
  printAfter(T->getInnerType(), OS);
}
/// Print a dependent name type: keyword, qualifier, then the identifier,
/// e.g. 'typename T::value_type'.
void TypePrinter::printDependentNameBefore(const DependentNameType *T,
                                           raw_ostream &OS) {
  OS << TypeWithKeyword::getKeywordName(T->getKeyword());
  if (T->getKeyword() != ETK_None)
    OS << " ";
  T->getQualifier()->print(OS, Policy);
  OS << T->getIdentifier()->getName();
  spaceBeforePlaceHolder(OS);
}
void TypePrinter::printDependentNameAfter(const DependentNameType *T,
                                          raw_ostream &OS) { }
/// Print a dependent template-id, e.g. 'typename T::template apply<U>'.
void TypePrinter::printDependentTemplateSpecializationBefore(
        const DependentTemplateSpecializationType *T, raw_ostream &OS) {
  IncludeStrongLifetimeRAII Strong(Policy);
  OS << TypeWithKeyword::getKeywordName(T->getKeyword());
  if (T->getKeyword() != ETK_None)
    OS << " ";
  if (T->getQualifier())
    T->getQualifier()->print(OS, Policy);
  OS << T->getIdentifier()->getName();
  TemplateSpecializationType::PrintTemplateArgumentList(OS,
                                                        T->getArgs(),
                                                        T->getNumArgs(),
                                                        Policy);
  spaceBeforePlaceHolder(OS);
}
void TypePrinter::printDependentTemplateSpecializationAfter(
        const DependentTemplateSpecializationType *T, raw_ostream &OS) { }
/// A pack expansion prints as its pattern followed by '...'.
void TypePrinter::printPackExpansionBefore(const PackExpansionType *T,
                                           raw_ostream &OS) {
  printBefore(T->getPattern(), OS);
}
void TypePrinter::printPackExpansionAfter(const PackExpansionType *T,
                                          raw_ostream &OS) {
  printAfter(T->getPattern(), OS);
  OS << "...";
}
/// Print the portion of an attributed type that precedes (or immediately
/// follows) the modified type: HLSL type specifiers, __kindof, MS pointer
/// modifiers, and nullability specifiers.
void TypePrinter::printAttributedBefore(const AttributedType *T,
                                        raw_ostream &OS) {
  // Prefer the macro forms of the GC and ownership qualifiers.
  if (T->getAttrKind() == AttributedType::attr_objc_gc ||
      T->getAttrKind() == AttributedType::attr_objc_ownership)
    return printBefore(T->getEquivalentType(), OS);
  // HLSL Change Starts
  if (T->isHLSLTypeSpec()) {
    switch (T->getAttrKind()) {
    case AttributedType::attr_hlsl_row_major: OS << "row_major "; break;
    case AttributedType::attr_hlsl_column_major: OS << "column_major "; break;
    case AttributedType::attr_hlsl_unorm: OS << "unorm "; break;
    case AttributedType::attr_hlsl_snorm: OS << "snorm "; break;
    case AttributedType::attr_hlsl_globallycoherent:
      OS << "globallycoherent ";
      break;
    default:
      // Only HLSL attribute types are covered.
      break;
    }
  }
  // HLSL Change Ends
  if (T->getAttrKind() == AttributedType::attr_objc_kindof)
    OS << "__kindof ";
  printBefore(T->getModifiedType(), OS);
  if (T->isMSTypeSpec()) {
    switch (T->getAttrKind()) {
    default: return;
    case AttributedType::attr_ptr32: OS << " __ptr32"; break;
    case AttributedType::attr_ptr64: OS << " __ptr64"; break;
    case AttributedType::attr_sptr: OS << " __sptr"; break;
    case AttributedType::attr_uptr: OS << " __uptr"; break;
    }
    spaceBeforePlaceHolder(OS);
  }
  // Print nullability type specifiers.
  if (T->getAttrKind() == AttributedType::attr_nonnull ||
      T->getAttrKind() == AttributedType::attr_nullable ||
      T->getAttrKind() == AttributedType::attr_null_unspecified) {
    if (T->getAttrKind() == AttributedType::attr_nonnull)
      OS << " _Nonnull";
    else if (T->getAttrKind() == AttributedType::attr_nullable)
      OS << " _Nullable";
    else if (T->getAttrKind() == AttributedType::attr_null_unspecified)
      OS << " _Null_unspecified";
    else
      llvm_unreachable("unhandled nullability");
    spaceBeforePlaceHolder(OS);
  }
}
void TypePrinter::printAttributedAfter(const AttributedType *T,
raw_ostream &OS) {
// Prefer the macro forms of the GC and ownership qualifiers.
if (T->getAttrKind() == AttributedType::attr_objc_gc ||
T->getAttrKind() == AttributedType::attr_objc_ownership)
return printAfter(T->getEquivalentType(), OS);
if (T->getAttrKind() == AttributedType::attr_objc_kindof)
return;
// TODO: not all attributes are GCC-style attributes.
if (T->isMSTypeSpec())
return;
if (T->isHLSLTypeSpec()) // HLSL Change
return;
// Nothing to print after.
if (T->getAttrKind() == AttributedType::attr_nonnull ||
T->getAttrKind() == AttributedType::attr_nullable ||
T->getAttrKind() == AttributedType::attr_null_unspecified)
return printAfter(T->getModifiedType(), OS);
// If this is a calling convention attribute, don't print the implicit CC from
// the modified type.
SaveAndRestore<bool> MaybeSuppressCC(InsideCCAttribute, T->isCallingConv());
printAfter(T->getModifiedType(), OS);
// Print nullability type specifiers that occur after
if (T->getAttrKind() == AttributedType::attr_nonnull ||
T->getAttrKind() == AttributedType::attr_nullable ||
T->getAttrKind() == AttributedType::attr_null_unspecified) {
if (T->getAttrKind() == AttributedType::attr_nonnull)
OS << " _Nonnull";
else if (T->getAttrKind() == AttributedType::attr_nullable)
OS << " _Nullable";
else if (T->getAttrKind() == AttributedType::attr_null_unspecified)
OS << " _Null_unspecified";
else
llvm_unreachable("unhandled nullability");
return;
}
OS << " __attribute__((";
switch (T->getAttrKind()) {
default: llvm_unreachable("This attribute should have been handled already");
case AttributedType::attr_address_space:
OS << "address_space(";
OS << T->getEquivalentType().getAddressSpace();
OS << ')';
break;
case AttributedType::attr_vector_size: {
OS << "__vector_size__(";
if (const VectorType *vector =T->getEquivalentType()->getAs<VectorType>()) {
OS << vector->getNumElements();
OS << " * sizeof(";
print(vector->getElementType(), OS, StringRef());
OS << ')';
}
OS << ')';
break;
}
case AttributedType::attr_neon_vector_type:
case AttributedType::attr_neon_polyvector_type: {
if (T->getAttrKind() == AttributedType::attr_neon_vector_type)
OS << "neon_vector_type(";
else
OS << "neon_polyvector_type(";
const VectorType *vector = T->getEquivalentType()->getAs<VectorType>();
OS << vector->getNumElements();
OS << ')';
break;
}
case AttributedType::attr_regparm: {
// FIXME: When Sema learns to form this AttributedType, avoid printing the
// attribute again in printFunctionProtoAfter.
OS << "regparm(";
QualType t = T->getEquivalentType();
while (!t->isFunctionType())
t = t->getPointeeType();
OS << t->getAs<FunctionType>()->getRegParmType();
OS << ')';
break;
}
case AttributedType::attr_objc_gc: {
OS << "objc_gc(";
QualType tmp = T->getEquivalentType();
while (tmp.getObjCGCAttr() == Qualifiers::GCNone) {
QualType next = tmp->getPointeeType();
if (next == tmp) break;
tmp = next;
}
if (tmp.isObjCGCWeak())
OS << "weak";
else
OS << "strong";
OS << ')';
break;
}
case AttributedType::attr_objc_ownership:
OS << "objc_ownership(";
switch (T->getEquivalentType().getObjCLifetime()) {
case Qualifiers::OCL_None: llvm_unreachable("no ownership!");
case Qualifiers::OCL_ExplicitNone: OS << "none"; break;
case Qualifiers::OCL_Strong: OS << "strong"; break;
case Qualifiers::OCL_Weak: OS << "weak"; break;
case Qualifiers::OCL_Autoreleasing: OS << "autoreleasing"; break;
}
OS << ')';
break;
// FIXME: When Sema learns to form this AttributedType, avoid printing the
// attribute again in printFunctionProtoAfter.
case AttributedType::attr_noreturn: OS << "noreturn"; break;
case AttributedType::attr_cdecl: OS << "cdecl"; break;
case AttributedType::attr_fastcall: OS << "fastcall"; break;
case AttributedType::attr_stdcall: OS << "stdcall"; break;
case AttributedType::attr_thiscall: OS << "thiscall"; break;
case AttributedType::attr_vectorcall: OS << "vectorcall"; break;
case AttributedType::attr_pascal: OS << "pascal"; break;
case AttributedType::attr_ms_abi: OS << "ms_abi"; break;
case AttributedType::attr_sysv_abi: OS << "sysv_abi"; break;
case AttributedType::attr_pcs:
case AttributedType::attr_pcs_vfp: {
OS << "pcs(";
QualType t = T->getEquivalentType();
while (!t->isFunctionType())
t = t->getPointeeType();
OS << (t->getAs<FunctionType>()->getCallConv() == CC_AAPCS ?
"\"aapcs\"" : "\"aapcs-vfp\"");
OS << ')';
break;
}
case AttributedType::attr_inteloclbicc: OS << "inteloclbicc"; break;
}
OS << "))";
}
/// An ObjC interface type prints as its declared name.
void TypePrinter::printObjCInterfaceBefore(const ObjCInterfaceType *T,
                                           raw_ostream &OS) {
  OS << T->getDecl()->getName();
  spaceBeforePlaceHolder(OS);
}
void TypePrinter::printObjCInterfaceAfter(const ObjCInterfaceType *T,
                                          raw_ostream &OS) { }
/// Print an ObjC object type: optional '__kindof', the base type, any
/// written type arguments '<T1,T2>', and any protocol qualifiers '<P1,P2>'.
void TypePrinter::printObjCObjectBefore(const ObjCObjectType *T,
                                        raw_ostream &OS) {
  // A bare, unqualified base prints as the base type alone.
  if (T->qual_empty() && T->isUnspecializedAsWritten() &&
      !T->isKindOfTypeAsWritten())
    return printBefore(T->getBaseType(), OS);
  if (T->isKindOfTypeAsWritten())
    OS << "__kindof ";
  print(T->getBaseType(), OS, StringRef());
  if (T->isSpecializedAsWritten()) {
    bool isFirst = true;
    OS << '<';
    for (auto typeArg : T->getTypeArgsAsWritten()) {
      if (isFirst)
        isFirst = false;
      else
        OS << ",";
      print(typeArg, OS, StringRef());
    }
    OS << '>';
  }
  if (!T->qual_empty()) {
    bool isFirst = true;
    OS << '<';
    for (const auto *I : T->quals()) {
      if (isFirst)
        isFirst = false;
      else
        OS << ',';
      OS << I->getName();
    }
    OS << '>';
  }
  spaceBeforePlaceHolder(OS);
}
void TypePrinter::printObjCObjectAfter(const ObjCObjectType *T,
                                        raw_ostream &OS) {
  if (T->qual_empty() && T->isUnspecializedAsWritten() &&
      !T->isKindOfTypeAsWritten())
    return printAfter(T->getBaseType(), OS);
}
/// Print an ObjC object pointer. 'id', 'Class' and their qualified forms
/// already imply the pointer, so '*' is only printed for other pointees.
void TypePrinter::printObjCObjectPointerBefore(const ObjCObjectPointerType *T,
                                               raw_ostream &OS) {
  printBefore(T->getPointeeType(), OS);
  // If we need to print the pointer, print it now.
  if (!T->isObjCIdType() && !T->isObjCQualifiedIdType() &&
      !T->isObjCClassType() && !T->isObjCQualifiedClassType()) {
    if (HasEmptyPlaceHolder)
      OS << ' ';
    OS << '*';
  }
}
void TypePrinter::printObjCObjectPointerAfter(const ObjCObjectPointerType *T,
                                              raw_ostream &OS) { }
/// Convenience overload: unpack a TemplateArgumentListInfo and forward to
/// the TemplateArgumentLoc-array printer.
void TemplateSpecializationType::
PrintTemplateArgumentList(raw_ostream &OS,
                          const TemplateArgumentListInfo &Args,
                          const PrintingPolicy &Policy) {
  const TemplateArgumentLoc *ArgArray = Args.getArgumentArray();
  unsigned Count = Args.size();
  PrintTemplateArgumentList(OS, ArgArray, Count, Policy);
}
/// Print a template argument list, e.g. '<int, 4>'. Argument packs are
/// flattened inline via recursion with SkipBrackets=true. Under HLSL, an
/// empty argument list prints nothing at all.
void
TemplateSpecializationType::PrintTemplateArgumentList(
                                                raw_ostream &OS,
                                                const TemplateArgument *Args,
                                                unsigned NumArgs,
                                                  const PrintingPolicy &Policy,
                                                      bool SkipBrackets) {
  if (!SkipBrackets && !(Policy.LangOpts.HLSL && NumArgs == 0)) // HLSL Change
    OS << '<';
  bool needSpace = false;
  for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
    // Print the argument into a string.
    SmallString<128> Buf;
    llvm::raw_svector_ostream ArgOS(Buf);
    if (Args[Arg].getKind() == TemplateArgument::Pack) {
      // Empty packs print nothing, so only emit a separator when the pack
      // will actually contribute output.
      if (Args[Arg].pack_size() && Arg > 0)
        OS << ", ";
      PrintTemplateArgumentList(ArgOS,
                                Args[Arg].pack_begin(),
                                Args[Arg].pack_size(),
                                Policy, true);
    } else {
      if (Arg > 0)
        OS << ", ";
      Args[Arg].print(Policy, ArgOS);
    }
    StringRef ArgString = ArgOS.str();
    // If this is the first argument and its string representation
    // begins with the global scope specifier ('::foo'), add a space
    // to avoid printing the diagraph '<:'.
    if (!Arg && !ArgString.empty() && ArgString[0] == ':')
      OS << ' ';
    OS << ArgString;
    needSpace = (!ArgString.empty() && ArgString.back() == '>');
  }
  // If the last character of our string is '>', add another space to
  // keep the two '>''s separate tokens. We don't *have* to do this in
  // C++0x, but it's still good hygiene.
  if (needSpace)
    OS << ' ';
  if (!SkipBrackets && !(Policy.LangOpts.HLSL && NumArgs == 0)) // HLSL Change
    OS << '>';
}
// Sadly, repeat all that with TemplateArgLoc.
/// Same printing algorithm as above, but for TemplateArgumentLoc arrays
/// (arguments as written in source, with location info).
void TemplateSpecializationType::
PrintTemplateArgumentList(raw_ostream &OS,
                          const TemplateArgumentLoc *Args, unsigned NumArgs,
                          const PrintingPolicy &Policy) {
  if (!(Policy.LangOpts.HLSL && NumArgs == 0)) // HLSL Change
    OS << '<';
  bool needSpace = false;
  for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
    if (Arg > 0)
      OS << ", ";
    // Print the argument into a string.
    SmallString<128> Buf;
    llvm::raw_svector_ostream ArgOS(Buf);
    if (Args[Arg].getArgument().getKind() == TemplateArgument::Pack) {
      // Packs are flattened inline without their own brackets.
      PrintTemplateArgumentList(ArgOS,
                                Args[Arg].getArgument().pack_begin(),
                                Args[Arg].getArgument().pack_size(),
                                Policy, true);
    } else {
      Args[Arg].getArgument().print(Policy, ArgOS);
    }
    StringRef ArgString = ArgOS.str();
    // If this is the first argument and its string representation
    // begins with the global scope specifier ('::foo'), add a space
    // to avoid printing the diagraph '<:'.
    if (!Arg && !ArgString.empty() && ArgString[0] == ':')
      OS << ' ';
    OS << ArgString;
    needSpace = (!ArgString.empty() && ArgString.back() == '>');
  }
  // If the last character of our string is '>', add another space to
  // keep the two '>''s separate tokens. We don't *have* to do this in
  // C++0x, but it's still good hygiene.
  if (needSpace)
    OS << ' ';
  if (!(Policy.LangOpts.HLSL && NumArgs == 0)) // HLSL Change
    OS << '>';
}
/// Build the string form of these qualifiers using a default-constructed
/// printing policy.
std::string Qualifiers::getAsString() const {
  LangOptions DefaultOpts;
  PrintingPolicy DefaultPolicy(DefaultOpts);
  return getAsString(DefaultPolicy);
}
// Renders the qualifiers to a string by delegating to Qualifiers::print.
// Qualifiers are separated by spaces; no trailing space is appended.
std::string Qualifiers::getAsString(const PrintingPolicy &Policy) const {
  SmallString<64> Storage;
  llvm::raw_svector_ostream OS(Storage);
  print(OS, Policy);
  return OS.str();
}
bool Qualifiers::isEmptyWhenPrinted(const PrintingPolicy &Policy) const {
if (getCVRQualifiers())
return false;
if (getAddressSpace())
return false;
if (getObjCGCAttr())
return false;
if (Qualifiers::ObjCLifetime lifetime = getObjCLifetime())
if (!(lifetime == Qualifiers::OCL_Strong && Policy.SuppressStrongLifetime))
return false;
return true;
}
// Appends qualifiers to the given string, separated by spaces. Will
// prefix a space if the string is non-empty. Will not append a final
// space.
/// Emission order: CVR qualifiers, address space, ObjC GC attribute, then
/// ObjC lifetime. \p addSpace tracks whether anything was printed so a
/// separating space is inserted between groups.
void Qualifiers::print(raw_ostream &OS, const PrintingPolicy& Policy,
                       bool appendSpaceIfNonEmpty) const {
  bool addSpace = false;
  unsigned quals = getCVRQualifiers();
  if (quals) {
    AppendTypeQualList(OS, quals, Policy.LangOpts.C99);
    addSpace = true;
  }
  if (unsigned addrspace = getAddressSpace()) {
    if (addSpace)
      OS << ' ';
    addSpace = true;
    // Known OpenCL address spaces get their keyword form; anything else
    // falls back to the GCC attribute spelling.
    switch (addrspace) {
      case LangAS::opencl_global:
        OS << "__global";
        break;
      case LangAS::opencl_local:
        OS << "__local";
        break;
      case LangAS::opencl_constant:
        OS << "__constant";
        break;
      case LangAS::opencl_generic:
        OS << "__generic";
        break;
      default:
        OS << "__attribute__((address_space(";
        OS << addrspace;
        OS << ")))";
    }
  }
  if (Qualifiers::GC gc = getObjCGCAttr()) {
    if (addSpace)
      OS << ' ';
    addSpace = true;
    if (gc == Qualifiers::Weak)
      OS << "__weak";
    else
      OS << "__strong";
  }
  if (Qualifiers::ObjCLifetime lifetime = getObjCLifetime()) {
    // Strong lifetime may be suppressed by the policy, in which case no
    // space is accounted for either.
    if (!(lifetime == Qualifiers::OCL_Strong && Policy.SuppressStrongLifetime)){
      if (addSpace)
        OS << ' ';
      addSpace = true;
    }
    switch (lifetime) {
    case Qualifiers::OCL_None: llvm_unreachable("none but true");
    case Qualifiers::OCL_ExplicitNone: OS << "__unsafe_unretained"; break;
    case Qualifiers::OCL_Strong:
      if (!Policy.SuppressStrongLifetime)
        OS << "__strong";
      break;
    case Qualifiers::OCL_Weak: OS << "__weak"; break;
    case Qualifiers::OCL_Autoreleasing: OS << "__autoreleasing"; break;
    }
  }
  if (appendSpaceIfNonEmpty && addSpace)
    OS << ' ';
}
/// Render this QualType to a string under the given printing policy.
std::string QualType::getAsString(const PrintingPolicy &Policy) const {
  std::string Result;
  getAsStringInternal(Result, Policy);
  return Result;
}
/// Render a split type (bare Type plus qualifiers) to a string, using a
/// default-constructed printing policy.
std::string QualType::getAsString(const Type *ty, Qualifiers qs) {
  LangOptions DefaultOpts;
  std::string Result;
  getAsStringInternal(ty, qs, Result, PrintingPolicy(DefaultOpts));
  return Result;
}
/// Print a split type to \p OS, substituting \p PlaceHolder for the
/// declarator name position.
void QualType::print(const Type *ty, Qualifiers qs,
                     raw_ostream &OS, const PrintingPolicy &policy,
                     const Twine &PlaceHolder) {
  SmallString<128> PHBuf;
  StringRef PH = PlaceHolder.toStringRef(PHBuf);
  TypePrinter(policy).print(ty, qs, OS, PH);
}
/// Print a split type into \p buffer, treating the buffer's existing
/// contents as the declarator placeholder (so 'int' + "x" -> "int x").
void QualType::getAsStringInternal(const Type *ty, Qualifiers qs,
                                   std::string &buffer,
                                   const PrintingPolicy &policy) {
  SmallString<256> Buf;
  llvm::raw_svector_ostream StrOS(Buf);
  TypePrinter(policy).print(ty, qs, StrOS, buffer);
  std::string str = StrOS.str();
  buffer.swap(str);
}
//===---- StmtProfile.cpp - Profile implementation for Stmt ASTs ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Stmt::Profile method, which builds a unique bit
// representation that identifies a statement/expression.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/ADT/FoldingSet.h"
using namespace clang;
// //
///////////////////////////////////////////////////////////////////////////////
namespace {
  /// Folds the structure of a statement/expression tree into a
  /// FoldingSetNodeID so that equivalent trees can be uniqued. When
  /// \c Canonical is set, referenced types and declarations are profiled
  /// via their canonical forms.
  class StmtProfiler : public ConstStmtVisitor<StmtProfiler> {
    llvm::FoldingSetNodeID &ID;
    const ASTContext &Context;
    bool Canonical;
  public:
    StmtProfiler(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
                 bool Canonical)
      : ID(ID), Context(Context), Canonical(Canonical) { }
    void VisitStmt(const Stmt *S);
#define STMT(Node, Base) void Visit##Node(const Node *S);
#include "clang/AST/StmtNodes.inc"
    /// \brief Visit a declaration that is referenced within an expression
    /// or statement.
    void VisitDecl(const Decl *D);
    /// \brief Visit a type that is referenced within an expression or
    /// statement.
    void VisitType(QualType T);
    /// \brief Visit a name that occurs within an expression or statement.
    void VisitName(DeclarationName Name);
    /// \brief Visit a nested-name-specifier that occurs within an expression
    /// or statement.
    void VisitNestedNameSpecifier(NestedNameSpecifier *NNS);
    /// \brief Visit a template name that occurs within an expression or
    /// statement.
    void VisitTemplateName(TemplateName Name);
    /// \brief Visit template arguments that occur within an expression or
    /// statement.
    void VisitTemplateArguments(const TemplateArgumentLoc *Args,
                                unsigned NumArgs);
    /// \brief Visit a single template argument.
    void VisitTemplateArgument(const TemplateArgument &Arg);
  };
}
/// Profile the parts common to every statement: its class and all of its
/// children. A 0 marker records an absent child so trees differing only
/// by a null child do not collide.
void StmtProfiler::VisitStmt(const Stmt *S) {
  ID.AddInteger(S->getStmtClass());
  for (const Stmt *SubStmt : S->children()) {
    if (SubStmt)
      Visit(SubStmt);
    else
      ID.AddInteger(0);
  }
}
/// A DeclStmt additionally profiles each declared entity.
void StmtProfiler::VisitDeclStmt(const DeclStmt *S) {
  VisitStmt(S);
  for (const auto *D : S->decls())
    VisitDecl(D);
}
// Most statement kinds have no extra state beyond their class and
// children, so they profile via VisitStmt alone.
void StmtProfiler::VisitNullStmt(const NullStmt *S) {
  VisitStmt(S);
}
// HLSL Change: adding support for HLSL discard stmt.
void StmtProfiler::VisitDiscardStmt(const DiscardStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitCompoundStmt(const CompoundStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitSwitchCase(const SwitchCase *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitCaseStmt(const CaseStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitDefaultStmt(const DefaultStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitLabelStmt(const LabelStmt *S) {
  VisitStmt(S);
  VisitDecl(S->getDecl());
}
void StmtProfiler::VisitAttributedStmt(const AttributedStmt *S) {
  VisitStmt(S);
  // TODO: maybe visit attributes?
}
// Condition-carrying statements also profile their condition variable.
void StmtProfiler::VisitIfStmt(const IfStmt *S) {
  VisitStmt(S);
  VisitDecl(S->getConditionVariable());
}
void StmtProfiler::VisitSwitchStmt(const SwitchStmt *S) {
  VisitStmt(S);
  VisitDecl(S->getConditionVariable());
}
void StmtProfiler::VisitWhileStmt(const WhileStmt *S) {
  VisitStmt(S);
  VisitDecl(S->getConditionVariable());
}
void StmtProfiler::VisitDoStmt(const DoStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitForStmt(const ForStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitGotoStmt(const GotoStmt *S) {
  VisitStmt(S);
  VisitDecl(S->getLabel());
}
void StmtProfiler::VisitIndirectGotoStmt(const IndirectGotoStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitContinueStmt(const ContinueStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitBreakStmt(const BreakStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitReturnStmt(const ReturnStmt *S) {
  VisitStmt(S);
}
/// GCC inline-asm carries flags, the asm string, and the output/input/
/// clobber constraint lists, all of which contribute to the profile.
void StmtProfiler::VisitGCCAsmStmt(const GCCAsmStmt *S) {
  VisitStmt(S);
  ID.AddBoolean(S->isVolatile());
  ID.AddBoolean(S->isSimple());
  VisitStringLiteral(S->getAsmString());
  ID.AddInteger(S->getNumOutputs());
  for (unsigned I = 0, N = S->getNumOutputs(); I != N; ++I) {
    ID.AddString(S->getOutputName(I));
    VisitStringLiteral(S->getOutputConstraintLiteral(I));
  }
  ID.AddInteger(S->getNumInputs());
  for (unsigned I = 0, N = S->getNumInputs(); I != N; ++I) {
    ID.AddString(S->getInputName(I));
    VisitStringLiteral(S->getInputConstraintLiteral(I));
  }
  ID.AddInteger(S->getNumClobbers());
  for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I)
    VisitStringLiteral(S->getClobberStringLiteral(I));
}
void StmtProfiler::VisitMSAsmStmt(const MSAsmStmt *S) {
  // FIXME: Implement MS style inline asm statement profiler.
  VisitStmt(S);
}
/// C++ catch clauses additionally profile the caught type.
void StmtProfiler::VisitCXXCatchStmt(const CXXCatchStmt *S) {
  VisitStmt(S);
  VisitType(S->getCaughtType());
}
void StmtProfiler::VisitCXXTryStmt(const CXXTryStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
  VisitStmt(S);
}
/// __if_exists/__if_not_exists profile the flavor, qualifier, and name.
void StmtProfiler::VisitMSDependentExistsStmt(const MSDependentExistsStmt *S) {
  VisitStmt(S);
  ID.AddBoolean(S->isIfExists());
  VisitNestedNameSpecifier(S->getQualifierLoc().getNestedNameSpecifier());
  VisitName(S->getNameInfo().getName());
}
// SEH, captured, and most ObjC statements carry no extra profiled state.
void StmtProfiler::VisitSEHTryStmt(const SEHTryStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitSEHFinallyStmt(const SEHFinallyStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitSEHExceptStmt(const SEHExceptStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitSEHLeaveStmt(const SEHLeaveStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitCapturedStmt(const CapturedStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) {
  VisitStmt(S);
}
/// @catch profiles the presence of '...' and the catch parameter's type.
void StmtProfiler::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *S) {
  VisitStmt(S);
  ID.AddBoolean(S->hasEllipsis());
  if (S->getCatchParamDecl())
    VisitType(S->getCatchParamDecl()->getType());
}
void StmtProfiler::VisitObjCAtFinallyStmt(const ObjCAtFinallyStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitObjCAtTryStmt(const ObjCAtTryStmt *S) {
  VisitStmt(S);
}
void
StmtProfiler::VisitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitObjCAtThrowStmt(const ObjCAtThrowStmt *S) {
  VisitStmt(S);
}
void
StmtProfiler::VisitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt *S) {
  VisitStmt(S);
}
namespace {
/// Folds the contents of OpenMP clauses into the owning StmtProfiler's ID.
class OMPClauseProfiler : public ConstOMPClauseVisitor<OMPClauseProfiler> {
  StmtProfiler *Profiler;
  /// \brief Process clauses with list of variables.
  template <typename T>
  void VisitOMPClauseList(T *Node);
public:
  OMPClauseProfiler(StmtProfiler *P) : Profiler(P) { }
#define OPENMP_CLAUSE(Name, Class) \
  void Visit##Class(const Class *C);
#include "clang/Basic/OpenMPKinds.def"
};
// Expression-carrying clauses profile their (possibly absent) expression.
void OMPClauseProfiler::VisitOMPIfClause(const OMPIfClause *C) {
  if (C->getCondition())
    Profiler->VisitStmt(C->getCondition());
}
void OMPClauseProfiler::VisitOMPFinalClause(const OMPFinalClause *C) {
  if (C->getCondition())
    Profiler->VisitStmt(C->getCondition());
}
void OMPClauseProfiler::VisitOMPNumThreadsClause(const OMPNumThreadsClause *C) {
  if (C->getNumThreads())
    Profiler->VisitStmt(C->getNumThreads());
}
void OMPClauseProfiler::VisitOMPSafelenClause(const OMPSafelenClause *C) {
  if (C->getSafelen())
    Profiler->VisitStmt(C->getSafelen());
}
void OMPClauseProfiler::VisitOMPCollapseClause(const OMPCollapseClause *C) {
  if (C->getNumForLoops())
    Profiler->VisitStmt(C->getNumForLoops());
}
// Keyword-only clauses contribute nothing beyond their presence.
void OMPClauseProfiler::VisitOMPDefaultClause(const OMPDefaultClause *C) { }
void OMPClauseProfiler::VisitOMPProcBindClause(const OMPProcBindClause *C) { }
/// Profile an OpenMP 'schedule' clause: visit the chunk-size expression
/// and, if present, its generated helper expression.
void OMPClauseProfiler::VisitOMPScheduleClause(const OMPScheduleClause *C) {
  if (C->getChunkSize()) {
    Profiler->VisitStmt(C->getChunkSize());
    if (C->getHelperChunkSize()) {
      // Fix: visit the helper chunk-size expression; the original code
      // profiled getChunkSize() a second time, so the helper expression
      // never contributed to the profile.
      Profiler->VisitStmt(C->getHelperChunkSize());
    }
  }
}
// Presence-only clauses: nothing to profile.
void OMPClauseProfiler::VisitOMPOrderedClause(const OMPOrderedClause *) {}
void OMPClauseProfiler::VisitOMPNowaitClause(const OMPNowaitClause *) {}
void OMPClauseProfiler::VisitOMPUntiedClause(const OMPUntiedClause *) {}
void OMPClauseProfiler::VisitOMPMergeableClause(const OMPMergeableClause *) {}
void OMPClauseProfiler::VisitOMPReadClause(const OMPReadClause *) {}
void OMPClauseProfiler::VisitOMPWriteClause(const OMPWriteClause *) {}
void OMPClauseProfiler::VisitOMPUpdateClause(const OMPUpdateClause *) {}
void OMPClauseProfiler::VisitOMPCaptureClause(const OMPCaptureClause *) {}
void OMPClauseProfiler::VisitOMPSeqCstClause(const OMPSeqCstClause *) {}
/// Shared helper: profile every variable in a clause's variable list.
template<typename T>
void OMPClauseProfiler::VisitOMPClauseList(T *Node) {
  for (auto *E : Node->varlists()) {
    Profiler->VisitStmt(E);
  }
}
void OMPClauseProfiler::VisitOMPPrivateClause(const OMPPrivateClause *C) {
  VisitOMPClauseList(C);
  for (auto *E : C->private_copies()) {
    Profiler->VisitStmt(E);
  }
}
void
OMPClauseProfiler::VisitOMPFirstprivateClause(const OMPFirstprivateClause *C) {
  VisitOMPClauseList(C);
  for (auto *E : C->private_copies()) {
    Profiler->VisitStmt(E);
  }
  for (auto *E : C->inits()) {
    Profiler->VisitStmt(E);
  }
}
void
OMPClauseProfiler::VisitOMPLastprivateClause(const OMPLastprivateClause *C) {
  VisitOMPClauseList(C);
  for (auto *E : C->source_exprs()) {
    Profiler->VisitStmt(E);
  }
  for (auto *E : C->destination_exprs()) {
    Profiler->VisitStmt(E);
  }
  for (auto *E : C->assignment_ops()) {
    Profiler->VisitStmt(E);
  }
}
void OMPClauseProfiler::VisitOMPSharedClause(const OMPSharedClause *C) {
  VisitOMPClauseList(C);
}
/// 'reduction' also profiles the reduction identifier (qualifier + name)
/// and the generated lhs/rhs/op expressions.
void OMPClauseProfiler::VisitOMPReductionClause(
                                         const OMPReductionClause *C) {
  Profiler->VisitNestedNameSpecifier(
      C->getQualifierLoc().getNestedNameSpecifier());
  Profiler->VisitName(C->getNameInfo().getName());
  VisitOMPClauseList(C);
  for (auto *E : C->lhs_exprs()) {
    Profiler->VisitStmt(E);
  }
  for (auto *E : C->rhs_exprs()) {
    Profiler->VisitStmt(E);
  }
  for (auto *E : C->reduction_ops()) {
    Profiler->VisitStmt(E);
  }
}
void OMPClauseProfiler::VisitOMPLinearClause(const OMPLinearClause *C) {
  VisitOMPClauseList(C);
  for (auto *E : C->inits()) {
    Profiler->VisitStmt(E);
  }
  for (auto *E : C->updates()) {
    Profiler->VisitStmt(E);
  }
  for (auto *E : C->finals()) {
    Profiler->VisitStmt(E);
  }
  Profiler->VisitStmt(C->getStep());
  Profiler->VisitStmt(C->getCalcStep());
}
void OMPClauseProfiler::VisitOMPAlignedClause(const OMPAlignedClause *C) {
  VisitOMPClauseList(C);
  Profiler->VisitStmt(C->getAlignment());
}
void OMPClauseProfiler::VisitOMPCopyinClause(const OMPCopyinClause *C) {
  VisitOMPClauseList(C);
  for (auto *E : C->source_exprs()) {
    Profiler->VisitStmt(E);
  }
  for (auto *E : C->destination_exprs()) {
    Profiler->VisitStmt(E);
  }
  for (auto *E : C->assignment_ops()) {
    Profiler->VisitStmt(E);
  }
}
void
OMPClauseProfiler::VisitOMPCopyprivateClause(const OMPCopyprivateClause *C) {
  VisitOMPClauseList(C);
  for (auto *E : C->source_exprs()) {
    Profiler->VisitStmt(E);
  }
  for (auto *E : C->destination_exprs()) {
    Profiler->VisitStmt(E);
  }
  for (auto *E : C->assignment_ops()) {
    Profiler->VisitStmt(E);
  }
}
void OMPClauseProfiler::VisitOMPFlushClause(const OMPFlushClause *C) {
  VisitOMPClauseList(C);
}
void OMPClauseProfiler::VisitOMPDependClause(const OMPDependClause *C) {
  VisitOMPClauseList(C);
}
}
void
StmtProfiler::VisitOMPExecutableDirective(const OMPExecutableDirective *S) {
VisitStmt(S);
OMPClauseProfiler P(this);
ArrayRef<OMPClause *> Clauses = S->clauses();
for (ArrayRef<OMPClause *>::iterator I = Clauses.begin(), E = Clauses.end();
I != E; ++I)
if (*I)
P.Visit(*I);
}
// OpenMP directive visitors.  Each directive simply profiles via its base
// visitor (VisitOMPExecutableDirective / VisitOMPLoopDirective), which covers
// the statement and its clauses; only 'critical' adds extra state.
void StmtProfiler::VisitOMPLoopDirective(const OMPLoopDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPParallelDirective(const OMPParallelDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPSimdDirective(const OMPSimdDirective *S) {
  VisitOMPLoopDirective(S);
}
void StmtProfiler::VisitOMPForDirective(const OMPForDirective *S) {
  VisitOMPLoopDirective(S);
}
void StmtProfiler::VisitOMPForSimdDirective(const OMPForSimdDirective *S) {
  VisitOMPLoopDirective(S);
}
void StmtProfiler::VisitOMPSectionsDirective(const OMPSectionsDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPSectionDirective(const OMPSectionDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPSingleDirective(const OMPSingleDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPMasterDirective(const OMPMasterDirective *S) {
  VisitOMPExecutableDirective(S);
}
// 'critical' also profiles its directive (lock) name.
void StmtProfiler::VisitOMPCriticalDirective(const OMPCriticalDirective *S) {
  VisitOMPExecutableDirective(S);
  VisitName(S->getDirectiveName().getName());
}
void
StmtProfiler::VisitOMPParallelForDirective(const OMPParallelForDirective *S) {
  VisitOMPLoopDirective(S);
}
void StmtProfiler::VisitOMPParallelForSimdDirective(
    const OMPParallelForSimdDirective *S) {
  VisitOMPLoopDirective(S);
}
void StmtProfiler::VisitOMPParallelSectionsDirective(
    const OMPParallelSectionsDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPTaskDirective(const OMPTaskDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPTaskyieldDirective(const OMPTaskyieldDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPBarrierDirective(const OMPBarrierDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPTaskwaitDirective(const OMPTaskwaitDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPTaskgroupDirective(const OMPTaskgroupDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPFlushDirective(const OMPFlushDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPOrderedDirective(const OMPOrderedDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPAtomicDirective(const OMPAtomicDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPTargetDirective(const OMPTargetDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPTeamsDirective(const OMPTeamsDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPCancellationPointDirective(
    const OMPCancellationPointDirective *S) {
  VisitOMPExecutableDirective(S);
}
void StmtProfiler::VisitOMPCancelDirective(const OMPCancelDirective *S) {
  VisitOMPExecutableDirective(S);
}
// Expression visitors: each profiles the generic expression data first, then
// adds whatever extra state distinguishes this node kind.
void StmtProfiler::VisitExpr(const Expr *S) {
  VisitStmt(S);
}
void StmtProfiler::VisitDeclRefExpr(const DeclRefExpr *S) {
  VisitExpr(S);
  // Qualifier and explicit template arguments are skipped when profiling
  // canonically.
  if (!Canonical)
    VisitNestedNameSpecifier(S->getQualifier());
  VisitDecl(S->getDecl());
  if (!Canonical)
    VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
}
void StmtProfiler::VisitPredefinedExpr(const PredefinedExpr *S) {
  VisitExpr(S);
  ID.AddInteger(S->getIdentType());
}
void StmtProfiler::VisitIntegerLiteral(const IntegerLiteral *S) {
  VisitExpr(S);
  S->getValue().Profile(ID);
  // The APInt alone does not distinguish e.g. int from long; include the
  // builtin type kind.
  ID.AddInteger(S->getType()->castAs<BuiltinType>()->getKind());
}
void StmtProfiler::VisitCharacterLiteral(const CharacterLiteral *S) {
  VisitExpr(S);
  ID.AddInteger(S->getKind());
  ID.AddInteger(S->getValue());
}
void StmtProfiler::VisitFloatingLiteral(const FloatingLiteral *S) {
  VisitExpr(S);
  S->getValue().Profile(ID);
  ID.AddBoolean(S->isExact());
  ID.AddInteger(S->getType()->castAs<BuiltinType>()->getKind());
}
void StmtProfiler::VisitImaginaryLiteral(const ImaginaryLiteral *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitStringLiteral(const StringLiteral *S) {
  VisitExpr(S);
  ID.AddString(S->getBytes());
  ID.AddInteger(S->getKind());
}
void StmtProfiler::VisitParenExpr(const ParenExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitParenListExpr(const ParenListExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitUnaryOperator(const UnaryOperator *S) {
  VisitExpr(S);
  ID.AddInteger(S->getOpcode());
}
void StmtProfiler::VisitOffsetOfExpr(const OffsetOfExpr *S) {
  // Profile the queried type and each offsetof component before the generic
  // expression data.
  VisitType(S->getTypeSourceInfo()->getType());
  unsigned n = S->getNumComponents();
  for (unsigned i = 0; i < n; ++i) {
    const OffsetOfExpr::OffsetOfNode& ON = S->getComponent(i);
    ID.AddInteger(ON.getKind());
    switch (ON.getKind()) {
    case OffsetOfExpr::OffsetOfNode::Array:
      // Expressions handled below.
      break;
    case OffsetOfExpr::OffsetOfNode::Field:
      VisitDecl(ON.getField());
      break;
    case OffsetOfExpr::OffsetOfNode::Identifier:
      ID.AddPointer(ON.getFieldName());
      break;
    case OffsetOfExpr::OffsetOfNode::Base:
      // These nodes are implicit, and therefore don't need profiling.
      break;
    }
  }
  VisitExpr(S);
}
void
StmtProfiler::VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *S) {
  VisitExpr(S);
  ID.AddInteger(S->getKind());
  // sizeof(type)/alignof(type) has no sub-expression; profile the type.
  if (S->isArgumentType())
    VisitType(S->getArgumentType());
}
void StmtProfiler::VisitArraySubscriptExpr(const ArraySubscriptExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitCallExpr(const CallExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitMemberExpr(const MemberExpr *S) {
  VisitExpr(S);
  VisitDecl(S->getMemberDecl());
  if (!Canonical)
    VisitNestedNameSpecifier(S->getQualifier());
  ID.AddBoolean(S->isArrow());
}
void StmtProfiler::VisitCompoundLiteralExpr(const CompoundLiteralExpr *S) {
  VisitExpr(S);
  ID.AddBoolean(S->isFileScope());
}
void StmtProfiler::VisitCastExpr(const CastExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitImplicitCastExpr(const ImplicitCastExpr *S) {
  VisitCastExpr(S);
  ID.AddInteger(S->getValueKind());
}
void StmtProfiler::VisitExplicitCastExpr(const ExplicitCastExpr *S) {
  VisitCastExpr(S);
  VisitType(S->getTypeAsWritten());
}
void StmtProfiler::VisitCStyleCastExpr(const CStyleCastExpr *S) {
  VisitExplicitCastExpr(S);
}
void StmtProfiler::VisitBinaryOperator(const BinaryOperator *S) {
  VisitExpr(S);
  ID.AddInteger(S->getOpcode());
}
void
StmtProfiler::VisitCompoundAssignOperator(const CompoundAssignOperator *S) {
  VisitBinaryOperator(S);
}
void StmtProfiler::VisitConditionalOperator(const ConditionalOperator *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitBinaryConditionalOperator(
    const BinaryConditionalOperator *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitAddrLabelExpr(const AddrLabelExpr *S) {
  VisitExpr(S);
  VisitDecl(S->getLabel());
}
void StmtProfiler::VisitStmtExpr(const StmtExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitShuffleVectorExpr(const ShuffleVectorExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitConvertVectorExpr(const ConvertVectorExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitChooseExpr(const ChooseExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitGNUNullExpr(const GNUNullExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitVAArgExpr(const VAArgExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitInitListExpr(const InitListExpr *S) {
if (S->getSyntacticForm()) {
VisitInitListExpr(S->getSyntacticForm());
return;
}
VisitExpr(S);
}
// Profile a designated initializer.  Each designator is tagged with a small
// integer (0 = field, 1 = array, 2 = array range) so different designator
// kinds cannot collide.
void StmtProfiler::VisitDesignatedInitExpr(const DesignatedInitExpr *S) {
  VisitExpr(S);
  ID.AddBoolean(S->usesGNUSyntax());
  for (DesignatedInitExpr::const_designators_iterator D =
         S->designators_begin(), DEnd = S->designators_end();
       D != DEnd; ++D) {
    if (D->isFieldDesignator()) {
      ID.AddInteger(0);
      VisitName(D->getFieldName());
      continue;
    }
    if (D->isArrayDesignator()) {
      ID.AddInteger(1);
    } else {
      assert(D->isArrayRangeDesignator());
      ID.AddInteger(2);
    }
    ID.AddInteger(D->getFirstExprIndex());
  }
}
// Seems that if VisitInitListExpr() only works on the syntactic form of an
// InitListExpr, then a DesignatedInitUpdateExpr is not encountered.
void StmtProfiler::VisitDesignatedInitUpdateExpr(
    const DesignatedInitUpdateExpr *S) {
  llvm_unreachable("Unexpected DesignatedInitUpdateExpr in syntactic form of "
                   "initializer");
}
void StmtProfiler::VisitNoInitExpr(const NoInitExpr *S) {
  llvm_unreachable("Unexpected NoInitExpr in syntactic form of initializer");
}
void StmtProfiler::VisitImplicitValueInitExpr(const ImplicitValueInitExpr *S) {
  VisitExpr(S);
}
// Element accessors (e.g. vec.xyz) are distinguished by the accessor name.
void StmtProfiler::VisitExtVectorElementExpr(const ExtVectorElementExpr *S) {
  VisitExpr(S);
  VisitName(&S->getAccessor());
}
// HLSL Change
void StmtProfiler::VisitExtMatrixElementExpr(const ExtMatrixElementExpr *S) {
  VisitExpr(S);
  VisitName(&S->getAccessor());
}
void StmtProfiler::VisitHLSLVectorElementExpr(const HLSLVectorElementExpr *S) {
  VisitExpr(S);
  VisitName(&S->getAccessor());
}
void StmtProfiler::VisitBlockExpr(const BlockExpr *S) {
  VisitExpr(S);
  VisitDecl(S->getBlockDecl());
}
// _Generic: profile every association's type (null type, presumably the
// 'default' association, profiles as a null pointer) and expression.
void StmtProfiler::VisitGenericSelectionExpr(const GenericSelectionExpr *S) {
  VisitExpr(S);
  for (unsigned i = 0; i != S->getNumAssocs(); ++i) {
    QualType T = S->getAssocType(i);
    if (T.isNull())
      ID.AddPointer(nullptr);
    else
      VisitType(T);
    VisitExpr(S->getAssocExpr(i));
  }
}
void StmtProfiler::VisitPseudoObjectExpr(const PseudoObjectExpr *S) {
  VisitExpr(S);
  for (PseudoObjectExpr::const_semantics_iterator
         i = S->semantics_begin(), e = S->semantics_end(); i != e; ++i)
    // Normally, we would not profile the source expressions of OVEs.
    if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(*i))
      Visit(OVE->getSourceExpr());
}
void StmtProfiler::VisitAtomicExpr(const AtomicExpr *S) {
  VisitExpr(S);
  ID.AddInteger(S->getOp());
}
// Map an overloaded-operator call onto the builtin statement class it spells,
// filling in \p UnaryOp or \p BinaryOp accordingly.  Argument count
// disambiguates operators that can be either unary or binary (+, -, *, &)
// and prefix vs. postfix ++/--.  Operators with no builtin expression form
// (new/delete, call, arrow, conditional) are unreachable here.
static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
                                          UnaryOperatorKind &UnaryOp,
                                          BinaryOperatorKind &BinaryOp) {
  switch (S->getOperator()) {
  case OO_None:
  case OO_New:
  case OO_Delete:
  case OO_Array_New:
  case OO_Array_Delete:
  case OO_Arrow:
  case OO_Call:
  case OO_Conditional:
  case NUM_OVERLOADED_OPERATORS:
    llvm_unreachable("Invalid operator call kind");
  case OO_Plus:
    if (S->getNumArgs() == 1) {
      UnaryOp = UO_Plus;
      return Stmt::UnaryOperatorClass;
    }
    BinaryOp = BO_Add;
    return Stmt::BinaryOperatorClass;
  case OO_Minus:
    if (S->getNumArgs() == 1) {
      UnaryOp = UO_Minus;
      return Stmt::UnaryOperatorClass;
    }
    BinaryOp = BO_Sub;
    return Stmt::BinaryOperatorClass;
  case OO_Star:
    if (S->getNumArgs() == 1) {
      UnaryOp = UO_Deref;
      return Stmt::UnaryOperatorClass;
    }
    BinaryOp = BO_Mul;
    return Stmt::BinaryOperatorClass;
  case OO_Slash:
    BinaryOp = BO_Div;
    return Stmt::BinaryOperatorClass;
  case OO_Percent:
    BinaryOp = BO_Rem;
    return Stmt::BinaryOperatorClass;
  case OO_Caret:
    BinaryOp = BO_Xor;
    return Stmt::BinaryOperatorClass;
  case OO_Amp:
    if (S->getNumArgs() == 1) {
      UnaryOp = UO_AddrOf;
      return Stmt::UnaryOperatorClass;
    }
    BinaryOp = BO_And;
    return Stmt::BinaryOperatorClass;
  case OO_Pipe:
    BinaryOp = BO_Or;
    return Stmt::BinaryOperatorClass;
  case OO_Tilde:
    UnaryOp = UO_Not;
    return Stmt::UnaryOperatorClass;
  case OO_Exclaim:
    UnaryOp = UO_LNot;
    return Stmt::UnaryOperatorClass;
  case OO_Equal:
    BinaryOp = BO_Assign;
    return Stmt::BinaryOperatorClass;
  case OO_Less:
    BinaryOp = BO_LT;
    return Stmt::BinaryOperatorClass;
  case OO_Greater:
    BinaryOp = BO_GT;
    return Stmt::BinaryOperatorClass;
  case OO_PlusEqual:
    BinaryOp = BO_AddAssign;
    return Stmt::CompoundAssignOperatorClass;
  case OO_MinusEqual:
    BinaryOp = BO_SubAssign;
    return Stmt::CompoundAssignOperatorClass;
  case OO_StarEqual:
    BinaryOp = BO_MulAssign;
    return Stmt::CompoundAssignOperatorClass;
  case OO_SlashEqual:
    BinaryOp = BO_DivAssign;
    return Stmt::CompoundAssignOperatorClass;
  case OO_PercentEqual:
    BinaryOp = BO_RemAssign;
    return Stmt::CompoundAssignOperatorClass;
  case OO_CaretEqual:
    BinaryOp = BO_XorAssign;
    return Stmt::CompoundAssignOperatorClass;
  case OO_AmpEqual:
    BinaryOp = BO_AndAssign;
    return Stmt::CompoundAssignOperatorClass;
  case OO_PipeEqual:
    BinaryOp = BO_OrAssign;
    return Stmt::CompoundAssignOperatorClass;
  case OO_LessLess:
    BinaryOp = BO_Shl;
    return Stmt::BinaryOperatorClass;
  case OO_GreaterGreater:
    BinaryOp = BO_Shr;
    return Stmt::BinaryOperatorClass;
  case OO_LessLessEqual:
    BinaryOp = BO_ShlAssign;
    return Stmt::CompoundAssignOperatorClass;
  case OO_GreaterGreaterEqual:
    BinaryOp = BO_ShrAssign;
    return Stmt::CompoundAssignOperatorClass;
  case OO_EqualEqual:
    BinaryOp = BO_EQ;
    return Stmt::BinaryOperatorClass;
  case OO_ExclaimEqual:
    BinaryOp = BO_NE;
    return Stmt::BinaryOperatorClass;
  case OO_LessEqual:
    BinaryOp = BO_LE;
    return Stmt::BinaryOperatorClass;
  case OO_GreaterEqual:
    BinaryOp = BO_GE;
    return Stmt::BinaryOperatorClass;
  case OO_AmpAmp:
    BinaryOp = BO_LAnd;
    return Stmt::BinaryOperatorClass;
  case OO_PipePipe:
    BinaryOp = BO_LOr;
    return Stmt::BinaryOperatorClass;
  case OO_PlusPlus:
    // Postfix forms carry an extra dummy int argument, so one argument
    // means prefix.
    UnaryOp = S->getNumArgs() == 1? UO_PreInc
                                  : UO_PostInc;
    return Stmt::UnaryOperatorClass;
  case OO_MinusMinus:
    UnaryOp = S->getNumArgs() == 1? UO_PreDec
                                  : UO_PostDec;
    return Stmt::UnaryOperatorClass;
  case OO_Comma:
    BinaryOp = BO_Comma;
    return Stmt::BinaryOperatorClass;
  case OO_ArrowStar:
    BinaryOp = BO_PtrMemI;
    return Stmt::BinaryOperatorClass;
  case OO_Subscript:
    return Stmt::ArraySubscriptExprClass;
  }
  llvm_unreachable("Invalid overloaded operator expression");
}
// Profile an overloaded operator call.  Type-dependent calls may resolve to
// a builtin operator after instantiation, so they are profiled like the
// underlying syntactic operator; resolved calls profile as a normal call
// plus the operator kind.
void StmtProfiler::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *S) {
  if (S->isTypeDependent()) {
    // Type-dependent operator calls are profiled like their underlying
    // syntactic operator.
    UnaryOperatorKind UnaryOp = UO_Extension;
    BinaryOperatorKind BinaryOp = BO_Comma;
    Stmt::StmtClass SC = DecodeOperatorCall(S, UnaryOp, BinaryOp);
    ID.AddInteger(SC);
    for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I)
      Visit(S->getArg(I));
    if (SC == Stmt::UnaryOperatorClass)
      ID.AddInteger(UnaryOp);
    else if (SC == Stmt::BinaryOperatorClass ||
             SC == Stmt::CompoundAssignOperatorClass)
      ID.AddInteger(BinaryOp);
    else
      // Subscript has no opcode to add beyond its statement class.
      assert(SC == Stmt::ArraySubscriptExprClass);
    return;
  }
  VisitCallExpr(S);
  ID.AddInteger(S->getOperator());
}
// C++ expression visitors: most delegate to a base visitor and add only the
// node-specific state.
void StmtProfiler::VisitCXXMemberCallExpr(const CXXMemberCallExpr *S) {
  VisitCallExpr(S);
}
void StmtProfiler::VisitCUDAKernelCallExpr(const CUDAKernelCallExpr *S) {
  VisitCallExpr(S);
}
void StmtProfiler::VisitAsTypeExpr(const AsTypeExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitCXXNamedCastExpr(const CXXNamedCastExpr *S) {
  VisitExplicitCastExpr(S);
}
void StmtProfiler::VisitCXXStaticCastExpr(const CXXStaticCastExpr *S) {
  VisitCXXNamedCastExpr(S);
}
void StmtProfiler::VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *S) {
  VisitCXXNamedCastExpr(S);
}
void
StmtProfiler::VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *S) {
  VisitCXXNamedCastExpr(S);
}
void StmtProfiler::VisitCXXConstCastExpr(const CXXConstCastExpr *S) {
  VisitCXXNamedCastExpr(S);
}
void StmtProfiler::VisitUserDefinedLiteral(const UserDefinedLiteral *S) {
  VisitCallExpr(S);
}
void StmtProfiler::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *S) {
  VisitExpr(S);
  ID.AddBoolean(S->getValue());
}
void StmtProfiler::VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitCXXStdInitializerListExpr(
    const CXXStdInitializerListExpr *S) {
  VisitExpr(S);
}
// typeid/__uuidof with a type operand profile that type; expression operands
// have no extra state added here.
void StmtProfiler::VisitCXXTypeidExpr(const CXXTypeidExpr *S) {
  VisitExpr(S);
  if (S->isTypeOperand())
    VisitType(S->getTypeOperandSourceInfo()->getType());
}
void StmtProfiler::VisitCXXUuidofExpr(const CXXUuidofExpr *S) {
  VisitExpr(S);
  if (S->isTypeOperand())
    VisitType(S->getTypeOperandSourceInfo()->getType());
}
void StmtProfiler::VisitMSPropertyRefExpr(const MSPropertyRefExpr *S) {
  VisitExpr(S);
  VisitDecl(S->getPropertyDecl());
}
void StmtProfiler::VisitCXXThisExpr(const CXXThisExpr *S) {
  VisitExpr(S);
  ID.AddBoolean(S->isImplicit());
}
void StmtProfiler::VisitCXXThrowExpr(const CXXThrowExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *S) {
  VisitExpr(S);
  VisitDecl(S->getParam());
}
void StmtProfiler::VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *S) {
  VisitExpr(S);
  VisitDecl(S->getField());
}
void StmtProfiler::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *S) {
  VisitExpr(S);
  // The destructor that will run for the temporary identifies the binding.
  VisitDecl(
    const_cast<CXXDestructorDecl *>(S->getTemporary()->getDestructor()));
}
void StmtProfiler::VisitCXXConstructExpr(const CXXConstructExpr *S) {
  VisitExpr(S);
  VisitDecl(S->getConstructor());
  ID.AddBoolean(S->isElidable());
}
void StmtProfiler::VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *S) {
  VisitExplicitCastExpr(S);
}
void
StmtProfiler::VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) {
  VisitCXXConstructExpr(S);
}
// Lambdas are profiled by their explicit captures and body only.
void
StmtProfiler::VisitLambdaExpr(const LambdaExpr *S) {
  VisitExpr(S);
  for (LambdaExpr::capture_iterator C = S->explicit_capture_begin(),
                                 CEnd = S->explicit_capture_end();
       C != CEnd; ++C) {
    ID.AddInteger(C->getCaptureKind());
    switch (C->getCaptureKind()) {
    case LCK_This:
      break;
    case LCK_ByRef:
    case LCK_ByCopy:
      VisitDecl(C->getCapturedVar());
      ID.AddBoolean(C->isPackExpansion());
      break;
    case LCK_VLAType:
      llvm_unreachable("VLA type in explicit captures.");
    }
  }
  // Note: If we actually needed to be able to match lambda
  // expressions, we would have to consider parameters and return type
  // here, among other things.
  VisitStmt(S->getBody());
}
void
StmtProfiler::VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitCXXDeleteExpr(const CXXDeleteExpr *S) {
  VisitExpr(S);
  ID.AddBoolean(S->isGlobalDelete());
  ID.AddBoolean(S->isArrayForm());
  VisitDecl(S->getOperatorDelete());
}
void StmtProfiler::VisitCXXNewExpr(const CXXNewExpr *S) {
  VisitExpr(S);
  VisitType(S->getAllocatedType());
  VisitDecl(S->getOperatorNew());
  VisitDecl(S->getOperatorDelete());
  ID.AddBoolean(S->isArray());
  ID.AddInteger(S->getNumPlacementArgs());
  ID.AddBoolean(S->isGlobalNew());
  ID.AddBoolean(S->isParenTypeId());
  ID.AddInteger(S->getInitializationStyle());
}
// Booleans flag which of the optional scope/destroyed type infos are present
// so the subsequent pointers/types cannot be misattributed.
void
StmtProfiler::VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *S) {
  VisitExpr(S);
  ID.AddBoolean(S->isArrow());
  VisitNestedNameSpecifier(S->getQualifier());
  ID.AddBoolean(S->getScopeTypeInfo() != nullptr);
  if (S->getScopeTypeInfo())
    VisitType(S->getScopeTypeInfo()->getType());
  ID.AddBoolean(S->getDestroyedTypeInfo() != nullptr);
  if (S->getDestroyedTypeInfo())
    VisitType(S->getDestroyedType());
  else
    ID.AddPointer(S->getDestroyedTypeIdentifier());
}
// Unresolved/dependent expression visitors.  Explicit template arguments are
// guarded by a boolean so their absence is distinguishable in the profile.
void StmtProfiler::VisitOverloadExpr(const OverloadExpr *S) {
  VisitExpr(S);
  VisitNestedNameSpecifier(S->getQualifier());
  VisitName(S->getName());
  ID.AddBoolean(S->hasExplicitTemplateArgs());
  if (S->hasExplicitTemplateArgs())
    VisitTemplateArguments(S->getExplicitTemplateArgs().getTemplateArgs(),
                           S->getExplicitTemplateArgs().NumTemplateArgs);
}
void
StmtProfiler::VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *S) {
  VisitOverloadExpr(S);
}
void StmtProfiler::VisitTypeTraitExpr(const TypeTraitExpr *S) {
  VisitExpr(S);
  ID.AddInteger(S->getTrait());
  ID.AddInteger(S->getNumArgs());
  for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I)
    VisitType(S->getArg(I)->getType());
}
void StmtProfiler::VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *S) {
  VisitExpr(S);
  ID.AddInteger(S->getTrait());
  VisitType(S->getQueriedType());
}
void StmtProfiler::VisitExpressionTraitExpr(const ExpressionTraitExpr *S) {
  VisitExpr(S);
  ID.AddInteger(S->getTrait());
  VisitExpr(S->getQueriedExpression());
}
void StmtProfiler::VisitDependentScopeDeclRefExpr(
    const DependentScopeDeclRefExpr *S) {
  VisitExpr(S);
  VisitName(S->getDeclName());
  VisitNestedNameSpecifier(S->getQualifier());
  ID.AddBoolean(S->hasExplicitTemplateArgs());
  if (S->hasExplicitTemplateArgs())
    VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
}
void StmtProfiler::VisitExprWithCleanups(const ExprWithCleanups *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitCXXUnresolvedConstructExpr(
    const CXXUnresolvedConstructExpr *S) {
  VisitExpr(S);
  VisitType(S->getTypeAsWritten());
}
// Implicit member accesses have no base expression, so the generic
// expression data and arrow flag are only added for explicit accesses.
void StmtProfiler::VisitCXXDependentScopeMemberExpr(
    const CXXDependentScopeMemberExpr *S) {
  ID.AddBoolean(S->isImplicitAccess());
  if (!S->isImplicitAccess()) {
    VisitExpr(S);
    ID.AddBoolean(S->isArrow());
  }
  VisitNestedNameSpecifier(S->getQualifier());
  VisitName(S->getMember());
  ID.AddBoolean(S->hasExplicitTemplateArgs());
  if (S->hasExplicitTemplateArgs())
    VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
}
void StmtProfiler::VisitUnresolvedMemberExpr(const UnresolvedMemberExpr *S) {
  ID.AddBoolean(S->isImplicitAccess());
  if (!S->isImplicitAccess()) {
    VisitExpr(S);
    ID.AddBoolean(S->isArrow());
  }
  VisitNestedNameSpecifier(S->getQualifier());
  VisitName(S->getMemberName());
  ID.AddBoolean(S->hasExplicitTemplateArgs());
  if (S->hasExplicitTemplateArgs())
    VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
}
void StmtProfiler::VisitCXXNoexceptExpr(const CXXNoexceptExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitPackExpansionExpr(const PackExpansionExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitSizeOfPackExpr(const SizeOfPackExpr *S) {
  VisitExpr(S);
  VisitDecl(S->getPack());
}
void StmtProfiler::VisitSubstNonTypeTemplateParmPackExpr(
    const SubstNonTypeTemplateParmPackExpr *S) {
  VisitExpr(S);
  VisitDecl(S->getParameterPack());
  VisitTemplateArgument(S->getArgumentPack());
}
void StmtProfiler::VisitSubstNonTypeTemplateParmExpr(
    const SubstNonTypeTemplateParmExpr *E) {
  // Profile exactly as the replacement expression.
  Visit(E->getReplacement());
}
void StmtProfiler::VisitFunctionParmPackExpr(const FunctionParmPackExpr *S) {
  VisitExpr(S);
  VisitDecl(S->getParameterPack());
  ID.AddInteger(S->getNumExpansions());
  for (FunctionParmPackExpr::iterator I = S->begin(), E = S->end(); I != E; ++I)
    VisitDecl(*I);
}
void StmtProfiler::VisitMaterializeTemporaryExpr(
    const MaterializeTemporaryExpr *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitCXXFoldExpr(const CXXFoldExpr *S) {
  VisitExpr(S);
  ID.AddInteger(S->getOperator());
}
void StmtProfiler::VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
  VisitExpr(E);
}
void StmtProfiler::VisitTypoExpr(const TypoExpr *E) {
  VisitExpr(E);
}
// Objective-C expression visitors.
void StmtProfiler::VisitObjCStringLiteral(const ObjCStringLiteral *S) {
  VisitExpr(S);
}
void StmtProfiler::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
  VisitExpr(E);
}
void StmtProfiler::VisitObjCArrayLiteral(const ObjCArrayLiteral *E) {
  VisitExpr(E);
}
void StmtProfiler::VisitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E) {
  VisitExpr(E);
}
void StmtProfiler::VisitObjCEncodeExpr(const ObjCEncodeExpr *S) {
  VisitExpr(S);
  VisitType(S->getEncodedType());
}
void StmtProfiler::VisitObjCSelectorExpr(const ObjCSelectorExpr *S) {
  VisitExpr(S);
  VisitName(S->getSelector());
}
void StmtProfiler::VisitObjCProtocolExpr(const ObjCProtocolExpr *S) {
  VisitExpr(S);
  VisitDecl(S->getProtocol());
}
void StmtProfiler::VisitObjCIvarRefExpr(const ObjCIvarRefExpr *S) {
  VisitExpr(S);
  VisitDecl(S->getDecl());
  ID.AddBoolean(S->isArrow());
  ID.AddBoolean(S->isFreeIvar());
}
void StmtProfiler::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *S) {
  VisitExpr(S);
  if (S->isImplicitProperty()) {
    VisitDecl(S->getImplicitPropertyGetter());
    VisitDecl(S->getImplicitPropertySetter());
  } else {
    VisitDecl(S->getExplicitProperty());
  }
  if (S->isSuperReceiver()) {
    ID.AddBoolean(S->isSuperReceiver());
    VisitType(S->getSuperReceiverType());
  }
}
void StmtProfiler::VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *S) {
  VisitExpr(S);
  VisitDecl(S->getAtIndexMethodDecl());
  // NOTE(review): setAtIndexMethodDecl() appears to be an accessor returning
  // the 'set' method, not a mutator -- confirm against ObjCSubscriptRefExpr.
  VisitDecl(S->setAtIndexMethodDecl());
}
void StmtProfiler::VisitObjCMessageExpr(const ObjCMessageExpr *S) {
  VisitExpr(S);
  VisitName(S->getSelector());
  VisitDecl(S->getMethodDecl());
}
void StmtProfiler::VisitObjCIsaExpr(const ObjCIsaExpr *S) {
  VisitExpr(S);
  ID.AddBoolean(S->isArrow());
}
void StmtProfiler::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *S) {
  VisitExpr(S);
  ID.AddBoolean(S->getValue());
}
void StmtProfiler::VisitObjCIndirectCopyRestoreExpr(
    const ObjCIndirectCopyRestoreExpr *S) {
  VisitExpr(S);
  ID.AddBoolean(S->shouldCopy());
}
// Profile an ObjC bridged cast: the explicit-cast data plus the bridge kind.
void StmtProfiler::VisitObjCBridgedCastExpr(const ObjCBridgedCastExpr *S) {
  VisitExplicitCastExpr(S);
  // Use AddInteger rather than AddBoolean: there are more than two bridge
  // kinds, and collapsing to a boolean would make the non-zero kinds (e.g.
  // __bridge_transfer vs. __bridge_retained) profile identically.
  ID.AddInteger(S->getBridgeKind());
}
// Profile a reference to a declaration.  In canonical mode, template and
// function parameters are profiled structurally (depth/index/type) so that
// equivalent parameters in distinct declarations profile alike; everything
// else is profiled by its canonical declaration pointer.
void StmtProfiler::VisitDecl(const Decl *D) {
  // A null declaration profiles as kind 0.
  ID.AddInteger(D? D->getKind() : 0);
  if (Canonical && D) {
    if (const NonTypeTemplateParmDecl *NTTP =
          dyn_cast<NonTypeTemplateParmDecl>(D)) {
      ID.AddInteger(NTTP->getDepth());
      ID.AddInteger(NTTP->getIndex());
      ID.AddBoolean(NTTP->isParameterPack());
      VisitType(NTTP->getType());
      return;
    }
    if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D)) {
      // The Itanium C++ ABI uses the type, scope depth, and scope
      // index of a parameter when mangling expressions that involve
      // function parameters, so we will use the parameter's type for
      // establishing function parameter identity. That way, our
      // definition of "equivalent" (per C++ [temp.over.link]) is at
      // least as strong as the definition of "equivalent" used for
      // name mangling.
      VisitType(Parm->getType());
      ID.AddInteger(Parm->getFunctionScopeDepth());
      ID.AddInteger(Parm->getFunctionScopeIndex());
      return;
    }
    if (const TemplateTypeParmDecl *TTP =
          dyn_cast<TemplateTypeParmDecl>(D)) {
      ID.AddInteger(TTP->getDepth());
      ID.AddInteger(TTP->getIndex());
      ID.AddBoolean(TTP->isParameterPack());
      return;
    }
    if (const TemplateTemplateParmDecl *TTP =
          dyn_cast<TemplateTemplateParmDecl>(D)) {
      ID.AddInteger(TTP->getDepth());
      ID.AddInteger(TTP->getIndex());
      ID.AddBoolean(TTP->isParameterPack());
      return;
    }
  }
  ID.AddPointer(D? D->getCanonicalDecl() : nullptr);
}
// Profile a type by its opaque pointer; canonicalize first when profiling
// canonically so spelling differences do not affect the profile.
void StmtProfiler::VisitType(QualType T) {
  ID.AddPointer(Canonical ? Context.getCanonicalType(T).getAsOpaquePtr()
                          : T.getAsOpaquePtr());
}
// Profile a declaration name by its opaque representation.
void StmtProfiler::VisitName(DeclarationName Name) {
  ID.AddPointer(Name.getAsOpaquePtr());
}
// Profile a nested-name-specifier, canonicalizing first when requested.
void StmtProfiler::VisitNestedNameSpecifier(NestedNameSpecifier *NNS) {
  if (Canonical)
    NNS = Context.getCanonicalNestedNameSpecifier(NNS);
  ID.AddPointer(NNS);
}
// Profile a template name, canonicalizing first when requested.
void StmtProfiler::VisitTemplateName(TemplateName Name) {
  if (Canonical)
    Name = Context.getCanonicalTemplateName(Name);
  Name.Profile(ID);
}
void StmtProfiler::VisitTemplateArguments(const TemplateArgumentLoc *Args,
unsigned NumArgs) {
ID.AddInteger(NumArgs);
for (unsigned I = 0; I != NumArgs; ++I)
VisitTemplateArgument(Args[I].getArgument());
}
// Profile a single template argument: its kind tag followed by kind-specific
// data, mirroring TemplateArgument::Profile but routed through the
// (possibly canonicalizing) visitors above.
void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
  // Mostly repetitive with TemplateArgument::Profile!
  ID.AddInteger(Arg.getKind());
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    break;
  case TemplateArgument::Type:
    VisitType(Arg.getAsType());
    break;
  case TemplateArgument::Template:
  case TemplateArgument::TemplateExpansion:
    VisitTemplateName(Arg.getAsTemplateOrTemplatePattern());
    break;
  case TemplateArgument::Declaration:
    VisitDecl(Arg.getAsDecl());
    break;
  case TemplateArgument::NullPtr:
    VisitType(Arg.getNullPtrType());
    break;
  case TemplateArgument::Integral:
    // The value alone is ambiguous without its type.
    Arg.getAsIntegral().Profile(ID);
    VisitType(Arg.getIntegralType());
    break;
  case TemplateArgument::Expression:
    Visit(Arg.getAsExpr());
    break;
  case TemplateArgument::Pack:
    for (const auto &P : Arg.pack_elements())
      VisitTemplateArgument(P);
    break;
  }
}
// Entry point: fold this statement's structure into ID, canonicalizing
// types/names first when Canonical is set.
void Stmt::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
                   bool Canonical) const {
  StmtProfiler(ID, Context, Canonical).Visit(this);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/VTTBuilder.cpp | //===--- VTTBuilder.cpp - C++ VTT layout builder --------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with generation of the layout of virtual table
// tables (VTT).
//
//===----------------------------------------------------------------------===//
#include "clang/AST/VTTBuilder.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/Support/Format.h"
#include <algorithm>
#include <cstdio>
using namespace clang;
#define DUMP_OVERRIDERS 0
// Construct and immediately lay out the VTT for MostDerivedClass.  When
// GenerateDefinition is false, components are recorded as placeholders (see
// AddVTablePointer), which is sufficient for declarations.
VTTBuilder::VTTBuilder(ASTContext &Ctx,
                       const CXXRecordDecl *MostDerivedClass,
                       bool GenerateDefinition)
  : Ctx(Ctx), MostDerivedClass(MostDerivedClass),
  MostDerivedClassLayout(Ctx.getASTRecordLayout(MostDerivedClass)),
  GenerateDefinition(GenerateDefinition) {
  // Lay out this VTT.
  LayoutVTT(BaseSubobject(MostDerivedClass, CharUnits::Zero()),
            /*BaseIsVirtual=*/false);
}
// Append a VTT component for the vtable pointer of the given base subobject.
void VTTBuilder::AddVTablePointer(BaseSubobject Base, uint64_t VTableIndex,
                                  const CXXRecordDecl *VTableClass) {
  // Store the vtable pointer index if we're generating the primary VTT.
  if (VTableClass == MostDerivedClass) {
    assert(!SecondaryVirtualPointerIndices.count(Base) &&
           "A virtual pointer index already exists for this base subobject!");
    // The component is appended below, so its index is the current size.
    SecondaryVirtualPointerIndices[Base] = VTTComponents.size();
  }
  if (!GenerateDefinition) {
    // Declaration-only layout records a placeholder component.
    VTTComponents.push_back(VTTComponent());
    return;
  }
  VTTComponents.push_back(VTTComponent(VTableIndex, Base));
}
// Lay out the secondary VTTs: one for each direct non-virtual base of Base.
void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
  const CXXRecordDecl *RD = Base.getBase();
  // RD's layout is loop-invariant; fetch it once instead of per base.
  const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
  for (const auto &I : RD->bases()) {
    // Don't layout virtual bases.
    if (I.isVirtual())
        continue;
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
    CharUnits BaseOffset = Base.getBaseOffset() +
      Layout.getBaseClassOffset(BaseDecl);
    // Layout the VTT for this base.
    LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/false);
  }
}
// Recursively lay out the secondary virtual pointers for Base and the bases
// reachable from it, per Itanium C++ ABI 2.6.2.  VBases tracks virtual bases
// already handled so each receives at most one pointer; VTableIndex selects
// which vtable (within the VTT) the pointers refer to.
void
VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
                                           bool BaseIsMorallyVirtual,
                                           uint64_t VTableIndex,
                                           const CXXRecordDecl *VTableClass,
                                           VisitedVirtualBasesSetTy &VBases) {
  const CXXRecordDecl *RD = Base.getBase();
  // We're not interested in bases that don't have virtual bases, and not
  // morally virtual bases.
  if (!RD->getNumVBases() && !BaseIsMorallyVirtual)
    return;
  for (const auto &I : RD->bases()) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
    // Itanium C++ ABI 2.6.2:
    //   Secondary virtual pointers are present for all bases with either
    //   virtual bases or virtual function declarations overridden along a
    //   virtual path.
    //
    // If the base class is not dynamic, we don't want to add it, nor any
    // of its base classes.
    if (!BaseDecl->isDynamicClass())
      continue;
    bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual;
    bool BaseDeclIsNonVirtualPrimaryBase = false;
    CharUnits BaseOffset;
    if (I.isVirtual()) {
      // Ignore virtual bases that we've already visited.
      if (!VBases.insert(BaseDecl).second)
        continue;
      // Virtual bases are located at their offset in the most-derived class.
      BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
      BaseDeclIsMorallyVirtual = true;
    } else {
      const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
      BaseOffset = Base.getBaseOffset() +
        Layout.getBaseClassOffset(BaseDecl);
      if (!Layout.isPrimaryBaseVirtual() &&
          Layout.getPrimaryBase() == BaseDecl)
        BaseDeclIsNonVirtualPrimaryBase = true;
    }
    // Itanium C++ ABI 2.6.2:
    //   Secondary virtual pointers: for each base class X which (a) has virtual
    //   bases or is reachable along a virtual path from D, and (b) is not a
    //   non-virtual primary base, the address of the virtual table for X-in-D
    //   or an appropriate construction virtual table.
    if (!BaseDeclIsNonVirtualPrimaryBase &&
        (BaseDecl->getNumVBases() || BaseDeclIsMorallyVirtual)) {
      // Add the vtable pointer.
      AddVTablePointer(BaseSubobject(BaseDecl, BaseOffset), VTableIndex,
                       VTableClass);
    }
    // And lay out the secondary virtual pointers for the base class.
    LayoutSecondaryVirtualPointers(BaseSubobject(BaseDecl, BaseOffset),
                                   BaseDeclIsMorallyVirtual, VTableIndex,
                                   VTableClass, VBases);
  }
}
/// Entry point: lay out all secondary virtual pointers for the given base
/// subobject, starting with an empty visited-virtual-bases set and with the
/// base itself considered non-morally-virtual.
void
VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
uint64_t VTableIndex) {
VisitedVirtualBasesSetTy VBases;
LayoutSecondaryVirtualPointers(Base, /*BaseIsMorallyVirtual=*/false,
VTableIndex, Base.getBase(), VBases);
}
/// Lay out the VTTs for all virtual bases (direct and indirect) of RD,
/// visiting each virtual base only once via the VBases set.
void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
VisitedVirtualBasesSetTy &VBases) {
for (const auto &I : RD->bases()) {
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
// Check if this is a virtual base.
if (I.isVirtual()) {
// Check if we've seen this base before.
if (!VBases.insert(BaseDecl).second)
continue;
CharUnits BaseOffset =
MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/true);
}
// We only need to layout virtual VTTs for this base if it actually has
// virtual bases.
if (BaseDecl->getNumVBases())
LayoutVirtualVTTs(BaseDecl, VBases);
}
}
/// Lay out the VTT (or sub-VTT) for the given base subobject: primary vtable
/// pointer, then secondary VTTs, then secondary virtual pointers, and — for
/// the primary VTT only — the virtual VTTs.
void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) {
const CXXRecordDecl *RD = Base.getBase();
// Itanium C++ ABI 2.6.2:
// An array of virtual table addresses, called the VTT, is declared for
// each class type that has indirect or direct virtual base classes.
if (RD->getNumVBases() == 0)
return;
bool IsPrimaryVTT = Base.getBase() == MostDerivedClass;
if (!IsPrimaryVTT) {
// Remember the sub-VTT index.
SubVTTIndicies[Base] = VTTComponents.size();
}
// Record the vtable this (sub-)VTT's pointers will refer into.
uint64_t VTableIndex = VTTVTables.size();
VTTVTables.push_back(VTTVTable(Base, BaseIsVirtual));
// Add the primary vtable pointer.
AddVTablePointer(Base, VTableIndex, RD);
// Add the secondary VTTs.
LayoutSecondaryVTTs(Base);
// Add the secondary virtual pointers.
LayoutSecondaryVirtualPointers(Base, VTableIndex);
// If this is the primary VTT, we want to lay out virtual VTTs as well.
if (IsPrimaryVTT) {
VisitedVirtualBasesSetTy VBases;
LayoutVirtualVTTs(Base.getBase(), VBases);
}
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/ASTContext.cpp | //===--- ASTContext.cpp - Context to hold long-lived AST nodes ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/MathExtras.h" // HLSL Change
#include <map>
using namespace clang;
unsigned ASTContext::NumImplicitDefaultConstructors;
unsigned ASTContext::NumImplicitDefaultConstructorsDeclared;
unsigned ASTContext::NumImplicitCopyConstructors;
unsigned ASTContext::NumImplicitCopyConstructorsDeclared;
unsigned ASTContext::NumImplicitMoveConstructors;
unsigned ASTContext::NumImplicitMoveConstructorsDeclared;
unsigned ASTContext::NumImplicitCopyAssignmentOperators;
unsigned ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
unsigned ASTContext::NumImplicitMoveAssignmentOperators;
unsigned ASTContext::NumImplicitMoveAssignmentOperatorsDeclared;
unsigned ASTContext::NumImplicitDestructors;
unsigned ASTContext::NumImplicitDestructorsDeclared;
/// Floating point conversion ranks, ordered from lowest to highest rank.
enum FloatingRank {
LitFloatRank, Min10FloatRank, Min16FloatRank, HalfRank, HalfFloatRank, FloatRank, DoubleRank, LongDoubleRank // HLSL Change - adds LitFloatRank, Min10FloatRank, HalfFloat, and Min16FloatRank
};
/// Find the documentation comment physically attached to declaration D,
/// without consulting the cache or any redeclarations. Returns null when D
/// cannot carry documentation (implicit, instantiation, parameter, ...) or
/// when no suitable comment precedes (or trails) its location.
RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
if (!CommentsLoaded && ExternalSource) {
ExternalSource->ReadComments();
#ifndef NDEBUG
ArrayRef<RawComment *> RawComments = Comments.getComments();
assert(std::is_sorted(RawComments.begin(), RawComments.end(),
BeforeThanCompare<RawComment>(SourceMgr)));
#endif
CommentsLoaded = true;
}
assert(D);
// User can not attach documentation to implicit declarations.
if (D->isImplicit())
return nullptr;
// User can not attach documentation to implicit instantiations.
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
return nullptr;
}
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
if (VD->isStaticDataMember() &&
VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
return nullptr;
}
if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) {
if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
return nullptr;
}
if (const ClassTemplateSpecializationDecl *CTSD =
dyn_cast<ClassTemplateSpecializationDecl>(D)) {
TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
if (TSK == TSK_ImplicitInstantiation ||
TSK == TSK_Undeclared)
return nullptr;
}
if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
return nullptr;
}
if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
// When tag declaration (but not definition!) is part of the
// decl-specifier-seq of some other declaration, it doesn't get comment
if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
return nullptr;
}
// TODO: handle comments for function parameters properly.
if (isa<ParmVarDecl>(D))
return nullptr;
// TODO: we could look up template parameter documentation in the template
// documentation.
if (isa<TemplateTypeParmDecl>(D) ||
isa<NonTypeTemplateParmDecl>(D) ||
isa<TemplateTemplateParmDecl>(D))
return nullptr;
ArrayRef<RawComment *> RawComments = Comments.getComments();
// If there are no comments anywhere, we won't find anything.
if (RawComments.empty())
return nullptr;
// Find declaration location.
// For Objective-C declarations we generally don't expect to have multiple
// declarators, thus use declaration starting location as the "declaration
// location".
// For all other declarations multiple declarators are used quite frequently,
// so we use the location of the identifier as the "declaration location".
SourceLocation DeclLoc;
if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
isa<ObjCPropertyDecl>(D) ||
isa<RedeclarableTemplateDecl>(D) ||
isa<ClassTemplateSpecializationDecl>(D))
DeclLoc = D->getLocStart();
else {
DeclLoc = D->getLocation();
if (DeclLoc.isMacroID()) {
if (isa<TypedefDecl>(D)) {
// If location of the typedef name is in a macro, it is because being
// declared via a macro. Try using declaration's starting location as
// the "declaration location".
DeclLoc = D->getLocStart();
} else if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
// If location of the tag decl is inside a macro, but the spelling of
// the tag name comes from a macro argument, it looks like a special
// macro like NS_ENUM is being used to define the tag decl. In that
// case, adjust the source location to the expansion loc so that we can
// attach the comment to the tag decl.
if (SourceMgr.isMacroArgExpansion(DeclLoc) &&
TD->isCompleteDefinition())
DeclLoc = SourceMgr.getExpansionLoc(DeclLoc);
}
}
}
// If the declaration doesn't map directly to a location in a file, we
// can't find the comment.
if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
return nullptr;
// Find the comment that occurs just after this declaration.
ArrayRef<RawComment *>::iterator Comment;
{
// When searching for comments during parsing, the comment we are looking
// for is usually among the last two comments we parsed -- check them
// first.
RawComment CommentAtDeclLoc(
SourceMgr, SourceRange(DeclLoc), false,
LangOpts.CommentOpts.ParseAllComments);
BeforeThanCompare<RawComment> Compare(SourceMgr);
ArrayRef<RawComment *>::iterator MaybeBeforeDecl = RawComments.end() - 1;
bool Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
if (!Found && RawComments.size() >= 2) {
MaybeBeforeDecl--;
Found = Compare(*MaybeBeforeDecl, &CommentAtDeclLoc);
}
if (Found) {
// Fast path succeeded; verify it agrees with a binary search in debug
// builds.
Comment = MaybeBeforeDecl + 1;
assert(Comment == std::lower_bound(RawComments.begin(), RawComments.end(),
&CommentAtDeclLoc, Compare));
} else {
// Slow path.
Comment = std::lower_bound(RawComments.begin(), RawComments.end(),
&CommentAtDeclLoc, Compare);
}
}
// Decompose the location for the declaration and find the beginning of the
// file buffer.
std::pair<FileID, unsigned> DeclLocDecomp = SourceMgr.getDecomposedLoc(DeclLoc);
// First check whether we have a trailing comment.
if (Comment != RawComments.end() &&
(*Comment)->isDocumentation() && (*Comment)->isTrailingComment() &&
(isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {
std::pair<FileID, unsigned> CommentBeginDecomp
= SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getBegin());
// Check that Doxygen trailing comment comes after the declaration, starts
// on the same line and in the same file as the declaration.
if (DeclLocDecomp.first == CommentBeginDecomp.first &&
SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second)
== SourceMgr.getLineNumber(CommentBeginDecomp.first,
CommentBeginDecomp.second)) {
return *Comment;
}
}
// The comment just after the declaration was not a trailing comment.
// Let's look at the previous comment.
if (Comment == RawComments.begin())
return nullptr;
--Comment;
// Check that we actually have a non-member Doxygen comment.
if (!(*Comment)->isDocumentation() || (*Comment)->isTrailingComment())
return nullptr;
// Decompose the end of the comment.
std::pair<FileID, unsigned> CommentEndDecomp
= SourceMgr.getDecomposedLoc((*Comment)->getSourceRange().getEnd());
// If the comment and the declaration aren't in the same file, then they
// aren't related.
if (DeclLocDecomp.first != CommentEndDecomp.first)
return nullptr;
// Get the corresponding buffer.
bool Invalid = false;
const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
&Invalid).data();
if (Invalid)
return nullptr;
// Extract text between the comment and declaration.
StringRef Text(Buffer + CommentEndDecomp.second,
DeclLocDecomp.second - CommentEndDecomp.second);
// There should be no other declarations or preprocessor directives between
// comment and declaration.
if (Text.find_first_of(";{}#@") != StringRef::npos)
return nullptr;
return *Comment;
}
namespace {
/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
/// Used so documentation lookups on instantiations find the template's
/// comment; returns D unchanged when no adjustment applies.
const Decl *adjustDeclToTemplate(const Decl *D) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// Is this function declaration part of a function template?
if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
return FTD;
// Nothing to do if function is not an implicit instantiation.
if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
return D;
// Function is an implicit instantiation of a function template?
if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
return FTD;
// Function is instantiated from a member definition of a class template?
if (const FunctionDecl *MemberDecl =
FD->getInstantiatedFromMemberFunction())
return MemberDecl;
return D;
}
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
// Static data member is instantiated from a member definition of a class
// template?
if (VD->isStaticDataMember())
if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
return MemberDecl;
return D;
}
if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(D)) {
// Is this class declaration part of a class template?
if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
return CTD;
// Class is an implicit instantiation of a class template or partial
// specialization?
if (const ClassTemplateSpecializationDecl *CTSD =
dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
return D;
llvm::PointerUnion<ClassTemplateDecl *,
ClassTemplatePartialSpecializationDecl *>
PU = CTSD->getSpecializedTemplateOrPartial();
return PU.is<ClassTemplateDecl*>() ?
static_cast<const Decl*>(PU.get<ClassTemplateDecl *>()) :
static_cast<const Decl*>(
PU.get<ClassTemplatePartialSpecializationDecl *>());
}
// Class is instantiated from a member definition of a class template?
if (const MemberSpecializationInfo *Info =
CRD->getMemberSpecializationInfo())
return Info->getInstantiatedFrom();
return D;
}
if (const EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
// Enum is instantiated from a member definition of a class template?
if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
return MemberDecl;
return D;
}
// FIXME: Adjust alias templates?
return D;
}
} // unnamed namespace
/// Return the documentation comment attached to any redeclaration of D,
/// consulting and updating the per-declaration comment cache. If a comment is
/// found, OriginalDecl (when non-null) receives the redeclaration it was
/// physically attached to.
const RawComment *ASTContext::getRawCommentForAnyRedecl(
const Decl *D,
const Decl **OriginalDecl) const {
D = adjustDeclToTemplate(D);
// Check whether we have cached a comment for this declaration already.
{
llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
RedeclComments.find(D);
if (Pos != RedeclComments.end()) {
const RawCommentAndCacheFlags &Raw = Pos->second;
if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
if (OriginalDecl)
*OriginalDecl = Raw.getOriginalDecl();
return Raw.getRaw();
}
}
}
// Search for comments attached to declarations in the redeclaration chain.
const RawComment *RC = nullptr;
const Decl *OriginalDeclForRC = nullptr;
for (auto I : D->redecls()) {
llvm::DenseMap<const Decl *, RawCommentAndCacheFlags>::iterator Pos =
RedeclComments.find(I);
if (Pos != RedeclComments.end()) {
const RawCommentAndCacheFlags &Raw = Pos->second;
if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) {
RC = Raw.getRaw();
OriginalDeclForRC = Raw.getOriginalDecl();
break;
}
} else {
// Not cached yet: do the (expensive) uncached lookup and cache the
// result for this redeclaration, whether positive or negative.
RC = getRawCommentForDeclNoCache(I);
OriginalDeclForRC = I;
RawCommentAndCacheFlags Raw;
if (RC) {
Raw.setKind(RawCommentAndCacheFlags::FromDecl);
Raw.setRaw(RC);
} else
Raw.setKind(RawCommentAndCacheFlags::NoCommentInDecl);
Raw.setOriginalDecl(I);
RedeclComments[I] = Raw;
if (RC)
break;
}
}
// If we found a comment, it should be a documentation comment.
assert(!RC || RC->isDocumentation());
if (OriginalDecl)
*OriginalDecl = OriginalDeclForRC;
// Update cache for every declaration in the redeclaration chain.
RawCommentAndCacheFlags Raw;
Raw.setRaw(RC);
Raw.setKind(RawCommentAndCacheFlags::FromRedecl);
Raw.setOriginalDecl(OriginalDeclForRC);
for (auto I : D->redecls()) {
RawCommentAndCacheFlags &R = RedeclComments[I];
if (R.getKind() == RawCommentAndCacheFlags::NoCommentInDecl)
R = Raw;
}
return RC;
}
static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
SmallVectorImpl<const NamedDecl *> &Redeclared) {
const DeclContext *DC = ObjCMethod->getDeclContext();
if (const ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(DC)) {
const ObjCInterfaceDecl *ID = IMD->getClassInterface();
if (!ID)
return;
// Add redeclared method here.
for (const auto *Ext : ID->known_extensions()) {
if (ObjCMethodDecl *RedeclaredMethod =
Ext->getMethod(ObjCMethod->getSelector(),
ObjCMethod->isInstanceMethod()))
Redeclared.push_back(RedeclaredMethod);
}
}
}
/// Clone the parsed comment FC so it can be attached to declaration D (which
/// may be a different redeclaration than the one FC was parsed against).
comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
const Decl *D) const {
comments::DeclInfo *ThisDeclInfo = new (*this) comments::DeclInfo;
// Fill the DeclInfo from D first, then point CommentDecl back at the
// comment's original declaration; the fill() call must see D.
ThisDeclInfo->CommentDecl = D;
ThisDeclInfo->IsFilled = false;
ThisDeclInfo->fill();
ThisDeclInfo->CommentDecl = FC->getDecl();
// Inherit template parameter info from the original comment if D itself
// has none.
if (!ThisDeclInfo->TemplateParameters)
ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
comments::FullComment *CFC =
new (*this) comments::FullComment(FC->getBlocks(),
ThisDeclInfo);
return CFC;
}
/// Parse and return the comment physically attached to D, bypassing the
/// parsed-comment cache; returns null when D has no attached comment.
comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
if (const RawComment *RC = getRawCommentForDeclNoCache(D))
return RC->parse(*this, nullptr, D);
return nullptr;
}
/// Return the parsed documentation comment for D, caching the result. When D
/// itself has no comment, fall back (in order) to: the property backing an
/// accessor, overridden/redeclared methods, a typedef's underlying tag, an
/// ObjC superclass or category interface, and public C++ base classes.
comments::FullComment *ASTContext::getCommentForDecl(
const Decl *D,
const Preprocessor *PP) const {
if (D->isInvalidDecl())
return nullptr;
D = adjustDeclToTemplate(D);
const Decl *Canonical = D->getCanonicalDecl();
llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
ParsedComments.find(Canonical);
if (Pos != ParsedComments.end()) {
// Cache hit; clone if the query was for a non-canonical redeclaration.
if (Canonical != D) {
comments::FullComment *FC = Pos->second;
comments::FullComment *CFC = cloneFullComment(FC, D);
return CFC;
}
return Pos->second;
}
const Decl *OriginalDecl;
const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
if (!RC) {
// No comment anywhere in the redeclaration chain; try related decls.
if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
SmallVector<const NamedDecl*, 8> Overridden;
const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D);
if (OMD && OMD->isPropertyAccessor())
if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
return cloneFullComment(FC, D);
if (OMD)
addRedeclaredMethods(OMD, Overridden);
getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
for (unsigned i = 0, e = Overridden.size(); i < e; i++)
if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
return cloneFullComment(FC, D);
}
else if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
// Attach any tag type's documentation to its typedef if latter
// does not have one of its own.
QualType QT = TD->getUnderlyingType();
if (const TagType *TT = QT->getAs<TagType>())
if (const Decl *TD = TT->getDecl())
if (comments::FullComment *FC = getCommentForDecl(TD, PP))
return cloneFullComment(FC, D);
}
else if (const ObjCInterfaceDecl *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
// Walk up the superclass chain looking for documentation.
while (IC->getSuperClass()) {
IC = IC->getSuperClass();
if (comments::FullComment *FC = getCommentForDecl(IC, PP))
return cloneFullComment(FC, D);
}
}
else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
if (comments::FullComment *FC = getCommentForDecl(IC, PP))
return cloneFullComment(FC, D);
}
else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
if (!(RD = RD->getDefinition()))
return nullptr;
// Check non-virtual bases.
for (const auto &I : RD->bases()) {
if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
continue;
QualType Ty = I.getType();
if (Ty.isNull())
continue;
if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
if (!(NonVirtualBase= NonVirtualBase->getDefinition()))
continue;
if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
return cloneFullComment(FC, D);
}
}
// Check virtual bases.
for (const auto &I : RD->vbases()) {
if (I.getAccessSpecifier() != AS_public)
continue;
QualType Ty = I.getType();
if (Ty.isNull())
continue;
if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
if (!(VirtualBase= VirtualBase->getDefinition()))
continue;
if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
return cloneFullComment(FC, D);
}
}
}
return nullptr;
}
// If the RawComment was attached to other redeclaration of this Decl, we
// should parse the comment in context of that other Decl. This is important
// because comments can contain references to parameter names which can be
// different across redeclarations.
if (D != OriginalDecl)
return getCommentForDecl(OriginalDecl, PP);
comments::FullComment *FC = RC->parse(*this, PP, D);
ParsedComments[Canonical] = FC;
return FC;
}
/// Profile a template template parameter for the canonicalization folding
/// set. Each nested parameter is tagged with a discriminator (0 = type,
/// 1 = non-type, 2 = template template) followed by its identity-relevant
/// properties.
void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
TemplateTemplateParmDecl *Parm) {
ID.AddInteger(Parm->getDepth());
ID.AddInteger(Parm->getPosition());
ID.AddBoolean(Parm->isParameterPack());
TemplateParameterList *Params = Parm->getTemplateParameters();
ID.AddInteger(Params->size());
for (TemplateParameterList::const_iterator P = Params->begin(),
PEnd = Params->end();
P != PEnd; ++P) {
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
ID.AddInteger(0);
ID.AddBoolean(TTP->isParameterPack());
continue;
}
if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
ID.AddInteger(1);
ID.AddBoolean(NTTP->isParameterPack());
ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
if (NTTP->isExpandedParameterPack()) {
ID.AddBoolean(true);
ID.AddInteger(NTTP->getNumExpansionTypes());
for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
QualType T = NTTP->getExpansionType(I);
ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
}
} else
ID.AddBoolean(false);
continue;
}
// Template template parameters are profiled recursively.
TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
ID.AddInteger(2);
Profile(ID, TTP);
}
}
/// Return the canonical template template parameter corresponding to TTP,
/// creating (and interning) one with a fully canonicalized template parameter
/// list if none exists yet.
TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
TemplateTemplateParmDecl *TTP) const {
// Check if we already have a canonical template template parameter.
llvm::FoldingSetNodeID ID;
CanonicalTemplateTemplateParm::Profile(ID, TTP);
void *InsertPos = nullptr;
CanonicalTemplateTemplateParm *Canonical
= CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
if (Canonical)
return Canonical->getParam();
// Build a canonical template parameter list.
TemplateParameterList *Params = TTP->getTemplateParameters();
SmallVector<NamedDecl *, 4> CanonParams;
CanonParams.reserve(Params->size());
for (TemplateParameterList::const_iterator P = Params->begin(),
PEnd = Params->end();
P != PEnd; ++P) {
// Canonical parameters are unnamed and stripped of source locations.
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
CanonParams.push_back(
TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
SourceLocation(),
SourceLocation(),
TTP->getDepth(),
TTP->getIndex(), nullptr, false,
TTP->isParameterPack()));
else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(*P)) {
QualType T = getCanonicalType(NTTP->getType());
TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
NonTypeTemplateParmDecl *Param;
if (NTTP->isExpandedParameterPack()) {
// Canonicalize each expansion type of an expanded pack.
SmallVector<QualType, 2> ExpandedTypes;
SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
ExpandedTInfos.push_back(
getTrivialTypeSourceInfo(ExpandedTypes.back()));
}
Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
SourceLocation(),
SourceLocation(),
NTTP->getDepth(),
NTTP->getPosition(), nullptr,
T,
TInfo,
ExpandedTypes.data(),
ExpandedTypes.size(),
ExpandedTInfos.data());
} else {
Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
SourceLocation(),
SourceLocation(),
NTTP->getDepth(),
NTTP->getPosition(), nullptr,
T,
NTTP->isParameterPack(),
TInfo);
}
CanonParams.push_back(Param);
} else
CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
cast<TemplateTemplateParmDecl>(*P)));
}
TemplateTemplateParmDecl *CanonTTP
= TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
SourceLocation(), TTP->getDepth(),
TTP->getPosition(),
TTP->isParameterPack(),
nullptr,
TemplateParameterList::Create(*this, SourceLocation(),
SourceLocation(),
CanonParams.data(),
CanonParams.size(),
SourceLocation()));
// Get the new insert position for the node we care about.
Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
assert(!Canonical && "Shouldn't be in the map!");
(void)Canonical;
// Create the canonical template template parameter entry.
Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
return CanonTTP;
}
/// Create the C++ ABI object appropriate for the given target, or null when
/// not compiling C++.
CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
if (!LangOpts.CPlusPlus) return nullptr;
switch (T.getCXXABI().getKind()) {
case TargetCXXABI::GenericARM: // Same as Itanium at this level
case TargetCXXABI::iOS:
case TargetCXXABI::iOS64:
case TargetCXXABI::GenericAArch64:
case TargetCXXABI::GenericMIPS:
case TargetCXXABI::GenericItanium:
return CreateItaniumCXXABI(*this);
case TargetCXXABI::Microsoft:
return CreateMicrosoftCXXABI(*this);
}
llvm_unreachable("Invalid CXXABI type!");
}
/// Select the language-to-target address space map: the target's real map,
/// or a distinct "fake" map when -ffake-address-space-map is in effect.
static const LangAS::Map *getAddressSpaceMap(const TargetInfo &T,
const LangOptions &LOpts) {
if (!LOpts.FakeAddressSpaceMap)
return &T.getAddressSpaceMap();
// The fake address space map must have a distinct entry for each
// language-specific address space.
static const unsigned FakeAddrSpaceMap[] = {
1, // opencl_global
2, // opencl_local
3, // opencl_constant
4, // opencl_generic
5, // cuda_device
6, // cuda_constant
7 // cuda_shared
};
return &FakeAddrSpaceMap;
}
/// Decide whether address-space-map mangling is enabled, honoring an
/// explicit on/off option and otherwise deferring to the target.
static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
const LangOptions &LangOpts) {
switch (LangOpts.getAddressSpaceMapMangling()) {
case LangOptions::ASMM_On:
return true;
case LangOptions::ASMM_Off:
return false;
case LangOptions::ASMM_Target:
// No explicit request; use the target's preference.
return TI.useAddressSpaceMapMangling();
}
llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}
/// Construct an ASTContext. Most lazily-created singleton declarations start
/// out null; the translation unit declaration is created eagerly.
ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
IdentifierTable &idents, SelectorTable &sels,
Builtin::Context &builtins)
: FunctionProtoTypes(this_()), TemplateSpecializationTypes(this_()),
DependentTemplateSpecializationTypes(this_()),
SubstTemplateTemplateParmPacks(this_()),
GlobalNestedNameSpecifier(nullptr), Int128Decl(nullptr),
UInt128Decl(nullptr), Float128StubDecl(nullptr),
BuiltinVaListDecl(nullptr), ObjCIdDecl(nullptr), ObjCSelDecl(nullptr),
ObjCClassDecl(nullptr), ObjCProtocolClassDecl(nullptr), BOOLDecl(nullptr),
CFConstantStringTypeDecl(nullptr), ObjCInstanceTypeDecl(nullptr),
FILEDecl(nullptr), jmp_bufDecl(nullptr), sigjmp_bufDecl(nullptr),
ucontext_tDecl(nullptr), BlockDescriptorType(nullptr),
BlockDescriptorExtendedType(nullptr), cudaConfigureCallDecl(nullptr),
FirstLocalImport(), LastLocalImport(), ExternCContext(nullptr),
SourceMgr(SM), LangOpts(LOpts),
SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
AddrSpaceMap(nullptr), Target(nullptr), PrintingPolicy(LOpts),
Idents(idents), Selectors(sels), BuiltinInfo(builtins),
DeclarationNames(*this), ExternalSource(nullptr), Listener(nullptr),
Comments(SM), CommentsLoaded(false),
CommentCommandTraits(BumpAlloc, LOpts.CommentOpts), LastSDM(nullptr, 0) {
TUDecl = TranslationUnitDecl::Create(*this);
}
/// Destroy the ASTContext, releasing side tables, running registered
/// deallocation callbacks, and destroying record layouts (which own
/// DenseMaps and so need explicit destruction despite bump allocation).
ASTContext::~ASTContext() {
ReleaseParentMapEntries();
// Release the DenseMaps associated with DeclContext objects.
// FIXME: Is this the ideal solution?
ReleaseDeclContextMaps();
// Call all of the deallocation functions on all of their targets.
for (DeallocationMap::const_iterator I = Deallocations.begin(),
E = Deallocations.end(); I != E; ++I)
for (unsigned J = 0, N = I->second.size(); J != N; ++J)
(I->first)((I->second)[J]);
// ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
// because they can contain DenseMaps.
for (llvm::DenseMap<const ObjCContainerDecl*,
const ASTRecordLayout*>::iterator
I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
// Increment in loop to prevent using deallocated memory.
if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
R->Destroy(*this);
for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
// Increment in loop to prevent using deallocated memory.
if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
R->Destroy(*this);
}
// Attribute vectors are heap-backed; run their destructors explicitly.
for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
AEnd = DeclAttrs.end();
A != AEnd; ++A)
A->second->~AttrVec();
llvm::DeleteContainerSeconds(MangleNumberingContexts);
}
/// Free the heap-allocated values stored in the parent map, if one was ever
/// built. Each entry holds either a single node or a vector of parents.
void ASTContext::ReleaseParentMapEntries() {
if (!AllParents)
return;
for (const auto &Entry : *AllParents) {
if (Entry.second.is<ParentVector *>())
delete Entry.second.get<ParentVector *>();
else
delete Entry.second.get<ast_type_traits::DynTypedNode *>();
}
}
/// Register \p Data to be passed to \p Callback when this ASTContext is
/// destroyed.
void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
auto &Targets = Deallocations[Callback];
Targets.push_back(Data);
}
/// Attach the external AST source (e.g. a PCH/module reader) this context
/// should consult for lazily-deserialized entities.
void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
ExternalSource = Source;
}
/// Print AST memory/usage statistics (type counts, implicit special member
/// counts, external-source stats, allocator stats) to stderr.
void ASTContext::PrintStats() const {
llvm::errs() << "\n*** AST Context Stats:\n";
llvm::errs() << " " << Types.size() << " types total.\n";
unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.def"
0 // Extra
};
for (unsigned i = 0, e = Types.size(); i != e; ++i) {
Type *T = Types[i];
// HLSL Change Starts - guard against overflows; we can probably prove this is fine, but this is not a sensitive (or retail!) codepath
#if 0
counts[(unsigned)T->getTypeClass()]++;
#else
unsigned offset = (unsigned)T->getTypeClass();
if (offset < _countof(counts)) counts[offset]++;
#endif
// HLSL Change Ends
}
unsigned Idx = 0;
unsigned TotalBytes = 0;
#define TYPE(Name, Parent) \
if (counts[Idx]) \
llvm::errs() << " " << counts[Idx] << " " << #Name \
<< " types\n"; \
TotalBytes += counts[Idx] * sizeof(Name##Type); \
++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.def"
llvm::errs() << "Total bytes = " << TotalBytes << "\n";
// Implicit special member functions.
llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
<< NumImplicitDefaultConstructors
<< " implicit default constructors created\n";
llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
<< NumImplicitCopyConstructors
<< " implicit copy constructors created\n";
if (getLangOpts().CPlusPlus)
llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
<< NumImplicitMoveConstructors
<< " implicit move constructors created\n";
llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
<< NumImplicitCopyAssignmentOperators
<< " implicit copy assignment operators created\n";
if (getLangOpts().CPlusPlus)
llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
<< NumImplicitMoveAssignmentOperators
<< " implicit move assignment operators created\n";
llvm::errs() << NumImplicitDestructorsDeclared << "/"
<< NumImplicitDestructors
<< " implicit destructors created\n";
if (ExternalSource) {
llvm::errs() << "\n";
ExternalSource->PrintStats();
}
BumpAlloc.PrintStats();
}
void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  // Announce to the mutation listener (if requested and present) that a
  // hidden definition of ND was redefined in module M.
  if (NotifyListeners) {
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);
  }

  // Without local-visibility tracking the decl simply becomes visible;
  // otherwise remember which module(s) own a definition of it.
  if (!getLangOpts().ModulesLocalVisibility) {
    ND->setHidden(false);
    return;
  }
  MergedDefModules[ND].push_back(M);
}
void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
  // Drop duplicate module entries recorded for ND, keeping the first
  // occurrence of each module and preserving relative order.
  // (Note: the 'Definitons' spelling is part of the public interface.)
  auto It = MergedDefModules.find(ND);
  if (It == MergedDefModules.end())
    return;

  auto &Merged = It->second;
  llvm::DenseSet<Module *> Seen;
  size_t Keep = 0;
  for (size_t I = 0, E = Merged.size(); I != E; ++I) {
    Module *M = Merged[I];
    if (Seen.insert(M).second)
      Merged[Keep++] = M;  // stable compaction in place
  }
  Merged.resize(Keep);
}
ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  // Lazily create the implicit extern "C" context on first request.
  if (ExternCContext)
    return ExternCContext;
  ExternCContext =
      ExternCContextDecl::Create(*this, getTranslationUnitDecl());
  return ExternCContext;
}
RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  // Create an implicit tag decl named Name directly in the translation unit.
  // In C++ a CXXRecordDecl is needed so the record participates in C++
  // semantics; otherwise a plain RecordDecl suffices.
  const SourceLocation NoLoc;
  IdentifierInfo *II = &Idents.get(Name);
  TranslationUnitDecl *TU = getTranslationUnitDecl();

  RecordDecl *NewDecl =
      getLangOpts().CPlusPlus
          ? CXXRecordDecl::Create(*this, TK, TU, NoLoc, NoLoc, II)
          : RecordDecl::Create(*this, TK, TU, NoLoc, NoLoc, II);

  NewDecl->setImplicit();
  // Give the record default type visibility, like other builtin decls.
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}
TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  // Create an implicit 'typedef T Name;' in the translation unit, with
  // trivial (location-free) type source information.
  ASTContext &Self = const_cast<ASTContext &>(*this);
  TypedefDecl *D = TypedefDecl::Create(Self, getTranslationUnitDecl(),
                                       SourceLocation(), SourceLocation(),
                                       &Idents.get(Name),
                                       getTrivialTypeSourceInfo(T));
  D->setImplicit();
  return D;
}
TypedefDecl *ASTContext::getInt128Decl() const {
  // Lazily create the implicit '__int128_t' typedef on first use.
  if (TypedefDecl *Existing = Int128Decl)
    return Existing;
  Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}
TypedefDecl *ASTContext::getUInt128Decl() const {
  // Lazily create the implicit '__uint128_t' typedef on first use.
  if (TypedefDecl *Existing = UInt128Decl)
    return Existing;
  UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}
TypeDecl *ASTContext::getFloat128StubType() const {
  // Lazily create the '__float128' stub record (C++ only).
  assert(LangOpts.CPlusPlus && "should only be called for c++");
  if (Float128StubDecl)
    return Float128StubDecl;
  Float128StubDecl = buildImplicitRecord("__float128");
  return Float128StubDecl;
}
void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  // Allocate the builtin type node in the context's arena, register it in
  // the Types list, and hand back its canonical form through R.
  BuiltinType *BT = new (*this, TypeAlignment) BuiltinType(K);
  Types.push_back(BT);
  R = CanQualType::CreateUnsafe(QualType(BT, 0));
}
/// InitBuiltinTypes - Create the canonical builtin type nodes for the given
/// target and register them with this context. Must run exactly once per
/// context; the ordering below matters because later entries (WideCharTy,
/// the complex types, VoidPtrTy, HLSLStringTy, ...) are derived from
/// earlier ones.
void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");
  this->Target = &Target;
  ABI.reset(createCXXABI(Target));
  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);
  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);
  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  // Plain 'char' is a distinct type whose signedness is target/option driven.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);
  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }
  WIntTy = getFromTargetType(Target.getWIntType());
  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());
  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());
  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);
  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);
  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
  // C99 6.2.5p11.
  // Complex types are derived from the float types created above.
  FloatComplexTy = getComplexType(FloatTy);
  DoubleComplexTy = getComplexType(DoubleTy);
  LongDoubleComplexTy = getComplexType(LongDoubleTy);
  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
  if (LangOpts.OpenCL) {
    InitBuiltinType(OCLImage1dTy, BuiltinType::OCLImage1d);
    InitBuiltinType(OCLImage1dArrayTy, BuiltinType::OCLImage1dArray);
    InitBuiltinType(OCLImage1dBufferTy, BuiltinType::OCLImage1dBuffer);
    InitBuiltinType(OCLImage2dTy, BuiltinType::OCLImage2d);
    InitBuiltinType(OCLImage2dArrayTy, BuiltinType::OCLImage2dArray);
    InitBuiltinType(OCLImage3dTy, BuiltinType::OCLImage3d);
    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
  }
  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);
  ObjCConstantStringType = QualType();
  ObjCSuperType = QualType();
  // void * type
  VoidPtrTy = getPointerType(VoidTy);
  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);
  // Builtin type used to help define __builtin_va_list.
  VaListTagTy = QualType();
  // HLSL Change Starts
  // HLSL-specific builtin types: min-precision scalars, literal types, and
  // packed 8-bit-x4 integer types.
  if (LangOpts.HLSL) {
    InitBuiltinType(Min12IntTy, BuiltinType::Min12Int);
    InitBuiltinType(Min16IntTy, BuiltinType::Min16Int);
    InitBuiltinType(Min16UIntTy, BuiltinType::Min16UInt);
    InitBuiltinType(Min16FloatTy, BuiltinType::Min16Float);
    InitBuiltinType(Min10FloatTy, BuiltinType::Min10Float);
    InitBuiltinType(HalfFloatTy, BuiltinType::HalfFloat);
    InitBuiltinType(LitIntTy, BuiltinType::LitInt);
    InitBuiltinType(LitFloatTy, BuiltinType::LitFloat);
    InitBuiltinType(Int8_4PackedTy, BuiltinType::Int8_4Packed);
    InitBuiltinType(UInt8_4PackedTy, BuiltinType::UInt8_4Packed);
    HLSLStringTy = this->getPointerType(CharTy);
    hlsl::InitializeASTContextForHLSL(*this); // Previously in constructor, guarded by !DelayInitialization
  }
  // HLSL Change Ends
}
DiagnosticsEngine &ASTContext::getDiagnostics() const {
  // The context does not own a diagnostics engine; forward to the
  // SourceManager's.
  DiagnosticsEngine &Diags = SourceMgr.getDiagnostics();
  return Diags;
}
/// getDeclAttrs - Return (creating on first use) the attribute vector for
/// decl D. The AttrVec is placement-constructed in the context's arena, so
/// it is not individually freed; eraseDeclAttrs runs its destructor.
AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    try { // HLSL Change
      void *Mem = Allocate(sizeof(AttrVec));
      Result = new (Mem) AttrVec;
    // HLSL Change Begin: Don't leave empty entry on exception
    // If allocation/construction throws, remove the null map entry created
    // by operator[] above so a later call retries instead of dereferencing
    // a stale null slot.
    }
    catch (...) {
      DeclAttrs.erase(D);
      throw;
    }
    // HLSL Change End
  }
  return *Result;
}
/// \brief Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  // The AttrVec lives in the context's arena, so only its destructor runs
  // here; the memory itself is reclaimed with the context.
  auto Pos = DeclAttrs.find(D);
  if (Pos == DeclAttrs.end())
    return;
  Pos->second->~AttrVec();
  DeclAttrs.erase(Pos);
}
// FIXME: Remove ?
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  // Only the member-specialization alternative of the union is of interest.
  TemplateOrSpecializationInfo Info = getTemplateOrSpecializationInfo(Var);
  return Info.dyn_cast<MemberSpecializationInfo *>();
}
ASTContext::TemplateOrSpecializationInfo
ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
  // Return the recorded template/specialization info for Var, or an empty
  // value when nothing has been recorded.
  auto Pos = TemplateOrInstantiation.find(Var);
  return Pos != TemplateOrInstantiation.end() ? Pos->second
                                              : TemplateOrSpecializationInfo();
}
void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                          SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  // Record that Inst was instantiated from the member Tmpl with kind TSK.
  MemberSpecializationInfo *MSI =
      new (*this) MemberSpecializationInfo(Tmpl, TSK, PointOfInstantiation);
  setTemplateOrSpecializationInfo(Inst, MSI);
}
void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  // A variable's instantiation info may be recorded at most once.
  TemplateOrSpecializationInfo &Slot = TemplateOrInstantiation[Inst];
  assert(!Slot &&
         "Already noted what the variable was instantiated from");
  Slot = TSI;
}
FunctionDecl *ASTContext::getClassScopeSpecializationPattern(
                                                     const FunctionDecl *FD){
  assert(FD && "Specialization is 0");
  // Look up the pattern recorded by setClassScopeSpecializationPattern.
  auto Pos = ClassScopeSpecializationPattern.find(FD);
  return Pos == ClassScopeSpecializationPattern.end() ? nullptr : Pos->second;
}
void ASTContext::setClassScopeSpecializationPattern(FunctionDecl *FD,
                                                    FunctionDecl *Pattern) {
  assert(FD && "Specialization is 0");
  assert(Pattern && "Class scope specialization pattern is 0");
  // Unconditionally (re)record the pattern for this specialization.
  FunctionDecl *&Slot = ClassScopeSpecializationPattern[FD];
  Slot = Pattern;
}
NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(UsingDecl *UUD) {
  // Return the pattern decl this using-decl was instantiated from, if any.
  auto Pos = InstantiatedFromUsingDecl.find(UUD);
  return Pos == InstantiatedFromUsingDecl.end() ? nullptr : Pos->second;
}
void
ASTContext::setInstantiatedFromUsingDecl(UsingDecl *Inst, NamedDecl *Pattern) {
  // The pattern must itself be one of the using-declaration flavors.
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  NamedDecl *&Slot = InstantiatedFromUsingDecl[Inst];
  assert(!Slot && "pattern already exists");
  Slot = Pattern;
}
UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  // Return the shadow decl this one was instantiated from, if recorded.
  auto Pos = InstantiatedFromUsingShadowDecl.find(Inst);
  return Pos == InstantiatedFromUsingShadowDecl.end() ? nullptr : Pos->second;
}
void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  // Each shadow decl's pattern may be recorded at most once.
  UsingShadowDecl *&Slot = InstantiatedFromUsingShadowDecl[Inst];
  assert(!Slot && "pattern already exists");
  Slot = Pattern;
}
FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
  // Return the template field this unnamed field was instantiated from.
  auto Pos = InstantiatedFromUnnamedFieldDecl.find(Field);
  return Pos == InstantiatedFromUnnamedFieldDecl.end() ? nullptr : Pos->second;
}
void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
                                                     FieldDecl *Tmpl) {
  // Only unnamed fields participate in this mapping, and each instantiated
  // field may be noted at most once.
  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
  FieldDecl *&Slot = InstantiatedFromUnnamedFieldDecl[Inst];
  assert(!Slot &&
         "Already noted what unnamed field was instantiated from");
  Slot = Tmpl;
}
ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
  // Overridden-method lists are keyed by the canonical declaration.
  auto Pos = OverriddenMethods.find(Method->getCanonicalDecl());
  return Pos == OverriddenMethods.end() ? nullptr : Pos->second.begin();
}
ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
  // Mirrors overridden_methods_begin; nullptr is the empty-range sentinel.
  auto Pos = OverriddenMethods.find(Method->getCanonicalDecl());
  return Pos == OverriddenMethods.end() ? nullptr : Pos->second.end();
}
unsigned
ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
= OverriddenMethods.find(Method->getCanonicalDecl());
if (Pos == OverriddenMethods.end())
return 0;
return Pos->second.size();
}
void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  // Both decls must be canonical so lookups are keyed consistently.
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  auto &Overriders = OverriddenMethods[Method];
  Overriders.push_back(Overridden);
}
void ASTContext::getOverriddenMethods(
const NamedDecl *D,
SmallVectorImpl<const NamedDecl *> &Overridden) const {
assert(D);
if (const CXXMethodDecl *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
Overridden.append(overridden_methods_begin(CXXMethod),
overridden_methods_end(CXXMethod));
return;
}
const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(D);
if (!Method)
return;
SmallVector<const ObjCMethodDecl *, 8> OverDecls;
Method->getOverriddenMethods(OverDecls);
Overridden.append(OverDecls.begin(), OverDecls.end());
}
void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
  assert(!Import->NextLocalImport && "Import declaration already in the chain");
  assert(!Import->isFromASTFile() && "Non-local import declaration");
  // Append to the intrusive singly-linked chain of local import decls.
  if (FirstLocalImport) {
    LastLocalImport->NextLocalImport = Import;
    LastLocalImport = Import;
  } else {
    FirstLocalImport = LastLocalImport = Import;
  }
}
//===----------------------------------------------------------------------===//
// Type Sizing and Analysis
//===----------------------------------------------------------------------===//
/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
/// scalar floating point type.
const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
  const BuiltinType *BT = T->getAs<BuiltinType>();
  assert(BT && "Not a floating point type!");
  switch (BT->getKind()) {
  // HLSL Change Starts: min-precision floats share the half format; literal
  // floats use the double format.
  case BuiltinType::Min10Float:
  case BuiltinType::Min16Float:
  // HLSL Change Ends
  case BuiltinType::Half:
    return Target->getHalfFormat();
  case BuiltinType::HalfFloat: // HLSL Change
  case BuiltinType::Float:
    return Target->getFloatFormat();
  case BuiltinType::LitFloat: // HLSL Change
  case BuiltinType::Double:
    return Target->getDoubleFormat();
  case BuiltinType::LongDouble:
    return Target->getLongDoubleFormat();
  default:
    llvm_unreachable("Not a floating point type!");
  }
}
/// getDeclAlign - Return a conservative estimate of the alignment of the
/// specified decl, in character units.
/// If \p ForAlignof, references are treated as their underlying type and
/// large arrays don't get any special treatment; otherwise the result is
/// what code generation expects (references as pointers, large arrays
/// possibly over-aligned).
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  unsigned Align = Target->getCharWidth();
  bool UseAlignAttrOnly = false;
  if (unsigned AlignFromAttr = D->getMaxAlignment()) {
    Align = AlignFromAttr;
    // __attribute__((aligned)) can increase or decrease alignment
    // *except* on a struct or struct member, where it only increases
    // alignment unless 'packed' is also specified.
    //
    // It is an error for alignas to decrease alignment, so we can
    // ignore that possibility; Sema should diagnose it.
    if (isa<FieldDecl>(D)) {
      UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
        cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
    } else {
      UseAlignAttrOnly = true;
    }
  }
  else if (isa<FieldDecl>(D))
    // No align attribute: a packed field (or a field of a packed record)
    // still takes only the attribute-derived alignment path below.
    UseAlignAttrOnly =
      D->hasAttr<PackedAttr>() ||
      cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const ReferenceType *RT = T->getAs<ReferenceType>()) {
      // For alignof, use the referenced type; otherwise model the reference
      // as a pointer to it (what the backing storage actually holds).
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(T);
    if (!BaseT->isIncompleteType() && !T->isFunctionType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
        // Globals may have a target-imposed minimum alignment.
        if (VD->hasGlobalStorage() && !ForAlignof)
          Align = std::max(Align, getTargetInfo().getMinGlobalAlign());
      }
    }
    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // a max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const FieldDecl *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);
        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());
        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }
        Align = std::min(Align, FieldAlign);
      }
    }
  }
  return toCharUnitsFromBits(Align);
}
// getTypeInfoDataSizeInChars - Return the size of a type, in
// chars. If the type is a record, its data size is returned. This is
// the size of the memcpy that's performed when assigning this type
// using a trivial copy/move assignment operator.
std::pair<CharUnits, CharUnits>
ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
  std::pair<CharUnits, CharUnits> Info = getTypeInfoInChars(T);

  // In C++, objects can sometimes be allocated into the tail padding
  // of a base-class subobject. We decide whether that's possible
  // during class layout, so here we can just trust the layout results.
  if (!getLangOpts().CPlusPlus)
    return Info;

  if (const RecordType *RT = T->getAs<RecordType>())
    Info.first = getASTRecordLayout(RT->getDecl()).getDataSize();
  return Info;
}
/// getConstantArrayInfoInChars - Performing the computation in CharUnits
/// instead of in bits prevents overflowing the uint64_t for some large arrays.
static std::pair<CharUnits, CharUnits>
getConstantArrayInfoInChars(const ASTContext &Context,
                            const ConstantArrayType *CAT) {
  std::pair<CharUnits, CharUnits> EltInfo =
      Context.getTypeInfoInChars(CAT->getElementType());
  uint64_t NumElts = CAT->getSize().getZExtValue();
  uint64_t EltSize = static_cast<uint64_t>(EltInfo.first.getQuantity());
  assert((NumElts == 0 || EltSize <= (uint64_t)-1 / NumElts) &&
         "Overflow in array type char size evaluation");
  uint64_t Width = EltSize * NumElts;
  unsigned Align = EltInfo.second.getQuantity();
  // The 32-bit Microsoft ABI is the one configuration that does not round
  // the array size up to its alignment.
  if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
      Context.getTargetInfo().getPointerWidth(0) == 64)
    Width = llvm::RoundUpToAlignment(Width, Align);
  return std::make_pair(CharUnits::fromQuantity(Width),
                        CharUnits::fromQuantity(Align));
}
std::pair<CharUnits, CharUnits>
ASTContext::getTypeInfoInChars(const Type *T) const {
if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(T))
return getConstantArrayInfoInChars(*this, CAT);
TypeInfo Info = getTypeInfo(T);
return std::make_pair(toCharUnitsFromBits(Info.Width),
toCharUnitsFromBits(Info.Align));
}
std::pair<CharUnits, CharUnits>
ASTContext::getTypeInfoInChars(QualType T) const {
  // Strip qualifiers and delegate to the Type* overload.
  const Type *Ty = T.getTypePtr();
  return getTypeInfoInChars(Ty);
}
bool ASTContext::isAlignmentRequired(const Type *T) const {
return getTypeInfo(T).AlignIsRequired;
}
bool ASTContext::isAlignmentRequired(QualType T) const {
  // Qualifiers are irrelevant here; forward to the Type* overload.
  const Type *Ty = T.getTypePtr();
  return isAlignmentRequired(Ty);
}
/// getTypeInfo - Memoizing wrapper around getTypeInfoImpl: returns the
/// cached width/alignment for T, computing and caching it on first request.
TypeInfo ASTContext::getTypeInfo(const Type *T) const {
  TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
  if (I != MemoizedTypeInfo.end())
    return I->second;

  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
  // (getTypeInfoImpl recurses into getTypeInfo for element/underlying types,
  // which may insert entries and rehash the map, invalidating iterator I.)
  TypeInfo TI = getTypeInfoImpl(T);
  MemoizedTypeInfo[T] = TI;
  return TI;
}
/// getTypeInfoImpl - Return the size of the specified type, in bits. This
/// method does not work on incomplete types.
///
/// FIXME: Pointers into different addr spaces could have different sizes and
/// alignment requirements: getPointerInfo should take an AddrSpace, this
/// should take a QualType, &c.
TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
  uint64_t Width = 0;
  unsigned Align = 8;
  bool AlignIsRequired = false;

  // HLSL Change Starts
  // Lay out HLSL vector/matrix types as the equivalent ext-vector type.
  if (getLangOpts().HLSL) {
    if (const ExtVectorType *Ty =
            hlsl::ConvertHLSLVecMatTypeToExtVectorType(*this, QualType(T, 0))) {
      T = Ty;
    }
  }
  // HLSL Change Ends

  switch (T->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)                       \
  case Type::Class:                                                            \
  assert(!T->isDependentType() && "should not see dependent types here");      \
  return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("Should not see dependent types");

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // GCC extension: alignof(function) = 32 bits
    Width = 0;
    Align = 32;
    break;

  case Type::IncompleteArray:
  case Type::VariableArray:
    // Size unknown; alignment comes from the element type.
    Width = 0;
    Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
    break;

  case Type::ConstantArray: {
    const ConstantArrayType *CAT = cast<ConstantArrayType>(T);
    TypeInfo EltInfo = getTypeInfo(CAT->getElementType());
    uint64_t Size = CAT->getSize().getZExtValue();
    assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
           "Overflow in array type bit size evaluation");
    Width = EltInfo.Width * Size;
    Align = EltInfo.Align;
    // The 32-bit Microsoft ABI does not round the array size up to alignment.
    if (!getTargetInfo().getCXXABI().isMicrosoft() ||
        getTargetInfo().getPointerWidth(0) == 64)
      Width = llvm::RoundUpToAlignment(Width, Align);
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const VectorType *VT = cast<VectorType>(T);
    TypeInfo EltInfo = getTypeInfo(VT->getElementType());
    Width = EltInfo.Width * VT->getNumElements();
    Align = Width;
    // HLSL Change Begins.
    // Vector align to its element.
    if (getLangOpts().HLSL) {
      Width = llvm::RoundUpToAlignment(EltInfo.Width, EltInfo.Align) * VT->getNumElements(); // Match data layout's behaviour
      Align = EltInfo.Align;
    }
    // HLSL Change Ends.
    // If the alignment is not a power of 2, round up to the next power of 2.
    // This happens for non-power-of-2 length vectors.
    if (Align & (Align-1)) {
      Align = llvm::NextPowerOf2(Align);
      Width = llvm::RoundUpToAlignment(Width, Align);
    }
    // Adjust the alignment based on the target max.
    uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
    if (TargetVectorAlign && TargetVectorAlign < Align)
      Align = TargetVectorAlign;
    break;
  }

  case Type::Builtin:
    switch (cast<BuiltinType>(T)->getKind()) {
    default: llvm_unreachable("Unknown builtin type!");
    case BuiltinType::Void:
      // GCC extension: alignof(void) = 8 bits.
      Width = 0;
      Align = 8;
      break;
    case BuiltinType::Bool:
      Width = Target->getBoolWidth();
      Align = Target->getBoolAlign();
      break;
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
      Width = Target->getCharWidth();
      Align = Target->getCharAlign();
      break;
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
      Width = Target->getWCharWidth();
      Align = Target->getWCharAlign();
      break;
    case BuiltinType::Char16:
      Width = Target->getChar16Width();
      Align = Target->getChar16Align();
      break;
    case BuiltinType::Char32:
      Width = Target->getChar32Width();
      Align = Target->getChar32Align();
      break;
    case BuiltinType::UShort:
    case BuiltinType::Short:
      Width = Target->getShortWidth();
      Align = Target->getShortAlign();
      break;
    case BuiltinType::UInt:
    case BuiltinType::Int:
    case BuiltinType::Int8_4Packed: // HLSL Change
    case BuiltinType::UInt8_4Packed: // HLSL Change
      Width = Target->getIntWidth();
      Align = Target->getIntAlign();
      break;
    case BuiltinType::ULong:
    case BuiltinType::Long:
      Width = Target->getLongWidth();
      Align = Target->getLongAlign();
      break;
    case BuiltinType::ULongLong:
    case BuiltinType::LongLong:
      Width = Target->getLongLongWidth();
      Align = Target->getLongLongAlign();
      break;
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      Width = 128;
      Align = 128; // int128_t is 128-bit aligned on all targets.
      break;
    case BuiltinType::Half:
      Width = Target->getHalfWidth();
      Align = Target->getHalfAlign();
      break;
    case BuiltinType::HalfFloat: // HLSL Change
    case BuiltinType::Float:
      Width = Target->getFloatWidth();
      Align = Target->getFloatAlign();
      break;
    case BuiltinType::Double:
      Width = Target->getDoubleWidth();
      Align = Target->getDoubleAlign();
      break;
    // HLSL Change Starts
    // Min-precision scalars are 16 bits wide. Outside min-precision mode
    // they also align to 16 bits; in min-precision mode they are stored
    // with 32-bit alignment.
    case BuiltinType::Min10Float:
    case BuiltinType::Min16Float:
      Width = 16;
      if (!getLangOpts().UseMinPrecision)
        Align = 16;
      else
        Align = 32;
      break;
    case BuiltinType::Min12Int:
    case BuiltinType::Min16Int:
    case BuiltinType::Min16UInt:
      Width = 16;
      if (!getLangOpts().UseMinPrecision)
        Align = 16;
      else
        Align = 32;
      break;
    // Treat literals as largest size possible here, as it will be used
    // to determine MaxWidth in GetExprRange
    case BuiltinType::LitFloat:
      Width = 64;
      Align = 64;
      break;
    case BuiltinType::LitInt:
      Width = 64;
      Align = 64;
      break;
    // HLSL Change Ends
    case BuiltinType::LongDouble:
      Width = Target->getLongDoubleWidth();
      Align = Target->getLongDoubleAlign();
      break;
    case BuiltinType::NullPtr:
      Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
      Align = Target->getPointerAlign(0); // == sizeof(void*)
      break;
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      Width = Target->getPointerWidth(0);
      Align = Target->getPointerAlign(0);
      break;
    case BuiltinType::OCLSampler:
      // Samplers are modeled as integers.
      Width = Target->getIntWidth();
      Align = Target->getIntAlign();
      break;
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLImage1d:
    case BuiltinType::OCLImage1dArray:
    case BuiltinType::OCLImage1dBuffer:
    case BuiltinType::OCLImage2d:
    case BuiltinType::OCLImage2dArray:
    case BuiltinType::OCLImage3d:
      // Currently these types are pointers to opaque types.
      Width = Target->getPointerWidth(0);
      Align = Target->getPointerAlign(0);
      break;
    }
    break;
  case Type::ObjCObjectPointer:
    Width = Target->getPointerWidth(0);
    Align = Target->getPointerAlign(0);
    break;
  case Type::BlockPointer: {
    unsigned AS = getTargetAddressSpace(
        cast<BlockPointerType>(T)->getPointeeType());
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    // alignof and sizeof should never enter this code path here, so we go
    // the pointer route.
    unsigned AS = getTargetAddressSpace(
        cast<ReferenceType>(T)->getPointeeType());
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  }
  case Type::Pointer: {
    unsigned AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
    Width = Target->getPointerWidth(AS);
    Align = Target->getPointerAlign(AS);
    break;
  }
  case Type::MemberPointer: {
    // Member-pointer layout is C++-ABI specific.
    const MemberPointerType *MPT = cast<MemberPointerType>(T);
    std::tie(Width, Align) = ABI->getMemberPointerWidthAndAlign(MPT);
    break;
  }
  case Type::Complex: {
    // Complex types have the same alignment as their elements, but twice the
    // size.
    TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
    Width = EltInfo.Width * 2;
    Align = EltInfo.Align;
    break;
  }
  case Type::ObjCObject:
    return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
  case Type::Adjusted:
  case Type::Decayed:
    return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
  case Type::ObjCInterface: {
    const ObjCInterfaceType *ObjCI = cast<ObjCInterfaceType>(T);
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
    Width = toBits(Layout.getSize());
    Align = toBits(Layout.getAlignment());
    break;
  }
  case Type::Record:
  case Type::Enum: {
    const TagType *TT = cast<TagType>(T);
    // Invalid decls get a safe dummy layout so downstream code can proceed.
    if (TT->getDecl()->isInvalidDecl()) {
      Width = 8;
      Align = 8;
      break;
    }
    if (const EnumType *ET = dyn_cast<EnumType>(TT)) {
      // Enums take the layout of their underlying integer type, except that
      // an explicit alignment attribute overrides the alignment.
      const EnumDecl *ED = ET->getDecl();
      TypeInfo Info =
          getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType());
      if (unsigned AttrAlign = ED->getMaxAlignment()) {
        Info.Align = AttrAlign;
        Info.AlignIsRequired = true;
      }
      return Info;
    }
    const RecordType *RT = cast<RecordType>(TT);
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getASTRecordLayout(RD);
    Width = toBits(Layout.getSize());
    Align = toBits(Layout.getAlignment());
    AlignIsRequired = RD->hasAttr<AlignedAttr>();
    break;
  }
  case Type::SubstTemplateTypeParm:
    return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
                       getReplacementType().getTypePtr());
  case Type::Auto: {
    const AutoType *A = cast<AutoType>(T);
    assert(!A->getDeducedType().isNull() &&
           "cannot request the size of an undeduced or dependent auto type");
    return getTypeInfo(A->getDeducedType().getTypePtr());
  }
  case Type::Paren:
    return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
  case Type::Typedef: {
    const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
    TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
    // If the typedef has an aligned attribute on it, it overrides any computed
    // alignment we have. This violates the GCC documentation (which says that
    // attribute(aligned) can only round up) but matches its implementation.
    if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
      Align = AttrAlign;
      AlignIsRequired = true;
    } else {
      Align = Info.Align;
      AlignIsRequired = Info.AlignIsRequired;
    }
    Width = Info.Width;
    break;
  }
  case Type::Elaborated:
    return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
  case Type::Attributed:
    return getTypeInfo(
        cast<AttributedType>(T)->getEquivalentType().getTypePtr());
  case Type::Atomic: {
    // Start with the base type information.
    TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
    Width = Info.Width;
    Align = Info.Align;
    // If the size of the type doesn't exceed the platform's max
    // atomic promotion width, make the size and alignment more
    // favorable to atomic operations:
    if (Width != 0 && Width <= Target->getMaxAtomicPromoteWidth()) {
      // Round the size up to a power of 2.
      if (!llvm::isPowerOf2_64(Width))
        Width = llvm::NextPowerOf2(Width);
      // Set the alignment equal to the size.
      Align = static_cast<unsigned>(Width);
    }
  }
  }

  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
  return TypeInfo(Width, Align, AlignIsRequired);
}
unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
  // Target ppc64 with QPX: simd default alignment for pointer to double is 32.
  const llvm::Triple &Triple = getTargetInfo().getTriple();
  const bool IsPPC64 = Triple.getArch() == llvm::Triple::ppc64 ||
                       Triple.getArch() == llvm::Triple::ppc64le;
  if (IsPPC64 && getTargetInfo().getABI() == "elfv1-qpx" &&
      T->isSpecificBuiltinType(BuiltinType::Double))
    return 256;
  return getTargetInfo().getSimdDefaultAlign();
}
/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
/// Note: integer division; callers are expected to pass a bit count that is
/// a multiple of the target's char width.
CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
  return CharUnits::fromQuantity(BitSize / getCharWidth());
}
/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}
/// getTypeSizeInChars - Return the size of the specified type, in characters.
/// This method does not work on incomplete types.
CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
  return getTypeInfoInChars(T).first;
}
/// Overload taking a raw Type pointer; identical semantics to the QualType
/// form.
CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
  return getTypeInfoInChars(T).first;
}
/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
/// characters. This method does not work on incomplete types.
CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}
/// Overload taking a raw Type pointer; identical semantics to the QualType
/// form.
CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}
/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
/// alignment in cases where it is beneficial for performance to overalign
/// a data type.
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;
  // For arrays, the preferred alignment is driven by the element type.
  T = T->getBaseElementTypeUnsafe();
  // The preferred alignment of member pointers is that of a pointer.
  if (T->isMemberPointerType())
    return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
  if (Target->getTriple().getArch() == llvm::Triple::xcore)
    return ABIAlign;  // Never overalign on XCore.
  // Double and long long should be naturally aligned if possible.
  // Look through complex and enum sugar to the underlying builtin type.
  if (const ComplexType *CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (const EnumType *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType().getTypePtr();
  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
      T->isSpecificBuiltinType(BuiltinType::LongLong) ||
      T->isSpecificBuiltinType(BuiltinType::ULongLong))
    // Don't increase the alignment if an alignment attribute was specified on a
    // typedef declaration.
    if (!TI.AlignIsRequired)
      // Prefer natural (size) alignment for these types.
      return std::max(ABIAlign, (unsigned)getTypeSize(T));
  return ABIAlign;
}
/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
/// for __attribute__((aligned)) on this target, to be used if no alignment
/// value is specified.
unsigned ASTContext::getTargetDefaultAlignForAttributeAligned(void) const {
  // Thin wrapper; the value is entirely target-defined.
  return getTargetInfo().getDefaultAlignForAttributeAligned();
}
/// getAlignOfGlobalVar - Return the alignment in bits that should be given
/// to a global variable of the specified type. This is at least the type's
/// ABI alignment, raised to the target's minimum global alignment if any.
unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
  return std::max(getTypeAlign(T), getTargetInfo().getMinGlobalAlign());
}
/// getAlignOfGlobalVarInChars - Return the alignment in characters that
/// should be given to a global variable of the specified type.
CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const {
  return toCharUnitsFromBits(getAlignOfGlobalVar(T));
}
/// getOffsetOfBaseWithVBPtr - Accumulate the offset to the (possibly
/// transitively inherited) base class whose virtual-base pointer \p RD
/// reuses, by walking getBaseSharingVBPtr() links and summing each base's
/// offset within its enclosing layout.
/// NOTE(review): vbptr sharing appears to be a Microsoft-ABI layout concept —
/// confirm against the record-layout builder before relying on that.
CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
  CharUnits Offset = CharUnits::Zero();
  const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
  // Each iteration descends one level to the base sharing the vbptr,
  // adding that base's offset inside the current layout.
  while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
    Offset += Layout->getBaseClassOffset(Base);
    Layout = &getASTRecordLayout(Base);
  }
  return Offset;
}
/// DeepCollectObjCIvars -
/// This routine first collects all declared, but not synthesized, ivars in
/// super class and then collects all ivars, including those synthesized for
/// current class. This routine is used for implementation of current class
/// when all ivars, declared and synthesized are known.
///
/// \param leafClass true only at the outermost (most-derived) call; the
/// recursive superclass calls pass false so that ancestors contribute only
/// their declared ivars.
void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
                                      bool leafClass,
                            SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
  // Recurse into the superclass chain first so ivars are emitted base-first.
  if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
    DeepCollectObjCIvars(SuperClass, false, Ivars);
  if (!leafClass) {
    // Superclass level: only the ivars written in the @interface.
    for (const auto *I : OI->ivars())
      Ivars.push_back(I);
  } else {
    // Leaf class: walk the full declared-ivar list, which also contains
    // synthesized ivars.
    ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
    for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
         Iv= Iv->getNextIvar())
      Ivars.push_back(Iv);
  }
}
/// CollectInheritedProtocols - Collect all protocols in current class and
/// those inherited by it.
/// \p CDecl may be an interface, a category, or a protocol; other decls are
/// ignored. Protocols are inserted by canonical declaration, and the set is
/// also used to terminate recursion on protocol inheritance cycles.
void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
                          llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
  if (const ObjCInterfaceDecl *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
    // We can use protocol_iterator here instead of
    // all_referenced_protocol_iterator since we are walking all categories.
    for (auto *Proto : OI->all_referenced_protocols()) {
      CollectInheritedProtocols(Proto, Protocols);
    }
    // Categories of this Interface.
    for (const auto *Cat : OI->visible_categories())
      CollectInheritedProtocols(Cat, Protocols);
    // Walk the entire superclass chain.
    if (ObjCInterfaceDecl *SD = OI->getSuperClass())
      while (SD) {
        CollectInheritedProtocols(SD, Protocols);
        SD = SD->getSuperClass();
      }
  } else if (const ObjCCategoryDecl *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
    for (auto *Proto : OC->protocols()) {
      CollectInheritedProtocols(Proto, Protocols);
    }
  } else if (const ObjCProtocolDecl *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
    // Insert the protocol. If it was already in the set we have visited it
    // (and everything it inherits) before, so stop here.
    if (!Protocols.insert(
          const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
      return;
    for (auto *Proto : OP->protocols())
      CollectInheritedProtocols(Proto, Protocols);
  }
}
/// Count the ivars of \p OI that are not declared in the primary class
/// @interface: ivars from class extensions plus ivars (including
/// synthesized ones) introduced in the @implementation.
unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
  unsigned count = 0;
  // Count ivars declared in class extension.
  for (const auto *Ext : OI->known_extensions())
    count += Ext->ivar_size();
  // Count ivar defined in this class's implementation. This
  // includes synthesized ivars.
  if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
    count += ImplDecl->ivar_size();
  return count;
}
/// Determine whether \p E should be treated as a "null sentinel": an
/// expression of nullptr_t type, GNU __null, or a pointer-typed null
/// pointer constant. A null \p E is not a sentinel.
bool ASTContext::isSentinelNullExpr(const Expr *E) {
  if (!E)
    return false;

  // An expression of nullptr_t type is always treated as null.
  if (E->getType()->isNullPtrType())
    return true;

  // GNU __null counts as a sentinel even though its type is 'int'.
  if (isa<GNUNullExpr>(E))
    return true;

  // Otherwise accept any pointer-typed expression that evaluates to a
  // null pointer constant.
  return E->getType()->isAnyPointerType() &&
         E->IgnoreParenCasts()->isNullPointerConstant(
             *this, Expr::NPC_ValueDependentIsNull);
}
/// \brief Get the implementation of ObjCInterfaceDecl, or NULL if none exists.
ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
  // ObjCImpls maps both interfaces and categories to their impl decls;
  // the cast below recovers the interface-specific subclass.
  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
    I = ObjCImpls.find(D);
  if (I != ObjCImpls.end())
    return cast<ObjCImplementationDecl>(I->second);
  return nullptr;
}
/// \brief Get the implementation of ObjCCategoryDecl, or NULL if none exists.
ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
  llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
    I = ObjCImpls.find(D);
  if (I != ObjCImpls.end())
    return cast<ObjCCategoryImplDecl>(I->second);
  return nullptr;
}
/// \brief Set the implementation of ObjCInterfaceDecl.
void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
                                       ObjCImplementationDecl *ImplD) {
  assert(IFaceD && ImplD && "Passed null params");
  // Overwrites any previously registered implementation for this interface.
  ObjCImpls[IFaceD] = ImplD;
}
/// \brief Set the implementation of ObjCCategoryDecl.
void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
                                       ObjCCategoryImplDecl *ImplD) {
  assert(CatD && ImplD && "Passed null params");
  ObjCImpls[CatD] = ImplD;
}
/// Return the Objective-C interface that lexically contains \p ND: the
/// interface itself, the class of a containing category, or the class of a
/// containing @implementation. Returns null for any other context.
const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
                                              const NamedDecl *ND) const {
  const DeclContext *Ctx = ND->getDeclContext();

  // Declared directly inside the @interface.
  if (const ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Ctx))
    return IFace;

  // Declared inside a category: hop to the category's class.
  if (const ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Ctx))
    return Category->getClassInterface();

  // Declared inside an @implementation (class or category).
  if (const ObjCImplDecl *Impl = dyn_cast<ObjCImplDecl>(Ctx))
    return Impl->getClassInterface();

  return nullptr;
}
/// \brief Get the copy initialization expression of VarDecl, or NULL if
/// none exists.
Expr *ASTContext::getBlockVarCopyInits(const VarDecl*VD) {
  assert(VD && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "getBlockVarCopyInits - not __block var");
  llvm::DenseMap<const VarDecl*, Expr*>::iterator
    I = BlockVarCopyInits.find(VD);
  return (I != BlockVarCopyInits.end()) ? cast<Expr>(I->second) : nullptr;
}
/// \brief Set the copy initialization expression of a block var decl.
void ASTContext::setBlockVarCopyInits(VarDecl*VD, Expr* Init) {
  assert(VD && Init && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "setBlockVarCopyInits - not __block var");
  BlockVarCopyInits[VD] = Init;
}
/// Allocate a TypeSourceInfo for \p T from the context's bump allocator.
/// \p DataSize, if nonzero, must equal the full TypeLoc data size for \p T;
/// when zero it is computed here.
TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
                                                 unsigned DataSize) const {
  if (!DataSize)
    DataSize = TypeLoc::getFullDataSizeForType(T);
  else
    assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
           "incorrect data size provided to CreateTypeSourceInfo!");
  // The TypeLoc data lives immediately after the TypeSourceInfo object,
  // which is why sizeof(TypeSourceInfo) + DataSize is allocated here.
  TypeSourceInfo *TInfo =
    (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
  new (TInfo) TypeSourceInfo(T);
  return TInfo;
}
/// Create a TypeSourceInfo for \p T whose every location is the single
/// location \p L ("trivial" source info, used when no written type exists).
TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
                                                     SourceLocation L) const {
  TypeSourceInfo *DI = CreateTypeSourceInfo(T);
  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
  return DI;
}
/// Return the layout of \p D's ivars as declared in its @interface
/// (no implementation decl supplied, so synthesized ivars are excluded).
const ASTRecordLayout &
ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
  return getObjCLayout(D, nullptr);
}
/// Return the layout of the class interface as seen from its
/// @implementation \p D, which includes implementation-introduced ivars.
const ASTRecordLayout &
ASTContext::getASTObjCImplementationLayout(
                                        const ObjCImplementationDecl *D) const {
  return getObjCLayout(D->getClassInterface(), D);
}
//===----------------------------------------------------------------------===//
// Type creation/memoization methods
//===----------------------------------------------------------------------===//
/// Return \p baseType with the extended qualifiers \p quals applied,
/// uniquing the resulting ExtQuals node. Fast qualifiers are stripped from
/// the node and returned in the QualType's fast-qualifier bits instead.
QualType
ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
  unsigned fastQuals = quals.getFastQualifiers();
  quals.removeFastQualifiers();
  // Check if we've already instantiated this type.
  llvm::FoldingSetNodeID ID;
  ExtQuals::Profile(ID, baseType, quals);
  void *insertPos = nullptr;
  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
    assert(eq->getQualifiers() == quals);
    return QualType(eq, fastQuals);
  }
  // If the base type is not canonical, make the appropriate canonical type.
  QualType canon;
  if (!baseType->isCanonicalUnqualified()) {
    SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
    canonSplit.Quals.addConsistentQualifiers(quals);
    canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
    // Re-find the insert position: the recursive call above may have grown
    // the folding set and invalidated insertPos.
    (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
  }
  ExtQuals *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
  ExtQualNodes.InsertNode(eq, insertPos);
  return QualType(eq, fastQuals);
}
/// Return \p T qualified with the given address space. Asserts that \p T
/// does not already carry a (different) address space; if the canonical
/// type already has this address space, \p T is returned unchanged.
QualType
ASTContext::getAddrSpaceQualType(QualType T, unsigned AddressSpace) const {
  QualType CanT = getCanonicalType(T);
  if (CanT.getAddressSpace() == AddressSpace)
    return T;
  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);
  // If this type already has an address space specified, it cannot get
  // another one.
  assert(!Quals.hasAddressSpace() &&
         "Type cannot be in multiple addr spaces!");
  Quals.addAddressSpace(AddressSpace);
  return getExtQualType(TypeNode, Quals);
}
/// Return \p T qualified with the given Objective-C GC attribute.
/// For pointers-to-pointers the attribute is pushed onto the pointee so the
/// qualifier ends up on the innermost pointer level.
QualType ASTContext::getObjCGCQualType(QualType T,
                                       Qualifiers::GC GCAttr) const {
  QualType CanT = getCanonicalType(T);
  if (CanT.getObjCGCAttr() == GCAttr)
    return T;
  if (const PointerType *ptr = T->getAs<PointerType>()) {
    QualType Pointee = ptr->getPointeeType();
    if (Pointee->isAnyPointerType()) {
      // Recurse: qualify the pointee, then rebuild the outer pointer.
      QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
      return getPointerType(ResultType);
    }
  }
  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);
  // If this type already has an ObjCGC specified, it cannot get
  // another one.
  assert(!Quals.hasObjCGCAttr() &&
         "Type cannot have multiple ObjCGCs!");
  Quals.addObjCGCAttr(GCAttr);
  return getExtQualType(TypeNode, Quals);
}
/// Return a function type identical to \p T except that its ExtInfo
/// (calling convention, noreturn, etc.) is replaced by \p Info. Returns
/// \p T itself when nothing changes.
const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
                                                   FunctionType::ExtInfo Info) {
  if (T->getExtInfo() == Info)
    return T;
  QualType Result;
  if (const FunctionNoProtoType *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
    Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
  } else {
    const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    EPI.ExtInfo = Info;
    // Parameter modifiers are threaded through for HLSL (in/out/inout).
    Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI, FPT->getParamMods());
  }
  return cast<FunctionType>(Result.getTypePtr());
}
/// Rewrite the type of every redeclaration of \p FD so its deduced return
/// type becomes \p ResultType, then notify the AST mutation listener.
void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
                                                 QualType ResultType) {
  // Walk the redeclaration chain from the most recent decl backwards.
  FD = FD->getMostRecentDecl();
  while (true) {
    const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI, FPT->getParamMods()));
    // "Next" in iteration order is the previous declaration in source order.
    if (FunctionDecl *Next = FD->getPreviousDecl())
      FD = Next;
    else
      break;
  }
  // FD is now the first declaration in the chain; report the deduction.
  if (ASTMutationListener *L = getASTMutationListener())
    L->DeducedReturnType(FD, ResultType);
}
/// Get a function type and produce the equivalent function type with the
/// specified exception specification. Type sugar that can be present on a
/// declaration of a function with an exception specification is permitted
/// and preserved. Other type sugar (for instance, typedefs) is not.
static QualType getFunctionTypeWithExceptionSpec(
    ASTContext &Context, QualType Orig,
    const FunctionProtoType::ExceptionSpecInfo &ESI) {
  // Might have some parens.
  if (auto *PT = dyn_cast<ParenType>(Orig))
    return Context.getParenType(
        getFunctionTypeWithExceptionSpec(Context, PT->getInnerType(), ESI));
  // Might have a calling-convention attribute: rewrite both the modified
  // and equivalent types underneath it.
  if (auto *AT = dyn_cast<AttributedType>(Orig))
    return Context.getAttributedType(
        AT->getAttrKind(),
        getFunctionTypeWithExceptionSpec(Context, AT->getModifiedType(), ESI),
        getFunctionTypeWithExceptionSpec(Context, AT->getEquivalentType(),
                                         ESI));
  // Anything else must be a function type. Rebuild it with the new exception
  // specification.
  const FunctionProtoType *Proto = cast<FunctionProtoType>(Orig);
  return Context.getFunctionType(
      Proto->getReturnType(), Proto->getParamTypes(),
      Proto->getExtProtoInfo().withExceptionSpec(ESI),
      Proto->getParamMods()); // HLSL Change
}
/// Replace the exception specification in \p FD's type with \p ESI.
/// If \p AsWritten is true, the as-written type in the TypeSourceInfo is
/// patched as well (without rebuilding the TypeLoc data).
void ASTContext::adjustExceptionSpec(
    FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
    bool AsWritten) {
  // Update the type.
  QualType Updated =
      getFunctionTypeWithExceptionSpec(*this, FD->getType(), ESI);
  FD->setType(Updated);
  if (!AsWritten)
    return;
  // Update the type in the type source information too.
  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
    // If the type and the type-as-written differ, we may need to update
    // the type-as-written too.
    if (TSInfo->getType() != FD->getType())
      Updated = getFunctionTypeWithExceptionSpec(*this, TSInfo->getType(), ESI);
    // FIXME: When we get proper type location information for exceptions,
    // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
    // up the TypeSourceInfo;
    assert(TypeLoc::getFullDataSizeForType(Updated) ==
               TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
           "TypeLoc size mismatch from updating exception specification");
    TSInfo->overrideType(Updated);
  }
}
/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
QualType ASTContext::getComplexType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ComplexType::Profile(ID, T);
  void *InsertPos = nullptr;
  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(CT, 0);
  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getComplexType(getCanonicalType(T));
    // Get the new insert position for the node we care about: the recursive
    // call above may have invalidated InsertPos.
    ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  ComplexType *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
  Types.push_back(New);
  ComplexTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
/// getPointerType - Return the uniqued reference to the type for a pointer to
/// the specified type.
QualType ASTContext::getPointerType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  PointerType::Profile(ID, T);
  void *InsertPos = nullptr;
  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);
  // If the pointee type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPointerType(getCanonicalType(T));
    // Get the new insert position for the node we care about: the recursive
    // call above may have invalidated InsertPos.
    PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  PointerType *New = new (*this, TypeAlignment) PointerType(T, Canonical);
  Types.push_back(New);
  PointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
/// Return a uniqued AdjustedType recording that \p Orig was adjusted to
/// \p New (e.g. by parameter type adjustment) while keeping \p Orig as the
/// type-as-written.
QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, New);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);
  QualType Canonical = getCanonicalType(New);
  // Get the new insert position for the node we care about; canonicalization
  // above may have touched the folding set.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");
  AT = new (*this, TypeAlignment)
      AdjustedType(Type::Adjusted, Orig, New, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}
/// Return the decayed form of \p T (array-to-pointer or function-to-pointer)
/// wrapped in a uniqued DecayedType that remembers the original.
/// Under HLSL, no decay is performed and \p T is returned unchanged.
QualType ASTContext::getDecayedType(QualType T) const {
  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
  if (getLangOpts().HLSL) return T; // HLSL Change - no decay for arrays (or anything for that matter)
  QualType Decayed;
  // C99 6.7.5.3p7:
  //   A declaration of a parameter as "array of type" shall be
  //   adjusted to "qualified pointer to type", where the type
  //   qualifiers (if any) are those specified within the [ and ] of
  //   the array type derivation.
  if (T->isArrayType())
    Decayed = getArrayDecayedType(T);
  // C99 6.7.5.3p8:
  //   A declaration of a parameter as "function returning type"
  //   shall be adjusted to "pointer to function returning type", as
  //   in 6.3.2.1.
  if (T->isFunctionType())
    Decayed = getPointerType(T);
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, T, Decayed);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);
  QualType Canonical = getCanonicalType(Decayed);
  // Get the new insert position for the node we care about; canonicalization
  // above may have touched the folding set.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");
  // DecayedType is stored in the same folding set as AdjustedType.
  AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}
/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
QualType ASTContext::getBlockPointerType(QualType T) const {
  assert(T->isFunctionType() && "block of function types only");
  assert(!getLangOpts().HLSL && "HLSL does not support blocks");
  // Unique pointers, to guarantee there is only one block of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  BlockPointerType::Profile(ID, T);
  void *InsertPos = nullptr;
  if (BlockPointerType *PT =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);
  // If the block pointee type isn't canonical, this won't be a canonical
  // type either so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getBlockPointerType(getCanonicalType(T));
    // Get the new insert position for the node we care about: the recursive
    // call above may have invalidated InsertPos.
    BlockPointerType *NewIP =
      BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  BlockPointerType *New
    = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
  Types.push_back(New);
  BlockPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
/// getLValueReferenceType - Return the uniqued reference to the type for an
/// lvalue reference to the specified type.
/// \p SpelledAsLValue is false for references produced by reference
/// collapsing rather than written with '&'.
QualType
ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
  assert(getCanonicalType(T) != OverloadTy &&
         "Unresolved overloaded function type");
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, SpelledAsLValue);
  void *InsertPos = nullptr;
  if (LValueReferenceType *RT =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);
  const ReferenceType *InnerRef = T->getAs<ReferenceType>();
  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field. Reference-to-reference
  // collapses to a reference to the innermost pointee.
  QualType Canonical;
  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
    // Get the new insert position for the node we care about: the recursive
    // call above may have invalidated InsertPos.
    LValueReferenceType *NewIP =
      LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  LValueReferenceType *New
    = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
                                                     SpelledAsLValue);
  Types.push_back(New);
  LValueReferenceTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
QualType ASTContext::getRValueReferenceType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, false);
  void *InsertPos = nullptr;
  if (RValueReferenceType *RT =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);
  const ReferenceType *InnerRef = T->getAs<ReferenceType>();
  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field. Reference-to-reference
  // collapses to a reference to the innermost pointee.
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
    // Get the new insert position for the node we care about: the recursive
    // call above may have invalidated InsertPos.
    RValueReferenceType *NewIP =
      RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  RValueReferenceType *New
    = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
  Types.push_back(New);
  RValueReferenceTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
/// getMemberPointerType - Return the uniqued reference to the type for a
/// member pointer to the specified type, in the specified class.
QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, T, Cls);
  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);
  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
    Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls));
    // Get the new insert position for the node we care about: the recursive
    // call above may have invalidated InsertPos.
    MemberPointerType *NewIP =
      MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  MemberPointerType *New
    = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
  Types.push_back(New);
  MemberPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
QualType ASTContext::getConstantArrayType(QualType EltTy,
                                          const llvm::APInt &ArySizeIn,
                                          ArrayType::ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals) const {
  assert((EltTy->isDependentType() ||
          EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
         "Constant array of VLAs is illegal!");
  // Convert the array size into a canonical width matching the pointer size for
  // the target. This ensures arrays of equal size unique to one node
  // regardless of the bit-width the size was originally written with.
  llvm::APInt ArySize(ArySizeIn);
  ArySize =
    ArySize.zextOrTrunc(Target->getPointerWidth(getTargetAddressSpace(EltTy)));
  llvm::FoldingSetNodeID ID;
  ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals);
  void *InsertPos = nullptr;
  if (ConstantArrayType *ATP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(ATP, 0);
  // If the element type isn't canonical or has qualifiers, this won't
  // be a canonical type either, so fill in the canonical type field.
  // Qualifiers are pulled off the element and re-applied to the array.
  QualType Canon;
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize,
                                 ASM, IndexTypeQuals);
    Canon = getQualifiedType(Canon, canonSplit.Quals);
    // Get the new insert position for the node we care about: the recursive
    // call above may have invalidated InsertPos.
    ConstantArrayType *NewIP =
      ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  ConstantArrayType *New = new(*this,TypeAlignment)
    ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals);
  ConstantArrayTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}
/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
/// The switch below is exhaustive over canonical type classes; sugar is
/// removed up front by getSplitDesugaredType and the top-level qualifiers
/// are re-applied at the end.
QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
  // Vastly most common case.
  if (!type->isVariablyModifiedType()) return type;
  QualType result;
  SplitQualType split = type.getSplitDesugaredType();
  const Type *ty = split.Ty;
  switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("didn't desugar past all non-canonical types?");
  // These types should never be variably-modified.
  case Type::Builtin:
  case Type::Complex:
  case Type::Vector:
  case Type::ExtVector:
  case Type::DependentSizedExtVector:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Record:
  case Type::Enum:
  case Type::UnresolvedUsing:
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::UnaryTransform:
  case Type::DependentName:
  case Type::InjectedClassName:
  case Type::TemplateSpecialization:
  case Type::DependentTemplateSpecialization:
  case Type::TemplateTypeParm:
  case Type::SubstTemplateTypeParmPack:
  case Type::Auto:
  case Type::PackExpansion:
    llvm_unreachable("type should never be variably-modified");
  // These types can be variably-modified but should never need to
  // further decay.
  case Type::FunctionNoProto:
  case Type::FunctionProto:
  case Type::BlockPointer:
  case Type::MemberPointer:
    return type;
  // These types can be variably-modified. All these modifications
  // preserve structure except as noted by comments.
  // TODO: if we ever care about optimizing VLAs, there are no-op
  // optimizations available here.
  case Type::Pointer:
    result = getPointerType(getVariableArrayDecayedType(
                              cast<PointerType>(ty)->getPointeeType()));
    break;
  case Type::LValueReference: {
    const LValueReferenceType *lv = cast<LValueReferenceType>(ty);
    result = getLValueReferenceType(
                 getVariableArrayDecayedType(lv->getPointeeType()),
                                    lv->isSpelledAsLValue());
    break;
  }
  case Type::RValueReference: {
    const RValueReferenceType *lv = cast<RValueReferenceType>(ty);
    result = getRValueReferenceType(
                 getVariableArrayDecayedType(lv->getPointeeType()));
    break;
  }
  case Type::Atomic: {
    const AtomicType *at = cast<AtomicType>(ty);
    result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
    break;
  }
  case Type::ConstantArray: {
    const ConstantArrayType *cat = cast<ConstantArrayType>(ty);
    result = getConstantArrayType(
                 getVariableArrayDecayedType(cat->getElementType()),
                                  cat->getSize(),
                                  cat->getSizeModifier(),
                                  cat->getIndexTypeCVRQualifiers());
    break;
  }
  case Type::DependentSizedArray: {
    const DependentSizedArrayType *dat = cast<DependentSizedArrayType>(ty);
    result = getDependentSizedArrayType(
                 getVariableArrayDecayedType(dat->getElementType()),
                                        dat->getSizeExpr(),
                                        dat->getSizeModifier(),
                                        dat->getIndexTypeCVRQualifiers(),
                                        dat->getBracketsRange());
    break;
  }
  // Turn incomplete types into [*] types.
  case Type::IncompleteArray: {
    const IncompleteArrayType *iat = cast<IncompleteArrayType>(ty);
    result = getVariableArrayType(
                 getVariableArrayDecayedType(iat->getElementType()),
                                  /*size*/ nullptr,
                                  ArrayType::Normal,
                                  iat->getIndexTypeCVRQualifiers(),
                                  SourceRange());
    break;
  }
  // Turn VLA types into [*] types.
  case Type::VariableArray: {
    const VariableArrayType *vat = cast<VariableArrayType>(ty);
    result = getVariableArrayType(
                 getVariableArrayDecayedType(vat->getElementType()),
                                  /*size*/ nullptr,
                                  ArrayType::Star,
                                  vat->getIndexTypeCVRQualifiers(),
                                  vat->getBracketsRange());
    break;
  }
  }
  // Apply the top-level qualifiers from the original.
  return getQualifiedType(result, split.Quals);
}
/// getVariableArrayType - Returns a non-unique reference to the type for a
/// variable array of the specified element type.
QualType ASTContext::getVariableArrayType(QualType EltTy,
                                          Expr *NumElts,
                                          ArrayType::ArraySizeModifier ASM,
                                          unsigned IndexTypeQuals,
                                          SourceRange Brackets) const {
  // Since we don't unique expressions, it isn't possible to unique VLA's
  // that have an expression provided for their size.
  QualType Canon;
  // Be sure to pull qualifiers off the element type; they are re-applied
  // to the array as a whole on the canonical type.
  if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(EltTy).split();
    Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
                                 IndexTypeQuals, Brackets);
    Canon = getQualifiedType(Canon, canonSplit.Quals);
  }
  // Note: appended to VariableArrayTypes (a vector), not a folding set —
  // these types are intentionally not uniqued.
  VariableArrayType *New = new(*this, TypeAlignment)
    VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
  VariableArrayTypes.push_back(New);
  Types.push_back(New);
  return QualType(New, 0);
}
/// getDependentSizedArrayType - Returns a non-unique reference to
/// the type for a dependently-sized array of the specified element
/// type.
/// Although the returned sugared node is not uniqued, its canonical form
/// (canonical element type, same size expression) is looked up in / added
/// to DependentSizedArrayTypes so equivalent arrays share a canonical type.
QualType ASTContext::getDependentSizedArrayType(QualType elementType,
                                                Expr *numElements,
                                                ArrayType::ArraySizeModifier ASM,
                                                unsigned elementTypeQuals,
                                                SourceRange brackets) const {
  assert((!numElements || numElements->isTypeDependent() ||
          numElements->isValueDependent()) &&
         "Size must be type- or value-dependent!");
  // Dependently-sized array types that do not have a specified number
  // of elements will have their sizes deduced from a dependent
  // initializer.  We do no canonicalization here at all, which is okay
  // because they can't be used in most locations.
  if (!numElements) {
    DependentSizedArrayType *newType
      = new (*this, TypeAlignment)
          DependentSizedArrayType(*this, elementType, QualType(),
                                  numElements, ASM, elementTypeQuals,
                                  brackets);
    Types.push_back(newType);
    return QualType(newType, 0);
  }
  // Otherwise, we actually build a new type every time, but we
  // also build a canonical type.
  SplitQualType canonElementType = getCanonicalType(elementType).split();
  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentSizedArrayType::Profile(ID, *this,
                                   QualType(canonElementType.Ty, 0),
                                   ASM, elementTypeQuals, numElements);
  // Look for an existing type with these properties.
  DependentSizedArrayType *canonTy =
    DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
  // If we don't have one, build one.
  if (!canonTy) {
    canonTy = new (*this, TypeAlignment)
      DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
                              QualType(), numElements, ASM, elementTypeQuals,
                              brackets);
    DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }
  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(QualType(canonTy,0),
                                    canonElementType.Quals);
  // If we didn't need extra canonicalization for the element type,
  // then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType)
    return canon;
  // Otherwise, we need to build a type which follows the spelling
  // of the element type.
  DependentSizedArrayType *sugaredType
    = new (*this, TypeAlignment)
        DependentSizedArrayType(*this, elementType, canon, numElements,
                                ASM, elementTypeQuals, brackets);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}
/// getIncompleteArrayType - Return a unique reference to the type for an
/// incomplete array ('T[]') of the given element type, array size modifier,
/// and index-type qualifiers.
QualType ASTContext::getIncompleteArrayType(QualType elementType,
                                            ArrayType::ArraySizeModifier ASM,
                                            unsigned elementTypeQuals) const {
  llvm::FoldingSetNodeID ID;
  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
  void *insertPos = nullptr;
  if (IncompleteArrayType *iat =
       IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
    return QualType(iat, 0);
  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field. We also have to pull
  // qualifiers off the element type.
  QualType canon;
  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(elementType).split();
    canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
                                   ASM, elementTypeQuals);
    canon = getQualifiedType(canon, canonSplit.Quals);
    // Get the new insert position for the node we care about; the recursive
    // call above inserted into the folding set and may have invalidated it.
    IncompleteArrayType *existing =
      IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
    assert(!existing && "Shouldn't be in the map!"); (void) existing;
  }
  IncompleteArrayType *newType = new (*this, TypeAlignment)
    IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
  IncompleteArrayTypes.InsertNode(newType, insertPos);
  Types.push_back(newType);
  return QualType(newType, 0);
}
/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
                                   VectorType::VectorKind VecKind) const {
  assert(vecType->isBuiltinType());
  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);
  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
    // Get the new insert position for the node we care about; the recursive
    // call above may have grown the folding set.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  VectorType *New = new (*this, TypeAlignment)
    VectorType(vecType, NumElts, Canonical, VecKind);
  VectorTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}
/// getExtVectorType - Return the unique reference to an extended vector type
/// of the specified element type and size. The element type must be a
/// built-in type (or a dependent type).
QualType
ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
  assert(vecType->isBuiltinType() || vecType->isDependentType());
  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
                      VectorType::GenericVector);
  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);
  // If the element type isn't canonical, this won't be a canonical type either,
  // so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
    // Get the new insert position for the node we care about; the recursive
    // call above may have grown the folding set.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  ExtVectorType *New = new (*this, TypeAlignment)
    ExtVectorType(vecType, NumElts, Canonical);
  VectorTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}
/// getDependentSizedExtVectorType - Return a reference to an ext_vector type
/// whose element count is the (value-dependent) expression \p SizeExpr.
/// Canonical instances are uniqued in DependentSizedExtVectorTypes; sugared
/// instances (non-canonical element type) are not.
QualType
ASTContext::getDependentSizedExtVectorType(QualType vecType,
                                           Expr *SizeExpr,
                                           SourceLocation AttrLoc) const {
  llvm::FoldingSetNodeID ID;
  DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType),
                                       SizeExpr);
  void *InsertPos = nullptr;
  DependentSizedExtVectorType *Canon
    = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentSizedExtVectorType *New;
  if (Canon) {
    // We already have a canonical version of this array type; use it as
    // the canonical type for a newly-built type.
    New = new (*this, TypeAlignment)
      DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
                                  SizeExpr, AttrLoc);
  } else {
    QualType CanonVecTy = getCanonicalType(vecType);
    if (CanonVecTy == vecType) {
      // This node is itself canonical: register it in the folding set.
      New = new (*this, TypeAlignment)
        DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
                                    AttrLoc);
      DependentSizedExtVectorType *CanonCheck
        = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
      (void)CanonCheck;
      DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
    } else {
      // Build the canonical node first, then a sugared node pointing at it.
      QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
                                                      SourceLocation());
      New = new (*this, TypeAlignment)
        DependentSizedExtVectorType(*this, vecType, Canon, SizeExpr, AttrLoc);
    }
  }
  Types.push_back(New);
  return QualType(New, 0);
}
/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
///
/// The result is uniqued on (result type, ext info); a non-canonical result
/// type gets a canonical sibling built recursively.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  const CallingConv CallConv = Info.getCC();
  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultTy, Info);
  void *InsertPos = nullptr;
  if (FunctionNoProtoType *FT =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);
  QualType Canonical;
  if (!ResultTy.isCanonical()) {
    Canonical = getFunctionNoProtoType(getCanonicalType(ResultTy), Info);
    // Get the new insert position for the node we care about; the recursive
    // call above may have grown the folding set.
    FunctionNoProtoType *NewIP =
      FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  // NOTE(review): CallConv was read from Info above, so this withCallingConv
  // round-trip looks like a no-op -- confirm before simplifying.
  FunctionProtoType::ExtInfo newInfo = Info.withCallingConv(CallConv);
  FunctionNoProtoType *New = new (*this, TypeAlignment)
    FunctionNoProtoType(ResultTy, Canonical, newInfo);
  Types.push_back(New);
  FunctionNoProtoTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}
/// \brief Determine whether \p T is canonical as the result type of a
/// function: it must be canonical and carry no explicit ARC ownership
/// qualifier (OCL_None or OCL_ExplicitNone only).
static bool isCanonicalResultType(QualType T) {
  if (!T.isCanonical())
    return false;
  Qualifiers::ObjCLifetime Lifetime = T.getObjCLifetime();
  return Lifetime == Qualifiers::OCL_None ||
         Lifetime == Qualifiers::OCL_ExplicitNone;
}
/// getFunctionType - Return a uniqued reference to the prototyped function
/// type with the given result type, parameter types, and extended prototype
/// info (exception spec, qualifiers, variadic-ness, ...).
/// \p ParamMods carries the HLSL in/out/inout modifier for each parameter
/// and participates in uniquing.
QualType
ASTContext::getFunctionType(QualType ResultTy, ArrayRef<QualType> ArgArray,
                            const FunctionProtoType::ExtProtoInfo &EPI,
                            ArrayRef<hlsl::ParameterModifier> ParamMods) const { // HLSL Change - param mods
  size_t NumArgs = ArgArray.size();
  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, ParamMods, // HLSL Change - param mods
                             *this);
  void *InsertPos = nullptr;
  if (FunctionProtoType *FTP =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FTP, 0);
  // Determine whether the type being created is already canonical or not:
  // no exception spec, canonical result type, no trailing return, and every
  // parameter canonical-as-param.
  bool isCanonical =
    EPI.ExceptionSpec.Type == EST_None && isCanonicalResultType(ResultTy) &&
    !EPI.HasTrailingReturn;
  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
    if (!ArgArray[i].isCanonicalAsParam() &&
        // HLSL Change Begins.
        // Don't decay array to pointer.
        // But do need to get canonical version of array.
        !(getLangOpts().HLSL && ArgArray[i]->isArrayType() &&
          ArgArray[i].isCanonical()))
    // HLSL Change Ends.
      isCanonical = false;
  // If this type isn't canonical, get the canonical version of it.
  // The exception spec is not part of the canonical type.
  QualType Canonical;
  if (!isCanonical) {
    SmallVector<QualType, 16> CanonicalArgs;
    CanonicalArgs.reserve(NumArgs);
    for (unsigned i = 0; i != NumArgs; ++i)
      CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
    FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
    CanonicalEPI.HasTrailingReturn = false;
    CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
    // Result types do not have ARC lifetime qualifiers.
    QualType CanResultTy = getCanonicalType(ResultTy);
    if (ResultTy.getQualifiers().hasObjCLifetime()) {
      Qualifiers Qs = CanResultTy.getQualifiers();
      Qs.removeObjCLifetime();
      CanResultTy = getQualifiedType(CanResultTy.getUnqualifiedType(), Qs);
    }
    Canonical = getFunctionType(CanResultTy, CanonicalArgs, CanonicalEPI, ParamMods); // HLSL Change - param mods
    // Get the new insert position for the node we care about; the recursive
    // call above may have grown the folding set.
    FunctionProtoType *NewIP =
      FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  // FunctionProtoType objects are allocated with extra bytes after
  // them for three variable size arrays at the end:
  //  - parameter types
  //  - exception types
  //  - consumed-arguments flags
  // Instead of the exception types, there could be a noexcept
  // expression, or information used to resolve the exception
  // specification.
  size_t Size = sizeof(FunctionProtoType) +
                NumArgs * sizeof(QualType);
  if (EPI.ExceptionSpec.Type == EST_Dynamic) {
    Size += EPI.ExceptionSpec.Exceptions.size() * sizeof(QualType);
  } else if (EPI.ExceptionSpec.Type == EST_ComputedNoexcept) {
    Size += sizeof(Expr*);
  } else if (EPI.ExceptionSpec.Type == EST_Uninstantiated) {
    Size += 2 * sizeof(FunctionDecl*);
  } else if (EPI.ExceptionSpec.Type == EST_Unevaluated) {
    Size += sizeof(FunctionDecl*);
  }
  // HLSL Change Starts
  // HLSL functions carry no exception spec, so the parameter-modifier array
  // can assume it begins immediately after the parameter types.
  if (getLangOpts().HLSL) {
    assert((Size == sizeof(FunctionProtoType) + NumArgs * sizeof(QualType)) && "else exception spec was added");
    Size += NumArgs * sizeof(hlsl::ParameterModifier);
  }
  // HLSL Change Ends
  if (EPI.ConsumedParameters)
    Size += NumArgs * sizeof(bool);
  FunctionProtoType *FTP = (FunctionProtoType*) Allocate(Size, TypeAlignment);
  FunctionProtoType::ExtProtoInfo newEPI = EPI;
  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI, ParamMods); // HLSL - param mods
  Types.push_back(FTP);
  FunctionProtoTypes.InsertNode(FTP, InsertPos);
  return QualType(FTP, 0);
}
#ifndef NDEBUG
/// Returns true when the record declaration requires an injected-class-name
/// type node: partial specializations and templated (but not specialized)
/// class definitions.
static bool NeedsInjectedClassNameType(const RecordDecl *D) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (!RD)
    return false;
  if (isa<ClassTemplatePartialSpecializationDecl>(RD))
    return true;
  return RD->getDescribedClassTemplate() &&
         !isa<ClassTemplateSpecializationDecl>(RD);
}
#endif
/// getInjectedClassNameType - Return the unique reference to the
/// injected class name type for the specified templated declaration.
/// The type node is cached on the declaration (TypeForDecl) and shared
/// across redeclarations.
QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
                                              QualType TST) const {
  assert(NeedsInjectedClassNameType(Decl));
  if (Decl->TypeForDecl) {
    // Already built for this exact declaration.
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
    // Reuse the node created for an earlier redeclaration.
    assert(PrevDecl->TypeForDecl && "previous declaration has no type");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else {
    Type *newType =
      new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
    Decl->TypeForDecl = newType;
    Types.push_back(newType);
  }
  return QualType(Decl->TypeForDecl, 0);
}
/// getTypeDeclType - Return the unique reference to the type for the
/// specified type declaration.  This is the out-of-line slow path, reached
/// only when the declaration has no cached TypeForDecl yet; it dispatches
/// on the concrete declaration kind.
QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
  assert(Decl && "Passed null for Decl param");
  assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
  if (const TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Decl))
    return getTypedefType(Typedef);
  assert(!isa<TemplateTypeParmDecl>(Decl) &&
         "Template type parameter types are always available.");
  if (const RecordDecl *Record = dyn_cast<RecordDecl>(Decl)) {
    assert(Record->isFirstDecl() && "struct/union has previous declaration");
    assert(!NeedsInjectedClassNameType(Record));
    return getRecordType(Record);
  } else if (const EnumDecl *Enum = dyn_cast<EnumDecl>(Decl)) {
    assert(Enum->isFirstDecl() && "enum has previous declaration");
    return getEnumType(Enum);
  } else if (const UnresolvedUsingTypenameDecl *Using =
               dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
    // Unresolved using declarations get a dedicated node cached on the decl.
    Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
    Decl->TypeForDecl = newType;
    Types.push_back(newType);
  } else
    llvm_unreachable("TypeDecl without a type?");
  return QualType(Decl->TypeForDecl, 0);
}
/// getTypedefType - Return the unique reference to the type for the
/// specified typedef name decl.  If \p Canonical is null, the canonical
/// form of the typedef's underlying type is computed and used.
QualType
ASTContext::getTypedefType(const TypedefNameDecl *Decl,
                           QualType Canonical) const {
  // Reuse the node cached on the declaration, if present.
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  QualType CanonTy = Canonical;
  if (CanonTy.isNull())
    CanonTy = getCanonicalType(Decl->getUnderlyingType());

  TypedefType *Node = new (*this, TypeAlignment)
      TypedefType(Type::Typedef, Decl, CanonTy);
  Decl->TypeForDecl = Node;
  Types.push_back(Node);
  return QualType(Node, 0);
}
/// getRecordType - Return the declared type of the given struct/union/class
/// declaration, creating and caching the node on first use.
QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
  // Fast path: the node is already cached on this declaration.
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  // Share the node built for an earlier redeclaration, if any.
  if (const RecordDecl *Prev = Decl->getPreviousDecl())
    if (Prev->TypeForDecl) {
      Decl->TypeForDecl = Prev->TypeForDecl;
      return QualType(Decl->TypeForDecl, 0);
    }

  RecordType *Node = new (*this, TypeAlignment) RecordType(Decl);
  Decl->TypeForDecl = Node;
  Types.push_back(Node);
  return QualType(Node, 0);
}
/// getEnumType - Return the declared type of the given enum declaration,
/// creating and caching the node on first use.
QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
  // Fast path: the node is already cached on this declaration.
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);

  // Share the node built for an earlier redeclaration, if any.
  if (const EnumDecl *Prev = Decl->getPreviousDecl())
    if (Prev->TypeForDecl) {
      Decl->TypeForDecl = Prev->TypeForDecl;
      return QualType(Decl->TypeForDecl, 0);
    }

  EnumType *Node = new (*this, TypeAlignment) EnumType(Decl);
  Decl->TypeForDecl = Node;
  Types.push_back(Node);
  return QualType(Node, 0);
}
/// getAttributedType - Return a uniqued AttributedType wrapping
/// \p modifiedType with the given attribute kind; \p equivalentType supplies
/// the semantics (and the canonical form).
QualType ASTContext::getAttributedType(AttributedType::Kind attrKind,
                                       QualType modifiedType,
                                       QualType equivalentType) {
  llvm::FoldingSetNodeID ID;
  AttributedType::Profile(ID, attrKind, modifiedType, equivalentType);
  void *Pos = nullptr;
  if (AttributedType *Existing = AttributedTypes.FindNodeOrInsertPos(ID, Pos))
    return QualType(Existing, 0);

  // Attributes are sugar: canonicalize through the equivalent type.
  QualType Canon = getCanonicalType(equivalentType);
  AttributedType *Node = new (*this, TypeAlignment)
      AttributedType(Canon, attrKind, modifiedType, equivalentType);
  Types.push_back(Node);
  AttributedTypes.InsertNode(Node, Pos);
  return QualType(Node, 0);
}
/// \brief Retrieve a substitution-result type: the type produced by
/// substituting \p Replacement (which must be canonical) for the template
/// type parameter \p Parm.
QualType
ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
                                         QualType Replacement) const {
  assert(Replacement.isCanonical()
         && "replacement types must always be canonical");

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
  void *Pos = nullptr;
  SubstTemplateTypeParmType *Node =
      SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, Pos);
  if (!Node) {
    // First request for this (parameter, replacement) pair: build and cache.
    Node = new (*this, TypeAlignment)
        SubstTemplateTypeParmType(Parm, Replacement);
    Types.push_back(Node);
    SubstTemplateTypeParmTypes.InsertNode(Node, Pos);
  }
  return QualType(Node, 0);
}
/// \brief Retrieve a substituted template type parameter pack type: the
/// result of substituting the argument pack \p ArgPack for the parameter
/// pack \p Parm.  Every element of \p ArgPack must be a canonical type.
QualType ASTContext::getSubstTemplateTypeParmPackType(
                                          const TemplateTypeParmType *Parm,
                                              const TemplateArgument &ArgPack) {
#ifndef NDEBUG
  for (const auto &P : ArgPack.pack_elements()) {
    assert(P.getKind() == TemplateArgument::Type &&"Pack contains a non-type");
    assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
  }
#endif
  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
  void *InsertPos = nullptr;
  if (SubstTemplateTypeParmPackType *SubstParm
        = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(SubstParm, 0);
  QualType Canon;
  if (!Parm->isCanonicalUnqualified()) {
    Canon = getCanonicalType(QualType(Parm, 0));
    Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
                                             ArgPack);
    // Building the canonical form may have grown the folding set; recompute
    // the insertion position.
    SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
  }
  SubstTemplateTypeParmPackType *SubstParm
    = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
                                                               ArgPack);
  Types.push_back(SubstParm);
  // Insert into the *pack* folding set.  This previously inserted into
  // SubstTemplateTypeParmTypes, which defeated uniquing of pack types (the
  // lookup above never found them) and polluted the non-pack set with nodes
  // of the wrong dynamic type.
  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
  return QualType(SubstParm, 0);
}
/// \brief Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                             bool ParameterPack,
                                             TemplateTypeParmDecl *TTPDecl) const {
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = nullptr;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (TypeParm)
    return QualType(TypeParm, 0);
  if (TTPDecl) {
    // A named parameter is sugar over the anonymous (depth, index) form,
    // which serves as its canonical type.
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken");
    (void)TypeCheck;
  } else
    TypeParm = new (*this, TypeAlignment)
      TemplateTypeParmType(Depth, Index, ParameterPack);
  Types.push_back(TypeParm);
  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
  return QualType(TypeParm, 0);
}
/// getTemplateSpecializationTypeInfo - Build a template specialization type
/// together with a TypeSourceInfo recording the locations of its name,
/// angle brackets, and each template argument.
TypeSourceInfo *
ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
                                              SourceLocation NameLoc,
                                        const TemplateArgumentListInfo &Args,
                                              QualType Underlying) const {
  assert(!Name.getAsDependentTemplateName() &&
         "No dependent template names here!");

  // Create the type, then attach location information to it.
  QualType SpecTy = getTemplateSpecializationType(Name, Args, Underlying);
  TypeSourceInfo *TSI = CreateTypeSourceInfo(SpecTy);
  TemplateSpecializationTypeLoc TL =
      TSI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
  TL.setTemplateKeywordLoc(SourceLocation());
  TL.setTemplateNameLoc(NameLoc);
  TL.setLAngleLoc(Args.getLAngleLoc());
  TL.setRAngleLoc(Args.getRAngleLoc());
  for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
    TL.setArgLocInfo(I, Args[I].getLocInfo());
  return TSI;
}
/// getTemplateSpecializationType - Convenience overload that strips a
/// TemplateArgumentListInfo down to bare TemplateArguments and forwards to
/// the pointer/length overload.
QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          const TemplateArgumentListInfo &Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");

  const unsigned Count = Args.size();
  SmallVector<TemplateArgument, 4> Plain;
  Plain.reserve(Count);
  for (unsigned I = 0; I != Count; ++I)
    Plain.push_back(Args[I].getArgument());
  return getTemplateSpecializationType(Template, Plain.data(), Count,
                                       Underlying);
}
#ifndef NDEBUG
/// Returns true if any of the given template arguments is a pack expansion.
/// Used by getTemplateSpecializationType to assert that an alias template
/// reaching it without a computed underlying type contains a pack expansion
/// that could not be matched to a parameter pack.
static bool hasAnyPackExpansions(const TemplateArgument *Args,
                                 unsigned NumArgs) {
  for (unsigned I = 0; I != NumArgs; ++I)
    if (Args[I].isPackExpansion())
      return true;
  // No argument is a pack expansion.  (This previously returned true, which
  // made the predicate vacuous and silently disabled the caller's assert.)
  return false;
}
#endif
/// getTemplateSpecializationType - Build a (non-uniqued, sugared) template
/// specialization type for the given template and arguments.  \p Underlying,
/// when non-null, is the type an alias template specialization refers to;
/// it is stored in a trailing slot and also drives the canonical type.
QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          const TemplateArgument *Args,
                                          unsigned NumArgs,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");
  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = TemplateName(QTN->getTemplateDecl());
  bool IsTypeAlias =
    Template.getAsTemplateDecl() &&
    isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
  QualType CanonType;
  if (!Underlying.isNull())
    CanonType = getCanonicalType(Underlying);
  else {
    // We can get here with an alias template when the specialization contains
    // a pack expansion that does not match up with a parameter pack.
    assert((!IsTypeAlias || hasAnyPackExpansions(Args, NumArgs)) &&
           "Caller must compute aliased type");
    IsTypeAlias = false;
    CanonType = getCanonicalTemplateSpecializationType(Template, Args,
                                                       NumArgs);
  }
  // Allocate the (non-canonical) template specialization type, but don't
  // try to unique it: these types typically have location information that
  // we don't unique and don't want to lose.  The trailing storage holds the
  // arguments, plus one QualType for the aliased type when IsTypeAlias.
  void *Mem = Allocate(sizeof(TemplateSpecializationType) +
                       sizeof(TemplateArgument) * NumArgs +
                       (IsTypeAlias? sizeof(QualType) : 0),
                       TypeAlignment);
  TemplateSpecializationType *Spec
    = new (Mem) TemplateSpecializationType(Template, Args, NumArgs, CanonType,
                                         IsTypeAlias ? Underlying : QualType());
  Types.push_back(Spec);
  return QualType(Spec, 0);
}
/// getCanonicalTemplateSpecializationType - Return the canonical, uniqued
/// template specialization type for the given template and arguments: the
/// canonical template name applied to the canonical form of every argument.
QualType
ASTContext::getCanonicalTemplateSpecializationType(TemplateName Template,
                                                   const TemplateArgument *Args,
                                                   unsigned NumArgs) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");
  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = TemplateName(QTN->getTemplateDecl());
  // Build the canonical template specialization type.
  TemplateName CanonTemplate = getCanonicalTemplateName(Template);
  SmallVector<TemplateArgument, 4> CanonArgs;
  CanonArgs.reserve(NumArgs);
  for (unsigned I = 0; I != NumArgs; ++I)
    CanonArgs.push_back(getCanonicalTemplateArgument(Args[I]));
  // Determine whether this canonical template specialization type already
  // exists.
  llvm::FoldingSetNodeID ID;
  TemplateSpecializationType::Profile(ID, CanonTemplate,
                                      CanonArgs.data(), NumArgs, *this);
  void *InsertPos = nullptr;
  TemplateSpecializationType *Spec
    = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (!Spec) {
    // Allocate a new canonical template specialization type.
    void *Mem = Allocate((sizeof(TemplateSpecializationType) +
                          sizeof(TemplateArgument) * NumArgs),
                         TypeAlignment);
    Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
                                                CanonArgs.data(), NumArgs,
                                                QualType(), QualType());
    Types.push_back(Spec);
    TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
  }
  assert(Spec->isDependentType() &&
         "Non-dependent template-id type must have a canonical type");
  return QualType(Spec, 0);
}
/// getElaboratedType - Return a uniqued ElaboratedType wrapping \p NamedType
/// with the given keyword ('struct', 'typename', ...) and nested-name
/// specifier.  Elaboration is pure sugar, so the canonical form is the
/// named type's canonical form.
QualType
ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
                              NestedNameSpecifier *NNS,
                              QualType NamedType) const {
  llvm::FoldingSetNodeID ID;
  ElaboratedType::Profile(ID, Keyword, NNS, NamedType);
  void *Pos = nullptr;
  if (ElaboratedType *Existing = ElaboratedTypes.FindNodeOrInsertPos(ID, Pos))
    return QualType(Existing, 0);

  QualType Canon = NamedType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(NamedType);
    // Canonicalization must not have created this exact node.
    ElaboratedType *Clash = ElaboratedTypes.FindNodeOrInsertPos(ID, Pos);
    assert(!Clash && "Elaborated canonical type broken");
    (void)Clash;
  }

  ElaboratedType *Node = new (*this, TypeAlignment)
      ElaboratedType(Keyword, NNS, NamedType, Canon);
  Types.push_back(Node);
  ElaboratedTypes.InsertNode(Node, Pos);
  return QualType(Node, 0);
}
/// getParenType - Return a uniqued ParenType wrapping \p InnerType.
/// Parentheses are pure sugar, so the canonical form is the inner type's.
QualType
ASTContext::getParenType(QualType InnerType) const {
  llvm::FoldingSetNodeID ID;
  ParenType::Profile(ID, InnerType);
  void *Pos = nullptr;
  if (ParenType *Existing = ParenTypes.FindNodeOrInsertPos(ID, Pos))
    return QualType(Existing, 0);

  QualType Canon = InnerType;
  if (!Canon.isCanonical()) {
    Canon = getCanonicalType(InnerType);
    // Canonicalization must not have created this exact node.
    ParenType *Clash = ParenTypes.FindNodeOrInsertPos(ID, Pos);
    assert(!Clash && "Paren canonical type broken");
    (void)Clash;
  }

  ParenType *Node = new (*this, TypeAlignment) ParenType(InnerType, Canon);
  Types.push_back(Node);
  ParenTypes.InsertNode(Node, Pos);
  return QualType(Node, 0);
}
/// getDependentNameType - Return a uniqued DependentNameType for a
/// dependent name like 'typename T::type'.  When \p Canon is null, the
/// canonical form (canonical specifier, ETK_None spelled as 'typename') is
/// computed here.
QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
                                          NestedNameSpecifier *NNS,
                                          const IdentifierInfo *Name,
                                          QualType Canon) const {
  if (Canon.isNull()) {
    NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
    ElaboratedTypeKeyword CanonKeyword =
        (Keyword == ETK_None) ? ETK_Typename : Keyword;
    // Only build a distinct canonical node when something differs.
    if (CanonNNS != NNS || CanonKeyword != Keyword)
      Canon = getDependentNameType(CanonKeyword, CanonNNS, Name);
  }

  llvm::FoldingSetNodeID ID;
  DependentNameType::Profile(ID, Keyword, NNS, Name);
  void *Pos = nullptr;
  if (DependentNameType *Existing =
          DependentNameTypes.FindNodeOrInsertPos(ID, Pos))
    return QualType(Existing, 0);

  DependentNameType *Node =
      new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon);
  Types.push_back(Node);
  DependentNameTypes.InsertNode(Node, Pos);
  return QualType(Node, 0);
}
/// getDependentTemplateSpecializationType - Convenience overload that copies
/// the arguments out of a TemplateArgumentListInfo and forwards to the
/// pointer/length overload.
QualType
ASTContext::getDependentTemplateSpecializationType(
                                 ElaboratedTypeKeyword Keyword,
                                 NestedNameSpecifier *NNS,
                                 const IdentifierInfo *Name,
                                 const TemplateArgumentListInfo &Args) const {
  // TODO: avoid this copy
  const unsigned Count = Args.size();
  SmallVector<TemplateArgument, 16> ArgCopy;
  ArgCopy.reserve(Count);
  for (unsigned I = 0; I != Count; ++I)
    ArgCopy.push_back(Args[I].getArgument());
  return getDependentTemplateSpecializationType(Keyword, NNS, Name,
                                                Count, ArgCopy.data());
}
/// getDependentTemplateSpecializationType - Return a uniqued type for a
/// dependent template-id naming a member template, e.g.
/// 'typename T::template apply<U>'.  The canonical form uses the canonical
/// specifier, canonical arguments, and spells ETK_None as 'typename'.
QualType
ASTContext::getDependentTemplateSpecializationType(
                                 ElaboratedTypeKeyword Keyword,
                                 NestedNameSpecifier *NNS,
                                 const IdentifierInfo *Name,
                                 unsigned NumArgs,
                                 const TemplateArgument *Args) const {
  assert((!NNS || NNS->isDependent()) &&
         "nested-name-specifier must be dependent");
  llvm::FoldingSetNodeID ID;
  DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
                                               Name, NumArgs, Args);
  void *InsertPos = nullptr;
  DependentTemplateSpecializationType *T
    = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);
  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
  ElaboratedTypeKeyword CanonKeyword = Keyword;
  if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
  // Canonicalize the arguments, tracking whether any of them changed.
  bool AnyNonCanonArgs = false;
  SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
  for (unsigned I = 0; I != NumArgs; ++I) {
    CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
    if (!CanonArgs[I].structurallyEquals(Args[I]))
      AnyNonCanonArgs = true;
  }
  QualType Canon;
  if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
    Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
                                                   Name, NumArgs,
                                                   CanonArgs.data());
    // Find the insert position again.  (The recursive call above may have
    // grown the folding set and invalidated InsertPos.)
    DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
  }
  // The arguments live in trailing storage after the type object.
  void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
                        sizeof(TemplateArgument) * NumArgs),
                       TypeAlignment);
  T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
                                                    Name, NumArgs, Args, Canon);
  Types.push_back(T);
  DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
/// getPackExpansionType - Return a uniqued PackExpansionType for the given
/// pattern ('Pattern...'), with \p NumExpansions set when the expansion
/// length is already known.
QualType ASTContext::getPackExpansionType(QualType Pattern,
                                          Optional<unsigned> NumExpansions) {
  llvm::FoldingSetNodeID ID;
  PackExpansionType::Profile(ID, Pattern, NumExpansions);
  assert(Pattern->containsUnexpandedParameterPack() &&
         "Pack expansions must expand one or more parameter packs");
  void *InsertPos = nullptr;
  PackExpansionType *T
    = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (T)
    return QualType(T, 0);
  QualType Canon;
  if (!Pattern.isCanonical()) {
    Canon = getCanonicalType(Pattern);
    // The canonical type might not contain an unexpanded parameter pack, if it
    // contains an alias template specialization which ignores one of its
    // parameters.
    if (Canon->containsUnexpandedParameterPack()) {
      Canon = getPackExpansionType(Canon, NumExpansions);
      // Find the insert position again, in case we inserted an element into
      // PackExpansionTypes and invalidated our insert position.
      PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
    }
  }
  T = new (*this, TypeAlignment)
    PackExpansionType(Pattern, Canon, NumExpansions);
  Types.push_back(T);
  PackExpansionTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
/// CmpProtocolNames - Comparison predicate for sorting protocols
/// alphabetically by declaration name.
static int __cdecl CmpProtocolNames(ObjCProtocolDecl *const *LHS, // HLSL Change - __cdecl
                                    ObjCProtocolDecl *const *RHS) {
  DeclarationName LName = (*LHS)->getDeclName();
  DeclarationName RName = (*RHS)->getDeclName();
  return DeclarationName::compare(LName, RName);
}
/// Returns true when the protocol list contains only canonical declarations
/// in strictly ascending name order (i.e. sorted with no duplicates).
static bool areSortedAndUniqued(ObjCProtocolDecl * const *Protocols,
                                unsigned NumProtocols) {
  // An empty list is trivially sorted and uniqued.
  if (NumProtocols == 0)
    return true;

  if (Protocols[0]->getCanonicalDecl() != Protocols[0])
    return false;
  for (unsigned i = 1; i != NumProtocols; ++i) {
    // Every entry must be canonical...
    if (Protocols[i]->getCanonicalDecl() != Protocols[i])
      return false;
    // ...and strictly greater than its predecessor by name.
    if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0)
      return false;
  }
  return true;
}
/// Sorts the protocol list by name, replaces each entry with its canonical
/// declaration, and removes duplicates, updating NumProtocols in place.
static void SortAndUniqueProtocols(ObjCProtocolDecl **Protocols,
                                   unsigned &NumProtocols) {
  ObjCProtocolDecl **End = Protocols + NumProtocols;

  // Sort protocols, keyed by name.
  llvm::array_pod_sort(Protocols, End, CmpProtocolNames);

  // Canonicalize each entry.
  for (unsigned I = 0, N = NumProtocols; I != N; ++I)
    Protocols[I] = Protocols[I]->getCanonicalDecl();

  // Drop adjacent duplicates and report the new length to the caller.
  End = std::unique(Protocols, End);
  NumProtocols = End - Protocols;
}
/// getObjCObjectType - Legacy entry point taking a raw protocol array:
/// forwards with no type arguments and isKindOf = false.
QualType ASTContext::getObjCObjectType(QualType BaseType,
                                       ObjCProtocolDecl * const *Protocols,
                                       unsigned NumProtocols) const {
  ArrayRef<ObjCProtocolDecl *> ProtocolRef(Protocols, NumProtocols);
  return getObjCObjectType(BaseType, ArrayRef<QualType>(), ProtocolRef,
                           /*isKindOf=*/false);
}
/// getObjCObjectType - Return a uniqued ObjCObjectType with the given base
/// type, type arguments, protocol qualifiers, and __kindof flag.  The
/// canonical form has a canonical base, canonical type arguments, and a
/// sorted-and-uniqued protocol list.
QualType ASTContext::getObjCObjectType(
           QualType baseType,
           ArrayRef<QualType> typeArgs,
           ArrayRef<ObjCProtocolDecl *> protocols,
           bool isKindOf) const {
  // If the base type is an interface and there aren't any protocols or
  // type arguments to add, then the interface type will do just fine.
  if (typeArgs.empty() && protocols.empty() && !isKindOf &&
      isa<ObjCInterfaceType>(baseType))
    return baseType;
  // Look in the folding set for an existing type.
  llvm::FoldingSetNodeID ID;
  ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf);
  void *InsertPos = nullptr;
  if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(QT, 0);
  // Determine the type arguments to be used for canonicalization,
  // which may be explicitly specified here or written on the base
  // type.
  ArrayRef<QualType> effectiveTypeArgs = typeArgs;
  if (effectiveTypeArgs.empty()) {
    if (auto baseObject = baseType->getAs<ObjCObjectType>())
      effectiveTypeArgs = baseObject->getTypeArgs();
  }
  // Build the canonical type, which has the canonical base type and a
  // sorted-and-uniqued list of protocols and the type arguments
  // canonicalized.
  QualType canonical;
  bool typeArgsAreCanonical = std::all_of(effectiveTypeArgs.begin(),
                                          effectiveTypeArgs.end(),
                                          [&](QualType type) {
                                            return type.isCanonical();
                                          });
  bool protocolsSorted = areSortedAndUniqued(protocols.data(),
                                             protocols.size());
  if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
    // Determine the canonical type arguments.
    ArrayRef<QualType> canonTypeArgs;
    SmallVector<QualType, 4> canonTypeArgsVec;
    if (!typeArgsAreCanonical) {
      canonTypeArgsVec.reserve(effectiveTypeArgs.size());
      for (auto typeArg : effectiveTypeArgs)
        canonTypeArgsVec.push_back(getCanonicalType(typeArg));
      canonTypeArgs = canonTypeArgsVec;
    } else {
      canonTypeArgs = effectiveTypeArgs;
    }
    ArrayRef<ObjCProtocolDecl *> canonProtocols;
    SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
    if (!protocolsSorted) {
      // Copy, then sort/canonicalize/unique the copy in place.
      canonProtocolsVec.insert(canonProtocolsVec.begin(),
                               protocols.begin(),
                               protocols.end());
      unsigned uniqueCount = protocols.size();
      SortAndUniqueProtocols(&canonProtocolsVec[0], uniqueCount);
      canonProtocols = llvm::makeArrayRef(&canonProtocolsVec[0], uniqueCount);
    } else {
      canonProtocols = protocols;
    }
    canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs,
                                  canonProtocols, isKindOf);
    // Regenerate InsertPos.  (Building the canonical type may have grown the
    // folding set.)
    ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
  }
  // Type arguments and protocols live in trailing storage.
  unsigned size = sizeof(ObjCObjectTypeImpl);
  size += typeArgs.size() * sizeof(QualType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(size, TypeAlignment);
  ObjCObjectTypeImpl *T =
    new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
                                 isKindOf);
  Types.push_back(T);
  ObjCObjectTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
/// protocol list adopt all protocols in QT's qualified-id protocol
/// list.
bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
                                                ObjCInterfaceDecl *IC) {
  // Only qualified-id types (e.g. id<P1, P2>) carry the protocol list we
  // need to check against.
  if (!QT->isObjCQualifiedIdType())
    return false;
  const ObjCObjectPointerType *OPT = QT->getAs<ObjCObjectPointerType>();
  if (!OPT)
    return false;
  // Every protocol qualifying QT must be implemented by the interface.
  for (auto *Proto : OPT->quals())
    if (!IC->ClassImplementsProtocol(Proto, false))
      return false;
  return true;
}
/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
/// QT's qualified-id protocol list adopt all protocols in IDecl's list
/// of protocols. Used to decide whether bridge-casting between the two
/// is acceptable.
bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
                                                ObjCInterfaceDecl *IDecl) {
  if (!QT->isObjCQualifiedIdType())
    return false;
  const ObjCObjectPointerType *OPT = QT->getAs<ObjCObjectPointerType>();
  if (!OPT)
    return false;
  // Without a definition we cannot enumerate IDecl's protocols.
  if (!IDecl->hasDefinition())
    return false;
  // Gather every protocol IDecl adopts, directly or via inheritance.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
  CollectInheritedProtocols(IDecl, InheritedProtocols);
  if (InheritedProtocols.empty())
    return false;
  // Check that if every protocol in list of id<plist> conforms to a protocol
  // of IDecl's, then bridge casting is ok.
  // Note: Conforms stays false when OPT has no qualifiers, so an empty list
  // falls through to the second (stricter) check below.
  bool Conforms = false;
  for (auto *Proto : OPT->quals()) {
    Conforms = false;
    for (auto *PI : InheritedProtocols) {
      if (ProtocolCompatibleWithProtocol(Proto, PI)) {
        Conforms = true;
        break;
      }
    }
    if (!Conforms)
      break;
  }
  if (Conforms)
    return true;
  // Second direction: every protocol IDecl inherits must be adopted by at
  // least one protocol in QT's qualifier list.
  for (auto *PI : InheritedProtocols) {
    // If both the right and left sides have qualifiers.
    bool Adopts = false;
    for (auto *Proto : OPT->quals()) {
      // return 'true' if 'PI' is in the inheritance hierarchy of Proto
      if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
        break;
    }
    if (!Adopts)
      return false;
  }
  return true;
}
/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
/// the given object type.
QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
  // See whether an identical node already exists in the folding set.
  llvm::FoldingSetNodeID ID;
  ObjCObjectPointerType::Profile(ID, ObjectT);
  void *InsertPos = nullptr;
  if (ObjCObjectPointerType *Existing =
        ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Existing, 0);
  // A non-canonical pointee means this node is non-canonical too; build the
  // canonical node first. That recursion may grow the folding set, so the
  // insertion hint must be refreshed afterwards.
  QualType Canonical;
  if (!ObjectT.isCanonical()) {
    Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));
    ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
  }
  // No match: allocate and register a fresh node.
  void *Buffer = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
  ObjCObjectPointerType *Result =
    new (Buffer) ObjCObjectPointerType(Canonical, ObjectT);
  Types.push_back(Result);
  ObjCObjectPointerTypes.InsertNode(Result, InsertPos);
  return QualType(Result, 0);
}
/// getObjCInterfaceType - Return the unique reference to the type for the
/// specified ObjC interface decl. The list of protocols is optional.
QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
                                          ObjCInterfaceDecl *PrevDecl) const {
  // Reuse a type node already cached on this declaration.
  if (Decl->TypeForDecl)
    return QualType(Decl->TypeForDecl, 0);
  // Share the node built for an earlier declaration of the same interface.
  if (PrevDecl) {
    assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    return QualType(PrevDecl->TypeForDecl, 0);
  }
  // Prefer the definition, if there is one.
  if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
    Decl = Def;
  void *Buffer = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
  ObjCInterfaceType *Result = new (Buffer) ObjCInterfaceType(Decl);
  Decl->TypeForDecl = Result;
  Types.push_back(Result);
  return QualType(Result, 0);
}
/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
/// TypeOfExprType AST's (since expressions are never shared). For example,
/// multiple declarations that refer to "typeof(x)" all contain different
/// DeclRefExpr's. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
  TypeOfExprType *Result;
  if (!tofExpr->isTypeDependent()) {
    // Non-dependent: the canonical type is the operand's canonical type.
    Result = new (*this, TypeAlignment)
        TypeOfExprType(tofExpr, getCanonicalType(tofExpr->getType()));
  } else {
    // Dependent typeof(expr) types are uniqued by their expression so that
    // structurally identical occurrences share one canonical node.
    llvm::FoldingSetNodeID ID;
    DependentTypeOfExprType::Profile(ID, *this, tofExpr);
    void *InsertPos = nullptr;
    DependentTypeOfExprType *Canon =
        DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (!Canon) {
      // First occurrence: this node itself becomes the canonical type.
      Canon =
          new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
      DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
      Result = Canon;
    } else {
      // An identical dependent typeof(expr) exists; point at it.
      Result = new (*this, TypeAlignment)
          TypeOfExprType(tofExpr, QualType((TypeOfExprType *)Canon, 0));
    }
  }
  Types.push_back(Result);
  return QualType(Result, 0);
}
/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
/// TypeOfType nodes. The only motivation to unique these nodes would be
/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
/// an issue. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
QualType ASTContext::getTypeOfType(QualType tofType) const {
  // Each typeof(T) gets a fresh node whose canonical form is T's.
  TypeOfType *Result = new (*this, TypeAlignment)
      TypeOfType(tofType, getCanonicalType(tofType));
  Types.push_back(Result);
  return QualType(Result, 0);
}
/// \brief Unlike many "get<Type>" functions, we don't unique DecltypeType
/// nodes. This would never be helpful, since each such type has its own
/// expression, and would not give a significant memory saving, since there
/// is an Expr tree under each such type.
QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
  DecltypeType *Result;
  // C++11 [temp.type]p2:
  //   If an expression e involves a template parameter, decltype(e) denotes a
  //   unique dependent type. Two such decltype-specifiers refer to the same
  //   type only if their expressions are equivalent (14.5.6.1).
  if (!e->isInstantiationDependent()) {
    Result = new (*this, TypeAlignment)
        DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType));
  } else {
    // Dependent decltype(e) types are uniqued by their expression.
    llvm::FoldingSetNodeID ID;
    DependentDecltypeType::Profile(ID, *this, e);
    void *InsertPos = nullptr;
    DependentDecltypeType *Canon =
        DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
    if (!Canon) {
      // First occurrence of this expression: build the canonical node.
      Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
      DependentDecltypeTypes.InsertNode(Canon, InsertPos);
    }
    Result = new (*this, TypeAlignment)
        DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0));
  }
  Types.push_back(Result);
  return QualType(Result, 0);
}
/// getUnaryTransformationType - We don't unique these, since the memory
/// savings are minimal and these are rare.
QualType ASTContext::getUnaryTransformType(QualType BaseType,
                                           QualType UnderlyingType,
                                           UnaryTransformType::UTTKind Kind)
    const {
  // A dependent underlying type has no canonical form yet.
  QualType Canonical = UnderlyingType->isDependentType()
                           ? QualType()
                           : getCanonicalType(UnderlyingType);
  UnaryTransformType *Result = new (*this, TypeAlignment)
      UnaryTransformType(BaseType, UnderlyingType, Kind, Canonical);
  Types.push_back(Result);
  return QualType(Result, 0);
}
/// getAutoType - Return the uniqued reference to the 'auto' type which has been
/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
/// canonical deduced-but-dependent 'auto' type.
QualType ASTContext::getAutoType(QualType DeducedType, bool IsDecltypeAuto,
                                 bool IsDependent) const {
  // The plain undeduced, non-dependent 'auto' is a lazily-built singleton.
  if (DeducedType.isNull() && !IsDecltypeAuto && !IsDependent)
    return getAutoDeductType();
  // Otherwise unique through the folding set.
  void *InsertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  AutoType::Profile(ID, DeducedType, IsDecltypeAuto, IsDependent);
  if (AutoType *Existing = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Existing, 0);
  AutoType *Result = new (*this, TypeAlignment)
      AutoType(DeducedType, IsDecltypeAuto, IsDependent);
  Types.push_back(Result);
  if (InsertPos)
    AutoTypes.InsertNode(Result, InsertPos);
  return QualType(Result, 0);
}
/// getAtomicType - Return the uniqued reference to the atomic type for
/// the given value type.
QualType ASTContext::getAtomicType(QualType T) const {
  // Unique _Atomic(T) nodes through the folding set so structurally equal
  // types share one node.
  llvm::FoldingSetNodeID ID;
  AtomicType::Profile(ID, T);
  void *InsertPos = nullptr;
  if (AtomicType *Existing = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Existing, 0);
  // A non-canonical value type makes this node non-canonical too; build the
  // canonical node first, then refresh the insertion hint the recursion may
  // have invalidated.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getAtomicType(getCanonicalType(T));
    AtomicType *Clash = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!Clash && "Shouldn't be in the map!");
    (void)Clash;
  }
  AtomicType *Result = new (*this, TypeAlignment) AtomicType(T, Canonical);
  Types.push_back(Result);
  AtomicTypes.InsertNode(Result, InsertPos);
  return QualType(Result, 0);
}
/// getAutoDeductType - Get type pattern for deducing against 'auto'.
QualType ASTContext::getAutoDeductType() const {
  // Lazily build the singleton undeduced 'auto' node on first use.
  if (AutoDeductTy.isNull()) {
    AutoType *Pattern = new (*this, TypeAlignment)
        AutoType(QualType(), /*decltype(auto)*/ false, /*dependent*/ false);
    AutoDeductTy = QualType(Pattern, 0);
  }
  return AutoDeductTy;
}
/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
QualType ASTContext::getAutoRRefDeductType() const {
  // Fast path: already built.
  if (!AutoRRefDeductTy.isNull())
    return AutoRRefDeductTy;
  // Derive 'auto &&' from the plain 'auto' deduction pattern.
  AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType());
  assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
  return AutoRRefDeductTy;
}
/// getTagDeclType - Return the unique reference to the type for the
/// specified TagDecl (struct/union/class/enum) decl.
QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
  assert (Decl);
  // FIXME: What is the design on getTagDeclType when it requires casting
  // away const? mutable?
  TagDecl *MutableDecl = const_cast<TagDecl *>(Decl);
  return getTypeDeclType(MutableDecl);
}
/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
/// needs to agree with the definition in <stddef.h>.
CanQualType ASTContext::getSizeType() const {
  // The target decides which builtin integer kind size_t maps to.
  const auto SizeKind = Target->getSizeType();
  return getFromTargetType(SizeKind);
}
/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
CanQualType ASTContext::getIntMaxType() const {
  // Delegate the width choice to the target.
  const auto Kind = Target->getIntMaxType();
  return getFromTargetType(Kind);
}
/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
CanQualType ASTContext::getUIntMaxType() const {
  // Delegate the width choice to the target.
  const auto Kind = Target->getUIntMaxType();
  return getFromTargetType(Kind);
}
/// getSignedWCharType - Return the type of "signed wchar_t".
/// Used when in C++, as a GCC extension.
QualType ASTContext::getSignedWCharType() const {
  // FIXME: derive from "Target" ?
  // NOTE(review): returns plain wchar_t — presumably assumes wchar_t is
  // signed on the target; confirm against Target's wchar_t signedness.
  return WCharTy;
}
/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
/// Used when in C++, as a GCC extension.
QualType ASTContext::getUnsignedWCharType() const {
  // FIXME: derive from "Target" ?
  // NOTE(review): hard-codes 'unsigned int' rather than an unsigned type of
  // wchar_t's actual width — confirm this matches the supported targets.
  return UnsignedIntTy;
}
/// Return the target's "intptr_t" type.
QualType ASTContext::getIntPtrType() const {
  const auto Kind = Target->getIntPtrType();
  return getFromTargetType(Kind);
}
/// Return the target's "uintptr_t" type, derived as the unsigned
/// counterpart of intptr_t.
QualType ASTContext::getUIntPtrType() const {
  QualType Signed = getIntPtrType();
  return getCorrespondingUnsignedType(Signed);
}
/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
QualType ASTContext::getPointerDiffType() const {
  // 0 here selects the default address space's ptrdiff type.
  const auto Kind = Target->getPtrDiffType(0);
  return getFromTargetType(Kind);
}
/// \brief Return the unique type for "pid_t" defined in
/// <sys/types.h>. We need this to compute the correct type for vfork().
QualType ASTContext::getProcessIDType() const {
  const auto Kind = Target->getProcessIDType();
  return getFromTargetType(Kind);
}
//===----------------------------------------------------------------------===//
// Type Operators
//===----------------------------------------------------------------------===//
/// Compute the canonical parameter type: canonicalize, decay VLAs, then
/// apply parameter adjustment (arrays and functions become pointers), and
/// finally drop any remaining top-level qualifiers.
CanQualType ASTContext::getCanonicalParamType(QualType T) const {
  // Push qualifiers into arrays, and then discard any remaining
  // qualifiers.
  T = getVariableArrayDecayedType(getCanonicalType(T));
  const Type *Raw = T.getTypePtr();
  QualType Adjusted;
  if (isa<ArrayType>(Raw)) {
    // HLSL Change Starts
    if (getLangOpts().HLSL)
      Adjusted = QualType(Raw, 0); // HLSL: array parameters do not decay.
    else
      // HLSL Change Ends
      Adjusted = getArrayDecayedType(QualType(Raw, 0));
  } else if (isa<FunctionType>(Raw)) {
    Adjusted = getPointerType(QualType(Raw, 0));
  } else {
    Adjusted = QualType(Raw, 0);
  }
  return CanQualType::CreateUnsafe(Adjusted);
}
/// Strip qualifiers from every level of an array type, accumulating them
/// into \p quals, and return the fully unqualified type. Non-array types
/// simply have their top-level qualifiers split off.
QualType ASTContext::getUnqualifiedArrayType(QualType type,
                                             Qualifiers &quals) {
  SplitQualType splitType = type.getSplitUnqualifiedType();
  // FIXME: getSplitUnqualifiedType() actually walks all the way to
  // the unqualified desugared type and then drops it on the floor.
  // We then have to strip that sugar back off with
  // getUnqualifiedDesugaredType(), which is silly.
  const ArrayType *AT =
    dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
  // If we don't have an array, just use the results in splitType.
  if (!AT) {
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }
  // Otherwise, recurse on the array's element type.
  QualType elementType = AT->getElementType();
  QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);
  // If that didn't change the element type, AT has no qualifiers, so we
  // can just use the results in splitType.
  if (elementType == unqualElementType) {
    assert(quals.empty()); // from the recursive call
    quals = splitType.Quals;
    return QualType(splitType.Ty, 0);
  }
  // Otherwise, add in the qualifiers from the outermost type, then
  // build the type back up.
  quals.addConsistentQualifiers(splitType.Quals);
  // Rebuild the array with the unqualified element type, preserving the
  // array kind and size but dropping index-type qualifiers (passed as 0).
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
    return getConstantArrayType(unqualElementType, CAT->getSize(),
                                CAT->getSizeModifier(), 0);
  }
  if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) {
    return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
  }
  if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT)) {
    return getVariableArrayType(unqualElementType,
                                VAT->getSizeExpr(),
                                VAT->getSizeModifier(),
                                VAT->getIndexTypeCVRQualifiers(),
                                VAT->getBracketsRange());
  }
  const DependentSizedArrayType *DSAT = cast<DependentSizedArrayType>(AT);
  return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
                                    DSAT->getSizeModifier(), 0,
                                    SourceRange());
}
/// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types that
/// may be similar (C++ 4.4), replaces T1 and T2 with the type that
/// they point to and return true. If T1 and T2 aren't pointer types
/// or pointer-to-member types, or if they are not similar at this
/// level, returns false and leaves T1 and T2 unchanged. Top-level
/// qualifiers on T1 and T2 are ignored. This function will typically
/// be called in a loop that successively "unwraps" pointer and
/// pointer-to-member types to compare them at each level.
bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) {
  // Case 1: both are ordinary pointers.
  const PointerType *P1 = T1->getAs<PointerType>();
  const PointerType *P2 = T2->getAs<PointerType>();
  if (P1 && P2) {
    T1 = P1->getPointeeType();
    T2 = P2->getPointeeType();
    return true;
  }
  // Case 2: both are pointers-to-member of the same (unqualified) class.
  const MemberPointerType *M1 = T1->getAs<MemberPointerType>();
  const MemberPointerType *M2 = T2->getAs<MemberPointerType>();
  if (M1 && M2 &&
      hasSameUnqualifiedType(QualType(M1->getClass(), 0),
                             QualType(M2->getClass(), 0))) {
    T1 = M1->getPointeeType();
    T2 = M2->getPointeeType();
    return true;
  }
  // Case 3: Objective-C object pointers (only when ObjC is enabled).
  if (getLangOpts().ObjC1) {
    const ObjCObjectPointerType *O1 = T1->getAs<ObjCObjectPointerType>();
    const ObjCObjectPointerType *O2 = T2->getAs<ObjCObjectPointerType>();
    if (O1 && O2) {
      T1 = O1->getPointeeType();
      T2 = O2->getPointeeType();
      return true;
    }
  }
  // FIXME: Block pointers, too?
  return false;
}
/// Build a DeclarationNameInfo (name plus source location) for the entity a
/// TemplateName refers to, dispatching on the name's representation kind.
DeclarationNameInfo
ASTContext::getNameForTemplate(TemplateName Name,
                               SourceLocation NameLoc) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template:
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
                               NameLoc);
  case TemplateName::OverloadedTemplate: {
    OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
    // All overloads share the same name, so the first one suffices.
    // DNInfo work in progress: CHECKME: what about DNLoc?
    return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
  }
  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    DeclarationName DName;
    if (DTN->isIdentifier()) {
      DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
      return DeclarationNameInfo(DName, NameLoc);
    } else {
      // Dependent operator name (e.g. T::template operator+).
      DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
      // DNInfo work in progress: FIXME: source locations?
      DeclarationNameLoc DNLoc;
      DNLoc.CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
      DNLoc.CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
      return DeclarationNameInfo(DName, NameLoc, DNLoc);
    }
  }
  case TemplateName::SubstTemplateTemplateParm: {
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return DeclarationNameInfo(subst->getParameter()->getDeclName(),
                               NameLoc);
  }
  case TemplateName::SubstTemplateTemplateParmPack: {
    SubstTemplateTemplateParmPackStorage *subst
      = Name.getAsSubstTemplateTemplateParmPack();
    return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
                               NameLoc);
  }
  }
  llvm_unreachable("bad template name kind!");
}
/// Compute the canonical form of a TemplateName, so that two names that
/// refer to the same template compare equal.
TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
  switch (Name.getKind()) {
  case TemplateName::QualifiedTemplate:
  case TemplateName::Template: {
    TemplateDecl *Template = Name.getAsTemplateDecl();
    // Template template parameters canonicalize through their own scheme.
    if (TemplateTemplateParmDecl *TTP
          = dyn_cast<TemplateTemplateParmDecl>(Template))
      Template = getCanonicalTemplateTemplateParmDecl(TTP);
    // The canonical template name is the canonical template declaration.
    return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
  }
  case TemplateName::OverloadedTemplate:
    llvm_unreachable("cannot canonicalize overloaded template");
  case TemplateName::DependentTemplate: {
    DependentTemplateName *DTN = Name.getAsDependentTemplateName();
    assert(DTN && "Non-dependent template names must refer to template decls.");
    return DTN->CanonicalTemplateName;
  }
  case TemplateName::SubstTemplateTemplateParm: {
    // A substituted parameter canonicalizes to its replacement.
    SubstTemplateTemplateParmStorage *subst
      = Name.getAsSubstTemplateTemplateParm();
    return getCanonicalTemplateName(subst->getReplacement());
  }
  case TemplateName::SubstTemplateTemplateParmPack: {
    // Canonicalize both the parameter pack and its argument pack.
    SubstTemplateTemplateParmPackStorage *subst
                                  = Name.getAsSubstTemplateTemplateParmPack();
    TemplateTemplateParmDecl *canonParameter
      = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
    TemplateArgument canonArgPack
      = getCanonicalTemplateArgument(subst->getArgumentPack());
    return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
  }
  }
  llvm_unreachable("bad template name!");
}
/// Return true if the two template names refer to the same template.
bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
  // Canonical template names are unique, so pointer identity suffices.
  return getCanonicalTemplateName(X).getAsVoidPointer() ==
         getCanonicalTemplateName(Y).getAsVoidPointer();
}
/// Compute the canonical form of a template argument, recursing through
/// packs and canonicalizing types, declarations, and template names.
TemplateArgument
ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
  switch (Arg.getKind()) {
    case TemplateArgument::Null:
      return Arg;
    case TemplateArgument::Expression:
      // Expressions are not canonicalized here; they are compared
      // structurally elsewhere.
      return Arg;
    case TemplateArgument::Declaration: {
      ValueDecl *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
      return TemplateArgument(D, Arg.getParamTypeForDecl());
    }
    case TemplateArgument::NullPtr:
      return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
                              /*isNullPtr*/true);
    case TemplateArgument::Template:
      return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
    case TemplateArgument::TemplateExpansion:
      return TemplateArgument(getCanonicalTemplateName(
                                         Arg.getAsTemplateOrTemplatePattern()),
                              Arg.getNumTemplateExpansions());
    case TemplateArgument::Integral:
      return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));
    case TemplateArgument::Type:
      return TemplateArgument(getCanonicalType(Arg.getAsType()));
    case TemplateArgument::Pack: {
      if (Arg.pack_size() == 0)
        return Arg;
      // Canonicalize each element into a context-allocated array.
      TemplateArgument *CanonArgs
        = new (*this) TemplateArgument[Arg.pack_size()];
      unsigned Idx = 0;
      for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
                                        AEnd = Arg.pack_end();
           A != AEnd; (void)++A, ++Idx)
        CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
      return TemplateArgument(CanonArgs, Arg.pack_size());
    }
  }
  // Silence GCC warning
  llvm_unreachable("Unhandled template argument kind");
}
/// Compute the canonical form of a nested-name-specifier, so equivalent
/// qualifiers (modulo typedefs and namespace aliases) compare equal.
NestedNameSpecifier *
ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
  if (!NNS)
    return nullptr;
  switch (NNS->getKind()) {
  case NestedNameSpecifier::Identifier:
    // Canonicalize the prefix but keep the identifier the same.
    return NestedNameSpecifier::Create(*this,
                         getCanonicalNestedNameSpecifier(NNS->getPrefix()),
                                       NNS->getAsIdentifier());
  case NestedNameSpecifier::Namespace:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(*this, nullptr,
                                 NNS->getAsNamespace()->getOriginalNamespace());
  case NestedNameSpecifier::NamespaceAlias:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(*this, nullptr,
                                    NNS->getAsNamespaceAlias()->getNamespace()
                                                      ->getOriginalNamespace());
  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate: {
    QualType T = getCanonicalType(QualType(NNS->getAsType(), 0));
    // If we have some kind of dependent-named type (e.g., "typename T::type"),
    // break it apart into its prefix and identifier, then reconstitute those
    // as the canonical nested-name-specifier. This is required to canonicalize
    // a dependent nested-name-specifier involving typedefs of dependent-name
    // types, e.g.,
    //   typedef typename T::type T1;
    //   typedef typename T1::type T2;
    if (const DependentNameType *DNT = T->getAs<DependentNameType>())
      return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
                           const_cast<IdentifierInfo *>(DNT->getIdentifier()));
    // Otherwise, just canonicalize the type, and force it to be a TypeSpec.
    // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the
    // first place?
    return NestedNameSpecifier::Create(*this, nullptr, false,
                                       const_cast<Type *>(T.getTypePtr()));
  }
  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    // The global specifier and __super specifier are canonical and unique.
    return NNS;
  }
  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
/// Return T viewed as an array type, or null if it is not one. Any
/// qualifiers on the array type itself are pushed into the element type
/// (C99 6.7.3p8), which may require rebuilding the array node.
const ArrayType *ASTContext::getAsArrayType(QualType T) const {
  // Handle the non-qualified case efficiently.
  if (!T.hasLocalQualifiers()) {
    // Handle the common positive case fast.
    if (const ArrayType *AT = dyn_cast<ArrayType>(T))
      return AT;
  }
  // Handle the common negative case fast.
  if (!isa<ArrayType>(T.getCanonicalType()))
    return nullptr;
  // Apply any qualifiers from the array type to the element type. This
  // implements C99 6.7.3p8: "If the specification of an array type includes
  // any type qualifiers, the element type is so qualified, not the array type."
  // If we get here, we either have type qualifiers on the type, or we have
  // sugar such as a typedef in the way. If we have type qualifiers on the type
  // we must propagate them down into the element type.
  SplitQualType split = T.getSplitDesugaredType();
  Qualifiers qs = split.Quals;
  // If we have a simple case, just return now.
  const ArrayType *ATy = dyn_cast<ArrayType>(split.Ty);
  if (!ATy || qs.empty())
    return ATy;
  // Otherwise, we have an array and we have qualifiers on it. Push the
  // qualifiers into the array element type and return a new array type.
  QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(ATy))
    return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
                                                CAT->getSizeModifier(),
                                           CAT->getIndexTypeCVRQualifiers()));
  if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(ATy))
    return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
                                                  IAT->getSizeModifier(),
                                           IAT->getIndexTypeCVRQualifiers()));
  if (const DependentSizedArrayType *DSAT
        = dyn_cast<DependentSizedArrayType>(ATy))
    return cast<ArrayType>(
                     getDependentSizedArrayType(NewEltTy,
                                                DSAT->getSizeExpr(),
                                                DSAT->getSizeModifier(),
                                              DSAT->getIndexTypeCVRQualifiers(),
                                                DSAT->getBracketsRange()));
  const VariableArrayType *VAT = cast<VariableArrayType>(ATy);
  return cast<ArrayType>(getVariableArrayType(NewEltTy,
                                              VAT->getSizeExpr(),
                                              VAT->getSizeModifier(),
                                              VAT->getIndexTypeCVRQualifiers(),
                                              VAT->getBracketsRange()));
}
/// Apply the C/C++ parameter adjustment: array and function parameter
/// types decay to the corresponding pointer types; others are unchanged.
QualType ASTContext::getAdjustedParameterType(QualType T) const {
  if (!T->isArrayType() && !T->isFunctionType())
    return T;
  return getDecayedType(T);
}
/// Compute the parameter type as it appears in a function signature:
/// VLAs decay, arrays/functions become pointers, and top-level
/// qualifiers are dropped.
QualType ASTContext::getSignatureParameterType(QualType T) const {
  QualType Adjusted = getAdjustedParameterType(getVariableArrayDecayedType(T));
  return Adjusted.getUnqualifiedType();
}
QualType ASTContext::getExceptionObjectType(QualType T) const {
  // C++ [except.throw]p3:
  //   A throw-expression initializes a temporary object, called the exception
  //   object, the type of which is determined by removing any top-level
  //   cv-qualifiers from the static type of the operand of throw and adjusting
  //   the type from "array of T" or "function returning T" to "pointer to T"
  //   or "pointer to function returning T", [...]
  // getAdjustedParameterType performs exactly the array/function-to-pointer
  // adjustment the standard describes.
  QualType Adjusted = getAdjustedParameterType(getVariableArrayDecayedType(T));
  return Adjusted.getUnqualifiedType();
}
/// getArrayDecayedType - Return the properly qualified result of decaying the
/// specified array type to a pointer. This operation is non-trivial when
/// handling typedefs etc. The canonical type of "T" must be an array type,
/// this returns a pointer to a properly qualified element of the array.
///
/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
QualType ASTContext::getArrayDecayedType(QualType Ty) const {
  // getAsArrayType keeps typedef sugar in the element type and pushes any
  // array-level qualifiers into the element type (C99 6.7.3p8).
  const ArrayType *ArrTy = getAsArrayType(Ty);
  assert(ArrTy && "Not an array type!");
  QualType ElemPtr = getPointerType(ArrTy->getElementType());
  // int x[restrict 4] ->  int *restrict
  return getQualifiedType(ElemPtr, ArrTy->getIndexTypeQualifiers());
}
/// Convenience overload: start the base-element walk from an array's
/// element type.
QualType ASTContext::getBaseElementType(const ArrayType *array) const {
  QualType Elem = array->getElementType();
  return getBaseElementType(Elem);
}
/// Strip all nested array layers from the type, collecting the qualifiers
/// found on each layer and reapplying them to the innermost element type.
QualType ASTContext::getBaseElementType(QualType type) const {
  Qualifiers Accumulated;
  for (;;) {
    SplitQualType Split = type.getSplitDesugaredType();
    const ArrayType *Arr = Split.Ty->getAsArrayTypeUnsafe();
    if (!Arr)
      break;
    type = Arr->getElementType();
    Accumulated.addConsistentQualifiers(Split.Quals);
  }
  return getQualifiedType(type, Accumulated);
}
/// getConstantArrayElementCount - Returns number of constant array elements.
uint64_t
ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
  // Multiply the extents of all nested constant-array dimensions.
  uint64_t ElementCount = CA->getSize().getZExtValue();
  while (const ConstantArrayType *Inner = dyn_cast_or_null<ConstantArrayType>(
             CA->getElementType()->getAsArrayTypeUnsafe())) {
    ElementCount *= Inner->getSize().getZExtValue();
    CA = Inner;
  }
  return ElementCount;
}
/// getFloatingRank - Return a relative rank for floating point types.
/// This routine will assert if passed a built-in type that isn't a float.
static FloatingRank getFloatingRank(QualType T) {
  // Complex types rank the same as their element type.
  if (const ComplexType *CT = T->getAs<ComplexType>())
    return getFloatingRank(CT->getElementType());
  assert(T->getAs<BuiltinType>() && "getFloatingRank(): not a floating type");
  switch (T->getAs<BuiltinType>()->getKind()) {
  default: llvm_unreachable("getFloatingRank(): not a floating type");
  case BuiltinType::Half:       return HalfRank;
  case BuiltinType::Float:      return FloatRank;
  case BuiltinType::Double:     return DoubleRank;
  case BuiltinType::LongDouble: return LongDoubleRank;
  // HLSL Changes begin
  // HLSL's reduced-precision and literal float types get their own ranks.
  case BuiltinType::Min10Float: return Min10FloatRank;
  case BuiltinType::Min16Float: return Min16FloatRank;
  case BuiltinType::HalfFloat:  return HalfFloatRank;
  case BuiltinType::LitFloat:   return LitFloatRank;
  // HLSL Changes end
  }
}
/// getFloatingTypeOfSizeWithinDomain - Returns a real floating
/// point or a complex type (based on typeDomain/typeSize).
/// 'Size' supplies the floating-point rank (width) of the result;
/// 'Domain' decides whether the result is real or complex.
QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
                                                       QualType Domain) const {
  FloatingRank EltRank = getFloatingRank(Size);
  if (Domain->isComplexType()) {
    switch (EltRank) {
    case FloatRank:      return FloatComplexTy;
    case DoubleRank:     return DoubleComplexTy;
    case LongDoubleRank: return LongDoubleComplexTy;
    // HLSL Changes begin
    // HLSL has no complex types, so the HLSL-only ranks cannot occur here.
    case HalfRank:
    case LitFloatRank:
    case Min10FloatRank:
    case Min16FloatRank:
    case HalfFloatRank:
      llvm_unreachable("Complex type is not supported in HLSL.");
    // HLSL Changes end
    }
  }

  assert(Domain->isRealFloatingType() && "Unknown domain!");
  switch (EltRank) {
  case LitFloatRank:   return LitFloatTy;     // HLSL Change
  case Min10FloatRank: return Min10FloatTy;   // HLSL Change
  case HalfRank:       return HalfTy;
  case FloatRank:      return FloatTy;
  case DoubleRank:     return DoubleTy;
  case LongDoubleRank: return LongDoubleTy;
  }
  llvm_unreachable("getFloatingRank(): illegal value for rank");
}
/// getFloatingTypeOrder - Compare the rank of the two specified floating
/// point types, ignoring the domain of the type (i.e. 'double' ==
/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
  FloatingRank L = getFloatingRank(LHS);
  FloatingRank R = getFloatingRank(RHS);
  // Three-way comparison expressed arithmetically: 1, 0, or -1.
  return (L > R) - (L < R);
}
/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
/// routine will assert if passed a built-in type that isn't an integer or enum,
/// or if it is not canonicalized.
unsigned ASTContext::getIntegerRank(const Type *T) const {
  assert(T->isCanonicalUnqualified() && "T should be canonicalized");
  // The rank encodes the target bit-width in the high bits (width << 3) and
  // a small per-kind ordinal in the low bits, so wider types always rank
  // higher and same-width types break ties by kind.
  switch (cast<BuiltinType>(T)->getKind()) {
  default: llvm_unreachable("getIntegerRank(): not a built-in integer");
  case BuiltinType::Bool:
    return 1 + (getIntWidth(BoolTy) << 3);
  case BuiltinType::Char_S:
  case BuiltinType::Char_U:
  case BuiltinType::SChar:
  case BuiltinType::UChar:
    return 2 + (getIntWidth(CharTy) << 3);
  case BuiltinType::Short:
  case BuiltinType::UShort:
    return 3 + (getIntWidth(ShortTy) << 3);
  case BuiltinType::LitInt: // HLSL Change: literal ints rank with 'int'
  case BuiltinType::Int:
  case BuiltinType::UInt:
    return 4 + (getIntWidth(IntTy) << 3);
  case BuiltinType::Long:
  case BuiltinType::ULong:
    return 5 + (getIntWidth(LongTy) << 3);
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
    return 6 + (getIntWidth(LongLongTy) << 3);
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
    return 7 + (getIntWidth(Int128Ty) << 3);
  }
}
/// \brief Whether this is a promotable bitfield reference according
/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
///
/// \returns the type this bit-field will promote to, or NULL if no
/// promotion occurs.
QualType ASTContext::isPromotableBitField(Expr *E) const {
  // Dependent expressions cannot be analyzed until instantiation.
  if (E->isTypeDependent() || E->isValueDependent())
    return QualType();

  // FIXME: We should not do this unless E->refersToBitField() is true. This
  // matters in C where getSourceBitField() will find bit-fields for various
  // cases where the source expression is not a bit-field designator.
  FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
  if (!Field)
    return QualType();

  QualType FT = Field->getType();

  uint64_t BitWidth = Field->getBitWidthValue(*this);
  uint64_t IntSize = getTypeSize(IntTy);
  // C++ [conv.prom]p5:
  //   A prvalue for an integral bit-field can be converted to a prvalue of type
  //   int if int can represent all the values of the bit-field; otherwise, it
  //   can be converted to unsigned int if unsigned int can represent all the
  //   values of the bit-field. If the bit-field is larger yet, no integral
  //   promotion applies to it.
  // C11 6.3.1.1/2:
  //   [For a bit-field of type _Bool, int, signed int, or unsigned int:]
  //   If an int can represent all values of the original type (as restricted by
  //   the width, for a bit-field), the value is converted to an int; otherwise,
  //   it is converted to an unsigned int.
  //
  // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
  //        We perform that promotion here to match GCC and C++.
  if (BitWidth < IntSize)
    return IntTy;

  if (BitWidth == IntSize)
    return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;

  // Types bigger than int are not subject to promotions, and therefore act
  // like the base type. GCC has some weird bugs in this area that we
  // deliberately do not follow (GCC follows a pre-standard resolution to
  // C's DR315 which treats bit-width as being part of the type, and this leaks
  // into their semantics in some cases).
  return QualType();
}
/// getPromotedIntegerType - Returns the type that Promotable will
/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
/// integer type.
///
/// \param Promotable a non-null type satisfying isPromotableIntegerType().
QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
  assert(!Promotable.isNull());
  assert(Promotable->isPromotableIntegerType());

  // Enums carry their promotion type on the declaration.
  if (const EnumType *ET = Promotable->getAs<EnumType>())
    return ET->getDecl()->getPromotionType();

  if (const BuiltinType *BT = Promotable->getAs<BuiltinType>()) {
    // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
    // (3.9.1) can be converted to a prvalue of the first of the following
    // types that can represent all the values of its underlying type:
    // int, unsigned int, long int, unsigned long int, long long int, or
    // unsigned long long int [...]
    // FIXME: Is there some better way to compute this?
    if (BT->getKind() == BuiltinType::WChar_S ||
        BT->getKind() == BuiltinType::WChar_U ||
        BT->getKind() == BuiltinType::Char16 ||
        BT->getKind() == BuiltinType::Char32) {
      bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
      uint64_t FromSize = getTypeSize(BT);
      // Candidates in the exact order mandated by [conv.prom]; the first
      // strictly-wider type (or same-width type of matching signedness) wins.
      QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
                                  LongLongTy, UnsignedLongLongTy };
      for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) {
        uint64_t ToSize = getTypeSize(PromoteTypes[Idx]);
        if (FromSize < ToSize ||
            (FromSize == ToSize &&
             FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType()))
          return PromoteTypes[Idx];
      }
      llvm_unreachable("char type should fit into long long");
    }
  }

  // At this point, we should have a signed or unsigned integer type.
  if (Promotable->isSignedIntegerType())
    return IntTy;
  uint64_t PromotableSize = getIntWidth(Promotable);
  uint64_t IntSize = getIntWidth(IntTy);
  assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
  // Unsigned types narrower than int promote to int; same-width stays unsigned.
  return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
}
/// \brief Recurses in pointer/array types until it finds an objc retainable
/// type and returns its ownership.
Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
  // Peel off arrays, pointers, and references one level at a time, stopping
  // at the first explicit lifetime qualifier we encounter.
  for (QualType Cur = T; !Cur.isNull();) {
    Qualifiers::ObjCLifetime Lifetime = Cur.getObjCLifetime();
    if (Lifetime != Qualifiers::OCL_None)
      return Lifetime;

    if (Cur->isArrayType()) {
      Cur = getBaseElementType(Cur);
      continue;
    }
    if (const PointerType *Ptr = Cur->getAs<PointerType>()) {
      Cur = Ptr->getPointeeType();
      continue;
    }
    if (const ReferenceType *Ref = Cur->getAs<ReferenceType>()) {
      Cur = Ref->getPointeeType();
      continue;
    }
    // No further level to unwrap: no ownership found.
    break;
  }
  return Qualifiers::OCL_None;
}
/// Return the underlying integer type of an enum, or null when the enum
/// cannot be treated as an integer type.
static const Type *getIntegerTypeForEnum(const EnumType *ET) {
  // Incomplete enum types are not treated as integer types.
  // FIXME: In C++, enum types are never integer types.
  EnumDecl *ED = ET->getDecl();
  if (!ED->isComplete() || ED->isScoped())
    return nullptr;
  return ED->getIntegerType().getTypePtr();
}
/// getIntegerTypeOrder - Returns the highest ranked integer type:
/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
  const Type *LHSC = getCanonicalType(LHS).getTypePtr();
  const Type *RHSC = getCanonicalType(RHS).getTypePtr();

  // Unwrap enums to their underlying type.
  // NOTE(review): getIntegerTypeForEnum returns null for incomplete/scoped
  // enums; presumably callers never pass those here — confirm, since a lone
  // null would be dereferenced below.
  if (const EnumType *ET = dyn_cast<EnumType>(LHSC))
    LHSC = getIntegerTypeForEnum(ET);
  if (const EnumType *ET = dyn_cast<EnumType>(RHSC))
    RHSC = getIntegerTypeForEnum(ET);

  if (LHSC == RHSC) return 0;

  bool LHSUnsigned = LHSC->isUnsignedIntegerType();
  bool RHSUnsigned = RHSC->isUnsignedIntegerType();

  unsigned LHSRank = getIntegerRank(LHSC);
  unsigned RHSRank = getIntegerRank(RHSC);

  if (LHSUnsigned == RHSUnsigned) {  // Both signed or both unsigned.
    if (LHSRank == RHSRank) return 0;
    return LHSRank > RHSRank ? 1 : -1;
  }

  // Otherwise, the LHS is signed and the RHS is unsigned or visa versa.
  if (LHSUnsigned) {
    // If the unsigned [LHS] type is larger, return it.
    if (LHSRank >= RHSRank)
      return 1;

    // If the signed type can represent all values of the unsigned type, it
    // wins.  Because we are dealing with 2's complement and types that are
    // powers of two larger than each other, this is always safe.
    return -1;
  }

  // If the unsigned [RHS] type is larger, return it.
  if (RHSRank >= LHSRank)
    return -1;

  // If the signed type can represent all values of the unsigned type, it
  // wins.  Because we are dealing with 2's complement and types that are
  // powers of two larger than each other, this is always safe.
  return 1;
}
// getCFConstantStringType - Return the type used for constant CFStrings.
QualType ASTContext::getCFConstantStringType() const {
  // Build the record the first time it is requested; subsequent calls reuse
  // the cached declaration.
  if (!CFConstantStringTypeDecl) {
    CFConstantStringTypeDecl = buildImplicitRecord("NSConstantString");
    CFConstantStringTypeDecl->startDefinition();

    // Field layout, in declaration order:
    //   const int *isa;
    //   int flags;
    //   const char *str;
    //   long length;
    QualType FieldTypes[4] = {
      getPointerType(IntTy.withConst()),
      IntTy,
      getPointerType(CharTy.withConst()),
      LongTy
    };

    for (QualType FieldType : FieldTypes) {
      FieldDecl *Field = FieldDecl::Create(*this, CFConstantStringTypeDecl,
                                           SourceLocation(), SourceLocation(),
                                           nullptr, FieldType,
                                           /*TInfo=*/nullptr,
                                           /*BitWidth=*/nullptr,
                                           /*Mutable=*/false, ICIS_NoInit);
      Field->setAccess(AS_public);
      CFConstantStringTypeDecl->addDecl(Field);
    }

    CFConstantStringTypeDecl->completeDefinition();
  }

  return getTagDeclType(CFConstantStringTypeDecl);
}
/// Return (lazily creating) the type of the implicit 'objc_super' record.
QualType ASTContext::getObjCSuperType() const {
  if (!ObjCSuperType.isNull())
    return ObjCSuperType;
  RecordDecl *SuperDecl = buildImplicitRecord("objc_super");
  TUDecl->addDecl(SuperDecl);
  ObjCSuperType = getTagDeclType(SuperDecl);
  return ObjCSuperType;
}
/// Record an externally supplied CFConstantString record type.
void ASTContext::setCFConstantStringType(QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  assert(RT && "Invalid CFConstantStringType");
  CFConstantStringTypeDecl = RT->getDecl();
}
/// Return (lazily creating) the '__block_descriptor' record type used by the
/// blocks runtime: { unsigned long reserved; unsigned long Size; }.
QualType ASTContext::getBlockDescriptorType() const {
  if (BlockDescriptorType)
    return getTagDeclType(BlockDescriptorType);

  // FIXME: Needs the FlagAppleBlock bit.
  RecordDecl *RD = buildImplicitRecord("__block_descriptor");
  RD->startDefinition();

  QualType FieldTypes[] = { UnsignedLongTy, UnsignedLongTy };
  static const char *const FieldNames[] = { "reserved", "Size" };

  for (size_t Idx = 0, N = llvm::array_lengthof(FieldTypes); Idx != N; ++Idx) {
    FieldDecl *Field = FieldDecl::Create(
        *this, RD, SourceLocation(), SourceLocation(),
        &Idents.get(FieldNames[Idx]), FieldTypes[Idx], /*TInfo=*/nullptr,
        /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
    Field->setAccess(AS_public);
    RD->addDecl(Field);
  }

  RD->completeDefinition();
  BlockDescriptorType = RD;
  return getTagDeclType(BlockDescriptorType);
}
/// Return (lazily creating) the extended block descriptor record used for
/// blocks requiring copy/dispose helpers.
QualType ASTContext::getBlockDescriptorExtendedType() const {
  if (BlockDescriptorExtendedType)
    return getTagDeclType(BlockDescriptorExtendedType);

  // FIXME: Needs the FlagAppleBlock bit.
  RecordDecl *RD = buildImplicitRecord("__block_descriptor_withcopydispose");
  RD->startDefinition();

  QualType FieldTypes[] = {
    UnsignedLongTy,
    UnsignedLongTy,
    getPointerType(VoidPtrTy),
    getPointerType(VoidPtrTy)
  };
  static const char *const FieldNames[] = {
    "reserved",
    "Size",
    "CopyFuncPtr",
    "DestroyFuncPtr"
  };

  for (size_t Idx = 0, N = llvm::array_lengthof(FieldTypes); Idx != N; ++Idx) {
    FieldDecl *Field = FieldDecl::Create(
        *this, RD, SourceLocation(), SourceLocation(),
        &Idents.get(FieldNames[Idx]), FieldTypes[Idx], /*TInfo=*/nullptr,
        /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
    Field->setAccess(AS_public);
    RD->addDecl(Field);
  }

  RD->completeDefinition();
  BlockDescriptorExtendedType = RD;
  return getTagDeclType(BlockDescriptorExtendedType);
}
/// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty"
/// requires copy/dispose. Note that this must match the logic
/// in buildByrefHelpers.
bool ASTContext::BlockRequiresCopying(QualType Ty,
                                      const VarDecl *D) {
  // C++ records need helpers unless they have both no copy-initializer and a
  // trivial destructor.
  if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
    const Expr *copyExpr = getBlockVarCopyInits(D);
    if (!copyExpr && record->hasTrivialDestructor()) return false;

    return true;
  }

  if (!Ty->isObjCRetainableType()) return false;

  Qualifiers qs = Ty.getQualifiers();

  // If we have lifetime, that dominates.
  if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
    assert(getLangOpts().ObjCAutoRefCount);

    switch (lifetime) {
      case Qualifiers::OCL_None: llvm_unreachable("impossible");

      // These are just bits as far as the runtime is concerned.
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        return false;

      // Tell the runtime that this is ARC __weak, called by the
      // byref routines.
      case Qualifiers::OCL_Weak:
      // ARC __strong __block variables need to be retained.
      case Qualifiers::OCL_Strong:
        return true;
    }
    llvm_unreachable("fell out of lifetime switch!");
  }

  // No explicit lifetime: copy helpers are needed for block pointers and
  // Objective-C object pointers (including NSObject-attributed types).
  return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) ||
          Ty->isObjCObjectPointerType());
}
/// Compute the __block (byref) lifetime and layout kind for a variable of
/// type \p Ty.
///
/// \param LifeTime [out] the ObjC lifetime to use for the byref variable.
/// \param HasByrefExtendedLayout [out] true when the byref structure needs
///        the extended layout (currently: record types).
/// \returns false when the language mode (non-ObjC, or GC enabled) makes the
///          question moot; the out-parameters are only meaningful on true.
bool ASTContext::getByrefLifetime(QualType Ty,
                              Qualifiers::ObjCLifetime &LifeTime,
                              bool &HasByrefExtendedLayout) const {
  if (!getLangOpts().ObjC1 ||
      getLangOpts().getGC() != LangOptions::NonGC)
    return false;

  HasByrefExtendedLayout = false;
  // Order matters: records win over ARC/MRR classification below.
  if (Ty->isRecordType()) {
    HasByrefExtendedLayout = true;
    LifeTime = Qualifiers::OCL_None;
  }
  else if (getLangOpts().ObjCAutoRefCount)
    LifeTime = Ty.getObjCLifetime();
  // MRR.
  else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
    LifeTime = Qualifiers::OCL_ExplicitNone;
  else
    LifeTime = Qualifiers::OCL_None;
  return true;
}
/// Return (lazily creating) the implicit "typedef id instancetype;" decl.
TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
  if (ObjCInstanceTypeDecl)
    return ObjCInstanceTypeDecl;
  ObjCInstanceTypeDecl = buildImplicitTypedef(getObjCIdType(), "instancetype");
  return ObjCInstanceTypeDecl;
}
// This returns true if a type has been typedefed to BOOL:
// typedef <type> BOOL;
static bool isTypeTypedefedAsBOOL(QualType T) {
  const TypedefType *TT = dyn_cast<TypedefType>(T);
  if (!TT)
    return false;
  IdentifierInfo *II = TT->getDecl()->getIdentifier();
  return II && II->isStr("BOOL");
}
/// getObjCEncodingTypeSize returns size of type for objective-c encoding
/// purpose.
CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
  // Incomplete types (other than incomplete arrays) have no meaningful size.
  if (type->isIncompleteType() && !type->isIncompleteArrayType())
    return CharUnits::Zero();

  CharUnits Size = getTypeSizeInChars(type);

  // Treat arrays as pointers, since that's how they're passed in.
  if (type->isArrayType())
    return getTypeSizeInChars(VoidPtrTy);

  // Make all integer and enum types at least as large as an int.
  if (Size.isPositive() && type->isIntegralOrEnumerationType())
    Size = std::max(Size, getTypeSizeInChars(IntTy));

  return Size;
}
/// In MSVC-compatibility mode, an initialized in-class static integral data
/// member at its first (in-class) declaration counts as an inline definition.
bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const {
  if (!getLangOpts().MSVCCompat)
    return false;
  if (!VD->isStaticDataMember())
    return false;
  if (!VD->getType()->isIntegralOrEnumerationType())
    return false;
  return VD->isFirstDecl() && !VD->isOutOfLine() && VD->hasInit();
}
/// Render the raw quantity of \p CU as a decimal string.
static inline std::string charUnitsToString(const CharUnits &CU) {
  const auto Quantity = CU.getQuantity();
  return llvm::itostr(Quantity);
}
/// getObjCEncodingForBlock - Return the encoded type for this block
/// declaration.
///
/// The encoding is: return type, total argument-frame size, "@?" for the
/// block pointer itself at offset 0, then each parameter's encoding followed
/// by its byte offset within the frame.
std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
  std::string S;
  const BlockDecl *Decl = Expr->getBlockDecl();
  QualType BlockTy =
      Expr->getType()->getAs<BlockPointerType>()->getPointeeType();
  // Encode result type.
  if (getLangOpts().EncodeExtendedBlockSig)
    getObjCEncodingForMethodParameter(
        Decl::OBJC_TQ_None, BlockTy->getAs<FunctionType>()->getReturnType(), S,
        true /*Extended*/);
  else
    getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getReturnType(), S);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
  // The frame starts after the block pointer itself.
  CharUnits ParmOffset = PtrSize;
  for (auto PI : Decl->params()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(PType);
    if (sz.isZero())
      continue;
    assert (sz.isPositive() && "BlockExpr - Incomplete param type");
    ParmOffset += sz;
  }
  // Size of the argument frame
  S += charUnitsToString(ParmOffset);
  // Block pointer and offset.
  S += "@?0";

  // Argument types.
  ParmOffset = PtrSize;
  for (auto PVDecl : Decl->params()) {
    QualType PType = PVDecl->getOriginalType();
    if (const ArrayType *AT =
          dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    if (getLangOpts().EncodeExtendedBlockSig)
      getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType,
                                        S, true /*Extended*/);
    else
      getObjCEncodingForType(PType, S);
    S += charUnitsToString(ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(PType);
  }

  return S;
}
/// Append the ObjC type encoding for a C function declaration to \p S:
/// return type, total parameter-frame size, then each parameter's encoding
/// followed by its byte offset.
///
/// \returns false (the encoding always succeeds for function decls).
bool ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl,
                                                std::string& S) {
  // Encode result type.
  getObjCEncodingForType(Decl->getReturnType(), S);
  CharUnits ParmOffset;
  // Compute size of all parameters.
  for (auto PI : Decl->params()) {
    QualType PType = PI->getType();
    CharUnits sz = getObjCEncodingTypeSize(PType);
    if (sz.isZero())
      continue;

    assert (sz.isPositive() &&
        "getObjCEncodingForFunctionDecl - Incomplete param type");
    ParmOffset += sz;
  }
  S += charUnitsToString(ParmOffset);
  ParmOffset = CharUnits::Zero();

  // Argument types.
  for (auto PVDecl : Decl->params()) {
    QualType PType = PVDecl->getOriginalType();
    if (const ArrayType *AT =
          dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      // Functions decay to pointers; use the decayed (declared) type.
      PType = PVDecl->getType();
    getObjCEncodingForType(PType, S);
    S += charUnitsToString(ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(PType);
  }

  return false;
}
/// getObjCEncodingForMethodParameter - Return the encoded type for a single
/// method parameter or return type. If Extended, include class names and
/// block object types.
///
/// \param QT the ObjC declaration qualifier ('in', 'inout', ...) to encode
///        before the type itself.
void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
                                                   QualType T, std::string& S,
                                                   bool Extended) const {
  // Encode type qualifer, 'in', 'inout', etc. for the parameter.
  getObjCEncodingForTypeQualifier(QT, S);
  // Encode parameter type.
  getObjCEncodingForTypeImpl(T, S, true, true, nullptr,
                             true     /*OutermostType*/,
                             false    /*EncodingProperty*/,
                             false    /*StructField*/,
                             Extended /*EncodeBlockParameters*/,
                             Extended /*EncodeClassNames*/);
}
/// getObjCEncodingForMethodDecl - Return the encoded type for this method
/// declaration.
///
/// The encoding is: return type, total argument-frame size, the implicit
/// self ("@0") and _cmd (":<ptr-size>") arguments, then each declared
/// parameter's encoding followed by its byte offset within the frame.
///
/// \returns false (the encoding always succeeds for method decls).
bool ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
                                              std::string& S,
                                              bool Extended) const {
  // FIXME: This is not very efficient.
  // Encode return type.
  getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(),
                                    Decl->getReturnType(), S, Extended);
  // Compute size of all parameters.
  // Start with computing size of a pointer in number of bytes.
  // FIXME: There might(should) be a better way of doing this computation!
  CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
  // The first two arguments (self and _cmd) are pointers; account for
  // their size.
  CharUnits ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    QualType PType = (*PI)->getType();
    CharUnits sz = getObjCEncodingTypeSize(PType);
    if (sz.isZero())
      continue;

    assert (sz.isPositive() &&
        "getObjCEncodingForMethodDecl - Incomplete param type");
    ParmOffset += sz;
  }
  S += charUnitsToString(ParmOffset);
  S += "@0:";
  S += charUnitsToString(PtrSize);

  // Argument types.
  ParmOffset = 2 * PtrSize;
  for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
       E = Decl->sel_param_end(); PI != E; ++PI) {
    const ParmVarDecl *PVDecl = *PI;
    QualType PType = PVDecl->getOriginalType();
    if (const ArrayType *AT =
          dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
      // Use array's original type only if it has known number of
      // elements.
      if (!isa<ConstantArrayType>(AT))
        PType = PVDecl->getType();
    } else if (PType->isFunctionType())
      PType = PVDecl->getType();
    getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(),
                                      PType, S, Extended);
    S += charUnitsToString(ParmOffset);
    ParmOffset += getObjCEncodingTypeSize(PType);
  }

  return false;
}
/// Find the property implementation (\@synthesize/\@dynamic) for \p PD in
/// \p Container, which must be a category or class implementation (or null).
ObjCPropertyImplDecl *
ASTContext::getObjCPropertyImplDeclForPropertyDecl(
                                                  const ObjCPropertyDecl *PD,
                                                  const Decl *Container) const {
  if (!Container)
    return nullptr;

  // Category implementations and class implementations expose the same
  // property_impls() range; scan whichever one we were handed.
  if (const ObjCCategoryImplDecl *CatImpl =
          dyn_cast<ObjCCategoryImplDecl>(Container)) {
    for (auto *Impl : CatImpl->property_impls())
      if (Impl->getPropertyDecl() == PD)
        return Impl;
    return nullptr;
  }

  const ObjCImplementationDecl *ClassImpl =
      cast<ObjCImplementationDecl>(Container);
  for (auto *Impl : ClassImpl->property_impls())
    if (Impl->getPropertyDecl() == PD)
      return Impl;
  return nullptr;
}
/// getObjCEncodingForPropertyDecl - Return the encoded type for this
/// property declaration. If non-NULL, Container must be either an
/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
/// NULL when getting encodings for protocol properties.
/// Property attributes are stored as a comma-delimited C string. The simple
/// attributes readonly and bycopy are encoded as single characters. The
/// parametrized attributes, getter=name, setter=name, and ivar=name, are
/// encoded as single characters, followed by an identifier. Property types
/// are also encoded as a parametrized attribute. The characters used to encode
/// these attributes are defined by the following enumeration:
/// @code
/// enum PropertyAttributes {
/// kPropertyReadOnly = 'R',   // property is read-only.
/// kPropertyBycopy = 'C',     // property is a copy of the value last assigned
/// kPropertyByref = '&',  // property is a reference to the value last assigned
/// kPropertyDynamic = 'D',    // property is dynamic
/// kPropertyGetter = 'G',     // followed by getter selector name
/// kPropertySetter = 'S',     // followed by setter selector name
/// kPropertyInstanceVariable = 'V'  // followed by instance variable  name
/// kPropertyType = 'T'              // followed by old-style type encoding.
/// kPropertyWeak = 'W'              // 'weak' property
/// kPropertyStrong = 'P'            // property GC'able
/// kPropertyNonAtomic = 'N'         // property non-atomic
/// };
/// @endcode
void ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
                                                const Decl *Container,
                                                std::string& S) const {
  // Collect information from the property implementation decl(s).
  bool Dynamic = false;
  ObjCPropertyImplDecl *SynthesizePID = nullptr;

  if (ObjCPropertyImplDecl *PropertyImpDecl =
      getObjCPropertyImplDeclForPropertyDecl(PD, Container)) {
    if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
      Dynamic = true;
    else
      SynthesizePID = PropertyImpDecl;
  }

  // FIXME: This is not very efficient.
  S = "T";

  // Encode result type.
  // GCC has some special rules regarding encoding of properties which
  // closely resembles encoding of ivars.
  getObjCEncodingForPropertyType(PD->getType(), S);

  if (PD->isReadOnly()) {
    S += ",R";
    if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy)
      S += ",C";
    if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain)
      S += ",&";
    if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
      S += ",W";
  } else {
    // Read-write: the setter kind determines the ownership attribute.
    switch (PD->getSetterKind()) {
    case ObjCPropertyDecl::Assign: break;
    case ObjCPropertyDecl::Copy:   S += ",C"; break;
    case ObjCPropertyDecl::Retain: S += ",&"; break;
    case ObjCPropertyDecl::Weak:   S += ",W"; break;
    }
  }

  // It really isn't clear at all what this means, since properties
  // are "dynamic by default".
  if (Dynamic)
    S += ",D";

  if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
    S += ",N";

  // Custom getter/setter selectors are emitted by name.
  if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
    S += ",G";
    S += PD->getGetterName().getAsString();
  }

  if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
    S += ",S";
    S += PD->getSetterName().getAsString();
  }

  // For a synthesized property, record the backing instance variable.
  if (SynthesizePID) {
    const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
    S += ",V";
    S += OID->getNameAsString();
  }

  // FIXME: OBJCGC: weak & strong
}
/// getLegacyIntegralTypeEncoding -
/// Another legacy compatibility encoding: 32-bit longs are encoded as
/// 'l' or 'L' , but not always.  For typedefs, we need to use
/// 'i' or 'I' instead if encoding a struct field, or a pointer!
///
void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
  // Only typedefs of (unsigned) long are candidates for the rewrite.
  if (!isa<TypedefType>(PointeeTy.getTypePtr()))
    return;

  const BuiltinType *BT = PointeeTy->getAs<BuiltinType>();
  if (!BT)
    return;

  BuiltinType::Kind Kind = BT->getKind();
  if (Kind != BuiltinType::ULong && Kind != BuiltinType::Long)
    return;

  // Rewrite only when 'long' is 32 bits on this target.
  if (getIntWidth(PointeeTy) != 32)
    return;

  PointeeTy = (Kind == BuiltinType::ULong) ? UnsignedIntTy : IntTy;
}
/// Append the ObjC type encoding for \p T to \p S.
///
/// \param Field if non-null, the field being encoded (used for bit-fields
///        and to emit field names inside aggregates).
/// \param NotEncodedT if non-null, receives a type that could not be encoded.
void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
                                        const FieldDecl *Field,
                                        QualType *NotEncodedT) const {
  // We follow the behavior of gcc, expanding structures which are
  // directly pointed to, and expanding embedded structures. Note that
  // these rules are sufficient to prevent recursive encoding of the
  // same type.
  getObjCEncodingForTypeImpl(T, S, true, true, Field,
                             true /* outermost type */, false, false,
                             false, false, false, NotEncodedT);
}
/// Append the ObjC type encoding for a property of type \p T to \p S,
/// using the property-specific encoding rules.
void ASTContext::getObjCEncodingForPropertyType(QualType T,
                                                std::string& S) const {
  // Encode result type.
  // GCC has some special rules regarding encoding of properties which
  // closely resembles encoding of ivars.
  getObjCEncodingForTypeImpl(T, S, true, true, nullptr,
                             true /* outermost type */,
                             true /* encoding property */);
}
/// Map a builtin type kind to its single-character ObjC \@encode letter.
/// Asserts (llvm_unreachable) for kinds that have no ObjC encoding.
static char getObjCEncodingForPrimitiveKind(const ASTContext *C,
                                            BuiltinType::Kind kind) {
    switch (kind) {
    case BuiltinType::Void:       return 'v';
    case BuiltinType::Bool:       return 'B';
    case BuiltinType::Char_U:
    case BuiltinType::UChar:      return 'C';
    case BuiltinType::Char16:
    case BuiltinType::UShort:     return 'S';
    case BuiltinType::Char32:
    case BuiltinType::UInt:       return 'I';
    case BuiltinType::ULong:
        // 'long' encodes as 'L'/'l' only on 32-bit-long targets; LP64 uses
        // the long-long letters.
        return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q';
    case BuiltinType::UInt128:    return 'T';
    case BuiltinType::ULongLong:  return 'Q';
    case BuiltinType::Char_S:
    case BuiltinType::SChar:      return 'c';
    case BuiltinType::Short:      return 's';
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Int:        return 'i';
    case BuiltinType::Long:
        return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q';
    case BuiltinType::LongLong:   return 'q';
    case BuiltinType::Int128:     return 't';
    case BuiltinType::Float:      return 'f';
    case BuiltinType::Double:     return 'd';
    case BuiltinType::LongDouble: return 'D';
    case BuiltinType::NullPtr:    return '*'; // like char*

    case BuiltinType::Half:
      // FIXME: potentially need @encodes for these!
      return ' ';

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      llvm_unreachable("@encoding ObjC primitive type");

    // OpenCL and placeholder types don't need @encodings.
    case BuiltinType::OCLImage1d:
    case BuiltinType::OCLImage1dArray:
    case BuiltinType::OCLImage1dBuffer:
    case BuiltinType::OCLImage2d:
    case BuiltinType::OCLImage2dArray:
    case BuiltinType::OCLImage3d:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLSampler:
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(KIND, ID)
#define PLACEHOLDER_TYPE(KIND, ID) \
    case BuiltinType::KIND:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("invalid builtin type for @encode");
    // HLSL Change Start - HLSL-only builtins never reach ObjC @encode.
    case BuiltinType::Min12Int:
    case BuiltinType::Min10Float:
    case BuiltinType::Min16Float:
    case BuiltinType::Min16Int:
    case BuiltinType::Min16UInt:
    case BuiltinType::HalfFloat:
    case BuiltinType::LitInt:
    case BuiltinType::LitFloat:
    case BuiltinType::Int8_4Packed:
    case BuiltinType::UInt8_4Packed:
      llvm_unreachable("@encoding HLSL primitive type");
    // HLSL Change Ends
    }
    llvm_unreachable("invalid BuiltinType::Kind value");
}
/// Return the \@encode letter for an enum type.
static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) {
  EnumDecl *Enum = ET->getDecl();

  // A fixed enum encodes as its fixed underlying type; a non-fixed enum is
  // always 'i', regardless of size.
  if (Enum->isFixed()) {
    const BuiltinType *BT = Enum->getIntegerType()->castAs<BuiltinType>();
    return getObjCEncodingForPrimitiveKind(C, BT->getKind());
  }
  return 'i';
}
/// Append the \@encode string for a bit-field \p FD of type \p T to \p S.
static void EncodeBitField(const ASTContext *Ctx, std::string& S,
                           QualType T, const FieldDecl *FD) {
  assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
  S += 'b';
  // The NeXT runtime encodes bit fields as b followed by the number of bits.
  // The GNU runtime requires more information; bitfields are encoded as b,
  // then the offset (in bits) of the first element, then the type of the
  // bitfield, then the size in bits.  For example, in this structure:
  //
  // struct
  // {
  //    int integer;
  //    int flags:2;
  // };
  // On a 32-bit system, the encoding for flags would be b2 for the NeXT
  // runtime, but b32i2 for the GNU runtime.  The reason for this extra
  // information is not especially sensible, but we're stuck with it for
  // compatibility with GCC, although providing it breaks anything that
  // actually uses runtime introspection and wants to work on both runtimes...
  if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
    const RecordDecl *RD = FD->getParent();
    const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
    // Bit offset of the field within its record.
    S += llvm::utostr(RL.getFieldOffset(FD->getFieldIndex()));
    if (const EnumType *ET = T->getAs<EnumType>())
      S += ObjCEncodingForEnumType(Ctx, ET);
    else {
      const BuiltinType *BT = T->castAs<BuiltinType>();
      S += getObjCEncodingForPrimitiveKind(Ctx, BT->getKind());
    }
  }
  // Both runtimes end with the declared width in bits.
  S += llvm::utostr(FD->getBitWidthValue(*Ctx));
}
// FIXME: Use SmallString for accumulating string.
void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
bool ExpandPointedToStructures,
bool ExpandStructures,
const FieldDecl *FD,
bool OutermostType,
bool EncodingProperty,
bool StructField,
bool EncodeBlockParameters,
bool EncodeClassNames,
bool EncodePointerToObjCTypedef,
QualType *NotEncodedT) const {
CanQualType CT = getCanonicalType(T);
switch (CT->getTypeClass()) {
case Type::Builtin:
case Type::Enum:
if (FD && FD->isBitField())
return EncodeBitField(this, S, T, FD);
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CT))
S += getObjCEncodingForPrimitiveKind(this, BT->getKind());
else
S += ObjCEncodingForEnumType(this, cast<EnumType>(CT));
return;
case Type::Complex: {
const ComplexType *CT = T->castAs<ComplexType>();
S += 'j';
getObjCEncodingForTypeImpl(CT->getElementType(), S, false, false, nullptr);
return;
}
case Type::Atomic: {
const AtomicType *AT = T->castAs<AtomicType>();
S += 'A';
getObjCEncodingForTypeImpl(AT->getValueType(), S, false, false, nullptr);
return;
}
// encoding for pointer or reference types.
case Type::Pointer:
case Type::LValueReference:
case Type::RValueReference: {
QualType PointeeTy;
if (isa<PointerType>(CT)) {
const PointerType *PT = T->castAs<PointerType>();
if (PT->isObjCSelType()) {
S += ':';
return;
}
PointeeTy = PT->getPointeeType();
} else {
PointeeTy = T->castAs<ReferenceType>()->getPointeeType();
}
bool isReadOnly = false;
// For historical/compatibility reasons, the read-only qualifier of the
// pointee gets emitted _before_ the '^'. The read-only qualifier of
// the pointer itself gets ignored, _unless_ we are looking at a typedef!
// Also, do not emit the 'r' for anything but the outermost type!
if (isa<TypedefType>(T.getTypePtr())) {
if (OutermostType && T.isConstQualified()) {
isReadOnly = true;
S += 'r';
}
} else if (OutermostType) {
QualType P = PointeeTy;
while (P->getAs<PointerType>())
P = P->getAs<PointerType>()->getPointeeType();
if (P.isConstQualified()) {
isReadOnly = true;
S += 'r';
}
}
if (isReadOnly) {
// Another legacy compatibility encoding. Some ObjC qualifier and type
// combinations need to be rearranged.
// Rewrite "in const" from "nr" to "rn"
if (StringRef(S).endswith("nr"))
S.replace(S.end()-2, S.end(), "rn");
}
if (PointeeTy->isCharType()) {
// char pointer types should be encoded as '*' unless it is a
// type that has been typedef'd to 'BOOL'.
if (!isTypeTypedefedAsBOOL(PointeeTy)) {
S += '*';
return;
}
} else if (const RecordType *RTy = PointeeTy->getAs<RecordType>()) {
// GCC binary compat: Need to convert "struct objc_class *" to "#".
if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) {
S += '#';
return;
}
// GCC binary compat: Need to convert "struct objc_object *" to "@".
if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) {
S += '@';
return;
}
// fall through...
}
S += '^';
getLegacyIntegralTypeEncoding(PointeeTy);
getObjCEncodingForTypeImpl(PointeeTy, S, false, ExpandPointedToStructures,
nullptr, false, false, false, false, false, false,
NotEncodedT);
return;
}
case Type::ConstantArray:
case Type::IncompleteArray:
case Type::VariableArray: {
const ArrayType *AT = cast<ArrayType>(CT);
if (isa<IncompleteArrayType>(AT) && !StructField) {
// Incomplete arrays are encoded as a pointer to the array element.
S += '^';
getObjCEncodingForTypeImpl(AT->getElementType(), S,
false, ExpandStructures, FD);
} else {
S += '[';
if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
S += llvm::utostr(CAT->getSize().getZExtValue());
else {
//Variable length arrays are encoded as a regular array with 0 elements.
assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
"Unknown array type!");
S += '0';
}
getObjCEncodingForTypeImpl(AT->getElementType(), S,
false, ExpandStructures, FD,
false, false, false, false, false, false,
NotEncodedT);
S += ']';
}
return;
}
case Type::FunctionNoProto:
case Type::FunctionProto:
S += '?';
return;
case Type::Record: {
RecordDecl *RDecl = cast<RecordType>(CT)->getDecl();
S += RDecl->isUnion() ? '(' : '{';
// Anonymous structures print as '?'
if (const IdentifierInfo *II = RDecl->getIdentifier()) {
S += II->getName();
if (ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) {
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
llvm::raw_string_ostream OS(S);
TemplateSpecializationType::PrintTemplateArgumentList(OS,
TemplateArgs.data(),
TemplateArgs.size(),
(*this).getPrintingPolicy());
}
} else {
S += '?';
}
if (ExpandStructures) {
S += '=';
if (!RDecl->isUnion()) {
getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT);
} else {
for (const auto *Field : RDecl->fields()) {
if (FD) {
S += '"';
S += Field->getNameAsString();
S += '"';
}
// Special case bit-fields.
if (Field->isBitField()) {
getObjCEncodingForTypeImpl(Field->getType(), S, false, true,
Field);
} else {
QualType qt = Field->getType();
getLegacyIntegralTypeEncoding(qt);
getObjCEncodingForTypeImpl(qt, S, false, true,
FD, /*OutermostType*/false,
/*EncodingProperty*/false,
/*StructField*/true,
false, false, false, NotEncodedT);
}
}
}
}
S += RDecl->isUnion() ? ')' : '}';
return;
}
case Type::BlockPointer: {
const BlockPointerType *BT = T->castAs<BlockPointerType>();
S += "@?"; // Unlike a pointer-to-function, which is "^?".
if (EncodeBlockParameters) {
const FunctionType *FT = BT->getPointeeType()->castAs<FunctionType>();
S += '<';
// Block return type
getObjCEncodingForTypeImpl(
FT->getReturnType(), S, ExpandPointedToStructures, ExpandStructures,
FD, false /* OutermostType */, EncodingProperty,
false /* StructField */, EncodeBlockParameters, EncodeClassNames, false,
NotEncodedT);
// Block self
S += "@?";
// Block parameters
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
for (const auto &I : FPT->param_types())
getObjCEncodingForTypeImpl(
I, S, ExpandPointedToStructures, ExpandStructures, FD,
false /* OutermostType */, EncodingProperty,
false /* StructField */, EncodeBlockParameters, EncodeClassNames,
false, NotEncodedT);
}
S += '>';
}
return;
}
case Type::ObjCObject: {
// hack to match legacy encoding of *id and *Class
QualType Ty = getObjCObjectPointerType(CT);
if (Ty->isObjCIdType()) {
S += "{objc_object=}";
return;
}
else if (Ty->isObjCClassType()) {
S += "{objc_class=}";
return;
}
LLVM_FALLTHROUGH; // HLSL Change
}
case Type::ObjCInterface: {
// Ignore protocol qualifiers when mangling at this level.
// @encode(class_name)
ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface();
S += '{';
S += OI->getObjCRuntimeNameAsString();
S += '=';
SmallVector<const ObjCIvarDecl*, 32> Ivars;
DeepCollectObjCIvars(OI, true, Ivars);
for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
const FieldDecl *Field = cast<FieldDecl>(Ivars[i]);
if (Field->isBitField())
getObjCEncodingForTypeImpl(Field->getType(), S, false, true, Field);
else
getObjCEncodingForTypeImpl(Field->getType(), S, false, true, FD,
false, false, false, false, false,
EncodePointerToObjCTypedef,
NotEncodedT);
}
S += '}';
return;
}
case Type::ObjCObjectPointer: {
const ObjCObjectPointerType *OPT = T->castAs<ObjCObjectPointerType>();
if (OPT->isObjCIdType()) {
S += '@';
return;
}
if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
// FIXME: Consider if we need to output qualifiers for 'Class<p>'.
// Since this is a binary compatibility issue, need to consult with runtime
// folks. Fortunately, this is a *very* obsure construct.
S += '#';
return;
}
if (OPT->isObjCQualifiedIdType()) {
getObjCEncodingForTypeImpl(getObjCIdType(), S,
ExpandPointedToStructures,
ExpandStructures, FD);
if (FD || EncodingProperty || EncodeClassNames) {
// Note that we do extended encoding of protocol qualifer list
// Only when doing ivar or property encoding.
S += '"';
for (const auto *I : OPT->quals()) {
S += '<';
S += I->getObjCRuntimeNameAsString();
S += '>';
}
S += '"';
}
return;
}
QualType PointeeTy = OPT->getPointeeType();
if (!EncodingProperty &&
isa<TypedefType>(PointeeTy.getTypePtr()) &&
!EncodePointerToObjCTypedef) {
// Another historical/compatibility reason.
// We encode the underlying type which comes out as
// {...};
S += '^';
if (FD && OPT->getInterfaceDecl()) {
// Prevent recursive encoding of fields in some rare cases.
ObjCInterfaceDecl *OI = OPT->getInterfaceDecl();
SmallVector<const ObjCIvarDecl*, 32> Ivars;
DeepCollectObjCIvars(OI, true, Ivars);
for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
if (cast<FieldDecl>(Ivars[i]) == FD) {
S += '{';
S += OI->getObjCRuntimeNameAsString();
S += '}';
return;
}
}
}
getObjCEncodingForTypeImpl(PointeeTy, S,
false, ExpandPointedToStructures,
nullptr,
false, false, false, false, false,
/*EncodePointerToObjCTypedef*/true);
return;
}
S += '@';
if (OPT->getInterfaceDecl() &&
(FD || EncodingProperty || EncodeClassNames)) {
S += '"';
S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString();
for (const auto *I : OPT->quals()) {
S += '<';
S += I->getObjCRuntimeNameAsString();
S += '>';
}
S += '"';
}
return;
}
// gcc just blithely ignores member pointers.
// FIXME: we shoul do better than that. 'M' is available.
case Type::MemberPointer:
// This matches gcc's encoding, even though technically it is insufficient.
//FIXME. We should do a better job than gcc.
case Type::Vector:
case Type::ExtVector:
// Until we have a coherent encoding of these three types, issue warning.
{ if (NotEncodedT)
*NotEncodedT = T;
return;
}
// We could see an undeduced auto type here during error recovery.
// Just ignore it.
case Type::Auto:
return;
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
#define DEPENDENT_TYPE(KIND, BASE) \
case Type::KIND:
#define NON_CANONICAL_TYPE(KIND, BASE) \
case Type::KIND:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \
case Type::KIND:
#include "clang/AST/TypeNodes.def"
llvm_unreachable("@encode for dependent type!");
}
llvm_unreachable("bad type kind!");
}
/// Append the Objective-C type encoding for the layout of \p RDecl to \p S.
/// Non-virtual bases, fields, and (when \p includeVBases) virtual bases are
/// emitted in ascending offset order so the encoding matches the record's
/// actual memory layout.
///
/// \param FD when non-null, each member is prefixed with its quoted name
///        (ivar-style encoding).
/// \param includeVBases false when encoding a base-class subobject, whose
///        virtual bases belong to the most-derived object instead.
/// \param NotEncodedT if non-null, receives a type that could not be encoded.
void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
                                                 std::string &S,
                                                 const FieldDecl *FD,
                                                 bool includeVBases,
                                                 QualType *NotEncodedT) const {
  assert(RDecl && "Expected non-null RecordDecl");
  assert(!RDecl->isUnion() && "Should not be called for unions");
  // Nothing to encode for a forward declaration.
  if (!RDecl->getDefinition())
    return;
  CXXRecordDecl *CXXRec = dyn_cast<CXXRecordDecl>(RDecl);
  // All bases and fields keyed by their bit offset; a multimap tolerates
  // members that share an offset (e.g. with a primary virtual base).
  std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
  const ASTRecordLayout &layout = getASTRecordLayout(RDecl);
  if (CXXRec) {
    for (const auto &BI : CXXRec->bases()) {
      if (!BI.isVirtual()) {
        CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
        // Empty bases occupy no storage and are omitted from the encoding.
        if (base->isEmpty())
          continue;
        uint64_t offs = toBits(layout.getBaseClassOffset(base));
        FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
                                  std::make_pair(offs, base));
      }
    }
  }
  unsigned i = 0;
  for (auto *Field : RDecl->fields()) {
    uint64_t offs = layout.getFieldOffset(i);
    FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
                              std::make_pair(offs, Field));
    ++i;
  }
  if (CXXRec && includeVBases) {
    for (const auto &BI : CXXRec->vbases()) {
      CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
      if (base->isEmpty())
        continue;
      uint64_t offs = toBits(layout.getVBaseClassOffset(base));
      // Only add the virtual base if it lands past the non-virtual part and
      // is not already recorded (a primary virtual base shares offset 0).
      if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) &&
          FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end())
        FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(),
                                  std::make_pair(offs, base));
    }
  }
  CharUnits size;
  if (CXXRec) {
    // A base-class subobject only spans its non-virtual size.
    size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
  } else {
    size = layout.getSize();
  }
#ifndef NDEBUG
  // Track the current bit offset to sanity-check the walk in debug builds.
  uint64_t CurOffs = 0;
#endif
  std::multimap<uint64_t, NamedDecl *>::iterator
    CurLayObj = FieldOrBaseOffsets.begin();
  if (CXXRec && CXXRec->isDynamicClass() &&
      (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
    // A dynamic class with nothing at offset 0 has a vtable pointer there;
    // encode it as a pointer-to-pointer-to-unknown ("^^?").
    if (FD) {
      S += "\"_vptr$";
      std::string recname = CXXRec->getNameAsString();
      if (recname.empty()) recname = "?";
      S += recname;
      S += '"';
    }
    S += "^^?";
#ifndef NDEBUG
    CurOffs += getTypeSize(VoidPtrTy);
#endif
  }
  if (!RDecl->hasFlexibleArrayMember()) {
    // Mark the end of the structure.
    uint64_t offs = toBits(size);
    FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
                              std::make_pair(offs, nullptr));
  }
  for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
#ifndef NDEBUG
    assert(CurOffs <= CurLayObj->first);
    if (CurOffs < CurLayObj->first) {
      uint64_t padding = CurLayObj->first - CurOffs;
      // FIXME: There doesn't seem to be a way to indicate in the encoding that
      // packing/alignment of members is different that normal, in which case
      // the encoding will be out-of-sync with the real layout.
      // If the runtime switches to just consider the size of types without
      // taking into account alignment, we could make padding explicit in the
      // encoding (e.g. using arrays of chars). The encoding strings would be
      // longer then though.
      CurOffs += padding;
    }
#endif
    NamedDecl *dcl = CurLayObj->second;
    if (!dcl)
      break; // reached end of structure.
    if (CXXRecordDecl *base = dyn_cast<CXXRecordDecl>(dcl)) {
      // We expand the bases without their virtual bases since those are going
      // in the initial structure. Note that this differs from gcc which
      // expands virtual bases each time one is encountered in the hierarchy,
      // making the encoding type bigger than it really is.
      getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false,
                                      NotEncodedT);
      assert(!base->isEmpty());
#ifndef NDEBUG
      CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
#endif
    } else {
      FieldDecl *field = cast<FieldDecl>(dcl);
      if (FD) {
        S += '"';
        S += field->getNameAsString();
        S += '"';
      }
      // Special case bit-fields: they use the dedicated 'b<width>' form.
      if (field->isBitField()) {
        EncodeBitField(this, S, field->getType(), field);
#ifndef NDEBUG
        CurOffs += field->getBitWidthValue(*this);
#endif
      } else {
        QualType qt = field->getType();
        // Match the legacy encoding of certain integral typedefs (e.g. BOOL).
        getLegacyIntegralTypeEncoding(qt);
        getObjCEncodingForTypeImpl(qt, S, false, true, FD,
                                   /*OutermostType*/false,
                                   /*EncodingProperty*/false,
                                   /*StructField*/true,
                                   false, false, false, NotEncodedT);
#ifndef NDEBUG
        CurOffs += getTypeSize(field->getType());
#endif
      }
    }
  }
}
/// Append the one-character encodings for the ObjC method-parameter
/// qualifiers set in \p QT (in/inout/out/bycopy/byref/oneway) to \p S.
void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
                                                 std::string& S) const {
  // Qualifier-bit -> encoding-character pairs, in canonical emission order.
  static const struct {
    Decl::ObjCDeclQualifier Flag;
    char Code;
  } Encodings[] = {
    { Decl::OBJC_TQ_In,     'n' },
    { Decl::OBJC_TQ_Inout,  'N' },
    { Decl::OBJC_TQ_Out,    'o' },
    { Decl::OBJC_TQ_Bycopy, 'O' },
    { Decl::OBJC_TQ_Byref,  'R' },
    { Decl::OBJC_TQ_Oneway, 'V' },
  };
  for (const auto &E : Encodings)
    if (QT & E.Flag)
      S += E.Code;
}
/// Lazily create and cache the implicit "typedef ... id" declaration.
TypedefDecl *ASTContext::getObjCIdDecl() const {
  if (ObjCIdDecl)
    return ObjCIdDecl;
  // 'id' is a pointer to an unqualified ObjC object of builtin-id type.
  QualType IdTy =
      getObjCObjectPointerType(getObjCObjectType(ObjCBuiltinIdTy, { }, { }));
  ObjCIdDecl = buildImplicitTypedef(IdTy, "id");
  return ObjCIdDecl;
}
/// Lazily create and cache the implicit "typedef ... SEL" declaration.
TypedefDecl *ASTContext::getObjCSelDecl() const {
  if (ObjCSelDecl)
    return ObjCSelDecl;
  // SEL is a pointer to the builtin selector type.
  ObjCSelDecl =
      buildImplicitTypedef(getPointerType(ObjCBuiltinSelTy), "SEL");
  return ObjCSelDecl;
}
/// Lazily create and cache the implicit "typedef ... Class" declaration.
TypedefDecl *ASTContext::getObjCClassDecl() const {
  if (ObjCClassDecl)
    return ObjCClassDecl;
  // 'Class' is a pointer to an unqualified object of builtin-Class type.
  QualType ClassTy =
      getObjCObjectPointerType(getObjCObjectType(ObjCBuiltinClassTy, { }, { }));
  ObjCClassDecl = buildImplicitTypedef(ClassTy, "Class");
  return ObjCClassDecl;
}
/// Lazily create and cache the implicit 'Protocol' interface declaration.
ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
  if (!ObjCProtocolClassDecl)
    ObjCProtocolClassDecl = ObjCInterfaceDecl::Create(
        *this, getTranslationUnitDecl(), SourceLocation(),
        &Idents.get("Protocol"),
        /*typeParamList=*/nullptr,
        /*PrevDecl=*/nullptr, SourceLocation(),
        /*isInternal=*/true);
  return ObjCProtocolClassDecl;
}
//===----------------------------------------------------------------------===//
// __builtin_va_list Construction Functions
//===----------------------------------------------------------------------===//
/// Build the va_list for targets where it is simply: typedef char *.
static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
  // typedef char* __builtin_va_list;
  return Context->buildImplicitTypedef(
      Context->getPointerType(Context->CharTy), "__builtin_va_list");
}
/// Build the va_list for targets where it is simply: typedef void *.
static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
  // typedef void* __builtin_va_list;
  return Context->buildImplicitTypedef(
      Context->getPointerType(Context->VoidTy), "__builtin_va_list");
}
/// Build the AArch64 (AAPCS64) va_list: a five-member struct named
/// __va_list, placed in namespace std when compiling C++.
static TypedefDecl *
CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
  // struct __va_list
  RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list");
  if (Context->getLangOpts().CPlusPlus) {
    // namespace std { struct __va_list {
    NamespaceDecl *NS = NamespaceDecl::Create(
        const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(),
        /*Inline*/ false, SourceLocation(), SourceLocation(),
        &Context->Idents.get("std"),
        /*PrevDecl*/ nullptr);
    NS->setImplicit();
    VaListTagDecl->setDeclContext(NS);
  }
  VaListTagDecl->startDefinition();
  // Members of the AAPCS64 va_list record, in declaration order.
  const QualType VoidPtrTy = Context->getPointerType(Context->VoidTy);
  const struct {
    const char *Name;
    QualType Ty;
  } Members[] = {
      {"__stack", VoidPtrTy},        // void *__stack;
      {"__gr_top", VoidPtrTy},       // void *__gr_top;
      {"__vr_top", VoidPtrTy},       // void *__vr_top;
      {"__gr_offs", Context->IntTy}, // int __gr_offs;
      {"__vr_offs", Context->IntTy}, // int __vr_offs;
  };
  for (const auto &M : Members) {
    FieldDecl *Field = FieldDecl::Create(
        const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(),
        SourceLocation(), &Context->Idents.get(M.Name), M.Ty,
        /*TInfo=*/nullptr,
        /*BitWidth=*/nullptr,
        /*Mutable=*/false, ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(Field);
  }
  VaListTagDecl->completeDefinition();
  QualType VaListTagType = Context->getRecordType(VaListTagDecl);
  Context->VaListTagTy = VaListTagType;
  // } __builtin_va_list;
  return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list");
}
/// Build the PowerPC SVR4 va_list: a one-element array of __va_list_tag.
static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
  // typedef struct __va_list_tag {
  RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
  VaListTagDecl->startDefinition();
  // Members of the va_list record, in declaration order.
  const QualType VoidPtrTy = Context->getPointerType(Context->VoidTy);
  const struct {
    const char *Name;
    QualType Ty;
  } Members[] = {
      {"gpr", Context->UnsignedCharTy},       // unsigned char gpr;
      {"fpr", Context->UnsignedCharTy},       // unsigned char fpr;
      {"reserved", Context->UnsignedShortTy}, // unsigned short reserved;
      {"overflow_arg_area", VoidPtrTy},       // void* overflow_arg_area;
      {"reg_save_area", VoidPtrTy},           // void* reg_save_area;
  };
  for (const auto &M : Members) {
    FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl,
                                         SourceLocation(), SourceLocation(),
                                         &Context->Idents.get(M.Name), M.Ty,
                                         /*TInfo=*/nullptr,
                                         /*BitWidth=*/nullptr,
                                         /*Mutable=*/false, ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(Field);
  }
  VaListTagDecl->completeDefinition();
  QualType VaListTagType = Context->getRecordType(VaListTagDecl);
  Context->VaListTagTy = VaListTagType;
  // } __va_list_tag;
  TypedefDecl *VaListTagTypedefDecl =
      Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
  QualType VaListTagTypedefType =
      Context->getTypedefType(VaListTagTypedefDecl);
  // typedef __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      VaListTagTypedefType, Size, ArrayType::Normal, 0);
  return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}
/// Build the System V x86-64 va_list: a one-element array of __va_list_tag.
static TypedefDecl *
CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
  // typedef struct __va_list_tag {
  RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
  VaListTagDecl->startDefinition();
  // Members of the va_list record, in declaration order.
  const QualType VoidPtrTy = Context->getPointerType(Context->VoidTy);
  const struct {
    const char *Name;
    QualType Ty;
  } Members[] = {
      {"gp_offset", Context->UnsignedIntTy}, // unsigned gp_offset;
      {"fp_offset", Context->UnsignedIntTy}, // unsigned fp_offset;
      {"overflow_arg_area", VoidPtrTy},      // void* overflow_arg_area;
      {"reg_save_area", VoidPtrTy},          // void* reg_save_area;
  };
  for (const auto &M : Members) {
    FieldDecl *Field = FieldDecl::Create(
        const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(),
        SourceLocation(), &Context->Idents.get(M.Name), M.Ty,
        /*TInfo=*/nullptr,
        /*BitWidth=*/nullptr,
        /*Mutable=*/false, ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(Field);
  }
  VaListTagDecl->completeDefinition();
  QualType VaListTagType = Context->getRecordType(VaListTagDecl);
  Context->VaListTagTy = VaListTagType;
  // } __va_list_tag;
  TypedefDecl *VaListTagTypedefDecl =
      Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
  QualType VaListTagTypedefType =
      Context->getTypedefType(VaListTagTypedefDecl);
  // typedef __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      VaListTagTypedefType, Size, ArrayType::Normal, 0);
  return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}
/// Build the PNaCl va_list: a four-element int array.
static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) {
  // typedef int __builtin_va_list[4];
  llvm::APInt Len(Context->getTypeSize(Context->getSizeType()), 4);
  QualType IntArrayType = Context->getConstantArrayType(
      Context->IntTy, Len, ArrayType::Normal, 0);
  return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list");
}
/// Build the 32-bit ARM AAPCS va_list: a struct __va_list holding a single
/// void* member, placed in namespace std when compiling C++.
static TypedefDecl *
CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
  // struct __va_list
  RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list");
  if (Context->getLangOpts().CPlusPlus) {
    // namespace std { struct __va_list {
    NamespaceDecl *NS = NamespaceDecl::Create(
        const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(),
        /*Inline*/ false, SourceLocation(), SourceLocation(),
        &Context->Idents.get("std"),
        /*PrevDecl*/ nullptr);
    NS->setImplicit();
    VaListDecl->setDeclContext(NS);
  }
  VaListDecl->startDefinition();
  // void * __ap;
  FieldDecl *ApField = FieldDecl::Create(
      const_cast<ASTContext &>(*Context), VaListDecl, SourceLocation(),
      SourceLocation(), &Context->Idents.get("__ap"),
      Context->getPointerType(Context->VoidTy),
      /*TInfo=*/nullptr,
      /*BitWidth=*/nullptr,
      /*Mutable=*/false, ICIS_NoInit);
  ApField->setAccess(AS_public);
  VaListDecl->addDecl(ApField);
  // };
  VaListDecl->completeDefinition();
  // typedef struct __va_list __builtin_va_list;
  return Context->buildImplicitTypedef(Context->getRecordType(VaListDecl),
                                       "__builtin_va_list");
}
/// Build the SystemZ va_list: a one-element array of __va_list_tag.
static TypedefDecl *
CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
  // typedef struct __va_list_tag {
  RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list_tag");
  VaListTagDecl->startDefinition();
  // Members of the va_list record, in declaration order.
  const QualType VoidPtrTy = Context->getPointerType(Context->VoidTy);
  const struct {
    const char *Name;
    QualType Ty;
  } Members[] = {
      {"__gpr", Context->LongTy},        // long __gpr;
      {"__fpr", Context->LongTy},        // long __fpr;
      {"__overflow_arg_area", VoidPtrTy}, // void *__overflow_arg_area;
      {"__reg_save_area", VoidPtrTy},     // void *__reg_save_area;
  };
  for (const auto &M : Members) {
    FieldDecl *Field = FieldDecl::Create(
        const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(),
        SourceLocation(), &Context->Idents.get(M.Name), M.Ty,
        /*TInfo=*/nullptr,
        /*BitWidth=*/nullptr,
        /*Mutable=*/false, ICIS_NoInit);
    Field->setAccess(AS_public);
    VaListTagDecl->addDecl(Field);
  }
  VaListTagDecl->completeDefinition();
  QualType VaListTagType = Context->getRecordType(VaListTagDecl);
  Context->VaListTagTy = VaListTagType;
  // } __va_list_tag;
  TypedefDecl *VaListTagTypedefDecl =
      Context->buildImplicitTypedef(VaListTagType, "__va_list_tag");
  QualType VaListTagTypedefType =
      Context->getTypedefType(VaListTagTypedefDecl);
  // typedef __va_list_tag __builtin_va_list[1];
  llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
  QualType VaListTagArrayType = Context->getConstantArrayType(
      VaListTagTypedefType, Size, ArrayType::Normal, 0);
  return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}
/// Dispatch to the target-appropriate builder for the __builtin_va_list
/// typedef.  \p Kind comes from TargetInfo::getBuiltinVaListKind().
static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
                                     TargetInfo::BuiltinVaListKind Kind) {
  // The switch is exhaustive over BuiltinVaListKind; a missing case is
  // caught by -Wswitch and by the unreachable below.
  switch (Kind) {
  case TargetInfo::CharPtrBuiltinVaList:
    return CreateCharPtrBuiltinVaListDecl(Context);
  case TargetInfo::VoidPtrBuiltinVaList:
    return CreateVoidPtrBuiltinVaListDecl(Context);
  case TargetInfo::AArch64ABIBuiltinVaList:
    return CreateAArch64ABIBuiltinVaListDecl(Context);
  case TargetInfo::PowerABIBuiltinVaList:
    return CreatePowerABIBuiltinVaListDecl(Context);
  case TargetInfo::X86_64ABIBuiltinVaList:
    return CreateX86_64ABIBuiltinVaListDecl(Context);
  case TargetInfo::PNaClABIBuiltinVaList:
    return CreatePNaClABIBuiltinVaListDecl(Context);
  case TargetInfo::AAPCSABIBuiltinVaList:
    return CreateAAPCSABIBuiltinVaListDecl(Context);
  case TargetInfo::SystemZBuiltinVaList:
    return CreateSystemZBuiltinVaListDecl(Context);
  }
  llvm_unreachable("Unhandled __builtin_va_list type kind");
}
/// Lazily build and cache the target-specific __builtin_va_list typedef.
TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
  if (BuiltinVaListDecl)
    return BuiltinVaListDecl;
  BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind());
  // All builders produce implicit declarations.
  assert(BuiltinVaListDecl->isImplicit());
  return BuiltinVaListDecl;
}
/// Retrieve the type of the __va_list_tag record.  It is cached as a side
/// effect of creating the __builtin_va_list declaration.
QualType ASTContext::getVaListTagType() const {
  // Force the creation of VaListTagTy by building the __builtin_va_list
  // declaration.
  if (VaListTagTy.isNull())
    (void) getBuiltinVaListDecl();
  return VaListTagTy;
}
/// Record \p Decl as the interface backing the 'NSConstantString' type.
/// Must be called at most once per ASTContext.
void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
  assert(ObjCConstantStringType.isNull() &&
         "'NSConstantString' type already set!");
  ObjCConstantStringType = getObjCInterfaceType(Decl);
}
/// \brief Retrieve the template name that corresponds to a non-empty
/// lookup, packaging the found declarations into overloaded-template
/// storage allocated from this context.
TemplateName
ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
                                      UnresolvedSetIterator End) const {
  const unsigned NumDecls = End - Begin;
  assert(NumDecls > 1 && "set is not overloaded!");
  // Allocate the storage header plus one trailing pointer per declaration.
  void *Mem = Allocate(sizeof(OverloadedTemplateStorage) +
                       NumDecls * sizeof(FunctionTemplateDecl*));
  OverloadedTemplateStorage *OT =
      new (Mem) OverloadedTemplateStorage(NumDecls);
  NamedDecl **Slot = OT->getStorage();
  for (UnresolvedSetIterator I = Begin; I != End; ++I) {
    NamedDecl *D = *I;
    // Only function templates (possibly behind using-shadow declarations)
    // may participate in an overloaded template name.
    assert(isa<FunctionTemplateDecl>(D) ||
           (isa<UsingShadowDecl>(D) &&
            isa<FunctionTemplateDecl>(D->getUnderlyingDecl())));
    *Slot++ = D;
  }
  return TemplateName(OT);
}
/// \brief Retrieve the template name that represents a qualified
/// template name such as \c std::vector, uniqued in this context.
TemplateName
ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
                                     bool TemplateKeyword,
                                     TemplateDecl *Template) const {
  assert(NNS && "Missing nested-name-specifier in qualified template name");
  // FIXME: Canonicalization?
  llvm::FoldingSetNodeID ID;
  QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template);
  void *InsertPos = nullptr;
  // Reuse an already-uniqued node if one exists.
  if (QualifiedTemplateName *Existing =
          QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos))
    return TemplateName(Existing);
  QualifiedTemplateName *QTN =
      new (*this, llvm::alignOf<QualifiedTemplateName>())
          QualifiedTemplateName(NNS, TemplateKeyword, Template);
  QualifiedTemplateNames.InsertNode(QTN, InsertPos);
  return TemplateName(QTN);
}
/// \brief Retrieve the template name that represents a dependent
/// template name such as \c MetaFun::template apply.
///
/// Nodes are uniqued by (specifier, name); when the specifier is not
/// already canonical, the node is linked to the name built over the
/// canonical specifier so canonical comparison stays cheap.
TemplateName
ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
                                     const IdentifierInfo *Name) const {
  assert((!NNS || NNS->isDependent()) &&
         "Nested name specifier must be dependent");
  llvm::FoldingSetNodeID ID;
  DependentTemplateName::Profile(ID, NNS, Name);
  void *InsertPos = nullptr;
  // Reuse an already-uniqued node if one exists.
  DependentTemplateName *QTN =
    DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
  if (QTN)
    return TemplateName(QTN);
  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
  if (CanonNNS == NNS) {
    // The specifier is canonical, so this node is its own canonical form.
    QTN = new (*this, llvm::alignOf<DependentTemplateName>())
      DependentTemplateName(NNS, Name);
  } else {
    // Build (or fetch) the canonical name first, then link to it.
    TemplateName Canon = getDependentTemplateName(CanonNNS, Name);
    QTN = new (*this, llvm::alignOf<DependentTemplateName>())
      DependentTemplateName(NNS, Name, Canon);
    // The recursive call must not have inserted a node for *this* profile;
    // re-query to keep InsertPos valid and verify in debug builds.
    DependentTemplateName *CheckQTN =
      DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckQTN && "Dependent type name canonicalization broken");
    (void)CheckQTN;
  }
  DependentTemplateNames.InsertNode(QTN, InsertPos);
  return TemplateName(QTN);
}
/// \brief Retrieve the template name that represents a dependent
/// template name such as \c MetaFun::template operator+.
///
/// Mirrors the identifier overload above, but keyed on an overloaded
/// operator kind instead of a name.
TemplateName
ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
                                     OverloadedOperatorKind Operator) const {
  assert((!NNS || NNS->isDependent()) &&
         "Nested name specifier must be dependent");
  llvm::FoldingSetNodeID ID;
  DependentTemplateName::Profile(ID, NNS, Operator);
  void *InsertPos = nullptr;
  // Reuse an already-uniqued node if one exists.
  DependentTemplateName *QTN
    = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
  if (QTN)
    return TemplateName(QTN);
  NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
  if (CanonNNS == NNS) {
    // The specifier is canonical, so this node is its own canonical form.
    QTN = new (*this, llvm::alignOf<DependentTemplateName>())
      DependentTemplateName(NNS, Operator);
  } else {
    // Build (or fetch) the canonical name first, then link to it.
    TemplateName Canon = getDependentTemplateName(CanonNNS, Operator);
    QTN = new (*this, llvm::alignOf<DependentTemplateName>())
      DependentTemplateName(NNS, Operator, Canon);
    // Re-query to keep InsertPos valid after the recursive call and verify
    // it did not insert a node for this profile.
    DependentTemplateName *CheckQTN
      = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CheckQTN && "Dependent template name canonicalization broken");
    (void)CheckQTN;
  }
  DependentTemplateNames.InsertNode(QTN, InsertPos);
  return TemplateName(QTN);
}
/// Retrieve a uniqued template name recording that template template
/// parameter \p param was substituted with \p replacement.
TemplateName
ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param,
                                         TemplateName replacement) const {
  llvm::FoldingSetNodeID ID;
  SubstTemplateTemplateParmStorage::Profile(ID, param, replacement);
  void *insertPos = nullptr;
  // Reuse an already-uniqued node if one exists.
  if (SubstTemplateTemplateParmStorage *Existing =
          SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos))
    return TemplateName(Existing);
  auto *Subst =
      new (*this) SubstTemplateTemplateParmStorage(param, replacement);
  SubstTemplateTemplateParms.InsertNode(Subst, insertPos);
  return TemplateName(Subst);
}
/// Retrieve a uniqued template name recording that template template
/// parameter pack \p Param was substituted with argument pack \p ArgPack.
TemplateName
ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param,
                                       const TemplateArgument &ArgPack) const {
  ASTContext &Self = const_cast<ASTContext &>(*this);
  llvm::FoldingSetNodeID ID;
  SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack);
  void *InsertPos = nullptr;
  // Reuse an already-uniqued node if one exists.
  if (SubstTemplateTemplateParmPackStorage *Existing =
          SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos))
    return TemplateName(Existing);
  auto *Subst = new (*this) SubstTemplateTemplateParmPackStorage(
      Param, ArgPack.pack_size(), ArgPack.pack_begin());
  SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos);
  return TemplateName(Subst);
}
/// getFromTargetType - Given one of the integer types provided by
/// TargetInfo, produce the corresponding type. The unsigned @p Type
/// is actually a value of type @c TargetInfo::IntType.
CanQualType ASTContext::getFromTargetType(unsigned Type) const {
  switch (Type) {
  // NoInt means "no such type on this target"; map it to a null type.
  case TargetInfo::NoInt: return CanQualType();
  case TargetInfo::SignedChar: return SignedCharTy;
  case TargetInfo::UnsignedChar: return UnsignedCharTy;
  case TargetInfo::SignedShort: return ShortTy;
  case TargetInfo::UnsignedShort: return UnsignedShortTy;
  case TargetInfo::SignedInt: return IntTy;
  case TargetInfo::UnsignedInt: return UnsignedIntTy;
  case TargetInfo::SignedLong: return LongTy;
  case TargetInfo::UnsignedLong: return UnsignedLongTy;
  case TargetInfo::SignedLongLong: return LongLongTy;
  case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
  }
  llvm_unreachable("Unhandled TargetInfo::IntType value");
}
//===----------------------------------------------------------------------===//
// Type Predicates.
//===----------------------------------------------------------------------===//
/// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
/// garbage collection attribute.
///
Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
  // Outside of ObjC GC mode, no type carries a GC attribute.
  if (getLangOpts().getGC() == LangOptions::NonGC)
    return Qualifiers::GCNone;
  assert(getLangOpts().ObjC1);
  Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();
  // Default behaviour under objective-C's gc is for ObjC pointers
  // (or pointers to them) be treated as though they were declared
  // as __strong.
  if (GCAttrs == Qualifiers::GCNone) {
    if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
      return Qualifiers::Strong;
    else if (Ty->isPointerType())
      // Recurse through pointer types so e.g. 'id *' inherits __strong.
      return getObjCGCAttrKind(Ty->getAs<PointerType>()->getPointeeType());
  } else {
    // It's not valid to set GC attributes on anything that isn't a
    // pointer.
#ifndef NDEBUG
    // Strip array types first: an array legitimately carries the attribute
    // of its (pointer) element type.
    QualType CT = Ty->getCanonicalTypeInternal();
    while (const ArrayType *AT = dyn_cast<ArrayType>(CT))
      CT = AT->getElementType();
    assert(CT->isAnyPointerType() || CT->isBlockPointerType());
#endif
  }
  return GCAttrs;
}
//===----------------------------------------------------------------------===//
// Type Compatibility Testing
//===----------------------------------------------------------------------===//
/// areCompatVectorTypes - Return true if the two specified vector types
/// (already canonical and unqualified) have the same element type and the
/// same number of elements.
static bool areCompatVectorTypes(const VectorType *LHS,
                                 const VectorType *RHS) {
  assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
  if (LHS->getElementType() != RHS->getElementType())
    return false;
  return LHS->getNumElements() == RHS->getNumElements();
}
/// Determine whether two vector types are compatible, treating Neon vector
/// types and most AltiVec vector types as equivalent to the corresponding
/// GCC vector types.
bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
                                          QualType SecondVec) {
  assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
  assert(SecondVec->isVectorType() && "SecondVec should be a vector type");
  // Identical (up to qualifiers) vector types are trivially compatible.
  if (hasSameUnqualifiedType(FirstVec, SecondVec))
    return true;
  const VectorType *First = FirstVec->getAs<VectorType>();
  const VectorType *Second = SecondVec->getAs<VectorType>();
  // AltiVec 'pixel' and 'bool' vectors keep their distinct identity and are
  // never merged with the generic GCC vector types.
  const bool NeitherIsAltiVecBoolOrPixel =
      First->getVectorKind() != VectorType::AltiVecPixel &&
      First->getVectorKind() != VectorType::AltiVecBool &&
      Second->getVectorKind() != VectorType::AltiVecPixel &&
      Second->getVectorKind() != VectorType::AltiVecBool;
  return First->getNumElements() == Second->getNumElements() &&
         hasSameType(First->getElementType(), Second->getElementType()) &&
         NeitherIsAltiVecBoolOrPixel;
}
//===----------------------------------------------------------------------===//
// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
//===----------------------------------------------------------------------===//
/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
/// inheritance hierarchy of 'rProto'.
bool
ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
                                           ObjCProtocolDecl *rProto) const {
  // A protocol is trivially compatible with itself.
  if (declaresSameEntity(lProto, rProto))
    return true;
  // Otherwise search the protocols rProto (transitively) inherits.
  for (auto *Inherited : rProto->protocols())
    if (ProtocolCompatibleWithProtocol(lProto, Inherited))
      return true;
  return false;
}
/// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and
/// Class<pr1, ...>: every protocol named on the LHS must be satisfied by
/// some protocol named on the RHS.
bool ASTContext::ObjCQualifiedClassTypesAreCompatible(QualType lhs,
                                                      QualType rhs) {
  const ObjCObjectPointerType *lhsQID = lhs->getAs<ObjCObjectPointerType>();
  const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
  assert ((lhsQID && rhsOPT) && "ObjCQualifiedClassTypesAreCompatible");
  for (auto *lhsProto : lhsQID->quals()) {
    bool Satisfied = false;
    for (auto *rhsProto : rhsOPT->quals())
      if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) {
        Satisfied = true;
        break;
      }
    // An LHS protocol with no compatible RHS protocol makes the types
    // incompatible.
    if (!Satisfied)
      return false;
  }
  return true;
}
/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
/// ObjCQualifiedIDType, i.e. id<P...>; determine whether the two pointer
/// types are compatible. When \p compare is true, protocol compatibility is
/// also accepted in the reverse direction (used for comparisons rather than
/// assignments).
bool ASTContext::ObjCQualifiedIdTypesAreCompatible(QualType lhs, QualType rhs,
                                                   bool compare) {
  // Allow id<P..> and an 'id' or void* type in all cases.
  if (lhs->isVoidPointerType() ||
      lhs->isObjCIdType() || lhs->isObjCClassType())
    return true;
  else if (rhs->isVoidPointerType() ||
           rhs->isObjCIdType() || rhs->isObjCClassType())
    return true;
  // Case 1: the LHS is the qualified-id type, i.e. id<P...> = rhs.
  if (const ObjCObjectPointerType *lhsQID = lhs->getAsObjCQualifiedIdType()) {
    const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
    if (!rhsOPT) return false;
    if (rhsOPT->qual_empty()) {
      // If the RHS is a unqualified interface pointer "NSString*",
      // make sure we check the class hierarchy.
      if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) {
        for (auto *I : lhsQID->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (!rhsID->ClassImplementsProtocol(I, true))
            return false;
        }
      }
      // If there are no qualifiers and no interface, we have an 'id'.
      return true;
    }
    // Both the right and left sides have qualifiers. Every LHS protocol must
    // be satisfied either by a RHS protocol or by the RHS interface itself.
    for (auto *lhsProto : lhsQID->quals()) {
      bool match = false;
      // when comparing an id<P> on lhs with a static type on rhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      for (auto *rhsProto : rhsOPT->quals()) {
        if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
          match = true;
          break;
        }
      }
      // If the RHS is a qualified interface pointer "NSString<P>*",
      // make sure we check the class hierarchy.
      if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) {
        for (auto *I : lhsQID->quals()) {
          // when comparing an id<P> on lhs with a static type on rhs,
          // see if static class implements all of id's protocols, directly or
          // through its super class and categories.
          if (rhsID->ClassImplementsProtocol(I, true)) {
            match = true;
            break;
          }
        }
      }
      if (!match)
        return false;
    }
    return true;
  }
  // Case 2: the RHS must be the qualified-id type, i.e. lhs = id<P...>.
  const ObjCObjectPointerType *rhsQID = rhs->getAsObjCQualifiedIdType();
  assert(rhsQID && "One of the LHS/RHS should be id<x>");
  if (const ObjCObjectPointerType *lhsOPT =
        lhs->getAsObjCInterfacePointerType()) {
    // If both the right and left sides have qualifiers.
    for (auto *lhsProto : lhsOPT->quals()) {
      bool match = false;
      // when comparing an id<P> on rhs with a static type on lhs,
      // see if static class implements all of id's protocols, directly or
      // through its super class and categories.
      // First, lhs protocols in the qualifier list must be found, direct
      // or indirect in rhs's qualifier list or it is a mismatch.
      for (auto *rhsProto : rhsQID->quals()) {
        if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
            (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
          match = true;
          break;
        }
      }
      if (!match)
        return false;
    }
    // Static class's protocols, or its super class or category protocols
    // must be found, direct or indirect in rhs's qualifier list or it is a
    // mismatch.
    if (ObjCInterfaceDecl *lhsID = lhsOPT->getInterfaceDecl()) {
      llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
      CollectInheritedProtocols(lhsID, LHSInheritedProtocols);
      // This is rather dubious but matches gcc's behavior. If lhs has
      // no type qualifier and its class has no static protocol(s)
      // assume that it is mismatch.
      if (LHSInheritedProtocols.empty() && lhsOPT->qual_empty())
        return false;
      for (auto *lhsProto : LHSInheritedProtocols) {
        bool match = false;
        for (auto *rhsProto : rhsQID->quals()) {
          if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
              (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
            match = true;
            break;
          }
        }
        if (!match)
          return false;
      }
    }
    return true;
  }
  return false;
}
/// canAssignObjCInterfaces - Return true if the two interface types are
/// compatible for assignment from RHS to LHS. This handles validation of any
/// protocol qualifiers on the LHS or RHS.
///
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
const ObjCObjectPointerType *RHSOPT) {
const ObjCObjectType* LHS = LHSOPT->getObjectType();
const ObjCObjectType* RHS = RHSOPT->getObjectType();
// If either type represents the built-in 'id' or 'Class' types, return true.
if (LHS->isObjCUnqualifiedIdOrClass() ||
RHS->isObjCUnqualifiedIdOrClass())
return true;
// Function object that propagates a successful result or handles
// __kindof types.
auto finish = [&](bool succeeded) -> bool {
if (succeeded)
return true;
if (!RHS->isKindOfType())
return false;
// Strip off __kindof and protocol qualifiers, then check whether
// we can assign the other way.
return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this),
LHSOPT->stripObjCKindOfTypeAndQuals(*this));
};
if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) {
return finish(ObjCQualifiedIdTypesAreCompatible(QualType(LHSOPT,0),
QualType(RHSOPT,0),
false));
}
if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) {
return finish(ObjCQualifiedClassTypesAreCompatible(QualType(LHSOPT,0),
QualType(RHSOPT,0)));
}
// If we have 2 user-defined types, fall into that path.
if (LHS->getInterface() && RHS->getInterface()) {
return finish(canAssignObjCInterfaces(LHS, RHS));
}
return false;
}
/// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
/// for providing type-safety for objective-c pointers used to pass/return
/// arguments in block literals. When passed as arguments, passing 'A*' where
/// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is
/// not OK. For the return type, the opposite is not OK.
bool ASTContext::canAssignObjCInterfacesInBlockPointer(
    const ObjCObjectPointerType *LHSOPT, const ObjCObjectPointerType *RHSOPT,
    bool BlockReturnType) {
  // On failure, if the "expected" side (the RHS for a block return type,
  // otherwise the LHS) is a __kindof type, strip __kindof and protocol
  // qualifiers and retry with the direction reversed.
  auto retryStripped = [&](bool succeeded) -> bool {
    if (succeeded)
      return true;
    const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
    if (!Expected->isKindOfType())
      return false;
    return canAssignObjCInterfacesInBlockPointer(
        RHSOPT->stripObjCKindOfTypeAndQuals(*this),
        LHSOPT->stripObjCKindOfTypeAndQuals(*this), BlockReturnType);
  };

  if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
    return true;

  if (LHSOPT->isObjCBuiltinType())
    return retryStripped(RHSOPT->isObjCBuiltinType() ||
                         RHSOPT->isObjCQualifiedIdType());

  if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType())
    return retryStripped(ObjCQualifiedIdTypesAreCompatible(
        QualType(LHSOPT, 0), QualType(RHSOPT, 0), false));

  const ObjCInterfaceType *LHS = LHSOPT->getInterfaceType();
  const ObjCInterfaceType *RHS = RHSOPT->getInterfaceType();
  if (LHS && RHS) { // Two user-defined interface types.
    if (LHS == RHS)
      return true;
    // Which direction of the subclass relationship is acceptable depends on
    // whether this is a block return type or a block parameter.
    if (LHS->getDecl()->isSuperClassOf(RHS->getDecl()))
      return retryStripped(BlockReturnType);
    if (RHS->getDecl()->isSuperClassOf(LHS->getDecl()))
      return retryStripped(!BlockReturnType);
  }
  return false;
}
/// Name-based ordering predicate for Objective-C protocols, suitable for
/// use with llvm::array_pod_sort.
static int __cdecl compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, // HLSL Change - __cdecl
                                              ObjCProtocolDecl * const *rhs) {
  StringRef LHSName = (*lhs)->getName();
  StringRef RHSName = (*rhs)->getName();
  return LHSName.compare(RHSName);
}
/// getIntersectionOfProtocols - This routine finds the intersection of set
/// of protocols inherited from two distinct objective-c pointer objects with
/// the given common base.
/// It is used to build composite qualifier list of the composite type of
/// the conditional expression involving two objective-c pointer objects.
/// Protocols already implied by \p CommonBase are dropped, and the result
/// in \p IntersectionSet is sorted by protocol name.
static
void getIntersectionOfProtocols(ASTContext &Context,
                                const ObjCInterfaceDecl *CommonBase,
                                const ObjCObjectPointerType *LHSOPT,
                                const ObjCObjectPointerType *RHSOPT,
      SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) {
  const ObjCObjectType* LHS = LHSOPT->getObjectType();
  const ObjCObjectType* RHS = RHSOPT->getObjectType();
  assert(LHS->getInterface() && "LHS must have an interface base");
  assert(RHS->getInterface() && "RHS must have an interface base");
  // Add all of the protocols for the LHS.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;
  // Start with the protocol qualifiers.
  for (auto proto : LHS->quals()) {
    Context.CollectInheritedProtocols(proto, LHSProtocolSet);
  }
  // Also add the protocols associated with the LHS interface.
  Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet);
  // Add all of the protocols for the RHS.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;
  // Start with the protocol qualifiers.
  for (auto proto : RHS->quals()) {
    Context.CollectInheritedProtocols(proto, RHSProtocolSet);
  }
  // Also add the protocols associated with the RHS interface.
  Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet);
  // Compute the intersection of the collected protocol sets.
  for (auto proto : LHSProtocolSet) {
    if (RHSProtocolSet.count(proto))
      IntersectionSet.push_back(proto);
  }
  // Compute the set of protocols that is implied by either the common type or
  // the protocols within the intersection.
  llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols;
  Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols);
  // Remove any implied protocols from the list of inherited protocols.
  if (!ImpliedProtocols.empty()) {
    IntersectionSet.erase(
      std::remove_if(IntersectionSet.begin(),
                     IntersectionSet.end(),
                     [&](ObjCProtocolDecl *proto) -> bool {
                       return ImpliedProtocols.count(proto) > 0;
                     }),
      IntersectionSet.end());
  }
  // Sort the remaining protocols by name so the composite type is canonical.
  llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(),
                       compareObjCProtocolsByName);
}
/// Determine whether the first type is a subtype of the second.
static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs,
                                     QualType rhs) {
  const ObjCObjectPointerType *lhsPtr = lhs->getAs<ObjCObjectPointerType>();
  const ObjCObjectPointerType *rhsPtr = rhs->getAs<ObjCObjectPointerType>();
  const BlockPointerType *lhsBlk = lhs->getAs<BlockPointerType>();
  const BlockPointerType *rhsBlk = rhs->getAs<BlockPointerType>();

  // Most common case: both sides are object pointers.
  if (lhsPtr && rhsPtr)
    return ctx.canAssignObjCInterfaces(lhsPtr, rhsPtr);

  // Both sides are block pointers.
  if (lhsBlk && rhsBlk)
    return ctx.typesAreBlockPointerCompatible(lhs, rhs);

  // An unqualified 'id' on one side accepts a block on the other.
  if (lhsPtr && lhsPtr->isObjCIdType() && rhsBlk)
    return true;
  if (rhsPtr && rhsPtr->isObjCIdType() && lhsBlk)
    return true;

  return false;
}
// Check that the given Objective-C type argument lists are equivalent,
// honoring the variance declared on each of the interface's type parameters.
static bool sameObjCTypeArgs(ASTContext &ctx,
                             const ObjCInterfaceDecl *iface,
                             ArrayRef<QualType> lhsArgs,
                             ArrayRef<QualType> rhsArgs,
                             bool stripKindOf) {
  if (lhsArgs.size() != rhsArgs.size())
    return false;

  ObjCTypeParamList *typeParams = iface->getTypeParamList();
  for (unsigned idx = 0, count = lhsArgs.size(); idx != count; ++idx) {
    QualType lhsArg = lhsArgs[idx];
    QualType rhsArg = rhsArgs[idx];
    if (ctx.hasSameType(lhsArg, rhsArg))
      continue;

    switch (typeParams->begin()[idx]->getVariance()) {
    case ObjCTypeParamVariance::Invariant:
      // Mismatched invariant arguments may still be acceptable once __kindof
      // is stripped, when the caller asked for that.
      if (stripKindOf &&
          ctx.hasSameType(lhsArg.stripObjCKindOfType(ctx),
                          rhsArg.stripObjCKindOfType(ctx)))
        break;
      return false;

    case ObjCTypeParamVariance::Covariant:
      // lhs must be a subtype of rhs.
      if (!canAssignObjCObjectTypes(ctx, lhsArg, rhsArg))
        return false;
      break;

    case ObjCTypeParamVariance::Contravariant:
      // rhs must be a subtype of lhs.
      if (!canAssignObjCObjectTypes(ctx, rhsArg, lhsArg))
        return false;
      break;
    }
  }

  return true;
}
/// areCommonBaseCompatible - Compute the composite object pointer type for
/// two Objective-C object pointers by finding their nearest common base
/// class, merging any type arguments, and intersecting protocol lists.
/// Returns a null QualType when the two pointers share no common base.
QualType ASTContext::areCommonBaseCompatible(
           const ObjCObjectPointerType *Lptr,
           const ObjCObjectPointerType *Rptr) {
  const ObjCObjectType *LHS = Lptr->getObjectType();
  const ObjCObjectType *RHS = Rptr->getObjectType();
  const ObjCInterfaceDecl* LDecl = LHS->getInterface();
  const ObjCInterfaceDecl* RDecl = RHS->getInterface();
  // Both sides must name an interface for a common base to exist.
  if (!LDecl || !RDecl)
    return QualType();
  // Follow the left-hand side up the class hierarchy until we either hit a
  // root or find the RHS. Record the ancestors in case we don't find it.
  llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4>
    LHSAncestors;
  while (true) {
    // Record this ancestor. We'll need this if the common type isn't in the
    // path from the LHS to the root.
    LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS;
    if (declaresSameEntity(LHS->getInterface(), RDecl)) {
      // Get the type arguments.
      ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(*this, LHS->getInterface(),
                              LHS->getTypeArgs(), RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return QualType();
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        LHSTypeArgs = { };
        anyChanges = true;
      }
      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr,
                                 Protocols);
      if (!Protocols.empty())
        anyChanges = true;
      // If anything in the LHS will have changed, build a new result type.
      if (anyChanges) {
        QualType Result = getObjCInterfaceType(LHS->getInterface());
        Result = getObjCObjectType(Result, LHSTypeArgs, Protocols,
                                   LHS->isKindOfType());
        return getObjCObjectPointerType(Result);
      }
      return getObjCObjectPointerType(QualType(LHS, 0));
    }
    // Find the superclass.
    QualType LHSSuperType = LHS->getSuperClassType();
    if (LHSSuperType.isNull())
      break;
    LHS = LHSSuperType->castAs<ObjCObjectType>();
  }
  // We didn't find anything by following the LHS to its root; now check
  // the RHS against the cached set of ancestors.
  while (true) {
    auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl());
    if (KnownLHS != LHSAncestors.end()) {
      // Found the common base; merge against the LHS ancestor at that level.
      LHS = KnownLHS->second;
      // Get the type arguments.
      ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten();
      bool anyChanges = false;
      if (LHS->isSpecialized() && RHS->isSpecialized()) {
        // Both have type arguments, compare them.
        if (!sameObjCTypeArgs(*this, LHS->getInterface(),
                              LHS->getTypeArgs(), RHS->getTypeArgs(),
                              /*stripKindOf=*/true))
          return QualType();
      } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
        // If only one has type arguments, the result will not have type
        // arguments.
        RHSTypeArgs = { };
        anyChanges = true;
      }
      // Compute the intersection of protocols.
      SmallVector<ObjCProtocolDecl *, 8> Protocols;
      getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr,
                                 Protocols);
      if (!Protocols.empty())
        anyChanges = true;
      // If anything in the RHS will have changed, build a new result type.
      if (anyChanges) {
        QualType Result = getObjCInterfaceType(RHS->getInterface());
        Result = getObjCObjectType(Result, RHSTypeArgs, Protocols,
                                   RHS->isKindOfType());
        return getObjCObjectPointerType(Result);
      }
      return getObjCObjectPointerType(QualType(RHS, 0));
    }
    // Find the superclass of the RHS.
    QualType RHSSuperType = RHS->getSuperClassType();
    if (RHSSuperType.isNull())
      break;
    RHS = RHSSuperType->castAs<ObjCObjectType>();
  }
  return QualType();
}
/// canAssignObjCInterfaces - Return true if an object of type RHS may be
/// assigned to an object of type LHS: the RHS class must be a subclass of
/// the LHS class, the RHS must satisfy the LHS's protocol qualifiers, and
/// any type arguments on a specialized LHS must be compatible.
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
                                         const ObjCObjectType *RHS) {
  assert(LHS->getInterface() && "LHS is not an interface type");
  assert(RHS->getInterface() && "RHS is not an interface type");
  // Verify that the base decls are compatible: the RHS must be a subclass of
  // the LHS.
  ObjCInterfaceDecl *LHSInterface = LHS->getInterface();
  bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface());
  if (!IsSuperClass)
    return false;
  // If the LHS has protocol qualifiers, determine whether all of them are
  // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the
  // LHS).
  if (LHS->getNumProtocols() > 0) {
    // OK if conversion of LHS to SuperClass results in narrowing of types
    // ; i.e., SuperClass may implement at least one of the protocols
    // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok.
    // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
    CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
    // Also, if RHS has explicit qualifiers, include them for comparing with
    // LHS's qualifiers.
    for (auto *RHSPI : RHS->quals())
      CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
    // If there are no protocols associated with RHS, it is not a match.
    if (SuperClassInheritedProtocols.empty())
      return false;
    // Every LHS protocol must be reachable from some RHS protocol by name.
    for (const auto *LHSProto : LHS->quals()) {
      bool SuperImplementsProtocol = false;
      for (auto *SuperClassProto : SuperClassInheritedProtocols)
        if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
          SuperImplementsProtocol = true;
          break;
        }
      if (!SuperImplementsProtocol)
        return false;
    }
  }
  // If the LHS is specialized, we may need to check type arguments.
  if (LHS->isSpecialized()) {
    // Follow the superclass chain until we've matched the LHS class in the
    // hierarchy. This substitutes type arguments through.
    const ObjCObjectType *RHSSuper = RHS;
    while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
      RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();
    // If the RHS is specialized, compare type arguments.
    if (RHSSuper->isSpecialized() &&
        !sameObjCTypeArgs(*this, LHS->getInterface(),
                          LHS->getTypeArgs(), RHSSuper->getTypeArgs(),
                          /*stripKindOf=*/true)) {
      return false;
    }
  }
  return true;
}
/// areComparableObjCPointerTypes - Two Objective-C pointers are comparable
/// when their pointee types are assignable in at least one direction.
bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
  const ObjCObjectPointerType *LHSPtr = LHS->getAs<ObjCObjectPointerType>();
  if (!LHSPtr)
    return false;
  const ObjCObjectPointerType *RHSPtr = RHS->getAs<ObjCObjectPointerType>();
  if (!RHSPtr)
    return false;
  if (canAssignObjCInterfaces(LHSPtr, RHSPtr))
    return true;
  return canAssignObjCInterfaces(RHSPtr, LHSPtr);
}
/// canBindObjCObjectType - Determine whether an object of type From can be
/// bound to a reference/parameter of type To, by testing assignability of
/// the corresponding object pointer types.
bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
  const ObjCObjectPointerType *ToPtr =
      getObjCObjectPointerType(To)->getAs<ObjCObjectPointerType>();
  const ObjCObjectPointerType *FromPtr =
      getObjCObjectPointerType(From)->getAs<ObjCObjectPointerType>();
  return canAssignObjCInterfaces(ToPtr, FromPtr);
}
/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
/// both shall have the identically qualified version of a compatible type.
/// C99 6.2.7p1: Two types have compatible types if their types are the
/// same. See 6.7.[2,3,5] for additional rules.
bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
                                    bool CompareUnqualified) {
  // C++ uses a stricter notion: the types must be the same.
  if (getLangOpts().CPlusPlus)
    return hasSameType(LHS, RHS);
  // In C, two types are compatible exactly when they can be merged.
  QualType Merged = mergeTypes(LHS, RHS, false, CompareUnqualified);
  return !Merged.isNull();
}
/// propertyTypesAreCompatible - Objective-C property type compatibility
/// currently mirrors ordinary type compatibility.
bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
  return typesAreCompatible(LHS, RHS);
}
/// typesAreBlockPointerCompatible - Two block pointer types are compatible
/// when they can be merged under the stricter block-pointer merge rules.
bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
  return !mergeTypes(LHS, RHS, true).isNull();
}
/// mergeTransparentUnionType - if T is a transparent union type and a member
/// of T is compatible with SubType, return the merged type, else return
/// QualType()
QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
                                               bool OfBlockPointer,
                                               bool Unqualified) {
  const RecordType *UT = T->getAsUnionType();
  if (!UT)
    return QualType();
  RecordDecl *UD = UT->getDecl();
  if (!UD->hasAttr<TransparentUnionAttr>())
    return QualType();
  // A transparent union is compatible with SubType if any member is.
  for (const auto *Field : UD->fields()) {
    QualType FieldTy = Field->getType().getUnqualifiedType();
    QualType Merged = mergeTypes(FieldTy, SubType, OfBlockPointer, Unqualified);
    if (!Merged.isNull())
      return Merged;
  }
  return QualType();
}
/// mergeFunctionParameterTypes - merge two types which appear as function
/// parameter types
QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
                                                 bool OfBlockPointer,
                                                 bool Unqualified) {
  // GNU extension: two types are compatible as function arguments when one
  // of them is a transparent union and the other is compatible with one of
  // the union's members. Try the union interpretation in both directions
  // before falling back to the ordinary merge.
  QualType Merged =
      mergeTransparentUnionType(lhs, rhs, OfBlockPointer, Unqualified);
  if (Merged.isNull())
    Merged = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, Unqualified);
  if (Merged.isNull())
    Merged = mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
  return Merged;
}
/// mergeFunctionTypes - Merge two function types into their C-compatible
/// composite type, or return a null QualType when they are incompatible.
/// Preserves typedef/sugar information by returning lhs or rhs unchanged
/// when the merged type is canonically identical to that side.
QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
                                        bool OfBlockPointer,
                                        bool Unqualified) {
  const FunctionType *lbase = lhs->getAs<FunctionType>();
  const FunctionType *rbase = rhs->getAs<FunctionType>();
  const FunctionProtoType *lproto = dyn_cast<FunctionProtoType>(lbase);
  const FunctionProtoType *rproto = dyn_cast<FunctionProtoType>(rbase);
  // Track whether the merged type matches lhs/rhs exactly, so we can return
  // the sugared original instead of building a new type.
  bool allLTypes = true;
  bool allRTypes = true;
  // Check return type
  QualType retType;
  if (OfBlockPointer) {
    QualType RHS = rbase->getReturnType();
    QualType LHS = lbase->getReturnType();
    bool UnqualifiedResult = Unqualified;
    if (!UnqualifiedResult)
      UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
    retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true);
  }
  else
    retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false,
                         Unqualified);
  if (retType.isNull()) return QualType();
  if (Unqualified)
    retType = retType.getUnqualifiedType();
  CanQualType LRetType = getCanonicalType(lbase->getReturnType());
  CanQualType RRetType = getCanonicalType(rbase->getReturnType());
  if (Unqualified) {
    LRetType = LRetType.getUnqualifiedType();
    RRetType = RRetType.getUnqualifiedType();
  }
  if (getCanonicalType(retType) != LRetType)
    allLTypes = false;
  if (getCanonicalType(retType) != RRetType)
    allRTypes = false;
  // FIXME: double check this
  // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
  //                           rbase->getRegParmAttr() != 0 &&
  //                           lbase->getRegParmAttr() != rbase->getRegParmAttr()?
  FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
  FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();
  // Compatible functions must have compatible calling conventions
  if (lbaseInfo.getCC() != rbaseInfo.getCC())
    return QualType();
  // Regparm is part of the calling convention.
  if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
    return QualType();
  if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
    return QualType();
  if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
    return QualType();
  // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'.
  bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
  if (lbaseInfo.getNoReturn() != NoReturn)
    allLTypes = false;
  if (rbaseInfo.getNoReturn() != NoReturn)
    allRTypes = false;
  FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn);
  if (lproto && rproto) { // two C99 style function prototypes
    assert(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec() &&
           "C++ shouldn't be here");
    // Compatible functions must have the same number of parameters
    if (lproto->getNumParams() != rproto->getNumParams())
      return QualType();
    // Variadic and non-variadic functions aren't compatible
    if (lproto->isVariadic() != rproto->isVariadic())
      return QualType();
    if (lproto->getTypeQuals() != rproto->getTypeQuals())
      return QualType();
#if 0 // HLSL Change Starts - no ObjC support
    if (LangOpts.ObjCAutoRefCount &&
        !FunctionTypesMatchOnNSConsumedAttrs(rproto, lproto))
      return QualType();
#endif // HLSL Change Ends - no ObjC support
    // Check parameter type compatibility
    SmallVector<QualType, 10> types;
    for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
      QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
      QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
      QualType paramType = mergeFunctionParameterTypes(
          lParamType, rParamType, OfBlockPointer, Unqualified);
      if (paramType.isNull())
        return QualType();
      if (Unqualified)
        paramType = paramType.getUnqualifiedType();
      types.push_back(paramType);
      if (Unqualified) {
        lParamType = lParamType.getUnqualifiedType();
        rParamType = rParamType.getUnqualifiedType();
      }
      // Note whether the merged parameter differs from either original.
      if (getCanonicalType(paramType) != getCanonicalType(lParamType))
        allLTypes = false;
      if (getCanonicalType(paramType) != getCanonicalType(rParamType))
        allRTypes = false;
    }
    if (allLTypes) return lhs;
    if (allRTypes) return rhs;
    FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    return getFunctionType(retType, types, EPI, lproto->getParamMods());
  }
  // Exactly one side has a prototype; that side cannot match the other.
  if (lproto) allRTypes = false;
  if (rproto) allLTypes = false;
  const FunctionProtoType *proto = lproto ? lproto : rproto;
  if (proto) {
    assert(!proto->hasExceptionSpec() && "C++ shouldn't be here");
    if (proto->isVariadic()) return QualType();
    // Check that the types are compatible with the types that
    // would result from default argument promotions (C99 6.7.5.3p15).
    // The only types actually affected are promotable integer
    // types and floats, which would be passed as a different
    // type depending on whether the prototype is visible.
    for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
      QualType paramTy = proto->getParamType(i);
      // Look at the converted type of enum types, since that is the type used
      // to pass enum values.
      if (const EnumType *Enum = paramTy->getAs<EnumType>()) {
        paramTy = Enum->getDecl()->getIntegerType();
        if (paramTy.isNull())
          return QualType();
      }
      if (paramTy->isPromotableIntegerType() ||
          getCanonicalType(paramTy).getUnqualifiedType() == FloatTy)
        return QualType();
    }
    if (allLTypes) return lhs;
    if (allRTypes) return rhs;
    FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
    EPI.ExtInfo = einfo;
    return getFunctionType(retType, proto->getParamTypes(), EPI, proto->getParamMods());
  }
  if (allLTypes) return lhs;
  if (allRTypes) return rhs;
  // Neither side has a prototype: merge as a no-proto function type.
  return getFunctionNoProtoType(retType, einfo);
}
/// Given that we have an enum type and a non-enum type, try to merge them.
static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
                                     QualType other, bool isBlockReturnType) {
  // C99 6.7.2.2p4: an enumerated type is compatible with char, a signed
  // integer type, or an unsigned integer type. Compatibility is judged
  // against the enum's underlying type, not its promotion type.
  QualType underlying = ET->getDecl()->getIntegerType();
  if (underlying.isNull())
    return QualType();
  if (Context.hasSameType(underlying, other))
    return other;
  // Block return types are more permissive: any integral type of the same
  // width is acceptable.
  if (!isBlockReturnType)
    return QualType();
  if (other->isIntegerType() &&
      Context.getTypeSize(underlying) == Context.getTypeSize(other))
    return other;
  return QualType();
}
QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
bool OfBlockPointer,
bool Unqualified, bool BlockReturnType) {
// C++ [expr]: If an expression initially has the type "reference to T", the
// type is adjusted to "T" prior to any further analysis, the expression
// designates the object or function denoted by the reference, and the
// expression is an lvalue unless the reference is an rvalue reference and
// the expression is a function call (possibly inside parentheses).
assert(!LHS->getAs<ReferenceType>() && "LHS is a reference type?");
assert(!RHS->getAs<ReferenceType>() && "RHS is a reference type?");
if (Unqualified) {
LHS = LHS.getUnqualifiedType();
RHS = RHS.getUnqualifiedType();
}
QualType LHSCan = getCanonicalType(LHS),
RHSCan = getCanonicalType(RHS);
// If two types are identical, they are compatible.
if (LHSCan == RHSCan)
return LHS;
// If the qualifiers are different, the types aren't compatible... mostly.
Qualifiers LQuals = LHSCan.getLocalQualifiers();
Qualifiers RQuals = RHSCan.getLocalQualifiers();
if (LQuals != RQuals) {
// If any of these qualifiers are different, we have a type
// mismatch.
if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
LQuals.getObjCLifetime() != RQuals.getObjCLifetime())
return QualType();
// Exactly one GC qualifier difference is allowed: __strong is
// okay if the other type has no GC qualifier but is an Objective
// C object pointer (i.e. implicitly strong by default). We fix
// this by pretending that the unqualified type was actually
// qualified __strong.
Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
return QualType();
if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) {
return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong));
}
if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) {
return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS);
}
return QualType();
}
// Okay, qualifiers are equal.
Type::TypeClass LHSClass = LHSCan->getTypeClass();
Type::TypeClass RHSClass = RHSCan->getTypeClass();
// We want to consider the two function types to be the same for these
// comparisons, just force one to the other.
if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto;
if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto;
// Same as above for arrays
if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray)
LHSClass = Type::ConstantArray;
if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray)
RHSClass = Type::ConstantArray;
// ObjCInterfaces are just specialized ObjCObjects.
if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject;
if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject;
// Canonicalize ExtVector -> Vector.
if (LHSClass == Type::ExtVector) LHSClass = Type::Vector;
if (RHSClass == Type::ExtVector) RHSClass = Type::Vector;
// If the canonical type classes don't match.
if (LHSClass != RHSClass) {
// Note that we only have special rules for turning block enum
// returns into block int returns, not vice-versa.
if (const EnumType* ETy = LHS->getAs<EnumType>()) {
return mergeEnumWithInteger(*this, ETy, RHS, false);
}
if (const EnumType* ETy = RHS->getAs<EnumType>()) {
return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType);
}
// allow block pointer type to match an 'id' type.
if (OfBlockPointer && !BlockReturnType) {
if (LHS->isObjCIdType() && RHS->isBlockPointerType())
return LHS;
if (RHS->isObjCIdType() && LHS->isBlockPointerType())
return RHS;
}
return QualType();
}
// The canonical type classes match.
switch (LHSClass) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
llvm_unreachable("Non-canonical and dependent types shouldn't get here");
case Type::Auto:
case Type::LValueReference:
case Type::RValueReference:
case Type::MemberPointer:
llvm_unreachable("C++ should never be in mergeTypes");
case Type::ObjCInterface:
case Type::IncompleteArray:
case Type::VariableArray:
case Type::FunctionProto:
case Type::ExtVector:
llvm_unreachable("Types are eliminated above");
case Type::Pointer:
{
// Merge two pointer types, while trying to preserve typedef info
QualType LHSPointee = LHS->getAs<PointerType>()->getPointeeType();
QualType RHSPointee = RHS->getAs<PointerType>()->getPointeeType();
if (Unqualified) {
LHSPointee = LHSPointee.getUnqualifiedType();
RHSPointee = RHSPointee.getUnqualifiedType();
}
QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false,
Unqualified);
if (ResultType.isNull()) return QualType();
if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
return LHS;
if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
return RHS;
return getPointerType(ResultType);
}
case Type::BlockPointer:
{
// Merge two block pointer types, while trying to preserve typedef info
QualType LHSPointee = LHS->getAs<BlockPointerType>()->getPointeeType();
QualType RHSPointee = RHS->getAs<BlockPointerType>()->getPointeeType();
if (Unqualified) {
LHSPointee = LHSPointee.getUnqualifiedType();
RHSPointee = RHSPointee.getUnqualifiedType();
}
QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer,
Unqualified);
if (ResultType.isNull()) return QualType();
if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
return LHS;
if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
return RHS;
return getBlockPointerType(ResultType);
}
case Type::Atomic:
{
// Merge two pointer types, while trying to preserve typedef info
QualType LHSValue = LHS->getAs<AtomicType>()->getValueType();
QualType RHSValue = RHS->getAs<AtomicType>()->getValueType();
if (Unqualified) {
LHSValue = LHSValue.getUnqualifiedType();
RHSValue = RHSValue.getUnqualifiedType();
}
QualType ResultType = mergeTypes(LHSValue, RHSValue, false,
Unqualified);
if (ResultType.isNull()) return QualType();
if (getCanonicalType(LHSValue) == getCanonicalType(ResultType))
return LHS;
if (getCanonicalType(RHSValue) == getCanonicalType(ResultType))
return RHS;
return getAtomicType(ResultType);
}
case Type::ConstantArray:
{
const ConstantArrayType* LCAT = getAsConstantArrayType(LHS);
const ConstantArrayType* RCAT = getAsConstantArrayType(RHS);
if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize())
return QualType();
QualType LHSElem = getAsArrayType(LHS)->getElementType();
QualType RHSElem = getAsArrayType(RHS)->getElementType();
if (Unqualified) {
LHSElem = LHSElem.getUnqualifiedType();
RHSElem = RHSElem.getUnqualifiedType();
}
QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified);
if (ResultType.isNull()) return QualType();
if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
return LHS;
if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
return RHS;
if (LCAT) return getConstantArrayType(ResultType, LCAT->getSize(),
ArrayType::ArraySizeModifier(), 0);
if (RCAT) return getConstantArrayType(ResultType, RCAT->getSize(),
ArrayType::ArraySizeModifier(), 0);
const VariableArrayType* LVAT = getAsVariableArrayType(LHS);
const VariableArrayType* RVAT = getAsVariableArrayType(RHS);
if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
return LHS;
if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
return RHS;
if (LVAT) {
// FIXME: This isn't correct! But tricky to implement because
// the array's size has to be the size of LHS, but the type
// has to be different.
return LHS;
}
if (RVAT) {
// FIXME: This isn't correct! But tricky to implement because
// the array's size has to be the size of RHS, but the type
// has to be different.
return RHS;
}
if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS;
if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS;
return getIncompleteArrayType(ResultType,
ArrayType::ArraySizeModifier(), 0);
}
case Type::FunctionNoProto:
return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified);
case Type::Record:
case Type::Enum:
return QualType();
case Type::Builtin:
// Only exactly equal builtin types are compatible, which is tested above.
return QualType();
case Type::Complex:
// Distinct complex types are incompatible.
return QualType();
case Type::Vector:
// FIXME: The merged type should be an ExtVector!
if (areCompatVectorTypes(LHSCan->getAs<VectorType>(),
RHSCan->getAs<VectorType>()))
return LHS;
return QualType();
case Type::ObjCObject: {
// Check if the types are assignment compatible.
// FIXME: This should be type compatibility, e.g. whether
// "LHS x; RHS x;" at global scope is legal.
const ObjCObjectType* LHSIface = LHS->getAs<ObjCObjectType>();
const ObjCObjectType* RHSIface = RHS->getAs<ObjCObjectType>();
if (canAssignObjCInterfaces(LHSIface, RHSIface))
return LHS;
return QualType();
}
case Type::ObjCObjectPointer: {
if (OfBlockPointer) {
if (canAssignObjCInterfacesInBlockPointer(
LHS->getAs<ObjCObjectPointerType>(),
RHS->getAs<ObjCObjectPointerType>(),
BlockReturnType))
return LHS;
return QualType();
}
if (canAssignObjCInterfaces(LHS->getAs<ObjCObjectPointerType>(),
RHS->getAs<ObjCObjectPointerType>()))
return LHS;
return QualType();
}
}
llvm_unreachable("Invalid Type::Class!");
}
/// Determine whether two function prototypes agree, parameter by parameter,
/// on which parameters carry the ObjC ns_consumed attribute.
bool ASTContext::FunctionTypesMatchOnNSConsumedAttrs(
                   const FunctionProtoType *FromFunctionType,
                   const FunctionProtoType *ToFunctionType) {
  // Fast mismatch: one side consumes some parameter and the other none.
  if (FromFunctionType->hasAnyConsumedParams() !=
      ToFunctionType->hasAnyConsumedParams())
    return false;
  // When both sides carry per-parameter consumed bits, compare them all.
  FunctionProtoType::ExtProtoInfo FromEPI = FromFunctionType->getExtProtoInfo();
  FunctionProtoType::ExtProtoInfo ToEPI = ToFunctionType->getExtProtoInfo();
  if (!FromEPI.ConsumedParameters || !ToEPI.ConsumedParameters)
    return true;
  for (unsigned I = 0, N = FromFunctionType->getNumParams(); I != N; ++I)
    if (FromEPI.ConsumedParameters[I] != ToEPI.ConsumedParameters[I])
      return false;
  return true;
}
/// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and
/// 'RHS' attributes and returns the merged version; including for function
/// return types.  Returns a null QualType when the GC qualifiers cannot be
/// reconciled.
QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
  QualType LHSCan = getCanonicalType(LHS),
           RHSCan = getCanonicalType(RHS);
  // If two types are identical, they are compatible.
  if (LHSCan == RHSCan)
    return LHS;
  if (RHSCan->isFunctionType()) {
    // Function types merge through their return types; pairing a function
    // type with a non-function type is a mismatch.
    if (!LHSCan->isFunctionType())
      return QualType();
    QualType OldReturnType =
        cast<FunctionType>(RHSCan.getTypePtr())->getReturnType();
    QualType NewReturnType =
        cast<FunctionType>(LHSCan.getTypePtr())->getReturnType();
    // Recursively merge the GC qualifiers of the return types.
    QualType ResReturnType =
        mergeObjCGCQualifiers(NewReturnType, OldReturnType);
    if (ResReturnType.isNull())
      return QualType();
    if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
      // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
      // In either case, use OldReturnType to build the new function type.
      const FunctionType *F = LHS->getAs<FunctionType>();
      // NOTE(review): cast<> asserts rather than returning null, so this
      // condition is effectively always true and a no-proto function type
      // reaching here would assert; presumably only prototyped functions
      // take this path — confirm before relying on it.
      if (const FunctionProtoType *FPT = cast<FunctionProtoType>(F)) {
        FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
        EPI.ExtInfo = getFunctionExtInfo(LHS);
        QualType ResultType =
            getFunctionType(OldReturnType, FPT->getParamTypes(), EPI, ArrayRef<hlsl::ParameterModifier>()); // HLSL Change
        return ResultType;
      }
    }
    return QualType();
  }
  // If the qualifiers are different, the types can still be merged.
  Qualifiers LQuals = LHSCan.getLocalQualifiers();
  Qualifiers RQuals = RHSCan.getLocalQualifiers();
  if (LQuals != RQuals) {
    // If any of these qualifiers are different, we have a type mismatch.
    if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
        LQuals.getAddressSpace() != RQuals.getAddressSpace())
      return QualType();
    // Exactly one GC qualifier difference is allowed: __strong is
    // okay if the other type has no GC qualifier but is an Objective
    // C object pointer (i.e. implicitly strong by default). We fix
    // this by pretending that the unqualified type was actually
    // qualified __strong.
    Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
    Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
    assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
    // __weak never merges with anything else.
    if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
      return QualType();
    if (GC_L == Qualifiers::Strong)
      return LHS;
    if (GC_R == Qualifiers::Strong)
      return RHS;
    return QualType();
  }
  // Same qualifiers: recurse into the pointees of ObjC object pointers.
  if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
    QualType LHSBaseQT = LHS->getAs<ObjCObjectPointerType>()->getPointeeType();
    QualType RHSBaseQT = RHS->getAs<ObjCObjectPointerType>()->getPointeeType();
    QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT);
    if (ResQT == LHSBaseQT)
      return LHS;
    if (ResQT == RHSBaseQT)
      return RHS;
  }
  return QualType();
}
//===----------------------------------------------------------------------===//
// Integer Predicates
//===----------------------------------------------------------------------===//
/// Return the width in bits used for arithmetic on T, looking through enums
/// to their underlying integer type.  Booleans report a width of one bit
/// regardless of their storage size.
unsigned ASTContext::getIntWidth(QualType T) const {
  if (const EnumType *EnumTy = T->getAs<EnumType>())
    T = EnumTy->getDecl()->getIntegerType();
  // Everything else uses the standard type sizing method.
  return T->isBooleanType() ? 1 : (unsigned)getTypeSize(T);
}
/// Map a signed integer type (or vector/enum thereof) to its unsigned
/// counterpart, e.g. 'int' -> 'unsigned int'.
QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
  assert(T->hasSignedIntegerRepresentation() && "Unexpected type");
  // Turn <4 x signed int> -> <4 x unsigned int>
  if (const VectorType *VTy = T->getAs<VectorType>())
    return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
                         VTy->getNumElements(), VTy->getVectorKind());
  // For enums, we return the unsigned version of the base type.
  if (const EnumType *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();
  const BuiltinType *BTy = T->getAs<BuiltinType>();
  assert(BTy && "Unexpected signed integer type");
  switch (BTy->getKind()) {
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return UnsignedCharTy;
  case BuiltinType::Short:
    return UnsignedShortTy;
  case BuiltinType::Int:
    return UnsignedIntTy;
  case BuiltinType::Long:
    return UnsignedLongTy;
  case BuiltinType::LongLong:
    return UnsignedLongLongTy;
  case BuiltinType::Int128:
    return UnsignedInt128Ty;
  case BuiltinType::LitInt: // HLSL Change: literal ints map to unsigned int.
    return UnsignedIntTy;
  default:
    llvm_unreachable("Unexpected signed integer type");
  }
}
// Out-of-line destructor definition for ASTMutationListener.
ASTMutationListener::~ASTMutationListener() { }
// Default (no-op) implementation of the deduced-return-type notification;
// listener implementations may override it to observe the update.
void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
                                            QualType ReturnType) {}
//===----------------------------------------------------------------------===//
// Builtin Type Computation
//===----------------------------------------------------------------------===//
/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
/// pointer over the consumed characters. This returns the resultant type. If
/// AllowTypeModifiers is false then modifier like * are not parsed, just basic
/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of
/// a vector of "i*".
///
/// RequiresICE is filled in on return to indicate whether the value is required
/// to be an Integer Constant Expression.
///
/// On failure, Error is set and a null QualType is returned.
static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
                                  ASTContext::GetBuiltinTypeError &Error,
                                  bool &RequiresICE,
                                  bool AllowTypeModifiers) {
  // Modifiers.
  int HowLong = 0;
  bool Signed = false, Unsigned = false;
  RequiresICE = false;
  // Read the prefixed modifiers first.
  bool Done = false;
  while (!Done) {
    switch (*Str++) {
    default: Done = true; --Str; break;
    case 'I':
      // This argument must be an integer constant expression.
      RequiresICE = true;
      break;
    case 'S':
      assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
      assert(!Signed && "Can't use 'S' modifier multiple times!");
      Signed = true;
      break;
    case 'U':
      assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
      assert(!Unsigned && "Can't use 'U' modifier multiple times!");
      Unsigned = true;
      break;
    case 'L':
      assert(HowLong <= 2 && "Can't have LLLL modifier");
      ++HowLong;
      break;
    case 'W':
      // This modifier represents int64 type.
      assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!");
      // Translate to the equivalent number of 'L's for this target.
      switch (Context.getTargetInfo().getInt64Type()) {
      default:
        llvm_unreachable("Unexpected integer type");
      case TargetInfo::SignedLong:
        HowLong = 1;
        break;
      case TargetInfo::SignedLongLong:
        HowLong = 2;
        break;
      }
    }
  }
  QualType Type;
  // Read the base type.
  switch (*Str++) {
  default: llvm_unreachable("Unknown builtin type letter!");
  case 'v': // void
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'v'!");
    Type = Context.VoidTy;
    break;
  case 'h': // half
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'h'!");
    Type = Context.HalfTy;
    break;
  case 'f': // float
    assert(HowLong == 0 && !Signed && !Unsigned &&
           "Bad modifiers used with 'f'!");
    Type = Context.FloatTy;
    break;
  case 'd': // double (or long double with one 'L')
    assert(HowLong < 2 && !Signed && !Unsigned &&
           "Bad modifiers used with 'd'!");
    if (HowLong)
      Type = Context.LongDoubleTy;
    else
      Type = Context.DoubleTy;
    break;
  case 's': // short
    assert(HowLong == 0 && "Bad modifiers used with 's'!");
    if (Unsigned)
      Type = Context.UnsignedShortTy;
    else
      Type = Context.ShortTy;
    break;
  case 'i':
    // 'i' scales with the 'L'/'W' modifiers: int, long, long long, __int128.
    if (HowLong == 3)
      Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
    else if (HowLong == 2)
      Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
    else if (HowLong == 1)
      Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
    else
      Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
    break;
  case 'c': // char (plain, signed, or unsigned)
    assert(HowLong == 0 && "Bad modifiers used with 'c'!");
    if (Signed)
      Type = Context.SignedCharTy;
    else if (Unsigned)
      Type = Context.UnsignedCharTy;
    else
      Type = Context.CharTy;
    break;
  case 'b': // boolean
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
    Type = Context.BoolTy;
    break;
  case 'z': // size_t.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
    Type = Context.getSizeType();
    break;
  case 'F': // constant CFString
    Type = Context.getCFConstantStringType();
    break;
  case 'G': // ObjC 'id'
    Type = Context.getObjCIdType();
    break;
  case 'H': // ObjC 'SEL'
    Type = Context.getObjCSelType();
    break;
  case 'M': // ObjC 'super' type
    Type = Context.getObjCSuperType();
    break;
  case 'a': // __builtin_va_list
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    break;
  case 'A':
    // This is a "reference" to a va_list; however, what exactly
    // this means depends on how va_list is defined. There are two
    // different kinds of va_list: ones passed by value, and ones
    // passed by reference. An example of a by-value va_list is
    // x86, where va_list is a char*. An example of by-ref va_list
    // is x86-64, where va_list is a __va_list_tag[1]. For x86,
    // we want this argument to be a char*&; for x86-64, we want
    // it to be a __va_list_tag*.
    Type = Context.getBuiltinVaListType();
    assert(!Type.isNull() && "builtin va list type not initialized!");
    if (Type->isArrayType())
      Type = Context.getArrayDecayedType(Type);
    else
      Type = Context.getLValueReferenceType(Type);
    break;
  case 'V': {
    // Generic vector: decimal element count, then the element type.
    char *End;
    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");
    Str = End;
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
                                             RequiresICE, false);
    assert(!RequiresICE && "Can't require vector ICE");
    // TODO: No way to make AltiVec vectors in builtins yet.
    Type = Context.getVectorType(ElementType, NumElements,
                                 VectorType::GenericVector);
    break;
  }
  case 'E': {
    // Extended vector: decimal element count, then the element type.
    char *End;
    unsigned NumElements = strtoul(Str, &End, 10);
    assert(End != Str && "Missing vector size");
    Str = End;
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             false);
    Type = Context.getExtVectorType(ElementType, NumElements);
    break;
  }
  case 'X': {
    // _Complex of the following element type.
    QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
                                             false);
    assert(!RequiresICE && "Can't require complex ICE");
    Type = Context.getComplexType(ElementType);
    break;
  }
  case 'Y' : {
    // ptrdiff_t.
    Type = Context.getPointerDiffType();
    break;
  }
  case 'P':
    // The FILE type; only available once the declaring header has been seen.
    Type = Context.getFILEType();
    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_stdio;
      return QualType();
    }
    break;
  case 'J':
    // jmp_buf ('SJ' gives sigjmp_buf); requires the setjmp header.
    if (Signed)
      Type = Context.getsigjmp_bufType();
    else
      Type = Context.getjmp_bufType();
    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_setjmp;
      return QualType();
    }
    break;
  case 'K':
    // ucontext_t; requires the ucontext header.
    assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
    Type = Context.getucontext_tType();
    if (Type.isNull()) {
      Error = ASTContext::GE_Missing_ucontext;
      return QualType();
    }
    break;
  case 'p':
    // The target's process ID type (pid_t).
    Type = Context.getProcessIDType();
    break;
  }
  // If there are modifiers and if we're allowed to parse them, go for it.
  Done = !AllowTypeModifiers;
  while (!Done) {
    switch (char c = *Str++) {
    default: Done = true; --Str; break;
    case '*':
    case '&': {
      // Both pointers and references can have their pointee types
      // qualified with an address space.
      char *End;
      unsigned AddrSpace = strtoul(Str, &End, 10);
      if (End != Str && AddrSpace != 0) {
        Type = Context.getAddrSpaceQualType(Type, AddrSpace);
        Str = End;
      }
      if (c == '*')
        Type = Context.getPointerType(Type);
      else
        Type = Context.getLValueReferenceType(Type);
      break;
    }
    // FIXME: There's no way to have a built-in with an rvalue ref arg.
    case 'C': // const qualifier
      Type = Type.withConst();
      break;
    case 'D': // volatile qualifier
      Type = Context.getVolatileType(Type);
      break;
    case 'R': // restrict qualifier
      Type = Type.withRestrict();
      break;
    }
  }
  assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
         "Integer constant 'I' type must be an integer");
  return Type;
}
/// GetBuiltinType - Return the type for the specified builtin.
///
/// If IntegerConstantArgs is non-null, it is filled in with a bitmask of
/// argument positions that are required to be integer constant expressions.
QualType ASTContext::GetBuiltinType(unsigned Id,
                                    GetBuiltinTypeError &Error,
                                    unsigned *IntegerConstantArgs) const {
  const char *TypeStr = BuiltinInfo.GetTypeString(Id);
  SmallVector<QualType, 8> ArgTypes;
  bool RequiresICE = false;
  Error = GE_None;
  // The first descriptor in the string is the return type.
  QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error,
                                       RequiresICE, true);
  if (Error != GE_None)
    return QualType();
  assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");
  // Remaining descriptors (up to a terminating '.' or the end of the string)
  // are the parameter types.
  while (TypeStr[0] && TypeStr[0] != '.') {
    QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true);
    if (Error != GE_None)
      return QualType();
    // If this argument is required to be an IntegerConstantExpression and the
    // caller cares, fill in the bitmask we return.
    if (RequiresICE && IntegerConstantArgs)
      *IntegerConstantArgs |= 1 << ArgTypes.size();
    // Do array -> pointer decay. The builtin should use the decayed type.
    if (Ty->isArrayType())
      Ty = getArrayDecayedType(Ty);
    ArgTypes.push_back(Ty);
  }
  // NOTE(review): __GetExceptionInfo gets no type from this path; presumably
  // it is given a type elsewhere — confirm against Sema's handling.
  if (Id == Builtin::BI__GetExceptionInfo)
    return QualType();
  assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
         "'.' should only occur at end of builtin type list!");
  FunctionType::ExtInfo EI(CC_C);
  if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true);
  // A trailing '.' marks a variadic builtin.
  bool Variadic = (TypeStr[0] == '.');
  // We really shouldn't be making a no-proto type here, especially in C++.
  if (ArgTypes.empty() && Variadic)
    return getFunctionNoProtoType(ResType, EI);
  FunctionProtoType::ExtProtoInfo EPI;
  EPI.ExtInfo = EI;
  EPI.Variadic = Variadic;
  // HLSL Change: add empty parameter modifier; but this should probably be removed altogether
  return getFunctionType(ResType, ArgTypes, EPI, ArrayRef<hlsl::ParameterModifier>());
}
/// Compute the GVA linkage for FD from its visibility, template
/// specialization kind, and inline semantics.  dllimport/dllexport
/// adjustments are applied separately by adjustGVALinkageForDLLAttribute.
static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
                                             const FunctionDecl *FD) {
  if (!FD->isExternallyVisible())
    return GVA_Internal;
  // Linkage to use when the inline rules below don't demote it.
  GVALinkage External = GVA_StrongExternal;
  switch (FD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
    External = GVA_StrongExternal;
    break;
  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;
  // C++11 [temp.explicit]p10:
  //   [ Note: The intent is that an inline function that is the subject of
  //   an explicit instantiation declaration will still be implicitly
  //   instantiated when used so that the body can be considered for
  //   inlining, but that no out-of-line copy of the inline function would be
  //   generated in the translation unit. -- end note ]
  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;
  case TSK_ImplicitInstantiation:
    External = GVA_DiscardableODR;
    break;
  }
  if (!FD->isInlined())
    return External;
  if ((!Context.getLangOpts().CPlusPlus && !Context.getLangOpts().MSVCCompat &&
       !FD->hasAttr<DLLExportAttr>()) ||
      FD->hasAttr<GNUInlineAttr>()) {
    // FIXME: This doesn't match gcc's behavior for dllexport inline functions.
    // GNU or C99 inline semantics. Determine whether this symbol should be
    // externally visible.
    if (FD->isInlineDefinitionExternallyVisible())
      return External;
    // C99 inline semantics, where the symbol is not externally visible.
    return GVA_AvailableExternally;
  }
  // Functions specified with extern and inline in -fms-compatibility mode
  // forcibly get emitted. While the body of the function cannot be later
  // replaced, the function definition cannot be discarded.
  if (FD->isMSExternInline())
    return GVA_StrongODR;
  return GVA_DiscardableODR;
}
/// Apply the MSVC dllimport/dllexport rules for inline functions to an
/// already-computed GVA linkage.
/// See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
static GVALinkage adjustGVALinkageForDLLAttribute(GVALinkage L, const Decl *D) {
  if (D->hasAttr<DLLImportAttr>()) {
    // The exporting module provides the definition; ours is only a fallback.
    return (L == GVA_DiscardableODR || L == GVA_StrongODR)
               ? GVA_AvailableExternally
               : L;
  }
  if (D->hasAttr<DLLExportAttr>()) {
    // Exported inline definitions must actually be emitted.
    return L == GVA_DiscardableODR ? GVA_StrongODR : L;
  }
  return L;
}
/// Compute FD's GVA linkage, including dllimport/dllexport adjustments.
GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
  GVALinkage Basic = basicGVALinkageForFunction(*this, FD);
  return adjustGVALinkageForDLLAttribute(Basic, FD);
}
/// Compute the GVA linkage for VD from its visibility, storage, and template
/// specialization kind.  dllimport/dllexport adjustments are applied
/// separately by adjustGVALinkageForDLLAttribute.
static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
                                             const VarDecl *VD) {
  if (!VD->isExternallyVisible())
    return GVA_Internal;
  // HLSL Change Starts - samplers, textures and UAVs should be removed if
  // unused, and fit an 'available externally' model.
  if (hlsl::DXIL::ResourceClass::Invalid !=
      hlsl::GetResourceClassForType(Context, VD->getType())) {
    return GVA_AvailableExternally;
  }
  // HLSL Change Ends
  if (VD->isStaticLocal()) {
    GVALinkage StaticLocalLinkage = GVA_DiscardableODR;
    // Walk up to the nearest enclosing FunctionDecl (skipping e.g. blocks).
    const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
    while (LexicalContext && !isa<FunctionDecl>(LexicalContext))
      LexicalContext = LexicalContext->getLexicalParent();
    // Let the static local variable inherit its linkage from the nearest
    // enclosing function.
    if (LexicalContext)
      StaticLocalLinkage =
          Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext));
    // GVA_StrongODR function linkage is stronger than what we need,
    // downgrade to GVA_DiscardableODR.
    // This allows us to discard the variable if we never end up needing it.
    return StaticLocalLinkage == GVA_StrongODR ? GVA_DiscardableODR
                                               : StaticLocalLinkage;
  }
  // MSVC treats in-class initialized static data members as definitions.
  // By giving them non-strong linkage, out-of-line definitions won't
  // cause link errors.
  if (Context.isMSStaticDataMemberInlineDefinition(VD))
    return GVA_DiscardableODR;
  switch (VD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
    return GVA_StrongExternal;
  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;
  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;
  case TSK_ImplicitInstantiation:
    return GVA_DiscardableODR;
  }
  llvm_unreachable("Invalid Linkage!");
}
/// Compute VD's GVA linkage, including dllimport/dllexport adjustments.
GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
  GVALinkage Basic = basicGVALinkageForVariable(*this, VD);
  return adjustGVALinkageForDLLAttribute(Basic, VD);
}
/// Determine whether the declaration D must be emitted to the output even if
/// nothing in this translation unit appears to reference it (entry points,
/// used/alias decls, variables with side-effecting initialization, etc.).
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  // Only file-scope variables, non-template functions, and OpenMP
  // threadprivate decls are candidates at all.
  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (!VD->isFileVarDecl())
      return false;
    // Global named register variables (GNU extension) are never emitted.
    if (VD->getStorageClass() == SC_Register)
      return false;
  } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // We never need to emit an uninstantiated function template.
    if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
      return false;
  } else if (isa<OMPThreadPrivateDecl>(D))
    return true;
  else
    return false;
  // If this is a member of a class template, we do not need to emit it.
  if (D->getDeclContext()->isDependentContext())
    return false;
  // Weak references don't produce any output by themselves.
  if (D->hasAttr<WeakRefAttr>())
    return false;
  // Aliases and used decls are required.
  if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
    return true;
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // Forward declarations aren't required.
    if (!FD->doesThisDeclarationHaveABody())
      return FD->doesDeclarationForceExternallyVisibleDefinition();
    // Constructors and destructors are required.
    if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
      return true;
    // The key function for a class is required. This rule only comes
    // into play when inline functions can be key functions, though.
    if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
      if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
        const CXXRecordDecl *RD = MD->getParent();
        if (MD->isOutOfLine() && RD->isDynamicClass()) {
          const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
          if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
            return true;
        }
      }
    }
    GVALinkage Linkage = GetGVALinkageForFunction(FD);
    // static, static inline, always_inline, and extern inline functions can
    // always be deferred. Normal inline functions can be deferred in C99/C++.
    // Implicit template instantiations can also be deferred in C++.
    if (Linkage == GVA_Internal || Linkage == GVA_AvailableExternally ||
        Linkage == GVA_DiscardableODR)
      return false;
    // HLSL Change Starts
    // Don't just return true because of visibility, unless building a library
    return FD->getName() == getLangOpts().HLSLEntryFunction ||
           IsPatchConstantFunctionDecl(FD) || getLangOpts().IsHLSLLibrary;
    // HLSL Change Ends
  }
  const VarDecl *VD = cast<VarDecl>(D);
  assert(VD->isFileVarDecl() && "Expected file scoped var");
  if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
      !isMSStaticDataMemberInlineDefinition(VD))
    return false;
  // Variables that can be needed in other TUs are required.
  GVALinkage L = GetGVALinkageForVariable(VD);
  if (L != GVA_Internal && L != GVA_AvailableExternally &&
      L != GVA_DiscardableODR)
    return true;
  // Variables that have destruction with side-effects are required.
  if (VD->getType().isDestructedType())
    return true;
  // Variables that have initialization with side-effects are required.
  if (VD->getInit() && VD->getInit()->HasSideEffects(*this))
    return true;
  return false;
}
/// Pick the default calling convention for a function with the given
/// variadic-ness, honoring -mrtd and the C++ ABI's rules for methods.
CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
                                                    bool IsCXXMethod) const {
  // Member functions get their convention from the C++ ABI object.
  if (IsCXXMethod)
    return ABI->getDefaultMethodCallConv(IsVariadic);
  // -mrtd makes stdcall the default for non-variadic free functions.
  if (!IsVariadic && LangOpts.MRTD)
    return CC_X86StdCall;
  return Target->getDefaultCallingConv(TargetInfo::CCMT_Unknown);
}
// Determine whether RD counts as "nearly empty"; the criteria are
// ABI-specific, so the decision is delegated to the C++ ABI object.
bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
  // Pass through to the C++ ABI object
  return ABI->isNearlyEmpty(RD);
}
/// Lazily create and return the vtable-layout context matching the target's
/// C++ ABI (Microsoft vs. Itanium-family).
VTableContextBase *ASTContext::getVTableContext() {
  if (!VTContext.get()) {
    VTableContextBase *Ctx;
    if (Target->getCXXABI().isMicrosoft())
      Ctx = new MicrosoftVTableContext(*this);
    else
      Ctx = new ItaniumVTableContext(*this);
    VTContext.reset(Ctx);
  }
  return VTContext.get();
}
/// Create a new MangleContext matching the target's C++ ABI.
MangleContext *ASTContext::createMangleContext() {
  switch (Target->getCXXABI().getKind()) {
  // All of these ABIs use Itanium-style name mangling.
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::iOS64:
    return ItaniumMangleContext::create(*this, getDiagnostics());
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}
// Out-of-line destructor definition for the CXXABI interface.
CXXABI::~CXXABI() {}
/// Report the total number of bytes currently held by the ASTContext's
/// side tables (layout caches, instantiation maps, attribute maps, etc.).
size_t ASTContext::getSideTableAllocatedMemory() const {
  return ASTRecordLayouts.getMemorySize() +
         llvm::capacity_in_bytes(ObjCLayouts) +
         llvm::capacity_in_bytes(KeyFunctions) +
         llvm::capacity_in_bytes(ObjCImpls) +
         llvm::capacity_in_bytes(BlockVarCopyInits) +
         llvm::capacity_in_bytes(DeclAttrs) +
         llvm::capacity_in_bytes(TemplateOrInstantiation) +
         llvm::capacity_in_bytes(InstantiatedFromUsingDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) +
         llvm::capacity_in_bytes(OverriddenMethods) +
         llvm::capacity_in_bytes(Types) +
         llvm::capacity_in_bytes(VariableArrayTypes) +
         llvm::capacity_in_bytes(ClassScopeSpecializationPattern);
}
/// getIntTypeForBitwidth -
/// Map (DestWidth, Signed) onto the matching builtin integer type.  Falls
/// back to [unsigned] __int128 for width 128 when the target has no standard
/// type of that width; otherwise returns a null QualType.
QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
                                           unsigned Signed) const {
  TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed);
  CanQualType QualTy = getFromTargetType(Ty);
  if (QualTy)
    return QualTy;
  if (DestWidth == 128)
    return Signed ? Int128Ty : UnsignedInt128Ty;
  return QualTy; // null: no appropriate target type
}
/// getRealTypeForBitwidth -
/// sets floating point QualTy according to specified bitwidth.
/// Returns empty type if there is no appropriate target types.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth) const {
  // Ask the target which of its floating-point types has this width.
  TargetInfo::RealType Ty = getTargetInfo().getRealTypeByWidth(DestWidth);
  switch (Ty) {
  case TargetInfo::Float:
    return FloatTy;
  case TargetInfo::Double:
    return DoubleTy;
  case TargetInfo::LongDouble:
    return LongDoubleTy;
  case TargetInfo::NoFloat:
    // The target has no floating-point type of the requested width.
    return QualType();
  }
  llvm_unreachable("Unhandled TargetInfo::RealType value");
}
/// Record ND's mangling number.  1 is the implied default, so only larger
/// numbers are worth storing.
void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
  if (Number <= 1)
    return;
  MangleNumbers[ND] = Number;
}
/// Retrieve ND's mangling number; decls without a recorded number default
/// to 1 (mirroring setManglingNumber, which doesn't store 1).
unsigned ASTContext::getManglingNumber(const NamedDecl *ND) const {
  auto It = MangleNumbers.find(ND);
  if (It == MangleNumbers.end())
    return 1;
  return It->second;
}
/// Record VD's static-local number.  1 is the implied default, so only
/// larger numbers are worth storing.
void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
  if (Number <= 1)
    return;
  StaticLocalNumbers[VD] = Number;
}
/// Retrieve VD's static-local number; decls without a recorded number
/// default to 1 (mirroring setStaticLocalNumber, which doesn't store 1).
unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
  auto It = StaticLocalNumbers.find(VD);
  if (It == StaticLocalNumbers.end())
    return 1;
  return It->second;
}
/// Get the per-DeclContext mangling-number state, creating it on first use.
MangleNumberingContext &
ASTContext::getManglingNumberContext(const DeclContext *DC) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  MangleNumberingContext *&Slot = MangleNumberingContexts[DC];
  if (!Slot)
    Slot = createMangleNumberingContext();
  return *Slot;
}
// Delegate to the C++ ABI object, which supplies the ABI-appropriate
// numbering context implementation.
MangleNumberingContext *ASTContext::createMangleNumberingContext() const {
  return ABI->createMangleNumberingContext();
}
// Look up the copy constructor recorded for exception objects of type RD.
// The record is keyed on RD's first declaration so any redeclaration works.
const CXXConstructorDecl *
ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
  return ABI->getCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()));
}
// Record CD as the copy constructor for exception objects of type RD.
// Both decls are normalized to their first declaration before storing.
void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
                                                      CXXConstructorDecl *CD) {
  return ABI->addCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()),
      cast<CXXConstructorDecl>(CD->getFirstDecl()));
}
// Record DAE as the default-argument expression for parameter ParmIdx of
// constructor CD; storage lives in the C++ ABI object, keyed on CD's
// first declaration.
void ASTContext::addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
                                                 unsigned ParmIdx, Expr *DAE) {
  ABI->addDefaultArgExprForConstructor(
      cast<CXXConstructorDecl>(CD->getFirstDecl()), ParmIdx, DAE);
}
// Retrieve the default-argument expression previously recorded for
// parameter ParmIdx of constructor CD.
Expr *ASTContext::getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
                                                  unsigned ParmIdx) {
  return ABI->getDefaultArgExprForConstructor(
      cast<CXXConstructorDecl>(CD->getFirstDecl()), ParmIdx);
}
// Remember the index recorded for parameter D (read back via
// getParameterIndex).
void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
  ParamIndices[D] = index;
}
/// Retrieve the index previously recorded for parameter D via
/// setParameterIndex; asserts if no index was recorded.
unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
  const auto Entry = ParamIndices.find(D);
  assert(Entry != ParamIndices.end() &&
         "ParmIndices lacks entry set by ParmVarDecl");
  return Entry->second;
}
/// Get the cached constant value slot for a static-storage-duration
/// materialized temporary, optionally creating the slot when MayCreate.
APValue *
ASTContext::getMaterializedTemporaryValue(const MaterializeTemporaryExpr *E,
                                          bool MayCreate) {
  assert(E && E->getStorageDuration() == SD_Static &&
         "don't need to cache the computed value for this temporary");
  // operator[] default-constructs the slot on demand when creation is OK.
  if (MayCreate)
    return &MaterializedTemporaryValues[E];
  // Otherwise do a read-only lookup, returning null when nothing is cached.
  llvm::DenseMap<const MaterializeTemporaryExpr *, APValue>::iterator I =
      MaterializedTemporaryValues.find(E);
  return I == MaterializedTemporaryValues.end() ? nullptr : &I->second;
}
/// Determine whether the atomic expression E would require an atomic libcall
/// that may be unavailable on older Darwin deployment targets.
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
  const llvm::Triple &T = getTargetInfo().getTriple();
  // Only Darwin targets are affected.
  if (!T.isOSDarwin())
    return false;
  // iOS 7+ and OS X 10.9+ are treated as supporting the libcalls.
  if (!(T.isiOS() && T.isOSVersionLT(7)) &&
      !(T.isMacOSX() && T.isOSVersionLT(10, 9)))
    return false;
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  // A libcall is needed when the operation can't be inlined: either the
  // size and alignment differ, or the atomic is wider than the target's
  // maximum inline atomic width.
  return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
}
namespace {
/// \brief A \c RecursiveASTVisitor that builds a map from nodes to their
/// parents as defined by the \c RecursiveASTVisitor.
///
/// Note that the relationship described here is purely in terms of AST
/// traversal - there are other relationships (for example declaration context)
/// in the AST that are better modeled by special matchers.
///
/// FIXME: Currently only builds up the map using \c Stmt and \c Decl nodes.
class ParentMapASTVisitor : public RecursiveASTVisitor<ParentMapASTVisitor> {
public:
/// \brief Builds and returns the translation unit's parent map.
///
/// The caller takes ownership of the returned \c ParentMap.
static ASTContext::ParentMap *buildMap(TranslationUnitDecl &TU) {
ParentMapASTVisitor Visitor(new ASTContext::ParentMap);
Visitor.TraverseDecl(&TU);
return Visitor.Parents;
}
private:
typedef RecursiveASTVisitor<ParentMapASTVisitor> VisitorBase;
ParentMapASTVisitor(ASTContext::ParentMap *Parents) : Parents(Parents) {
}
bool shouldVisitTemplateInstantiations() const {
return true;
}
bool shouldVisitImplicitCode() const {
return true;
}
// Disables data recursion. We intercept Traverse* methods in the RAV, which
// are not triggered during data recursion.
bool shouldUseDataRecursionFor(clang::Stmt *S) const {
return false;
}
  // Records the node currently on top of ParentStack as the parent of
  // \p Node, then continues the traversal through \p traverse with \p Node
  // pushed as the new potential parent. A node reached through several
  // paths accumulates multiple parents, stored as a ParentVector.
  template <typename T>
  bool TraverseNode(T *Node, bool(VisitorBase:: *traverse) (T *)) {
    if (!Node)
      return true;
    if (ParentStack.size() > 0) {
      // FIXME: Currently we add the same parent multiple times, but only
      // when no memoization data is available for the type.
      // For example when we visit all subexpressions of template
      // instantiations; this is suboptimal, but benign: the only way to
      // visit those is with hasAncestor / hasParent, and those do not create
      // new matches.
      // The plan is to enable DynTypedNode to be storable in a map or hash
      // map. The main problem there is to implement hash functions /
      // comparison operators for all types that DynTypedNode supports that
      // do not have pointer identity.
      auto &NodeOrVector = (*Parents)[Node];
      if (NodeOrVector.isNull()) {
        // First parent seen for this node: store it as a single entry.
        NodeOrVector = new ast_type_traits::DynTypedNode(ParentStack.back());
      } else {
        // Second or later parent: promote the single entry to a vector
        // first if necessary, then append.
        if (NodeOrVector.template is<ast_type_traits::DynTypedNode *>()) {
          auto *Node =
              NodeOrVector.template get<ast_type_traits::DynTypedNode *>();
          auto *Vector = new ASTContext::ParentVector(1, *Node);
          NodeOrVector = Vector;
          delete Node;
        }
        assert(NodeOrVector.template is<ASTContext::ParentVector *>());
        auto *Vector =
            NodeOrVector.template get<ASTContext::ParentVector *>();
        // Skip duplicates for types that have memoization data.
        // We must check that the type has memoization data before calling
        // std::find() because DynTypedNode::operator== can't compare all
        // types.
        bool Found = ParentStack.back().getMemoizationData() &&
                     std::find(Vector->begin(), Vector->end(),
                               ParentStack.back()) != Vector->end();
        if (!Found)
          Vector->push_back(ParentStack.back());
      }
    }
    ParentStack.push_back(ast_type_traits::DynTypedNode::create(*Node));
    bool Result = (this ->* traverse) (Node);
    ParentStack.pop_back();
    return Result;
  }
  // RecursiveASTVisitor entry points; both funnel through TraverseNode so
  // that every visited Decl/Stmt gets its parent recorded.
  bool TraverseDecl(Decl *DeclNode) {
    return TraverseNode(DeclNode, &VisitorBase::TraverseDecl);
  }
  bool TraverseStmt(Stmt *StmtNode) {
    return TraverseNode(StmtNode, &VisitorBase::TraverseStmt);
  }
  // The map being populated; ownership transfers to the caller of buildMap().
  ASTContext::ParentMap *Parents;
  // Stack of ancestors of the node currently being traversed; the back
  // element is the direct parent candidate.
  llvm::SmallVector<ast_type_traits::DynTypedNode, 16> ParentStack;
  friend class RecursiveASTVisitor<ParentMapASTVisitor>;
};
} // end namespace
/// Returns the parent nodes of \p Node, lazily building the parent map for
/// the whole translation unit on first use. The result is empty if the
/// node has no recorded parent.
ArrayRef<ast_type_traits::DynTypedNode>
ASTContext::getParents(const ast_type_traits::DynTypedNode &Node) {
  assert(Node.getMemoizationData() &&
         "Invariant broken: only nodes that support memoization may be "
         "used in the parent map.");
  if (!AllParents) {
    // We always need to run over the whole translation unit, as
    // hasAncestor can escape any subtree.
    AllParents.reset(
        ParentMapASTVisitor::buildMap(*getTranslationUnitDecl()));
  }
  ParentMap::const_iterator I = AllParents->find(Node.getMemoizationData());
  if (I == AllParents->end()) {
    // No recorded parent for this node.
    return None;
  }
  if (auto *N = I->second.dyn_cast<ast_type_traits::DynTypedNode *>()) {
    // Common case: exactly one parent.
    return llvm::makeArrayRef(N, 1);
  }
  // Multiple parents were recorded for this node.
  return *I->second.get<ParentVector *>();
}
/// Returns true if the implementation \p MethodImpl matches the declaration
/// \p MethodDecl: same ObjC declaration qualifiers, same return type, same
/// parameter count/types/qualifiers, and same variadic-ness.
bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
                                const ObjCMethodDecl *MethodImpl) {
  // No point trying to match an unavailable/deprecated method.
  if (MethodDecl->hasAttr<UnavailableAttr>()
      || MethodDecl->hasAttr<DeprecatedAttr>())
    return false;
  if (MethodDecl->getObjCDeclQualifier() !=
      MethodImpl->getObjCDeclQualifier())
    return false;
  if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType()))
    return false;
  if (MethodDecl->param_size() != MethodImpl->param_size())
    return false;
  // Compare parameters pairwise: qualifiers and types must both match.
  for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
       IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
       EF = MethodDecl->param_end();
       IM != EM && IF != EF; ++IM, ++IF) {
    const ParmVarDecl *DeclVar = (*IF);
    const ParmVarDecl *ImplVar = (*IM);
    if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
      return false;
    if (!hasSameType(DeclVar->getType(), ImplVar->getType()))
      return false;
  }
  return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
}
// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h
template
clang::LazyGenerationalUpdatePtr<
const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
clang::LazyGenerationalUpdatePtr<
const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
const clang::ASTContext &Ctx, Decl *Value);
//===--- CommentParser.cpp - Doxygen comment parser -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/CommentParser.h"
#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/CommentDiagnostic.h"
#include "clang/AST/CommentSema.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/ErrorHandling.h"
namespace clang {
/// Returns true if every character of \p S is whitespace (an empty string
/// is considered whitespace).
static inline bool isWhitespace(llvm::StringRef S) {
  for (char C : S)
    if (!isWhitespace(C))
      return false;
  return true;
}
namespace comments {
/// Re-lexes a sequence of tok::text tokens.
///
/// Block-command argument parsing needs finer granularity than the comment
/// lexer provides: it pulls whole tok::text tokens from the Parser, exposes
/// a character-level cursor over their contents, and can hand back any
/// unconsumed remainder to the Parser afterwards.
class TextTokenRetokenizer {
  llvm::BumpPtrAllocator &Allocator;
  Parser &P;
  /// This flag is set when there are no more tokens we can fetch from lexer.
  bool NoMoreInterestingTokens;
  /// Token buffer: tokens we have processed and lookahead.
  SmallVector<Token, 16> Toks;
  /// A position in \c Toks.
  struct Position {
    unsigned CurToken;
    const char *BufferStart;
    const char *BufferEnd;
    const char *BufferPtr;
    SourceLocation BufferStartLoc;
  };
  /// Current position in Toks.
  Position Pos;
  /// Returns true if the cursor is past the last buffered token.
  bool isEnd() const {
    return Pos.CurToken >= Toks.size();
  }
  /// Sets up the buffer pointers to point to current token.
  void setupBuffer() {
    assert(!isEnd());
    const Token &Tok = Toks[Pos.CurToken];
    Pos.BufferStart = Tok.getText().begin();
    Pos.BufferEnd = Tok.getText().end();
    Pos.BufferPtr = Pos.BufferStart;
    Pos.BufferStartLoc = Tok.getLocation();
  }
  /// Returns the source location of the character at the cursor.
  SourceLocation getSourceLocation() const {
    const unsigned CharNo = Pos.BufferPtr - Pos.BufferStart;
    return Pos.BufferStartLoc.getLocWithOffset(CharNo);
  }
  /// Returns the character at the cursor without consuming it.
  char peek() const {
    assert(!isEnd());
    assert(Pos.BufferPtr != Pos.BufferEnd);
    return *Pos.BufferPtr;
  }
  /// Advances the cursor by one character, fetching another token from the
  /// lexer when the current token's text is exhausted.
  void consumeChar() {
    assert(!isEnd());
    assert(Pos.BufferPtr != Pos.BufferEnd);
    Pos.BufferPtr++;
    if (Pos.BufferPtr == Pos.BufferEnd) {
      Pos.CurToken++;
      if (isEnd() && !addToken())
        return;
      assert(!isEnd());
      setupBuffer();
    }
  }
  /// Add a token.
  /// Returns true on success, false if there are no interesting tokens to
  /// fetch from lexer.
  bool addToken() {
    if (NoMoreInterestingTokens)
      return false;
    if (P.Tok.is(tok::newline)) {
      // If we see a single newline token between text tokens, skip it.
      Token Newline = P.Tok;
      P.consumeToken();
      if (P.Tok.isNot(tok::text)) {
        P.putBack(Newline);
        NoMoreInterestingTokens = true;
        return false;
      }
    }
    if (P.Tok.isNot(tok::text)) {
      NoMoreInterestingTokens = true;
      return false;
    }
    Toks.push_back(P.Tok);
    P.consumeToken();
    if (Toks.size() == 1)
      setupBuffer();
    return true;
  }
  /// Consumes a run of whitespace characters at the cursor, if any.
  void consumeWhitespace() {
    while (!isEnd()) {
      if (isWhitespace(peek()))
        consumeChar();
      else
        break;
    }
  }
  /// Initializes \p Result as a tok::text token with the given location,
  /// length and text.  Note: TokBegin is currently unused.
  void formTokenWithChars(Token &Result,
                          SourceLocation Loc,
                          const char *TokBegin,
                          unsigned TokLength,
                          StringRef Text) {
    Result.setLocation(Loc);
    Result.setKind(tok::text);
    Result.setLength(TokLength);
#ifndef NDEBUG
    // Poison the debug-only fields so stale values are easy to spot.
    Result.TextPtr = "<UNSET>";
    Result.IntVal = 7;
#endif
    Result.setText(Text);
  }
public:
  TextTokenRetokenizer(llvm::BumpPtrAllocator &Allocator, Parser &P):
      Allocator(Allocator), P(P), NoMoreInterestingTokens(false) {
    Pos.CurToken = 0;
    addToken();
  }
  /// Extract a word -- sequence of non-whitespace characters.
  /// On failure the cursor is restored and \p Tok is left untouched.
  bool lexWord(Token &Tok) {
    if (isEnd())
      return false;
    Position SavedPos = Pos;
    consumeWhitespace();
    SmallString<32> WordText;
    const char *WordBegin = Pos.BufferPtr;
    SourceLocation Loc = getSourceLocation();
    while (!isEnd()) {
      const char C = peek();
      if (!isWhitespace(C)) {
        WordText.push_back(C);
        consumeChar();
      } else
        break;
    }
    const unsigned Length = WordText.size();
    if (Length == 0) {
      // Only whitespace remained: rewind and report failure.
      Pos = SavedPos;
      return false;
    }
    // Copy the word into allocator-owned storage so the token's text
    // outlives this retokenizer.
    char *TextPtr = Allocator.Allocate<char>(Length + 1);
    memcpy(TextPtr, WordText.c_str(), Length + 1);
    StringRef Text = StringRef(TextPtr, Length);
    formTokenWithChars(Tok, Loc, WordBegin, Length, Text);
    return true;
  }
  /// Extract a delimited sequence such as "[in]": an \p OpenDelim character
  /// followed by arbitrary characters up to and including \p CloseDelim.
  /// On failure the cursor is restored and \p Tok is left untouched.
  bool lexDelimitedSeq(Token &Tok, char OpenDelim, char CloseDelim) {
    if (isEnd())
      return false;
    Position SavedPos = Pos;
    consumeWhitespace();
    SmallString<32> WordText;
    const char *WordBegin = Pos.BufferPtr;
    SourceLocation Loc = getSourceLocation();
    bool Error = false;
    if (!isEnd()) {
      const char C = peek();
      if (C == OpenDelim) {
        WordText.push_back(C);
        consumeChar();
      } else
        Error = true;
    }
    char C = '\0';
    while (!Error && !isEnd()) {
      C = peek();
      WordText.push_back(C);
      consumeChar();
      if (C == CloseDelim)
        break;
    }
    // Ran out of characters before seeing the closing delimiter.
    if (!Error && C != CloseDelim)
      Error = true;
    if (Error) {
      Pos = SavedPos;
      return false;
    }
    const unsigned Length = WordText.size();
    char *TextPtr = Allocator.Allocate<char>(Length + 1);
    memcpy(TextPtr, WordText.c_str(), Length + 1);
    StringRef Text = StringRef(TextPtr, Length);
    formTokenWithChars(Tok, Loc, WordBegin,
                       Pos.BufferPtr - WordBegin, Text);
    return true;
  }
  /// Put back tokens that we didn't consume.
  void putBackLeftoverTokens() {
    if (isEnd())
      return;
    bool HavePartialTok = false;
    Token PartialTok;
    if (Pos.BufferPtr != Pos.BufferStart) {
      // The current token was partially consumed: form a fresh text token
      // from the remaining characters.
      formTokenWithChars(PartialTok, getSourceLocation(),
                         Pos.BufferPtr, Pos.BufferEnd - Pos.BufferPtr,
                         StringRef(Pos.BufferPtr,
                                   Pos.BufferEnd - Pos.BufferPtr));
      HavePartialTok = true;
      Pos.CurToken++;
    }
    P.putBack(llvm::makeArrayRef(Toks.begin() + Pos.CurToken, Toks.end()));
    Pos.CurToken = Toks.size();
    if (HavePartialTok)
      P.putBack(PartialTok);
  }
};
/// Constructs a comment parser over lexer \p L and primes it by consuming
/// the first token.
Parser::Parser(Lexer &L, Sema &S, llvm::BumpPtrAllocator &Allocator,
               const SourceManager &SourceMgr, DiagnosticsEngine &Diags,
               const CommandTraits &Traits):
    L(L), S(S), Allocator(Allocator), SourceMgr(SourceMgr), Diags(Diags),
    Traits(Traits) {
  consumeToken();
}
/// Parses the arguments of a \\param command: an optional direction
/// specifier ("[in]", "[out]", "[in,out]") followed by the parameter name.
/// Either argument may be absent; Sema is only notified for those found.
void Parser::parseParamCommandArgs(ParamCommandComment *PC,
                                   TextTokenRetokenizer &Retokenizer) {
  Token Arg;
  // Check if argument looks like direction specification: [dir]
  // e.g., [in], [out], [in,out]
  if (Retokenizer.lexDelimitedSeq(Arg, '[', ']'))
    S.actOnParamCommandDirectionArg(PC,
                                    Arg.getLocation(),
                                    Arg.getEndLocation(),
                                    Arg.getText());
  if (Retokenizer.lexWord(Arg))
    S.actOnParamCommandParamNameArg(PC,
                                    Arg.getLocation(),
                                    Arg.getEndLocation(),
                                    Arg.getText());
}
/// Parses the single argument of a \\tparam command: the template
/// parameter name, if one is available.
void Parser::parseTParamCommandArgs(TParamCommandComment *TPC,
                                    TextTokenRetokenizer &Retokenizer) {
  Token Arg;
  if (Retokenizer.lexWord(Arg))
    S.actOnTParamCommandParamNameArg(TPC,
                                     Arg.getLocation(),
                                     Arg.getEndLocation(),
                                     Arg.getText());
}
/// Parses up to \p NumArgs whitespace-separated word arguments for the
/// block command \p BC. Fewer arguments are attached if the retokenizer
/// runs out of words.
void Parser::parseBlockCommandArgs(BlockCommandComment *BC,
                                   TextTokenRetokenizer &Retokenizer,
                                   unsigned NumArgs) {
  typedef BlockCommandComment::Argument Argument;
  // Arguments live in the comment allocator; construct the array in place.
  Argument *Args =
      new (Allocator.Allocate<Argument>(NumArgs)) Argument[NumArgs];
  unsigned ParsedArgs = 0;
  Token Arg;
  while (ParsedArgs < NumArgs && Retokenizer.lexWord(Arg)) {
    Args[ParsedArgs] = Argument(SourceRange(Arg.getLocation(),
                                            Arg.getEndLocation()),
                                Arg.getText());
    ParsedArgs++;
  }
  S.actOnBlockCommandArgs(BC, llvm::makeArrayRef(Args, ParsedArgs));
}
/// Parses a block command (e.g. \\brief, \\param, \\tparam) together with
/// its word arguments and the paragraph that forms the command's body.
///
/// Exactly one of a ParamCommandComment, TParamCommandComment or plain
/// BlockCommandComment is created, depending on the command's CommandInfo.
BlockCommandComment *Parser::parseBlockCommand() {
  assert(Tok.is(tok::backslash_command) || Tok.is(tok::at_command));

  ParamCommandComment *PC = nullptr;
  TParamCommandComment *TPC = nullptr;
  BlockCommandComment *BC = nullptr;
  const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID());
  CommandMarkerKind CommandMarker =
      Tok.is(tok::backslash_command) ? CMK_Backslash : CMK_At;
  if (Info->IsParamCommand) {
    PC = S.actOnParamCommandStart(Tok.getLocation(),
                                  Tok.getEndLocation(),
                                  Tok.getCommandID(),
                                  CommandMarker);
  } else if (Info->IsTParamCommand) {
    TPC = S.actOnTParamCommandStart(Tok.getLocation(),
                                    Tok.getEndLocation(),
                                    Tok.getCommandID(),
                                    CommandMarker);
  } else {
    BC = S.actOnBlockCommandStart(Tok.getLocation(),
                                  Tok.getEndLocation(),
                                  Tok.getCommandID(),
                                  CommandMarker);
  }

  // Attaches \p Paragraph to whichever command node was created above and
  // returns the finished comment.  Exactly one of PC, TPC and BC is
  // non-null here, so this is safe to call from any exit path below.
  auto FinishCommand =
      [&](ParagraphComment *Paragraph) -> BlockCommandComment * {
    if (PC) {
      S.actOnParamCommandFinish(PC, Paragraph);
      return PC;
    }
    if (TPC) {
      S.actOnTParamCommandFinish(TPC, Paragraph);
      return TPC;
    }
    S.actOnBlockCommandFinish(BC, Paragraph);
    return BC;
  };

  consumeToken();

  if (isTokBlockCommand()) {
    // Block command ahead.  We can't nest block commands, so pretend that
    // this command has an empty argument.
    return FinishCommand(S.actOnParagraphComment(None));
  }

  if (PC || TPC || Info->NumArgs > 0) {
    // In order to parse command arguments we need to retokenize a few
    // following text tokens.
    TextTokenRetokenizer Retokenizer(Allocator, *this);

    if (PC)
      parseParamCommandArgs(PC, Retokenizer);
    else if (TPC)
      parseTParamCommandArgs(TPC, Retokenizer);
    else
      parseBlockCommandArgs(BC, Retokenizer, Info->NumArgs);

    Retokenizer.putBackLeftoverTokens();
  }

  // If there's a block command ahead, we will attach an empty paragraph to
  // this command.
  bool EmptyParagraph = false;
  if (isTokBlockCommand())
    EmptyParagraph = true;
  else if (Tok.is(tok::newline)) {
    // Peek past a single newline to see whether a block command follows.
    Token PrevTok = Tok;
    consumeToken();
    EmptyParagraph = isTokBlockCommand();
    putBack(PrevTok);
  }

  ParagraphComment *Paragraph;
  if (EmptyParagraph)
    Paragraph = S.actOnParagraphComment(None);
  else {
    BlockContentComment *Block = parseParagraphOrBlockCommand();
    // Since we have checked for a block command, we should have parsed a
    // paragraph.
    Paragraph = cast<ParagraphComment>(Block);
  }

  return FinishCommand(Paragraph);
}
/// Parses an inline command (e.g. \\c, \\em) together with its single word
/// argument, if a word is available after the command.
InlineCommandComment *Parser::parseInlineCommand() {
  assert(Tok.is(tok::backslash_command) || Tok.is(tok::at_command));
  const Token CommandTok = Tok;
  consumeToken();
  TextTokenRetokenizer Retokenizer(Allocator, *this);
  Token ArgTok;
  // The argument is optional; without one the command is created bare.
  bool ArgTokValid = Retokenizer.lexWord(ArgTok);
  InlineCommandComment *IC;
  if (ArgTokValid) {
    IC = S.actOnInlineCommand(CommandTok.getLocation(),
                              CommandTok.getEndLocation(),
                              CommandTok.getCommandID(),
                              ArgTok.getLocation(),
                              ArgTok.getEndLocation(),
                              ArgTok.getText());
  } else {
    IC = S.actOnInlineCommand(CommandTok.getLocation(),
                              CommandTok.getEndLocation(),
                              CommandTok.getCommandID());
  }
  // Return any unconsumed text to the parser.
  Retokenizer.putBackLeftoverTokens();
  return IC;
}
/// Parses an HTML start tag, e.g. "<a href='...'>" or "<br/>", including
/// its attributes.  Malformed input is recovered from by finishing the tag
/// at the first unexpected token and emitting a diagnostic.
HTMLStartTagComment *Parser::parseHTMLStartTag() {
  assert(Tok.is(tok::html_start_tag));
  HTMLStartTagComment *HST =
      S.actOnHTMLStartTagStart(Tok.getLocation(),
                               Tok.getHTMLTagStartName());
  consumeToken();
  SmallVector<HTMLStartTagComment::Attribute, 2> Attrs;
  while (true) {
    switch (Tok.getKind()) {
    case tok::html_ident: {
      Token Ident = Tok;
      consumeToken();
      if (Tok.isNot(tok::html_equals)) {
        // Value-less attribute, e.g. "<input disabled>".
        Attrs.push_back(HTMLStartTagComment::Attribute(Ident.getLocation(),
                                                       Ident.getHTMLIdent()));
        continue;
      }
      Token Equals = Tok;
      consumeToken();
      if (Tok.isNot(tok::html_quoted_string)) {
        // "ident =" without a quoted value: keep the bare attribute and
        // skip any stray '='/string tokens.
        Diag(Tok.getLocation(),
             diag::warn_doc_html_start_tag_expected_quoted_string)
          << SourceRange(Equals.getLocation());
        Attrs.push_back(HTMLStartTagComment::Attribute(Ident.getLocation(),
                                                       Ident.getHTMLIdent()));
        while (Tok.is(tok::html_equals) ||
               Tok.is(tok::html_quoted_string))
          consumeToken();
        continue;
      }
      // Well-formed 'ident="value"' attribute.
      Attrs.push_back(HTMLStartTagComment::Attribute(
                              Ident.getLocation(),
                              Ident.getHTMLIdent(),
                              Equals.getLocation(),
                              SourceRange(Tok.getLocation(),
                                          Tok.getEndLocation()),
                              Tok.getHTMLQuotedString()));
      consumeToken();
      continue;
    }
    case tok::html_greater:
      S.actOnHTMLStartTagFinish(HST,
                                S.copyArray(llvm::makeArrayRef(Attrs)),
                                Tok.getLocation(),
                                /* IsSelfClosing = */ false);
      consumeToken();
      return HST;
    case tok::html_slash_greater:
      S.actOnHTMLStartTagFinish(HST,
                                S.copyArray(llvm::makeArrayRef(Attrs)),
                                Tok.getLocation(),
                                /* IsSelfClosing = */ true);
      consumeToken();
      return HST;
    case tok::html_equals:
    case tok::html_quoted_string:
      // '=' or a string with no preceding identifier: diagnose, skip the
      // junk, and resume if something tag-like follows.
      Diag(Tok.getLocation(),
           diag::warn_doc_html_start_tag_expected_ident_or_greater);
      while (Tok.is(tok::html_equals) ||
             Tok.is(tok::html_quoted_string))
        consumeToken();
      if (Tok.is(tok::html_ident) ||
          Tok.is(tok::html_greater) ||
          Tok.is(tok::html_slash_greater))
        continue;
      S.actOnHTMLStartTagFinish(HST,
                                S.copyArray(llvm::makeArrayRef(Attrs)),
                                SourceLocation(),
                                /* IsSelfClosing = */ false);
      return HST;
    default:
      // Not a token from an HTML start tag. Thus HTML tag prematurely ended.
      S.actOnHTMLStartTagFinish(HST,
                                S.copyArray(llvm::makeArrayRef(Attrs)),
                                SourceLocation(),
                                /* IsSelfClosing = */ false);
      bool StartLineInvalid;
      const unsigned StartLine = SourceMgr.getPresumedLineNumber(
                                                  HST->getLocation(),
                                                  &StartLineInvalid);
      bool EndLineInvalid;
      const unsigned EndLine = SourceMgr.getPresumedLineNumber(
                                                  Tok.getLocation(),
                                                  &EndLineInvalid);
      // Add a "tag started here" note only when it would point at a
      // different line than the main diagnostic.
      if (StartLineInvalid || EndLineInvalid || StartLine == EndLine)
        Diag(Tok.getLocation(),
             diag::warn_doc_html_start_tag_expected_ident_or_greater)
          << HST->getSourceRange();
      else {
        Diag(Tok.getLocation(),
             diag::warn_doc_html_start_tag_expected_ident_or_greater);
        Diag(HST->getLocation(), diag::note_doc_html_tag_started_here)
          << HST->getSourceRange();
      }
      return HST;
    }
  }
}
/// Parses an HTML end tag, e.g. "</em>".  The closing '>' may be missing,
/// in which case an invalid location is recorded for it.
HTMLEndTagComment *Parser::parseHTMLEndTag() {
  assert(Tok.is(tok::html_end_tag));
  const Token EndTagTok = Tok;
  consumeToken();

  // Consume the '>' if present; otherwise GreaterLoc stays invalid.
  SourceLocation GreaterLoc;
  if (Tok.is(tok::html_greater)) {
    GreaterLoc = Tok.getLocation();
    consumeToken();
  }

  return S.actOnHTMLEndTag(EndTagTok.getLocation(), GreaterLoc,
                           EndTagTok.getHTMLTagEndName());
}
/// Parses a paragraph: a sequence of inline content (text, inline commands,
/// HTML tags) terminated by a blank line, block content, or end of comment.
/// If the paragraph would begin with a block command, the block command is
/// parsed and returned instead.
BlockContentComment *Parser::parseParagraphOrBlockCommand() {
  SmallVector<InlineContentComment *, 8> Content;
  while (true) {
    switch (Tok.getKind()) {
    case tok::verbatim_block_begin:
    case tok::verbatim_line_name:
    case tok::eof:
      assert(Content.size() != 0);
      break; // Block content or EOF ahead, finish this paragraph.
    case tok::unknown_command:
      Content.push_back(S.actOnUnknownCommand(Tok.getLocation(),
                                              Tok.getEndLocation(),
                                              Tok.getUnknownCommandName()));
      consumeToken();
      continue;
    case tok::backslash_command:
    case tok::at_command: {
      const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID());
      if (Info->IsBlockCommand) {
        if (Content.size() == 0)
          return parseBlockCommand();
        break; // Block command ahead, finish this paragraph.
      }
      if (Info->IsVerbatimBlockEndCommand) {
        // Stray end-of-verbatim command with no matching start; diagnose
        // and skip it.
        Diag(Tok.getLocation(),
             diag::warn_verbatim_block_end_without_start)
          << Tok.is(tok::at_command)
          << Info->Name
          << SourceRange(Tok.getLocation(), Tok.getEndLocation());
        consumeToken();
        continue;
      }
      if (Info->IsUnknownCommand) {
        Content.push_back(S.actOnUnknownCommand(Tok.getLocation(),
                                                Tok.getEndLocation(),
                                                Info->getID()));
        consumeToken();
        continue;
      }
      assert(Info->IsInlineCommand);
      Content.push_back(parseInlineCommand());
      continue;
    }
    case tok::newline: {
      consumeToken();
      if (Tok.is(tok::newline) || Tok.is(tok::eof)) {
        consumeToken();
        break; // Two newlines -- end of paragraph.
      }
      // Also allow [tok::newline, tok::text, tok::newline] if the middle
      // tok::text is just whitespace.
      if (Tok.is(tok::text) && isWhitespace(Tok.getText())) {
        Token WhitespaceTok = Tok;
        consumeToken();
        if (Tok.is(tok::newline) || Tok.is(tok::eof)) {
          consumeToken();
          break;
        }
        // We have [tok::newline, tok::text, non-newline]. Put back tok::text.
        putBack(WhitespaceTok);
      }
      // Single newline inside the paragraph: record it on the last piece
      // of content.
      if (Content.size() > 0)
        Content.back()->addTrailingNewline();
      continue;
    }
    // Don't deal with HTML tag soup now.
    case tok::html_start_tag:
      Content.push_back(parseHTMLStartTag());
      continue;
    case tok::html_end_tag:
      Content.push_back(parseHTMLEndTag());
      continue;
    case tok::text:
      Content.push_back(S.actOnText(Tok.getLocation(),
                                    Tok.getEndLocation(),
                                    Tok.getText()));
      consumeToken();
      continue;
    case tok::verbatim_block_line:
    case tok::verbatim_block_end:
    case tok::verbatim_line_text:
    case tok::html_ident:
    case tok::html_equals:
    case tok::html_quoted_string:
    case tok::html_greater:
    case tok::html_slash_greater:
      llvm_unreachable("should not see this token");
    }
    break;
  }
  return S.actOnParagraphComment(S.copyArray(llvm::makeArrayRef(Content)));
}
/// Parses a verbatim block: an opening command, a sequence of verbatim
/// lines (including empty ones), and the matching closing command.  An
/// unterminated block is finished with an invalid end location and an
/// empty closing command name.
VerbatimBlockComment *Parser::parseVerbatimBlock() {
  assert(Tok.is(tok::verbatim_block_begin));
  VerbatimBlockComment *VB =
      S.actOnVerbatimBlockStart(Tok.getLocation(),
                                Tok.getVerbatimBlockID());
  consumeToken();
  // Don't create an empty line if verbatim opening command is followed
  // by a newline.
  if (Tok.is(tok::newline))
    consumeToken();
  SmallVector<VerbatimBlockLineComment *, 8> Lines;
  while (Tok.is(tok::verbatim_block_line) ||
         Tok.is(tok::newline)) {
    VerbatimBlockLineComment *Line;
    if (Tok.is(tok::verbatim_block_line)) {
      Line = S.actOnVerbatimBlockLine(Tok.getLocation(),
                                      Tok.getVerbatimBlockText());
      consumeToken();
      if (Tok.is(tok::newline)) {
        consumeToken();
      }
    } else {
      // Empty line, just a tok::newline.
      Line = S.actOnVerbatimBlockLine(Tok.getLocation(), "");
      consumeToken();
    }
    Lines.push_back(Line);
  }
  if (Tok.is(tok::verbatim_block_end)) {
    const CommandInfo *Info = Traits.getCommandInfo(Tok.getVerbatimBlockID());
    S.actOnVerbatimBlockFinish(VB, Tok.getLocation(),
                               Info->Name,
                               S.copyArray(llvm::makeArrayRef(Lines)));
    consumeToken();
  } else {
    // Unterminated \\verbatim block
    S.actOnVerbatimBlockFinish(VB, SourceLocation(), "",
                               S.copyArray(llvm::makeArrayRef(Lines)));
  }
  return VB;
}
/// Parses a verbatim line command (e.g. "\\fn void f(int);"): the command
/// name followed by the rest of the line taken verbatim.
VerbatimLineComment *Parser::parseVerbatimLine() {
  assert(Tok.is(tok::verbatim_line_name));

  const Token NameTok = Tok;
  consumeToken();

  // Next token might not be a tok::verbatim_line_text if verbatim line
  // starting command comes just before a newline or comment end.
  const bool HasText = Tok.is(tok::verbatim_line_text);
  SourceLocation TextBegin =
      HasText ? Tok.getLocation() : NameTok.getEndLocation();
  StringRef Text = HasText ? Tok.getVerbatimLineText() : StringRef("");

  VerbatimLineComment *VL = S.actOnVerbatimLine(NameTok.getLocation(),
                                                NameTok.getVerbatimLineID(),
                                                TextBegin, Text);
  consumeToken();
  return VL;
}
/// Parses one block of comment content, dispatching on the current token:
/// a paragraph (possibly opening with a block command), a verbatim block,
/// or a verbatim line.
BlockContentComment *Parser::parseBlockContent() {
  switch (Tok.getKind()) {
  case tok::text:
  case tok::unknown_command:
  case tok::backslash_command:
  case tok::at_command:
  case tok::html_start_tag:
  case tok::html_end_tag:
    return parseParagraphOrBlockCommand();
  case tok::verbatim_block_begin:
    return parseVerbatimBlock();
  case tok::verbatim_line_name:
    return parseVerbatimLine();
  // The remaining kinds are consumed by the routines above or handled by
  // the caller (eof, newline), so they must not reach this dispatcher.
  case tok::eof:
  case tok::newline:
  case tok::verbatim_block_line:
  case tok::verbatim_block_end:
  case tok::verbatim_line_text:
  case tok::html_ident:
  case tok::html_equals:
  case tok::html_quoted_string:
  case tok::html_greater:
  case tok::html_slash_greater:
    llvm_unreachable("should not see this token");
  }
  llvm_unreachable("bogus token kind");
}
/// Top-level entry point: parses the entire comment into a sequence of
/// block content nodes and wraps them in a FullComment.
FullComment *Parser::parseFullComment() {
  // Newline tokens carry no content of their own; skip any run of them.
  auto SkipNewlines = [this] {
    while (Tok.is(tok::newline))
      consumeToken();
  };

  // Skip newlines at the beginning of the comment.
  SkipNewlines();

  SmallVector<BlockContentComment *, 8> BlockList;
  while (Tok.isNot(tok::eof)) {
    BlockList.push_back(parseBlockContent());
    // Skip extra newlines after paragraph end.
    SkipNewlines();
  }

  return S.actOnFullComment(S.copyArray(llvm::makeArrayRef(BlockList)));
}
} // end namespace comments
} // end namespace clang
//===--- ASTDiagnostic.cpp - Diagnostic Printing Hooks for AST Nodes ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a diagnostic formatting hook for AST elements.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
// //
///////////////////////////////////////////////////////////////////////////////
using namespace clang;
/// Returns a desugared version of the QualType, and marks \p ShouldAKA as
/// true whenever we remove significant sugar from the type.  Sugar users
/// expect to see (elaborated types, va_list, vector typedefs, the magic
/// Objective-C types, HLSL-injected typedefs) is deliberately preserved.
static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
  QualifierCollector QC;
  while (true) {
    const Type *Ty = QC.strip(QT);
    // Don't aka just because we saw an elaborated type...
    if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(Ty)) {
      QT = ET->desugar();
      continue;
    }
    // ... or a paren type ...
    if (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
      QT = PT->desugar();
      continue;
    }
    // ...or a substituted template type parameter ...
    if (const SubstTemplateTypeParmType *ST =
          dyn_cast<SubstTemplateTypeParmType>(Ty)) {
      QT = ST->desugar();
      continue;
    }
    // ...or an attributed type...
    if (const AttributedType *AT = dyn_cast<AttributedType>(Ty)) {
      QT = AT->desugar();
      continue;
    }
    // ...or an adjusted type...
    if (const AdjustedType *AT = dyn_cast<AdjustedType>(Ty)) {
      QT = AT->desugar();
      continue;
    }
    // ... or an auto type.
    if (const AutoType *AT = dyn_cast<AutoType>(Ty)) {
      if (!AT->isSugared())
        break;
      QT = AT->desugar();
      continue;
    }
    // Don't desugar template specializations, unless it's an alias template.
    if (const TemplateSpecializationType *TST
          = dyn_cast<TemplateSpecializationType>(Ty))
      if (!TST->isTypeAlias())
        break;
    // Don't desugar magic Objective-C types.
    if (QualType(Ty,0) == Context.getObjCIdType() ||
        QualType(Ty,0) == Context.getObjCClassType() ||
        QualType(Ty,0) == Context.getObjCSelType() ||
        QualType(Ty,0) == Context.getObjCProtoType())
      break;
    // Don't desugar va_list.
    if (QualType(Ty,0) == Context.getBuiltinVaListType())
      break;
    // HLSL Change: don't desugar typedefs that were injected into AST.
    // Injected typedefs are recognized by their invalid source location.
    if (Ty->getTypeClass() == Type::TypeClass::Typedef) {
      const TypedefType* tt = cast<TypedefType>(Ty);
      if (tt->getDecl()->getLocation() == SourceLocation()) {
        break;
      }
    }
    // Otherwise, do a single-step desugar.  The macro below expands to a
    // case per concrete type class, each peeling one level of sugar.
    QualType Underlying;
    bool IsSugar = false;
    switch (Ty->getTypeClass()) {
#define ABSTRACT_TYPE(Class, Base)
#define TYPE(Class, Base) \
    case Type::Class: { \
      const Class##Type *CTy = cast<Class##Type>(Ty); \
      if (CTy->isSugared()) { \
        IsSugar = true; \
        Underlying = CTy->desugar(); \
      } \
      break; \
    }
#include "clang/AST/TypeNodes.def"
    }
    // If it wasn't sugared, we're done.
    if (!IsSugar)
      break;
    // If the desugared type is a vector type, we don't want to expand
    // it, it will turn into an attribute mess. People want their "vec4".
    if (isa<VectorType>(Underlying))
      break;
    // Don't desugar through the primary typedef of an anonymous type.
    if (const TagType *UTT = Underlying->getAs<TagType>())
      if (const TypedefType *QTT = dyn_cast<TypedefType>(QT))
        if (UTT->getDecl()->getTypedefNameForAnonDecl() == QTT->getDecl())
          break;
    // Record that we actually looked through an opaque type here.
    ShouldAKA = true;
    QT = Underlying;
  }
  // If we have a pointer-like type, desugar the pointee as well.
  // FIXME: Handle other pointer-like types.
  if (const PointerType *Ty = QT->getAs<PointerType>()) {
    QT = Context.getPointerType(Desugar(Context, Ty->getPointeeType(),
                                        ShouldAKA));
  } else if (const auto *Ty = QT->getAs<ObjCObjectPointerType>()) {
    QT = Context.getObjCObjectPointerType(Desugar(Context, Ty->getPointeeType(),
                                                  ShouldAKA));
  } else if (const LValueReferenceType *Ty = QT->getAs<LValueReferenceType>()) {
    QT = Context.getLValueReferenceType(Desugar(Context, Ty->getPointeeType(),
                                                ShouldAKA));
  } else if (const RValueReferenceType *Ty = QT->getAs<RValueReferenceType>()) {
    QT = Context.getRValueReferenceType(Desugar(Context, Ty->getPointeeType(),
                                                ShouldAKA));
  } else if (const auto *Ty = QT->getAs<ObjCObjectType>()) {
    if (Ty->getBaseType().getTypePtr() != Ty && !ShouldAKA) {
      QualType BaseType = Desugar(Context, Ty->getBaseType(), ShouldAKA);
      QT = Context.getObjCObjectType(BaseType, Ty->getTypeArgsAsWritten(),
                                     llvm::makeArrayRef(Ty->qual_begin(),
                                                        Ty->getNumProtocols()),
                                     Ty->isKindOfTypeAsWritten());
    }
  }
  // Reapply the qualifiers stripped while walking through the sugar.
  return QC.apply(Context, QT);
}
/// \brief Convert the given type to a string suitable for printing as part of
/// a diagnostic.
///
/// There are four main criteria when determining whether we should have an
/// a.k.a. clause when pretty-printing a type:
///
/// 1) Some types provide very minimal sugar that doesn't impede the
/// user's understanding --- for example, elaborated type
/// specifiers. If this is all the sugar we see, we don't want an
/// a.k.a. clause.
/// 2) Some types are technically sugared but are much more familiar
/// when seen in their sugared form --- for example, va_list,
/// vector types, and the magic Objective C types. We don't
/// want to desugar these, even if we do produce an a.k.a. clause.
/// 3) Some types may have already been desugared previously in this diagnostic.
/// if this is the case, doing another "aka" would just be clutter.
/// 4) Two different types within the same diagnostic have the same output
/// string. In this case, force an a.k.a with the desugared type when
/// doing so will provide additional information.
///
/// \param Context the context in which the type was allocated
/// \param Ty the type to print
/// \param QualTypeVals pointer values to QualTypes which are used in the
/// diagnostic message
static std::string
ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
                            ArrayRef<DiagnosticsEngine::ArgumentValue> PrevArgs,
                            ArrayRef<intptr_t> QualTypeVals) {
  // FIXME: Playing with std::string is really slow.
  bool ForceAKA = false;
  QualType CanTy = Ty.getCanonicalType();
  std::string S = Ty.getAsString(Context.getPrintingPolicy());
  std::string CanS = CanTy.getAsString(Context.getPrintingPolicy());
  // Criterion 4 (see the function comment above): if another type in the
  // same diagnostic prints identically to this one but differs canonically,
  // force an a.k.a. clause so the two can be told apart.
  for (unsigned I = 0, E = QualTypeVals.size(); I != E; ++I) {
    QualType CompareTy =
        QualType::getFromOpaquePtr(reinterpret_cast<void*>(QualTypeVals[I]));
    if (CompareTy.isNull())
      continue;
    if (CompareTy == Ty)
      continue;  // Same types
    QualType CompareCanTy = CompareTy.getCanonicalType();
    if (CompareCanTy == CanTy)
      continue;  // Same canonical types
    std::string CompareS = CompareTy.getAsString(Context.getPrintingPolicy());
    bool ShouldAKA = false;
    QualType CompareDesugar = Desugar(Context, CompareTy, ShouldAKA);
    std::string CompareDesugarStr =
        CompareDesugar.getAsString(Context.getPrintingPolicy());
    if (CompareS != S && CompareDesugarStr != S)
      continue;  // The type string is different than the comparison string
                 // and the desugared comparison string.
    std::string CompareCanS =
        CompareCanTy.getAsString(Context.getPrintingPolicy());
    if (CompareCanS == CanS)
      continue;  // No new info from canonical type
    ForceAKA = true;
    break;
  }
  // Check to see if we already desugared this type in this
  // diagnostic.  If so, don't do it again.
  bool Repeated = false;
  for (unsigned i = 0, e = PrevArgs.size(); i != e; ++i) {
    // TODO: Handle ak_declcontext case.
    if (PrevArgs[i].first == DiagnosticsEngine::ak_qualtype) {
      void *Ptr = (void*)PrevArgs[i].second;
      QualType PrevTy(QualType::getFromOpaquePtr(Ptr));
      if (PrevTy == Ty) {
        Repeated = true;
        break;
      }
    }
  }
  // Consider producing an a.k.a. clause if removing all the direct
  // sugar gives us something "significantly different".
  if (!Repeated) {
    bool ShouldAKA = false;
    QualType DesugaredTy = Desugar(Context, Ty, ShouldAKA);
    if (ShouldAKA || ForceAKA) {
      if (DesugaredTy == Ty) {
        DesugaredTy = Ty.getCanonicalType();
      }
      std::string akaStr = DesugaredTy.getAsString(Context.getPrintingPolicy());
      if (akaStr != S) {
        S = "'" + S + "' (aka '" + akaStr + "')";
        return S;
      }
    }
    // Give some additional info on vector types. These are either not desugared
    // or displaying complex __attribute__ expressions so add details of the
    // type and element count.
    if (Ty->isVectorType()) {
      const VectorType *VTy = Ty->getAs<VectorType>();
      std::string DecoratedString;
      llvm::raw_string_ostream OS(DecoratedString);
      const char *Values = VTy->getNumElements() > 1 ? "values" : "value";
      OS << "'" << S << "' (vector of " << VTy->getNumElements() << " '"
         << VTy->getElementType().getAsString(Context.getPrintingPolicy())
         << "' " << Values << ")";
      return OS.str();
    }
  }
  // No a.k.a. clause: just quote the printed type.
  S = "'" + S + "'";
  return S;
}
static bool FormatTemplateTypeDiff(ASTContext &Context, QualType FromType,
QualType ToType, bool PrintTree,
bool PrintFromType, bool ElideType,
bool ShowColors, raw_ostream &OS);
/// FormatASTNodeDiagnosticArgument - Render one AST-based diagnostic
/// argument (type, declaration name, nested-name-specifier, declaration
/// context, attribute, or a type pair for template diffing) into Output.
/// Cookie is the ASTContext that was registered with the DiagnosticsEngine.
void clang::FormatASTNodeDiagnosticArgument(
DiagnosticsEngine::ArgumentKind Kind,
intptr_t Val,
StringRef Modifier,
StringRef Argument,
ArrayRef<DiagnosticsEngine::ArgumentValue> PrevArgs,
SmallVectorImpl<char> &Output,
void *Cookie,
ArrayRef<intptr_t> QualTypeVals) {
ASTContext &Context = *static_cast<ASTContext*>(Cookie);
// Remember where this argument's text begins so quotes can later be
// inserted around exactly the text appended below.
size_t OldEnd = Output.size();
llvm::raw_svector_ostream OS(Output);
bool NeedQuotes = true;
switch (Kind) {
default: llvm_unreachable("unknown ArgumentKind");
case DiagnosticsEngine::ak_qualtype_pair: {
// First try a template-aware diff of the two types; if they are not
// comparable templates, fall back to printing a single type below.
TemplateDiffTypes &TDT = *reinterpret_cast<TemplateDiffTypes*>(Val);
QualType FromType =
QualType::getFromOpaquePtr(reinterpret_cast<void*>(TDT.FromType));
QualType ToType =
QualType::getFromOpaquePtr(reinterpret_cast<void*>(TDT.ToType));
if (FormatTemplateTypeDiff(Context, FromType, ToType, TDT.PrintTree,
TDT.PrintFromType, TDT.ElideType,
TDT.ShowColors, OS)) {
// Tree output carries its own formatting, so suppress quoting there.
NeedQuotes = !TDT.PrintTree;
TDT.TemplateDiffUsed = true;
break;
}
// Don't fall-back during tree printing. The caller will handle
// this case.
if (TDT.PrintTree)
return;
// Attempting to do a template diff on non-templates. Set the variables
// and continue with regular type printing of the appropriate type.
Val = TDT.PrintFromType ? TDT.FromType : TDT.ToType;
Modifier = StringRef();
Argument = StringRef();
// Fall through
LLVM_FALLTHROUGH; // HLSL Change
}
case DiagnosticsEngine::ak_qualtype: {
assert(Modifier.empty() && Argument.empty() &&
"Invalid modifier for QualType argument");
QualType Ty(QualType::getFromOpaquePtr(reinterpret_cast<void*>(Val)));
// ConvertTypeToDiagnosticString supplies its own quoting.
OS << ConvertTypeToDiagnosticString(Context, Ty, PrevArgs, QualTypeVals);
NeedQuotes = false;
break;
}
case DiagnosticsEngine::ak_declarationname: {
// Objective-C selectors are prefixed with '+' (class method) or '-'
// (instance method) per the usual notation.
if (Modifier == "objcclass" && Argument.empty())
OS << '+';
else if (Modifier == "objcinstance" && Argument.empty())
OS << '-';
else
assert(Modifier.empty() && Argument.empty() &&
"Invalid modifier for DeclarationName argument");
OS << DeclarationName::getFromOpaqueInteger(Val);
break;
}
case DiagnosticsEngine::ak_nameddecl: {
// The "q" modifier requests a fully qualified name.
bool Qualified;
if (Modifier == "q" && Argument.empty())
Qualified = true;
else {
assert(Modifier.empty() && Argument.empty() &&
"Invalid modifier for NamedDecl* argument");
Qualified = false;
}
const NamedDecl *ND = reinterpret_cast<const NamedDecl*>(Val);
ND->getNameForDiagnostic(OS, Context.getPrintingPolicy(), Qualified);
break;
}
case DiagnosticsEngine::ak_nestednamespec: {
NestedNameSpecifier *NNS = reinterpret_cast<NestedNameSpecifier*>(Val);
NNS->print(OS, Context.getPrintingPolicy());
NeedQuotes = false;
break;
}
case DiagnosticsEngine::ak_declcontext: {
DeclContext *DC = reinterpret_cast<DeclContext *> (Val);
assert(DC && "Should never have a null declaration context");
NeedQuotes = false;
// FIXME: Get the strings for DeclContext from some localized place
if (DC->isTranslationUnit()) {
if (Context.getLangOpts().CPlusPlus)
OS << "the global namespace";
else
OS << "the global scope";
} else if (DC->isClosure()) {
OS << "block literal";
} else if (isLambdaCallOperator(DC)) {
OS << "lambda expression";
} else if (TypeDecl *Type = dyn_cast<TypeDecl>(DC)) {
OS << ConvertTypeToDiagnosticString(Context,
Context.getTypeDeclType(Type),
PrevArgs, QualTypeVals);
} else {
assert(isa<NamedDecl>(DC) && "Expected a NamedDecl");
NamedDecl *ND = cast<NamedDecl>(DC);
if (isa<NamespaceDecl>(ND))
OS << "namespace ";
else if (isa<ObjCMethodDecl>(ND))
OS << "method ";
else if (isa<FunctionDecl>(ND))
OS << "function ";
OS << '\'';
ND->getNameForDiagnostic(OS, Context.getPrintingPolicy(), true);
OS << '\'';
}
break;
}
case DiagnosticsEngine::ak_attr: {
const Attr *At = reinterpret_cast<Attr *>(Val);
assert(At && "Received null Attr object!");
// Quotes are emitted explicitly here, so skip the generic quoting.
OS << '\'' << At->getSpelling() << '\'';
NeedQuotes = false;
break;
}
}
OS.flush();
// Wrap the newly appended text in single quotes when requested.
if (NeedQuotes) {
Output.insert(Output.begin()+OldEnd, '\'');
Output.push_back('\'');
}
}
/// TemplateDiff - A class that constructs a pretty string for a pair of
/// QualTypes. For the pair of types, a diff tree will be created containing
/// all the information about the templates and template arguments. Afterwards,
/// the tree is transformed to a string according to the options passed in.
namespace {
class TemplateDiff {
/// Context - The ASTContext which is used for comparing template arguments.
ASTContext &Context;
/// Policy - Used during expression printing.
PrintingPolicy Policy;
/// ElideType - Option to elide identical types.
bool ElideType;
/// PrintTree - Format output string as a tree.
bool PrintTree;
/// ShowColor - Diagnostics support color, so bolding will be used.
bool ShowColor;
/// FromType - When single type printing is selected, this is the type to be
/// be printed. When tree printing is selected, this type will show up first
/// in the tree.
QualType FromType;
/// ToType - The type that FromType is compared to. Only in tree printing
/// will this type be outputed.
QualType ToType;
/// OS - The stream used to construct the output strings.
raw_ostream &OS;
/// IsBold - Keeps track of the bold formatting for the output string.
bool IsBold;
/// DiffTree - A tree representation of the differences between two types.
/// Nodes are stored in a flat vector (FlatTree) and linked by indices;
/// index 0 is the root and also serves as the null link for
/// NextNode/ChildNode.
class DiffTree {
public:
/// DiffKind - The difference in a DiffNode and which fields are used.
enum DiffKind {
/// Incomplete or invalid node.
Invalid,
/// Another level of templates, uses TemplateDecl and Qualifiers
Template,
/// Type difference, uses QualType
Type,
/// Expression difference, uses Expr
Expression,
/// Template argument difference, uses TemplateDecl
TemplateTemplate,
/// Integer difference, uses APSInt and Expr
Integer,
/// Declaration difference, uses ValueDecl
Declaration
};
private:
/// DiffNode - The root node stores the original type. Each child node
/// stores template arguments of their parents. For templated types, the
/// template decl is also stored.
struct DiffNode {
DiffKind Kind;
/// NextNode - The index of the next sibling node or 0.
unsigned NextNode;
/// ChildNode - The index of the first child node or 0.
unsigned ChildNode;
/// ParentNode - The index of the parent node.
unsigned ParentNode;
/// FromType, ToType - The type arguments.
QualType FromType, ToType;
/// FromExpr, ToExpr - The expression arguments.
Expr *FromExpr, *ToExpr;
/// FromNullPtr, ToNullPtr - Whether the template argument is a nullptr.
bool FromNullPtr, ToNullPtr;
/// FromTD, ToTD - The template decl for template template
/// arguments or the type arguments that are templates.
TemplateDecl *FromTD, *ToTD;
/// FromQual, ToQual - Qualifiers for template types.
Qualifiers FromQual, ToQual;
/// FromInt, ToInt - APSInt's for integral arguments.
llvm::APSInt FromInt, ToInt;
/// IsValidFromInt, IsValidToInt - Whether the APSInt's are valid.
bool IsValidFromInt, IsValidToInt;
/// FromValueDecl, ToValueDecl - Whether the argument is a decl.
ValueDecl *FromValueDecl, *ToValueDecl;
/// FromAddressOf, ToAddressOf - Whether the ValueDecl needs an address of
/// operator before it.
bool FromAddressOf, ToAddressOf;
/// FromDefault, ToDefault - Whether the argument is a default argument.
bool FromDefault, ToDefault;
/// Same - Whether the two arguments evaluate to the same value.
bool Same;
DiffNode(unsigned ParentNode = 0)
: Kind(Invalid), NextNode(0), ChildNode(0), ParentNode(ParentNode),
FromType(), ToType(), FromExpr(nullptr), ToExpr(nullptr),
FromNullPtr(false), ToNullPtr(false),
FromTD(nullptr), ToTD(nullptr), IsValidFromInt(false),
IsValidToInt(false), FromValueDecl(nullptr), ToValueDecl(nullptr),
FromAddressOf(false), ToAddressOf(false), FromDefault(false),
ToDefault(false), Same(false) {}
};
/// FlatTree - A flattened tree used to store the DiffNodes.
SmallVector<DiffNode, 16> FlatTree;
/// CurrentNode - The index of the current node being used.
unsigned CurrentNode;
/// NextFreeNode - The index of the next unused node. Used when creating
/// child nodes.
unsigned NextFreeNode;
/// ReadNode - The index of the current node being read.
unsigned ReadNode;
public:
/// DiffTree - Creates the tree with a root node at index 0; writing
/// starts at the root, so the first free index is 1.
DiffTree() :
CurrentNode(0), NextFreeNode(1) {
FlatTree.push_back(DiffNode());
}
// Node writing functions.
/// SetNode - Sets FromTD and ToTD of the current node.
void SetNode(TemplateDecl *FromTD, TemplateDecl *ToTD) {
FlatTree[CurrentNode].FromTD = FromTD;
FlatTree[CurrentNode].ToTD = ToTD;
}
/// SetNode - Sets FromType and ToType of the current node.
void SetNode(QualType FromType, QualType ToType) {
FlatTree[CurrentNode].FromType = FromType;
FlatTree[CurrentNode].ToType = ToType;
}
/// SetNode - Set FromExpr and ToExpr of the current node.
void SetNode(Expr *FromExpr, Expr *ToExpr) {
FlatTree[CurrentNode].FromExpr = FromExpr;
FlatTree[CurrentNode].ToExpr = ToExpr;
}
/// SetNode - Set FromInt and ToInt of the current node.
void SetNode(llvm::APSInt FromInt, llvm::APSInt ToInt,
bool IsValidFromInt, bool IsValidToInt) {
FlatTree[CurrentNode].FromInt = FromInt;
FlatTree[CurrentNode].ToInt = ToInt;
FlatTree[CurrentNode].IsValidFromInt = IsValidFromInt;
FlatTree[CurrentNode].IsValidToInt = IsValidToInt;
}
/// SetNode - Set FromQual and ToQual of the current node.
void SetNode(Qualifiers FromQual, Qualifiers ToQual) {
FlatTree[CurrentNode].FromQual = FromQual;
FlatTree[CurrentNode].ToQual = ToQual;
}
/// SetNode - Set FromValueDecl and ToValueDecl of the current node.
void SetNode(ValueDecl *FromValueDecl, ValueDecl *ToValueDecl,
bool FromAddressOf, bool ToAddressOf) {
FlatTree[CurrentNode].FromValueDecl = FromValueDecl;
FlatTree[CurrentNode].ToValueDecl = ToValueDecl;
FlatTree[CurrentNode].FromAddressOf = FromAddressOf;
FlatTree[CurrentNode].ToAddressOf = ToAddressOf;
}
/// SetSame - Sets the same flag of the current node.
void SetSame(bool Same) {
FlatTree[CurrentNode].Same = Same;
}
/// SetNullPtr - Sets the NullPtr flags of the current node.
void SetNullPtr(bool FromNullPtr, bool ToNullPtr) {
FlatTree[CurrentNode].FromNullPtr = FromNullPtr;
FlatTree[CurrentNode].ToNullPtr = ToNullPtr;
}
/// SetDefault - Sets FromDefault and ToDefault flags of the current node.
void SetDefault(bool FromDefault, bool ToDefault) {
FlatTree[CurrentNode].FromDefault = FromDefault;
FlatTree[CurrentNode].ToDefault = ToDefault;
}
/// SetKind - Sets the current node's type.
void SetKind(DiffKind Kind) {
FlatTree[CurrentNode].Kind = Kind;
}
/// Up - Changes the node to the parent of the current node.
void Up() {
CurrentNode = FlatTree[CurrentNode].ParentNode;
}
/// AddNode - Adds a child node to the current node, then sets that node
/// as the current node.
void AddNode() {
FlatTree.push_back(DiffNode(CurrentNode));
// Note: the reference is taken after push_back, so it is not
// invalidated by a potential reallocation.
DiffNode &Node = FlatTree[CurrentNode];
if (Node.ChildNode == 0) {
// If a child node doesn't exist, add one.
Node.ChildNode = NextFreeNode;
} else {
// If a child node exists, find the last child node and add a
// next node to it.
unsigned i;
for (i = Node.ChildNode; FlatTree[i].NextNode != 0;
i = FlatTree[i].NextNode) {
}
FlatTree[i].NextNode = NextFreeNode;
}
CurrentNode = NextFreeNode;
++NextFreeNode;
}
// Node reading functions.
/// StartTraverse - Prepares the tree for recursive traversal. The write
/// API is not used after this point; reading starts at the root node.
void StartTraverse() {
ReadNode = 0;
CurrentNode = NextFreeNode;
NextFreeNode = 0;
}
/// Parent - Move the current read node to its parent.
void Parent() {
ReadNode = FlatTree[ReadNode].ParentNode;
}
/// GetNode - Gets the FromType and ToType.
void GetNode(QualType &FromType, QualType &ToType) {
FromType = FlatTree[ReadNode].FromType;
ToType = FlatTree[ReadNode].ToType;
}
/// GetNode - Gets the FromExpr and ToExpr.
void GetNode(Expr *&FromExpr, Expr *&ToExpr) {
FromExpr = FlatTree[ReadNode].FromExpr;
ToExpr = FlatTree[ReadNode].ToExpr;
}
/// GetNode - Gets the FromTD and ToTD.
void GetNode(TemplateDecl *&FromTD, TemplateDecl *&ToTD) {
FromTD = FlatTree[ReadNode].FromTD;
ToTD = FlatTree[ReadNode].ToTD;
}
/// GetNode - Gets the FromInt and ToInt.
void GetNode(llvm::APSInt &FromInt, llvm::APSInt &ToInt,
bool &IsValidFromInt, bool &IsValidToInt) {
FromInt = FlatTree[ReadNode].FromInt;
ToInt = FlatTree[ReadNode].ToInt;
IsValidFromInt = FlatTree[ReadNode].IsValidFromInt;
IsValidToInt = FlatTree[ReadNode].IsValidToInt;
}
/// GetNode - Gets the FromQual and ToQual.
void GetNode(Qualifiers &FromQual, Qualifiers &ToQual) {
FromQual = FlatTree[ReadNode].FromQual;
ToQual = FlatTree[ReadNode].ToQual;
}
/// GetNode - Gets the FromValueDecl and ToValueDecl.
void GetNode(ValueDecl *&FromValueDecl, ValueDecl *&ToValueDecl,
bool &FromAddressOf, bool &ToAddressOf) {
FromValueDecl = FlatTree[ReadNode].FromValueDecl;
ToValueDecl = FlatTree[ReadNode].ToValueDecl;
FromAddressOf = FlatTree[ReadNode].FromAddressOf;
ToAddressOf = FlatTree[ReadNode].ToAddressOf;
}
/// NodeIsSame - Returns true if the arguments are the same.
bool NodeIsSame() {
return FlatTree[ReadNode].Same;
}
/// HasChildren - Returns true if the node has children.
bool HasChildren() {
return FlatTree[ReadNode].ChildNode != 0;
}
/// MoveToChild - Moves from the current node to its child.
void MoveToChild() {
ReadNode = FlatTree[ReadNode].ChildNode;
}
/// AdvanceSibling - If there is a next sibling, advance to it and return
/// true. Otherwise, return false.
bool AdvanceSibling() {
if (FlatTree[ReadNode].NextNode == 0)
return false;
ReadNode = FlatTree[ReadNode].NextNode;
return true;
}
/// HasNextSibling - Return true if the node has a next sibling.
bool HasNextSibling() {
return FlatTree[ReadNode].NextNode != 0;
}
/// FromNullPtr - Returns true if the from argument is null.
bool FromNullPtr() {
return FlatTree[ReadNode].FromNullPtr;
}
/// ToNullPtr - Returns true if the to argument is null.
bool ToNullPtr() {
return FlatTree[ReadNode].ToNullPtr;
}
/// FromDefault - Return true if the from argument is the default.
bool FromDefault() {
return FlatTree[ReadNode].FromDefault;
}
/// ToDefault - Return true if the to argument is the default.
bool ToDefault() {
return FlatTree[ReadNode].ToDefault;
}
/// Empty - Returns true if the tree has no information.
bool Empty() {
return GetKind() == Invalid;
}
/// GetKind - Returns the current node's type.
DiffKind GetKind() {
return FlatTree[ReadNode].Kind;
}
};
DiffTree Tree;
/// TSTiterator - an iterator that is used to enter a
/// TemplateSpecializationType and read TemplateArguments inside template
/// parameter packs in order with the rest of the TemplateArguments.
struct TSTiterator {
typedef const TemplateArgument& reference;
typedef const TemplateArgument* pointer;
/// TST - the template specialization whose arguments this iterator
/// traverses over.
const TemplateSpecializationType *TST;
/// DesugarTST - desugared template specialization used to extract
/// default argument information
const TemplateSpecializationType *DesugarTST;
/// Index - the index of the template argument in TST.
unsigned Index;
/// CurrentTA - if CurrentTA is not the same as EndTA, then CurrentTA
/// points to a TemplateArgument within a parameter pack.
TemplateArgument::pack_iterator CurrentTA;
/// EndTA - the end iterator of a parameter pack
TemplateArgument::pack_iterator EndTA;
/// TSTiterator - Constructs an iterator and sets it to the first template
/// argument.
TSTiterator(ASTContext &Context, const TemplateSpecializationType *TST)
: TST(TST),
DesugarTST(GetTemplateSpecializationType(Context, TST->desugar())),
Index(0), CurrentTA(nullptr), EndTA(nullptr) {
if (isEnd()) return;
// Set to first template argument. If not a parameter pack, done.
TemplateArgument TA = TST->getArg(0);
if (TA.getKind() != TemplateArgument::Pack) return;
// Start looking into the parameter pack.
CurrentTA = TA.pack_begin();
EndTA = TA.pack_end();
// Found a valid template argument.
if (CurrentTA != EndTA) return;
// Parameter pack is empty, use the increment to get to a valid
// template argument.
++(*this);
}
/// isEnd - Returns true if the iterator is one past the end.
bool isEnd() const {
return Index >= TST->getNumArgs();
}
/// &operator++ - Increment the iterator to the next template argument.
TSTiterator &operator++() {
// After the end, Index should be the default argument position in
// DesugarTST, if it exists.
if (isEnd()) {
++Index;
return *this;
}
// If in a parameter pack, advance in the parameter pack.
if (CurrentTA != EndTA) {
++CurrentTA;
if (CurrentTA != EndTA)
return *this;
}
// Loop until a template argument is found, or the end is reached.
while (true) {
// Advance to the next template argument. Break if reached the end.
if (++Index == TST->getNumArgs()) break;
// If the TemplateArgument is not a parameter pack, done.
TemplateArgument TA = TST->getArg(Index);
if (TA.getKind() != TemplateArgument::Pack) break;
// Handle parameter packs.
CurrentTA = TA.pack_begin();
EndTA = TA.pack_end();
// Stop if the parameter pack has an argument; an empty pack makes
// the loop advance to the next template argument.
if (CurrentTA != EndTA) break;
}
return *this;
}
/// operator* - Returns the appropriate TemplateArgument.
reference operator*() const {
assert(!isEnd() && "Index exceeds number of arguments.");
if (CurrentTA == EndTA)
return TST->getArg(Index);
else
return *CurrentTA;
}
/// operator-> - Allow access to the underlying TemplateArgument.
pointer operator->() const {
return &operator*();
}
/// getDesugar - Returns the deduced template argument from DesugarTST.
/// Used after the iterator has been advanced past the end of TST's own
/// arguments, where Index addresses the corresponding default argument.
reference getDesugar() const {
return DesugarTST->getArg(Index);
}
};
// These functions build up the template diff tree, including functions to
// retrieve and compare template arguments.
/// GetTemplateSpecializationType - If Ty is (sugar for) a template
/// specialization, return it. For a record type that is a class template
/// specialization, synthesize an equivalent TemplateSpecializationType.
/// Returns null otherwise.
static const TemplateSpecializationType *GetTemplateSpecializationType(
    ASTContext &Context, QualType Ty) {
  const TemplateSpecializationType *Spec =
      Ty->getAs<TemplateSpecializationType>();
  if (Spec)
    return Spec;
  // Not written as a specialization; check for a specialized record decl.
  const RecordType *RecordTy = Ty->getAs<RecordType>();
  const ClassTemplateSpecializationDecl *SpecDecl =
      RecordTy
          ? dyn_cast<ClassTemplateSpecializationDecl>(RecordTy->getDecl())
          : nullptr;
  if (!SpecDecl)
    return nullptr;
  // Rebuild a sugared specialization type from the decl's arguments.
  QualType Rebuilt = Context.getTemplateSpecializationType(
      TemplateName(SpecDecl->getSpecializedTemplate()),
      SpecDecl->getTemplateArgs().data(), SpecDecl->getTemplateArgs().size(),
      Ty.getLocalUnqualifiedType().getCanonicalType());
  return Rebuilt->getAs<TemplateSpecializationType>();
}
/// DiffTypes - Fills a DiffNode with information about a type difference.
void DiffTypes(const TSTiterator &FromIter, const TSTiterator &ToIter,
TemplateTypeParmDecl *FromDefaultTypeDecl,
TemplateTypeParmDecl *ToDefaultTypeDecl) {
QualType FromType = GetType(FromIter, FromDefaultTypeDecl);
QualType ToType = GetType(ToIter, ToDefaultTypeDecl);
Tree.SetNode(FromType, ToType);
// An exhausted iterator with a non-null type means the type came from a
// default template argument.
Tree.SetDefault(FromIter.isEnd() && !FromType.isNull(),
ToIter.isEnd() && !ToType.isNull());
Tree.SetKind(DiffTree::Type);
if (FromType.isNull() || ToType.isNull())
return;
if (Context.hasSameType(FromType, ToType)) {
Tree.SetSame(true);
return;
}
// The types differ. If both are specializations of the same template,
// recurse to diff their arguments instead of treating them as opaque.
const TemplateSpecializationType *FromArgTST =
GetTemplateSpecializationType(Context, FromType);
if (!FromArgTST)
return;
const TemplateSpecializationType *ToArgTST =
GetTemplateSpecializationType(Context, ToType);
if (!ToArgTST)
return;
if (!hasSameTemplate(FromArgTST, ToArgTST))
return;
// Keep only the qualifiers applied on top of the specialization itself.
Qualifiers FromQual = FromType.getQualifiers(),
ToQual = ToType.getQualifiers();
FromQual -= QualType(FromArgTST, 0).getQualifiers();
ToQual -= QualType(ToArgTST, 0).getQualifiers();
Tree.SetNode(FromArgTST->getTemplateName().getAsTemplateDecl(),
ToArgTST->getTemplateName().getAsTemplateDecl());
Tree.SetNode(FromQual, ToQual);
Tree.SetKind(DiffTree::Template);
DiffTemplate(FromArgTST, ToArgTST);
}
/// DiffTemplateTemplates - Fills a DiffNode with information about a
/// template template difference.
void DiffTemplateTemplates(const TSTiterator &FromIter,
const TSTiterator &ToIter,
TemplateTemplateParmDecl *FromDefaultTemplateDecl,
TemplateTemplateParmDecl *ToDefaultTemplateDecl) {
TemplateDecl *FromDecl = GetTemplateDecl(FromIter, FromDefaultTemplateDecl);
TemplateDecl *ToDecl = GetTemplateDecl(ToIter, ToDefaultTemplateDecl);
Tree.SetNode(FromDecl, ToDecl);
Tree.SetSame(FromDecl && ToDecl &&
FromDecl->getCanonicalDecl() == ToDecl->getCanonicalDecl());
Tree.SetDefault(FromIter.isEnd() && FromDecl, ToIter.isEnd() && ToDecl);
Tree.SetKind(DiffTree::TemplateTemplate);
}
/// InitializeNonTypeDiffVariables - Helper function for DiffNonTypes.
/// Pulls one non-type template argument apart into its possible
/// representations: an integral value, a declaration, a null pointer, or
/// an expression (possibly taken from the parameter's default argument).
static void InitializeNonTypeDiffVariables(
    ASTContext &Context, const TSTiterator &Iter,
    NonTypeTemplateParmDecl *Default, bool &HasInt, bool &HasValueDecl,
    bool &IsNullPtr, Expr *&E, llvm::APSInt &Value, ValueDecl *&VD) {
  const bool HasArg = !Iter.isEnd();
  HasInt = HasArg && Iter->getKind() == TemplateArgument::Integral;
  HasValueDecl = HasArg && Iter->getKind() == TemplateArgument::Declaration;
  IsNullPtr = HasArg && Iter->getKind() == TemplateArgument::NullPtr;
  if (HasInt) {
    Value = Iter->getAsIntegral();
  } else if (HasValueDecl) {
    VD = Iter->getAsDecl();
  } else if (!IsNullPtr) {
    // Fall back to the expression form (or the default argument).
    E = GetExpr(Iter, Default);
  }
  // A pointer-typed expression may still evaluate to a null pointer.
  if (E && Default->getType()->isPointerType())
    IsNullPtr = CheckForNullPtr(Context, E);
}
/// NeedsAddressOf - Helper function for DiffNonTypes. Returns true if the
/// ValueDecl needs a '&' when printed.
static bool NeedsAddressOf(ValueDecl *VD, Expr *E,
                           NonTypeTemplateParmDecl *Default) {
  if (!VD)
    return false;
  if (E) {
    // Mirror the '&' only when the written expression used one.
    const UnaryOperator *AddrOp = dyn_cast<UnaryOperator>(E->IgnoreParens());
    return AddrOp && AddrOp->getOpcode() == UO_AddrOf;
  }
  // No expression: a non-reference parameter binds via its address.
  return !Default->getType()->isReferenceType();
}
/// DiffNonTypes - Handles any template parameters not handled by DiffTypes
/// or DiffTemplateTemplates, such as integer and declaration parameters.
void DiffNonTypes(const TSTiterator &FromIter, const TSTiterator &ToIter,
NonTypeTemplateParmDecl *FromDefaultNonTypeDecl,
NonTypeTemplateParmDecl *ToDefaultNonTypeDecl) {
Expr *FromExpr = nullptr, *ToExpr = nullptr;
llvm::APSInt FromInt, ToInt;
ValueDecl *FromValueDecl = nullptr, *ToValueDecl = nullptr;
bool HasFromInt = false, HasToInt = false, HasFromValueDecl = false,
HasToValueDecl = false, FromNullPtr = false, ToNullPtr = false;
InitializeNonTypeDiffVariables(Context, FromIter, FromDefaultNonTypeDecl,
HasFromInt, HasFromValueDecl, FromNullPtr,
FromExpr, FromInt, FromValueDecl);
InitializeNonTypeDiffVariables(Context, ToIter, ToDefaultNonTypeDecl,
HasToInt, HasToValueDecl, ToNullPtr,
ToExpr, ToInt, ToValueDecl);
assert(((!HasFromInt && !HasToInt) ||
(!HasFromValueDecl && !HasToValueDecl)) &&
"Template argument cannot be both integer and declaration");
// Neither side is an integral or declaration argument: both are
// expressions (possibly from defaults).
if (!HasFromInt && !HasToInt && !HasFromValueDecl && !HasToValueDecl) {
Tree.SetNode(FromExpr, ToExpr);
Tree.SetDefault(FromIter.isEnd() && FromExpr, ToIter.isEnd() && ToExpr);
// For integral parameters, prefer comparing evaluated values over
// comparing the expressions themselves.
if (FromDefaultNonTypeDecl->getType()->isIntegralOrEnumerationType()) {
if (FromExpr)
HasFromInt = GetInt(Context, FromIter, FromExpr, FromInt,
FromDefaultNonTypeDecl->getType());
if (ToExpr)
HasToInt = GetInt(Context, ToIter, ToExpr, ToInt,
ToDefaultNonTypeDecl->getType());
}
if (HasFromInt && HasToInt) {
Tree.SetNode(FromInt, ToInt, HasFromInt, HasToInt);
Tree.SetSame(FromInt == ToInt);
Tree.SetKind(DiffTree::Integer);
} else if (HasFromInt || HasToInt) {
// Only one side evaluated to an integer; they cannot be the same.
Tree.SetNode(FromInt, ToInt, HasFromInt, HasToInt);
Tree.SetSame(false);
Tree.SetKind(DiffTree::Integer);
} else {
Tree.SetSame(IsEqualExpr(Context, FromExpr, ToExpr) ||
(FromNullPtr && ToNullPtr));
Tree.SetNullPtr(FromNullPtr, ToNullPtr);
Tree.SetKind(DiffTree::Expression);
}
return;
}
// At least one side is an integral argument; evaluate any remaining
// expression side so both can be compared as integers.
if (HasFromInt || HasToInt) {
if (!HasFromInt && FromExpr)
HasFromInt = GetInt(Context, FromIter, FromExpr, FromInt,
FromDefaultNonTypeDecl->getType());
if (!HasToInt && ToExpr)
HasToInt = GetInt(Context, ToIter, ToExpr, ToInt,
ToDefaultNonTypeDecl->getType());
Tree.SetNode(FromInt, ToInt, HasFromInt, HasToInt);
if (HasFromInt && HasToInt) {
Tree.SetSame(FromInt == ToInt);
} else {
Tree.SetSame(false);
}
Tree.SetDefault(FromIter.isEnd() && HasFromInt,
ToIter.isEnd() && HasToInt);
Tree.SetKind(DiffTree::Integer);
return;
}
// Otherwise treat both sides as declarations, extracting the referenced
// decl from an expression when needed.
if (!HasFromValueDecl && FromExpr)
FromValueDecl = GetValueDecl(FromIter, FromExpr);
if (!HasToValueDecl && ToExpr)
ToValueDecl = GetValueDecl(ToIter, ToExpr);
bool FromAddressOf =
NeedsAddressOf(FromValueDecl, FromExpr, FromDefaultNonTypeDecl);
bool ToAddressOf =
NeedsAddressOf(ToValueDecl, ToExpr, ToDefaultNonTypeDecl);
Tree.SetNullPtr(FromNullPtr, ToNullPtr);
Tree.SetNode(FromValueDecl, ToValueDecl, FromAddressOf, ToAddressOf);
Tree.SetSame(FromValueDecl && ToValueDecl &&
FromValueDecl->getCanonicalDecl() ==
ToValueDecl->getCanonicalDecl());
Tree.SetDefault(FromIter.isEnd() && FromValueDecl,
ToIter.isEnd() && ToValueDecl);
Tree.SetKind(DiffTree::Declaration);
}
/// DiffTemplate - recursively visits template arguments and stores the
/// argument info into a tree.
void DiffTemplate(const TemplateSpecializationType *FromTST,
const TemplateSpecializationType *ToTST) {
// Begin descent into diffing template tree.
TemplateParameterList *ParamsFrom =
FromTST->getTemplateName().getAsTemplateDecl()->getTemplateParameters();
TemplateParameterList *ParamsTo =
ToTST->getTemplateName().getAsTemplateDecl()->getTemplateParameters();
unsigned TotalArgs = 0;
// Walk both argument lists in lock-step; TSTiterator also yields
// default arguments after the explicit ones are exhausted.
for (TSTiterator FromIter(Context, FromTST), ToIter(Context, ToTST);
!FromIter.isEnd() || !ToIter.isEnd(); ++TotalArgs) {
Tree.AddNode();
// Get the parameter at index TotalArgs. If index is larger
// than the total number of parameters, then there is an
// argument pack, so re-use the last parameter.
unsigned FromParamIndex = std::min(TotalArgs, ParamsFrom->size() - 1);
unsigned ToParamIndex = std::min(TotalArgs, ParamsTo->size() - 1);
NamedDecl *FromParamND = ParamsFrom->getParam(FromParamIndex);
NamedDecl *ToParamND = ParamsTo->getParam(ToParamIndex);
// Dispatch on the parameter kind; exactly one of the three dyn_cast
// pairs below succeeds for matching parameter kinds.
TemplateTypeParmDecl *FromDefaultTypeDecl =
dyn_cast<TemplateTypeParmDecl>(FromParamND);
TemplateTypeParmDecl *ToDefaultTypeDecl =
dyn_cast<TemplateTypeParmDecl>(ToParamND);
if (FromDefaultTypeDecl && ToDefaultTypeDecl)
DiffTypes(FromIter, ToIter, FromDefaultTypeDecl, ToDefaultTypeDecl);
TemplateTemplateParmDecl *FromDefaultTemplateDecl =
dyn_cast<TemplateTemplateParmDecl>(FromParamND);
TemplateTemplateParmDecl *ToDefaultTemplateDecl =
dyn_cast<TemplateTemplateParmDecl>(ToParamND);
if (FromDefaultTemplateDecl && ToDefaultTemplateDecl)
DiffTemplateTemplates(FromIter, ToIter, FromDefaultTemplateDecl,
ToDefaultTemplateDecl);
NonTypeTemplateParmDecl *FromDefaultNonTypeDecl =
dyn_cast<NonTypeTemplateParmDecl>(FromParamND);
NonTypeTemplateParmDecl *ToDefaultNonTypeDecl =
dyn_cast<NonTypeTemplateParmDecl>(ToParamND);
if (FromDefaultNonTypeDecl && ToDefaultNonTypeDecl)
DiffNonTypes(FromIter, ToIter, FromDefaultNonTypeDecl,
ToDefaultNonTypeDecl);
++FromIter;
++ToIter;
Tree.Up();
}
}
/// makeTemplateList - Dump every template alias into the vector.
static void makeTemplateList(
    SmallVectorImpl<const TemplateSpecializationType *> &TemplateList,
    const TemplateSpecializationType *TST) {
  // Walk the chain of type aliases, recording each specialization; stop
  // once a non-alias specialization (or no specialization at all) is hit.
  for (const TemplateSpecializationType *Cur = TST; Cur;) {
    TemplateList.push_back(Cur);
    Cur = Cur->isTypeAlias()
              ? Cur->getAliasedType()->getAs<TemplateSpecializationType>()
              : nullptr;
  }
}
/// hasSameBaseTemplate - Returns true when the base templates are the same,
/// even if the template arguments are not.
static bool hasSameBaseTemplate(const TemplateSpecializationType *FromTST,
const TemplateSpecializationType *ToTST) {
return FromTST->getTemplateName().getAsTemplateDecl()->getCanonicalDecl() ==
ToTST->getTemplateName().getAsTemplateDecl()->getCanonicalDecl();
}
/// hasSameTemplate - Returns true if both types are specialized from the
/// same template declaration. If they come from different template aliases,
/// do a parallel ascension search to determine the highest template alias in
/// common and set the arguments to them.
static bool hasSameTemplate(const TemplateSpecializationType *&FromTST,
const TemplateSpecializationType *&ToTST) {
// Check the top templates if they are the same.
if (hasSameBaseTemplate(FromTST, ToTST))
return true;
// Create vectors of template aliases.
SmallVector<const TemplateSpecializationType*, 1> FromTemplateList,
ToTemplateList;
makeTemplateList(FromTemplateList, FromTST);
makeTemplateList(ToTemplateList, ToTST);
// Iterate in reverse: from the most-desugared template up toward the
// aliases as written.
SmallVectorImpl<const TemplateSpecializationType *>::reverse_iterator
FromIter = FromTemplateList.rbegin(), FromEnd = FromTemplateList.rend(),
ToIter = ToTemplateList.rbegin(), ToEnd = ToTemplateList.rend();
// Check if the lowest template types are the same. If not, return.
if (!hasSameBaseTemplate(*FromIter, *ToIter))
return false;
// Begin searching up the template aliases. The bottom most template
// matches so move up until one pair does not match. Use the template
// right before that one.
for (; FromIter != FromEnd && ToIter != ToEnd; ++FromIter, ++ToIter) {
if (!hasSameBaseTemplate(*FromIter, *ToIter))
break;
}
// The [-1] accesses are safe: the bottom-most pair matched above, so the
// loop advanced both iterators at least once before stopping.
FromTST = FromIter[-1];
ToTST = ToIter[-1];
return true;
}
/// GetType - Retrieves the template type arguments, including default
/// arguments.
static QualType GetType(const TSTiterator &Iter,
                        TemplateTypeParmDecl *DefaultTTPD) {
  // An explicit argument always wins.
  if (!Iter.isEnd())
    return Iter->getAsType();
  // Parameter packs have no default argument to fall back on.
  if (DefaultTTPD->isParameterPack())
    return QualType();
  QualType DefaultType = DefaultTTPD->getDefaultArgument();
  // Dependent defaults need the deduced argument from the desugared type.
  return DefaultType->isDependentType() ? Iter.getDesugar().getAsType()
                                        : DefaultType;
}
/// GetExpr - Retrieves the template expression argument, including default
/// arguments.
static Expr *GetExpr(const TSTiterator &Iter,
                     NonTypeTemplateParmDecl *DefaultNTTPD) {
  Expr *Result = nullptr;
  if (!Iter.isEnd())
    Result = Iter->getAsExpr();
  else if (!DefaultNTTPD->isParameterPack())
    // No argument supplied; use the declared default (packs have none).
    Result = DefaultNTTPD->getDefaultArgument();
  // Strip substituted-parameter wrappers to expose the real expression.
  while (Result && isa<SubstNonTypeTemplateParmExpr>(Result))
    Result = cast<SubstNonTypeTemplateParmExpr>(Result)->getReplacement();
  return Result;
}
/// GetInt - Retrieves the template integer argument, including evaluating
/// default arguments. If the value comes from an expression, extend the
/// APSInt to size of IntegerType to match the behavior in
/// Sema::CheckTemplateArgument
static bool GetInt(ASTContext &Context, const TSTiterator &Iter,
Expr *ArgExpr, llvm::APSInt &Int, QualType IntegerType) {
// Default, value-dependent expressions require fetching
// from the desugared TemplateArgument, otherwise expression needs to
// be evaluatable.
if (Iter.isEnd() && ArgExpr->isValueDependent()) {
switch (Iter.getDesugar().getKind()) {
case TemplateArgument::Integral:
Int = Iter.getDesugar().getAsIntegral();
return true;
case TemplateArgument::Expression:
ArgExpr = Iter.getDesugar().getAsExpr();
Int = ArgExpr->EvaluateKnownConstInt(Context);
// Match Sema::CheckTemplateArgument's extension to the parameter width.
Int = Int.extOrTrunc(Context.getTypeSize(IntegerType));
return true;
default:
llvm_unreachable("Unexpected template argument kind");
}
} else if (ArgExpr->isEvaluatable(Context)) {
Int = ArgExpr->EvaluateKnownConstInt(Context);
Int = Int.extOrTrunc(Context.getTypeSize(IntegerType));
return true;
}
// Not a constant integer; caller falls back to expression comparison.
return false;
}
/// GetValueDecl - Retrieves the template Decl argument, including
/// default expression argument.
static ValueDecl *GetValueDecl(const TSTiterator &Iter, Expr *ArgExpr) {
// Default, value-dependent expressions require fetching
// from the desugared TemplateArgument
if (Iter.isEnd() && ArgExpr->isValueDependent())
switch (Iter.getDesugar().getKind()) {
case TemplateArgument::Declaration:
return Iter.getDesugar().getAsDecl();
case TemplateArgument::Expression:
ArgExpr = Iter.getDesugar().getAsExpr();
return cast<DeclRefExpr>(ArgExpr)->getDecl();
default:
llvm_unreachable("Unexpected template argument kind");
}
DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ArgExpr);
if (!DRE) {
// Not a direct reference; look through an address-of expression.
// NOTE(review): the cast assumes the operand is a DeclRefExpr (the
// '&decl' form) — any other unary operand would assert.
UnaryOperator *UO = dyn_cast<UnaryOperator>(ArgExpr->IgnoreParens());
if (!UO)
return nullptr;
DRE = cast<DeclRefExpr>(UO->getSubExpr());
}
return DRE->getDecl();
}
/// CheckForNullPtr - returns true if the expression can be evaluated as
/// a null pointer
static bool CheckForNullPtr(ASTContext &Context, Expr *E) {
  assert(E && "Expected expression");
  E = E->IgnoreParenCasts();
  // A direct null pointer constant is the easy case.
  if (E->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull))
    return true;
  // Otherwise look through a variable reference whose initializer is null.
  DeclRefExpr *Ref = dyn_cast<DeclRefExpr>(E);
  VarDecl *Var = Ref ? dyn_cast<VarDecl>(Ref->getDecl()) : nullptr;
  if (!Var || !Var->hasInit())
    return false;
  Expr *Init = Var->getInit()->IgnoreParenCasts();
  return Init->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull);
}
/// GetTemplateDecl - Retrieves the template template arguments, including
/// default arguments.
static TemplateDecl *GetTemplateDecl(const TSTiterator &Iter,
                                     TemplateTemplateParmDecl *DefaultTTPD) {
  // An explicit argument always wins.
  if (!Iter.isEnd())
    return Iter->getAsTemplate().getAsTemplateDecl();
  // Parameter packs have no default argument to fall back on.
  if (DefaultTTPD->isParameterPack())
    return nullptr;
  TemplateArgument DefaultArg = DefaultTTPD->getDefaultArgument().getArgument();
  if (DefaultArg.getKind() == TemplateArgument::Null)
    return nullptr;
  return DefaultArg.getAsTemplate().getAsTemplateDecl();
}
/// IsEqualExpr - Returns true if the expressions evaluate to the same value.
static bool IsEqualExpr(ASTContext &Context, Expr *FromExpr, Expr *ToExpr) {
if (FromExpr == ToExpr)
return true;
if (!FromExpr || !ToExpr)
return false;
// If either side names a declaration, compare by identity of the
// referenced decls rather than by value.
DeclRefExpr *FromDRE = dyn_cast<DeclRefExpr>(FromExpr->IgnoreParens()),
*ToDRE = dyn_cast<DeclRefExpr>(ToExpr->IgnoreParens());
if (FromDRE || ToDRE) {
if (!FromDRE || !ToDRE)
return false;
return FromDRE->getDecl() == ToDRE->getDecl();
}
// When either expression cannot be constant-evaluated, fall back to a
// structural comparison of the ASTs via profiling.
Expr::EvalResult FromResult, ToResult;
if (!FromExpr->EvaluateAsRValue(FromResult, Context) ||
!ToExpr->EvaluateAsRValue(ToResult, Context)) {
llvm::FoldingSetNodeID FromID, ToID;
FromExpr->Profile(FromID, Context, true);
ToExpr->Profile(ToID, Context, true);
return FromID == ToID;
}
// Both evaluated; compare the resulting values by kind.
APValue &FromVal = FromResult.Val;
APValue &ToVal = ToResult.Val;
if (FromVal.getKind() != ToVal.getKind()) return false;
switch (FromVal.getKind()) {
case APValue::Int:
return FromVal.getInt() == ToVal.getInt();
case APValue::LValue: {
// LValues compare equal when both are null or both designate the
// same base declaration.
APValue::LValueBase FromBase = FromVal.getLValueBase();
APValue::LValueBase ToBase = ToVal.getLValueBase();
if (FromBase.isNull() && ToBase.isNull())
return true;
if (FromBase.isNull() || ToBase.isNull())
return false;
return FromBase.get<const ValueDecl*>() ==
ToBase.get<const ValueDecl*>();
}
case APValue::MemberPointer:
return FromVal.getMemberPointerDecl() == ToVal.getMemberPointerDecl();
default:
llvm_unreachable("Unknown template argument expression.");
}
}
  // These functions convert the tree representation of the template
  // differences into the internal character vector.
  /// TreeToString - Converts the Tree object into a character stream which
  /// will later be turned into the output string.
  /// \param Indent current tree depth; only used when PrintTree is set, to
  /// place each node on its own indented line.
  void TreeToString(int Indent = 1) {
    if (PrintTree) {
      OS << '\n';
      OS.indent(2 * Indent);
      ++Indent;
    }
    // Handle cases where the difference is not templates with different
    // arguments.
    switch (Tree.GetKind()) {
      case DiffTree::Invalid:
        llvm_unreachable("Template diffing failed with bad DiffNode");
      case DiffTree::Type: {
        QualType FromType, ToType;
        Tree.GetNode(FromType, ToType);
        PrintTypeNames(FromType, ToType, Tree.FromDefault(), Tree.ToDefault(),
                       Tree.NodeIsSame());
        return;
      }
      case DiffTree::Expression: {
        Expr *FromExpr, *ToExpr;
        Tree.GetNode(FromExpr, ToExpr);
        PrintExpr(FromExpr, ToExpr, Tree.FromNullPtr(), Tree.ToNullPtr(),
                  Tree.FromDefault(), Tree.ToDefault(), Tree.NodeIsSame());
        return;
      }
      case DiffTree::TemplateTemplate: {
        TemplateDecl *FromTD, *ToTD;
        Tree.GetNode(FromTD, ToTD);
        PrintTemplateTemplate(FromTD, ToTD, Tree.FromDefault(),
                              Tree.ToDefault(), Tree.NodeIsSame());
        return;
      }
      case DiffTree::Integer: {
        llvm::APSInt FromInt, ToInt;
        Expr *FromExpr, *ToExpr;
        bool IsValidFromInt, IsValidToInt;
        Tree.GetNode(FromExpr, ToExpr);
        Tree.GetNode(FromInt, ToInt, IsValidFromInt, IsValidToInt);
        PrintAPSInt(FromInt, ToInt, IsValidFromInt, IsValidToInt,
                    FromExpr, ToExpr, Tree.FromDefault(), Tree.ToDefault(),
                    Tree.NodeIsSame());
        return;
      }
      case DiffTree::Declaration: {
        ValueDecl *FromValueDecl, *ToValueDecl;
        bool FromAddressOf, ToAddressOf;
        Tree.GetNode(FromValueDecl, ToValueDecl, FromAddressOf, ToAddressOf);
        PrintValueDecl(FromValueDecl, ToValueDecl, FromAddressOf, ToAddressOf,
                       Tree.FromNullPtr(), Tree.ToNullPtr(), Tree.FromDefault(),
                       Tree.ToDefault(), Tree.NodeIsSame());
        return;
      }
      case DiffTree::Template: {
        // Node is root of template.  Recurse on children.
        TemplateDecl *FromTD, *ToTD;
        Tree.GetNode(FromTD, ToTD);
        if (!Tree.HasChildren()) {
          // If we're dealing with a template specialization with zero
          // arguments, there are no children; special-case this.
          OS << FromTD->getNameAsString() << "<>";
          return;
        }
        Qualifiers FromQual, ToQual;
        Tree.GetNode(FromQual, ToQual);
        PrintQualifiers(FromQual, ToQual);
        OS << FromTD->getNameAsString() << '<';
        Tree.MoveToChild();
        // Walk the sibling list; runs of identical arguments are collapsed
        // into a single "[N * ...]" marker when ElideType is set.
        unsigned NumElideArgs = 0;
        do {
          if (ElideType) {
            if (Tree.NodeIsSame()) {
              ++NumElideArgs;
              continue;
            }
            if (NumElideArgs > 0) {
              PrintElideArgs(NumElideArgs, Indent);
              NumElideArgs = 0;
              OS << ", ";
            }
          }
          TreeToString(Indent);
          if (Tree.HasNextSibling())
            OS << ", ";
        } while (Tree.AdvanceSibling());
        // Flush a trailing run of elided arguments.
        if (NumElideArgs > 0)
          PrintElideArgs(NumElideArgs, Indent);
        Tree.Parent();
        OS << ">";
        return;
      }
    }
  }
  // To signal to the text printer that a certain text needs to be bolded,
  // a special character is injected into the character stream which the
  // text printer will later strip out.
  /// Bold - Start bolding text.  The marker is only emitted when ShowColor
  /// is set; IsBold tracks state so Bold/Unbold calls must strictly alternate.
  void Bold() {
    assert(!IsBold && "Attempting to bold text that is already bold.");
    IsBold = true;
    if (ShowColor)
      OS << ToggleHighlight;
  }
  /// Unbold - Stop bolding text.  Must be paired with a preceding Bold().
  void Unbold() {
    assert(IsBold && "Attempting to remove bold from unbold text.");
    IsBold = false;
    if (ShowColor)
      OS << ToggleHighlight;
  }
  // Functions to print out the arguments and highlighting the difference.
  /// PrintTypeNames - prints the typenames, bolding differences.  Will detect
  /// typenames that are the same and attempt to disambiguate them by using
  /// canonical typenames.
  void PrintTypeNames(QualType FromType, QualType ToType,
                      bool FromDefault, bool ToDefault, bool Same) {
    assert((!FromType.isNull() || !ToType.isNull()) &&
           "Only one template argument may be missing.");
    if (Same) {
      OS << FromType.getAsString(Policy);
      return;
    }
    // Same underlying type, different qualifiers: print the qualifier diff
    // followed by the shared unqualified type.
    if (!FromType.isNull() && !ToType.isNull() &&
        FromType.getLocalUnqualifiedType() ==
        ToType.getLocalUnqualifiedType()) {
      Qualifiers FromQual = FromType.getLocalQualifiers(),
                 ToQual = ToType.getLocalQualifiers();
      PrintQualifiers(FromQual, ToQual);
      FromType.getLocalUnqualifiedType().print(OS, Policy);
      return;
    }
    std::string FromTypeStr = FromType.isNull() ? "(no argument)"
                                                : FromType.getAsString(Policy);
    std::string ToTypeStr = ToType.isNull() ? "(no argument)"
                                            : ToType.getAsString(Policy);
    // Switch to canonical typename if it is better.
    // TODO: merge this with other aka printing above.
    if (FromTypeStr == ToTypeStr) {
      // The sugared names are identical; fall back to canonical names if
      // those differ, so the user can actually see the difference.
      std::string FromCanTypeStr =
          FromType.getCanonicalType().getAsString(Policy);
      std::string ToCanTypeStr = ToType.getCanonicalType().getAsString(Policy);
      if (FromCanTypeStr != ToCanTypeStr) {
        FromTypeStr = FromCanTypeStr;
        ToTypeStr = ToCanTypeStr;
      }
    }
    // Inline mode prints only the 'from' side; tree mode prints
    // "[from != to]".
    if (PrintTree) OS << '[';
    OS << (FromDefault ? "(default) " : "");
    Bold();
    OS << FromTypeStr;
    Unbold();
    if (PrintTree) {
      OS << " != " << (ToDefault ? "(default) " : "");
      Bold();
      OS << ToTypeStr;
      Unbold();
      OS << "]";
    }
    return;
  }
/// PrintExpr - Prints out the expr template arguments, highlighting argument
/// differences.
void PrintExpr(const Expr *FromExpr, const Expr *ToExpr, bool FromNullPtr,
bool ToNullPtr, bool FromDefault, bool ToDefault, bool Same) {
assert((FromExpr || ToExpr) &&
"Only one template argument may be missing.");
if (Same) {
PrintExpr(FromExpr, FromNullPtr);
} else if (!PrintTree) {
OS << (FromDefault ? "(default) " : "");
Bold();
PrintExpr(FromExpr, FromNullPtr);
Unbold();
} else {
OS << (FromDefault ? "[(default) " : "[");
Bold();
PrintExpr(FromExpr, FromNullPtr);
Unbold();
OS << " != " << (ToDefault ? "(default) " : "");
Bold();
PrintExpr(ToExpr, ToNullPtr);
Unbold();
OS << ']';
}
}
/// PrintExpr - Actual formatting and printing of expressions.
void PrintExpr(const Expr *E, bool NullPtr = false) {
if (E) {
E->printPretty(OS, nullptr, Policy);
return;
}
if (NullPtr) {
OS << "nullptr";
return;
}
OS << "(no argument)";
}
  /// PrintTemplateTemplate - Handles printing of template template arguments,
  /// highlighting argument differences.
  void PrintTemplateTemplate(TemplateDecl *FromTD, TemplateDecl *ToTD,
                             bool FromDefault, bool ToDefault, bool Same) {
    assert((FromTD || ToTD) && "Only one template argument may be missing.");
    std::string FromName = FromTD ? FromTD->getName() : "(no argument)";
    std::string ToName = ToTD ? ToTD->getName() : "(no argument)";
    // Identical short names: disambiguate with fully qualified names.
    if (FromTD && ToTD && FromName == ToName) {
      FromName = FromTD->getQualifiedNameAsString();
      ToName = ToTD->getQualifiedNameAsString();
    }
    if (Same) {
      OS << "template " << FromTD->getNameAsString();
    } else if (!PrintTree) {
      // Inline mode: only the 'from' side, bolded.
      OS << (FromDefault ? "(default) template " : "template ");
      Bold();
      OS << FromName;
      Unbold();
    } else {
      // Tree mode: "[template from != template to]".
      OS << (FromDefault ? "[(default) template " : "[template ");
      Bold();
      OS << FromName;
      Unbold();
      OS << " != " << (ToDefault ? "(default) template " : "template ");
      Bold();
      OS << ToName;
      Unbold();
      OS << ']';
    }
  }
  /// PrintAPSInt - Handles printing of integral arguments, highlighting
  /// argument differences.  An invalid integer means that side of the
  /// argument is missing; the originating Expr (if any) is printed instead.
  void PrintAPSInt(llvm::APSInt FromInt, llvm::APSInt ToInt,
                   bool IsValidFromInt, bool IsValidToInt, Expr *FromExpr,
                   Expr *ToExpr, bool FromDefault, bool ToDefault, bool Same) {
    assert((IsValidFromInt || IsValidToInt) &&
           "Only one integral argument may be missing.");
    if (Same) {
      OS << FromInt.toString(10);
    } else if (!PrintTree) {
      OS << (FromDefault ? "(default) " : "");
      PrintAPSInt(FromInt, FromExpr, IsValidFromInt);
    } else {
      OS << (FromDefault ? "[(default) " : "[");
      PrintAPSInt(FromInt, FromExpr, IsValidFromInt);
      OS << " != " << (ToDefault ? "(default) " : "");
      PrintAPSInt(ToInt, ToExpr, IsValidToInt);
      OS << ']';
    }
  }
/// PrintAPSInt - If valid, print the APSInt. If the expression is
/// gives more information, print it too.
void PrintAPSInt(llvm::APSInt Val, Expr *E, bool Valid) {
Bold();
if (Valid) {
if (HasExtraInfo(E)) {
PrintExpr(E);
Unbold();
OS << " aka ";
Bold();
}
OS << Val.toString(10);
} else if (E) {
PrintExpr(E);
} else {
OS << "(no argument)";
}
Unbold();
}
/// HasExtraInfo - Returns true if E is not an integer literal or the
/// negation of an integer literal
bool HasExtraInfo(Expr *E) {
if (!E) return false;
E = E->IgnoreImpCasts();
if (isa<IntegerLiteral>(E)) return false;
if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E))
if (UO->getOpcode() == UO_Minus)
if (isa<IntegerLiteral>(UO->getSubExpr()))
return false;
return true;
}
void PrintValueDecl(ValueDecl *VD, bool AddressOf, bool NullPtr) {
if (VD) {
if (AddressOf)
OS << "&";
OS << VD->getName();
return;
}
if (NullPtr) {
OS << "nullptr";
return;
}
OS << "(no argument)";
}
  /// PrintDecl - Handles printing of Decl arguments, highlighting
  /// argument differences.
  void PrintValueDecl(ValueDecl *FromValueDecl, ValueDecl *ToValueDecl,
                      bool FromAddressOf, bool ToAddressOf, bool FromNullPtr,
                      bool ToNullPtr, bool FromDefault, bool ToDefault,
                      bool Same) {
    assert((FromValueDecl || FromNullPtr || ToValueDecl || ToNullPtr) &&
           "Only one Decl argument may be NULL");
    if (Same) {
      PrintValueDecl(FromValueDecl, FromAddressOf, FromNullPtr);
    } else if (!PrintTree) {
      // Inline mode: only the 'from' side, bolded.
      OS << (FromDefault ? "(default) " : "");
      Bold();
      PrintValueDecl(FromValueDecl, FromAddressOf, FromNullPtr);
      Unbold();
    } else {
      // Tree mode: "[from != to]" with both sides bolded.
      OS << (FromDefault ? "[(default) " : "[");
      Bold();
      PrintValueDecl(FromValueDecl, FromAddressOf, FromNullPtr);
      Unbold();
      OS << " != " << (ToDefault ? "(default) " : "");
      Bold();
      PrintValueDecl(ToValueDecl, ToAddressOf, ToNullPtr);
      Unbold();
      OS << ']';
    }
  }
// Prints the appropriate placeholder for elided template arguments.
void PrintElideArgs(unsigned NumElideArgs, unsigned Indent) {
if (PrintTree) {
OS << '\n';
for (unsigned i = 0; i < Indent; ++i)
OS << " ";
}
if (NumElideArgs == 0) return;
if (NumElideArgs == 1)
OS << "[...]";
else
OS << "[" << NumElideArgs << " * ...]";
}
  // Prints and highlights differences in Qualifiers.
  /// PrintQualifiers - Print the qualifier sets of both sides, bolding
  /// anything that differs; shared qualifiers print unbolded.
  void PrintQualifiers(Qualifiers FromQual, Qualifiers ToQual) {
    // Both types have no qualifiers
    if (FromQual.empty() && ToQual.empty())
      return;
    // Both types have same qualifiers
    if (FromQual == ToQual) {
      PrintQualifier(FromQual, /*ApplyBold*/false);
      return;
    }
    // Find common qualifiers and strip them from FromQual and ToQual.
    // Note: removeCommonQualifiers mutates both arguments in place.
    Qualifiers CommonQual = Qualifiers::removeCommonQualifiers(FromQual,
                                                               ToQual);
    // The qualifiers are printed before the template name.
    // Inline printing:
    // The common qualifiers are printed.  Then, qualifiers only in this type
    // are printed and highlighted.  Finally, qualifiers only in the other
    // type are printed and highlighted inside parentheses after "missing".
    // Tree printing:
    // Qualifiers are printed next to each other, inside brackets, and
    // separated by "!=".  The printing order is:
    // common qualifiers, highlighted from qualifiers, "!=",
    // common qualifiers, highlighted to qualifiers
    if (PrintTree) {
      OS << "[";
      if (CommonQual.empty() && FromQual.empty()) {
        Bold();
        OS << "(no qualifiers) ";
        Unbold();
      } else {
        PrintQualifier(CommonQual, /*ApplyBold*/false);
        PrintQualifier(FromQual, /*ApplyBold*/true);
      }
      OS << "!= ";
      if (CommonQual.empty() && ToQual.empty()) {
        Bold();
        OS << "(no qualifiers)";
        Unbold();
      } else {
        PrintQualifier(CommonQual, /*ApplyBold*/false,
                       /*appendSpaceIfNonEmpty*/!ToQual.empty());
        PrintQualifier(ToQual, /*ApplyBold*/true,
                       /*appendSpaceIfNonEmpty*/false);
      }
      OS << "] ";
    } else {
      PrintQualifier(CommonQual, /*ApplyBold*/false);
      PrintQualifier(FromQual, /*ApplyBold*/true);
    }
  }
  /// PrintQualifier - Print (and optionally bold) one qualifier set; empty
  /// sets are skipped entirely.
  void PrintQualifier(Qualifiers Q, bool ApplyBold,
                      bool AppendSpaceIfNonEmpty = true) {
    if (Q.empty()) return;
    if (ApplyBold) Bold();
    Q.print(OS, Policy, AppendSpaceIfNonEmpty);
    if (ApplyBold) Unbold();
  }
public:
  /// TemplateDiff - Constructor.  When PrintFromType is false, FromType and
  /// ToType are swapped on storage so that the type to be printed first is
  /// always held in the FromType member.
  TemplateDiff(raw_ostream &OS, ASTContext &Context, QualType FromType,
               QualType ToType, bool PrintTree, bool PrintFromType,
               bool ElideType, bool ShowColor)
    : Context(Context),
      Policy(Context.getLangOpts()),
      ElideType(ElideType),
      PrintTree(PrintTree),
      ShowColor(ShowColor),
      // When printing a single type, the FromType is the one printed.
      FromType(PrintFromType ? FromType : ToType),
      ToType(PrintFromType ? ToType : FromType),
      OS(OS),
      IsBold(false) {
  }
  /// DiffTemplate - Start the template type diffing.  Populates the Tree;
  /// does nothing (leaving the Tree empty) unless both types are
  /// specializations of the same base template.
  void DiffTemplate() {
    Qualifiers FromQual = FromType.getQualifiers(),
               ToQual = ToType.getQualifiers();
    const TemplateSpecializationType *FromOrigTST =
        GetTemplateSpecializationType(Context, FromType);
    const TemplateSpecializationType *ToOrigTST =
        GetTemplateSpecializationType(Context, ToType);
    // Only checking templates.
    if (!FromOrigTST || !ToOrigTST)
      return;
    // Different base templates.
    if (!hasSameTemplate(FromOrigTST, ToOrigTST)) {
      return;
    }
    // Strip qualifiers already carried by the specialization itself, leaving
    // only the outer qualifiers to diff.
    FromQual -= QualType(FromOrigTST, 0).getQualifiers();
    ToQual -= QualType(ToOrigTST, 0).getQualifiers();
    Tree.SetNode(FromType, ToType);
    Tree.SetNode(FromQual, ToQual);
    Tree.SetKind(DiffTree::Template);
    // Same base template, but different arguments.
    Tree.SetNode(FromOrigTST->getTemplateName().getAsTemplateDecl(),
                 ToOrigTST->getTemplateName().getAsTemplateDecl());
    DiffTemplate(FromOrigTST, ToOrigTST);
  }
  /// Emit - When the two types given are templated types with the same
  /// base template, a string representation of the type difference will be
  /// emitted to the stream and return true.  Otherwise, return false.
  bool Emit() {
    Tree.StartTraverse();
    // An empty tree means DiffTemplate found no common base template.
    if (Tree.Empty())
      return false;
    TreeToString();
    assert(!IsBold && "Bold is applied to end of string.");
    return true;
  }
}; // end class TemplateDiff
} // end namespace
/// FormatTemplateTypeDiff - A helper static function to start the template
/// diff and return the properly formatted string.  Returns true if the diff
/// is successful.
static bool FormatTemplateTypeDiff(ASTContext &Context, QualType FromType,
                                   QualType ToType, bool PrintTree,
                                   bool PrintFromType, bool ElideType,
                                   bool ShowColors, raw_ostream &OS) {
  // Tree output always starts from the 'from' type.
  PrintFromType = PrintFromType || PrintTree;
  TemplateDiff Diff(OS, Context, FromType, ToType, PrintTree, PrintFromType,
                    ElideType, ShowColors);
  Diff.DiffTemplate();
  return Diff.Emit();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/HlslTypes.cpp | //===--- HlslTypes.cpp - Type system for HLSL ----*- C++
///////////////////////////////////////////////////////////////////////////////
// //
// HlslTypes.cpp //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
///
/// \file //
/// \brief Defines the HLSL type system interface. //
///
// //
///////////////////////////////////////////////////////////////////////////////
#include "clang/AST/HlslTypes.h"
#include "dxc/DXIL/DxilNodeProps.h"
#include "dxc/DXIL/DxilSemantic.h"
#include "dxc/Support/Global.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CanonicalType.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Type.h"
#include "clang/Sema/AttributeList.h" // conceptually ParsedAttributes
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
namespace hlsl {
/// <summary>Try to convert HLSL template vector/matrix type to
/// ExtVectorType.</summary>
/// Note: only the "vector" template is handled here; matrix types (and any
/// non-template record) yield nullptr.
const clang::ExtVectorType *
ConvertHLSLVecMatTypeToExtVectorType(const clang::ASTContext &context,
clang::QualType type) {
const Type *Ty = type.getCanonicalType().getTypePtr();
if (const RecordType *RT = dyn_cast<RecordType>(Ty)) {
if (const ClassTemplateSpecializationDecl *templateDecl =
dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl())) {
// TODO: check pointer instead of name
if (templateDecl->getName() == "vector") {
// vector<elemTy, elmSize>: build the equivalent ext_vector type.
const TemplateArgumentList &argList = templateDecl->getTemplateArgs();
const TemplateArgument &arg0 = argList[0];
const TemplateArgument &arg1 = argList[1];
QualType elemTy = arg0.getAsType();
llvm::APSInt elmSize = arg1.getAsIntegral();
return context.getExtVectorType(elemTy, elmSize.getLimitedValue())
->getAs<ExtVectorType>();
}
}
}
return nullptr;
}
/// Returns true if the type is a specialization of the HLSL "vector" or
/// "matrix" class template.
bool IsHLSLVecMatType(clang::QualType type) {
  const Type *Ty = type.getCanonicalType().getTypePtr();
  const RecordType *RT = dyn_cast<RecordType>(Ty);
  if (!RT)
    return false;
  const ClassTemplateSpecializationDecl *templateDecl =
      dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
  if (!templateDecl)
    return false;
  StringRef name = templateDecl->getName();
  return name == "vector" || name == "matrix";
}
/// Returns true if the type is a specialization of the HLSL "matrix"
/// class template.
bool IsHLSLMatType(clang::QualType type) {
  const clang::Type *Ty = type.getCanonicalType().getTypePtr();
  const RecordType *RT = dyn_cast<RecordType>(Ty);
  if (!RT)
    return false;
  const ClassTemplateSpecializationDecl *templateDecl =
      dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
  return templateDecl && templateDecl->getName() == "matrix";
}
/// Returns true if the type is a specialization of the HLSL "vector"
/// class template.
bool IsHLSLVecType(clang::QualType type) {
  const clang::Type *Ty = type.getCanonicalType().getTypePtr();
  const RecordType *RT = dyn_cast<RecordType>(Ty);
  if (!RT)
    return false;
  const ClassTemplateSpecializationDecl *templateDecl =
      dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
  return templateDecl && templateDecl->getName() == "vector";
}
/// Returns true for numeric scalars, vectors/matrices, arrays of numerics,
/// and copyable records whose fields are all numeric.
bool IsHLSLNumericOrAggregateOfNumericType(clang::QualType type) {
  const clang::Type *Ty = type.getCanonicalType().getTypePtr();
  if (isa<RecordType>(Ty)) {
    if (IsHLSLVecMatType(type))
      return true;
    return IsHLSLCopyableAnnotatableRecord(type);
  } else if (type->isArrayType()) {
    // Arrays are numeric iff their element type is.
    return IsHLSLNumericOrAggregateOfNumericType(
        QualType(type->getArrayElementTypeNoTypeQual(), 0));
  }
  // Chars can only appear as part of strings, which we don't consider numeric.
  const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty);
  return BuiltinTy != nullptr &&
         BuiltinTy->getKind() != BuiltinType::Kind::Char_S;
}
/// Returns true for user-defined record types whose fields are all numeric
/// (or aggregates of numerics).
bool IsHLSLNumericUserDefinedType(clang::QualType type) {
  const clang::Type *Ty = type.getCanonicalType().getTypePtr();
  const RecordType *RT = dyn_cast<RecordType>(Ty);
  if (!RT)
    return false;
  if (!IsUserDefinedRecordType(type))
    return false;
  // Every field must itself be numeric.
  for (auto *field : RT->getDecl()->fields())
    if (!IsHLSLNumericOrAggregateOfNumericType(field->getType()))
      return false;
  return true;
}
// In some cases we need record types that are annotatable and trivially
// copyable from outside the shader. This excludes resource types which may be
// trivially copyable inside the shader, and builtin matrix and vector types
// which can't be annotated. But includes UDTs of trivially copyable data and
// the builtin trivially copyable raytracing structs.
bool IsHLSLCopyableAnnotatableRecord(clang::QualType QT) {
  return IsHLSLNumericUserDefinedType(QT) ||
         IsHLSLBuiltinRayAttributeStruct(QT);
}
/// Returns true for the two built-in raytracing record types that are
/// trivially copyable across the shader boundary.
bool IsHLSLBuiltinRayAttributeStruct(clang::QualType QT) {
  QT = QT.getCanonicalType();
  const RecordType *RT = dyn_cast<RecordType>(QT.getTypePtr());
  if (!RT)
    return false;
  StringRef name = RT->getDecl()->getName();
  return name == "BuiltInTriangleIntersectionAttributes" || name == "RayDesc";
}
// Aggregate types are arrays and user-defined structs
bool IsHLSLAggregateType(clang::QualType type) {
  clang::QualType canonical = type.getCanonicalType();
  return isa<clang::ArrayType>(canonical) ||
         IsUserDefinedRecordType(canonical);
}
/// Determine the node I/O kind of a work-graph entry parameter.
/// Arrays of node records are unwrapped to their element type first.
/// Returns true (and sets nodeKind) only when the parameter is a valid
/// node record type.
bool GetHLSLNodeIORecordType(const ParmVarDecl *parmDecl, NodeFlags &nodeKind) {
  clang::QualType paramTy = parmDecl->getType().getCanonicalType();
  if (auto arrayType = dyn_cast<ConstantArrayType>(paramTy))
    paramTy = arrayType->getElementType();
  nodeKind = NodeFlags(GetNodeIOType(paramTy));
  return nodeKind.IsValidNodeKind();
}
/// For HLSL vector/matrix specializations, return the element type (the
/// first template argument); any other type is returned unchanged.
clang::QualType GetElementTypeOrType(clang::QualType type) {
  const RecordType *RT = type->getAs<RecordType>();
  if (!RT)
    return type;
  const ClassTemplateSpecializationDecl *templateDecl =
      dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
  if (!templateDecl)
    return type;
  // TODO: check pointer instead of name
  StringRef name = templateDecl->getName();
  if (name != "vector" && name != "matrix")
    return type;
  return templateDecl->getTemplateArgs()[0].getAsType();
}
/// Walks the chain of type attributes looking for an explicit row_major or
/// column_major annotation.  Returns true when one is found; *pIsRowMajor
/// (if non-null) is set to true for row_major, false for column_major.
bool HasHLSLMatOrientation(clang::QualType type, bool *pIsRowMajor) {
  const AttributedType *AT = type->getAs<AttributedType>();
  while (AT) {
    AttributedType::Kind kind = AT->getAttrKind();
    switch (kind) {
    case AttributedType::attr_hlsl_row_major:
      if (pIsRowMajor)
        *pIsRowMajor = true;
      return true;
    case AttributedType::attr_hlsl_column_major:
      if (pIsRowMajor)
        *pIsRowMajor = false;
      return true;
    }
    // Step through one level of sugar to find further nested attributes.
    AT = AT->getLocallyUnqualifiedSingleStepDesugaredType()
             ->getAs<AttributedType>();
  }
  return false;
}
/// Returns the matrix orientation: the explicit annotation if present,
/// otherwise the supplied default.
bool IsHLSLMatRowMajor(clang::QualType type, bool defaultValue) {
  bool result = defaultValue;
  // Leaves 'result' untouched when no orientation attribute is present.
  HasHLSLMatOrientation(type, &result);
  return result;
}
/// Returns true if the type (or, for vectors/matrices, its element type)
/// is an unsigned integer type.
bool IsHLSLUnsigned(clang::QualType type) {
  if (type->getAs<clang::BuiltinType>() == nullptr) {
    // Not a scalar builtin: drill down to the element type first.
    type = type.getCanonicalType().getNonReferenceType();
    if (IsHLSLVecMatType(type))
      type = GetElementTypeOrType(type);
    if (type->isExtVectorType())
      type = type->getAs<clang::ExtVectorType>()->getElementType();
  }
  return type->isUnsignedIntegerType();
}
/// Returns true if Ty is one of the HLSL min-precision scalar builtins
/// (min10/min12/min16 variants).
bool IsHLSLMinPrecision(clang::QualType Ty) {
  Ty = Ty.getCanonicalType().getNonReferenceType();
  const clang::BuiltinType *BT = Ty->getAs<clang::BuiltinType>();
  if (!BT)
    return false;
  switch (BT->getKind()) {
  case clang::BuiltinType::Min12Int:
  case clang::BuiltinType::Min16Int:
  case clang::BuiltinType::Min16UInt:
  case clang::BuiltinType::Min16Float:
  case clang::BuiltinType::Min10Float:
    return true;
  default:
    return false;
  }
}
/// Detects an snorm/unorm annotation on the type.  Returns true when found;
/// *pIsSNorm (if non-null) is set to true for snorm, false for unorm.
bool HasHLSLUNormSNorm(clang::QualType type, bool *pIsSNorm) {
  // snorm/unorm can be on outer vector/matrix as well as element type
  // in the template form. Outer-most type attribute wins.
  // The following drills into attributed type for outer type,
  // setting *pIsSNorm and returning true if snorm/unorm found.
  // If not found on outer type, fall back to element type if different,
  // indicating a vector or matrix, and try again.
  clang::QualType elementType = GetElementTypeOrType(type);
  while (true) {
    // Inner loop: walk the attribute chain of the current type.
    const AttributedType *AT = type->getAs<AttributedType>();
    while (AT) {
      AttributedType::Kind kind = AT->getAttrKind();
      switch (kind) {
      case AttributedType::attr_hlsl_snorm:
        if (pIsSNorm)
          *pIsSNorm = true;
        return true;
      case AttributedType::attr_hlsl_unorm:
        if (pIsSNorm)
          *pIsSNorm = false;
        return true;
      }
      AT = AT->getLocallyUnqualifiedSingleStepDesugaredType()
               ->getAs<AttributedType>();
    }
    // Outer loop runs at most twice: once for the type itself, once for its
    // element type when the type is a vector or matrix.
    if (type == elementType)
      break;
    type = elementType;
  }
  return false;
}
/// Returns true if the type carries the 'globallycoherent' attribute
/// anywhere in its chain of type sugar.
bool HasHLSLGloballyCoherent(clang::QualType type) {
  for (const AttributedType *AT = type->getAs<AttributedType>(); AT;
       AT = AT->getLocallyUnqualifiedSingleStepDesugaredType()
                ->getAs<AttributedType>()) {
    if (AT->getAttrKind() == AttributedType::attr_hlsl_globallycoherent)
      return true;
  }
  return false;
}
/// Checks whether the pAttributes indicate a parameter is inout or out; if
/// inout, pIsIn will be set to true.
bool IsParamAttributedAsOut(clang::AttributeList *pAttributes, bool *pIsIn);
/// <summary>Gets the type with structural information (elements and shape) for
/// the given type.</summary>
/// <remarks>This function will strip lvalue/rvalue references, attributes and
/// qualifiers.</remarks>
QualType GetStructuralForm(QualType type) {
  if (type.isNull()) {
    return type;
  }
  // Peel references and attributed sugar one layer at a time.  Both
  // dyn_casts run (left to right) each iteration, so RefType/AttrType are
  // refreshed before the ternary below consults them.
  const ReferenceType *RefType = nullptr;
  const AttributedType *AttrType = nullptr;
  while ((RefType = dyn_cast<ReferenceType>(type)) ||
         (AttrType = dyn_cast<AttributedType>(type))) {
    type = RefType ? RefType->getPointeeType() : AttrType->getEquivalentType();
  }
  // Despite its name, getCanonicalTypeUnqualified may keep qualifiers on
  // array elements, so explicitly rebuild a QualType with no local
  // qualifiers here.
  return QualType(type->getCanonicalTypeUnqualified()->getTypePtr(), 0);
}
/// Returns the total number of scalar elements: rows * columns
/// (1x1 for scalars, 1xN for vectors).
uint32_t GetElementCount(clang::QualType type) {
  uint32_t rows, cols;
  GetRowsAndColsForAny(type, rows, cols);
  return rows * cols;
}
/// <summary>Returns the number of elements in the specified array
/// type.</summary>
/// Non-constant-sized arrays report 0.
uint32_t GetArraySize(clang::QualType type) {
  assert(type->isArrayType() && "otherwise caller shouldn't be invoking this");
  if (!type->isConstantArrayType())
    return 0;
  const ConstantArrayType *arrayType =
      cast<ConstantArrayType>(type->getAsArrayTypeUnsafe());
  return arrayType->getSize().getLimitedValue();
}
/// <summary>Returns the number of elements in the specified vector
/// type.</summary>
/// Asserts (rather than failing gracefully) when the type is not an HLSL
/// "vector" specialization.
uint32_t GetHLSLVecSize(clang::QualType type) {
  type = GetStructuralForm(type);
  const Type *Ty = type.getCanonicalType().getTypePtr();
  const RecordType *RT = dyn_cast<RecordType>(Ty);
  assert(RT != nullptr && "otherwise caller shouldn't be invoking this");
  const ClassTemplateSpecializationDecl *templateDecl =
      dyn_cast<ClassTemplateSpecializationDecl>(RT->getAsCXXRecordDecl());
  assert(templateDecl != nullptr &&
         "otherwise caller shouldn't be invoking this");
  assert(templateDecl->getName() == "vector" &&
         "otherwise caller shouldn't be invoking this");
  // vector<elemTy, size>: the size is template argument 1.
  const TemplateArgumentList &argList = templateDecl->getTemplateArgs();
  const TemplateArgument &arg1 = argList[1];
  llvm::APSInt vecSize = arg1.getAsIntegral();
  return vecSize.getLimitedValue();
}
/// Extracts the row and column counts from an HLSL "matrix" specialization.
/// Asserts when the type is not a matrix.
void GetRowsAndCols(clang::QualType type, uint32_t &rowCount,
                    uint32_t &colCount) {
  type = GetStructuralForm(type);
  const Type *Ty = type.getCanonicalType().getTypePtr();
  const RecordType *RT = dyn_cast<RecordType>(Ty);
  assert(RT != nullptr && "otherwise caller shouldn't be invoking this");
  const ClassTemplateSpecializationDecl *templateDecl =
      dyn_cast<ClassTemplateSpecializationDecl>(RT->getAsCXXRecordDecl());
  assert(templateDecl != nullptr &&
         "otherwise caller shouldn't be invoking this");
  assert(templateDecl->getName() == "matrix" &&
         "otherwise caller shouldn't be invoking this");
  // matrix<elemTy, rows, cols>: rows and cols are arguments 1 and 2.
  const TemplateArgumentList &argList = templateDecl->getTemplateArgs();
  const TemplateArgument &arg1 = argList[1];
  const TemplateArgument &arg2 = argList[2];
  llvm::APSInt rowSize = arg1.getAsIntegral();
  llvm::APSInt colSize = arg2.getAsIntegral();
  rowCount = rowSize.getLimitedValue();
  colCount = colSize.getLimitedValue();
}
/// Returns true if the array's element type is 'char' (signed), i.e. the
/// array form of a string literal.  Caller must pass an array type.
bool IsArrayConstantStringType(const QualType type) {
  DXASSERT_NOMSG(type->isArrayType());
  return type->getArrayElementTypeNoTypeQual()->isSpecificBuiltinType(
      BuiltinType::Char_S);
}
/// Returns true if the pointee type is 'char' (signed), i.e. a C-string
/// pointer.  Caller must pass a pointer type.
bool IsPointerStringType(const QualType type) {
  DXASSERT_NOMSG(type->isPointerType());
  return type->getPointeeType()->isSpecificBuiltinType(BuiltinType::Char_S);
}
/// Returns true for char-pointer types (the decayed string form).
bool IsStringType(const QualType type) {
  QualType canType = type.getCanonicalType();
  return canType->isPointerType() && IsPointerStringType(canType);
}
/// Returns true for char-array types (the undecayed string-literal form).
bool IsStringLiteralType(const QualType type) {
  QualType canType = type.getCanonicalType();
  return canType->isArrayType() && IsArrayConstantStringType(canType);
}
/// Reports the shape of any type: matrices give rows x cols, vectors give
/// 1 x size, arrays give 1 x elementCount (0 if not constant-sized), and
/// everything else (including char arrays / strings) gives 1 x 1.
void GetRowsAndColsForAny(QualType type, uint32_t &rowCount,
                          uint32_t &colCount) {
  assert(!type.isNull());
  type = GetStructuralForm(type);
  rowCount = 1;
  colCount = 1;
  const Type *Ty = type.getCanonicalType().getTypePtr();
  if (type->isArrayType() && !IsArrayConstantStringType(type)) {
    if (type->isConstantArrayType()) {
      const ConstantArrayType *arrayType =
          (const ConstantArrayType *)type->getAsArrayTypeUnsafe();
      colCount = arrayType->getSize().getLimitedValue();
    } else {
      // Unsized/incomplete arrays report 0 elements.
      colCount = 0;
    }
  } else if (const RecordType *RT = dyn_cast<RecordType>(Ty)) {
    if (const ClassTemplateSpecializationDecl *templateDecl =
            dyn_cast<ClassTemplateSpecializationDecl>(
                RT->getAsCXXRecordDecl())) {
      if (templateDecl->getName() == "matrix") {
        // matrix<elemTy, rows, cols>
        const TemplateArgumentList &argList = templateDecl->getTemplateArgs();
        const TemplateArgument &arg1 = argList[1];
        const TemplateArgument &arg2 = argList[2];
        llvm::APSInt rowSize = arg1.getAsIntegral();
        llvm::APSInt colSize = arg2.getAsIntegral();
        rowCount = rowSize.getLimitedValue();
        colCount = colSize.getLimitedValue();
      } else if (templateDecl->getName() == "vector") {
        // vector<elemTy, size>: reported as a single row.
        const TemplateArgumentList &argList = templateDecl->getTemplateArgs();
        const TemplateArgument &arg1 = argList[1];
        llvm::APSInt rowSize = arg1.getAsIntegral();
        colCount = rowSize.getLimitedValue();
      }
    }
  }
}
/// Convenience wrapper over GetRowsAndColsForAny for matrix types.
void GetHLSLMatRowColCount(clang::QualType type, unsigned int &row,
                           unsigned int &col) {
  GetRowsAndColsForAny(type, row, col);
}
/// Returns the element type of an HLSL "vector" specialization (template
/// argument 0).  Asserts when the type is not a vector.
clang::QualType GetHLSLVecElementType(clang::QualType type) {
  type = GetStructuralForm(type);
  const Type *Ty = type.getCanonicalType().getTypePtr();
  const RecordType *RT = dyn_cast<RecordType>(Ty);
  assert(RT != nullptr && "otherwise caller shouldn't be invoking this");
  const ClassTemplateSpecializationDecl *templateDecl =
      dyn_cast<ClassTemplateSpecializationDecl>(RT->getAsCXXRecordDecl());
  assert(templateDecl != nullptr &&
         "otherwise caller shouldn't be invoking this");
  assert(templateDecl->getName() == "vector" &&
         "otherwise caller shouldn't be invoking this");
  const TemplateArgumentList &argList = templateDecl->getTemplateArgs();
  const TemplateArgument &arg0 = argList[0];
  QualType elemTy = arg0.getAsType();
  return elemTy;
}
/// Returns the element type of an HLSL "matrix" specialization (template
/// argument 0).  Asserts when the type is not a matrix.
clang::QualType GetHLSLMatElementType(clang::QualType type) {
  type = GetStructuralForm(type);
  const Type *Ty = type.getCanonicalType().getTypePtr();
  const RecordType *RT = dyn_cast<RecordType>(Ty);
  assert(RT != nullptr && "otherwise caller shouldn't be invoking this");
  const ClassTemplateSpecializationDecl *templateDecl =
      dyn_cast<ClassTemplateSpecializationDecl>(RT->getAsCXXRecordDecl());
  assert(templateDecl != nullptr &&
         "otherwise caller shouldn't be invoking this");
  assert(templateDecl->getName() == "matrix" &&
         "otherwise caller shouldn't be invoking this");
  const TemplateArgumentList &argList = templateDecl->getTemplateArgs();
  const TemplateArgument &arg0 = argList[0];
  QualType elemTy = arg0.getAsType();
  return elemTy;
}
// TODO: Add type cache to ASTContext.
/// Returns true if the type is an "InputPatch" template specialization.
bool IsHLSLInputPatchType(QualType type) {
  const RecordType *RT = dyn_cast<RecordType>(type.getCanonicalType());
  if (!RT)
    return false;
  const ClassTemplateSpecializationDecl *templateDecl =
      dyn_cast<ClassTemplateSpecializationDecl>(RT->getAsCXXRecordDecl());
  return templateDecl && templateDecl->getName() == "InputPatch";
}
/// Returns true if the type is an "OutputPatch" template specialization.
bool IsHLSLOutputPatchType(QualType type) {
  const RecordType *RT = dyn_cast<RecordType>(type.getCanonicalType());
  if (!RT)
    return false;
  const ClassTemplateSpecializationDecl *templateDecl =
      dyn_cast<ClassTemplateSpecializationDecl>(RT->getAsCXXRecordDecl());
  return templateDecl && templateDecl->getName() == "OutputPatch";
}
/// Returns true if the type is a "PointStream" template specialization.
bool IsHLSLPointStreamType(QualType type) {
  const RecordType *RT = dyn_cast<RecordType>(type.getCanonicalType());
  if (!RT)
    return false;
  const ClassTemplateSpecializationDecl *templateDecl =
      dyn_cast<ClassTemplateSpecializationDecl>(RT->getAsCXXRecordDecl());
  return templateDecl && templateDecl->getName() == "PointStream";
}
/// Returns true if the type is a "LineStream" template specialization.
bool IsHLSLLineStreamType(QualType type) {
  const RecordType *RT = dyn_cast<RecordType>(type.getCanonicalType());
  if (!RT)
    return false;
  const ClassTemplateSpecializationDecl *templateDecl =
      dyn_cast<ClassTemplateSpecializationDecl>(RT->getAsCXXRecordDecl());
  return templateDecl && templateDecl->getName() == "LineStream";
}
/// Returns true if the type is a "TriangleStream" template specialization.
bool IsHLSLTriangleStreamType(QualType type) {
  const RecordType *RT = dyn_cast<RecordType>(type.getCanonicalType());
  if (!RT)
    return false;
  const ClassTemplateSpecializationDecl *templateDecl =
      dyn_cast<ClassTemplateSpecializationDecl>(RT->getAsCXXRecordDecl());
  return templateDecl && templateDecl->getName() == "TriangleStream";
}
/// Returns true for any geometry-shader stream output type:
/// PointStream, LineStream, or TriangleStream.
bool IsHLSLStreamOutputType(QualType type) {
  const RecordType *RT = dyn_cast<RecordType>(type.getCanonicalType());
  if (!RT)
    return false;
  const ClassTemplateSpecializationDecl *templateDecl =
      dyn_cast<ClassTemplateSpecializationDecl>(RT->getAsCXXRecordDecl());
  if (!templateDecl)
    return false;
  StringRef name = templateDecl->getName();
  return name == "PointStream" || name == "LineStream" ||
         name == "TriangleStream";
}
/// Returns true if 'type' names one of the built-in HLSL resource object
/// types (textures, buffers, samplers, acceleration structures, ...).
/// The match is purely by record name.
bool IsHLSLResourceType(clang::QualType type) {
  const RecordType *RT = type->getAs<RecordType>();
  if (!RT)
    return false;
  StringRef name = RT->getDecl()->getName();
  // Typed textures and their read-write variants.
  if (name == "Texture1D" || name == "RWTexture1D" || name == "Texture2D" ||
      name == "RWTexture2D" || name == "Texture2DMS" ||
      name == "RWTexture2DMS" || name == "Texture3D" ||
      name == "RWTexture3D" || name == "TextureCube" ||
      name == "RWTextureCube")
    return true;
  // Texture arrays and their read-write variants.
  if (name == "Texture1DArray" || name == "RWTexture1DArray" ||
      name == "Texture2DArray" || name == "RWTexture2DArray" ||
      name == "Texture2DMSArray" || name == "RWTexture2DMSArray" ||
      name == "TextureCubeArray" || name == "RWTextureCubeArray")
    return true;
  // Sampler feedback textures.
  if (name == "FeedbackTexture2D" || name == "FeedbackTexture2DArray")
    return true;
  // Rasterizer-ordered views.
  if (name == "RasterizerOrderedTexture1D" ||
      name == "RasterizerOrderedTexture2D" ||
      name == "RasterizerOrderedTexture3D" ||
      name == "RasterizerOrderedTexture1DArray" ||
      name == "RasterizerOrderedTexture2DArray" ||
      name == "RasterizerOrderedBuffer" ||
      name == "RasterizerOrderedByteAddressBuffer" ||
      name == "RasterizerOrderedStructuredBuffer")
    return true;
  // Buffer resources.
  if (name == "ByteAddressBuffer" || name == "RWByteAddressBuffer" ||
      name == "StructuredBuffer" || name == "RWStructuredBuffer" ||
      name == "AppendStructuredBuffer" || name == "ConsumeStructuredBuffer" ||
      name == "Buffer" || name == "RWBuffer")
    return true;
  // Samplers, constant/texture buffers, and raytracing structures.
  return name == "SamplerState" || name == "SamplerComparisonState" ||
         name == "ConstantBuffer" || name == "TextureBuffer" ||
         name == "RaytracingAccelerationStructure";
}
/// Looks up the HLSLNodeObjectAttr for a record type.  For a class template
/// specialization the attribute is read from the primary template's pattern;
/// otherwise it is read from the record declaration itself.  Returns null
/// when 'type' is not a record or carries no such attribute.
static HLSLNodeObjectAttr *getNodeAttr(clang::QualType type) {
  const RecordType *RT = type->getAs<RecordType>();
  if (!RT)
    return nullptr;
  const RecordDecl *RD = RT->getDecl();
  if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RD))
    if (const auto *Template =
            dyn_cast<ClassTemplateDecl>(Spec->getSpecializedTemplate()))
      return Template->getTemplatedDecl()->getAttr<HLSLNodeObjectAttr>();
  if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    return CXXRD->getAttr<HLSLNodeObjectAttr>();
  return nullptr;
}
/// Returns the node I/O kind recorded on a node object type, or
/// NodeIOKind::Invalid when the type is not a node object.
DXIL::NodeIOKind GetNodeIOType(clang::QualType type) {
  const HLSLNodeObjectAttr *NodeAttr = getNodeAttr(type);
  return NodeAttr ? NodeAttr->getNodeIOType() : DXIL::NodeIOKind::Invalid;
}
/// Returns true if the node I/O kind of 'type' has the Input flag set.
bool IsHLSLNodeInputType(clang::QualType type) {
  const uint32_t ioKind = static_cast<uint32_t>(GetNodeIOType(type));
  const uint32_t inputBit = static_cast<uint32_t>(DXIL::NodeIOFlags::Input);
  return (ioKind & inputBit) != 0;
}
/// Returns true if 'type' is the internal ".Resource" record used for
/// dynamic-resource-heap access.
bool IsHLSLDynamicResourceType(clang::QualType type) {
  const RecordType *RT = type->getAs<RecordType>();
  return RT && RT->getDecl()->getName() == ".Resource";
}
/// Returns true if 'type' is the internal ".Sampler" record used for
/// dynamic-sampler-heap access.
bool IsHLSLDynamicSamplerType(clang::QualType type) {
  const RecordType *RT = type->getAs<RecordType>();
  return RT && RT->getDecl()->getName() == ".Sampler";
}
/// Returns true if 'type' is a work-graph node object type, i.e. its
/// declaration (or its primary template's pattern) carries
/// HLSLNodeObjectAttr.
bool IsHLSLNodeType(clang::QualType type) {
  // The attribute pointer itself is not needed; a non-null result is the
  // whole answer.
  return getNodeAttr(type) != nullptr;
}
/// Returns true for HLSL object types whose members are accessed
/// implicitly through the object (ConstantBuffer / TextureBuffer).
bool IsHLSLObjectWithImplicitMemberAccess(clang::QualType type) {
  const RecordType *RT = type->getAs<RecordType>();
  if (!RT)
    return false;
  StringRef name = RT->getDecl()->getName();
  return name == "ConstantBuffer" || name == "TextureBuffer";
}
/// Returns true for HLSL object types whose implicitly accessed members are
/// read-only (ConstantBuffer / TextureBuffer).
bool IsHLSLObjectWithImplicitROMemberAccess(clang::QualType type) {
  const RecordType *RT = type->getAs<RecordType>();
  if (!RT)
    return false;
  // Read-only records.
  StringRef name = RT->getDecl()->getName();
  return name == "ConstantBuffer" || name == "TextureBuffer";
}
/// Returns true if 'type' is a read-write node input record: both the
/// ReadWrite and Input flags must be set in its node I/O kind.
bool IsHLSLRWNodeInputRecordType(clang::QualType type) {
  const uint32_t rwInputMask =
      static_cast<uint32_t>(DXIL::NodeIOFlags::ReadWrite) |
      static_cast<uint32_t>(DXIL::NodeIOFlags::Input);
  return (static_cast<uint32_t>(GetNodeIOType(type)) & rwInputMask) ==
         rwInputMask;
}
/// Returns true if 'type' is a read-only node input record: the Input flag
/// is set while the ReadWrite flag is clear.
bool IsHLSLRONodeInputRecordType(clang::QualType type) {
  const uint32_t rwInputMask =
      static_cast<uint32_t>(DXIL::NodeIOFlags::ReadWrite) |
      static_cast<uint32_t>(DXIL::NodeIOFlags::Input);
  return (static_cast<uint32_t>(GetNodeIOType(type)) & rwInputMask) ==
         static_cast<uint32_t>(DXIL::NodeIOFlags::Input);
}
/// Returns true if 'type' is a node output: the Output flag is set and no
/// record-granularity bits are present.
bool IsHLSLNodeOutputType(clang::QualType type) {
  const uint32_t outputAndGranularity =
      static_cast<uint32_t>(DXIL::NodeIOFlags::Output) |
      static_cast<uint32_t>(DXIL::NodeIOFlags::RecordGranularityMask);
  return (static_cast<uint32_t>(GetNodeIOType(type)) & outputAndGranularity) ==
         static_cast<uint32_t>(DXIL::NodeIOFlags::Output);
}
/// Returns true for any structured-buffer flavor: StructuredBuffer,
/// RWStructuredBuffer, AppendStructuredBuffer, ConsumeStructuredBuffer.
bool IsHLSLStructuredBufferType(clang::QualType type) {
  const RecordType *RT = type->getAs<RecordType>();
  if (!RT)
    return false;
  StringRef name = RT->getDecl()->getName();
  return name == "StructuredBuffer" || name == "RWStructuredBuffer" ||
         name == "AppendStructuredBuffer" ||
         name == "ConsumeStructuredBuffer";
}
/// Returns true if 'type' is a raytracing subobject type, discarding the
/// specific kind information.
bool IsHLSLSubobjectType(clang::QualType type) {
  DXIL::SubobjectKind subobjectKind;
  DXIL::HitGroupType hitGroupType;
  return GetHLSLSubobjectKind(type, subobjectKind, hitGroupType);
}
/// Returns true if QT canonically is a record type that the user wrote:
/// implicit declarations and specializations of implicit (built-in)
/// templates are excluded.
bool IsUserDefinedRecordType(clang::QualType QT) {
  const RecordType *RT =
      dyn_cast<RecordType>(QT.getCanonicalType().getTypePtr());
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->isImplicit())
    return false;
  const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RD);
  return !(Spec && Spec->getSpecializedTemplate()->isImplicit());
}
// Returns true if 'decl' carries a semantic annotation that resolves to the
// TessFactor system-value semantic.
static bool HasTessFactorSemantic(const ValueDecl *decl) {
  for (const UnusualAnnotation *annotation : decl->getUnusualAnnotations()) {
    if (annotation->getKind() != UnusualAnnotation::UA_SemanticDecl)
      continue;
    const SemanticDecl *sd = cast<SemanticDecl>(annotation);
    // Split the trailing index off the semantic name before the lookup.
    StringRef semanticName;
    unsigned int index = 0;
    Semantic::DecomposeNameAndIndex(sd->SemanticName, &semanticName, &index);
    const hlsl::Semantic *pSemantic = hlsl::Semantic::GetByName(semanticName);
    if (pSemantic && pSemantic->GetKind() == hlsl::Semantic::Kind::TessFactor)
      return true;
  }
  return false;
}
// Recursively searches 'Ty' for a TessFactor semantic: records are walked
// field-by-field; array types are checked via the annotations on 'decl'
// itself.
static bool HasTessFactorSemanticRecurse(const ValueDecl *decl, QualType Ty) {
  // Scalars, vectors, and matrices have no nested fields to inspect.
  if (Ty->isBuiltinType() || hlsl::IsHLSLVecMatType(Ty))
    return false;
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    for (FieldDecl *fieldDecl : RD->fields()) {
      if (HasTessFactorSemanticRecurse(fieldDecl, fieldDecl->getType()))
        return true;
    }
    return false;
  }
  // NOTE(review): for arrays only the declaration's own annotations are
  // examined; the element type is not recursed into — presumably TessFactor
  // semantics appear directly on the array decl. Confirm against callers.
  if (Ty->getAsArrayTypeUnsafe())
    return HasTessFactorSemantic(decl);
  return false;
}
bool IsPatchConstantFunctionDecl(const clang::FunctionDecl *FD) {
  // Structural test only: it asks whether FD *could* serve as a patch
  // constant function, not whether it actually is the patch constant
  // function for the entry point of a compiled hull shader (which may not
  // have been seen yet). The answer is therefore conservative.
  // First look for a TessFactor semantic in a non-void return type.
  if (!FD->getReturnType()->isVoidType() &&
      HasTessFactorSemanticRecurse(FD, FD->getReturnType()))
    return true;
  // Otherwise look for a TessFactor semantic on any 'out' parameter.
  for (const ParmVarDecl *Param : FD->params())
    if (Param->hasAttr<HLSLOutAttr>() &&
        HasTessFactorSemanticRecurse(Param, Param->getType()))
      return true;
  return false;
}
// Returns true if 'typeWithOperator' declares a user-provided,
// single-parameter overload of operator 'opc'.  For operator= a special
// skip applies (see below) so certain assignments are treated as implicit.
bool DoesTypeDefineOverloadedOperator(clang::QualType typeWithOperator,
                                      clang::OverloadedOperatorKind opc,
                                      clang::QualType paramType) {
  if (const RecordType *recordType = typeWithOperator->getAs<RecordType>()) {
    if (const CXXRecordDecl *cxxRecordDecl =
            dyn_cast<CXXRecordDecl>(recordType->getDecl())) {
      for (const auto *method : cxxRecordDecl->methods()) {
        // Only user-written methods with exactly one parameter can be the
        // overloads this query is about.
        if (!method->isUserProvided() || method->getNumParams() != 1)
          continue;
        // It must be an implicit assignment: when asking about operator=
        // with paramType equal to the class itself, skip candidates whose
        // declared parameter type differs from the class type — they do not
        // count as a user-defined same-type assignment here.
        if (opc == OO_Equal &&
            typeWithOperator != method->getParamDecl(0)->getOriginalType() &&
            typeWithOperator == paramType) {
          continue;
        }
        if (method->getOverloadedOperator() == opc)
          return true;
      }
    }
  }
  return false;
}
// Maps a record type name to its DXIL raytracing subobject kind.  Returns
// true and sets 'subobjectKind' (plus 'hgType' for hit groups) on a match.
// The outer switch dispatches on the name length so that at most one string
// comparison is performed per candidate length.
bool GetHLSLSubobjectKind(clang::QualType type,
                          DXIL::SubobjectKind &subobjectKind,
                          DXIL::HitGroupType &hgType) {
  // hgType is only meaningful for hit groups; prime it with a sentinel.
  hgType = (DXIL::HitGroupType)(-1);
  type = type.getCanonicalType();
  if (const RecordType *RT = type->getAs<RecordType>()) {
    StringRef name = RT->getDecl()->getName();
    switch (name.size()) {
    case 17:
      return name == "StateObjectConfig"
                 ? (subobjectKind = DXIL::SubobjectKind::StateObjectConfig,
                    true)
                 : false;
    case 18:
      return name == "LocalRootSignature"
                 ? (subobjectKind = DXIL::SubobjectKind::LocalRootSignature,
                    true)
                 : false;
    case 19:
      return name == "GlobalRootSignature"
                 ? (subobjectKind = DXIL::SubobjectKind::GlobalRootSignature,
                    true)
                 : false;
    case 29:
      return name == "SubobjectToExportsAssociation"
                 ? (subobjectKind =
                        DXIL::SubobjectKind::SubobjectToExportsAssociation,
                    true)
                 : false;
    case 22:
      return name == "RaytracingShaderConfig"
                 ? (subobjectKind = DXIL::SubobjectKind::RaytracingShaderConfig,
                    true)
                 : false;
    case 24:
      return name == "RaytracingPipelineConfig"
                 ? (subobjectKind =
                        DXIL::SubobjectKind::RaytracingPipelineConfig,
                    true)
                 : false;
    case 25:
      return name == "RaytracingPipelineConfig1"
                 ? (subobjectKind =
                        DXIL::SubobjectKind::RaytracingPipelineConfig1,
                    true)
                 : false;
    // Hit groups additionally report their hit-group type.
    case 16:
      if (name == "TriangleHitGroup") {
        subobjectKind = DXIL::SubobjectKind::HitGroup;
        hgType = DXIL::HitGroupType::Triangle;
        return true;
      }
      return false;
    case 27:
      if (name == "ProceduralPrimitiveHitGroup") {
        subobjectKind = DXIL::SubobjectKind::HitGroup;
        hgType = DXIL::HitGroupType::ProceduralPrimitive;
        return true;
      }
      return false;
    }
  }
  return false;
}
/// Returns the record declaration of a node object's record template
/// argument (the T in e.g. DispatchNodeInputRecord<T>), or null when the
/// type is not a template specialization or T is not a record.
clang::RecordDecl *GetRecordDeclFromNodeObjectType(clang::QualType ObjectTy) {
  ObjectTy = ObjectTy.getCanonicalType();
  DXASSERT(IsHLSLNodeType(ObjectTy), "Expected Node Object type");
  const CXXRecordDecl *CXXRD = ObjectTy->getAsCXXRecordDecl();
  if (!CXXRD)
    return nullptr;
  const auto *SpecDecl = dyn_cast<ClassTemplateSpecializationDecl>(CXXRD);
  if (!SpecDecl)
    return nullptr;
  clang::QualType RecType = SpecDecl->getTemplateArgs()[0].getAsType();
  if (const RecordType *RT = RecType->getAs<RecordType>())
    return RT->getDecl();
  return nullptr;
}
/// Returns true if 'type' is an HLSL RayQuery<...> template specialization.
bool IsHLSLRayQueryType(clang::QualType type) {
  type = type.getCanonicalType();
  const RecordType *RT = dyn_cast<RecordType>(type);
  if (!RT)
    return false;
  const auto *SpecDecl =
      dyn_cast<ClassTemplateSpecializationDecl>(RT->getAsCXXRecordDecl());
  return SpecDecl && SpecDecl->getName() == "RayQuery";
}
// Returns the element/result type of an HLSL resource (e.g. the T in
// Buffer<T>), or a null QualType for feedback textures, whose data is
// opaque.  Assumes 'type' is a resource record type — TODO confirm callers
// guarantee this; a non-record type would dereference a null RecordType.
QualType GetHLSLResourceResultType(QualType type) {
  // Don't canonicalize the type as to not lose snorm in Buffer<snorm float>
  const RecordType *RT = type->getAs<RecordType>();
  const RecordDecl *RD = RT->getDecl();
  if (const ClassTemplateSpecializationDecl *templateDecl =
          dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
    if (RD->getName().startswith("FeedbackTexture")) {
      // Feedback textures are write-only and the data is opaque,
      // so there is no result type per se.
      return {};
    }
    // Type-templated resource types
    // Prefer getting the template argument from the TemplateSpecializationType
    // sugar, since this preserves 'snorm' from 'Buffer<snorm float>' which is
    // lost on the ClassTemplateSpecializationDecl since it's considered type
    // sugar.
    const TemplateArgument *templateArg = &templateDecl->getTemplateArgs()[0];
    if (const TemplateSpecializationType *specializationType =
            type->getAs<TemplateSpecializationType>()) {
      if (specializationType->getNumArgs() >= 1) {
        templateArg = &specializationType->getArg(0);
      }
    }
    if (templateArg->getKind() == TemplateArgument::ArgKind::Type)
      return templateArg->getAsType();
  }
  // Non-type-templated resource types like [RW][RasterOrder]ByteAddressBuffer
  // Get the result type from handle field.
  FieldDecl *HandleFieldDecl = *(RD->field_begin());
  DXASSERT(HandleFieldDecl->getName() == "h",
           "Resource must have a handle field");
  return HandleFieldDecl->getType();
}
/// Returns the first template argument of a resource specialization as an
/// unsigned integer.  Asserts (via cast) that 'type' is a record that is a
/// class template specialization with an integral first argument.
unsigned GetHLSLResourceTemplateUInt(clang::QualType type) {
  const auto *SpecDecl = cast<ClassTemplateSpecializationDecl>(
      type->castAs<RecordType>()->getDecl());
  return static_cast<unsigned>(
      SpecDecl->getTemplateArgs()[0].getAsIntegral().getZExtValue());
}
/// Returns true if stripping all array dimensions from 'type' (including a
/// leading incomplete dimension) yields an HLSL resource type.
bool IsIncompleteHLSLResourceArrayType(clang::ASTContext &context,
                                       clang::QualType type) {
  if (type->isIncompleteArrayType())
    type = context.getAsIncompleteArrayType(type)->getElementType();
  // Peel any remaining (complete) array dimensions.
  while (type->isArrayType())
    type = cast<ArrayType>(type)->getElementType();
  return IsHLSLResourceType(type);
}
/// Returns the first template argument (as a type) of a resource
/// specialization, from the canonical type.
QualType GetHLSLResourceTemplateParamType(QualType type) {
  type = type.getCanonicalType();
  const auto *SpecDecl = cast<ClassTemplateSpecializationDecl>(
      cast<RecordType>(type)->getAsCXXRecordDecl());
  return SpecDecl->getTemplateArgs()[0].getAsType();
}
// Returns the vertex record type T of an InputPatch<T, N>.
QualType GetHLSLInputPatchElementType(QualType type) {
  return GetHLSLResourceTemplateParamType(type);
}
/// Returns the control-point count N of an InputPatch<T, N>.
unsigned GetHLSLInputPatchCount(QualType type) {
  type = type.getCanonicalType();
  const auto *SpecDecl = cast<ClassTemplateSpecializationDecl>(
      cast<RecordType>(type)->getAsCXXRecordDecl());
  return SpecDecl->getTemplateArgs()[1].getAsIntegral().getLimitedValue();
}
// Returns the vertex record type T of an OutputPatch<T, N>.
clang::QualType GetHLSLOutputPatchElementType(QualType type) {
  return GetHLSLResourceTemplateParamType(type);
}
/// Returns the control-point count N of an OutputPatch<T, N>.
unsigned GetHLSLOutputPatchCount(QualType type) {
  type = type.getCanonicalType();
  const auto *SpecDecl = cast<ClassTemplateSpecializationDecl>(
      cast<RecordType>(type)->getAsCXXRecordDecl());
  return SpecDecl->getTemplateArgs()[1].getAsIntegral().getLimitedValue();
}
/// Scans a parameter's attribute list for in/out/inout annotations.
/// Returns true when the parameter is an output.  If 'pIsIn' is non-null it
/// receives whether the parameter is an input; a parameter with no in/out
/// annotation at all defaults to input.
bool IsParamAttributedAsOut(clang::AttributeList *pAttributes, bool *pIsIn) {
  bool sawAnnotation = false;
  bool sawIn = false;
  bool sawOut = false;
  for (clang::AttributeList *attr = pAttributes; attr != nullptr;
       attr = attr->getNext()) {
    switch (attr->getKind()) {
    case AttributeList::AT_HLSLIn:
      sawAnnotation = true;
      sawIn = true;
      break;
    case AttributeList::AT_HLSLOut:
      sawAnnotation = true;
      sawOut = true;
      break;
    case AttributeList::AT_HLSLInOut:
      sawAnnotation = true;
      sawIn = true;
      sawOut = true;
      break;
    default:
      // Other attributes carry no in/out information.
      break;
    }
  }
  if (pIsIn)
    *pIsIn = sawIn || !sawAnnotation;
  return sawOut;
}
/// Builds a ParameterModifier from the in/out annotations on a parameter's
/// attribute list.
hlsl::ParameterModifier
ParamModFromAttributeList(clang::AttributeList *pAttributes) {
  bool isIn = false;
  const bool isOut = IsParamAttributedAsOut(pAttributes, &isIn);
  return ParameterModifier::FromInOut(isIn, isOut);
}
/// Maps each signed integer scalar type to its unsigned counterpart; every
/// other scalar type is returned unchanged.
HLSLScalarType MakeUnsigned(HLSLScalarType T) {
  switch (T) {
  case HLSLScalarType_int:
    return HLSLScalarType_uint;
  case HLSLScalarType_int16:
    return HLSLScalarType_uint16;
  case HLSLScalarType_int64:
    return HLSLScalarType_uint64;
  case HLSLScalarType_int_min16:
    return HLSLScalarType_uint_min16;
  default:
    // Only signed int types are relevant.
    return T;
  }
}
} // namespace hlsl
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/Comment.cpp | //===--- Comment.cpp - Comment AST node implementation --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/Basic/CharInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
namespace clang {
namespace comments {
// Returns the class name of this comment node as a string; one switch case
// per concrete node kind is expanded from CommentNodes.inc.
const char *Comment::getCommentKindName() const {
  switch (getCommentKind()) {
  case NoCommentKind: return "NoCommentKind";
#define ABSTRACT_COMMENT(COMMENT)
#define COMMENT(CLASS, PARENT) \
  case CLASS##Kind: \
    return #CLASS;
#include "clang/AST/CommentNodes.inc"
#undef COMMENT
#undef ABSTRACT_COMMENT
  }
  llvm_unreachable("Unknown comment kind!");
}
namespace {
// Compile-time machinery verifying that every comment node class provides
// its own child_begin()/child_end() instead of inheriting Comment's.
struct good {};
struct bad {};

// Preferred overload: selected when the member pointer names the derived
// class, i.e. CLASS declares its own child_begin/child_end.
template <typename T>
good implements_child_begin_end(Comment::child_iterator (T::*)() const) {
  return good();
}

// Fallback overload: selected when the member is inherited from Comment.
LLVM_ATTRIBUTE_UNUSED
static inline bad implements_child_begin_end(
    Comment::child_iterator (Comment::*)() const) {
  return bad();
}

// Fails to compile (no conversion from 'bad' to 'good') when a node class
// does not implement the iterator accessors itself.
#define ASSERT_IMPLEMENTS_child_begin(function) \
  (void) good(implements_child_begin_end(function))

LLVM_ATTRIBUTE_UNUSED
static inline void CheckCommentASTNodes() {
#define ABSTRACT_COMMENT(COMMENT)
#define COMMENT(CLASS, PARENT) \
  ASSERT_IMPLEMENTS_child_begin(&CLASS::child_begin); \
  ASSERT_IMPLEMENTS_child_begin(&CLASS::child_end);
#include "clang/AST/CommentNodes.inc"
#undef COMMENT
#undef ABSTRACT_COMMENT
}

#undef ASSERT_IMPLEMENTS_child_begin

} // end unnamed namespace
// Dispatches to the concrete node's child_begin() based on the dynamic
// comment kind; one case per node class from CommentNodes.inc.
Comment::child_iterator Comment::child_begin() const {
  switch (getCommentKind()) {
  case NoCommentKind: llvm_unreachable("comment without a kind");
#define ABSTRACT_COMMENT(COMMENT)
#define COMMENT(CLASS, PARENT) \
  case CLASS##Kind: \
    return static_cast<const CLASS *>(this)->child_begin();
#include "clang/AST/CommentNodes.inc"
#undef COMMENT
#undef ABSTRACT_COMMENT
  }
  llvm_unreachable("Unknown comment kind!");
}
// Dispatches to the concrete node's child_end() based on the dynamic
// comment kind; one case per node class from CommentNodes.inc.
Comment::child_iterator Comment::child_end() const {
  switch (getCommentKind()) {
  case NoCommentKind: llvm_unreachable("comment without a kind");
#define ABSTRACT_COMMENT(COMMENT)
#define COMMENT(CLASS, PARENT) \
  case CLASS##Kind: \
    return static_cast<const CLASS *>(this)->child_end();
#include "clang/AST/CommentNodes.inc"
#undef COMMENT
#undef ABSTRACT_COMMENT
  }
  llvm_unreachable("Unknown comment kind!");
}
bool TextComment::isWhitespaceNoCache() const {
for (StringRef::const_iterator I = Text.begin(), E = Text.end();
I != E; ++I) {
if (!clang::isWhitespace(*I))
return false;
}
return true;
}
bool ParagraphComment::isWhitespaceNoCache() const {
for (child_iterator I = child_begin(), E = child_end(); I != E; ++I) {
if (const TextComment *TC = dyn_cast<TextComment>(*I)) {
if (!TC->isWhitespace())
return false;
} else
return false;
}
return true;
}
// Returns the canonical printed form of a \param pass direction:
// "[in]", "[out]", or "[in,out]".
const char *ParamCommandComment::getDirectionAsString(PassDirection D) {
  switch (D) {
  case ParamCommandComment::In:
    return "[in]";
  case ParamCommandComment::Out:
    return "[out]";
  case ParamCommandComment::InOut:
    return "[in,out]";
  }
  llvm_unreachable("unknown PassDirection");
}
/// Populates this DeclInfo from CommentDecl: classifies the commented
/// declaration (function, class, variable, namespace, typedef, enum) and
/// extracts parameters, return type, and template parameters where the
/// declaration kind provides them.
void DeclInfo::fill() {
  assert(!IsFilled);

  // Set defaults.
  Kind = OtherKind;
  TemplateKind = NotTemplate;
  IsObjCMethod = false;
  IsInstanceMethod = false;
  IsClassMethod = false;
  ParamVars = None;
  TemplateParameters = nullptr;

  if (!CommentDecl) {
    // If there is no declaration, the defaults is our only guess.
    IsFilled = true;
    return;
  }
  CurrentDecl = CommentDecl;

  Decl::Kind K = CommentDecl->getKind();
  switch (K) {
  default:
    // Defaults should be good for declarations we don't handle explicitly.
    break;
  case Decl::Function:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion: {
    const FunctionDecl *FD = cast<FunctionDecl>(CommentDecl);
    Kind = FunctionKind;
    ParamVars = llvm::makeArrayRef(FD->param_begin(), FD->getNumParams());
    ReturnType = FD->getReturnType();
    unsigned NumLists = FD->getNumTemplateParameterLists();
    if (NumLists != 0) {
      TemplateKind = TemplateSpecialization;
      // Use the innermost template parameter list.
      TemplateParameters =
          FD->getTemplateParameterList(NumLists - 1);
    }

    if (K == Decl::CXXMethod || K == Decl::CXXConstructor ||
        K == Decl::CXXDestructor || K == Decl::CXXConversion) {
      const CXXMethodDecl *MD = cast<CXXMethodDecl>(CommentDecl);
      IsInstanceMethod = MD->isInstance();
      IsClassMethod = !IsInstanceMethod;
    }
    break;
  }
  case Decl::ObjCMethod: {
    const ObjCMethodDecl *MD = cast<ObjCMethodDecl>(CommentDecl);
    Kind = FunctionKind;
    ParamVars = llvm::makeArrayRef(MD->param_begin(), MD->param_size());
    ReturnType = MD->getReturnType();
    IsObjCMethod = true;
    IsInstanceMethod = MD->isInstanceMethod();
    IsClassMethod = !IsInstanceMethod;
    break;
  }
  case Decl::FunctionTemplate: {
    const FunctionTemplateDecl *FTD = cast<FunctionTemplateDecl>(CommentDecl);
    Kind = FunctionKind;
    TemplateKind = Template;
    // Parameters and return type come from the templated function pattern.
    const FunctionDecl *FD = FTD->getTemplatedDecl();
    ParamVars = llvm::makeArrayRef(FD->param_begin(), FD->getNumParams());
    ReturnType = FD->getReturnType();
    TemplateParameters = FTD->getTemplateParameters();
    break;
  }
  case Decl::ClassTemplate: {
    const ClassTemplateDecl *CTD = cast<ClassTemplateDecl>(CommentDecl);
    Kind = ClassKind;
    TemplateKind = Template;
    TemplateParameters = CTD->getTemplateParameters();
    break;
  }
  case Decl::ClassTemplatePartialSpecialization: {
    const ClassTemplatePartialSpecializationDecl *CTPSD =
        cast<ClassTemplatePartialSpecializationDecl>(CommentDecl);
    Kind = ClassKind;
    TemplateKind = TemplatePartialSpecialization;
    TemplateParameters = CTPSD->getTemplateParameters();
    break;
  }
  case Decl::ClassTemplateSpecialization:
    Kind = ClassKind;
    TemplateKind = TemplateSpecialization;
    break;
  case Decl::Record:
  case Decl::CXXRecord:
    Kind = ClassKind;
    break;
  case Decl::Var:
  case Decl::Field:
  case Decl::EnumConstant:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
    Kind = VariableKind;
    break;
  case Decl::Namespace:
    Kind = NamespaceKind;
    break;
  case Decl::Typedef: {
    Kind = TypedefKind;
    // If this is a typedef to something we consider a function, extract
    // arguments and return type.
    const TypedefDecl *TD = cast<TypedefDecl>(CommentDecl);
    const TypeSourceInfo *TSI = TD->getTypeSourceInfo();
    if (!TSI)
      break;
    TypeLoc TL = TSI->getTypeLoc().getUnqualifiedLoc();
    // Strip type sugar layer by layer until a function type — if any —
    // becomes visible.
    while (true) {
      TL = TL.IgnoreParens();
      // Look through qualified types.
      if (QualifiedTypeLoc QualifiedTL = TL.getAs<QualifiedTypeLoc>()) {
        TL = QualifiedTL.getUnqualifiedLoc();
        continue;
      }
      // Look through pointer types.
      if (PointerTypeLoc PointerTL = TL.getAs<PointerTypeLoc>()) {
        TL = PointerTL.getPointeeLoc().getUnqualifiedLoc();
        continue;
      }
      // Look through reference types.
      if (ReferenceTypeLoc ReferenceTL = TL.getAs<ReferenceTypeLoc>()) {
        TL = ReferenceTL.getPointeeLoc().getUnqualifiedLoc();
        continue;
      }
      // Look through adjusted types.
      if (AdjustedTypeLoc ATL = TL.getAs<AdjustedTypeLoc>()) {
        TL = ATL.getOriginalLoc();
        continue;
      }
      // Look through block pointer types.
      if (BlockPointerTypeLoc BlockPointerTL =
              TL.getAs<BlockPointerTypeLoc>()) {
        TL = BlockPointerTL.getPointeeLoc().getUnqualifiedLoc();
        continue;
      }
      // Look through member pointer types.
      if (MemberPointerTypeLoc MemberPointerTL =
              TL.getAs<MemberPointerTypeLoc>()) {
        TL = MemberPointerTL.getPointeeLoc().getUnqualifiedLoc();
        continue;
      }
      // Look through elaborated types.
      if (ElaboratedTypeLoc ETL = TL.getAs<ElaboratedTypeLoc>()) {
        TL = ETL.getNamedTypeLoc();
        continue;
      }
      // Is this a typedef for a function type?
      if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
        Kind = FunctionKind;
        ParamVars = FTL.getParams();
        ReturnType = FTL.getReturnLoc().getType();
        break;
      }
      if (TemplateSpecializationTypeLoc STL =
              TL.getAs<TemplateSpecializationTypeLoc>()) {
        // If we have a typedef to a template specialization with exactly one
        // template argument of a function type, this looks like std::function,
        // boost::function, or other function wrapper. Treat these typedefs as
        // functions.
        if (STL.getNumArgs() != 1)
          break;
        TemplateArgumentLoc MaybeFunction = STL.getArgLoc(0);
        if (MaybeFunction.getArgument().getKind() != TemplateArgument::Type)
          break;
        TypeSourceInfo *MaybeFunctionTSI = MaybeFunction.getTypeSourceInfo();
        TypeLoc TL = MaybeFunctionTSI->getTypeLoc().getUnqualifiedLoc();
        if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
          Kind = FunctionKind;
          ParamVars = FTL.getParams();
          ReturnType = FTL.getReturnLoc().getType();
        }
        break;
      }
      break;
    }
    break;
  }
  case Decl::TypeAlias:
    Kind = TypedefKind;
    break;
  case Decl::TypeAliasTemplate: {
    const TypeAliasTemplateDecl *TAT = cast<TypeAliasTemplateDecl>(CommentDecl);
    Kind = TypedefKind;
    TemplateKind = Template;
    TemplateParameters = TAT->getTemplateParameters();
    break;
  }
  case Decl::Enum:
    Kind = EnumKind;
    break;
  }

  IsFilled = true;
}
// Returns the name of the documented parameter, or "..." when the comment
// refers to the variadic parameter.  Requires a valid parameter index.
StringRef ParamCommandComment::getParamName(const FullComment *FC) const {
  assert(isParamIndexValid());
  if (isVarArgParam())
    return "...";
  const DeclInfo *DI = FC->getDeclInfo();
  return DI->ParamVars[getParamIndex()]->getName();
}
// Resolves the name of the referenced template parameter by walking the
// stored position path: each step selects a parameter at the current depth,
// and intermediate template template parameters introduce the next-deeper
// parameter list.
StringRef TParamCommandComment::getParamName(const FullComment *FC) const {
  assert(isPositionValid());
  const TemplateParameterList *TPL = FC->getDeclInfo()->TemplateParameters;
  for (unsigned i = 0, e = getDepth(); i != e; ++i) {
    // The final step names the parameter itself.
    if (i == e-1)
      return TPL->getParam(getIndex(i))->getName();
    // Descend into the nested list of a template template parameter.
    const NamedDecl *Param = TPL->getParam(getIndex(i));
    if (const TemplateTemplateParmDecl *TTP =
            dyn_cast<TemplateTemplateParmDecl>(Param))
      TPL = TTP->getTemplateParameters();
  }
  return "";
}
} // end namespace comments
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/ItaniumMangle.cpp | //===--- ItaniumMangle.cpp - Itanium C++ Name Mangling ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implements C++ name mangling according to the Itanium C++ ABI,
// which is used in GCC 3.2 and newer (and many compilers that are
// ABI-compatible with GCC):
//
// http://mentorembedded.github.io/cxx-abi/abi.html#mangling
//
//===----------------------------------------------------------------------===//
#include "clang/AST/Mangle.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
// //
///////////////////////////////////////////////////////////////////////////////
#define MANGLE_CHECKER 0
#if MANGLE_CHECKER
#include <cxxabi.h>
#endif
using namespace clang;
namespace {
/// Retrieve the declaration context that should be used when mangling the given
/// declaration.
static const DeclContext *getEffectiveDeclContext(const Decl *D) {
  // The ABI assumes that lambda closure types that occur within
  // default arguments live in the context of the function. However, due to
  // the way in which Clang parses and creates function declarations, this is
  // not the case: the lambda closure type ends up living in the context
  // where the function itself resides, because the function declaration itself
  // had not yet been created. Fix the context here.
  if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
    if (RD->isLambda())
      if (ParmVarDecl *ContextParam
            = dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl()))
        return ContextParam->getDeclContext();
  }

  // Perform the same check for block literals.
  if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
    if (ParmVarDecl *ContextParam
          = dyn_cast_or_null<ParmVarDecl>(BD->getBlockManglingContextDecl()))
      return ContextParam->getDeclContext();
  }

  const DeclContext *DC = D->getDeclContext();
  // Look through captured-statement contexts.
  if (const CapturedDecl *CD = dyn_cast<CapturedDecl>(DC))
    return getEffectiveDeclContext(CD);

  // Variables and functions with C language linkage are treated as if
  // declared at translation-unit scope.
  if (const auto *VD = dyn_cast<VarDecl>(D))
    if (VD->isExternC())
      return VD->getASTContext().getTranslationUnitDecl();

  if (const auto *FD = dyn_cast<FunctionDecl>(D))
    if (FD->isExternC())
      return FD->getASTContext().getTranslationUnitDecl();

  return DC;
}
// Returns the effective parent of a declaration context, applying the same
// adjustments as getEffectiveDeclContext.
static const DeclContext *getEffectiveParentContext(const DeclContext *DC) {
  return getEffectiveDeclContext(cast<Decl>(DC));
}
// Returns true if DC can contain local (block-scope) declarations: a
// function, an Objective-C method, or a block literal.
static bool isLocalContainerContext(const DeclContext *DC) {
  return isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC) || isa<BlockDecl>(DC);
}
// Walks up the effective decl contexts of D.  If a local container context
// (function/method/block) is reached before a namespace or the translation
// unit, returns the record declaration found directly inside it (or null if
// that decl is not a record); otherwise returns null.
static const RecordDecl *GetLocalClassDecl(const Decl *D) {
  const DeclContext *DC = getEffectiveDeclContext(D);
  while (!DC->isNamespace() && !DC->isTranslationUnit()) {
    if (isLocalContainerContext(DC))
      return dyn_cast<RecordDecl>(D);
    D = cast<Decl>(DC);
    DC = getEffectiveDeclContext(D);
  }
  return nullptr;
}
// Returns the pattern declaration to mangle for a structor: the templated
// declaration when 'fn' is a template specialization, otherwise 'fn'.
static const FunctionDecl *getStructor(const FunctionDecl *fn) {
  const FunctionTemplateDecl *ftd = fn->getPrimaryTemplate();
  return ftd ? ftd->getTemplatedDecl() : fn;
}
// NamedDecl overload: applies the FunctionDecl logic when 'decl' is (or is
// null-safely castable to) a function; other declarations pass through.
static const NamedDecl *getStructor(const NamedDecl *decl) {
  if (const FunctionDecl *fn = dyn_cast_or_null<FunctionDecl>(decl))
    return getStructor(fn);
  return decl;
}
// Returns true if ND is a C++ record that is a lambda closure type.
static bool isLambda(const NamedDecl *ND) {
  const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(ND);
  return Record && Record->isLambda();
}
// Sentinel used when the number of function arguments is not known.
static const unsigned UnknownArity = ~0U;
/// Mangling context implementing the Itanium C++ ABI scheme.  Tracks
/// per-(context, identifier) discriminators used to disambiguate same-named
/// local entities.
class ItaniumMangleContextImpl : public ItaniumMangleContext {
  typedef std::pair<const DeclContext*, IdentifierInfo*> DiscriminatorKeyTy;
  // Next discriminator value to hand out per (context, identifier) pair.
  llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator;
  // Discriminator already assigned to each internal declaration.
  llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;

public:
  explicit ItaniumMangleContextImpl(ASTContext &Context,
                                    DiagnosticsEngine &Diags)
      : ItaniumMangleContext(Context, Diags) {}

  /// @name Mangler Entry Points
  /// @{

  bool shouldMangleCXXName(const NamedDecl *D) override;
  bool shouldMangleStringLiteral(const StringLiteral *) override {
    // String literals never receive Itanium manglings.
    return false;
  }
  void mangleCXXName(const NamedDecl *D, raw_ostream &) override;
  void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk,
                   raw_ostream &) override;
  void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
                          const ThisAdjustment &ThisAdjustment,
                          raw_ostream &) override;
  void mangleReferenceTemporary(const VarDecl *D, unsigned ManglingNumber,
                                raw_ostream &) override;
  void mangleCXXVTable(const CXXRecordDecl *RD, raw_ostream &) override;
  void mangleCXXVTT(const CXXRecordDecl *RD, raw_ostream &) override;
  void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
                           const CXXRecordDecl *Type, raw_ostream &) override;
  void mangleCXXRTTI(QualType T, raw_ostream &) override;
  void mangleCXXRTTIName(QualType T, raw_ostream &) override;
  void mangleTypeName(QualType T, raw_ostream &) override;
  void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
                     raw_ostream &) override;
  void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
                     raw_ostream &) override;

  void mangleCXXCtorComdat(const CXXConstructorDecl *D, raw_ostream &) override;
  void mangleCXXDtorComdat(const CXXDestructorDecl *D, raw_ostream &) override;
  void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &) override;
  void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out) override;
  void mangleDynamicAtExitDestructor(const VarDecl *D,
                                     raw_ostream &Out) override;
  void mangleSEHFilterExpression(const NamedDecl *EnclosingDecl,
                                 raw_ostream &Out) override;
  void mangleSEHFinallyBlock(const NamedDecl *EnclosingDecl,
                             raw_ostream &Out) override;
  void mangleItaniumThreadLocalInit(const VarDecl *D, raw_ostream &) override;
  void mangleItaniumThreadLocalWrapper(const VarDecl *D,
                                       raw_ostream &) override;
  void mangleStringLiteral(const StringLiteral *, raw_ostream &) override;
  void mangleCXXVTableBitSet(const CXXRecordDecl *RD, raw_ostream &) override;

  /// Computes the discriminator for ND, returning false when no
  /// discriminator is needed (lambdas, anonymous tags, or a first
  /// occurrence).
  bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
    // Lambda closure types are already numbered.
    if (isLambda(ND))
      return false;

    // Anonymous tags are already numbered.
    if (const TagDecl *Tag = dyn_cast<TagDecl>(ND)) {
      if (Tag->getName().empty() && !Tag->getTypedefNameForAnonDecl())
        return false;
    }

    // Use the canonical number for externally visible decls.
    if (ND->isExternallyVisible()) {
      unsigned discriminator = getASTContext().getManglingNumber(ND);
      if (discriminator == 1)
        return false;
      disc = discriminator - 2;
      return true;
    }

    // Make up a reasonable number for internal decls.
    unsigned &discriminator = Uniquifier[ND];
    if (!discriminator) {
      const DeclContext *DC = getEffectiveDeclContext(ND);
      discriminator = ++Discriminator[std::make_pair(DC, ND->getIdentifier())];
    }
    if (discriminator == 1)
      return false;
    disc = discriminator-2;
    return true;
  }
  /// @}
};
/// Manage the mangling of a single name.
class CXXNameMangler {
  /// Owning context; supplies discriminators, anonymous-struct ids, etc.
  ItaniumMangleContextImpl &Context;
  /// Stream that receives the mangled output.
  raw_ostream &Out;
  /// The "structor" is the top-level declaration being mangled, if
  /// that's not a template specialization; otherwise it's the pattern
  /// for that specialization.
  const NamedDecl *Structor;
  /// The CXXCtorType/CXXDtorType of the structor, or 0 when there is none.
  unsigned StructorType;
  /// The next substitution sequence number.
  unsigned SeqID;
  /// Tracks how many function types the mangler is currently inside, and
  /// whether it is in the result type of the innermost one.
  /// Packed into one word: bit 0 = "in result type", remaining bits = depth.
  class FunctionTypeDepthState {
    unsigned Bits;
    enum { InResultTypeMask = 1 };
  public:
    FunctionTypeDepthState() : Bits(0) {}
    /// The number of function types we're inside.
    unsigned getDepth() const {
      return Bits >> 1;
    }
    /// True if we're in the return type of the innermost function type.
    bool isInResultType() const {
      return Bits & InResultTypeMask;
    }
    /// Enter a nested function type; returns the state to restore via pop().
    FunctionTypeDepthState push() {
      FunctionTypeDepthState tmp = *this;
      Bits = (Bits & ~InResultTypeMask) + 2;
      return tmp;
    }
    void enterResultType() {
      Bits |= InResultTypeMask;
    }
    void leaveResultType() {
      Bits &= ~InResultTypeMask;
    }
    /// Restore the state saved by the matching push().
    void pop(FunctionTypeDepthState saved) {
      assert(getDepth() == saved.getDepth() + 1);
      Bits = saved.Bits;
    }
  } FunctionTypeDepth;
  /// Previously-mangled entities eligible for <substitution>, keyed by the
  /// opaque pointers produced by the addSubstitution overloads.
  llvm::DenseMap<uintptr_t, unsigned> Substitutions;
  ASTContext &getASTContext() const { return Context.getASTContext(); }
public:
  /// Mangle a declaration that is not a constructor or destructor.
  CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
                 const NamedDecl *D = nullptr)
    : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(0),
      SeqID(0) {
    // These can't be mangled without a ctor type or dtor type.
    assert(!D || (!isa<CXXDestructorDecl>(D) &&
                  !isa<CXXConstructorDecl>(D)));
  }
  /// Mangle a constructor of the given kind.
  CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
                 const CXXConstructorDecl *D, CXXCtorType Type)
    : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
      SeqID(0) { }
  /// Mangle a destructor of the given kind.
  CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
                 const CXXDestructorDecl *D, CXXDtorType Type)
    : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
      SeqID(0) { }
#if MANGLE_CHECKER
  ~CXXNameMangler() {
    // Names beginning with \01 are "do not mangle" markers; skip the check.
    if (Out.str()[0] == '\01')
      return;
    int status = 0;
    char *result = abi::__cxa_demangle(Out.str().str().c_str(), 0, 0, &status);
    assert(status == 0 && "Could not demangle mangled name!");
    free(result);
  }
#endif
  raw_ostream &getStream() { return Out; }
  // Top-level entry points.
  void mangle(const NamedDecl *D);
  void mangleCallOffset(int64_t NonVirtual, int64_t Virtual);
  void mangleNumber(const llvm::APSInt &I);
  void mangleNumber(int64_t Number);
  void mangleFloat(const llvm::APFloat &F);
  void mangleFunctionEncoding(const FunctionDecl *FD);
  void mangleSeqID(unsigned SeqID);
  void mangleName(const NamedDecl *ND);
  void mangleType(QualType T);
  void mangleNameOrStandardSubstitution(const NamedDecl *ND);
private:
  // Substitution bookkeeping for the <substitution> production.
  bool mangleSubstitution(const NamedDecl *ND);
  bool mangleSubstitution(QualType T);
  bool mangleSubstitution(TemplateName Template);
  bool mangleSubstitution(uintptr_t Ptr);
  void mangleExistingSubstitution(QualType type);
  void mangleExistingSubstitution(TemplateName name);
  bool mangleStandardSubstitution(const NamedDecl *ND);
  void addSubstitution(const NamedDecl *ND) {
    // Substitutions are keyed on canonical declarations.
    ND = cast<NamedDecl>(ND->getCanonicalDecl());
    addSubstitution(reinterpret_cast<uintptr_t>(ND));
  }
  void addSubstitution(QualType T);
  void addSubstitution(TemplateName Template);
  void addSubstitution(uintptr_t Ptr);
  // Name-piece manglers; see the Itanium ABI grammar comments at each
  // definition for the productions they emit.
  void mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
                              bool recursive = false);
  void mangleUnresolvedName(NestedNameSpecifier *qualifier,
                            DeclarationName name,
                            unsigned KnownArity = UnknownArity);
  void mangleName(const TemplateDecl *TD,
                  const TemplateArgument *TemplateArgs,
                  unsigned NumTemplateArgs);
  void mangleUnqualifiedName(const NamedDecl *ND) {
    mangleUnqualifiedName(ND, ND->getDeclName(), UnknownArity);
  }
  void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name,
                             unsigned KnownArity);
  void mangleUnscopedName(const NamedDecl *ND);
  void mangleUnscopedTemplateName(const TemplateDecl *ND);
  void mangleUnscopedTemplateName(TemplateName);
  void mangleSourceName(const IdentifierInfo *II);
  void mangleLocalName(const Decl *D);
  void mangleBlockForPrefix(const BlockDecl *Block);
  void mangleUnqualifiedBlock(const BlockDecl *Block);
  void mangleLambda(const CXXRecordDecl *Lambda);
  void mangleNestedName(const NamedDecl *ND, const DeclContext *DC,
                        bool NoFunction=false);
  void mangleNestedName(const TemplateDecl *TD,
                        const TemplateArgument *TemplateArgs,
                        unsigned NumTemplateArgs);
  void manglePrefix(NestedNameSpecifier *qualifier);
  void manglePrefix(const DeclContext *DC, bool NoFunction=false);
  void manglePrefix(QualType type);
  void mangleTemplatePrefix(const TemplateDecl *ND, bool NoFunction=false);
  void mangleTemplatePrefix(TemplateName Template);
  bool mangleUnresolvedTypeOrSimpleId(QualType DestroyedType,
                                      StringRef Prefix = "");
  void mangleOperatorName(DeclarationName Name, unsigned Arity);
  void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity);
  void mangleQualifiers(Qualifiers Quals);
  void mangleRefQualifier(RefQualifierKind RefQualifier);
  void mangleObjCMethodName(const ObjCMethodDecl *MD);
  // Declare manglers for every type class.
#define ABSTRACT_TYPE(CLASS, PARENT)
#define NON_CANONICAL_TYPE(CLASS, PARENT)
#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
#include "clang/AST/TypeNodes.def"
  void mangleType(const TagType*);
  void mangleType(TemplateName);
  void mangleBareFunctionType(const FunctionType *T,
                              bool MangleReturnType);
  void mangleNeonVectorType(const VectorType *T);
  void mangleAArch64NeonVectorType(const VectorType *T);
  void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value);
  void mangleMemberExprBase(const Expr *base, bool isArrow);
  void mangleMemberExpr(const Expr *base, bool isArrow,
                        NestedNameSpecifier *qualifier,
                        NamedDecl *firstQualifierLookup,
                        DeclarationName name,
                        unsigned knownArity);
  void mangleCastExpression(const Expr *E, StringRef CastEncoding);
  void mangleInitListElements(const InitListExpr *InitList);
  void mangleExpression(const Expr *E, unsigned Arity = UnknownArity);
  void mangleCXXCtorType(CXXCtorType T);
  void mangleCXXDtorType(CXXDtorType T);
  void mangleTemplateArgs(const ASTTemplateArgumentListInfo &TemplateArgs);
  void mangleTemplateArgs(const TemplateArgument *TemplateArgs,
                          unsigned NumTemplateArgs);
  void mangleTemplateArgs(const TemplateArgumentList &AL);
  void mangleTemplateArg(TemplateArgument A);
  void mangleTemplateParameter(unsigned Index);
  void mangleFunctionParam(const ParmVarDecl *parm);
};
}
bool ItaniumMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
if (FD) {
LanguageLinkage L = FD->getLanguageLinkage();
// Overloadable functions need mangling.
if (FD->hasAttr<OverloadableAttr>())
return true;
// "main" is not mangled.
if (FD->isMain())
return false;
// C++ functions and those whose names are not a simple identifier need
// mangling.
if (!FD->getDeclName().isIdentifier() || L == CXXLanguageLinkage)
return true;
// C functions are not mangled.
if (L == CLanguageLinkage)
return false;
}
// Otherwise, no mangling is done outside C++ mode.
if (!getASTContext().getLangOpts().CPlusPlus)
return false;
const VarDecl *VD = dyn_cast<VarDecl>(D);
if (VD) {
// C variables are not mangled.
if (VD->isExternC())
return false;
// Variables at global scope with non-internal linkage are not mangled
const DeclContext *DC = getEffectiveDeclContext(D);
// Check for extern variable declared locally.
if (DC->isFunctionOrMethod() && D->hasLinkage())
while (!DC->isNamespace() && !DC->isTranslationUnit())
DC = getEffectiveParentContext(DC);
if (DC->isTranslationUnit() && D->getFormalLinkage() != InternalLinkage &&
!isa<VarTemplateSpecializationDecl>(D))
return false;
}
return true;
}
/// Emit the complete mangled name for \p D.
void CXXNameMangler::mangle(const NamedDecl *D) {
  // <mangled-name> ::= _Z <encoding>
  //                ::= <data name>
  //                ::= <special-name>
  // Every mangled name opens with "_Z".
  Out << "_Z";
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    mangleFunctionEncoding(FD);
    return;
  }
  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    mangleName(VD);
    return;
  }
  if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(D)) {
    mangleName(IFD->getAnonField());
    return;
  }
  // Anything else must be a field.
  mangleName(cast<FieldDecl>(D));
}
/// Emit the <encoding> for a function: its name, optionally followed by
/// its bare function type.
void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
  // <encoding> ::= <function name> <bare-function-type>
  mangleName(FD);
  // Don't mangle in the type if this isn't a decl we should typically mangle.
  if (!Context.shouldMangleDeclName(FD))
    return;
  // enable_if conditions are mangled into the name as a vendor-extended
  // qualifier: Ua9enable_ifI <expr>+ E.
  if (FD->hasAttr<EnableIfAttr>()) {
    FunctionTypeDepthState Saved = FunctionTypeDepth.push();
    Out << "Ua9enable_ifI";
    // FIXME: specific_attr_iterator iterates in reverse order. Fix that and use
    // it here.
    // Walk the attribute list backwards so conditions come out in source order.
    for (AttrVec::const_reverse_iterator I = FD->getAttrs().rbegin(),
                                         E = FD->getAttrs().rend();
         I != E; ++I) {
      EnableIfAttr *EIA = dyn_cast<EnableIfAttr>(*I);
      if (!EIA)
        continue;
      Out << 'X';
      mangleExpression(EIA->getCond());
      Out << 'E';
    }
    Out << 'E';
    FunctionTypeDepth.pop(Saved);
  }
  // Whether the mangling of a function type includes the return type depends on
  // the context and the nature of the function. The rules for deciding whether
  // the return type is included are:
  //
  //   1. Template functions (names or types) have return types encoded, with
  //   the exceptions listed below.
  //   2. Function types not appearing as part of a function name mangling,
  //   e.g. parameters, pointer types, etc., have return type encoded, with the
  //   exceptions listed below.
  //   3. Non-template function names do not have return types encoded.
  //
  // The exceptions mentioned in (1) and (2) above, for which the return type is
  // never included, are
  //   1. Constructors.
  //   2. Destructors.
  //   3. Conversion operator functions, e.g. operator int.
  bool MangleReturnType = false;
  if (FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate()) {
    if (!(isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD) ||
          isa<CXXConversionDecl>(FD)))
      MangleReturnType = true;
    // Mangle the type of the primary template.
    FD = PrimaryTemplate->getTemplatedDecl();
  }
  mangleBareFunctionType(FD->getType()->getAs<FunctionType>(),
                         MangleReturnType);
}
/// Walk out of any enclosing linkage specifications (extern "C"/"C++")
/// and return the first context that is not one.
static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
  while (isa<LinkageSpecDecl>(DC))
    DC = getEffectiveParentContext(DC);
  return DC;
}
/// Return whether a given namespace is the 'std' namespace.
static bool isStd(const NamespaceDecl *NS) {
if (!IgnoreLinkageSpecDecls(getEffectiveParentContext(NS))
->isTranslationUnit())
return false;
const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
return II && II->isStr("std");
}
// isStdNamespace - Return whether a given decl context is a toplevel 'std'
// namespace.
static bool isStdNamespace(const DeclContext *DC) {
  // Short-circuit keeps the cast<NamespaceDecl> safe.
  return DC->isNamespace() && isStd(cast<NamespaceDecl>(DC));
}
/// If \p ND is a template specialization, return the template it
/// specializes and store the specialization arguments in \p TemplateArgs;
/// otherwise return null and leave \p TemplateArgs untouched.
static const TemplateDecl *
isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
  // A function template specialization?
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
    if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
      TemplateArgs = FD->getTemplateSpecializationArgs();
      return TD;
    }
  // A class template specialization?
  if (const auto *CTS = dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
    TemplateArgs = &CTS->getTemplateArgs();
    return CTS->getSpecializedTemplate();
  }
  // A variable template specialization?
  if (const auto *VTS = dyn_cast<VarTemplateSpecializationDecl>(ND)) {
    TemplateArgs = &VTS->getTemplateArgs();
    return VTS->getSpecializedTemplate();
  }
  return nullptr;
}
/// Emit a <name> for \p ND, choosing among the unscoped, nested, and local
/// name productions.
void CXXNameMangler::mangleName(const NamedDecl *ND) {
  // <name> ::= <nested-name>
  //        ::= <unscoped-name>
  //        ::= <unscoped-template-name> <template-args>
  //        ::= <local-name>
  //
  const DeclContext *DC = getEffectiveDeclContext(ND);

  // If this is an extern variable declared locally, the relevant DeclContext
  // is that of the containing namespace, or the translation unit.
  // FIXME: This is a hack; extern variables declared locally should have
  // a proper semantic declaration context!
  if (isLocalContainerContext(DC) && ND->hasLinkage() && !isLambda(ND)) {
    while (!DC->isNamespace() && !DC->isTranslationUnit())
      DC = getEffectiveParentContext(DC);
  } else if (GetLocalClassDecl(ND)) {
    // A member of a local class mangles as a <local-name>.
    mangleLocalName(ND);
    return;
  }

  DC = IgnoreLinkageSpecDecls(DC);

  if (DC->isTranslationUnit() || isStdNamespace(DC)) {
    // An unscoped name; templates take the template-name + args form.
    const TemplateArgumentList *TemplateArgs = nullptr;
    if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
      mangleUnscopedTemplateName(TD);
      mangleTemplateArgs(*TemplateArgs);
    } else {
      mangleUnscopedName(ND);
    }
    return;
  }

  if (isLocalContainerContext(DC)) {
    mangleLocalName(ND);
    return;
  }

  mangleNestedName(ND, DC);
}
/// Emit a <name> for a template given an explicit argument list.
void CXXNameMangler::mangleName(const TemplateDecl *TD,
                                const TemplateArgument *TemplateArgs,
                                unsigned NumTemplateArgs) {
  const DeclContext *DC = IgnoreLinkageSpecDecls(getEffectiveDeclContext(TD));

  // Anything not at translation-unit or std scope is a nested name.
  if (!DC->isTranslationUnit() && !isStdNamespace(DC)) {
    mangleNestedName(TD, TemplateArgs, NumTemplateArgs);
    return;
  }

  mangleUnscopedTemplateName(TD);
  mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
}
void CXXNameMangler::mangleUnscopedName(const NamedDecl *ND) {
// <unscoped-name> ::= <unqualified-name>
// ::= St <unqualified-name> # ::std::
if (isStdNamespace(IgnoreLinkageSpecDecls(getEffectiveDeclContext(ND))))
Out << "St";
mangleUnqualifiedName(ND);
}
/// Emit an <unscoped-template-name> for a template declaration.
void CXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *ND) {
  // <unscoped-template-name> ::= <unscoped-name>
  //                          ::= <substitution>
  if (mangleSubstitution(ND))
    return;

  if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(ND)) {
    // <template-template-param> ::= <template-param>
    mangleTemplateParameter(TTP->getIndex());
  } else {
    mangleUnscopedName(ND->getTemplatedDecl());
  }

  addSubstitution(ND);
}
/// Emit an <unscoped-template-name> for an arbitrary TemplateName,
/// delegating to the declaration form when one is available.
void CXXNameMangler::mangleUnscopedTemplateName(TemplateName Template) {
  // <unscoped-template-name> ::= <unscoped-name>
  //                          ::= <substitution>
  if (TemplateDecl *TD = Template.getAsTemplateDecl())
    return mangleUnscopedTemplateName(TD);

  if (mangleSubstitution(Template))
    return;

  // Only dependent template names should remain at this point.
  DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
  assert(Dependent && "Not a dependent template name?");
  if (const IdentifierInfo *Id = Dependent->getIdentifier())
    mangleSourceName(Id);
  else
    mangleOperatorName(Dependent->getOperator(), UnknownArity);

  addSubstitution(Template);
}
/// Emit a floating-point literal as fixed-length lowercase hex.
void CXXNameMangler::mangleFloat(const llvm::APFloat &f) {
  // ABI:
  //   Floating-point literals are encoded using a fixed-length
  //   lowercase hexadecimal string corresponding to the internal
  //   representation (IEEE on Itanium), high-order bytes first,
  //   without leading zeroes. For example: "Lf bf800000 E" is -1.0f
  //   on Itanium.
  // The 'without leading zeroes' thing seems to be an editorial
  // mistake; see the discussion on cxx-abi-dev beginning on
  // 2012-01-16.
  //
  // Our requirements here are just barely weird enough to justify
  // using a custom algorithm instead of post-processing APInt::toString().

  llvm::APInt bits = f.bitcastToAPInt();
  unsigned numDigits = (bits.getBitWidth() + 3) / 4;
  assert(numDigits != 0);

  SmallVector<char, 20> digits;
  digits.set_size(numDigits);

  static const char hexChars[16] = {
    '0', '1', '2', '3', '4', '5', '6', '7',
    '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
  };

  // Emit one nibble per character, most significant nibble first.
  for (unsigned i = 0; i != numDigits; ++i) {
    // Bit position of this hex digit within the value.
    unsigned bitIndex = 4 * (numDigits - i - 1);
    // Pull out the word holding those four bits and isolate them.
    llvm::integerPart word =
        bits.getRawData()[bitIndex / llvm::integerPartWidth];
    unsigned nibble =
        unsigned(word >> (bitIndex % llvm::integerPartWidth)) & 0xF;
    digits[i] = hexChars[nibble];
  }

  Out.write(digits.data(), numDigits);
}
/// Emit an arbitrary-precision <number>; negatives take an 'n' prefix.
void CXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
  if (Value.isSigned() && Value.isNegative()) {
    Out << 'n';
    Value.abs().print(Out, /*signed*/ false);
    return;
  }
  Value.print(Out, /*signed*/ false);
}
/// Emit a 64-bit <number>; negatives take an 'n' prefix.
void CXXNameMangler::mangleNumber(int64_t Number) {
  // <number> ::= [n] <non-negative decimal integer>
  //
  // Compute the magnitude in unsigned arithmetic: negating INT64_MIN as a
  // signed value overflows (undefined behavior), but its magnitude is
  // representable — and unsigned negation is well-defined.
  unsigned long long Magnitude = static_cast<unsigned long long>(Number);
  if (Number < 0) {
    Out << 'n';
    Magnitude = -Magnitude;
  }
  Out << Magnitude;
}
/// Emit a <call-offset> for a thunk.
void CXXNameMangler::mangleCallOffset(int64_t NonVirtual, int64_t Virtual) {
  // <call-offset> ::= h <nv-offset> _
  //               ::= v <v-offset> _
  // <nv-offset>   ::= <offset number>        # non-virtual base override
  // <v-offset>    ::= <offset number> _ <virtual offset number>
  //                     # virtual base override, with vcall offset
  if (Virtual) {
    // Virtual base override: both offsets are emitted.
    Out << 'v';
    mangleNumber(NonVirtual);
    Out << '_';
    mangleNumber(Virtual);
    Out << '_';
    return;
  }
  // Non-virtual override: only the non-virtual offset.
  Out << 'h';
  mangleNumber(NonVirtual);
  Out << '_';
}
/// Emit a <prefix> for a type used as a nested-name-specifier component.
void CXXNameMangler::manglePrefix(QualType type) {
  if (const auto *TST = type->getAs<TemplateSpecializationType>()) {
    if (mangleSubstitution(QualType(TST, 0)))
      return;
    mangleTemplatePrefix(TST->getTemplateName());
    // FIXME: GCC does not appear to mangle the template arguments when
    // the template in question is a dependent template name. Should we
    // emulate that badness?
    mangleTemplateArgs(TST->getArgs(), TST->getNumArgs());
    addSubstitution(QualType(TST, 0));
    return;
  }

  if (const auto *DTST =
          type->getAs<DependentTemplateSpecializationType>()) {
    if (mangleSubstitution(QualType(DTST, 0)))
      return;
    TemplateName Template = getASTContext().getDependentTemplateName(
        DTST->getQualifier(), DTST->getIdentifier());
    mangleTemplatePrefix(Template);
    // FIXME: as above, GCC omits the arguments for dependent template
    // names. Should we emulate that badness?
    mangleTemplateArgs(DTST->getArgs(), DTST->getNumArgs());
    addSubstitution(QualType(DTST, 0));
    return;
  }

  // We use the QualType mangle type variant here because it handles
  // substitutions.
  mangleType(type);
}
/// Mangle everything prior to the base-unresolved-name in an unresolved-name.
///
/// \param recursive - true if this is being called recursively,
/// i.e. if there is more prefix "to the right".
void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
                                            bool recursive) {
  // x, ::x
  // <unresolved-name> ::= [gs] <base-unresolved-name>
  // T::x / decltype(p)::x
  // <unresolved-name> ::= sr <unresolved-type> <base-unresolved-name>
  // T::N::x /decltype(p)::N::x
  // <unresolved-name> ::= srN <unresolved-type> <unresolved-qualifier-level>+ E
  //                       <base-unresolved-name>
  // A::x, N::y, A<T>::z; "gs" means leading "::"
  // <unresolved-name> ::= [gs] sr <unresolved-qualifier-level>+ E
  //                       <base-unresolved-name>
  switch (qualifier->getKind()) {
  case NestedNameSpecifier::Global:
    Out << "gs";
    // We want an 'sr' unless this is the entire NNS.
    if (recursive)
      Out << "sr";
    // We never want an 'E' here.
    return;
  case NestedNameSpecifier::Super:
    llvm_unreachable("Can't mangle __super specifier");
  case NestedNameSpecifier::Namespace:
    // Emit any outer components first; only the outermost one opens 'sr'.
    if (qualifier->getPrefix())
      mangleUnresolvedPrefix(qualifier->getPrefix(),
                             /*recursive*/ true);
    else
      Out << "sr";
    mangleSourceName(qualifier->getAsNamespace()->getIdentifier());
    break;
  case NestedNameSpecifier::NamespaceAlias:
    // Same shape as the Namespace case, using the alias's own identifier.
    if (qualifier->getPrefix())
      mangleUnresolvedPrefix(qualifier->getPrefix(),
                             /*recursive*/ true);
    else
      Out << "sr";
    mangleSourceName(qualifier->getAsNamespaceAlias()->getIdentifier());
    break;
  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate: {
    const Type *type = qualifier->getAsType();
    // We only want to use an unresolved-type encoding if this is one of:
    //   - a decltype
    //   - a template type parameter
    //   - a template template parameter with arguments
    // In all of these cases, we should have no prefix.
    if (qualifier->getPrefix()) {
      mangleUnresolvedPrefix(qualifier->getPrefix(),
                             /*recursive*/ true);
    } else {
      // Otherwise, all the cases want this.
      Out << "sr";
    }
    // If the helper reports it fully handled the component (returns true),
    // skip the trailing 'E' below.
    if (mangleUnresolvedTypeOrSimpleId(QualType(type, 0), recursive ? "N" : ""))
      return;
    break;
  }
  case NestedNameSpecifier::Identifier:
    // Member expressions can have these without prefixes.
    if (qualifier->getPrefix())
      mangleUnresolvedPrefix(qualifier->getPrefix(),
                             /*recursive*/ true);
    else
      Out << "sr";
    mangleSourceName(qualifier->getAsIdentifier());
    break;
  }
  // If this was the innermost part of the NNS, and we fell out to
  // here, append an 'E'.
  if (!recursive)
    Out << 'E';
}
/// Mangle an unresolved-name: a name that was not resolved to a specific
/// entity, emitting its qualifier prefix (if any) and then the
/// base-unresolved-name.
void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *qualifier,
                                          DeclarationName name,
                                          unsigned knownArity) {
  if (qualifier)
    mangleUnresolvedPrefix(qualifier);

  switch (name.getNameKind()) {
  // <base-unresolved-name> ::= <simple-id>
  case DeclarationName::Identifier:
    mangleSourceName(name.getAsIdentifierInfo());
    break;
  // <base-unresolved-name> ::= dn <destructor-name>
  case DeclarationName::CXXDestructorName:
    Out << "dn";
    mangleUnresolvedTypeOrSimpleId(name.getCXXNameType());
    break;
  // <base-unresolved-name> ::= on <operator-name>
  case DeclarationName::CXXOperatorName:
  case DeclarationName::CXXConversionFunctionName:
  case DeclarationName::CXXLiteralOperatorName:
    Out << "on";
    mangleOperatorName(name, knownArity);
    break;
  // The remaining kinds can never occur as unresolved names.
  case DeclarationName::CXXConstructorName:
    llvm_unreachable("Can't mangle a constructor name!");
  case DeclarationName::CXXUsingDirective:
    llvm_unreachable("Can't mangle a using directive name!");
  case DeclarationName::ObjCZeroArgSelector:
  case DeclarationName::ObjCOneArgSelector:
  case DeclarationName::ObjCMultiArgSelector:
    llvm_unreachable("Can't mangle Objective-C selector names here!");
  }
}
/// Emit an <unqualified-name> for \p ND. \p Name may differ from ND's own
/// name (e.g. for ctor/dtor manglings); \p KnownArity carries an operator
/// arity already computed by the caller, or UnknownArity.
void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
                                           DeclarationName Name,
                                           unsigned KnownArity) {
  unsigned Arity = KnownArity;
  //  <unqualified-name> ::= <operator-name>
  //                     ::= <ctor-dtor-name>
  //                     ::= <source-name>
  switch (Name.getNameKind()) {
  case DeclarationName::Identifier: {
    if (Name.getAsIdentifierInfo()) {
      // We must avoid conflicts between internally- and externally-
      // linked variable and function declaration names in the same TU:
      //   void test() { extern void foo(); }
      //   static void foo();
      // This naming convention is the same as that followed by GCC,
      // though it shouldn't actually matter.
      if (ND && ND->getFormalLinkage() == InternalLinkage &&
          getEffectiveDeclContext(ND)->isFileContext())
        Out << 'L';
      // HLSL Change - use the 'name for IR' rather than II directly.
      StringRef r = ND->getNameForIR();
      Out << r.size() << r.data();
      break;
    }
    // Otherwise, an anonymous entity. We must have a declaration.
    assert(ND && "mangling empty name without declaration");
    if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
      if (NS->isAnonymousNamespace()) {
        // This is how gcc mangles these names.
        Out << "12_GLOBAL__N_1";
        break;
      }
    }
    if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
      // We must have an anonymous union or struct declaration.
      const RecordDecl *RD =
        cast<RecordDecl>(VD->getType()->getAs<RecordType>()->getDecl());
      // Itanium C++ ABI 5.1.2:
      //
      //   For the purposes of mangling, the name of an anonymous union is
      //   considered to be the name of the first named data member found by a
      //   pre-order, depth-first, declaration-order walk of the data members of
      //   the anonymous union. If there is no such data member (i.e., if all of
      //   the data members in the union are unnamed), then there is no way for
      //   a program to refer to the anonymous union, and there is therefore no
      //   need to mangle its name.
      assert(RD->isAnonymousStructOrUnion()
             && "Expected anonymous struct or union!");
      const FieldDecl *FD = RD->findFirstNamedDataMember();
      // It's actually possible for various reasons for us to get here
      // with an empty anonymous struct / union. Fortunately, it
      // doesn't really matter what name we generate.
      if (!FD) break;
      assert(FD->getIdentifier() && "Data member name isn't an identifier!");
      mangleSourceName(FD->getIdentifier());
      break;
    }
    // Class extensions have no name as a category, and it's possible
    // for them to be the semantic parent of certain declarations
    // (primarily, tag decls defined within declarations). Such
    // declarations will always have internal linkage, so the name
    // doesn't really matter, but we shouldn't crash on them. For
    // safety, just handle all ObjC containers here.
    if (isa<ObjCContainerDecl>(ND))
      break;
    // We must have an anonymous struct.
    const TagDecl *TD = cast<TagDecl>(ND);
    if (const TypedefNameDecl *D = TD->getTypedefNameForAnonDecl()) {
      // An anonymous tag named via a typedef mangles as that typedef's name.
      assert(TD->getDeclContext() == D->getDeclContext() &&
             "Typedef should not be in another decl context!");
      assert(D->getDeclName().getAsIdentifierInfo() &&
             "Typedef was not named!");
      mangleSourceName(D->getDeclName().getAsIdentifierInfo());
      break;
    }
    // <unnamed-type-name> ::= <closure-type-name>
    //
    // <closure-type-name> ::= Ul <lambda-sig> E [ <nonnegative number> ] _
    // <lambda-sig> ::= <parameter-type>+   # Parameter types or 'v' for 'void'.
    if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
      if (Record->isLambda() && Record->getLambdaManglingNumber()) {
        mangleLambda(Record);
        break;
      }
    }
    if (TD->isExternallyVisible()) {
      // Externally visible unnamed tags get the "Ut[<number>]_" encoding,
      // numbered by the AST's mangling number.
      unsigned UnnamedMangle = getASTContext().getManglingNumber(TD);
      Out << "Ut";
      if (UnnamedMangle > 1)
        Out << llvm::utostr(UnnamedMangle - 2);
      Out << '_';
      break;
    }
    // Get a unique id for the anonymous struct.
    unsigned AnonStructId = Context.getAnonymousStructId(TD);
    // Mangle it as a source name in the form
    // [n] $_<id>
    // where n is the length of the string.
    SmallString<8> Str;
    Str += "$_";
    Str += llvm::utostr(AnonStructId);
    Out << Str.size();
    Out << Str;
    break;
  }
  case DeclarationName::ObjCZeroArgSelector:
  case DeclarationName::ObjCOneArgSelector:
  case DeclarationName::ObjCMultiArgSelector:
    llvm_unreachable("Can't mangle Objective-C selector names here!");
  case DeclarationName::CXXConstructorName:
    if (ND == Structor)
      // If the named decl is the C++ constructor we're mangling, use the type
      // we were given.
      mangleCXXCtorType(static_cast<CXXCtorType>(StructorType));
    else
      // Otherwise, use the complete constructor name. This is relevant if a
      // class with a constructor is declared within a constructor.
      mangleCXXCtorType(Ctor_Complete);
    break;
  case DeclarationName::CXXDestructorName:
    if (ND == Structor)
      // If the named decl is the C++ destructor we're mangling, use the type we
      // were given.
      mangleCXXDtorType(static_cast<CXXDtorType>(StructorType));
    else
      // Otherwise, use the complete destructor name. This is relevant if a
      // class with a destructor is declared within a destructor.
      mangleCXXDtorType(Dtor_Complete);
    break;
  case DeclarationName::CXXOperatorName:
    if (ND && Arity == UnknownArity) {
      Arity = cast<FunctionDecl>(ND)->getNumParams();
      // If we have a member function, we need to include the 'this' pointer.
      if (const auto *MD = dyn_cast<CXXMethodDecl>(ND))
        if (!MD->isStatic())
          Arity++;
    }
    LLVM_FALLTHROUGH; // HLSL Change
  case DeclarationName::CXXConversionFunctionName:
  case DeclarationName::CXXLiteralOperatorName:
    mangleOperatorName(Name, Arity);
    break;
  case DeclarationName::CXXUsingDirective:
    llvm_unreachable("Can't mangle a using directive name!");
  }
}
/// Emit a length-prefixed <source-name> (e.g. "foo" mangles as "3foo").
void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
  // <source-name> ::= <positive length number> <identifier>
  // <number> ::= [n] <non-negative decimal integer>
  // <identifier> ::= <unqualified source code identifier>
  Out << II->getLength() << II->getName();
}
void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
const DeclContext *DC,
bool NoFunction) {
// <nested-name>
// ::= N [<CV-qualifiers>] [<ref-qualifier>] <prefix> <unqualified-name> E
// ::= N [<CV-qualifiers>] [<ref-qualifier>] <template-prefix>
// <template-args> E
Out << 'N';
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND)) {
Qualifiers MethodQuals =
Qualifiers::fromCVRMask(Method->getTypeQualifiers());
// We do not consider restrict a distinguishing attribute for overloading
// purposes so we must not mangle it.
MethodQuals.removeRestrict();
mangleQualifiers(MethodQuals);
mangleRefQualifier(Method->getRefQualifier());
}
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = nullptr;
if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
mangleTemplatePrefix(TD, NoFunction);
mangleTemplateArgs(*TemplateArgs);
}
else {
manglePrefix(DC, NoFunction);
mangleUnqualifiedName(ND);
}
Out << 'E';
}
/// Emit a <nested-name> from a template and an explicit argument list.
void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
                                      const TemplateArgument *TemplateArgs,
                                      unsigned NumTemplateArgs) {
  // <nested-name> ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
  Out << 'N';
  mangleTemplatePrefix(TD);
  mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
  Out << 'E';
}
/// Emit a <local-name> for an entity (or block) declared inside a function,
/// an Objective-C method, or a block.
///
/// Fix: the lambda default-argument path previously dereferenced the result
/// of dyn_cast<CXXRecordDecl>(RD) without a null check, crashing if the
/// local class was not a CXXRecordDecl; the cast result is now guarded.
void CXXNameMangler::mangleLocalName(const Decl *D) {
  // <local-name> := Z <function encoding> E <entity name> [<discriminator>]
  //              := Z <function encoding> E s [<discriminator>]
  // <local-name> := Z <function encoding> E d [ <parameter number> ]
  //                 _ <entity name>
  // <discriminator> := _ <non-negative number>
  assert(isa<NamedDecl>(D) || isa<BlockDecl>(D));
  const RecordDecl *RD = GetLocalClassDecl(D);
  const DeclContext *DC = getEffectiveDeclContext(RD ? RD : D);

  // Mangle the enclosing function (or ObjC method / block).
  Out << 'Z';
  if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(DC))
    mangleObjCMethodName(MD);
  else if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC))
    mangleBlockForPrefix(BD);
  else
    mangleFunctionEncoding(cast<FunctionDecl>(DC));
  Out << 'E';

  if (RD) {
    // The parameter number is omitted for the last parameter, 0 for the
    // second-to-last parameter, 1 for the third-to-last parameter, etc. The
    // <entity name> will of course contain a <closure-type-name>: Its
    // numbering will be local to the particular argument in which it appears
    // -- other default arguments do not affect its encoding.
    const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD);
    // Null check: a local class that isn't a CXXRecordDecl can't be a
    // lambda, so simply skip the default-argument encoding for it.
    if (CXXRD && CXXRD->isLambda()) {
      if (const ParmVarDecl *Parm
              = dyn_cast_or_null<ParmVarDecl>(CXXRD->getLambdaContextDecl())) {
        if (const FunctionDecl *Func
                = dyn_cast<FunctionDecl>(Parm->getDeclContext())) {
          Out << 'd';
          unsigned Num = Func->getNumParams() - Parm->getFunctionScopeIndex();
          if (Num > 1)
            mangleNumber(Num - 2);
          Out << '_';
        }
      }
    }

    // Mangle the name relative to the closest enclosing function.
    // equality ok because RD derived from ND above
    if (D == RD) {
      mangleUnqualifiedName(RD);
    } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
      manglePrefix(getEffectiveDeclContext(BD), true /*NoFunction*/);
      mangleUnqualifiedBlock(BD);
    } else {
      const NamedDecl *ND = cast<NamedDecl>(D);
      mangleNestedName(ND, getEffectiveDeclContext(ND), true /*NoFunction*/);
    }
  } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
    // Mangle a block in a default parameter; see above explanation for
    // lambdas.
    if (const ParmVarDecl *Parm
            = dyn_cast_or_null<ParmVarDecl>(BD->getBlockManglingContextDecl())) {
      if (const FunctionDecl *Func
              = dyn_cast<FunctionDecl>(Parm->getDeclContext())) {
        Out << 'd';
        unsigned Num = Func->getNumParams() - Parm->getFunctionScopeIndex();
        if (Num > 1)
          mangleNumber(Num - 2);
        Out << '_';
      }
    }
    mangleUnqualifiedBlock(BD);
  } else {
    // Any other local entity mangles with just its unqualified name.
    mangleUnqualifiedName(cast<NamedDecl>(D));
  }

  // Append a discriminator when several same-named entities share this
  // function scope.
  if (const NamedDecl *ND = dyn_cast<NamedDecl>(RD ? RD : D)) {
    unsigned disc;
    if (Context.getNextDiscriminator(ND, disc)) {
      if (disc < 10)
        Out << '_' << disc;
      else
        Out << "__" << disc << '_';
    }
  }
}
/// Mangle a block when it appears as part of a name prefix.
void CXXNameMangler::mangleBlockForPrefix(const BlockDecl *Block) {
  // Blocks in a local class mangle as a <local-name>.
  if (GetLocalClassDecl(Block)) {
    mangleLocalName(Block);
    return;
  }
  const DeclContext *DC = getEffectiveDeclContext(Block);
  // So do blocks whose context is itself function-local.
  if (isLocalContainerContext(DC)) {
    mangleLocalName(Block);
    return;
  }
  manglePrefix(DC);
  mangleUnqualifiedBlock(Block);
}
void CXXNameMangler::mangleUnqualifiedBlock(const BlockDecl *Block) {
if (Decl *Context = Block->getBlockManglingContextDecl()) {
if ((isa<VarDecl>(Context) || isa<FieldDecl>(Context)) &&
Context->getDeclContext()->isRecord()) {
if (const IdentifierInfo *Name
= cast<NamedDecl>(Context)->getIdentifier()) {
mangleSourceName(Name);
Out << 'M';
}
}
}
// If we have a block mangling number, use it.
unsigned Number = Block->getBlockManglingNumber();
// Otherwise, just make up a number. It doesn't matter what it is because
// the symbol in question isn't externally visible.
if (!Number)
Number = Context.getBlockId(Block, false);
Out << "Ub";
if (Number > 0)
Out << Number - 1;
Out << '_';
}
/// Mangle a lambda closure type:
///   <closure-type-name> ::= Ul <lambda-sig> E [<nonnegative number>] _
void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
  // If the context of a closure type is an initializer for a class member
  // (static or nonstatic), it is encoded in a qualified name with a final
  // <prefix> of the form:
  //
  //   <data-member-prefix> := <member source-name> M
  //
  // Technically, the data-member-prefix is part of the <prefix>. However,
  // since a closure type will always be mangled with a prefix, it's easier
  // to emit that last part of the prefix here.
  if (Decl *Context = Lambda->getLambdaContextDecl()) {
    if ((isa<VarDecl>(Context) || isa<FieldDecl>(Context)) &&
        Context->getDeclContext()->isRecord()) {
      if (const IdentifierInfo *Name
            = cast<NamedDecl>(Context)->getIdentifier()) {
        mangleSourceName(Name);
        Out << 'M';
      }
    }
  }
  Out << "Ul";
  // The <lambda-sig> is the bare parameter list of the call operator.
  const FunctionProtoType *Proto = Lambda->getLambdaTypeInfo()->getType()->
                                   getAs<FunctionProtoType>();
  mangleBareFunctionType(Proto, /*MangleReturnType=*/false);
  Out << "E";
  // The number is omitted for the first closure type with a given
  // <lambda-sig> in a given context; it is n-2 for the nth closure type
  // (in lexical order) with that same <lambda-sig> and context.
  //
  // The AST keeps track of the number for us.
  unsigned Number = Lambda->getLambdaManglingNumber();
  assert(Number > 0 && "Lambda should be mangled as an unnamed class");
  if (Number > 1)
    mangleNumber(Number - 2);
  Out << '_';
}
/// Mangle the <prefix> contribution of a nested-name-specifier.
void CXXNameMangler::manglePrefix(NestedNameSpecifier *qualifier) {
  switch (qualifier->getKind()) {
  case NestedNameSpecifier::Global:
    // The global scope ("::") contributes nothing to the prefix.
    return;
  case NestedNameSpecifier::Super:
    llvm_unreachable("Can't mangle __super specifier");
  case NestedNameSpecifier::Namespace:
    mangleName(qualifier->getAsNamespace());
    return;
  case NestedNameSpecifier::NamespaceAlias:
    // A namespace alias mangles as the namespace it names.
    mangleName(qualifier->getAsNamespaceAlias()->getNamespace());
    return;
  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate:
    manglePrefix(QualType(qualifier->getAsType(), 0));
    return;
  case NestedNameSpecifier::Identifier:
    // Member expressions can have these without prefixes, but that
    // should end up in mangleUnresolvedPrefix instead.
    assert(qualifier->getPrefix());
    manglePrefix(qualifier->getPrefix());
    mangleSourceName(qualifier->getAsIdentifier());
    return;
  }
  llvm_unreachable("unexpected nested name specifier");
}
/// Mangle the <prefix> of a declaration context. When \p NoFunction is
/// true, function-local contexts are skipped (the caller handles the
/// local-name encoding itself).
void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
  //  <prefix> ::= <prefix> <unqualified-name>
  //           ::= <template-prefix> <template-args>
  //           ::= <template-param>
  //           ::= # empty
  //           ::= <substitution>
  DC = IgnoreLinkageSpecDecls(DC);
  if (DC->isTranslationUnit())
    return;
  if (NoFunction && isLocalContainerContext(DC))
    return;
  assert(!isLocalContainerContext(DC));
  const NamedDecl *ND = cast<NamedDecl>(DC);
  // A previously-seen prefix mangles as a substitution reference.
  if (mangleSubstitution(ND))
    return;
  // Check if we have a template.
  const TemplateArgumentList *TemplateArgs = nullptr;
  if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
    mangleTemplatePrefix(TD);
    mangleTemplateArgs(*TemplateArgs);
  } else {
    manglePrefix(getEffectiveDeclContext(ND), NoFunction);
    mangleUnqualifiedName(ND);
  }
  // Record this prefix so later references can use a substitution.
  addSubstitution(ND);
}
/// Mangle the <template-prefix> of a TemplateName. Names that resolve to
/// a concrete template declaration are forwarded to the TemplateDecl
/// overload; otherwise this handles overloaded and dependent names.
void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
  // <template-prefix> ::= <prefix> <template unqualified-name>
  //                   ::= <template-param>
  //                   ::= <substitution>
  if (TemplateDecl *TD = Template.getAsTemplateDecl())
    return mangleTemplatePrefix(TD);
  if (QualifiedTemplateName *Qualified = Template.getAsQualifiedTemplateName())
    manglePrefix(Qualified->getQualifier());
  if (OverloadedTemplateStorage *Overloaded
                                      = Template.getAsOverloadedTemplate()) {
    mangleUnqualifiedName(nullptr, (*Overloaded->begin())->getDeclName(),
                          UnknownArity);
    return;
  }
  DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
  assert(Dependent && "Unknown template name kind?");
  if (NestedNameSpecifier *Qualifier = Dependent->getQualifier())
    manglePrefix(Qualifier);
  mangleUnscopedTemplateName(Template);
}
/// Mangle the <template-prefix> of a concrete template declaration.
void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND,
                                          bool NoFunction) {
  // <template-prefix> ::= <prefix> <template unqualified-name>
  //                   ::= <template-param>
  //                   ::= <substitution>
  // <template-template-param> ::= <template-param>
  //                               <substitution>
  if (mangleSubstitution(ND))
    return;
  // <template-template-param> ::= <template-param>
  if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(ND)) {
    mangleTemplateParameter(TTP->getIndex());
  } else {
    manglePrefix(getEffectiveDeclContext(ND), NoFunction);
    // The templated decl carries the user-visible name of the template.
    mangleUnqualifiedName(ND->getTemplatedDecl());
  }
  // Record the template itself for later substitution references.
  addSubstitution(ND);
}
/// Mangles a template name under the production <type>. Required for
/// template template arguments.
///   <type> ::= <class-enum-type>
///          ::= <template-param>
///          ::= <substitution>
void CXXNameMangler::mangleType(TemplateName TN) {
  if (mangleSubstitution(TN))
    return;

  switch (TN.getKind()) {
  case TemplateName::Template:
  case TemplateName::QualifiedTemplate: {
    // Both of these kinds carry a concrete template declaration.
    TemplateDecl *TD = TN.getKind() == TemplateName::QualifiedTemplate
                           ? TN.getAsQualifiedTemplateName()->getTemplateDecl()
                           : TN.getAsTemplateDecl();
    if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(TD))
      mangleTemplateParameter(TTP->getIndex());
    else
      mangleName(TD);
    break;
  }

  case TemplateName::OverloadedTemplate:
    llvm_unreachable("can't mangle an overloaded template name as a <type>");

  case TemplateName::DependentTemplate: {
    const DependentTemplateName *Dependent = TN.getAsDependentTemplateName();
    assert(Dependent->isIdentifier());
    // <class-enum-type> ::= <name>
    // <name> ::= <nested-name>
    mangleUnresolvedPrefix(Dependent->getQualifier());
    mangleSourceName(Dependent->getIdentifier());
    break;
  }

  case TemplateName::SubstTemplateTemplateParm: {
    // Substituted template parameters are mangled as the substituted
    // template. This will check for the substitution twice, which is
    // fine, but we have to return early so that we don't try to *add*
    // the substitution twice.
    SubstTemplateTemplateParmStorage *Subst =
        TN.getAsSubstTemplateTemplateParm();
    mangleType(Subst->getReplacement());
    return;
  }

  case TemplateName::SubstTemplateTemplateParmPack: {
    // FIXME: not clear how to mangle this!
    // template <template <class> class T...> class A {
    //   template <template <class> class U...> void foo(B<T,U> x...);
    // };
    Out << "_SUBSTPACK_";
    break;
  }
  }

  addSubstitution(TN);
}
/// Mangle a type that appears as a component of an unresolved name.
/// Returns true when the type was mangled as an <unresolved-type>, in
/// which case the caller must not emit a trailing 'E' after it; returns
/// false when a simple-id form was emitted.
bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
                                                    StringRef Prefix) {
  // Only certain other types are valid as prefixes;  enumerate them.
  switch (Ty->getTypeClass()) {
  case Type::Builtin:
  case Type::Complex:
  case Type::Adjusted:
  case Type::Decayed:
  case Type::Pointer:
  case Type::BlockPointer:
  case Type::LValueReference:
  case Type::RValueReference:
  case Type::MemberPointer:
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::DependentSizedArray:
  case Type::DependentSizedExtVector:
  case Type::Vector:
  case Type::ExtVector:
  case Type::FunctionProto:
  case Type::FunctionNoProto:
  case Type::Paren:
  case Type::Attributed:
  case Type::Auto:
  case Type::PackExpansion:
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
  case Type::Atomic:
    llvm_unreachable("type is illegal as a nested name specifier");
  case Type::SubstTemplateTypeParmPack:
    // FIXME: not clear how to mangle this!
    // template <class T...> class A {
    //   template <class U...> void foo(decltype(T::foo(U())) x...);
    // };
    Out << "_SUBSTPACK_";
    break;
  // <unresolved-type> ::= <template-param>
  //                   ::= <decltype>
  //                   ::= <template-template-param> <template-args>
  // (this last is not official yet)
  case Type::TypeOfExpr:
  case Type::TypeOf:
  case Type::Decltype:
  case Type::TemplateTypeParm:
  case Type::UnaryTransform:
  case Type::SubstTemplateTypeParm:
  unresolvedType:
    // Some callers want a prefix before the mangled type.
    Out << Prefix;
    // This seems to do everything we want.  It's not really
    // sanctioned for a substituted template parameter, though.
    mangleType(Ty);
    // We never want to print 'E' directly after an unresolved-type,
    // so we return directly.
    return true;
  case Type::Typedef:
    mangleSourceName(cast<TypedefType>(Ty)->getDecl()->getIdentifier());
    break;
  case Type::UnresolvedUsing:
    mangleSourceName(
        cast<UnresolvedUsingType>(Ty)->getDecl()->getIdentifier());
    break;
  case Type::Enum:
  case Type::Record:
    mangleSourceName(cast<TagType>(Ty)->getDecl()->getIdentifier());
    break;
  case Type::TemplateSpecialization: {
    const TemplateSpecializationType *TST =
        cast<TemplateSpecializationType>(Ty);
    TemplateName TN = TST->getTemplateName();
    switch (TN.getKind()) {
    case TemplateName::Template:
    case TemplateName::QualifiedTemplate: {
      TemplateDecl *TD = TN.getAsTemplateDecl();
      // If the base is a template template parameter, this is an
      // unresolved type.
      assert(TD && "no template for template specialization type");
      if (isa<TemplateTemplateParmDecl>(TD))
        goto unresolvedType;
      mangleSourceName(TD->getIdentifier());
      break;
    }
    case TemplateName::OverloadedTemplate:
    case TemplateName::DependentTemplate:
      llvm_unreachable("invalid base for a template specialization type");
    case TemplateName::SubstTemplateTemplateParm: {
      SubstTemplateTemplateParmStorage *subst =
          TN.getAsSubstTemplateTemplateParm();
      mangleExistingSubstitution(subst->getReplacement());
      break;
    }
    case TemplateName::SubstTemplateTemplateParmPack: {
      // FIXME: not clear how to mangle this!
      // template <template <class U> class T...> class A {
      //   template <class U...> void foo(decltype(T<U>::foo) x...);
      // };
      Out << "_SUBSTPACK_";
      break;
    }
    }
    // The template arguments are mangled after the bare template name.
    mangleTemplateArgs(TST->getArgs(), TST->getNumArgs());
    break;
  }
  case Type::InjectedClassName:
    mangleSourceName(
        cast<InjectedClassNameType>(Ty)->getDecl()->getIdentifier());
    break;
  case Type::DependentName:
    mangleSourceName(cast<DependentNameType>(Ty)->getIdentifier());
    break;
  case Type::DependentTemplateSpecialization: {
    const DependentTemplateSpecializationType *DTST =
        cast<DependentTemplateSpecializationType>(Ty);
    mangleSourceName(DTST->getIdentifier());
    mangleTemplateArgs(DTST->getArgs(), DTST->getNumArgs());
    break;
  }
  case Type::Elaborated:
    // Elaborated types ("struct S") mangle as their underlying type.
    return mangleUnresolvedTypeOrSimpleId(
        cast<ElaboratedType>(Ty)->getNamedType(), Prefix);
  }
  return false;
}
/// Mangle an operator declaration name (conversion function, literal
/// operator, or overloaded operator). Other name kinds are not operators.
void CXXNameMangler::mangleOperatorName(DeclarationName Name, unsigned Arity) {
  switch (Name.getNameKind()) {
  case DeclarationName::CXXConstructorName:
  case DeclarationName::CXXDestructorName:
  case DeclarationName::CXXUsingDirective:
  case DeclarationName::Identifier:
  case DeclarationName::ObjCMultiArgSelector:
  case DeclarationName::ObjCOneArgSelector:
  case DeclarationName::ObjCZeroArgSelector:
    llvm_unreachable("Not an operator name");
  case DeclarationName::CXXConversionFunctionName:
    // <operator-name> ::= cv <type>    # (cast)
    Out << "cv";
    mangleType(Name.getCXXNameType());
    break;
  case DeclarationName::CXXLiteralOperatorName:
    // <operator-name> ::= li <source-name>   # operator ""
    Out << "li";
    mangleSourceName(Name.getCXXLiteralIdentifier());
    return;
  case DeclarationName::CXXOperatorName:
    mangleOperatorName(Name.getCXXOverloadedOperator(), Arity);
    break;
  }
}
/// Emit the two-letter <operator-name> code for an overloaded operator.
/// Arity disambiguates operators with both unary and binary forms
/// (+, -, &, *); binary is used when the arity is unknown.
void
CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
  switch (OO) {
  // <operator-name> ::= nw     # new
  case OO_New: Out << "nw"; break;
  //              ::= na        # new[]
  case OO_Array_New: Out << "na"; break;
  //              ::= dl        # delete
  case OO_Delete: Out << "dl"; break;
  //              ::= da        # delete[]
  case OO_Array_Delete: Out << "da"; break;
  //              ::= ps        # + (unary)
  //              ::= pl        # + (binary or unknown)
  case OO_Plus:
    Out << (Arity == 1? "ps" : "pl"); break;
  //              ::= ng        # - (unary)
  //              ::= mi        # - (binary or unknown)
  case OO_Minus:
    Out << (Arity == 1? "ng" : "mi"); break;
  //              ::= ad        # & (unary)
  //              ::= an        # & (binary or unknown)
  case OO_Amp:
    Out << (Arity == 1? "ad" : "an"); break;
  //              ::= de        # * (unary)
  //              ::= ml        # * (binary or unknown)
  case OO_Star:
    // Use binary when unknown.
    Out << (Arity == 1? "de" : "ml"); break;
  //              ::= co        # ~
  case OO_Tilde: Out << "co"; break;
  //              ::= dv        # /
  case OO_Slash: Out << "dv"; break;
  //              ::= rm        # %
  case OO_Percent: Out << "rm"; break;
  //              ::= or        # |
  case OO_Pipe: Out << "or"; break;
  //              ::= eo        # ^
  case OO_Caret: Out << "eo"; break;
  //              ::= aS        # =
  case OO_Equal: Out << "aS"; break;
  //              ::= pL        # +=
  case OO_PlusEqual: Out << "pL"; break;
  //              ::= mI        # -=
  case OO_MinusEqual: Out << "mI"; break;
  //              ::= mL        # *=
  case OO_StarEqual: Out << "mL"; break;
  //              ::= dV        # /=
  case OO_SlashEqual: Out << "dV"; break;
  //              ::= rM        # %=
  case OO_PercentEqual: Out << "rM"; break;
  //              ::= aN        # &=
  case OO_AmpEqual: Out << "aN"; break;
  //              ::= oR        # |=
  case OO_PipeEqual: Out << "oR"; break;
  //              ::= eO        # ^=
  case OO_CaretEqual: Out << "eO"; break;
  //              ::= ls        # <<
  case OO_LessLess: Out << "ls"; break;
  //              ::= rs        # >>
  case OO_GreaterGreater: Out << "rs"; break;
  //              ::= lS        # <<=
  case OO_LessLessEqual: Out << "lS"; break;
  //              ::= rS        # >>=
  case OO_GreaterGreaterEqual: Out << "rS"; break;
  //              ::= eq        # ==
  case OO_EqualEqual: Out << "eq"; break;
  //              ::= ne        # !=
  case OO_ExclaimEqual: Out << "ne"; break;
  //              ::= lt        # <
  case OO_Less: Out << "lt"; break;
  //              ::= gt        # >
  case OO_Greater: Out << "gt"; break;
  //              ::= le        # <=
  case OO_LessEqual: Out << "le"; break;
  //              ::= ge        # >=
  case OO_GreaterEqual: Out << "ge"; break;
  //              ::= nt        # !
  case OO_Exclaim: Out << "nt"; break;
  //              ::= aa        # &&
  case OO_AmpAmp: Out << "aa"; break;
  //              ::= oo        # ||
  case OO_PipePipe: Out << "oo"; break;
  //              ::= pp        # ++
  case OO_PlusPlus: Out << "pp"; break;
  //              ::= mm        # --
  case OO_MinusMinus: Out << "mm"; break;
  //              ::= cm        # ,
  case OO_Comma: Out << "cm"; break;
  //              ::= pm        # ->*
  case OO_ArrowStar: Out << "pm"; break;
  //              ::= pt        # ->
  case OO_Arrow: Out << "pt"; break;
  //              ::= cl        # ()
  case OO_Call: Out << "cl"; break;
  //              ::= ix        # []
  case OO_Subscript: Out << "ix"; break;
  //              ::= qu        # ?
  // The conditional operator can't be overloaded, but we still handle it when
  // mangling expressions.
  case OO_Conditional: Out << "qu"; break;
  case OO_None:
  case NUM_OVERLOADED_OPERATORS:
    llvm_unreachable("Not an overloaded operator");
  }
}
/// Mangle a set of qualifiers: CV-qualifiers, then an address-space
/// extension (if any), then an Objective-C ARC lifetime extension (if any).
void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
  // <CV-qualifiers> ::= [r] [V] [K]    # restrict (C99), volatile, const
  if (Quals.hasRestrict())
    Out << 'r';
  if (Quals.hasVolatile())
    Out << 'V';
  if (Quals.hasConst())
    Out << 'K';
  if (Quals.hasAddressSpace()) {
    // Address space extension:
    //
    //   <type> ::= U <target-addrspace>
    //   <type> ::= U <OpenCL-addrspace>
    //   <type> ::= U <CUDA-addrspace>
    SmallString<64> ASString;
    unsigned AS = Quals.getAddressSpace();
    if (Context.getASTContext().addressSpaceMapManglingFor(AS)) {
      //  <target-addrspace> ::= "AS" <address-space-number>
      unsigned TargetAS = Context.getASTContext().getTargetAddressSpace(AS);
      ASString = "AS" + llvm::utostr_32(TargetAS);
    } else {
      switch (AS) {
      default: llvm_unreachable("Not a language specific address space");
      //  <OpenCL-addrspace> ::= "CL" [ "global" | "local" | "constant" ]
      case LangAS::opencl_global:   ASString = "CLglobal";   break;
      case LangAS::opencl_local:    ASString = "CLlocal";    break;
      case LangAS::opencl_constant: ASString = "CLconstant"; break;
      //  <CUDA-addrspace> ::= "CU" [ "device" | "constant" | "shared" ]
      case LangAS::cuda_device:     ASString = "CUdevice";   break;
      case LangAS::cuda_constant:   ASString = "CUconstant"; break;
      case LangAS::cuda_shared:     ASString = "CUshared";   break;
      }
    }
    // Vendor-extended qualifier: U <length-prefixed name>.
    Out << 'U' << ASString.size() << ASString;
  }
  StringRef LifetimeName;
  switch (Quals.getObjCLifetime()) {
  // Objective-C ARC Extension:
  //
  //   <type> ::= U "__strong"
  //   <type> ::= U "__weak"
  //   <type> ::= U "__autoreleasing"
  case Qualifiers::OCL_None:
    break;
  case Qualifiers::OCL_Weak:
    LifetimeName = "__weak";
    break;
  case Qualifiers::OCL_Strong:
    LifetimeName = "__strong";
    break;
  case Qualifiers::OCL_Autoreleasing:
    LifetimeName = "__autoreleasing";
    break;
  case Qualifiers::OCL_ExplicitNone:
    // The __unsafe_unretained qualifier is *not* mangled, so that
    // __unsafe_unretained types in ARC produce the same manglings as the
    // equivalent (but, naturally, unqualified) types in non-ARC, providing
    // better ABI compatibility.
    //
    // It's safe to do this because unqualified 'id' won't show up
    // in any type signatures that need to be mangled.
    break;
  }
  if (!LifetimeName.empty())
    Out << 'U' << LifetimeName.size() << LifetimeName;
}
/// Mangle a member-function ref-qualifier.
///   <ref-qualifier> ::= R   # lvalue reference
///                   ::= O   # rvalue reference
/// RQ_None emits nothing.
void CXXNameMangler::mangleRefQualifier(RefQualifierKind RefQualifier) {
  if (RefQualifier == RQ_LValue)
    Out << 'R';
  else if (RefQualifier == RQ_RValue)
    Out << 'O';
}
/// Delegate Objective-C method-name mangling to the mangle context.
void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
  Context.mangleObjCMethodName(MD, Out);
}
/// Whether a mangled type should participate in the substitution table.
/// Unqualified builtin types are excluded (the ABI gives them fixed
/// single-letter codes), except ObjCSel and the OpenCL-specific builtins,
/// which mangle as length-prefixed names and so benefit from substitution.
static bool isTypeSubstitutable(Qualifiers Quals, const Type *Ty) {
  return static_cast<bool>(Quals) ||
         Ty->isSpecificBuiltinType(BuiltinType::ObjCSel) ||
         Ty->isOpenCLSpecificType() ||
         !Ty->isBuiltinType();
}
/// Top-level type mangling entry point: canonicalize/desugar, handle
/// substitutions and qualifiers, then dispatch on the type class.
void CXXNameMangler::mangleType(QualType T) {
  // If our type is instantiation-dependent but not dependent, we mangle
  // it as it was written in the source, removing any top-level sugar.
  // Otherwise, use the canonical type.
  //
  // FIXME: This is an approximation of the instantiation-dependent name
  // mangling rules, since we should really be using the type as written and
  // augmented via semantic analysis (i.e., with implicit conversions and
  // default template arguments) for any instantiation-dependent type.
  // Unfortunately, that requires several changes to our AST:
  //   - Instantiation-dependent TemplateSpecializationTypes will need to be
  //     uniqued, so that we can handle substitutions properly
  //   - Default template arguments will need to be represented in the
  //     TemplateSpecializationType, since they need to be mangled even though
  //     they aren't written.
  //   - Conversions on non-type template arguments need to be expressed, since
  //     they can affect the mangling of sizeof/alignof.
  if (!T->isInstantiationDependentType() || T->isDependentType())
    T = T.getCanonicalType();
  else {
    // Desugar any types that are purely sugar.
    do {
      // Don't desugar through template specialization types that aren't
      // type aliases. We need to mangle the template arguments as written.
      if (const TemplateSpecializationType *TST
                                      = dyn_cast<TemplateSpecializationType>(T))
        if (!TST->isTypeAlias())
          break;
      QualType Desugared
        = T.getSingleStepDesugaredType(Context.getASTContext());
      if (Desugared == T)
        break;
      T = Desugared;
    } while (true);
  }
  SplitQualType split = T.split();
  Qualifiers quals = split.Quals;
  const Type *ty = split.Ty;
  bool isSubstitutable = isTypeSubstitutable(quals, ty);
  if (isSubstitutable && mangleSubstitution(T))
    return;
  // If we're mangling a qualified array type, push the qualifiers to
  // the element type.
  if (quals && isa<ArrayType>(T)) {
    ty = Context.getASTContext().getAsArrayType(T);
    quals = Qualifiers();
    // Note that we don't update T: we want to add the
    // substitution at the original type.
  }
  if (quals) {
    mangleQualifiers(quals);
    // Recurse:  even if the qualified type isn't yet substitutable,
    // the unqualified type might be.
    mangleType(QualType(ty, 0));
  } else {
    // Unqualified: dispatch to the per-type-class overload.
    switch (ty->getTypeClass()) {
#define ABSTRACT_TYPE(CLASS, PARENT)
#define NON_CANONICAL_TYPE(CLASS, PARENT) \
    case Type::CLASS: \
      llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
      return;
#define TYPE(CLASS, PARENT) \
    case Type::CLASS: \
      mangleType(static_cast<const CLASS##Type*>(ty)); \
      break;
#include "clang/AST/TypeNodes.def"
    }
  }
  // Add the substitution.
  if (isSubstitutable)
    addSubstitution(T);
}
/// Emit one of the ABI's standard substitutions (St, Sa, Sb, Ss, ...) for
/// \p ND when one applies; otherwise fall back to the full mangled name.
void CXXNameMangler::mangleNameOrStandardSubstitution(const NamedDecl *ND) {
  if (mangleStandardSubstitution(ND))
    return;
  mangleName(ND);
}
void CXXNameMangler::mangleType(const BuiltinType *T) {
  //  <type>         ::= <builtin-type>
  //  <builtin-type> ::= v  # void
  //                 ::= w  # wchar_t
  //                 ::= b  # bool
  //                 ::= c  # char
  //                 ::= a  # signed char
  //                 ::= h  # unsigned char
  //                 ::= s  # short
  //                 ::= t  # unsigned short
  //                 ::= i  # int
  //                 ::= j  # unsigned int
  //                 ::= l  # long
  //                 ::= m  # unsigned long
  //                 ::= x  # long long, __int64
  //                 ::= y  # unsigned long long, __int64
  //                 ::= n  # __int128
  //                 ::= o  # unsigned __int128
  //                 ::= f  # float
  //                 ::= d  # double
  //                 ::= e  # long double, __float80
  // UNSUPPORTED:    ::= g  # __float128
  // UNSUPPORTED:    ::= Dd # IEEE 754r decimal floating point (64 bits)
  // UNSUPPORTED:    ::= De # IEEE 754r decimal floating point (128 bits)
  // UNSUPPORTED:    ::= Df # IEEE 754r decimal floating point (32 bits)
  //                 ::= Dh # IEEE 754r half-precision floating point (16 bits)
  //                 ::= Di # char32_t
  //                 ::= Ds # char16_t
  //                 ::= Dn # std::nullptr_t (i.e., decltype(nullptr))
  //                 ::= u <source-name>    # vendor extended type
  switch (T->getKind()) {
  case BuiltinType::Void: Out << 'v'; break;
  case BuiltinType::Bool: Out << 'b'; break;
  case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'c'; break;
  case BuiltinType::UChar: Out << 'h'; break;
  case BuiltinType::UShort: Out << 't'; break;
  case BuiltinType::UInt: Out << 'j'; break;
  case BuiltinType::ULong: Out << 'm'; break;
  case BuiltinType::ULongLong: Out << 'y'; break;
  case BuiltinType::UInt128: Out << 'o'; break;
  case BuiltinType::SChar: Out << 'a'; break;
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U: Out << 'w'; break;
  case BuiltinType::Char16: Out << "Ds"; break;
  case BuiltinType::Char32: Out << "Di"; break;
  case BuiltinType::Short: Out << 's'; break;
  case BuiltinType::Int: Out << 'i'; break;
  case BuiltinType::Long: Out << 'l'; break;
  case BuiltinType::LongLong: Out << 'x'; break;
  case BuiltinType::Int128: Out << 'n'; break;
  case BuiltinType::Half: Out << "Dh"; break;
  case BuiltinType::Float: Out << 'f'; break;
  case BuiltinType::Double: Out << 'd'; break;
  case BuiltinType::LongDouble:
    // Some targets (e.g. PPC64 ELFv2) mangle long double as __float128.
    Out << (getASTContext().getTargetInfo().useFloat128ManglingForLongDouble()
                ? 'g'
                : 'e');
    break;
  case BuiltinType::NullPtr: Out << "Dn"; break;
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
  case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
  case BuiltinType::Dependent:
    llvm_unreachable("mangling a placeholder type");
  // The ObjC/OpenCL builtins below mangle as length-prefixed vendor names.
  case BuiltinType::ObjCId: Out << "11objc_object"; break;
  case BuiltinType::ObjCClass: Out << "10objc_class"; break;
  case BuiltinType::ObjCSel: Out << "13objc_selector"; break;
  case BuiltinType::OCLImage1d: Out << "11ocl_image1d"; break;
  case BuiltinType::OCLImage1dArray: Out << "16ocl_image1darray"; break;
  case BuiltinType::OCLImage1dBuffer: Out << "17ocl_image1dbuffer"; break;
  case BuiltinType::OCLImage2d: Out << "11ocl_image2d"; break;
  case BuiltinType::OCLImage2dArray: Out << "16ocl_image2darray"; break;
  case BuiltinType::OCLImage3d: Out << "11ocl_image3d"; break;
  case BuiltinType::OCLSampler: Out << "11ocl_sampler"; break;
  case BuiltinType::OCLEvent: Out << "9ocl_event"; break;
  // HLSL Change starts
  // NOTE(review): unlike the ObjC/OpenCL cases above, these HLSL names are
  // emitted without a length prefix, which is not a standard Itanium
  // <source-name> encoding — presumably intentional for HLSL; confirm
  // before changing, since existing manglings depend on it.
  case BuiltinType::Min12Int: Out << "min12_int"; break;
  case BuiltinType::LitInt: Out << "lit_int"; break;
  case BuiltinType::LitFloat: Out << "lit_float"; break;
  case BuiltinType::Min10Float: Out << "min10_float"; break;
  case BuiltinType::Min16Float: Out << "min16_float"; break;
  case BuiltinType::Min16Int: Out << "min16_int"; break;
  case BuiltinType::Min16UInt: Out << "min16_uint"; break;
  case BuiltinType::HalfFloat: Out << "half_float"; break;
  case BuiltinType::Int8_4Packed: Out << "int8_t4_packed"; break;
  case BuiltinType::UInt8_4Packed: Out << "uint8_t4_packed"; break;
  // HLSL Change ends
  }
}
// <type>          ::= <function-type>
// <function-type> ::= [<CV-qualifiers>] F [Y]
//                      <bare-function-type> [<ref-qualifier>] E
void CXXNameMangler::mangleType(const FunctionProtoType *T) {
  // Mangle CV-qualifiers, if present.  These are 'this' qualifiers,
  // e.g. "const" in "int (A::*)() const".
  mangleQualifiers(Qualifiers::fromCVRMask(T->getTypeQuals()));
  Out << 'F';
  // FIXME: We don't have enough information in the AST to produce the 'Y'
  // encoding for extern "C" function types.
  mangleBareFunctionType(T, /*MangleReturnType=*/true);
  // Mangle the ref-qualifier, if present.
  mangleRefQualifier(T->getRefQualifier());
  Out << 'E';
}
// K&R-style unprototyped function types never reach the Itanium mangler.
void CXXNameMangler::mangleType(const FunctionNoProtoType *T) {
  llvm_unreachable("Can't mangle K&R function prototypes");
}
/// Mangle a <bare-function-type>: the (optional) return type followed by
/// each parameter type, with 'v' for an empty parameter list and a
/// trailing 'z' for variadic functions.
void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
                                            bool MangleReturnType) {
  // We should never be mangling something without a prototype.
  const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
  // Record that we're in a function type.  See mangleFunctionParam
  // for details on what we're trying to achieve here.
  FunctionTypeDepthState saved = FunctionTypeDepth.push();
  // <bare-function-type> ::= <signature type>+
  if (MangleReturnType) {
    FunctionTypeDepth.enterResultType();
    mangleType(Proto->getReturnType());
    FunctionTypeDepth.leaveResultType();
  }
  if (Proto->getNumParams() == 0 && !Proto->isVariadic()) {
    //   <builtin-type> ::= v   # void
    Out << 'v';
    // Restore the function-type depth before the early return.
    FunctionTypeDepth.pop(saved);
    return;
  }
  for (const auto &Arg : Proto->param_types())
    mangleType(Context.getASTContext().getSignatureParameterType(Arg));
  FunctionTypeDepth.pop(saved);
  // <builtin-type>      ::= z  # ellipsis
  if (Proto->isVariadic())
    Out << 'z';
}
// <type>            ::= <class-enum-type>
// <class-enum-type> ::= <name>
// An unresolved using declaration mangles as the name it introduces.
void CXXNameMangler::mangleType(const UnresolvedUsingType *T) {
  mangleName(T->getDecl());
}
// <type>            ::= <class-enum-type>
// <class-enum-type> ::= <name>
// Enums share the generic tag-type mangling below.
void CXXNameMangler::mangleType(const EnumType *T) {
  mangleType(static_cast<const TagType*>(T));
}
// Records (class/struct/union) share the generic tag-type mangling below.
void CXXNameMangler::mangleType(const RecordType *T) {
  mangleType(static_cast<const TagType*>(T));
}
// A tag type mangles as the (possibly nested) name of its declaration.
void CXXNameMangler::mangleType(const TagType *T) {
  mangleName(T->getDecl());
}
// <type>       ::= <array-type>
// <array-type> ::= A <positive dimension number> _ <element type>
//              ::= A [<dimension expression>] _ <element type>
void CXXNameMangler::mangleType(const ConstantArrayType *T) {
  Out << 'A' << T->getSize() << '_';
  mangleType(T->getElementType());
}
// VLAs mangle as A <size expression> _ <element type>.
void CXXNameMangler::mangleType(const VariableArrayType *T) {
  Out << 'A';
  // decayed vla types (size 0) will just be skipped.
  if (T->getSizeExpr())
    mangleExpression(T->getSizeExpr());
  Out << '_';
  mangleType(T->getElementType());
}
// Dependently-sized arrays mangle as A <size expression> _ <element type>.
void CXXNameMangler::mangleType(const DependentSizedArrayType *T) {
  Out << 'A';
  mangleExpression(T->getSizeExpr());
  Out << '_';
  mangleType(T->getElementType());
}
// Arrays of unknown bound mangle as A_ <element type> (no dimension).
void CXXNameMangler::mangleType(const IncompleteArrayType *T) {
  Out << "A_";
  mangleType(T->getElementType());
}
// <type>                   ::= <pointer-to-member-type>
// <pointer-to-member-type> ::= M <class type> <member type>
void CXXNameMangler::mangleType(const MemberPointerType *T) {
  Out << 'M';
  mangleType(QualType(T->getClass(), 0));
  QualType PointeeType = T->getPointeeType();
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
    mangleType(FPT);
    // Itanium C++ ABI 5.1.8:
    //
    //   The type of a non-static member function is considered to be different,
    //   for the purposes of substitution, from the type of a namespace-scope or
    //   static member function whose type appears similar. The types of two
    //   non-static member functions are considered to be different, for the
    //   purposes of substitution, if the functions are members of different
    //   classes. In other words, for the purposes of substitution, the class of
    //   which the function is a member is considered part of the type of
    //   function.
    //   Given that we already substitute member function pointers as a
    //   whole, the net effect of this rule is just to unconditionally
    //   suppress substitution on the function type in a member pointer.
    //   We increment the SeqID here to emulate adding an entry to the
    //   substitution table.
    ++SeqID;
  } else
    mangleType(PointeeType);
}
// <type> ::= <template-param>
void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
  mangleTemplateParameter(T->getIndex());
}
// <type> ::= <template-param>
void CXXNameMangler::mangleType(const SubstTemplateTypeParmPackType *T) {
  // FIXME: not clear how to mangle this!
  // template <class T...> class A {
  //   template <class U...> void foo(T(*)(U) x...);
  // };
  Out << "_SUBSTPACK_";
}
// <type> ::= P <type> # pointer-to
void CXXNameMangler::mangleType(const PointerType *T) {
Out << 'P';
mangleType(T->getPointeeType());
}
void CXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
Out << 'P';
mangleType(T->getPointeeType());
}
// <type> ::= R <type>   # reference-to
void CXXNameMangler::mangleType(const LValueReferenceType *T) {
  Out << 'R';
  mangleType(T->getPointeeType());
}
// <type> ::= O <type>   # rvalue reference-to (C++0x)
void CXXNameMangler::mangleType(const RValueReferenceType *T) {
  Out << 'O';
  mangleType(T->getPointeeType());
}
// <type> ::= C <type>   # complex pair (C 2000)
void CXXNameMangler::mangleType(const ComplexType *T) {
  Out << 'C';
  mangleType(T->getElementType());
}
// ARM's ABI for Neon vector types specifies that they should be mangled as
// if they are structs (to match ARM's initial implementation).  The
// vector type must be one of the special types predefined by ARM.
// The emitted name is a length-prefixed "__simd64_" or "__simd128_"
// (chosen by total bit size) followed by the ACLE element-type name.
void CXXNameMangler::mangleNeonVectorType(const VectorType *T) {
  QualType EltType = T->getElementType();
  assert(EltType->isBuiltinType() && "Neon vector element not a BuiltinType");
  const char *EltName = nullptr;
  if (T->getVectorKind() == VectorType::NeonPolyVector) {
    switch (cast<BuiltinType>(EltType)->getKind()) {
    case BuiltinType::SChar:
    case BuiltinType::UChar:
      EltName = "poly8_t";
      break;
    case BuiltinType::Short:
    case BuiltinType::UShort:
      EltName = "poly16_t";
      break;
    case BuiltinType::ULongLong:
      EltName = "poly64_t";
      break;
    default: llvm_unreachable("unexpected Neon polynomial vector element type");
    }
  } else {
    switch (cast<BuiltinType>(EltType)->getKind()) {
    case BuiltinType::SChar:     EltName = "int8_t"; break;
    case BuiltinType::UChar:     EltName = "uint8_t"; break;
    case BuiltinType::Short:     EltName = "int16_t"; break;
    case BuiltinType::UShort:    EltName = "uint16_t"; break;
    case BuiltinType::Int:       EltName = "int32_t"; break;
    case BuiltinType::UInt:      EltName = "uint32_t"; break;
    case BuiltinType::LongLong:  EltName = "int64_t"; break;
    case BuiltinType::ULongLong: EltName = "uint64_t"; break;
    case BuiltinType::Double:    EltName = "float64_t"; break;
    case BuiltinType::Float:     EltName = "float32_t"; break;
    case BuiltinType::Half:      EltName = "float16_t";break;
    default:
      llvm_unreachable("unexpected Neon vector element type");
    }
  }
  const char *BaseName = nullptr;
  unsigned BitSize = (T->getNumElements() *
                      getASTContext().getTypeSize(EltType));
  if (BitSize == 64)
    BaseName = "__simd64_";
  else {
    assert(BitSize == 128 && "Neon vector type not 64 or 128 bits");
    BaseName = "__simd128_";
  }
  // Length prefix covers the concatenated base and element names.
  Out << strlen(BaseName) + strlen(EltName);
  Out << BaseName << EltName;
}
/// Map a Neon vector element type to the base of its AArch64 ACLE
/// internal name (e.g. Int32, Float64); used to build "__<Base>x<N>_t".
static StringRef mangleAArch64VectorBase(const BuiltinType *EltType) {
  switch (EltType->getKind()) {
  case BuiltinType::SChar:
    return "Int8";
  case BuiltinType::Short:
    return "Int16";
  case BuiltinType::Int:
    return "Int32";
  case BuiltinType::Long:
  case BuiltinType::LongLong:
    return "Int64";
  case BuiltinType::UChar:
    return "Uint8";
  case BuiltinType::UShort:
    return "Uint16";
  case BuiltinType::UInt:
    return "Uint32";
  case BuiltinType::ULong:
  case BuiltinType::ULongLong:
    return "Uint64";
  case BuiltinType::Half:
    return "Float16";
  case BuiltinType::Float:
    return "Float32";
  case BuiltinType::Double:
    return "Float64";
  default:
    llvm_unreachable("Unexpected vector element base type");
  }
}
// AArch64's ABI for Neon vector types specifies that they should be mangled as
// the equivalent internal name. The vector type must be one of the special
// types predefined by ARM.
// The emitted name is a length-prefixed "__<Elt>x<N>_t".
void CXXNameMangler::mangleAArch64NeonVectorType(const VectorType *T) {
  QualType EltType = T->getElementType();
  assert(EltType->isBuiltinType() && "Neon vector element not a BuiltinType");
  unsigned BitSize =
      (T->getNumElements() * getASTContext().getTypeSize(EltType));
  (void)BitSize; // Silence warning.
  assert((BitSize == 64 || BitSize == 128) &&
         "Neon vector type not 64 or 128 bits");
  StringRef EltName;
  if (T->getVectorKind() == VectorType::NeonPolyVector) {
    switch (cast<BuiltinType>(EltType)->getKind()) {
    case BuiltinType::UChar:
      EltName = "Poly8";
      break;
    case BuiltinType::UShort:
      EltName = "Poly16";
      break;
    case BuiltinType::ULong:
    case BuiltinType::ULongLong:
      EltName = "Poly64";
      break;
    default:
      llvm_unreachable("unexpected Neon polynomial vector element type");
    }
  } else
    EltName = mangleAArch64VectorBase(cast<BuiltinType>(EltType));
  std::string TypeName =
      ("__" + EltName + "x" + llvm::utostr(T->getNumElements()) + "_t").str();
  Out << TypeName.length() << TypeName;
}
// GNU extension: vector types
// <type> ::= <vector-type>
// <vector-type> ::= Dv <positive dimension number> _
// <extended element type>
// ::= Dv [<dimension expression>] _ <element type>
// <extended element type> ::= <element type>
// ::= p # AltiVec vector pixel
// ::= b # Altivec vector bool
void CXXNameMangler::mangleType(const VectorType *T) {
  // Neon vectors use target-specific internal-name manglings; everything else
  // gets the generic GNU vector mangling below.
  if (T->getVectorKind() == VectorType::NeonVector ||
      T->getVectorKind() == VectorType::NeonPolyVector) {
    llvm::Triple Target = getASTContext().getTargetInfo().getTriple();
    // Reuse the already-fetched triple rather than querying the target twice.
    llvm::Triple::ArchType Arch = Target.getArch();
    if ((Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be) &&
        !Target.isOSDarwin())
      mangleAArch64NeonVectorType(T);
    else
      mangleNeonVectorType(T);
    return;
  }
  // <vector-type> ::= Dv <number of elements> _ <extended element type>
  Out << "Dv" << T->getNumElements() << '_';
  if (T->getVectorKind() == VectorType::AltiVecPixel)
    Out << 'p'; // AltiVec vector pixel
  else if (T->getVectorKind() == VectorType::AltiVecBool)
    Out << 'b'; // AltiVec vector bool
  else
    mangleType(T->getElementType());
}
void CXXNameMangler::mangleType(const ExtVectorType *T) {
  // ext_vector_type vectors mangle exactly like GNU vectors.
  mangleType(static_cast<const VectorType*>(T));
}
void CXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
  // <vector-type> ::= Dv [<dimension expression>] _ <element type>
  // The element count is a dependent expression here, so mangle it as one.
  Out << "Dv";
  mangleExpression(T->getSizeExpr());
  Out << '_';
  mangleType(T->getElementType());
}
void CXXNameMangler::mangleType(const PackExpansionType *T) {
  // <type> ::= Dp <type>          # pack expansion (C++0x)
  // Only the pattern is mangled; the expansion count is implied by context.
  Out << "Dp";
  mangleType(T->getPattern());
}
void CXXNameMangler::mangleType(const ObjCInterfaceType *T) {
  // Objective-C interface types mangle as a plain <source-name>.
  mangleSourceName(T->getDecl()->getIdentifier());
}
void CXXNameMangler::mangleType(const ObjCObjectType *T) {
// Treat __kindof as a vendor extended type qualifier.
if (T->isKindOfType())
Out << "U8__kindof";
if (!T->qual_empty()) {
// Mangle protocol qualifiers.
SmallString<64> QualStr;
llvm::raw_svector_ostream QualOS(QualStr);
QualOS << "objcproto";
for (const auto *I : T->quals()) {
StringRef name = I->getName();
QualOS << name.size() << name;
}
QualOS.flush();
Out << 'U' << QualStr.size() << QualStr;
}
mangleType(T->getBaseType());
if (T->isSpecialized()) {
// Mangle type arguments as I <type>+ E
Out << 'I';
for (auto typeArg : T->getTypeArgs())
mangleType(typeArg);
Out << 'E';
}
}
void CXXNameMangler::mangleType(const BlockPointerType *T) {
  // Blocks have no standardized mangling; encode them as a vendor extended
  // qualifier on the pointee type.
  Out << "U13block_pointer";
  mangleType(T->getPointeeType());
}
void CXXNameMangler::mangleType(const InjectedClassNameType *T) {
  // Mangle injected class name types as if the user had written the
  // specialization out fully. It may not actually be possible to see
  // this mangling, though; the injected name normally only appears
  // inside its own template's definition.
  mangleType(T->getInjectedSpecializationType());
}
void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
  // A specialization of a known template mangles as a (possibly nested)
  // template name followed by its arguments.
  if (TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl()) {
    mangleName(TD, T->getArgs(), T->getNumArgs());
    return;
  }

  // Dependent template name: consult the substitution table first.
  if (mangleSubstitution(QualType(T, 0)))
    return;

  mangleTemplatePrefix(T->getTemplateName());

  // FIXME: GCC does not appear to mangle the template arguments when
  // the template in question is a dependent template name. Should we
  // emulate that badness?
  mangleTemplateArgs(T->getArgs(), T->getNumArgs());
  addSubstitution(QualType(T, 0));
}
void CXXNameMangler::mangleType(const DependentNameType *T) {
  // Proposal by cxx-abi-dev, 2014-03-26
  // <class-enum-type> ::= <name>    # non-dependent or dependent type name or
  //                                 # dependent elaborated type specifier using
  //                                 # 'typename'
  //                   ::= Ts <name> # dependent elaborated type specifier using
  //                                 # 'struct' or 'class'
  //                   ::= Tu <name> # dependent elaborated type specifier using
  //                                 # 'union'
  //                   ::= Te <name> # dependent elaborated type specifier using
  //                                 # 'enum'
  switch (T->getKeyword()) {
    case ETK_Typename:
      // Plain 'typename' gets no elaboration prefix.
      break;
    case ETK_Struct:
    case ETK_Class:
    case ETK_Interface:
      Out << "Ts";
      break;
    case ETK_Union:
      Out << "Tu";
      break;
    case ETK_Enum:
      Out << "Te";
      break;
    default:
      llvm_unreachable("unexpected keyword for dependent type name");
  }
  // Typename types are always nested: N <prefix> <source-name> E.
  Out << 'N';
  manglePrefix(T->getQualifier());
  mangleSourceName(T->getIdentifier());
  Out << 'E';
}
void CXXNameMangler::mangleType(const DependentTemplateSpecializationType *T) {
  // Dependently-scoped template types are nested if they have a prefix.
  Out << 'N';

  // TODO: avoid making this TemplateName.
  // Rebuild a dependent template name from the qualifier + identifier so the
  // existing prefix-mangling path can be reused.
  TemplateName Prefix =
    getASTContext().getDependentTemplateName(T->getQualifier(),
                                             T->getIdentifier());
  mangleTemplatePrefix(Prefix);

  // FIXME: GCC does not appear to mangle the template arguments when
  // the template in question is a dependent template name. Should we
  // emulate that badness?
  mangleTemplateArgs(T->getArgs(), T->getNumArgs());
  Out << 'E';
}
void CXXNameMangler::mangleType(const TypeOfType *T) {
  // FIXME: this is pretty unsatisfactory, but there isn't an obvious
  // "extension with parameters" mangling.
  // "u <source-name>" is the vendor extended builtin-type production; the
  // operand type is dropped entirely here.
  Out << "u6typeof";
}
void CXXNameMangler::mangleType(const TypeOfExprType *T) {
  // FIXME: this is pretty unsatisfactory, but there isn't an obvious
  // "extension with parameters" mangling.
  // Same vendor extended mangling as TypeOfType; the operand expression
  // is dropped entirely here.
  Out << "u6typeof";
}
void CXXNameMangler::mangleType(const DecltypeType *T) {
Expr *E = T->getUnderlyingExpr();
// type ::= Dt <expression> E # decltype of an id-expression
// # or class member access
// ::= DT <expression> E # decltype of an expression
// This purports to be an exhaustive list of id-expressions and
// class member accesses. Note that we do not ignore parentheses;
// parentheses change the semantics of decltype for these
// expressions (and cause the mangler to use the other form).
if (isa<DeclRefExpr>(E) ||
isa<MemberExpr>(E) ||
isa<UnresolvedLookupExpr>(E) ||
isa<DependentScopeDeclRefExpr>(E) ||
isa<CXXDependentScopeMemberExpr>(E) ||
isa<UnresolvedMemberExpr>(E))
Out << "Dt";
else
Out << "DT";
mangleExpression(E);
Out << 'E';
}
void CXXNameMangler::mangleType(const UnaryTransformType *T) {
  // If this is dependent, we need to record that. If not, we simply
  // mangle it as the underlying type since they are equivalent.
  if (T->isDependentType()) {
    // Vendor extended type qualifier: U <source-name>.
    Out << 'U';

    switch (T->getUTTKind()) {
      case UnaryTransformType::EnumUnderlyingType:
        // "3eut" = length-prefixed "eut" (enum underlying type).
        Out << "3eut";
        break;
    }
  }

  mangleType(T->getUnderlyingType());
}
void CXXNameMangler::mangleType(const AutoType *T) {
QualType D = T->getDeducedType();
// <builtin-type> ::= Da # dependent auto
if (D.isNull())
Out << (T->isDecltypeAuto() ? "Dc" : "Da");
else
mangleType(D);
}
void CXXNameMangler::mangleType(const AtomicType *T) {
  // <type> ::= U <source-name> <type>	# vendor extended type qualifier
  // (Until there's a standardized mangling...)
  // "_Atomic T" is encoded as the qualifier followed by the value type.
  Out << "U7_Atomic";
  mangleType(T->getValueType());
}
void CXXNameMangler::mangleIntegerLiteral(QualType T,
                                          const llvm::APSInt &Value) {
  //  <expr-primary> ::= L <type> <value number> E # integer literal
  Out << 'L';
  mangleType(T);
  if (!T->isBooleanType())
    mangleNumber(Value);
  else
    Out << (Value.getBoolValue() ? '1' : '0'); // booleans encode as 0/1
  Out << 'E';
}
void CXXNameMangler::mangleMemberExprBase(const Expr *Base, bool IsArrow) {
// Ignore member expressions involving anonymous unions.
while (const auto *RT = Base->getType()->getAs<RecordType>()) {
if (!RT->getDecl()->isAnonymousStructOrUnion())
break;
const auto *ME = dyn_cast<MemberExpr>(Base);
if (!ME)
break;
Base = ME->getBase();
IsArrow = ME->isArrow();
}
if (Base->isImplicitCXXThis()) {
// Note: GCC mangles member expressions to the implicit 'this' as
// *this., whereas we represent them as this->. The Itanium C++ ABI
// does not specify anything here, so we follow GCC.
Out << "dtdefpT";
} else {
Out << (IsArrow ? "pt" : "dt");
mangleExpression(Base);
}
}
/// Mangles a member expression.
///
/// \param base the base of the member access, or null for implicit accesses
///   (in which case only the unresolved name is mangled).
/// \param isArrow whether the access was written with '->'.
/// \param qualifier the nested-name-specifier qualifying the member name.
/// \param firstQualifierLookup accepted for interface symmetry with the AST
///   accessors; not used by this implementation.
/// \param member the name of the member being accessed.
/// \param arity the arity to mangle the unresolved name with.
void CXXNameMangler::mangleMemberExpr(const Expr *base,
                                      bool isArrow,
                                      NestedNameSpecifier *qualifier,
                                      NamedDecl *firstQualifierLookup,
                                      DeclarationName member,
                                      unsigned arity) {
  // <expression> ::= dt <expression> <unresolved-name>
  //              ::= pt <expression> <unresolved-name>
  if (base)
    mangleMemberExprBase(base, isArrow);
  mangleUnresolvedName(qualifier, member, arity);
}
/// Look at the callee of the given call expression and determine if
/// it's a parenthesized id-expression which would have triggered ADL
/// otherwise.
static bool isParenthesizedADLCallee(const CallExpr *call) {
const Expr *callee = call->getCallee();
const Expr *fn = callee->IgnoreParens();
// Must be parenthesized. IgnoreParens() skips __extension__ nodes,
// too, but for those to appear in the callee, it would have to be
// parenthesized.
if (callee == fn) return false;
// Must be an unresolved lookup.
const UnresolvedLookupExpr *lookup = dyn_cast<UnresolvedLookupExpr>(fn);
if (!lookup) return false;
assert(!lookup->requiresADL());
// Must be an unqualified lookup.
if (lookup->getQualifier()) return false;
// Must not have found a class member. Note that if one is a class
// member, they're all class members.
if (lookup->getNumDecls() > 0 &&
(*lookup->decls_begin())->isCXXClassMember())
return false;
// Otherwise, ADL would have been triggered.
return true;
}
/// Mangle an explicit cast: the two-letter encoding for the cast kind
/// (e.g. "sc" for static_cast), then the destination type, then the operand.
void CXXNameMangler::mangleCastExpression(const Expr *E, StringRef CastEncoding) {
  const ExplicitCastExpr *ECE = cast<ExplicitCastExpr>(E);
  Out << CastEncoding;
  mangleType(ECE->getType());
  mangleExpression(ECE->getSubExpr());
}
void CXXNameMangler::mangleInitListElements(const InitListExpr *InitList) {
if (auto *Syntactic = InitList->getSyntacticForm())
InitList = Syntactic;
for (unsigned i = 0, e = InitList->getNumInits(); i != e; ++i)
mangleExpression(InitList->getInit(i));
}
void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
  // <expression> ::= <unary operator-name> <expression>
  //              ::= <binary operator-name> <expression> <expression>
  //              ::= <trinary operator-name> <expression> <expression> <expression>
  //              ::= cv <type> expression           # conversion with one argument
  //              ::= cv <type> _ <expression>* E # conversion with a different number of arguments
  //              ::= dc <type> <expression>         # dynamic_cast<type> (expression)
  //              ::= sc <type> <expression>         # static_cast<type> (expression)
  //              ::= cc <type> <expression>         # const_cast<type> (expression)
  //              ::= rc <type> <expression>         # reinterpret_cast<type> (expression)
  //              ::= st <type>                      # sizeof (a type)
  //              ::= at <type>                      # alignof (a type)
  //              ::= <template-param>
  //              ::= <function-param>
  //              ::= sr <type> <unqualified-name>                   # dependent name
  //              ::= sr <type> <unqualified-name> <template-args>   # dependent template-id
  //              ::= ds <expression> <expression>                   # expr.*expr
  //              ::= sZ <template-param>                            # size of a parameter pack
  //              ::= sZ <function-param>    # size of a function parameter pack
  //              ::= <expr-primary>
  // <expr-primary> ::= L <type> <value number> E    # integer literal
  //                ::= L <type <value float> E      # floating literal
  //                ::= L <mangled-name> E           # external name
  //                ::= fpT                          # 'this' expression
  QualType ImplicitlyConvertedToType;

// Implicit casts loop back here after recording their destination type and
// stripping down to the subexpression.
recurse:
  switch (E->getStmtClass()) {
  case Expr::NoStmtClass:
#define ABSTRACT_STMT(Type)
#define EXPR(Type, Base)
#define STMT(Type, Base) \
  case Expr::Type##Class:
#include "clang/AST/StmtNodes.inc"
    // fallthrough
  // These all can only appear in local or variable-initialization
  // contexts and so should never appear in a mangling.
  case Expr::AddrLabelExprClass:
  case Expr::DesignatedInitUpdateExprClass:
  case Expr::ImplicitValueInitExprClass:
  case Expr::NoInitExprClass:
  case Expr::ParenListExprClass:
  case Expr::LambdaExprClass:
  case Expr::MSPropertyRefExprClass:
  case Expr::TypoExprClass:  // This should no longer exist in the AST by now.
    llvm_unreachable("unexpected statement kind");
  // FIXME: invent manglings for all these.
  case Expr::BlockExprClass:
  case Expr::ChooseExprClass:
  case Expr::CompoundLiteralExprClass:
  case Expr::DesignatedInitExprClass:
  case Expr::ExtVectorElementExprClass:
  case Expr::ExtMatrixElementExprClass: // HLSL Change
  case Expr::HLSLVectorElementExprClass: // HLSL Change
  case Expr::GenericSelectionExprClass:
  case Expr::ObjCEncodeExprClass:
  case Expr::ObjCIsaExprClass:
  case Expr::ObjCIvarRefExprClass:
  case Expr::ObjCMessageExprClass:
  case Expr::ObjCPropertyRefExprClass:
  case Expr::ObjCProtocolExprClass:
  case Expr::ObjCSelectorExprClass:
  case Expr::ObjCStringLiteralClass:
  case Expr::ObjCBoxedExprClass:
  case Expr::ObjCArrayLiteralClass:
  case Expr::ObjCDictionaryLiteralClass:
  case Expr::ObjCSubscriptRefExprClass:
  case Expr::ObjCIndirectCopyRestoreExprClass:
  case Expr::OffsetOfExprClass:
  case Expr::PredefinedExprClass:
  case Expr::ShuffleVectorExprClass:
  case Expr::ConvertVectorExprClass:
  case Expr::StmtExprClass:
  case Expr::TypeTraitExprClass:
  case Expr::ArrayTypeTraitExprClass:
  case Expr::ExpressionTraitExprClass:
  case Expr::VAArgExprClass:
  case Expr::CUDAKernelCallExprClass:
  case Expr::AsTypeExprClass:
  case Expr::PseudoObjectExprClass:
  case Expr::AtomicExprClass:
  {
    // As bad as this diagnostic is, it's better than crashing.
    DiagnosticsEngine &Diags = Context.getDiags();
    unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
                                     "cannot yet mangle expression type %0");
    Diags.Report(E->getExprLoc(), DiagID)
      << E->getStmtClassName() << E->getSourceRange();
    break;
  }

  case Expr::CXXUuidofExprClass: {
    const CXXUuidofExpr *UE = cast<CXXUuidofExpr>(E);
    if (UE->isTypeOperand()) {
      QualType UuidT = UE->getTypeOperand(Context.getASTContext());
      Out << "u8__uuidoft";
      mangleType(UuidT);
    } else {
      Expr *UuidExp = UE->getExprOperand();
      Out << "u8__uuidofz";
      mangleExpression(UuidExp, Arity);
    }
    break;
  }

  // Even gcc-4.5 doesn't mangle this.
  case Expr::BinaryConditionalOperatorClass: {
    DiagnosticsEngine &Diags = Context.getDiags();
    unsigned DiagID =
      Diags.getCustomDiagID(DiagnosticsEngine::Error,
                "?: operator with omitted middle operand cannot be mangled");
    Diags.Report(E->getExprLoc(), DiagID)
      << E->getStmtClassName() << E->getSourceRange();
    break;
  }

  // These are used for internal purposes and cannot be meaningfully mangled.
  case Expr::OpaqueValueExprClass:
    llvm_unreachable("cannot mangle opaque value; mangling wrong thing?");

  case Expr::InitListExprClass: {
    Out << "il";
    mangleInitListElements(cast<InitListExpr>(E));
    Out << "E";
    break;
  }

  case Expr::CXXDefaultArgExprClass:
    mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr(), Arity);
    break;

  case Expr::CXXDefaultInitExprClass:
    mangleExpression(cast<CXXDefaultInitExpr>(E)->getExpr(), Arity);
    break;

  case Expr::CXXStdInitializerListExprClass:
    mangleExpression(cast<CXXStdInitializerListExpr>(E)->getSubExpr(), Arity);
    break;

  case Expr::SubstNonTypeTemplateParmExprClass:
    mangleExpression(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
                     Arity);
    break;

  case Expr::UserDefinedLiteralClass:
    // We follow g++'s approach of mangling a UDL as a call to the literal
    // operator.
  case Expr::CXXMemberCallExprClass: // fallthrough
  case Expr::CallExprClass: {
    const CallExpr *CE = cast<CallExpr>(E);

    // <expression> ::= cp <simple-id> <expression>* E
    // We use this mangling only when the call would use ADL except
    // for being parenthesized.  Per discussion with David
    // Vandervoorde, 2011.04.25.
    if (isParenthesizedADLCallee(CE)) {
      Out << "cp";
      // The callee here is a parenthesized UnresolvedLookupExpr with
      // no qualifier and should always get mangled as a <simple-id>
      // anyway.

    // <expression> ::= cl <expression>* E
    } else {
      Out << "cl";
    }

    // A pack expansion in the argument list makes the arity unknowable.
    unsigned CallArity = CE->getNumArgs();
    for (const Expr *Arg : CE->arguments())
      if (isa<PackExpansionExpr>(Arg))
        CallArity = UnknownArity;

    mangleExpression(CE->getCallee(), CallArity);
    for (const Expr *Arg : CE->arguments())
      mangleExpression(Arg);
    Out << 'E';
    break;
  }

  case Expr::CXXNewExprClass: {
    const CXXNewExpr *New = cast<CXXNewExpr>(E);
    if (New->isGlobalNew()) Out << "gs";
    Out << (New->isArray() ? "na" : "nw");
    for (CXXNewExpr::const_arg_iterator I = New->placement_arg_begin(),
           E = New->placement_arg_end(); I != E; ++I)
      mangleExpression(*I);
    Out << '_';
    mangleType(New->getAllocatedType());
    if (New->hasInitializer()) {
      // "il" for list-initialization, "pi" for parenthesized initialization.
      if (New->getInitializationStyle() == CXXNewExpr::ListInit)
        Out << "il";
      else
        Out << "pi";
      const Expr *Init = New->getInitializer();
      if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
        // Directly inline the initializers.
        for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
                                                  E = CCE->arg_end();
             I != E; ++I)
          mangleExpression(*I);
      } else if (const ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init)) {
        for (unsigned i = 0, e = PLE->getNumExprs(); i != e; ++i)
          mangleExpression(PLE->getExpr(i));
      } else if (New->getInitializationStyle() == CXXNewExpr::ListInit &&
                 isa<InitListExpr>(Init)) {
        // Only take InitListExprs apart for list-initialization.
        mangleInitListElements(cast<InitListExpr>(Init));
      } else
        mangleExpression(Init);
    }
    Out << 'E';
    break;
  }

  case Expr::CXXPseudoDestructorExprClass: {
    const auto *PDE = cast<CXXPseudoDestructorExpr>(E);
    if (const Expr *Base = PDE->getBase())
      mangleMemberExprBase(Base, PDE->isArrow());
    NestedNameSpecifier *Qualifier = PDE->getQualifier();
    QualType ScopeType;
    if (TypeSourceInfo *ScopeInfo = PDE->getScopeTypeInfo()) {
      if (Qualifier) {
        mangleUnresolvedPrefix(Qualifier,
                               /*Recursive=*/true);
        mangleUnresolvedTypeOrSimpleId(ScopeInfo->getType());
        Out << 'E';
      } else {
        Out << "sr";
        if (!mangleUnresolvedTypeOrSimpleId(ScopeInfo->getType()))
          Out << 'E';
      }
    } else if (Qualifier) {
      mangleUnresolvedPrefix(Qualifier);
    }
    // <base-unresolved-name> ::= dn <destructor-name>
    Out << "dn";
    QualType DestroyedType = PDE->getDestroyedType();
    mangleUnresolvedTypeOrSimpleId(DestroyedType);
    break;
  }

  case Expr::MemberExprClass: {
    const MemberExpr *ME = cast<MemberExpr>(E);
    mangleMemberExpr(ME->getBase(), ME->isArrow(),
                     ME->getQualifier(), nullptr,
                     ME->getMemberDecl()->getDeclName(), Arity);
    break;
  }

  case Expr::UnresolvedMemberExprClass: {
    const UnresolvedMemberExpr *ME = cast<UnresolvedMemberExpr>(E);
    mangleMemberExpr(ME->isImplicitAccess() ? nullptr : ME->getBase(),
                     ME->isArrow(), ME->getQualifier(), nullptr,
                     ME->getMemberName(), Arity);
    if (ME->hasExplicitTemplateArgs())
      mangleTemplateArgs(ME->getExplicitTemplateArgs());
    break;
  }

  case Expr::CXXDependentScopeMemberExprClass: {
    const CXXDependentScopeMemberExpr *ME
      = cast<CXXDependentScopeMemberExpr>(E);
    mangleMemberExpr(ME->isImplicitAccess() ? nullptr : ME->getBase(),
                     ME->isArrow(), ME->getQualifier(),
                     ME->getFirstQualifierFoundInScope(),
                     ME->getMember(), Arity);
    if (ME->hasExplicitTemplateArgs())
      mangleTemplateArgs(ME->getExplicitTemplateArgs());
    break;
  }

  case Expr::UnresolvedLookupExprClass: {
    const UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(E);
    mangleUnresolvedName(ULE->getQualifier(), ULE->getName(), Arity);

    // All the <unresolved-name> productions end in a
    // base-unresolved-name, where <template-args> are just tacked
    // onto the end.
    if (ULE->hasExplicitTemplateArgs())
      mangleTemplateArgs(ULE->getExplicitTemplateArgs());
    break;
  }

  case Expr::CXXUnresolvedConstructExprClass: {
    const CXXUnresolvedConstructExpr *CE = cast<CXXUnresolvedConstructExpr>(E);
    unsigned N = CE->arg_size();

    // Single-argument conversions use "cv <type> <expr>"; other arities use
    // the "cv <type> _ <expr>* E" form.
    Out << "cv";
    mangleType(CE->getType());
    if (N != 1) Out << '_';
    for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
    if (N != 1) Out << 'E';
    break;
  }

  case Expr::CXXConstructExprClass: {
    const auto *CE = cast<CXXConstructExpr>(E);
    if (!CE->isListInitialization() || CE->isStdInitListInitialization()) {
      assert(
          CE->getNumArgs() >= 1 &&
          (CE->getNumArgs() == 1 || isa<CXXDefaultArgExpr>(CE->getArg(1))) &&
          "implicit CXXConstructExpr must have one argument");
      return mangleExpression(cast<CXXConstructExpr>(E)->getArg(0));
    }
    Out << "il";
    for (auto *E : CE->arguments())
      mangleExpression(E);
    Out << "E";
    break;
  }

  case Expr::CXXTemporaryObjectExprClass: {
    const auto *CE = cast<CXXTemporaryObjectExpr>(E);
    unsigned N = CE->getNumArgs();
    bool List = CE->isListInitialization();

    // "tl" for braced list-initialization, "cv" for functional casts.
    if (List)
      Out << "tl";
    else
      Out << "cv";
    mangleType(CE->getType());
    if (!List && N != 1)
      Out << '_';
    if (CE->isStdInitListInitialization()) {
      // We implicitly created a std::initializer_list<T> for the first argument
      // of a constructor of type U in an expression of the form U{a, b, c}.
      // Strip all the semantic gunk off the initializer list.
      auto *SILE =
          cast<CXXStdInitializerListExpr>(CE->getArg(0)->IgnoreImplicit());
      auto *ILE = cast<InitListExpr>(SILE->getSubExpr()->IgnoreImplicit());
      mangleInitListElements(ILE);
    } else {
      for (auto *E : CE->arguments())
        mangleExpression(E);
    }
    if (List || N != 1)
      Out << 'E';
    break;
  }

  case Expr::CXXScalarValueInitExprClass:
    Out << "cv";
    mangleType(E->getType());
    Out << "_E";
    break;

  case Expr::CXXNoexceptExprClass:
    Out << "nx";
    mangleExpression(cast<CXXNoexceptExpr>(E)->getOperand());
    break;

  case Expr::UnaryExprOrTypeTraitExprClass: {
    const UnaryExprOrTypeTraitExpr *SAE = cast<UnaryExprOrTypeTraitExpr>(E);

    if (!SAE->isInstantiationDependent()) {
      // Itanium C++ ABI:
      //   If the operand of a sizeof or alignof operator is not
      //   instantiation-dependent it is encoded as an integer literal
      //   reflecting the result of the operator.
      //
      //   If the result of the operator is implicitly converted to a known
      //   integer type, that type is used for the literal; otherwise, the type
      //   of std::size_t or std::ptrdiff_t is used.
      QualType T = (ImplicitlyConvertedToType.isNull() ||
                    !ImplicitlyConvertedToType->isIntegerType())? SAE->getType()
                                                    : ImplicitlyConvertedToType;
      llvm::APSInt V = SAE->EvaluateKnownConstInt(Context.getASTContext());
      mangleIntegerLiteral(T, V);
      break;
    }

    switch(SAE->getKind()) {
    case UETT_SizeOf:
      Out << 's';
      break;
    case UETT_AlignOf:
      Out << 'a';
      break;
    case UETT_VecStep: {
      DiagnosticsEngine &Diags = Context.getDiags();
      unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
                                     "cannot yet mangle vec_step expression");
      Diags.Report(DiagID);
      return;
    }
    case UETT_OpenMPRequiredSimdAlign: {
      DiagnosticsEngine &Diags = Context.getDiags();
      unsigned DiagID = Diags.getCustomDiagID(
          DiagnosticsEngine::Error,
          "cannot yet mangle __builtin_omp_required_simd_align expression");
      Diags.Report(DiagID);
      return;
    }
    // HLSL Change Begins
    case UETT_ArrayLength: {
      DiagnosticsEngine & Diags = Context.getDiags();
      unsigned DiagID = Diags.getCustomDiagID(
        DiagnosticsEngine::Error,
        "cannot yet mangle .Length expression");
      Diags.Report(DiagID);
      return;
    }
    // HLSL Change Ends
    }
    if (SAE->isArgumentType()) {
      Out << 't';
      mangleType(SAE->getArgumentType());
    } else {
      Out << 'z';
      mangleExpression(SAE->getArgumentExpr());
    }
    break;
  }

  case Expr::CXXThrowExprClass: {
    const CXXThrowExpr *TE = cast<CXXThrowExpr>(E);
    //  <expression> ::= tw <expression>  # throw expression
    //               ::= tr               # rethrow
    if (TE->getSubExpr()) {
      Out << "tw";
      mangleExpression(TE->getSubExpr());
    } else {
      Out << "tr";
    }
    break;
  }

  case Expr::CXXTypeidExprClass: {
    const CXXTypeidExpr *TIE = cast<CXXTypeidExpr>(E);
    //  <expression> ::= ti <type>        # typeid (type)
    //               ::= te <expression>  # typeid (expression)
    if (TIE->isTypeOperand()) {
      Out << "ti";
      mangleType(TIE->getTypeOperand(Context.getASTContext()));
    } else {
      Out << "te";
      mangleExpression(TIE->getExprOperand());
    }
    break;
  }

  case Expr::CXXDeleteExprClass: {
    const CXXDeleteExpr *DE = cast<CXXDeleteExpr>(E);
    //  <expression> ::= [gs] dl <expression>  # [::] delete expr
    //               ::= [gs] da <expression>  # [::] delete [] expr
    if (DE->isGlobalDelete()) Out << "gs";
    Out << (DE->isArrayForm() ? "da" : "dl");
    mangleExpression(DE->getArgument());
    break;
  }

  case Expr::UnaryOperatorClass: {
    const UnaryOperator *UO = cast<UnaryOperator>(E);
    mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()),
                       /*Arity=*/1);
    mangleExpression(UO->getSubExpr());
    break;
  }

  case Expr::ArraySubscriptExprClass: {
    const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(E);

    // Array subscript is treated as a syntactically weird form of
    // binary operator.
    Out << "ix";
    mangleExpression(AE->getLHS());
    mangleExpression(AE->getRHS());
    break;
  }

  case Expr::CompoundAssignOperatorClass: // fallthrough
  case Expr::BinaryOperatorClass: {
    const BinaryOperator *BO = cast<BinaryOperator>(E);
    // The .* operator has its own dedicated encoding "ds".
    if (BO->getOpcode() == BO_PtrMemD)
      Out << "ds";
    else
      mangleOperatorName(BinaryOperator::getOverloadedOperator(BO->getOpcode()),
                         /*Arity=*/2);
    mangleExpression(BO->getLHS());
    mangleExpression(BO->getRHS());
    break;
  }

  case Expr::ConditionalOperatorClass: {
    const ConditionalOperator *CO = cast<ConditionalOperator>(E);
    mangleOperatorName(OO_Conditional, /*Arity=*/3);
    mangleExpression(CO->getCond());
    mangleExpression(CO->getLHS(), Arity);
    mangleExpression(CO->getRHS(), Arity);
    break;
  }

  case Expr::ImplicitCastExprClass: {
    // Record the destination type (used by sizeof/alignof above), strip the
    // cast, and restart.
    ImplicitlyConvertedToType = E->getType();
    E = cast<ImplicitCastExpr>(E)->getSubExpr();
    goto recurse;
  }

  case Expr::ObjCBridgedCastExprClass: {
    // Mangle ownership casts as a vendor extended operator __bridge,
    // __bridge_transfer, or __bridge_retain.
    StringRef Kind = cast<ObjCBridgedCastExpr>(E)->getBridgeKindName();
    Out << "v1U" << Kind.size() << Kind;
  }
  // Fall through to mangle the cast itself.
  LLVM_FALLTHROUGH; // HLSL Change

  case Expr::CStyleCastExprClass:
    mangleCastExpression(E, "cv");
    break;

  case Expr::CXXFunctionalCastExprClass: {
    auto *Sub = cast<ExplicitCastExpr>(E)->getSubExpr()->IgnoreImplicit();
    // FIXME: Add isImplicit to CXXConstructExpr.
    if (auto *CCE = dyn_cast<CXXConstructExpr>(Sub))
      if (CCE->getParenOrBraceRange().isInvalid())
        Sub = CCE->getArg(0)->IgnoreImplicit();
    if (auto *StdInitList = dyn_cast<CXXStdInitializerListExpr>(Sub))
      Sub = StdInitList->getSubExpr()->IgnoreImplicit();
    if (auto *IL = dyn_cast<InitListExpr>(Sub)) {
      Out << "tl";
      mangleType(E->getType());
      mangleInitListElements(IL);
      Out << "E";
    } else {
      mangleCastExpression(E, "cv");
    }
    break;
  }

  case Expr::CXXStaticCastExprClass:
    mangleCastExpression(E, "sc");
    break;
  case Expr::CXXDynamicCastExprClass:
    mangleCastExpression(E, "dc");
    break;
  case Expr::CXXReinterpretCastExprClass:
    mangleCastExpression(E, "rc");
    break;
  case Expr::CXXConstCastExprClass:
    mangleCastExpression(E, "cc");
    break;

  case Expr::CXXOperatorCallExprClass: {
    const CXXOperatorCallExpr *CE = cast<CXXOperatorCallExpr>(E);
    unsigned NumArgs = CE->getNumArgs();
    mangleOperatorName(CE->getOperator(), /*Arity=*/NumArgs);
    // Mangle the arguments.
    for (unsigned i = 0; i != NumArgs; ++i)
      mangleExpression(CE->getArg(i));
    break;
  }

  case Expr::ParenExprClass:
    mangleExpression(cast<ParenExpr>(E)->getSubExpr(), Arity);
    break;

  case Expr::DeclRefExprClass: {
    const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl();

    switch (D->getKind()) {
    default:
      //  <expr-primary> ::= L <mangled-name> E # external name
      Out << 'L';
      mangle(D);
      Out << 'E';
      break;

    case Decl::ParmVar:
      mangleFunctionParam(cast<ParmVarDecl>(D));
      break;

    case Decl::EnumConstant: {
      // Enumerators mangle as integer literals of their enum's type.
      const EnumConstantDecl *ED = cast<EnumConstantDecl>(D);
      mangleIntegerLiteral(ED->getType(), ED->getInitVal());
      break;
    }

    case Decl::NonTypeTemplateParm: {
      const NonTypeTemplateParmDecl *PD = cast<NonTypeTemplateParmDecl>(D);
      mangleTemplateParameter(PD->getIndex());
      break;
    }

    }

    break;
  }

  case Expr::SubstNonTypeTemplateParmPackExprClass:
    // FIXME: not clear how to mangle this!
    // template <unsigned N...> class A {
    //   template <class U...> void foo(U (&x)[N]...);
    // };
    Out << "_SUBSTPACK_";
    break;

  case Expr::FunctionParmPackExprClass: {
    // FIXME: not clear how to mangle this!
    const FunctionParmPackExpr *FPPE = cast<FunctionParmPackExpr>(E);
    Out << "v110_SUBSTPACK";
    mangleFunctionParam(FPPE->getParameterPack());
    break;
  }

  case Expr::DependentScopeDeclRefExprClass: {
    const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E);
    mangleUnresolvedName(DRE->getQualifier(), DRE->getDeclName(), Arity);

    // All the <unresolved-name> productions end in a
    // base-unresolved-name, where <template-args> are just tacked
    // onto the end.
    if (DRE->hasExplicitTemplateArgs())
      mangleTemplateArgs(DRE->getExplicitTemplateArgs());
    break;
  }

  case Expr::CXXBindTemporaryExprClass:
    mangleExpression(cast<CXXBindTemporaryExpr>(E)->getSubExpr());
    break;

  case Expr::ExprWithCleanupsClass:
    mangleExpression(cast<ExprWithCleanups>(E)->getSubExpr(), Arity);
    break;

  case Expr::FloatingLiteralClass: {
    const FloatingLiteral *FL = cast<FloatingLiteral>(E);
    Out << 'L';
    mangleType(FL->getType());
    mangleFloat(FL->getValue());
    Out << 'E';
    break;
  }

  case Expr::CharacterLiteralClass:
    Out << 'L';
    mangleType(E->getType());
    Out << cast<CharacterLiteral>(E)->getValue();
    Out << 'E';
    break;

  // FIXME. __objc_yes/__objc_no are mangled same as true/false
  case Expr::ObjCBoolLiteralExprClass:
    Out << "Lb";
    Out << (cast<ObjCBoolLiteralExpr>(E)->getValue() ? '1' : '0');
    Out << 'E';
    break;

  case Expr::CXXBoolLiteralExprClass:
    Out << "Lb";
    Out << (cast<CXXBoolLiteralExpr>(E)->getValue() ? '1' : '0');
    Out << 'E';
    break;

  case Expr::IntegerLiteralClass: {
    llvm::APSInt Value(cast<IntegerLiteral>(E)->getValue());
    if (E->getType()->isSignedIntegerType())
      Value.setIsSigned(true);
    mangleIntegerLiteral(E->getType(), Value);
    break;
  }

  case Expr::ImaginaryLiteralClass: {
    const ImaginaryLiteral *IE = cast<ImaginaryLiteral>(E);
    // Mangle as if a complex literal.
    // Proposal from David Vandevoorde, 2010.06.30.
    Out << 'L';
    mangleType(E->getType());
    if (const FloatingLiteral *Imag =
          dyn_cast<FloatingLiteral>(IE->getSubExpr())) {
      // Mangle a floating-point zero of the appropriate type.
      mangleFloat(llvm::APFloat(Imag->getValue().getSemantics()));
      Out << '_';
      mangleFloat(Imag->getValue());
    } else {
      Out << "0_";
      llvm::APSInt Value(cast<IntegerLiteral>(IE->getSubExpr())->getValue());
      if (IE->getSubExpr()->getType()->isSignedIntegerType())
        Value.setIsSigned(true);
      mangleNumber(Value);
    }
    Out << 'E';
    break;
  }

  case Expr::StringLiteralClass: {
    // Revised proposal from David Vandervoorde, 2010.07.15.
    Out << 'L';
    assert(isa<ConstantArrayType>(E->getType()));
    mangleType(E->getType());
    Out << 'E';
    break;
  }

  case Expr::GNUNullExprClass:
    // FIXME: should this really be mangled the same as nullptr?
    // fallthrough

  case Expr::CXXNullPtrLiteralExprClass: {
    Out << "LDnE";
    break;
  }

  case Expr::PackExpansionExprClass:
    Out << "sp";
    mangleExpression(cast<PackExpansionExpr>(E)->getPattern());
    break;

  case Expr::SizeOfPackExprClass: {
    Out << "sZ";
    const NamedDecl *Pack = cast<SizeOfPackExpr>(E)->getPack();
    if (const TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Pack))
      mangleTemplateParameter(TTP->getIndex());
    else if (const NonTypeTemplateParmDecl *NTTP
                = dyn_cast<NonTypeTemplateParmDecl>(Pack))
      mangleTemplateParameter(NTTP->getIndex());
    else if (const TemplateTemplateParmDecl *TempTP
                                    = dyn_cast<TemplateTemplateParmDecl>(Pack))
      mangleTemplateParameter(TempTP->getIndex());
    else
      mangleFunctionParam(cast<ParmVarDecl>(Pack));
    break;
  }

  case Expr::MaterializeTemporaryExprClass: {
    mangleExpression(cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr());
    break;
  }

  case Expr::CXXFoldExprClass: {
    auto *FE = cast<CXXFoldExpr>(E);
    // fl/fr = unary left/right fold; fL/fR = binary fold (with init).
    if (FE->isLeftFold())
      Out << (FE->getInit() ? "fL" : "fl");
    else
      Out << (FE->getInit() ? "fR" : "fr");

    if (FE->getOperator() == BO_PtrMemD)
      Out << "ds";
    else
      mangleOperatorName(
          BinaryOperator::getOverloadedOperator(FE->getOperator()),
          /*Arity=*/2);

    if (FE->getLHS())
      mangleExpression(FE->getLHS());
    if (FE->getRHS())
      mangleExpression(FE->getRHS());
    break;
  }

  case Expr::CXXThisExprClass:
    Out << "fpT";
    break;
  }
}
/// Mangle an expression which refers to a parameter variable.
///
/// <expression> ::= <function-param>
/// <function-param> ::= fp <top-level CV-qualifiers> _ # L == 0, I == 0
/// <function-param> ::= fp <top-level CV-qualifiers>
/// <parameter-2 non-negative number> _ # L == 0, I > 0
/// <function-param> ::= fL <L-1 non-negative number>
/// p <top-level CV-qualifiers> _ # L > 0, I == 0
/// <function-param> ::= fL <L-1 non-negative number>
/// p <top-level CV-qualifiers>
/// <I-1 non-negative number> _ # L > 0, I > 0
///
/// L is the nesting depth of the parameter, defined as 1 if the
/// parameter comes from the innermost function prototype scope
/// enclosing the current context, 2 if from the next enclosing
/// function prototype scope, and so on, with one special case: if
/// we've processed the full parameter clause for the innermost
/// function type, then L is one less. This definition conveniently
/// makes it irrelevant whether a function's result type was written
/// trailing or leading, but is otherwise overly complicated; the
/// numbering was first designed without considering references to
/// parameter in locations other than return types, and then the
/// mangling had to be generalized without changing the existing
/// manglings.
///
/// I is the zero-based index of the parameter within its parameter
/// declaration clause. Note that the original ABI document describes
/// this using 1-based ordinals.
void CXXNameMangler::mangleFunctionParam(const ParmVarDecl *parm) {
  unsigned parmDepth = parm->getFunctionScopeDepth();
  unsigned parmIndex = parm->getFunctionScopeIndex();

  // Compute 'L'.
  // parmDepth does not include the declaring function prototype.
  // FunctionTypeDepth does account for that.
  assert(parmDepth < FunctionTypeDepth.getDepth());
  unsigned nestingDepth = FunctionTypeDepth.getDepth() - parmDepth;
  if (FunctionTypeDepth.isInResultType())
    nestingDepth--;

  // L == 0 uses the short "fp" form; otherwise emit "fL <L-1> p".
  if (nestingDepth == 0) {
    Out << "fp";
  } else {
    Out << "fL" << (nestingDepth - 1) << 'p';
  }

  // Top-level qualifiers.  We don't have to worry about arrays here,
  // because parameters declared as arrays should already have been
  // transformed to have pointer type. FIXME: apparently these don't
  // get mangled if used as an rvalue of a known non-class type?
  assert(!parm->getType()->isArrayType()
         && "parameter's type is still an array type?");
  mangleQualifiers(parm->getType().getQualifiers());

  // Parameter index: I == 0 is implicit; otherwise emit I-1 (see the
  // numbering discussion in the comment above this function).
  if (parmIndex != 0) {
    Out << (parmIndex - 1);
  }
  Out << '_';
}
void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
  // <ctor-dtor-name> ::= C1   # complete object constructor
  //                  ::= C2   # base object constructor
  //
  // C5 is the comdat name grouping C1 and C2.
  switch (T) {
  case Ctor_Complete:
    Out << "C1";
    return;
  case Ctor_Base:
    Out << "C2";
    return;
  case Ctor_Comdat:
    Out << "C5";
    return;
  case Ctor_DefaultClosure:
  case Ctor_CopyingClosure:
    break;
  }
  llvm_unreachable("closure constructors don't exist for the Itanium ABI!");
}
void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
  // <ctor-dtor-name> ::= D0   # deleting destructor
  //                  ::= D1   # complete object destructor
  //                  ::= D2   # base object destructor
  //
  // D5 is the comdat name grouping D1, D2 and, if virtual, D0.
  const char *Code = nullptr;
  switch (T) {
  case Dtor_Deleting:
    Code = "D0";
    break;
  case Dtor_Complete:
    Code = "D1";
    break;
  case Dtor_Base:
    Code = "D2";
    break;
  case Dtor_Comdat:
    Code = "D5";
    break;
  }
  Out << Code;
}
void CXXNameMangler::mangleTemplateArgs(
    const ASTTemplateArgumentListInfo &TemplateArgs) {
  // <template-args> ::= I <template-arg>+ E
  Out << 'I';
  const unsigned Count = TemplateArgs.NumTemplateArgs;
  for (unsigned Idx = 0; Idx < Count; ++Idx)
    mangleTemplateArg(TemplateArgs.getTemplateArgs()[Idx].getArgument());
  Out << 'E';
}
void CXXNameMangler::mangleTemplateArgs(const TemplateArgumentList &AL) {
  // <template-args> ::= I <template-arg>+ E
  Out << 'I';
  const unsigned Count = AL.size();
  for (unsigned Idx = 0; Idx < Count; ++Idx)
    mangleTemplateArg(AL[Idx]);
  Out << 'E';
}
void CXXNameMangler::mangleTemplateArgs(const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs) {
// <template-args> ::= I <template-arg>+ E
Out << 'I';
for (unsigned i = 0; i != NumTemplateArgs; ++i)
mangleTemplateArg(TemplateArgs[i]);
Out << 'E';
}
void CXXNameMangler::mangleTemplateArg(TemplateArgument A) {
  // <template-arg> ::= <type>              # type or template
  //                ::= X <expression> E    # expression
  //                ::= <expr-primary>      # simple expressions
  //                ::= J <template-arg>* E # argument pack
  // Canonicalize first (except for arguments that are instantiation-dependent
  // without being dependent) so equivalent arguments mangle identically.
  if (!A.isInstantiationDependent() || A.isDependent())
    A = Context.getASTContext().getCanonicalTemplateArgument(A);
  switch (A.getKind()) {
  case TemplateArgument::Null:
    llvm_unreachable("Cannot mangle NULL template argument");
  case TemplateArgument::Type:
    mangleType(A.getAsType());
    break;
  case TemplateArgument::Template:
    // This is mangled as <type>.
    mangleType(A.getAsTemplate());
    break;
  case TemplateArgument::TemplateExpansion:
    // <type> ::= Dp <type>          # pack expansion (C++0x)
    Out << "Dp";
    mangleType(A.getAsTemplateOrTemplatePattern());
    break;
  case TemplateArgument::Expression: {
    // It's possible to end up with a DeclRefExpr here in certain
    // dependent cases, in which case we should mangle as a
    // declaration ("L <name> E") rather than as an expression.
    const Expr *E = A.getAsExpr()->IgnoreParens();
    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *D = DRE->getDecl();
      if (isa<VarDecl>(D) || isa<FunctionDecl>(D)) {
        Out << 'L';
        mangle(D);
        Out << 'E';
        break;
      }
    }
    // General case: X <expression> E.
    Out << 'X';
    mangleExpression(E);
    Out << 'E';
    break;
  }
  case TemplateArgument::Integral:
    mangleIntegerLiteral(A.getIntegralType(), A.getAsIntegral());
    break;
  case TemplateArgument::Declaration: {
    // <expr-primary> ::= L <mangled-name> E # external name
    // Clang produces AST's where pointer-to-member-function expressions
    // and pointer-to-function expressions are represented as a declaration not
    // an expression. We compensate for it here to produce the correct mangling.
    ValueDecl *D = A.getAsDecl();
    // Non-reference parameters take the entity's address, so the name is
    // wrapped in an explicit address-of expression: X ad L<name>E E.
    bool compensateMangling = !A.getParamTypeForDecl()->isReferenceType();
    if (compensateMangling) {
      Out << 'X';
      mangleOperatorName(OO_Amp, 1);
    }
    Out << 'L';
    // References to external entities use the mangled name; if the name would
    // not normally be mangled then mangle it as unqualified.
    mangle(D);
    Out << 'E';
    if (compensateMangling)
      Out << 'E';
    break;
  }
  case TemplateArgument::NullPtr: {
    // <expr-primary> ::= L <type> 0 E
    Out << 'L';
    mangleType(A.getNullPtrType());
    Out << "0E";
    break;
  }
  case TemplateArgument::Pack: {
    // <template-arg> ::= J <template-arg>* E
    Out << 'J';
    for (const auto &P : A.pack_elements())
      mangleTemplateArg(P);
    Out << 'E';
  }
  }
}
void CXXNameMangler::mangleTemplateParameter(unsigned Index) {
  // <template-param> ::= T_                                      # index 0
  //                  ::= T <parameter-2 non-negative number> _   # index > 0
  if (Index)
    Out << 'T' << (Index - 1) << '_';
  else
    Out << "T_";
}
// Emits the <seq-id> payload for substitution references: nothing for
// SeqID 0 (yielding "S_"), otherwise SeqID-1 in base 36 ("S0_", "S1_", ...,
// "SA_", ...). The trailing '_' is always written here.
void CXXNameMangler::mangleSeqID(unsigned SeqID) {
  if (SeqID == 1)
    Out << '0';
  else if (SeqID > 1) {
    SeqID--;
    // <seq-id> is encoded in base-36, using digits and upper case letters.
    char Buffer[7]; // log(2**32) / log(36) ~= 7
    MutableArrayRef<char> BufferRef(Buffer);
    // Fill the buffer back-to-front so the digits come out most-significant
    // first without an explicit reverse step.
    MutableArrayRef<char>::reverse_iterator I = BufferRef.rbegin();
    for (; SeqID != 0; SeqID /= 36) {
      unsigned C = SeqID % 36;
      *I++ = (C < 10 ? '0' + C : 'A' + C - 10);
    }
    // I.base() points at the first (most-significant) digit written.
    Out.write(I.base(), I - BufferRef.rbegin());
  }
  Out << '_';
}
void CXXNameMangler::mangleExistingSubstitution(QualType type) {
  // The caller guarantees this type was already registered as a
  // substitution; emit its S<seq-id>_ reference.
  bool didMangle = mangleSubstitution(type);
  assert(didMangle && "no existing substitution for type");
  (void)didMangle;
}
void CXXNameMangler::mangleExistingSubstitution(TemplateName tname) {
  // The caller guarantees this template name was already registered as a
  // substitution; emit its S<seq-id>_ reference.
  bool didMangle = mangleSubstitution(tname);
  assert(didMangle && "no existing substitution for template name");
  (void)didMangle;
}
// <substitution> ::= S <seq-id> _
//                ::= S_
bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) {
  // Standard substitutions (St, Sa, Ss, ...) take precedence over table
  // lookups.
  if (mangleStandardSubstitution(ND))
    return true;
  // Otherwise key on the canonical declaration.
  return mangleSubstitution(
      reinterpret_cast<uintptr_t>(cast<NamedDecl>(ND->getCanonicalDecl())));
}
/// Determine whether the given type has any qualifiers that are relevant for
/// substitutions.
static bool hasMangledSubstitutionQualifiers(QualType T) {
  const Qualifiers Quals = T.getQualifiers();
  if (Quals.getCVRQualifiers())
    return true;
  return Quals.hasAddressSpace();
}
bool CXXNameMangler::mangleSubstitution(QualType T) {
  // An unqualified record type substitutes through its declaration so the
  // type and the decl share a single substitution entry.
  if (!hasMangledSubstitutionQualifiers(T))
    if (const RecordType *Record = T->getAs<RecordType>())
      return mangleSubstitution(Record->getDecl());
  return mangleSubstitution(reinterpret_cast<uintptr_t>(T.getAsOpaquePtr()));
}
bool CXXNameMangler::mangleSubstitution(TemplateName Template) {
  // Template declarations substitute through the decl overload; other
  // template names key on their canonical opaque pointer.
  if (TemplateDecl *Decl = Template.getAsTemplateDecl())
    return mangleSubstitution(Decl);
  TemplateName Canon =
      Context.getASTContext().getCanonicalTemplateName(Template);
  return mangleSubstitution(
      reinterpret_cast<uintptr_t>(Canon.getAsVoidPointer()));
}
bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) {
  // Emit "S<seq-id>_" when this key has been seen before; otherwise report
  // failure so the caller mangles the entity in full.
  auto It = Substitutions.find(Ptr);
  if (It == Substitutions.end())
    return false;
  Out << 'S';
  mangleSeqID(It->second);
  return true;
}
// True only for plain 'char' (in either its signed or unsigned flavor).
static bool isCharType(QualType T) {
  if (T.isNull())
    return false;
  if (T->isSpecificBuiltinType(BuiltinType::Char_S))
    return true;
  return T->isSpecificBuiltinType(BuiltinType::Char_U);
}
/// Returns whether a given type is a template specialization of a given name
/// with a single argument of type char.
static bool isCharSpecialization(QualType T, const char *Name) {
  if (T.isNull())
    return false;
  const RecordType *Record = T->getAs<RecordType>();
  if (!Record)
    return false;
  const ClassTemplateSpecializationDecl *Spec =
      dyn_cast<ClassTemplateSpecializationDecl>(Record->getDecl());
  if (!Spec)
    return false;
  // Must live in namespace std.
  if (!isStdNamespace(getEffectiveDeclContext(Spec)))
    return false;
  // Exactly one template argument, and it must be 'char'.
  const TemplateArgumentList &Args = Spec->getTemplateArgs();
  if (Args.size() != 1 || !isCharType(Args[0].getAsType()))
    return false;
  return Spec->getIdentifier()->getName() == Name;
}
template <std::size_t StrLen>
static bool isStreamCharSpecialization(const ClassTemplateSpecializationDecl*SD,
const char (&Str)[StrLen]) {
if (!SD->getIdentifier()->isStr(Str))
return false;
const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
if (TemplateArgs.size() != 2)
return false;
if (!isCharType(TemplateArgs[0].getAsType()))
return false;
if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
return false;
return true;
}
// Tries the fixed Itanium-ABI abbreviations (St, Sa, Sb, Ss, Si, So, Sd) for
// well-known entities in namespace std. Returns true and emits the
// abbreviation on a match; returns false (emitting nothing) otherwise.
bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
  // <substitution> ::= St # ::std::
  if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
    if (isStd(NS)) {
      Out << "St";
      return true;
    }
  }
  if (const ClassTemplateDecl *TD = dyn_cast<ClassTemplateDecl>(ND)) {
    if (!isStdNamespace(getEffectiveDeclContext(TD)))
      return false;
    // <substitution> ::= Sa # ::std::allocator
    if (TD->getIdentifier()->isStr("allocator")) {
      Out << "Sa";
      return true;
    }
    // <substitution> ::= Sb # ::std::basic_string
    if (TD->getIdentifier()->isStr("basic_string")) {
      Out << "Sb";
      return true;
    }
  }
  if (const ClassTemplateSpecializationDecl *SD =
          dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
    if (!isStdNamespace(getEffectiveDeclContext(SD)))
      return false;
    // <substitution> ::= Ss # ::std::basic_string<char,
    //                         ::std::char_traits<char>,
    //                         ::std::allocator<char> >
    // All three arguments must match exactly for the short form to apply.
    if (SD->getIdentifier()->isStr("basic_string")) {
      const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
      if (TemplateArgs.size() != 3)
        return false;
      if (!isCharType(TemplateArgs[0].getAsType()))
        return false;
      if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
        return false;
      if (!isCharSpecialization(TemplateArgs[2].getAsType(), "allocator"))
        return false;
      Out << "Ss";
      return true;
    }
    // <substitution> ::= Si # ::std::basic_istream<char,
    //                         ::std::char_traits<char> >
    if (isStreamCharSpecialization(SD, "basic_istream")) {
      Out << "Si";
      return true;
    }
    // <substitution> ::= So # ::std::basic_ostream<char,
    //                         ::std::char_traits<char> >
    if (isStreamCharSpecialization(SD, "basic_ostream")) {
      Out << "So";
      return true;
    }
    // <substitution> ::= Sd # ::std::basic_iostream<char,
    //                         ::std::char_traits<char> >
    if (isStreamCharSpecialization(SD, "basic_iostream")) {
      Out << "Sd";
      return true;
    }
  }
  return false;
}
void CXXNameMangler::addSubstitution(QualType T) {
  // Unqualified record types register through their declaration, mirroring
  // the lookup path in mangleSubstitution(QualType).
  if (!hasMangledSubstitutionQualifiers(T)) {
    if (const RecordType *Record = T->getAs<RecordType>()) {
      addSubstitution(Record->getDecl());
      return;
    }
  }
  addSubstitution(reinterpret_cast<uintptr_t>(T.getAsOpaquePtr()));
}
void CXXNameMangler::addSubstitution(TemplateName Template) {
  // Template declarations register through the decl overload; other
  // template names key on their canonical opaque pointer.
  if (TemplateDecl *Decl = Template.getAsTemplateDecl())
    return addSubstitution(Decl);
  TemplateName Canon =
      Context.getASTContext().getCanonicalTemplateName(Template);
  addSubstitution(reinterpret_cast<uintptr_t>(Canon.getAsVoidPointer()));
}
void CXXNameMangler::addSubstitution(uintptr_t Ptr) {
  // Each key may be registered at most once; it takes the next sequence id.
  assert(!Substitutions.count(Ptr) && "Substitution already exists!");
  Substitutions[Ptr] = SeqID++;
}
//
/// Mangles the name of the declaration D and emits that name to the given
/// output stream.
///
/// D must be a variable or function declaration, and must not be a
/// constructor or destructor (those go through mangleCXXCtor/mangleCXXDtor,
/// which also encode the CXXCtorType/CXXDtorType variant). Unlike what an
/// older version of this comment claimed, this routine is void and always
/// writes to \p Out; callers are expected to have checked
/// shouldMangleDeclName() beforehand.
void ItaniumMangleContextImpl::mangleCXXName(const NamedDecl *D,
                                             raw_ostream &Out) {
  assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
         "Invalid mangleName() call, argument is not a variable or function!");
  assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
         "Invalid mangleName() call on 'structor decl!");
  PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
                                 getASTContext().getSourceManager(),
                                 "Mangling declaration");
  CXXNameMangler Mangler(*this, Out, D);
  Mangler.mangle(D);
}
void ItaniumMangleContextImpl::mangleCXXCtor(const CXXConstructorDecl *D,
CXXCtorType Type,
raw_ostream &Out) {
CXXNameMangler Mangler(*this, Out, D, Type);
Mangler.mangle(D);
}
void ItaniumMangleContextImpl::mangleCXXDtor(const CXXDestructorDecl *D,
CXXDtorType Type,
raw_ostream &Out) {
CXXNameMangler Mangler(*this, Out, D, Type);
Mangler.mangle(D);
}
void ItaniumMangleContextImpl::mangleCXXCtorComdat(const CXXConstructorDecl *D,
raw_ostream &Out) {
CXXNameMangler Mangler(*this, Out, D, Ctor_Comdat);
Mangler.mangle(D);
}
void ItaniumMangleContextImpl::mangleCXXDtorComdat(const CXXDestructorDecl *D,
raw_ostream &Out) {
CXXNameMangler Mangler(*this, Out, D, Dtor_Comdat);
Mangler.mangle(D);
}
void ItaniumMangleContextImpl::mangleThunk(const CXXMethodDecl *MD,
                                           const ThunkInfo &Thunk,
                                           raw_ostream &Out) {
  //  <special-name> ::= T <call-offset> <base encoding>
  //                      # base is the nominal target function of thunk
  //  <special-name> ::= Tc <call-offset> <call-offset> <base encoding>
  //                      # base is the nominal target function of thunk
  //                      # first call-offset is 'this' adjustment
  //                      # second call-offset is result adjustment
  assert(!isa<CXXDestructorDecl>(MD) &&
         "Use mangleCXXDtor for destructor decls!");
  CXXNameMangler Mangler(*this, Out);
  Mangler.getStream() << "_ZT";
  // Covariant thunks (those that adjust the return value) use the "Tc" form
  // with two call-offsets.
  if (!Thunk.Return.isEmpty())
    Mangler.getStream() << 'c';
  // Mangle the 'this' pointer adjustment.
  Mangler.mangleCallOffset(Thunk.This.NonVirtual,
                           Thunk.This.Virtual.Itanium.VCallOffsetOffset);
  // Mangle the return pointer adjustment if there is one.
  if (!Thunk.Return.isEmpty())
    Mangler.mangleCallOffset(Thunk.Return.NonVirtual,
                             Thunk.Return.Virtual.Itanium.VBaseOffsetOffset);
  Mangler.mangleFunctionEncoding(MD);
}
void ItaniumMangleContextImpl::mangleCXXDtorThunk(
    const CXXDestructorDecl *DD, CXXDtorType Type,
    const ThisAdjustment &ThisAdjustment, raw_ostream &Out) {
  //  <special-name> ::= T <call-offset> <base encoding>
  //                      # base is the nominal target function of thunk
  // Destructor thunks never adjust the return value, so only the 'this'
  // adjustment is mangled; the dtor variant is carried by Type.
  CXXNameMangler Mangler(*this, Out, DD, Type);
  Mangler.getStream() << "_ZT";
  // Mangle the 'this' pointer adjustment.
  Mangler.mangleCallOffset(ThisAdjustment.NonVirtual,
                           ThisAdjustment.Virtual.Itanium.VCallOffsetOffset);
  Mangler.mangleFunctionEncoding(DD);
}
/// Returns the mangled name for a guard variable for the passed in VarDecl.
void ItaniumMangleContextImpl::mangleStaticGuardVariable(const VarDecl *D,
raw_ostream &Out) {
// <special-name> ::= GV <object name> # Guard variable for one-time
// # initialization
CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_ZGV";
Mangler.mangleName(D);
}
void ItaniumMangleContextImpl::mangleDynamicInitializer(const VarDecl *MD,
                                                        raw_ostream &Out) {
  // Dynamic-initializer symbols are internal in the Itanium ABI, so the
  // exact name does not matter; Clang traditionally emits this fixed name
  // and lets LLVM uniquify it to avoid duplicate symbols.
  Out << "__cxx_global_var_init";
}
void ItaniumMangleContextImpl::mangleDynamicAtExitDestructor(const VarDecl *D,
raw_ostream &Out) {
// Prefix the mangling of D with __dtor_.
CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "__dtor_";
if (shouldMangleDeclName(D))
Mangler.mangle(D);
else
Mangler.getStream() << D->getName();
}
void ItaniumMangleContextImpl::mangleSEHFilterExpression(
const NamedDecl *EnclosingDecl, raw_ostream &Out) {
CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "__filt_";
if (shouldMangleDeclName(EnclosingDecl))
Mangler.mangle(EnclosingDecl);
else
Mangler.getStream() << EnclosingDecl->getName();
}
void ItaniumMangleContextImpl::mangleSEHFinallyBlock(
const NamedDecl *EnclosingDecl, raw_ostream &Out) {
CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "__fin_";
if (shouldMangleDeclName(EnclosingDecl))
Mangler.mangle(EnclosingDecl);
else
Mangler.getStream() << EnclosingDecl->getName();
}
void ItaniumMangleContextImpl::mangleItaniumThreadLocalInit(const VarDecl *D,
raw_ostream &Out) {
// <special-name> ::= TH <object name>
CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_ZTH";
Mangler.mangleName(D);
}
void
ItaniumMangleContextImpl::mangleItaniumThreadLocalWrapper(const VarDecl *D,
raw_ostream &Out) {
// <special-name> ::= TW <object name>
CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_ZTW";
Mangler.mangleName(D);
}
void ItaniumMangleContextImpl::mangleReferenceTemporary(const VarDecl *D,
unsigned ManglingNumber,
raw_ostream &Out) {
// We match the GCC mangling here.
// <special-name> ::= GR <object name>
CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_ZGR";
Mangler.mangleName(D);
assert(ManglingNumber > 0 && "Reference temporary mangling number is zero!");
Mangler.mangleSeqID(ManglingNumber - 1);
}
void ItaniumMangleContextImpl::mangleCXXVTable(const CXXRecordDecl *RD,
raw_ostream &Out) {
// <special-name> ::= TV <type> # virtual table
CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_ZTV";
Mangler.mangleNameOrStandardSubstitution(RD);
}
void ItaniumMangleContextImpl::mangleCXXVTT(const CXXRecordDecl *RD,
raw_ostream &Out) {
// <special-name> ::= TT <type> # VTT structure
CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_ZTT";
Mangler.mangleNameOrStandardSubstitution(RD);
}
void ItaniumMangleContextImpl::mangleCXXCtorVTable(const CXXRecordDecl *RD,
int64_t Offset,
const CXXRecordDecl *Type,
raw_ostream &Out) {
// <special-name> ::= TC <type> <offset number> _ <base type>
CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "_ZTC";
Mangler.mangleNameOrStandardSubstitution(RD);
Mangler.getStream() << Offset;
Mangler.getStream() << '_';
Mangler.mangleNameOrStandardSubstitution(Type);
}
void ItaniumMangleContextImpl::mangleCXXRTTI(QualType Ty, raw_ostream &Out) {
  // <special-name> ::= TI <type>   # typeinfo structure
  assert(!Ty.hasQualifiers() && "RTTI info cannot have top-level qualifiers");
  CXXNameMangler RTTIMangler(*this, Out);
  RTTIMangler.getStream() << "_ZTI";
  RTTIMangler.mangleType(Ty);
}
void ItaniumMangleContextImpl::mangleCXXRTTIName(QualType Ty,
                                                 raw_ostream &Out) {
  // <special-name> ::= TS <type>   # typeinfo name (null terminated byte
  //                                # string)
  CXXNameMangler NameMangler(*this, Out);
  NameMangler.getStream() << "_ZTS";
  NameMangler.mangleType(Ty);
}
void ItaniumMangleContextImpl::mangleTypeName(QualType Ty, raw_ostream &Out) {
  // Type names reuse the RTTI-name mangling ("_ZTS<type>").
  mangleCXXRTTIName(Ty, Out);
}
// Emits the bitset identifier used for CFI vtable checks: internal-linkage
// classes are prefixed with "[<main file name>]" to disambiguate across
// translation units, followed by the mangled record type.
void ItaniumMangleContextImpl::mangleCXXVTableBitSet(const CXXRecordDecl *RD,
                                                     raw_ostream &Out) {
  if (!RD->isExternallyVisible()) {
    // This part of the identifier needs to be unique across all translation
    // units in the linked program. The scheme fails if multiple translation
    // units are compiled using the same relative source file path, or if
    // multiple translation units are built from the same source file.
    // NOTE(review): getFileEntryForID() is assumed non-null for the main
    // file — confirm for in-memory/buffer-only inputs.
    SourceManager &SM = getASTContext().getSourceManager();
    Out << "[" << SM.getFileEntryForID(SM.getMainFileID())->getName() << "]";
  }
  CXXNameMangler Mangler(*this, Out);
  Mangler.mangleType(QualType(RD->getTypeForDecl(), 0));
}
void ItaniumMangleContextImpl::mangleStringLiteral(const StringLiteral *,
                                                   raw_ostream &) {
  // String literals are never mangled under the Itanium ABI.
  llvm_unreachable("Can't mangle string literals");
}
// Factory for the Itanium-ABI mangling context implementation.
ItaniumMangleContext *
ItaniumMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
  return new ItaniumMangleContextImpl(Context, Diags);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/CXXABI.h | //===----- CXXABI.h - Interface to C++ ABIs ---------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides an abstract class for C++ AST support. Concrete
// subclasses of this implement AST support for specific C++ ABIs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_AST_CXXABI_H
#define LLVM_CLANG_LIB_AST_CXXABI_H
#include "clang/AST/Type.h"
namespace clang {
class ASTContext;
class CXXConstructorDecl;
class Expr;
class MemberPointerType;
class MangleNumberingContext;
/// Implements C++ ABI-specific semantic analysis functions.
/// Implements C++ ABI-specific semantic analysis functions.
class CXXABI {
public:
  virtual ~CXXABI();
  /// Returns the width and alignment of a member pointer in bits.
  virtual std::pair<uint64_t, unsigned>
  getMemberPointerWidthAndAlign(const MemberPointerType *MPT) const = 0;
  /// Returns the default calling convention for C++ methods.
  virtual CallingConv getDefaultMethodCallConv(bool isVariadic) const = 0;
  /// Returns whether the given class is nearly empty, with just virtual
  /// pointers and no data except possibly virtual bases.
  virtual bool isNearlyEmpty(const CXXRecordDecl *RD) const = 0;
  /// Returns a new mangling number context for this C++ ABI.
  /// Caller takes ownership of the returned object.
  virtual MangleNumberingContext *createMangleNumberingContext() const = 0;
  /// Adds a mapping from class to copy constructor for this C++ ABI.
  virtual void addCopyConstructorForExceptionObject(CXXRecordDecl *,
                                                    CXXConstructorDecl *) = 0;
  /// Retrieves the mapping from class to copy constructor for this C++ ABI.
  virtual const CXXConstructorDecl *
  getCopyConstructorForExceptionObject(CXXRecordDecl *) = 0;
  /// Records the default-argument expression for parameter ParmIdx of the
  /// given constructor.
  virtual void addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
                                               unsigned ParmIdx, Expr *DAE) = 0;
  /// Retrieves the default-argument expression previously recorded for
  /// parameter ParmIdx of the given constructor.
  virtual Expr *getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
                                                unsigned ParmIdx) = 0;
};
/// Creates an instance of a C++ ABI class.
CXXABI *CreateItaniumCXXABI(ASTContext &Ctx);
CXXABI *CreateMicrosoftCXXABI(ASTContext &Ctx);
}
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/HlslBuiltinTypeDeclBuilder.cpp | ///////////////////////////////////////////////////////////////////////////////
// //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
///////////////////////////////////////////////////////////////////////////////
#include "clang/AST/HlslBuiltinTypeDeclBuilder.h"
#include "dxc/Support/Global.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/TypeLoc.h"
using namespace clang;
using namespace hlsl;
static const SourceLocation NoLoc; // no source location attribution available
BuiltinTypeDeclBuilder::BuiltinTypeDeclBuilder(DeclContext *declContext,
                                               StringRef name,
                                               TagDecl::TagKind tagKind) {
  // Create the (implicit) record up front; its type is created lazily
  // (DelayTypeCreation) so template parameters can be attached before the
  // definition is started.
  ASTContext &context = declContext->getParentASTContext();
  IdentifierInfo &id = context.Idents.get(name, tok::TokenKind::identifier);
  m_recordDecl =
      CXXRecordDecl::Create(context, tagKind, declContext, NoLoc, NoLoc, &id,
                            nullptr, /* DelayTypeCreation */ true);
  m_recordDecl->setImplicit(true);
}
TemplateTypeParmDecl *BuiltinTypeDeclBuilder::addTypeTemplateParam(
    StringRef name, TypeSourceInfo *defaultValue, bool parameterPack) {
  // Template parameters may only be added before the definition begins.
  DXASSERT_NOMSG(!m_recordDecl->isBeingDefined() &&
                 !m_recordDecl->isCompleteDefinition());
  ASTContext &context = m_recordDecl->getASTContext();
  const unsigned position = (unsigned)m_templateParams.size();
  TemplateTypeParmDecl *param = TemplateTypeParmDecl::Create(
      context, m_recordDecl->getDeclContext(), NoLoc, NoLoc,
      /* TemplateDepth */ 0, position,
      &context.Idents.get(name, tok::TokenKind::identifier),
      /* Typename */ false, parameterPack);
  if (defaultValue != nullptr)
    param->setDefaultArgument(defaultValue);
  m_templateParams.emplace_back(param);
  return param;
}
TemplateTypeParmDecl *
BuiltinTypeDeclBuilder::addTypeTemplateParam(StringRef name,
                                             QualType defaultValue) {
  // Wrap the default type (if any) in trivial source info and delegate to
  // the TypeSourceInfo overload.
  TypeSourceInfo *info =
      defaultValue.isNull()
          ? nullptr
          : m_recordDecl->getASTContext().getTrivialTypeSourceInfo(
                defaultValue);
  return addTypeTemplateParam(name, info);
}
NonTypeTemplateParmDecl *BuiltinTypeDeclBuilder::addIntegerTemplateParam(
    StringRef name, QualType type, Optional<int64_t> defaultValue) {
  // Non-type template parameters may only be added before the definition
  // begins.
  DXASSERT_NOMSG(!m_recordDecl->isBeingDefined() &&
                 !m_recordDecl->isCompleteDefinition());
  ASTContext &context = m_recordDecl->getASTContext();
  const unsigned position = (unsigned)m_templateParams.size();
  NonTypeTemplateParmDecl *param = NonTypeTemplateParmDecl::Create(
      context, m_recordDecl->getDeclContext(), NoLoc, NoLoc,
      /* TemplateDepth */ 0, position,
      &context.Idents.get(name, tok::TokenKind::identifier), type,
      /* ParameterPack */ false, context.getTrivialTypeSourceInfo(type));
  if (defaultValue.hasValue()) {
    // Materialize the default as an integer literal of the parameter's type.
    llvm::APInt value(context.getIntWidth(type), defaultValue.getValue());
    param->setDefaultArgument(
        IntegerLiteral::Create(context, value, type, NoLoc));
  }
  m_templateParams.emplace_back(param);
  return param;
}
// Opens the record's definition. If template parameters were added, this
// first creates the enclosing ClassTemplateDecl and its injected-class-name
// specialization; otherwise the plain record is registered with its parent
// context. The record is marked 'final' and startDefinition() is called.
void BuiltinTypeDeclBuilder::startDefinition() {
  DXASSERT_NOMSG(!m_recordDecl->isBeingDefined() &&
                 !m_recordDecl->isCompleteDefinition());
  ASTContext &astContext = m_recordDecl->getASTContext();
  DeclContext *declContext = m_recordDecl->getDeclContext();
  if (!m_templateParams.empty()) {
    TemplateParameterList *templateParameterList =
        TemplateParameterList::Create(astContext, NoLoc, NoLoc,
                                      m_templateParams.data(),
                                      m_templateParams.size(), NoLoc);
    m_templateDecl = ClassTemplateDecl::Create(
        astContext, declContext, NoLoc,
        DeclarationName(m_recordDecl->getIdentifier()), templateParameterList,
        m_recordDecl, nullptr);
    m_recordDecl->setDescribedClassTemplate(m_templateDecl);
    m_templateDecl->setImplicit(true);
    m_templateDecl->setLexicalDeclContext(declContext);
    // Only the template decl goes into the parent context; the record is
    // reachable through it.
    declContext->addDecl(m_templateDecl);
    // Requesting the class name specialization will fault in required types.
    QualType T = m_templateDecl->getInjectedClassNameSpecialization();
    T = astContext.getInjectedClassNameType(m_recordDecl, T);
    assert(T->isDependentType() && "Class template type is not dependent?");
  } else {
    declContext->addDecl(m_recordDecl);
  }
  m_recordDecl->setLexicalDeclContext(declContext);
  m_recordDecl->addAttr(
      FinalAttr::CreateImplicit(astContext, FinalAttr::Keyword_final));
  m_recordDecl->startDefinition();
}
FieldDecl *BuiltinTypeDeclBuilder::addField(StringRef name, QualType type,
                                            AccessSpecifier access) {
  // Fields may only be added while the record definition is open.
  DXASSERT_NOMSG(m_recordDecl->isBeingDefined());
  ASTContext &context = m_recordDecl->getASTContext();
  IdentifierInfo &id = context.Idents.get(name, tok::TokenKind::identifier);
  TypeSourceInfo *typeInfo = context.getTrivialTypeSourceInfo(type, NoLoc);
  FieldDecl *field = FieldDecl::Create(
      context, m_recordDecl, NoLoc, NoLoc, &id, type, typeInfo, nullptr,
      /* Mutable */ false, InClassInitStyle::ICIS_NoInit);
  field->setAccess(access);
  field->setImplicit(true);
  m_recordDecl->addDecl(field);
#ifndef NDEBUG
  // Verify that we can read the field member back from the record.
  DeclContext::lookup_result lookupResult =
      m_recordDecl->lookup(DeclarationName(&id));
  DXASSERT(!lookupResult.empty(), "Field cannot be looked up");
#endif
  return field;
}
CXXRecordDecl *BuiltinTypeDeclBuilder::completeDefinition() {
  // Close the record's definition, opening it first if the caller never did.
  DXASSERT_NOMSG(!m_recordDecl->isCompleteDefinition());
  if (!m_recordDecl->isBeingDefined())
    startDefinition();
  m_recordDecl->completeDefinition();
  return m_recordDecl;
}
ClassTemplateDecl *BuiltinTypeDeclBuilder::getTemplateDecl() const {
  // Only meaningful once the definition has at least been started; null for
  // non-template records.
  DXASSERT_NOMSG(m_recordDecl->isBeingDefined() ||
                 m_recordDecl->isCompleteDefinition());
  return m_templateDecl;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/VTableBuilder.cpp | //===--- VTableBuilder.cpp - C++ vtable layout builder --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with generation of the layout of virtual tables.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/VTableBuilder.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstdio>
using namespace clang;
#define DUMP_OVERRIDERS 0
namespace {
/// BaseOffset - Represents an offset from a derived class to a direct or
/// indirect base class.
struct BaseOffset {
  /// DerivedClass - The derived class.
  const CXXRecordDecl *DerivedClass;
  /// VirtualBase - If the path from the derived class to the base class
  /// involves virtual base classes, this holds the declaration of the last
  /// virtual base in this path (i.e. closest to the base class).
  const CXXRecordDecl *VirtualBase;
  /// NonVirtualOffset - The offset from the derived class to the base class.
  /// (Or the offset from the virtual base class to the base class, if the
  /// path from the derived class to the base class involves a virtual base
  /// class.)
  CharUnits NonVirtualOffset;
  // Default state: no classes, no virtual base, zero adjustment (isEmpty()).
  BaseOffset() : DerivedClass(nullptr), VirtualBase(nullptr),
                 NonVirtualOffset(CharUnits::Zero()) { }
  BaseOffset(const CXXRecordDecl *DerivedClass,
             const CXXRecordDecl *VirtualBase, CharUnits NonVirtualOffset)
    : DerivedClass(DerivedClass), VirtualBase(VirtualBase),
      NonVirtualOffset(NonVirtualOffset) { }
  // An offset is "empty" when reaching the base requires no adjustment.
  bool isEmpty() const { return NonVirtualOffset.isZero() && !VirtualBase; }
};
/// FinalOverriders - Contains the final overrider member functions for all
/// member functions in the base subobjects of a class.
class FinalOverriders {
public:
/// OverriderInfo - Information about a final overrider.
struct OverriderInfo {
/// Method - The method decl of the overrider.
const CXXMethodDecl *Method;
/// VirtualBase - The virtual base class subobject of this overrider.
/// Note that this records the closest derived virtual base class subobject.
const CXXRecordDecl *VirtualBase;
/// Offset - the base offset of the overrider's parent in the layout class.
CharUnits Offset;
OverriderInfo() : Method(nullptr), VirtualBase(nullptr),
Offset(CharUnits::Zero()) { }
};
private:
  /// MostDerivedClass - The most derived class for which the final overriders
  /// are stored.
  const CXXRecordDecl *MostDerivedClass;

  /// MostDerivedClassOffset - If we're building final overriders for a
  /// construction vtable, this holds the offset from the layout class to the
  /// most derived class.
  const CharUnits MostDerivedClassOffset;

  /// LayoutClass - The class we're using for layout information. Will be
  /// different than the most derived class if the final overriders are for a
  /// construction vtable.
  const CXXRecordDecl *LayoutClass;

  /// Context - The ASTContext used to query record layouts.
  ASTContext &Context;

  /// MostDerivedClassLayout - the AST record layout of the most derived class.
  const ASTRecordLayout &MostDerivedClassLayout;

  /// MethodBaseOffsetPairTy - Uniquely identifies a member function
  /// in a base subobject.
  typedef std::pair<const CXXMethodDecl *, CharUnits> MethodBaseOffsetPairTy;

  typedef llvm::DenseMap<MethodBaseOffsetPairTy,
                         OverriderInfo> OverridersMapTy;

  /// OverridersMap - The final overriders for all virtual member functions of
  /// all the base subobjects of the most derived class.
  OverridersMapTy OverridersMap;

  /// SubobjectsToOffsetsMapTy - A mapping from a base subobject (represented
  /// as a record decl and a subobject number) and its offsets in the most
  /// derived class as well as the layout class.
  typedef llvm::DenseMap<std::pair<const CXXRecordDecl *, unsigned>,
                         CharUnits> SubobjectOffsetMapTy;

  /// SubobjectCountMapTy - Counts non-virtual subobjects per record decl, used
  /// to assign unique subobject numbers during ComputeBaseOffsets.
  typedef llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCountMapTy;

  /// ComputeBaseOffsets - Compute the offsets for all base subobjects of the
  /// given base.
  void ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
                          CharUnits OffsetInLayoutClass,
                          SubobjectOffsetMapTy &SubobjectOffsets,
                          SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
                          SubobjectCountMapTy &SubobjectCounts);

  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;

  /// dump - dump the final overriders for a base subobject, and all its direct
  /// and indirect base subobjects.
  void dump(raw_ostream &Out, BaseSubobject Base,
            VisitedVirtualBasesSetTy& VisitedVirtualBases);
public:
  /// Computes the final overriders for all virtual member functions of all
  /// base subobjects of MostDerivedClass; results are queried via getOverrider.
  FinalOverriders(const CXXRecordDecl *MostDerivedClass,
                  CharUnits MostDerivedClassOffset,
                  const CXXRecordDecl *LayoutClass);

  /// getOverrider - Get the final overrider for the given method declaration in
  /// the subobject with the given base offset.
  OverriderInfo getOverrider(const CXXMethodDecl *MD,
                             CharUnits BaseOffset) const {
    assert(OverridersMap.count(std::make_pair(MD, BaseOffset)) &&
           "Did not find overrider!");

    return OverridersMap.lookup(std::make_pair(MD, BaseOffset));
  }

  /// dump - dump the final overriders (to llvm::errs()).
  void dump() {
    VisitedVirtualBasesSetTy VisitedVirtualBases;

    dump(llvm::errs(), BaseSubobject(MostDerivedClass, CharUnits::Zero()),
         VisitedVirtualBases);
  }
};
FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
                                 CharUnits MostDerivedClassOffset,
                                 const CXXRecordDecl *LayoutClass)
    : MostDerivedClass(MostDerivedClass),
      MostDerivedClassOffset(MostDerivedClassOffset), LayoutClass(LayoutClass),
      Context(MostDerivedClass->getASTContext()),
      MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)) {

  // Compute base offsets: every base subobject gets its offset both in the
  // most derived class and in the layout class.
  SubobjectOffsetMapTy SubobjectOffsets;
  SubobjectOffsetMapTy SubobjectLayoutClassOffsets;
  SubobjectCountMapTy SubobjectCounts;
  ComputeBaseOffsets(BaseSubobject(MostDerivedClass, CharUnits::Zero()),
                     /*IsVirtual=*/false,
                     MostDerivedClassOffset,
                     SubobjectOffsets, SubobjectLayoutClassOffsets,
                     SubobjectCounts);

  // Get the final overriders.
  CXXFinalOverriderMap FinalOverriders;
  MostDerivedClass->getFinalOverriders(FinalOverriders);

  for (CXXFinalOverriderMap::const_iterator I = FinalOverriders.begin(),
       E = FinalOverriders.end(); I != E; ++I) {
    const CXXMethodDecl *MD = I->first;
    const OverridingMethods& Methods = I->second;

    // NOTE(review): the inner loop deliberately shadows the outer I/E; the
    // outer iterators are not used past this point.
    for (OverridingMethods::const_iterator I = Methods.begin(),
         E = Methods.end(); I != E; ++I) {
      unsigned SubobjectNumber = I->first;
      // Translate the (record, subobject number) pair into concrete offsets.
      assert(SubobjectOffsets.count(std::make_pair(MD->getParent(),
                                                   SubobjectNumber)) &&
             "Did not find subobject offset!");

      CharUnits BaseOffset = SubobjectOffsets[std::make_pair(MD->getParent(),
                                                            SubobjectNumber)];

      assert(I->second.size() == 1 && "Final overrider is not unique!");
      const UniqueVirtualMethod &Method = I->second.front();

      const CXXRecordDecl *OverriderRD = Method.Method->getParent();
      assert(SubobjectLayoutClassOffsets.count(
             std::make_pair(OverriderRD, Method.Subobject))
             && "Did not find subobject offset!");
      CharUnits OverriderOffset =
        SubobjectLayoutClassOffsets[std::make_pair(OverriderRD,
                                                   Method.Subobject)];

      OverriderInfo& Overrider = OverridersMap[std::make_pair(MD, BaseOffset)];
      assert(!Overrider.Method && "Overrider should not exist yet!");

      Overrider.Offset = OverriderOffset;
      Overrider.Method = Method.Method;
      Overrider.VirtualBase = Method.InVirtualSubobject;
    }
  }

#if DUMP_OVERRIDERS
  // And dump them (for now).
  dump();
#endif
}
/// ComputeBaseOffset - Given an inheritance path from DerivedRD down to a
/// base, split the adjustment into a virtual-base component and a flat
/// non-virtual byte offset.
static BaseOffset ComputeBaseOffset(const ASTContext &Context,
                                    const CXXRecordDecl *DerivedRD,
                                    const CXXBasePath &Path) {
  CharUnits NonVirtualOffset = CharUnits::Zero();

  // Scan the path from the base end back towards the derived class, looking
  // for the virtual base nearest the end of the path. Everything after it
  // contributes a statically-known (non-virtual) offset.
  unsigned NonVirtualStart = 0;
  const CXXRecordDecl *VirtualBase = nullptr;
  for (unsigned Idx = Path.size(); Idx > 0; --Idx) {
    const CXXBasePathElement &Element = Path[Idx - 1];

    if (Element.Base->isVirtual()) {
      NonVirtualStart = Idx;
      QualType VBaseType = Element.Base->getType();
      VirtualBase = VBaseType->getAsCXXRecordDecl();
      break;
    }
  }

  // Accumulate the offsets of the remaining, non-virtual inheritance steps.
  for (unsigned Idx = NonVirtualStart, NumElements = Path.size();
       Idx != NumElements; ++Idx) {
    const CXXBasePathElement &Element = Path[Idx];

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(Element.Class);
    const CXXRecordDecl *Base = Element.Base->getType()->getAsCXXRecordDecl();

    NonVirtualOffset += Layout.getBaseClassOffset(Base);
  }

  // FIXME: This should probably use CharUnits or something. Maybe we should
  // even change the base offsets in ASTRecordLayout to be specified in
  // CharUnits.
  return BaseOffset(DerivedRD, VirtualBase, NonVirtualOffset);
}
/// ComputeBaseOffset - Compute the offset adjustment from DerivedRD to BaseRD.
/// DerivedRD must actually derive from BaseRD.
static BaseOffset ComputeBaseOffset(const ASTContext &Context,
                                    const CXXRecordDecl *BaseRD,
                                    const CXXRecordDecl *DerivedRD) {
  // Record full inheritance paths; any one path suffices, so ambiguity
  // detection is disabled.
  CXXBasePaths Paths(/*FindAmbiguities=*/false,
                     /*RecordPaths=*/true, /*DetectVirtual=*/false);

  if (DerivedRD->isDerivedFrom(BaseRD, Paths))
    return ComputeBaseOffset(Context, DerivedRD, Paths.front());

  llvm_unreachable("Class must be derived from the passed in base class!");
}
/// ComputeReturnAdjustmentBaseOffset - Compute the covariant-return adjustment
/// needed when DerivedMD overrides BaseMD with a pointer/reference return type
/// derived from BaseMD's. Returns an empty BaseOffset if no adjustment is
/// needed.
static BaseOffset
ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
                                  const CXXMethodDecl *DerivedMD,
                                  const CXXMethodDecl *BaseMD) {
  const FunctionType *BaseFT = BaseMD->getType()->getAs<FunctionType>();
  const FunctionType *DerivedFT = DerivedMD->getType()->getAs<FunctionType>();

  // Canonicalize the return types.
  CanQualType CanDerivedReturnType =
      Context.getCanonicalType(DerivedFT->getReturnType());
  CanQualType CanBaseReturnType =
      Context.getCanonicalType(BaseFT->getReturnType());

  assert(CanDerivedReturnType->getTypeClass() ==
         CanBaseReturnType->getTypeClass() &&
         "Types must have same type class!");

  if (CanDerivedReturnType == CanBaseReturnType) {
    // No adjustment needed.
    return BaseOffset();
  }

  // Covariant returns are either both references or both pointers; strip one
  // level of indirection to get at the class types.
  if (isa<ReferenceType>(CanDerivedReturnType)) {
    CanDerivedReturnType =
      CanDerivedReturnType->getAs<ReferenceType>()->getPointeeType();
    CanBaseReturnType =
      CanBaseReturnType->getAs<ReferenceType>()->getPointeeType();
  } else if (isa<PointerType>(CanDerivedReturnType)) {
    CanDerivedReturnType =
      CanDerivedReturnType->getAs<PointerType>()->getPointeeType();
    CanBaseReturnType =
      CanBaseReturnType->getAs<PointerType>()->getPointeeType();
  } else {
    llvm_unreachable("Unexpected return type!");
  }

  // We need to compare unqualified types here; consider
  //   const T *Base::foo();
  //   T *Derived::foo();
  if (CanDerivedReturnType.getUnqualifiedType() ==
      CanBaseReturnType.getUnqualifiedType()) {
    // No adjustment needed.
    return BaseOffset();
  }

  const CXXRecordDecl *DerivedRD =
    cast<CXXRecordDecl>(cast<RecordType>(CanDerivedReturnType)->getDecl());

  const CXXRecordDecl *BaseRD =
    cast<CXXRecordDecl>(cast<RecordType>(CanBaseReturnType)->getDecl());

  return ComputeBaseOffset(Context, BaseRD, DerivedRD);
}
/// ComputeBaseOffsets - Recursively record, for every base subobject of Base,
/// its offset in the most derived class (SubobjectOffsets) and in the layout
/// class (SubobjectLayoutClassOffsets). Non-virtual subobjects of the same
/// class are distinguished by a running count (SubobjectCounts); virtual bases
/// always use subobject number 0 and are visited only once.
void
FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
                              CharUnits OffsetInLayoutClass,
                              SubobjectOffsetMapTy &SubobjectOffsets,
                              SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
                              SubobjectCountMapTy &SubobjectCounts) {
  const CXXRecordDecl *RD = Base.getBase();

  unsigned SubobjectNumber = 0;
  if (!IsVirtual)
    SubobjectNumber = ++SubobjectCounts[RD];

  // Set up the subobject to offset mapping.
  assert(!SubobjectOffsets.count(std::make_pair(RD, SubobjectNumber))
         && "Subobject offset already exists!");
  assert(!SubobjectLayoutClassOffsets.count(std::make_pair(RD, SubobjectNumber))
         && "Subobject offset already exists!");

  SubobjectOffsets[std::make_pair(RD, SubobjectNumber)] = Base.getBaseOffset();
  SubobjectLayoutClassOffsets[std::make_pair(RD, SubobjectNumber)] =
    OffsetInLayoutClass;

  // Traverse our bases.
  for (const auto &B : RD->bases()) {
    const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();

    CharUnits BaseOffset;
    CharUnits BaseOffsetInLayoutClass;
    if (B.isVirtual()) {
      // Check if we've visited this virtual base before.
      if (SubobjectOffsets.count(std::make_pair(BaseDecl, 0)))
        continue;

      // Virtual base offsets come from the *class* layouts, not from Base's
      // own offset.
      const ASTRecordLayout &LayoutClassLayout =
        Context.getASTRecordLayout(LayoutClass);

      BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
      BaseOffsetInLayoutClass =
        LayoutClassLayout.getVBaseClassOffset(BaseDecl);
    } else {
      // Non-virtual bases are at a fixed offset from the current subobject.
      const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
      CharUnits Offset = Layout.getBaseClassOffset(BaseDecl);

      BaseOffset = Base.getBaseOffset() + Offset;
      BaseOffsetInLayoutClass = OffsetInLayoutClass + Offset;
    }

    ComputeBaseOffsets(BaseSubobject(BaseDecl, BaseOffset),
                       B.isVirtual(), BaseOffsetInLayoutClass,
                       SubobjectOffsets, SubobjectLayoutClassOffsets,
                       SubobjectCounts);
  }
}
/// dump - Print the final overriders for Base and (first, recursively) all of
/// its polymorphic base subobjects. Debug-only output; the format below is
/// consumed by humans and FileCheck-style tests, so keep it stable.
void FinalOverriders::dump(raw_ostream &Out, BaseSubobject Base,
                           VisitedVirtualBasesSetTy &VisitedVirtualBases) {
  const CXXRecordDecl *RD = Base.getBase();
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  for (const auto &B : RD->bases()) {
    const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();

    // Ignore bases that don't have any virtual member functions.
    if (!BaseDecl->isPolymorphic())
      continue;

    CharUnits BaseOffset;
    if (B.isVirtual()) {
      if (!VisitedVirtualBases.insert(BaseDecl).second) {
        // We've visited this base before.
        continue;
      }

      BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
    } else {
      BaseOffset = Layout.getBaseClassOffset(BaseDecl) + Base.getBaseOffset();
    }

    dump(Out, BaseSubobject(BaseDecl, BaseOffset), VisitedVirtualBases);
  }

  Out << "Final overriders for (";
  RD->printQualifiedName(Out);
  Out << ", ";
  Out << Base.getBaseOffset().getQuantity() << ")\n";

  // Now dump the overriders for this base subobject.
  for (const auto *MD : RD->methods()) {
    if (!MD->isVirtual())
      continue;
    // Overriders are keyed by canonical declarations.
    MD = MD->getCanonicalDecl();

    OverriderInfo Overrider = getOverrider(MD, Base.getBaseOffset());

    Out << "  ";
    MD->printQualifiedName(Out);
    Out << " - (";
    Overrider.Method->printQualifiedName(Out);
    Out << ", " << Overrider.Offset.getQuantity() << ')';

    BaseOffset Offset;
    // Pure virtual functions are never called, so no return adjustment is
    // computed for them.
    if (!Overrider.Method->isPure())
      Offset = ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);

    if (!Offset.isEmpty()) {
      Out << " [ret-adj: ";
      if (Offset.VirtualBase) {
        Offset.VirtualBase->printQualifiedName(Out);
        Out << " vbase, ";
      }

      Out << Offset.NonVirtualOffset.getQuantity() << " nv]";
    }

    Out << "\n";
  }
}
/// VCallOffsetMap - Keeps track of vcall offsets when building a vtable.
/// Methods with the same "virtual signature" (name, qualifiers, parameters —
/// or any two destructors) share a single vcall offset slot.
struct VCallOffsetMap {
  typedef std::pair<const CXXMethodDecl *, CharUnits> MethodAndOffsetPairTy;

  /// Offsets - Keeps track of methods and their offsets.
  // FIXME: This should be a real map and not a vector.
  SmallVector<MethodAndOffsetPairTy, 16> Offsets;

  /// MethodsCanShareVCallOffset - Returns whether two virtual member functions
  /// can share the same vcall offset.
  static bool MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
                                         const CXXMethodDecl *RHS);

public:
  /// AddVCallOffset - Adds a vcall offset to the map. Returns true if the
  /// add was successful, or false if there was already a member function with
  /// the same signature in the map.
  bool AddVCallOffset(const CXXMethodDecl *MD, CharUnits OffsetOffset);

  /// getVCallOffsetOffset - Returns the vcall offset offset (relative to the
  /// vtable address point) for the given virtual member function.
  CharUnits getVCallOffsetOffset(const CXXMethodDecl *MD);

  // empty - Return whether the offset map is empty or not.
  bool empty() const { return Offsets.empty(); }
};
/// HasSameVirtualSignature - Return true if the two methods have matching
/// canonical prototypes (cv-qualifiers and parameter types). Used instead of
/// the overrides list because the two methods need not be related by
/// inheritance.
static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
                                    const CXXMethodDecl *RHS) {
  const FunctionProtoType *LT =
    cast<FunctionProtoType>(LHS->getType().getCanonicalType());
  const FunctionProtoType *RT =
    cast<FunctionProtoType>(RHS->getType().getCanonicalType());

  // Identical canonical types trivially match.
  if (LT == RT)
    return true;

  // cv-qualifiers on 'this' must agree.
  if (LT->getTypeQuals() != RT->getTypeQuals())
    return false;

  // Parameter counts must agree...
  const unsigned NumParams = LT->getNumParams();
  if (NumParams != RT->getNumParams())
    return false;

  // ...as must every parameter type, position by position.
  for (unsigned Idx = 0; Idx != NumParams; ++Idx)
    if (LT->getParamType(Idx) != RT->getParamType(Idx))
      return false;

  return true;
}
/// MethodsCanShareVCallOffset - Two virtual member functions may share a vcall
/// offset slot if they are both destructors, or if they have the same name and
/// the same virtual signature.
bool VCallOffsetMap::MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
                                                const CXXMethodDecl *RHS) {
  assert(LHS->isVirtual() && "LHS must be virtual!");
  // Fixed copy-paste bug: this assert previously reported "LHS must be
  // virtual!" even though it checks RHS.
  assert(RHS->isVirtual() && "RHS must be virtual!");

  // A destructor can share a vcall offset with another destructor.
  if (isa<CXXDestructorDecl>(LHS))
    return isa<CXXDestructorDecl>(RHS);

  // FIXME: We need to check more things here.

  // The methods must have the same name.
  DeclarationName LHSName = LHS->getDeclName();
  DeclarationName RHSName = RHS->getDeclName();
  if (LHSName != RHSName)
    return false;

  // And the same signatures.
  return HasSameVirtualSignature(LHS, RHS);
}
bool VCallOffsetMap::AddVCallOffset(const CXXMethodDecl *MD,
CharUnits OffsetOffset) {
// Check if we can reuse an offset.
for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
return false;
}
// Add the offset.
Offsets.push_back(MethodAndOffsetPairTy(MD, OffsetOffset));
return true;
}
/// getVCallOffsetOffset - Return the previously-recorded vcall offset offset
/// (relative to the vtable address point) for MD. MD must have been added (or
/// share a slot with an added method).
CharUnits VCallOffsetMap::getVCallOffsetOffset(const CXXMethodDecl *MD) {
  for (const MethodAndOffsetPairTy &Entry : Offsets)
    if (MethodsCanShareVCallOffset(Entry.first, MD))
      return Entry.second;

  llvm_unreachable("Should always find a vcall offset offset!");
}
/// VCallAndVBaseOffsetBuilder - Class for building vcall and vbase offsets.
/// All work happens in the constructor; the computed components and offset
/// maps are read back through the accessors.
class VCallAndVBaseOffsetBuilder {
public:
  typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits>
    VBaseOffsetOffsetsMapTy;

private:
  /// MostDerivedClass - The most derived class for which we're building vcall
  /// and vbase offsets.
  const CXXRecordDecl *MostDerivedClass;

  /// LayoutClass - The class we're using for layout information. Will be
  /// different than the most derived class if we're building a construction
  /// vtable.
  const CXXRecordDecl *LayoutClass;

  /// Context - The ASTContext which we will use for layout information.
  ASTContext &Context;

  /// Components - vcall and vbase offset components
  typedef SmallVector<VTableComponent, 64> VTableComponentVectorTy;
  VTableComponentVectorTy Components;

  /// VisitedVirtualBases - Visited virtual bases.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;

  /// VCallOffsets - Keeps track of vcall offsets.
  VCallOffsetMap VCallOffsets;

  /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets,
  /// relative to the address point.
  VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;

  /// FinalOverriders - The final overriders of the most derived class.
  /// (Can be null when we're not building a vtable of the most derived class).
  const FinalOverriders *Overriders;

  /// AddVCallAndVBaseOffsets - Add vcall offsets and vbase offsets for the
  /// given base subobject.
  void AddVCallAndVBaseOffsets(BaseSubobject Base, bool BaseIsVirtual,
                               CharUnits RealBaseOffset);

  /// AddVCallOffsets - Add vcall offsets for the given base subobject.
  void AddVCallOffsets(BaseSubobject Base, CharUnits VBaseOffset);

  /// AddVBaseOffsets - Add vbase offsets for the given class.
  void AddVBaseOffsets(const CXXRecordDecl *Base,
                       CharUnits OffsetInLayoutClass);

  /// getCurrentOffsetOffset - Get the current vcall or vbase offset offset in
  /// chars, relative to the vtable address point.
  CharUnits getCurrentOffsetOffset() const;

public:
  VCallAndVBaseOffsetBuilder(const CXXRecordDecl *MostDerivedClass,
                             const CXXRecordDecl *LayoutClass,
                             const FinalOverriders *Overriders,
                             BaseSubobject Base, bool BaseIsVirtual,
                             CharUnits OffsetInLayoutClass)
    : MostDerivedClass(MostDerivedClass), LayoutClass(LayoutClass),
    Context(MostDerivedClass->getASTContext()), Overriders(Overriders) {

    // Add vcall and vbase offsets.
    AddVCallAndVBaseOffsets(Base, BaseIsVirtual, OffsetInLayoutClass);
  }

  /// Methods for iterating over the components. Iteration is reversed because
  /// the components are emitted in reverse order (they live below the vtable
  /// address point).
  typedef VTableComponentVectorTy::const_reverse_iterator const_iterator;
  const_iterator components_begin() const { return Components.rbegin(); }
  const_iterator components_end() const { return Components.rend(); }

  const VCallOffsetMap &getVCallOffsets() const { return VCallOffsets; }
  const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
    return VBaseOffsetOffsets;
  }
};
/// AddVCallAndVBaseOffsets - Emit the vcall/vbase offset components for Base,
/// recursing into its primary base first so that, in the reversed component
/// order, the derived class's offsets precede the base class's (as the ABI
/// requires).
void
VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
                                                    bool BaseIsVirtual,
                                                    CharUnits RealBaseOffset) {
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base.getBase());

  // Itanium C++ ABI 2.5.2:
  //   ..in classes sharing a virtual table with a primary base class, the vcall
  //   and vbase offsets added by the derived class all come before the vcall
  //   and vbase offsets required by the base class, so that the latter may be
  //   laid out as required by the base class without regard to additions from
  //   the derived class(es).
  //
  // (Since we're emitting the vcall and vbase offsets in reverse order, we'll
  // emit them for the primary base first).
  if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
    bool PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();

    CharUnits PrimaryBaseOffset;

    // Get the base offset of the primary base.
    if (PrimaryBaseIsVirtual) {
      assert(Layout.getVBaseClassOffset(PrimaryBase).isZero() &&
             "Primary vbase should have a zero offset!");

      const ASTRecordLayout &MostDerivedClassLayout =
        Context.getASTRecordLayout(MostDerivedClass);

      PrimaryBaseOffset =
        MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
    } else {
      assert(Layout.getBaseClassOffset(PrimaryBase).isZero() &&
             "Primary base should have a zero offset!");

      PrimaryBaseOffset = Base.getBaseOffset();
    }

    AddVCallAndVBaseOffsets(
      BaseSubobject(PrimaryBase,PrimaryBaseOffset),
      PrimaryBaseIsVirtual, RealBaseOffset);
  }

  AddVBaseOffsets(Base.getBase(), RealBaseOffset);

  // We only want to add vcall offsets for virtual bases.
  if (BaseIsVirtual)
    AddVCallOffsets(Base, RealBaseOffset);
}
/// getCurrentOffsetOffset - Compute the byte offset, relative to the vtable
/// address point, of the next vcall/vbase offset slot. The constant 3 accounts
/// for the three entries immediately above the address point (the vcall
/// offset, the offset-to-top, and the RTTI entry), which the slot index must
/// skip past; components already emitted push the slot further down.
CharUnits VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const {
  const int64_t SlotIndex = -static_cast<int64_t>(Components.size() + 3);

  const CharUnits PointerWidth =
    Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));

  return PointerWidth * SlotIndex;
}
/// AddVCallOffsets - Emit vcall offset components for every virtual member
/// function of Base and its non-virtual bases. VBaseOffset is the offset of
/// the virtual base whose vtable region this is; each vcall offset is the
/// delta from that virtual base to the final overrider's subobject.
void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
                                                 CharUnits VBaseOffset) {
  const CXXRecordDecl *RD = Base.getBase();
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Handle the primary base first.
  // We only want to add vcall offsets if the base is non-virtual; a virtual
  // primary base will have its vcall and vbase offsets emitted already.
  if (PrimaryBase && !Layout.isPrimaryBaseVirtual()) {
    // Get the base offset of the primary base.
    assert(Layout.getBaseClassOffset(PrimaryBase).isZero() &&
           "Primary base should have a zero offset!");

    AddVCallOffsets(BaseSubobject(PrimaryBase, Base.getBaseOffset()),
                    VBaseOffset);
  }

  // Add the vcall offsets.
  for (const auto *MD : RD->methods()) {
    if (!MD->isVirtual())
      continue;
    MD = MD->getCanonicalDecl();

    // NOTE: getCurrentOffsetOffset() depends on Components.size(), so it must
    // be computed before the component is pushed below.
    CharUnits OffsetOffset = getCurrentOffsetOffset();

    // Don't add a vcall offset if we already have one for this member function
    // signature.
    if (!VCallOffsets.AddVCallOffset(MD, OffsetOffset))
      continue;

    CharUnits Offset = CharUnits::Zero();

    if (Overriders) {
      // Get the final overrider.
      FinalOverriders::OverriderInfo Overrider =
        Overriders->getOverrider(MD, Base.getBaseOffset());

      /// The vcall offset is the offset from the virtual base to the object
      /// where the function was overridden.
      Offset = Overrider.Offset - VBaseOffset;
    }

    Components.push_back(
      VTableComponent::MakeVCallOffset(Offset));
  }

  // And iterate over all non-virtual bases (ignoring the primary base).
  for (const auto &B : RD->bases()) {
    if (B.isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
    if (BaseDecl == PrimaryBase)
      continue;

    // Get the base offset of this base.
    CharUnits BaseOffset = Base.getBaseOffset() +
      Layout.getBaseClassOffset(BaseDecl);

    AddVCallOffsets(BaseSubobject(BaseDecl, BaseOffset),
                    VBaseOffset);
  }
}
/// AddVBaseOffsets - Emit a vbase offset component for every virtual base of
/// RD (direct or indirect) not yet visited. The recorded offset is relative to
/// OffsetInLayoutClass, i.e. to the subobject whose vtable this is.
void
VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
                                            CharUnits OffsetInLayoutClass) {
  const ASTRecordLayout &LayoutClassLayout =
    Context.getASTRecordLayout(LayoutClass);

  // Add vbase offsets.
  for (const auto &B : RD->bases()) {
    const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();

    // Check if this is a virtual base that we haven't visited before.
    if (B.isVirtual() && VisitedVirtualBases.insert(BaseDecl).second) {
      CharUnits Offset =
        LayoutClassLayout.getVBaseClassOffset(BaseDecl) - OffsetInLayoutClass;

      // Add the vbase offset offset.
      assert(!VBaseOffsetOffsets.count(BaseDecl) &&
             "vbase offset offset already exists!");

      // getCurrentOffsetOffset() depends on Components.size(); it must be
      // queried before the component is pushed below.
      CharUnits VBaseOffsetOffset = getCurrentOffsetOffset();
      VBaseOffsetOffsets.insert(
          std::make_pair(BaseDecl, VBaseOffsetOffset));

      Components.push_back(
          VTableComponent::MakeVBaseOffset(Offset));
    }

    // Check the base class looking for more vbase offsets.
    AddVBaseOffsets(BaseDecl, OffsetInLayoutClass);
  }
}
/// ItaniumVTableBuilder - Class for building vtable layout information.
/// Construction runs the full layout (LayoutVTable) and the results are
/// exposed through the accessors below.
class ItaniumVTableBuilder {
public:
  /// PrimaryBasesSetVectorTy - A set vector of direct and indirect
  /// primary bases.
  typedef llvm::SmallSetVector<const CXXRecordDecl *, 8>
    PrimaryBasesSetVectorTy;

  typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits>
    VBaseOffsetOffsetsMapTy;

  /// AddressPointsMapTy - Maps a base subobject to the index of its address
  /// point within the component vector.
  typedef llvm::DenseMap<BaseSubobject, uint64_t>
    AddressPointsMapTy;

  /// MethodVTableIndicesTy - Maps a virtual function (as a GlobalDecl, so
  /// destructor variants are distinguished) to its vtable index relative to
  /// the address point.
  typedef llvm::DenseMap<GlobalDecl, int64_t> MethodVTableIndicesTy;

private:
  /// VTables - Global vtable information.
  ItaniumVTableContext &VTables;

  /// MostDerivedClass - The most derived class for which we're building this
  /// vtable.
  const CXXRecordDecl *MostDerivedClass;

  /// MostDerivedClassOffset - If we're building a construction vtable, this
  /// holds the offset from the layout class to the most derived class.
  const CharUnits MostDerivedClassOffset;

  /// MostDerivedClassIsVirtual - Whether the most derived class is a virtual
  /// base. (This only makes sense when building a construction vtable).
  bool MostDerivedClassIsVirtual;

  /// LayoutClass - The class we're using for layout information. Will be
  /// different than the most derived class if we're building a construction
  /// vtable.
  const CXXRecordDecl *LayoutClass;

  /// Context - The ASTContext which we will use for layout information.
  ASTContext &Context;

  /// FinalOverriders - The final overriders of the most derived class.
  const FinalOverriders Overriders;

  /// VCallOffsetsForVBases - Keeps track of vcall offsets for the virtual
  /// bases in this vtable.
  llvm::DenseMap<const CXXRecordDecl *, VCallOffsetMap> VCallOffsetsForVBases;

  /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets for
  /// the most derived class.
  VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;

  /// Components - The components of the vtable being built.
  SmallVector<VTableComponent, 64> Components;

  /// AddressPoints - Address points for the vtable being built.
  AddressPointsMapTy AddressPoints;

  /// MethodInfo - Contains information about a method in a vtable.
  /// (Used for computing 'this' pointer adjustment thunks.)
  struct MethodInfo {
    /// BaseOffset - The base offset of this method.
    const CharUnits BaseOffset;

    /// BaseOffsetInLayoutClass - The base offset in the layout class of this
    /// method.
    const CharUnits BaseOffsetInLayoutClass;

    /// VTableIndex - The index in the vtable that this method has.
    /// (For destructors, this is the index of the complete destructor).
    const uint64_t VTableIndex;

    MethodInfo(CharUnits BaseOffset, CharUnits BaseOffsetInLayoutClass,
               uint64_t VTableIndex)
      : BaseOffset(BaseOffset),
      BaseOffsetInLayoutClass(BaseOffsetInLayoutClass),
      VTableIndex(VTableIndex) { }

    MethodInfo()
      : BaseOffset(CharUnits::Zero()),
      BaseOffsetInLayoutClass(CharUnits::Zero()),
      VTableIndex(0) { }
  };

  typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy;

  /// MethodInfoMap - The information for all methods in the vtable we're
  /// currently building.
  MethodInfoMapTy MethodInfoMap;

  /// MethodVTableIndices - Contains the index (relative to the vtable address
  /// point) where the function pointer for a virtual function is stored.
  MethodVTableIndicesTy MethodVTableIndices;

  typedef llvm::DenseMap<uint64_t, ThunkInfo> VTableThunksMapTy;

  /// VTableThunks - The thunks by vtable index in the vtable currently being
  /// built.
  VTableThunksMapTy VTableThunks;

  typedef SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
  typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;

  /// Thunks - A map that contains all the thunks needed for all methods in the
  /// most derived class for which the vtable is currently being built.
  ThunksMapTy Thunks;

  /// AddThunk - Add a thunk for the given method.
  void AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk);

  /// ComputeThisAdjustments - Compute the 'this' pointer adjustments for the
  /// part of the vtable we're currently building.
  void ComputeThisAdjustments();

  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;

  /// PrimaryVirtualBases - All known virtual bases who are a primary base of
  /// some other base.
  VisitedVirtualBasesSetTy PrimaryVirtualBases;

  /// ComputeReturnAdjustment - Compute the return adjustment given a return
  /// adjustment base offset.
  ReturnAdjustment ComputeReturnAdjustment(BaseOffset Offset);

  /// ComputeThisAdjustmentBaseOffset - Compute the base offset for adjusting
  /// the 'this' pointer from the base subobject to the derived subobject.
  BaseOffset ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
                                             BaseSubobject Derived) const;

  /// ComputeThisAdjustment - Compute the 'this' pointer adjustment for the
  /// given virtual member function, its offset in the layout class and its
  /// final overrider.
  ThisAdjustment
  ComputeThisAdjustment(const CXXMethodDecl *MD,
                        CharUnits BaseOffsetInLayoutClass,
                        FinalOverriders::OverriderInfo Overrider);

  /// AddMethod - Add a single virtual member function to the vtable
  /// components vector.
  void AddMethod(const CXXMethodDecl *MD, ReturnAdjustment ReturnAdjustment);

  /// IsOverriderUsed - Returns whether the overrider will ever be used in this
  /// part of the vtable.
  ///
  /// Itanium C++ ABI 2.5.2:
  ///
  ///   struct A { virtual void f(); };
  ///   struct B : virtual public A { int i; };
  ///   struct C : virtual public A { int j; };
  ///   struct D : public B, public C {};
  ///
  ///   When B and C are declared, A is a primary base in each case, so although
  ///   vcall offsets are allocated in the A-in-B and A-in-C vtables, no this
  ///   adjustment is required and no thunk is generated. However, inside D
  ///   objects, A is no longer a primary base of C, so if we allowed calls to
  ///   C::f() to use the copy of A's vtable in the C subobject, we would need
  ///   to adjust this from C* to B::A*, which would require a third-party
  ///   thunk. Since we require that a call to C::f() first convert to A*,
  ///   C-in-D's copy of A's vtable is never referenced, so this is not
  ///   necessary.
  bool IsOverriderUsed(const CXXMethodDecl *Overrider,
                       CharUnits BaseOffsetInLayoutClass,
                       const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
                       CharUnits FirstBaseOffsetInLayoutClass) const;

  // LayoutVTable - Layout the vtable for the given base class, including its
  // secondary vtables and any vtables for virtual bases.
  void LayoutVTable();

  /// LayoutPrimaryAndSecondaryVTables - Layout the primary vtable for the
  /// given base subobject, as well as all its secondary vtables.
  ///
  /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
  /// or a direct or indirect base of a virtual base.
  ///
  /// \param BaseIsVirtualInLayoutClass - Whether the base subobject is virtual
  /// in the layout class.
  void LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
                                        bool BaseIsMorallyVirtual,
                                        bool BaseIsVirtualInLayoutClass,
                                        CharUnits OffsetInLayoutClass);

  /// LayoutSecondaryVTables - Layout the secondary vtables for the given base
  /// subobject.
  ///
  /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
  /// or a direct or indirect base of a virtual base.
  void LayoutSecondaryVTables(BaseSubobject Base, bool BaseIsMorallyVirtual,
                              CharUnits OffsetInLayoutClass);

  /// DeterminePrimaryVirtualBases - Determine the primary virtual bases in this
  /// class hierarchy.
  void DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
                                    CharUnits OffsetInLayoutClass,
                                    VisitedVirtualBasesSetTy &VBases);

  /// LayoutVTablesForVirtualBases - Layout vtables for all virtual bases of the
  /// given base (excluding any primary bases).
  void LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
                                    VisitedVirtualBasesSetTy &VBases);

  /// isBuildingConstructorVTable - Return whether this vtable builder is
  /// building a construction vtable.
  bool isBuildingConstructorVTable() const {
    return MostDerivedClass != LayoutClass;
  }

public:
  ItaniumVTableBuilder(ItaniumVTableContext &VTables,
                       const CXXRecordDecl *MostDerivedClass,
                       CharUnits MostDerivedClassOffset,
                       bool MostDerivedClassIsVirtual,
                       const CXXRecordDecl *LayoutClass)
    : VTables(VTables), MostDerivedClass(MostDerivedClass),
    MostDerivedClassOffset(MostDerivedClassOffset),
    MostDerivedClassIsVirtual(MostDerivedClassIsVirtual),
    LayoutClass(LayoutClass), Context(MostDerivedClass->getASTContext()),
    Overriders(MostDerivedClass, MostDerivedClassOffset, LayoutClass) {
    // The Microsoft ABI has its own vtable builder; this one is Itanium-only.
    assert(!Context.getTargetInfo().getCXXABI().isMicrosoft());

    LayoutVTable();

    if (Context.getLangOpts().DumpVTableLayouts)
      dumpLayout(llvm::outs());
  }

  uint64_t getNumThunks() const {
    return Thunks.size();
  }

  ThunksMapTy::const_iterator thunks_begin() const {
    return Thunks.begin();
  }

  ThunksMapTy::const_iterator thunks_end() const {
    return Thunks.end();
  }

  const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
    return VBaseOffsetOffsets;
  }

  const AddressPointsMapTy &getAddressPoints() const {
    return AddressPoints;
  }

  MethodVTableIndicesTy::const_iterator vtable_indices_begin() const {
    return MethodVTableIndices.begin();
  }

  MethodVTableIndicesTy::const_iterator vtable_indices_end() const {
    return MethodVTableIndices.end();
  }

  /// getNumVTableComponents - Return the number of components in the vtable
  /// currently built.
  uint64_t getNumVTableComponents() const {
    return Components.size();
  }

  const VTableComponent *vtable_component_begin() const {
    return Components.begin();
  }

  const VTableComponent *vtable_component_end() const {
    return Components.end();
  }

  AddressPointsMapTy::const_iterator address_points_begin() const {
    return AddressPoints.begin();
  }

  AddressPointsMapTy::const_iterator address_points_end() const {
    return AddressPoints.end();
  }

  VTableThunksMapTy::const_iterator vtable_thunks_begin() const {
    return VTableThunks.begin();
  }

  VTableThunksMapTy::const_iterator vtable_thunks_end() const {
    return VTableThunks.end();
  }

  /// dumpLayout - Dump the vtable layout.
  void dumpLayout(raw_ostream&);
};
void ItaniumVTableBuilder::AddThunk(const CXXMethodDecl *MD,
const ThunkInfo &Thunk) {
assert(!isBuildingConstructorVTable() &&
"Can't add thunks for construction vtable");
SmallVectorImpl<ThunkInfo> &ThunksVector = Thunks[MD];
// Check if we have this thunk already.
if (std::find(ThunksVector.begin(), ThunksVector.end(), Thunk) !=
ThunksVector.end())
return;
ThunksVector.push_back(Thunk);
}
/// OverriddenMethodsSetTy - A set of methods that a virtual member function
/// overrides, directly or indirectly.
typedef llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverriddenMethodsSetTy;
/// Visit all the methods overridden by the given method recursively,
/// in a depth-first pre-order. The Visitor's visitor method returns a bool
/// indicating whether to continue the recursion for the given overridden
/// method (i.e. returning false stops the iteration).
template <class VisitorTy>
static void
visitAllOverriddenMethods(const CXXMethodDecl *MD, VisitorTy &Visitor) {
assert(MD->isVirtual() && "Method is not virtual!");
for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
E = MD->end_overridden_methods(); I != E; ++I) {
const CXXMethodDecl *OverriddenMD = *I;
if (!Visitor.visit(OverriddenMD))
continue;
visitAllOverriddenMethods(OverriddenMD, Visitor);
}
}
namespace {
/// Visitor that accumulates every overridden method it sees into a set,
/// pruning the traversal on methods that were already collected.
struct OverriddenMethodsCollector {
  OverriddenMethodsSetTy *Methods;

  bool visit(const CXXMethodDecl *MD) {
    // insert() reports whether MD was new; recurse only on new methods.
    const bool Inserted = Methods->insert(MD).second;
    return Inserted;
  }
};
}
/// ComputeAllOverriddenMethods - Collect into \p OverriddenMethods the full
/// transitive set of methods that \p MD overrides.
static void
ComputeAllOverriddenMethods(const CXXMethodDecl *MD,
                            OverriddenMethodsSetTy& OverriddenMethods) {
  OverriddenMethodsCollector Collector;
  Collector.Methods = &OverriddenMethods;
  visitAllOverriddenMethods(MD, Collector);
}
/// Compute the 'this' pointer adjustments required by the method slots laid
/// out so far, record them in VTableThunks, and (unless building a
/// construction vtable) register thunks for methods of the most derived
/// class. Consumes and clears MethodInfoMap.
void ItaniumVTableBuilder::ComputeThisAdjustments() {
  // Now go through the method info map and see if any of the methods need
  // 'this' pointer adjustments.
  for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
       E = MethodInfoMap.end(); I != E; ++I) {
    const CXXMethodDecl *MD = I->first;
    const MethodInfo &MethodInfo = I->second;
    // Ignore adjustments for unused function pointers.
    uint64_t VTableIndex = MethodInfo.VTableIndex;
    if (Components[VTableIndex].getKind() ==
        VTableComponent::CK_UnusedFunctionPointer)
      continue;
    // Get the final overrider for this method.
    FinalOverriders::OverriderInfo Overrider =
      Overriders.getOverrider(MD, MethodInfo.BaseOffset);
    // Check if we need an adjustment at all.
    if (MethodInfo.BaseOffsetInLayoutClass == Overrider.Offset) {
      // When a return thunk is needed by a derived class that overrides a
      // virtual base, gcc uses a virtual 'this' adjustment as well.
      // While the thunk itself might be needed by vtables in subclasses or
      // in construction vtables, there doesn't seem to be a reason for using
      // the thunk in this vtable. Still, we do so to match gcc.
      if (VTableThunks.lookup(VTableIndex).Return.isEmpty())
        continue;
    }
    ThisAdjustment ThisAdjustment =
      ComputeThisAdjustment(MD, MethodInfo.BaseOffsetInLayoutClass, Overrider);
    if (ThisAdjustment.isEmpty())
      continue;
    // Add it.
    VTableThunks[VTableIndex].This = ThisAdjustment;
    if (isa<CXXDestructorDecl>(MD)) {
      // A virtual destructor occupies two consecutive slots (complete, then
      // deleting); add an adjustment for the deleting destructor as well.
      VTableThunks[VTableIndex + 1].This = ThisAdjustment;
    }
  }
  // Clear the method info map; it has served its purpose.
  MethodInfoMap.clear();
  if (isBuildingConstructorVTable()) {
    // We don't need to store thunk information for construction vtables.
    return;
  }
  // Register the computed thunks for methods declared in the most derived
  // class itself.
  for (VTableThunksMapTy::const_iterator I = VTableThunks.begin(),
       E = VTableThunks.end(); I != E; ++I) {
    const VTableComponent &Component = Components[I->first];
    const ThunkInfo &Thunk = I->second;
    const CXXMethodDecl *MD;
    switch (Component.getKind()) {
    default:
      llvm_unreachable("Unexpected vtable component kind!");
    case VTableComponent::CK_FunctionPointer:
      MD = Component.getFunctionDecl();
      break;
    case VTableComponent::CK_CompleteDtorPointer:
      MD = Component.getDestructorDecl();
      break;
    case VTableComponent::CK_DeletingDtorPointer:
      // We've already added the thunk when we saw the complete dtor pointer.
      // (Note: this 'continue' advances the enclosing for loop.)
      continue;
    }
    if (MD->getParent() == MostDerivedClass)
      AddThunk(MD, Thunk);
  }
}
/// Translate an abstract BaseOffset into the ReturnAdjustment form used in
/// the vtable: a non-virtual byte offset plus, for virtual bases, the offset
/// of the vbase-offset slot in the vtable.
ReturnAdjustment
ItaniumVTableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
  ReturnAdjustment Adjustment;

  // An empty offset means no adjustment at all.
  if (Offset.isEmpty())
    return Adjustment;

  if (Offset.VirtualBase) {
    // Get the virtual base offset offset.
    if (Offset.DerivedClass == MostDerivedClass) {
      // We can get the offset offset directly from our map.
      Adjustment.Virtual.Itanium.VBaseOffsetOffset =
          VBaseOffsetOffsets.lookup(Offset.VirtualBase).getQuantity();
    } else {
      // Otherwise ask the global vtable context for it.
      Adjustment.Virtual.Itanium.VBaseOffsetOffset =
          VTables.getVirtualBaseOffsetOffset(Offset.DerivedClass,
                                             Offset.VirtualBase).getQuantity();
    }
  }

  Adjustment.NonVirtual = Offset.NonVirtualOffset.getQuantity();
  return Adjustment;
}
/// Find the offset that takes us from the given Base subobject to the given
/// Derived subobject by examining every inheritance path and selecting the
/// one that lands on the right base subobject.
BaseOffset ItaniumVTableBuilder::ComputeThisAdjustmentBaseOffset(
    BaseSubobject Base, BaseSubobject Derived) const {
  const CXXRecordDecl *BaseRD = Base.getBase();
  const CXXRecordDecl *DerivedRD = Derived.getBase();

  CXXBasePaths Paths(/*FindAmbiguities=*/true,
                     /*RecordPaths=*/true, /*DetectVirtual=*/true);
  if (!DerivedRD->isDerivedFrom(BaseRD, Paths))
    llvm_unreachable("Class must be derived from the passed in base class!");

  // Walk every inheritance path and see which one leads us to the right
  // base subobject.
  for (const CXXBasePath &Path : Paths) {
    BaseOffset Offset = ComputeBaseOffset(Context, DerivedRD, Path);

    CharUnits OffsetToBaseSubobject = Offset.NonVirtualOffset;
    if (Offset.VirtualBase) {
      // With a virtual base class the non-virtual offset is relative to the
      // virtual base class offset, taken from the layout-class layout.
      const ASTRecordLayout &LayoutClassLayout =
          Context.getASTRecordLayout(LayoutClass);
      OffsetToBaseSubobject +=
          LayoutClassLayout.getVBaseClassOffset(Offset.VirtualBase);
    } else {
      // Otherwise the non-virtual offset is relative to the derived class
      // offset.
      OffsetToBaseSubobject += Derived.getBaseOffset();
    }

    // Did this path land on the right base subobject?
    if (OffsetToBaseSubobject == Base.getBaseOffset()) {
      // Since we're going from the base class _to_ the derived class, invert
      // the non-virtual offset.
      Offset.NonVirtualOffset = -Offset.NonVirtualOffset;
      return Offset;
    }
  }

  return BaseOffset();
}
/// Compute the 'this' pointer adjustment needed when the slot for \p MD (at
/// \p BaseOffsetInLayoutClass) dispatches to the given final overrider.
ThisAdjustment ItaniumVTableBuilder::ComputeThisAdjustment(
    const CXXMethodDecl *MD, CharUnits BaseOffsetInLayoutClass,
    FinalOverriders::OverriderInfo Overrider) {
  // Ignore adjustments for pure virtual member functions.
  if (Overrider.Method->isPure())
    return ThisAdjustment();

  BaseSubobject OverriddenSubobject(MD->getParent(), BaseOffsetInLayoutClass);
  BaseSubobject OverriderSubobject(Overrider.Method->getParent(),
                                   Overrider.Offset);

  // Compute the adjustment offset; an empty offset means no adjustment.
  BaseOffset Offset =
      ComputeThisAdjustmentBaseOffset(OverriddenSubobject, OverriderSubobject);
  if (Offset.isEmpty())
    return ThisAdjustment();

  ThisAdjustment Adjustment;
  // Set the non-virtual part of the adjustment.
  Adjustment.NonVirtual = Offset.NonVirtualOffset.getQuantity();

  if (Offset.VirtualBase) {
    // Get the vcall offset map for this virtual base, building it lazily the
    // first time it is needed.
    VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Offset.VirtualBase];
    if (VCallOffsets.empty()) {
      VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, MostDerivedClass,
                                         /*FinalOverriders=*/nullptr,
                                         BaseSubobject(Offset.VirtualBase,
                                                       CharUnits::Zero()),
                                         /*BaseIsVirtual=*/true,
                                         /*OffsetInLayoutClass=*/
                                         CharUnits::Zero());
      VCallOffsets = Builder.getVCallOffsets();
    }

    Adjustment.Virtual.Itanium.VCallOffsetOffset =
        VCallOffsets.getVCallOffsetOffset(MD).getQuantity();
  }

  return Adjustment;
}
void ItaniumVTableBuilder::AddMethod(const CXXMethodDecl *MD,
ReturnAdjustment ReturnAdjustment) {
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
assert(ReturnAdjustment.isEmpty() &&
"Destructor can't have return adjustment!");
// Add both the complete destructor and the deleting destructor.
Components.push_back(VTableComponent::MakeCompleteDtor(DD));
Components.push_back(VTableComponent::MakeDeletingDtor(DD));
} else {
// Add the return adjustment if necessary.
if (!ReturnAdjustment.isEmpty())
VTableThunks[Components.size()].Return = ReturnAdjustment;
// Add the function.
Components.push_back(VTableComponent::MakeFunction(MD));
}
}
/// OverridesIndirectMethodInBase - Return whether the given member function
/// overrides any methods in the set of given bases.
/// Unlike OverridesMethodInBase, this checks "overriders of overriders".
/// For example, if we have:
///
/// struct A { virtual void f(); }
/// struct B : A { virtual void f(); }
/// struct C : B { virtual void f(); }
///
/// OverridesIndirectMethodInBase will return true if given C::f as the method
/// and { A } as the set of bases.
static bool OverridesIndirectMethodInBases(
const CXXMethodDecl *MD,
ItaniumVTableBuilder::PrimaryBasesSetVectorTy &Bases) {
if (Bases.count(MD->getParent()))
return true;
for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
E = MD->end_overridden_methods(); I != E; ++I) {
const CXXMethodDecl *OverriddenMD = *I;
// Check "indirect overriders".
if (OverridesIndirectMethodInBases(OverriddenMD, Bases))
return true;
}
return false;
}
/// IsOverriderUsed - Decide whether the given final overrider's entry will
/// actually be dispatched through in this vtable; when it returns false the
/// caller emits the slot as an unused function pointer instead.
bool ItaniumVTableBuilder::IsOverriderUsed(
    const CXXMethodDecl *Overrider, CharUnits BaseOffsetInLayoutClass,
    const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
    CharUnits FirstBaseOffsetInLayoutClass) const {
  // If the base and the first base in the primary base chain have the same
  // offsets, then this overrider will be used.
  if (BaseOffsetInLayoutClass == FirstBaseOffsetInLayoutClass)
    return true;
  // We know now that Base (or a direct or indirect base of it) is a primary
  // base in part of the class hierarchy, but not a primary base in the most
  // derived class.
  // If the overrider is the first base in the primary base chain, we know
  // that the overrider will be used.
  if (Overrider->getParent() == FirstBaseInPrimaryBaseChain)
    return true;
  ItaniumVTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
  const CXXRecordDecl *RD = FirstBaseInPrimaryBaseChain;
  PrimaryBases.insert(RD);
  // Now traverse the base chain, starting with the first base, until we find
  // the base that is no longer a primary base.
  while (true) {
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
    if (!PrimaryBase)
      break;
    if (Layout.isPrimaryBaseVirtual()) {
      assert(Layout.getVBaseClassOffset(PrimaryBase).isZero() &&
             "Primary base should always be at offset 0!");
      const ASTRecordLayout &LayoutClassLayout =
        Context.getASTRecordLayout(LayoutClass);
      // Now check if this is the primary base that is not a primary base in the
      // most derived class.
      if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
          FirstBaseOffsetInLayoutClass) {
        // We found it, stop walking the chain.
        break;
      }
    } else {
      // A non-virtual primary base is always at offset 0 of its subobject.
      assert(Layout.getBaseClassOffset(PrimaryBase).isZero() &&
             "Primary base should always be at offset 0!");
    }
    if (!PrimaryBases.insert(PrimaryBase))
      llvm_unreachable("Found a duplicate primary base!");
    RD = PrimaryBase;
  }
  // If the final overrider is an override of one of the primary bases,
  // then we know that it will be used.
  return OverridesIndirectMethodInBases(Overrider, PrimaryBases);
}
/// Ordered set of bases, kept in base-to-derived order for chain searches.
typedef llvm::SmallSetVector<const CXXRecordDecl *, 8> BasesSetVectorTy;
/// FindNearestOverriddenMethod - Given a method, returns the overridden method
/// from the nearest base. Returns null if no method was found.
/// The Bases are expected to be sorted in a base-to-derived order.
//
// Fixed: the inner loop previously shadowed the outer loop's I/E variables,
// and the outer index narrowed size_t to int; behavior is unchanged.
static const CXXMethodDecl *
FindNearestOverriddenMethod(const CXXMethodDecl *MD,
                            BasesSetVectorTy &Bases) {
  OverriddenMethodsSetTy OverriddenMethods;
  ComputeAllOverriddenMethods(MD, OverriddenMethods);

  // Walk the bases from most-derived to most-base (the vector is sorted
  // base-to-derived, so iterate it backwards).
  for (unsigned BaseIdx = Bases.size(); BaseIdx != 0; --BaseIdx) {
    const CXXRecordDecl *PrimaryBase = Bases[BaseIdx - 1];

    // Now check the overridden methods.
    for (const CXXMethodDecl *OverriddenMD : OverriddenMethods) {
      // We found our overridden method.
      if (OverriddenMD->getParent() == PrimaryBase)
        return OverriddenMD;
    }
  }

  return nullptr;
}
/// AddMethods - Lay out the method slots for the given base subobject,
/// recursing into its primary base chain first (so shared slots come before
/// this class's new virtual functions), then appending entries for the
/// virtual methods declared here, with the implicit virtual destructor last.
void ItaniumVTableBuilder::AddMethods(
    BaseSubobject Base, CharUnits BaseOffsetInLayoutClass,
    const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
    CharUnits FirstBaseOffsetInLayoutClass,
    PrimaryBasesSetVectorTy &PrimaryBases) {
  // Itanium C++ ABI 2.5.2:
  //   The order of the virtual function pointers in a virtual table is the
  //   order of declaration of the corresponding member functions in the
  //   class.
  //
  //   There is an entry for any virtual function declared in a class,
  //   whether it is a new function or overrides a base class function,
  //   unless it overrides a function from the primary base, and conversion
  //   between their return types does not require an adjustment.
  const CXXRecordDecl *RD = Base.getBase();
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
  // First recurse into the primary base so its slots are laid out before
  // ours, then record it in PrimaryBases.
  if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
    CharUnits PrimaryBaseOffset;
    CharUnits PrimaryBaseOffsetInLayoutClass;
    if (Layout.isPrimaryBaseVirtual()) {
      assert(Layout.getVBaseClassOffset(PrimaryBase).isZero() &&
             "Primary vbase should have a zero offset!");
      const ASTRecordLayout &MostDerivedClassLayout =
        Context.getASTRecordLayout(MostDerivedClass);
      PrimaryBaseOffset =
        MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
      const ASTRecordLayout &LayoutClassLayout =
        Context.getASTRecordLayout(LayoutClass);
      PrimaryBaseOffsetInLayoutClass =
        LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
    } else {
      assert(Layout.getBaseClassOffset(PrimaryBase).isZero() &&
             "Primary base should have a zero offset!");
      PrimaryBaseOffset = Base.getBaseOffset();
      PrimaryBaseOffsetInLayoutClass = BaseOffsetInLayoutClass;
    }
    AddMethods(BaseSubobject(PrimaryBase, PrimaryBaseOffset),
               PrimaryBaseOffsetInLayoutClass, FirstBaseInPrimaryBaseChain,
               FirstBaseOffsetInLayoutClass, PrimaryBases);
    if (!PrimaryBases.insert(PrimaryBase))
      llvm_unreachable("Found a duplicate primary base!");
  }
  const CXXDestructorDecl *ImplicitVirtualDtor = nullptr;
  typedef llvm::SmallVector<const CXXMethodDecl *, 8> NewVirtualFunctionsTy;
  NewVirtualFunctionsTy NewVirtualFunctions;
  // Now go through all virtual member functions and add them.
  for (const auto *MD : RD->methods()) {
    if (!MD->isVirtual())
      continue;
    MD = MD->getCanonicalDecl();
    // Get the final overrider.
    FinalOverriders::OverriderInfo Overrider =
      Overriders.getOverrider(MD, Base.getBaseOffset());
    // Check if this virtual member function overrides a method in a primary
    // base. If this is the case, and the return type doesn't require adjustment
    // then we can just use the member function from the primary base.
    if (const CXXMethodDecl *OverriddenMD =
          FindNearestOverriddenMethod(MD, PrimaryBases)) {
      if (ComputeReturnAdjustmentBaseOffset(Context, MD,
                                            OverriddenMD).isEmpty()) {
        // Replace the method info of the overridden method with our own
        // method.
        assert(MethodInfoMap.count(OverriddenMD) &&
               "Did not find the overridden method!");
        MethodInfo &OverriddenMethodInfo = MethodInfoMap[OverriddenMD];
        // Reuse the overridden method's slot index for this method.
        MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass,
                              OverriddenMethodInfo.VTableIndex);
        assert(!MethodInfoMap.count(MD) &&
               "Should not have method info for this method yet!");
        MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
        MethodInfoMap.erase(OverriddenMD);
        // If the overridden method exists in a virtual base class or a direct
        // or indirect base class of a virtual base class, we need to emit a
        // thunk if we ever have a class hierarchy where the base class is not
        // a primary base in the complete object.
        if (!isBuildingConstructorVTable() && OverriddenMD != MD) {
          // Compute the this adjustment.
          ThisAdjustment ThisAdjustment =
            ComputeThisAdjustment(OverriddenMD, BaseOffsetInLayoutClass,
                                  Overrider);
          if (ThisAdjustment.Virtual.Itanium.VCallOffsetOffset &&
              Overrider.Method->getParent() == MostDerivedClass) {
            // There's no return adjustment from OverriddenMD and MD,
            // but that doesn't mean there isn't one between MD and
            // the final overrider.
            BaseOffset ReturnAdjustmentOffset =
              ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
            ReturnAdjustment ReturnAdjustment =
              ComputeReturnAdjustment(ReturnAdjustmentOffset);
            // This is a virtual thunk for the most derived class, add it.
            AddThunk(Overrider.Method,
                     ThunkInfo(ThisAdjustment, ReturnAdjustment));
          }
        }
        continue;
      }
    }
    if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
      if (MD->isImplicit()) {
        // Itanium C++ ABI 2.5.2:
        //   If a class has an implicitly-defined virtual destructor,
        //   its entries come after the declared virtual function pointers.
        assert(!ImplicitVirtualDtor &&
               "Did already see an implicit virtual dtor!");
        // Defer it; it is appended after all declared virtual functions.
        ImplicitVirtualDtor = DD;
        continue;
      }
    }
    NewVirtualFunctions.push_back(MD);
  }
  if (ImplicitVirtualDtor)
    NewVirtualFunctions.push_back(ImplicitVirtualDtor);
  // Emit a component for each new virtual function, in declaration order.
  for (NewVirtualFunctionsTy::const_iterator I = NewVirtualFunctions.begin(),
       E = NewVirtualFunctions.end(); I != E; ++I) {
    const CXXMethodDecl *MD = *I;
    // Get the final overrider.
    FinalOverriders::OverriderInfo Overrider =
      Overriders.getOverrider(MD, Base.getBaseOffset());
    // Insert the method info for this method.
    MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass,
                          Components.size());
    assert(!MethodInfoMap.count(MD) &&
           "Should not have method info for this method yet!");
    MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
    // Check if this overrider is going to be used.
    const CXXMethodDecl *OverriderMD = Overrider.Method;
    if (!IsOverriderUsed(OverriderMD, BaseOffsetInLayoutClass,
                         FirstBaseInPrimaryBaseChain,
                         FirstBaseOffsetInLayoutClass)) {
      Components.push_back(VTableComponent::MakeUnusedFunction(OverriderMD));
      continue;
    }
    // Check if this overrider needs a return adjustment.
    // We don't want to do this for pure virtual member functions.
    BaseOffset ReturnAdjustmentOffset;
    if (!OverriderMD->isPure()) {
      ReturnAdjustmentOffset =
        ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD);
    }
    ReturnAdjustment ReturnAdjustment =
      ComputeReturnAdjustment(ReturnAdjustmentOffset);
    AddMethod(Overrider.Method, ReturnAdjustment);
  }
}
/// Build the complete vtable: the primary and secondary vtables for the most
/// derived class, then vtables for its (non-primary) virtual bases.
void ItaniumVTableBuilder::LayoutVTable() {
  LayoutPrimaryAndSecondaryVTables(
      BaseSubobject(MostDerivedClass, CharUnits::Zero()),
      /*BaseIsMorallyVirtual=*/false, MostDerivedClassIsVirtual,
      MostDerivedClassOffset);

  // Determine the primary virtual bases; they share a vtable with the class
  // they are primary for and must be excluded below.
  VisitedVirtualBasesSetTy VBases;
  DeterminePrimaryVirtualBases(MostDerivedClass, MostDerivedClassOffset,
                               VBases);
  VBases.clear();

  LayoutVTablesForVirtualBases(MostDerivedClass, VBases);

  // -fapple-kext adds an extra entry at end of vtbl.
  if (Context.getLangOpts().AppleKext)
    Components.push_back(VTableComponent::MakeVCallOffset(CharUnits::Zero()));
}
/// Lay out one vtable for the given base subobject: vcall/vbase offsets,
/// offset-to-top, RTTI, then the method slots; record the address point for
/// this subobject and every primary base that shares the table, and finally
/// recurse into secondary vtables.
void ItaniumVTableBuilder::LayoutPrimaryAndSecondaryVTables(
    BaseSubobject Base, bool BaseIsMorallyVirtual,
    bool BaseIsVirtualInLayoutClass, CharUnits OffsetInLayoutClass) {
  assert(Base.getBase()->isDynamicClass() && "class does not have a vtable!");
  // Add vcall and vbase offsets for this vtable.
  VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, LayoutClass, &Overriders,
                                     Base, BaseIsVirtualInLayoutClass,
                                     OffsetInLayoutClass);
  Components.append(Builder.components_begin(), Builder.components_end());
  // Check if we need to add these vcall offsets.
  if (BaseIsVirtualInLayoutClass && !Builder.getVCallOffsets().empty()) {
    VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Base.getBase()];
    if (VCallOffsets.empty())
      VCallOffsets = Builder.getVCallOffsets();
  }
  // If we're laying out the most derived class we want to keep track of the
  // virtual base class offset offsets.
  if (Base.getBase() == MostDerivedClass)
    VBaseOffsetOffsets = Builder.getVBaseOffsetOffsets();
  // Add the offset to top.
  CharUnits OffsetToTop = MostDerivedClassOffset - OffsetInLayoutClass;
  Components.push_back(VTableComponent::MakeOffsetToTop(OffsetToTop));
  // Next, add the RTTI.
  Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass));
  // The address point is the slot just past the RTTI entry, i.e. the index
  // of the first virtual function pointer.
  uint64_t AddressPoint = Components.size();
  // Now go through all virtual member functions and add them.
  PrimaryBasesSetVectorTy PrimaryBases;
  AddMethods(Base, OffsetInLayoutClass,
             Base.getBase(), OffsetInLayoutClass,
             PrimaryBases);
  const CXXRecordDecl *RD = Base.getBase();
  if (RD == MostDerivedClass) {
    assert(MethodVTableIndices.empty());
    // Record per-method vtable indices, relative to the address point;
    // destructors get two indices (complete, then deleting).
    for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
         E = MethodInfoMap.end(); I != E; ++I) {
      const CXXMethodDecl *MD = I->first;
      const MethodInfo &MI = I->second;
      if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
        MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)]
            = MI.VTableIndex - AddressPoint;
        MethodVTableIndices[GlobalDecl(DD, Dtor_Deleting)]
            = MI.VTableIndex + 1 - AddressPoint;
      } else {
        MethodVTableIndices[MD] = MI.VTableIndex - AddressPoint;
      }
    }
  }
  // Compute 'this' pointer adjustments.
  ComputeThisAdjustments();
  // Add all address points: this subobject and every primary base along the
  // chain that shares this vtable.
  while (true) {
    AddressPoints.insert(std::make_pair(
      BaseSubobject(RD, OffsetInLayoutClass),
      AddressPoint));
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
    if (!PrimaryBase)
      break;
    if (Layout.isPrimaryBaseVirtual()) {
      // Check if this virtual primary base is a primary base in the layout
      // class. If it's not, we don't want to add it.
      const ASTRecordLayout &LayoutClassLayout =
        Context.getASTRecordLayout(LayoutClass);
      if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
          OffsetInLayoutClass) {
        // We don't want to add this class (or any of its primary bases).
        break;
      }
    }
    RD = PrimaryBase;
  }
  // Layout secondary vtables.
  LayoutSecondaryVTables(Base, BaseIsMorallyVirtual, OffsetInLayoutClass);
}
/// Lay out the secondary vtables for the proper base classes of the given
/// base subobject, skipping virtual bases (emitted later) and the primary
/// base (which shares our primary vtable).
void
ItaniumVTableBuilder::LayoutSecondaryVTables(BaseSubobject Base,
                                             bool BaseIsMorallyVirtual,
                                             CharUnits OffsetInLayoutClass) {
  // Itanium C++ ABI 2.5.2:
  //   Following the primary virtual table of a derived class are secondary
  //   virtual tables for each of its proper base classes, except any primary
  //   base(s) with which it shares its primary virtual table.
  const CXXRecordDecl *RD = Base.getBase();
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  for (const auto &BaseSpec : RD->bases()) {
    // Ignore virtual bases, we'll emit them later.
    if (BaseSpec.isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl = BaseSpec.getType()->getAsCXXRecordDecl();

    // Ignore bases that don't have a vtable.
    if (!BaseDecl->isDynamicClass())
      continue;

    // Itanium C++ ABI 2.6.4:
    //   Some of the base class subobjects may not need construction virtual
    //   tables, which will therefore not be present in the construction
    //   virtual table group, even though the subobject virtual tables are
    //   present in the main virtual table group for the complete object.
    if (isBuildingConstructorVTable() && !BaseIsMorallyVirtual &&
        !BaseDecl->getNumVBases())
      continue;

    // Compute this base's offset within the object and within the class used
    // for layout.
    CharUnits RelativeBaseOffset = Layout.getBaseClassOffset(BaseDecl);
    CharUnits BaseOffset = Base.getBaseOffset() + RelativeBaseOffset;
    CharUnits BaseOffsetInLayoutClass =
        OffsetInLayoutClass + RelativeBaseOffset;

    if (BaseDecl == PrimaryBase) {
      // Don't emit a secondary vtable for a primary base. We might however
      // want to emit secondary vtables for other bases of this base.
      LayoutSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
                             BaseIsMorallyVirtual, BaseOffsetInLayoutClass);
      continue;
    }

    // Layout the primary vtable (and any secondary vtables) for this base.
    LayoutPrimaryAndSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
                                     BaseIsMorallyVirtual,
                                     /*BaseIsVirtualInLayoutClass=*/false,
                                     BaseOffsetInLayoutClass);
  }
}
/// Recursively collect into PrimaryVirtualBases every virtual base that is
/// used as a primary base somewhere in the hierarchy (and, for construction
/// vtables, actually is a primary base in the layout class).
void ItaniumVTableBuilder::DeterminePrimaryVirtualBases(
    const CXXRecordDecl *RD, CharUnits OffsetInLayoutClass,
    VisitedVirtualBasesSetTy &VBases) {
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
  // Check if this base has a primary base.
  if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
    // Check if it's virtual.
    if (Layout.isPrimaryBaseVirtual()) {
      bool IsPrimaryVirtualBase = true;
      if (isBuildingConstructorVTable()) {
        // Check if the base is actually a primary base in the class we use for
        // layout.
        const ASTRecordLayout &LayoutClassLayout =
          Context.getASTRecordLayout(LayoutClass);
        CharUnits PrimaryBaseOffsetInLayoutClass =
          LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
        // We know that the base is not a primary base in the layout class if
        // the base offsets are different.
        if (PrimaryBaseOffsetInLayoutClass != OffsetInLayoutClass)
          IsPrimaryVirtualBase = false;
      }
      if (IsPrimaryVirtualBase)
        PrimaryVirtualBases.insert(PrimaryBase);
    }
  }
  // Traverse bases, looking for more primary virtual bases.
  for (const auto &B : RD->bases()) {
    const CXXRecordDecl *BaseDecl = B.getType()->getAsCXXRecordDecl();
    CharUnits BaseOffsetInLayoutClass;
    if (B.isVirtual()) {
      // Visit each virtual base only once.
      if (!VBases.insert(BaseDecl).second)
        continue;
      // A virtual base's offset comes from the layout-class layout.
      const ASTRecordLayout &LayoutClassLayout =
        Context.getASTRecordLayout(LayoutClass);
      BaseOffsetInLayoutClass =
        LayoutClassLayout.getVBaseClassOffset(BaseDecl);
    } else {
      BaseOffsetInLayoutClass =
        OffsetInLayoutClass + Layout.getBaseClassOffset(BaseDecl);
    }
    DeterminePrimaryVirtualBases(BaseDecl, BaseOffsetInLayoutClass, VBases);
  }
}
/// Lay out vtables for the virtual bases of \p RD, in inheritance graph
/// order, excluding primary virtual bases (which share a vtable with the
/// class they are primary for) and bases already visited.
void ItaniumVTableBuilder::LayoutVTablesForVirtualBases(
    const CXXRecordDecl *RD, VisitedVirtualBasesSetTy &VBases) {
  // Itanium C++ ABI 2.5.2:
  //   Then come the virtual base virtual tables, also in inheritance graph
  //   order, and again excluding primary bases (which share virtual tables
  //   with the classes for which they are primary).
  for (const auto &BaseSpec : RD->bases()) {
    const CXXRecordDecl *BaseDecl = BaseSpec.getType()->getAsCXXRecordDecl();

    // This base needs a vtable if it's virtual, dynamic, not the primary
    // base of some other class, and not laid out already. Note the && keeps
    // VBases.insert() from running unless the earlier tests pass.
    const bool NeedsVTable =
        BaseSpec.isVirtual() && BaseDecl->isDynamicClass() &&
        !PrimaryVirtualBases.count(BaseDecl) &&
        VBases.insert(BaseDecl).second;

    if (NeedsVTable) {
      const ASTRecordLayout &MostDerivedClassLayout =
          Context.getASTRecordLayout(MostDerivedClass);
      CharUnits BaseOffset =
          MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
      const ASTRecordLayout &LayoutClassLayout =
          Context.getASTRecordLayout(LayoutClass);
      CharUnits BaseOffsetInLayoutClass =
          LayoutClassLayout.getVBaseClassOffset(BaseDecl);

      LayoutPrimaryAndSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
                                       /*BaseIsMorallyVirtual=*/true,
                                       /*BaseIsVirtualInLayoutClass=*/true,
                                       BaseOffsetInLayoutClass);
    }

    // We only need to recurse for virtual base vtables if this base actually
    // has virtual bases of its own.
    if (BaseDecl->getNumVBases())
      LayoutVTablesForVirtualBases(BaseDecl, VBases);
  }
}
/// dumpLayout - Dump the vtable layout.
void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
// FIXME: write more tests that actually use the dumpLayout output to prevent
// ItaniumVTableBuilder regressions.
if (isBuildingConstructorVTable()) {
Out << "Construction vtable for ('";
MostDerivedClass->printQualifiedName(Out);
Out << "', ";
Out << MostDerivedClassOffset.getQuantity() << ") in '";
LayoutClass->printQualifiedName(Out);
} else {
Out << "Vtable for '";
MostDerivedClass->printQualifiedName(Out);
}
Out << "' (" << Components.size() << " entries).\n";
// Iterate through the address points and insert them into a new map where
// they are keyed by the index and not the base object.
// Since an address point can be shared by multiple subobjects, we use an
// STL multimap.
std::multimap<uint64_t, BaseSubobject> AddressPointsByIndex;
for (AddressPointsMapTy::const_iterator I = AddressPoints.begin(),
E = AddressPoints.end(); I != E; ++I) {
const BaseSubobject& Base = I->first;
uint64_t Index = I->second;
AddressPointsByIndex.insert(std::make_pair(Index, Base));
}
for (unsigned I = 0, E = Components.size(); I != E; ++I) {
uint64_t Index = I;
Out << llvm::format("%4d | ", I);
const VTableComponent &Component = Components[I];
// Dump the component.
switch (Component.getKind()) {
case VTableComponent::CK_VCallOffset:
Out << "vcall_offset ("
<< Component.getVCallOffset().getQuantity()
<< ")";
break;
case VTableComponent::CK_VBaseOffset:
Out << "vbase_offset ("
<< Component.getVBaseOffset().getQuantity()
<< ")";
break;
case VTableComponent::CK_OffsetToTop:
Out << "offset_to_top ("
<< Component.getOffsetToTop().getQuantity()
<< ")";
break;
case VTableComponent::CK_RTTI:
Component.getRTTIDecl()->printQualifiedName(Out);
Out << " RTTI";
break;
case VTableComponent::CK_FunctionPointer: {
const CXXMethodDecl *MD = Component.getFunctionDecl();
std::string Str =
PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
MD);
Out << Str;
if (MD->isPure())
Out << " [pure]";
if (MD->isDeleted())
Out << " [deleted]";
ThunkInfo Thunk = VTableThunks.lookup(I);
if (!Thunk.isEmpty()) {
// If this function pointer has a return adjustment, dump it.
if (!Thunk.Return.isEmpty()) {
Out << "\n [return adjustment: ";
Out << Thunk.Return.NonVirtual << " non-virtual";
if (Thunk.Return.Virtual.Itanium.VBaseOffsetOffset) {
Out << ", " << Thunk.Return.Virtual.Itanium.VBaseOffsetOffset;
Out << " vbase offset offset";
}
Out << ']';
}
// If this function pointer has a 'this' pointer adjustment, dump it.
if (!Thunk.This.isEmpty()) {
Out << "\n [this adjustment: ";
Out << Thunk.This.NonVirtual << " non-virtual";
if (Thunk.This.Virtual.Itanium.VCallOffsetOffset) {
Out << ", " << Thunk.This.Virtual.Itanium.VCallOffsetOffset;
Out << " vcall offset offset";
}
Out << ']';
}
}
break;
}
case VTableComponent::CK_CompleteDtorPointer:
case VTableComponent::CK_DeletingDtorPointer: {
bool IsComplete =
Component.getKind() == VTableComponent::CK_CompleteDtorPointer;
const CXXDestructorDecl *DD = Component.getDestructorDecl();
DD->printQualifiedName(Out);
if (IsComplete)
Out << "() [complete]";
else
Out << "() [deleting]";
if (DD->isPure())
Out << " [pure]";
ThunkInfo Thunk = VTableThunks.lookup(I);
if (!Thunk.isEmpty()) {
// If this destructor has a 'this' pointer adjustment, dump it.
if (!Thunk.This.isEmpty()) {
Out << "\n [this adjustment: ";
Out << Thunk.This.NonVirtual << " non-virtual";
if (Thunk.This.Virtual.Itanium.VCallOffsetOffset) {
Out << ", " << Thunk.This.Virtual.Itanium.VCallOffsetOffset;
Out << " vcall offset offset";
}
Out << ']';
}
}
break;
}
case VTableComponent::CK_UnusedFunctionPointer: {
const CXXMethodDecl *MD = Component.getUnusedFunctionDecl();
std::string Str =
PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
MD);
Out << "[unused] " << Str;
if (MD->isPure())
Out << " [pure]";
}
}
Out << '\n';
// Dump the next address point.
uint64_t NextIndex = Index + 1;
if (AddressPointsByIndex.count(NextIndex)) {
if (AddressPointsByIndex.count(NextIndex) == 1) {
const BaseSubobject &Base =
AddressPointsByIndex.find(NextIndex)->second;
Out << " -- (";
Base.getBase()->printQualifiedName(Out);
Out << ", " << Base.getBaseOffset().getQuantity();
Out << ") vtable address --\n";
} else {
CharUnits BaseOffset =
AddressPointsByIndex.lower_bound(NextIndex)->second.getBaseOffset();
// We store the class names in a set to get a stable order.
std::set<std::string> ClassNames;
for (std::multimap<uint64_t, BaseSubobject>::const_iterator I =
AddressPointsByIndex.lower_bound(NextIndex), E =
AddressPointsByIndex.upper_bound(NextIndex); I != E; ++I) {
assert(I->second.getBaseOffset() == BaseOffset &&
"Invalid base offset!");
const CXXRecordDecl *RD = I->second.getBase();
ClassNames.insert(RD->getQualifiedNameAsString());
}
for (std::set<std::string>::const_iterator I = ClassNames.begin(),
E = ClassNames.end(); I != E; ++I) {
Out << " -- (" << *I;
Out << ", " << BaseOffset.getQuantity() << ") vtable address --\n";
}
}
}
}
Out << '\n';
if (isBuildingConstructorVTable())
return;
if (MostDerivedClass->getNumVBases()) {
// We store the virtual base class names and their offsets in a map to get
// a stable order.
std::map<std::string, CharUnits> ClassNamesAndOffsets;
for (VBaseOffsetOffsetsMapTy::const_iterator I = VBaseOffsetOffsets.begin(),
E = VBaseOffsetOffsets.end(); I != E; ++I) {
std::string ClassName = I->first->getQualifiedNameAsString();
CharUnits OffsetOffset = I->second;
ClassNamesAndOffsets.insert(
std::make_pair(ClassName, OffsetOffset));
}
Out << "Virtual base offset offsets for '";
MostDerivedClass->printQualifiedName(Out);
Out << "' (";
Out << ClassNamesAndOffsets.size();
Out << (ClassNamesAndOffsets.size() == 1 ? " entry" : " entries") << ").\n";
for (std::map<std::string, CharUnits>::const_iterator I =
ClassNamesAndOffsets.begin(), E = ClassNamesAndOffsets.end();
I != E; ++I)
Out << " " << I->first << " | " << I->second.getQuantity() << '\n';
Out << "\n";
}
if (!Thunks.empty()) {
// We store the method names in a map to get a stable order.
std::map<std::string, const CXXMethodDecl *> MethodNamesAndDecls;
for (ThunksMapTy::const_iterator I = Thunks.begin(), E = Thunks.end();
I != E; ++I) {
const CXXMethodDecl *MD = I->first;
std::string MethodName =
PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
MD);
MethodNamesAndDecls.insert(std::make_pair(MethodName, MD));
}
for (std::map<std::string, const CXXMethodDecl *>::const_iterator I =
MethodNamesAndDecls.begin(), E = MethodNamesAndDecls.end();
I != E; ++I) {
const std::string &MethodName = I->first;
const CXXMethodDecl *MD = I->second;
ThunkInfoVectorTy ThunksVector = Thunks[MD];
std::sort(ThunksVector.begin(), ThunksVector.end(),
[](const ThunkInfo &LHS, const ThunkInfo &RHS) {
assert(LHS.Method == nullptr && RHS.Method == nullptr);
return std::tie(LHS.This, LHS.Return) < std::tie(RHS.This, RHS.Return);
});
Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size();
Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n";
for (unsigned I = 0, E = ThunksVector.size(); I != E; ++I) {
const ThunkInfo &Thunk = ThunksVector[I];
Out << llvm::format("%4d | ", I);
// If this function pointer has a return pointer adjustment, dump it.
if (!Thunk.Return.isEmpty()) {
Out << "return adjustment: " << Thunk.Return.NonVirtual;
Out << " non-virtual";
if (Thunk.Return.Virtual.Itanium.VBaseOffsetOffset) {
Out << ", " << Thunk.Return.Virtual.Itanium.VBaseOffsetOffset;
Out << " vbase offset offset";
}
if (!Thunk.This.isEmpty())
Out << "\n ";
}
// If this function pointer has a 'this' pointer adjustment, dump it.
if (!Thunk.This.isEmpty()) {
Out << "this adjustment: ";
Out << Thunk.This.NonVirtual << " non-virtual";
if (Thunk.This.Virtual.Itanium.VCallOffsetOffset) {
Out << ", " << Thunk.This.Virtual.Itanium.VCallOffsetOffset;
Out << " vcall offset offset";
}
}
Out << '\n';
}
Out << '\n';
}
}
// Compute the vtable indices for all the member functions.
// Store them in a map keyed by the index so we'll get a sorted table.
std::map<uint64_t, std::string> IndicesMap;
for (const auto *MD : MostDerivedClass->methods()) {
// We only want virtual member functions.
if (!MD->isVirtual())
continue;
MD = MD->getCanonicalDecl();
std::string MethodName =
PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
MD);
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
GlobalDecl GD(DD, Dtor_Complete);
assert(MethodVTableIndices.count(GD));
uint64_t VTableIndex = MethodVTableIndices[GD];
IndicesMap[VTableIndex] = MethodName + " [complete]";
IndicesMap[VTableIndex + 1] = MethodName + " [deleting]";
} else {
assert(MethodVTableIndices.count(MD));
IndicesMap[MethodVTableIndices[MD]] = MethodName;
}
}
// Print the vtable indices for all the member functions.
if (!IndicesMap.empty()) {
Out << "VTable indices for '";
MostDerivedClass->printQualifiedName(Out);
Out << "' (" << IndicesMap.size() << " entries).\n";
for (std::map<uint64_t, std::string>::const_iterator I = IndicesMap.begin(),
E = IndicesMap.end(); I != E; ++I) {
uint64_t VTableIndex = I->first;
const std::string &MethodName = I->second;
Out << llvm::format("%4" PRIu64 " | ", VTableIndex) << MethodName
<< '\n';
}
}
Out << '\n';
}
}
/// Construct a VTableLayout that owns private copies of the component and
/// thunk arrays produced by a vtable builder.  The thunk array is kept
/// sorted by vtable index.
VTableLayout::VTableLayout(uint64_t NumVTableComponents,
                           const VTableComponent *VTableComponents,
                           uint64_t NumVTableThunks,
                           const VTableThunkTy *VTableThunks,
                           const AddressPointsMapTy &AddressPoints,
                           bool IsMicrosoftABI)
    : NumVTableComponents(NumVTableComponents),
      VTableComponents(new VTableComponent[NumVTableComponents]),
      NumVTableThunks(NumVTableThunks),
      VTableThunks(new VTableThunkTy[NumVTableThunks]),
      AddressPoints(AddressPoints),
      IsMicrosoftABI(IsMicrosoftABI) {
  // Take ownership of copies of the caller's arrays.
  std::copy(VTableComponents, VTableComponents + NumVTableComponents,
            this->VTableComponents.get());
  std::copy(VTableThunks, VTableThunks + NumVTableThunks,
            this->VTableThunks.get());

  // Order thunks by their vtable index; each index must map to a unique
  // thunk, which the comparator asserts.
  auto IndexLess = [](const VTableLayout::VTableThunkTy &LHS,
                      const VTableLayout::VTableThunkTy &RHS) {
    assert((LHS.first != RHS.first || LHS.second == RHS.second) &&
           "Different thunks should have unique indices!");
    return LHS.first < RHS.first;
  };
  std::sort(this->VTableThunks.get(),
            this->VTableThunks.get() + NumVTableThunks, IndexLess);
}
// Defined out of line; the owning array members copied in the constructor
// are released by their own destructors.
VTableLayout::~VTableLayout() { }
// The ASTContext parameter is currently unused; the base class only needs
// to know this is the non-Microsoft (Itanium) ABI.
ItaniumVTableContext::ItaniumVTableContext(ASTContext &Context)
    : VTableContextBase(/*MS=*/false) {}
ItaniumVTableContext::~ItaniumVTableContext() {
  // VTableLayouts maps records to layouts allocated with 'new' in
  // computeVTableRelatedInformation; free the mapped values here.
  llvm::DeleteContainerSeconds(VTableLayouts);
}
/// Return the vtable slot index for the given (method, structor-kind) pair,
/// computing and caching the vtable layout for its class on first use.
uint64_t ItaniumVTableContext::getMethodVTableIndex(GlobalDecl GD) {
  // Fast path: the index may already be cached from a previous layout.
  auto It = MethodVTableIndices.find(GD);
  if (It == MethodVTableIndices.end()) {
    // Laying out the vtable for the method's class populates the cache.
    const CXXRecordDecl *RD = cast<CXXMethodDecl>(GD.getDecl())->getParent();
    computeVTableRelatedInformation(RD);

    It = MethodVTableIndices.find(GD);
    assert(It != MethodVTableIndices.end() && "Did not find index!");
  }
  return It->second;
}
/// Return the offset (within the vtable, in chars) of the slot holding the
/// offset of VBase within RD.  Computes and caches the answer for every
/// virtual base of RD on the first query.
CharUnits
ItaniumVTableContext::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
                                                 const CXXRecordDecl *VBase) {
  ClassPairTy ClassPair(RD, VBase);

  // Return the cached value if this pair has been computed before.
  VirtualBaseClassOffsetOffsetsMapTy::iterator Entry =
      VirtualBaseClassOffsetOffsets.find(ClassPair);
  if (Entry != VirtualBaseClassOffsetOffsets.end())
    return Entry->second;

  // Run the vcall/vbase-offset builder for RD alone; this gives us the
  // offsets for all of RD's virtual bases, so cache every one of them.
  VCallAndVBaseOffsetBuilder Builder(RD, RD, /*FinalOverriders=*/nullptr,
                                     BaseSubobject(RD, CharUnits::Zero()),
                                     /*BaseIsVirtual=*/false,
                                     /*OffsetInLayoutClass=*/CharUnits::Zero());

  for (const auto &Offsets : Builder.getVBaseOffsetOffsets())
    VirtualBaseClassOffsetOffsets.insert(
        std::make_pair(ClassPairTy(RD, Offsets.first), Offsets.second));

  Entry = VirtualBaseClassOffsetOffsets.find(ClassPair);
  assert(Entry != VirtualBaseClassOffsetOffsets.end() &&
         "Did not find index!");
  return Entry->second;
}
/// CreateVTableLayout - Package the results of an ItaniumVTableBuilder run
/// into a freshly allocated VTableLayout.  The caller owns the result.
static VTableLayout *CreateVTableLayout(const ItaniumVTableBuilder &Builder) {
  // Materialize the builder's thunk map into a contiguous vector.
  SmallVector<VTableLayout::VTableThunkTy, 1> VTableThunks(
      Builder.vtable_thunks_begin(), Builder.vtable_thunks_end());

  return new VTableLayout(
      Builder.getNumVTableComponents(), Builder.vtable_component_begin(),
      VTableThunks.size(), VTableThunks.data(), Builder.getAddressPoints(),
      /*IsMicrosoftABI=*/false);
}
/// Lay out the vtable for RD (once per class) and record the layout, the
/// method vtable indices, the thunks, and the virtual base offset offsets.
void
ItaniumVTableContext::computeVTableRelatedInformation(const CXXRecordDecl *RD) {
  const VTableLayout *&Entry = VTableLayouts[RD];

  // Nothing to do if this class was already laid out.
  if (Entry)
    return;

  ItaniumVTableBuilder Builder(*this, RD, CharUnits::Zero(),
                               /*MostDerivedClassIsVirtual=*/0, RD);
  Entry = CreateVTableLayout(Builder);

  // Record the vtable slot index of every virtual method.
  MethodVTableIndices.insert(Builder.vtable_indices_begin(),
                             Builder.vtable_indices_end());

  // Record all the thunks the builder discovered.
  Thunks.insert(Builder.thunks_begin(), Builder.thunks_end());

  // Cache the virtual base offset offsets, unless they're already present:
  // getVirtualBaseOffsetOffset can compute them separately without laying
  // out the whole vtable.
  if (!RD->getNumVBases())
    return;

  const CXXRecordDecl *VBase =
      RD->vbases_begin()->getType()->getAsCXXRecordDecl();
  if (VirtualBaseClassOffsetOffsets.count(std::make_pair(RD, VBase)))
    return;

  for (const auto &Offsets : Builder.getVBaseOffsetOffsets())
    VirtualBaseClassOffsetOffsets.insert(
        std::make_pair(ClassPairTy(RD, Offsets.first), Offsets.second));
}
/// Build the layout of a construction vtable (used while a base subobject
/// of LayoutClass is being constructed/destructed).  Unlike complete-object
/// layouts these are not cached; the caller owns the returned layout.
VTableLayout *ItaniumVTableContext::createConstructionVTableLayout(
    const CXXRecordDecl *MostDerivedClass, CharUnits MostDerivedClassOffset,
    bool MostDerivedClassIsVirtual, const CXXRecordDecl *LayoutClass) {
  ItaniumVTableBuilder Builder(*this, MostDerivedClass, MostDerivedClassOffset,
                               MostDerivedClassIsVirtual, LayoutClass);
  return CreateVTableLayout(Builder);
}
namespace {
// Vtables in the Microsoft ABI are different from the Itanium ABI.
//
// The main differences are:
// 1. Separate vftable and vbtable.
//
// 2. Each subobject with a vfptr gets its own vftable rather than an address
// point in a single vtable shared between all the subobjects.
// Each vftable is represented by a separate section and virtual calls
// must be done using the vftable which has a slot for the function to be
// called.
//
// 3. Virtual method definitions expect their 'this' parameter to point to the
// first vfptr whose table provides a compatible overridden method. In many
// cases, this permits the original vf-table entry to directly call
// the method instead of passing through a thunk.
// See example before VFTableBuilder::ComputeThisOffset below.
//
// A compatible overridden method is one which does not have a non-trivial
// covariant-return adjustment.
//
// The first vfptr is the one with the lowest offset in the complete-object
// layout of the defining class, and the method definition will subtract
// that constant offset from the parameter value to get the real 'this'
// value. Therefore, if the offset isn't really constant (e.g. if a virtual
// function defined in a virtual base is overridden in a more derived
// virtual base and these bases have a reverse order in the complete
// object), the vf-table may require a this-adjustment thunk.
//
// 4. vftables do not contain new entries for overrides that merely require
// this-adjustment. Together with #3, this keeps vf-tables smaller and
// eliminates the need for this-adjustment thunks in many cases, at the cost
// of often requiring redundant work to adjust the "this" pointer.
//
// 5. Instead of VTT and constructor vtables, vbtables and vtordisps are used.
// Vtordisps are emitted into the class layout if a class has
// a) a user-defined ctor/dtor
// and
// b) a method overriding a method in a virtual base.
//
// To get a better understanding of this code,
// you might want to see examples in test/CodeGenCXX/microsoft-abi-vtables-*.cpp
/// VFTableBuilder - Lays out a single Microsoft ABI vftable: the one
/// reached through the vfptr described by WhichVFPtr (see the long comment
/// above for how MS vftables differ from Itanium vtables).
class VFTableBuilder {
public:
  typedef MicrosoftVTableContext::MethodVFTableLocation MethodVFTableLocation;

  typedef llvm::DenseMap<GlobalDecl, MethodVFTableLocation>
    MethodVFTableLocationsTy;

  typedef llvm::iterator_range<MethodVFTableLocationsTy::const_iterator>
    method_locations_range;

private:
  /// VTables - Global vtable information.
  MicrosoftVTableContext &VTables;

  /// Context - The ASTContext which we will use for layout information.
  ASTContext &Context;

  /// MostDerivedClass - The most derived class for which we're building this
  /// vtable.
  const CXXRecordDecl *MostDerivedClass;

  /// MostDerivedClassLayout - The record layout of MostDerivedClass.
  const ASTRecordLayout &MostDerivedClassLayout;

  /// WhichVFPtr - Describes which of the class's vfptrs this vftable is for.
  const VPtrInfo &WhichVFPtr;

  /// FinalOverriders - The final overriders of the most derived class.
  const FinalOverriders Overriders;

  /// Components - The components of the vftable being built.
  SmallVector<VTableComponent, 64> Components;

  /// MethodVFTableLocations - Populated by LayoutVFTable(); maps each method
  /// of MostDerivedClass to the slot it occupies in this vftable.
  MethodVFTableLocationsTy MethodVFTableLocations;

  /// \brief Does this class have an RTTI component?
  bool HasRTTIComponent;

  /// MethodInfo - Contains information about a method in a vtable.
  /// (Used for computing 'this' pointer adjustment thunks.)
  struct MethodInfo {
    /// VBTableIndex - The nonzero index in the vbtable that
    /// this method's base has, or zero.
    const uint64_t VBTableIndex;

    /// VFTableIndex - The index in the vftable that this method has.
    const uint64_t VFTableIndex;

    /// Shadowed - Indicates if this vftable slot is shadowed by
    /// a slot for a covariant-return override. If so, it shouldn't be printed
    /// or used for vcalls in the most derived class.
    bool Shadowed;

    /// UsesExtraSlot - Indicates if this vftable slot was created because
    /// any of the overridden slots required a return adjusting thunk.
    bool UsesExtraSlot;

    MethodInfo(uint64_t VBTableIndex, uint64_t VFTableIndex,
               bool UsesExtraSlot = false)
        : VBTableIndex(VBTableIndex), VFTableIndex(VFTableIndex),
          Shadowed(false), UsesExtraSlot(UsesExtraSlot) {}

    MethodInfo()
        : VBTableIndex(0), VFTableIndex(0), Shadowed(false),
          UsesExtraSlot(false) {}
  };

  typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy;

  /// MethodInfoMap - The information for all methods in the vftable we're
  /// currently building.
  MethodInfoMapTy MethodInfoMap;

  typedef llvm::DenseMap<uint64_t, ThunkInfo> VTableThunksMapTy;

  /// VTableThunks - The thunks by vftable index in the vftable currently being
  /// built.
  VTableThunksMapTy VTableThunks;

  typedef SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
  typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;

  /// Thunks - A map that contains all the thunks needed for all methods in the
  /// most derived class for which the vftable is currently being built.
  ThunksMapTy Thunks;

  /// AddThunk - Add a thunk for the given method, ignoring duplicates.
  void AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk) {
    SmallVector<ThunkInfo, 1> &ThunksVector = Thunks[MD];

    // Check if we have this thunk already.
    if (std::find(ThunksVector.begin(), ThunksVector.end(), Thunk) !=
        ThunksVector.end())
      return;

    ThunksVector.push_back(Thunk);
  }

  /// ComputeThisOffset - Returns the 'this' argument offset for the given
  /// method, relative to the beginning of the MostDerivedClass.
  CharUnits ComputeThisOffset(FinalOverriders::OverriderInfo Overrider);

  void CalculateVtordispAdjustment(FinalOverriders::OverriderInfo Overrider,
                                   CharUnits ThisOffset, ThisAdjustment &TA);

  /// AddMethod - Add a single virtual member function to the vftable
  /// components vector, recording any thunk required by the slot.
  void AddMethod(const CXXMethodDecl *MD, ThunkInfo TI) {
    if (!TI.isEmpty()) {
      // Index the thunk both by its slot and by its method.
      VTableThunks[Components.size()] = TI;
      AddThunk(MD, TI);
    }
    if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
      assert(TI.Return.isEmpty() &&
             "Destructor can't have return adjustment!");
      Components.push_back(VTableComponent::MakeDeletingDtor(DD));
    } else {
      Components.push_back(VTableComponent::MakeFunction(MD));
    }
  }

  /// AddMethods - Add the methods of this base subobject and the relevant
  /// subbases to the vftable we're currently laying out.
  void AddMethods(BaseSubobject Base, unsigned BaseDepth,
                  const CXXRecordDecl *LastVBase,
                  BasesSetVectorTy &VisitedBases);

  /// LayoutVFTable - Drive the layout: emit the optional RTTI entry, walk
  /// the bases adding method slots, then record the final slot of each
  /// method that MostDerivedClass itself declares or overrides.
  void LayoutVFTable() {
    // RTTI data goes before all other entries.
    if (HasRTTIComponent)
      Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass));

    BasesSetVectorTy VisitedBases;
    AddMethods(BaseSubobject(MostDerivedClass, CharUnits::Zero()), 0, nullptr,
               VisitedBases);
    assert((HasRTTIComponent ? Components.size() - 1 : Components.size()) &&
           "vftable can't be empty");

    assert(MethodVFTableLocations.empty());
    for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
         E = MethodInfoMap.end(); I != E; ++I) {
      const CXXMethodDecl *MD = I->first;
      const MethodInfo &MI = I->second;
      // Skip the methods that the MostDerivedClass didn't override
      // and the entries shadowed by return adjusting thunks.
      if (MD->getParent() != MostDerivedClass || MI.Shadowed)
        continue;
      MethodVFTableLocation Loc(MI.VBTableIndex, WhichVFPtr.getVBaseWithVPtr(),
                                WhichVFPtr.NonVirtualOffset, MI.VFTableIndex);
      if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
        // Destructors occupy the deleting-destructor slot in this ABI.
        MethodVFTableLocations[GlobalDecl(DD, Dtor_Deleting)] = Loc;
      } else {
        MethodVFTableLocations[MD] = Loc;
      }
    }
  }

public:
  VFTableBuilder(MicrosoftVTableContext &VTables,
                 const CXXRecordDecl *MostDerivedClass, const VPtrInfo *Which)
      : VTables(VTables),
        Context(MostDerivedClass->getASTContext()),
        MostDerivedClass(MostDerivedClass),
        MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)),
        WhichVFPtr(*Which),
        Overriders(MostDerivedClass, CharUnits(), MostDerivedClass) {
    // Only include the RTTI component if we know that we will provide a
    // definition of the vftable.
    HasRTTIComponent = Context.getLangOpts().RTTIData &&
                       !MostDerivedClass->hasAttr<DLLImportAttr>() &&
                       MostDerivedClass->getTemplateSpecializationKind() !=
                           TSK_ExplicitInstantiationDeclaration;

    // Layout happens eagerly at construction time.
    LayoutVFTable();

    if (Context.getLangOpts().DumpVTableLayouts)
      dumpLayout(llvm::outs());
  }

  uint64_t getNumThunks() const { return Thunks.size(); }

  ThunksMapTy::const_iterator thunks_begin() const { return Thunks.begin(); }

  ThunksMapTy::const_iterator thunks_end() const { return Thunks.end(); }

  method_locations_range vtable_locations() const {
    return method_locations_range(MethodVFTableLocations.begin(),
                                  MethodVFTableLocations.end());
  }

  uint64_t getNumVTableComponents() const { return Components.size(); }

  const VTableComponent *vtable_component_begin() const {
    return Components.begin();
  }

  const VTableComponent *vtable_component_end() const {
    return Components.end();
  }

  VTableThunksMapTy::const_iterator vtable_thunks_begin() const {
    return VTableThunks.begin();
  }

  VTableThunksMapTy::const_iterator vtable_thunks_end() const {
    return VTableThunks.end();
  }

  void dumpLayout(raw_ostream &);
};
/// InitialOverriddenDefinitionCollector - Finds the set of least derived bases
/// that define the given method.
struct InitialOverriddenDefinitionCollector {
  BasesSetVectorTy Bases;
  OverriddenMethodsSetTy VisitedOverriddenMethods;

  /// Record the parent class of each least-derived declaration reached,
  /// i.e. a declaration that does not itself override anything.
  bool visit(const CXXMethodDecl *OverriddenMD) {
    const bool IsLeastDerived = OverriddenMD->size_overridden_methods() == 0;
    if (IsLeastDerived)
      Bases.insert(OverriddenMD->getParent());
    // Recurse only into methods we haven't collected yet.
    return VisitedOverriddenMethods.insert(OverriddenMD).second;
  }
};
} // end namespace
static bool BaseInSet(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path, void *BasesSet) {
BasesSetVectorTy *Bases = (BasesSetVectorTy *)BasesSet;
return Bases->count(Specifier->getType()->getAsCXXRecordDecl());
}
// Let's study one class hierarchy as an example:
// struct A {
// virtual void f();
// int x;
// };
//
// struct B : virtual A {
// virtual void f();
// };
//
// Record layouts:
// struct A:
// 0 | (A vftable pointer)
// 4 | int x
//
// struct B:
// 0 | (B vbtable pointer)
// 4 | struct A (virtual base)
// 4 | (A vftable pointer)
// 8 | int x
//
// Let's assume we have a pointer to the A part of an object of dynamic type B:
// B b;
// A *a = (A*)&b;
// a->f();
//
// In this hierarchy, f() belongs to the vftable of A, so B::f() expects
// "this" parameter to point at the A subobject, which is B+4.
// In the B::f() prologue, it adjusts "this" back to B by subtracting 4,
// performed as a *static* adjustment.
//
// Interesting thing happens when we alter the relative placement of A and B
// subobjects in a class:
// struct C : virtual B { };
//
// C c;
// A *a = (A*)&c;
// a->f();
//
// Respective record layout is:
// 0 | (C vbtable pointer)
// 4 | struct A (virtual base)
// 4 | (A vftable pointer)
// 8 | int x
// 12 | struct B (virtual base)
// 12 | (B vbtable pointer)
//
// The final overrider of f() in class C is still B::f(), so B+4 should be
// passed as "this" to that code. However, "a" points at B-8, so the respective
// vftable entry should hold a thunk that adds 12 to the "this" argument before
// performing a tail call to B::f().
//
// With this example in mind, we can now calculate the 'this' argument offset
// for the given method, relative to the beginning of the MostDerivedClass.
CharUnits
VFTableBuilder::ComputeThisOffset(FinalOverriders::OverriderInfo Overrider) {
  // Collect the least-derived bases that declare (not override) the method.
  InitialOverriddenDefinitionCollector Collector;
  visitAllOverriddenMethods(Overrider.Method, Collector);

  // If there are no overrides then 'this' is located
  // in the base that defines the method.
  if (Collector.Bases.size() == 0)
    return Overrider.Offset;

  CXXBasePaths Paths;
  Overrider.Method->getParent()->lookupInBases(BaseInSet, &Collector.Bases,
                                               Paths);

  // This will hold the smallest this offset among overridees of MD.
  // This implies that an offset of a non-virtual base will dominate an offset
  // of a virtual base to potentially reduce the number of thunks required
  // in the derived classes that inherit this method.
  CharUnits Ret;
  bool First = true;

  const ASTRecordLayout &OverriderRDLayout =
      Context.getASTRecordLayout(Overrider.Method->getParent());
  for (CXXBasePaths::paths_iterator I = Paths.begin(), E = Paths.end();
       I != E; ++I) {
    const CXXBasePath &Path = (*I);
    CharUnits ThisOffset = Overrider.Offset;
    // Offset of the last virtual base crossed on this path; stays
    // default-constructed (zero) if the path crosses no virtual base.
    CharUnits LastVBaseOffset;

    // For each path from the overrider to the parents of the overridden
    // methods, traverse the path, calculating the this offset in the most
    // derived class.
    for (int J = 0, F = Path.size(); J != F; ++J) {
      const CXXBasePathElement &Element = Path[J];
      QualType CurTy = Element.Base->getType();
      const CXXRecordDecl *PrevRD = Element.Class,
                          *CurRD = CurTy->getAsCXXRecordDecl();
      const ASTRecordLayout &Layout = Context.getASTRecordLayout(PrevRD);

      if (Element.Base->isVirtual()) {
        // The interesting things begin when you have virtual inheritance.
        // The final overrider will use a static adjustment equal to the offset
        // of the vbase in the final overrider class.
        // For example, if the final overrider is in a vbase B of the most
        // derived class and it overrides a method of the B's own vbase A,
        // it uses A* as "this".  In its prologue, it can cast A* to B* with
        // a static offset.  This offset is used regardless of the actual
        // offset of A from B in the most derived class, requiring an
        // this-adjusting thunk in the vftable if A and B are laid out
        // differently in the most derived class.
        LastVBaseOffset = ThisOffset =
            Overrider.Offset + OverriderRDLayout.getVBaseClassOffset(CurRD);
      } else {
        ThisOffset += Layout.getBaseClassOffset(CurRD);
      }
    }

    if (isa<CXXDestructorDecl>(Overrider.Method)) {
      // NOTE(review): isZero() is used as a "no virtual base on this path"
      // sentinel; it would also fire for a virtual base laid out at offset
      // zero -- confirm that cannot occur in this ABI.
      if (LastVBaseOffset.isZero()) {
        // If a "Base" class has at least one non-virtual base with a virtual
        // destructor, the "Base" virtual destructor will take the address
        // of the "Base" subobject as the "this" argument.
        ThisOffset = Overrider.Offset;
      } else {
        // A virtual destructor of a virtual base takes the address of the
        // virtual base subobject as the "this" argument.
        ThisOffset = LastVBaseOffset;
      }
    }

    // Keep the smallest offset seen across all paths.
    if (Ret > ThisOffset || First) {
      First = false;
      Ret = ThisOffset;
    }
  }

  assert(!First && "Method not found in the given subobject?");

  return Ret;
}
// Things are getting even more complex when the "this" adjustment has to
// use a dynamic offset instead of a static one, or even two dynamic offsets.
// This is sometimes required when a virtual call happens in the middle of
// a non-most-derived class construction or destruction.
//
// Let's take a look at the following example:
// struct A {
// virtual void f();
// };
//
// void foo(A *a) { a->f(); } // Knows nothing about siblings of A.
//
// struct B : virtual A {
// virtual void f();
// B() {
// foo(this);
// }
// };
//
// struct C : virtual B {
// virtual void f();
// };
//
// Record layouts for these classes are:
// struct A
// 0 | (A vftable pointer)
//
// struct B
// 0 | (B vbtable pointer)
// 4 | (vtordisp for vbase A)
// 8 | struct A (virtual base)
// 8 | (A vftable pointer)
//
// struct C
// 0 | (C vbtable pointer)
// 4 | (vtordisp for vbase A)
// 8 | struct A (virtual base) // A precedes B!
// 8 | (A vftable pointer)
// 12 | struct B (virtual base)
// 12 | (B vbtable pointer)
//
// When one creates an object of type C, the C constructor:
// - initializes all the vbptrs, then
// - calls the A subobject constructor
// (initializes A's vfptr with an address of A vftable), then
// - calls the B subobject constructor
// (initializes A's vfptr with an address of B vftable and vtordisp for A),
// that in turn calls foo(), then
// - initializes A's vfptr with an address of C vftable and zeroes out the
// vtordisp
// FIXME: if a structor knows it belongs to MDC, why doesn't it use a vftable
// without vtordisp thunks?
// FIXME: how are vtordisp handled in the presence of nooverride/final?
//
// When foo() is called, an object with a layout of class C has a vftable
// referencing B::f() that assumes a B layout, so the "this" adjustments are
// incorrect, unless an extra adjustment is done. This adjustment is called
// "vtordisp adjustment". Vtordisp basically holds the difference between the
// actual location of a vbase in the layout class and the location assumed by
// the vftable of the class being constructed/destructed. Vtordisp is only
// needed if "this" escapes a
// structor (or we can't prove otherwise).
// [i.e. vtordisp is a dynamic adjustment for a static adjustment, which is an
// estimation of a dynamic adjustment]
//
// foo() gets a pointer to the A vbase and doesn't know anything about B or C,
// so it just passes that pointer as "this" in a virtual call.
// If there was no vtordisp, that would just dispatch to B::f().
// However, B::f() assumes B+8 is passed as "this",
// yet the pointer foo() passes along is B-4 (i.e. C+8).
// An extra adjustment is needed, so we emit a thunk into the B vftable.
// This vtordisp thunk subtracts the value of vtordisp
// from the "this" argument (-12) before making a tailcall to B::f().
//
// Let's consider an even more complex example:
// struct D : virtual B, virtual C {
// D() {
// foo(this);
// }
// };
//
// struct D
// 0 | (D vbtable pointer)
// 4 | (vtordisp for vbase A)
// 8 | struct A (virtual base) // A precedes both B and C!
// 8 | (A vftable pointer)
// 12 | struct B (virtual base) // B precedes C!
// 12 | (B vbtable pointer)
// 16 | struct C (virtual base)
// 16 | (C vbtable pointer)
//
// When D::D() calls foo(), we find ourselves in a thunk that should tailcall
// to C::f(), which assumes C+8 as its "this" parameter. This time, foo()
// passes along A, which is C-8. The A vtordisp holds
// "D.vbptr[index_of_A] - offset_of_A_in_D"
// and we statically know offset_of_A_in_D, so can get a pointer to D.
// When we know it, we can make an extra vbtable lookup to locate the C vbase
// and one extra static adjustment to calculate the expected value of C+8.
/// CalculateVtordispAdjustment - Fill in the virtual ('vtordisp') parts of
/// the this-adjustment TA for a call to Overrider whose static this offset
/// is ThisOffset.  See the long comment above for the motivating examples.
void VFTableBuilder::CalculateVtordispAdjustment(
    FinalOverriders::OverriderInfo Overrider, CharUnits ThisOffset,
    ThisAdjustment &TA) {
  const ASTRecordLayout::VBaseOffsetsMapTy &VBaseMap =
      MostDerivedClassLayout.getVBaseOffsetsMap();
  const ASTRecordLayout::VBaseOffsetsMapTy::const_iterator &VBaseMapEntry =
      VBaseMap.find(WhichVFPtr.getVBaseWithVPtr());
  assert(VBaseMapEntry != VBaseMap.end());

  // If there's no vtordisp or the final overrider is defined in the same vbase
  // as the initial declaration, we don't need any vtordisp adjustment.
  if (!VBaseMapEntry->second.hasVtorDisp() ||
      Overrider.VirtualBase == WhichVFPtr.getVBaseWithVPtr())
    return;

  // OK, now we know we need to use a vtordisp thunk.
  // The implicit vtordisp field is located right before the vbase.
  // (The '4' subtracted here is the size of the vtordisp field; presumably
  // sizeof(int) on all MS ABI targets -- TODO confirm.)
  CharUnits OffsetOfVBaseWithVFPtr = VBaseMapEntry->second.VBaseOffset;
  TA.Virtual.Microsoft.VtordispOffset =
      (OffsetOfVBaseWithVFPtr - WhichVFPtr.FullOffsetInMDC).getQuantity() - 4;

  // A simple vtordisp thunk will suffice if the final overrider is defined
  // in either the most derived class or its non-virtual base.
  if (Overrider.Method->getParent() == MostDerivedClass ||
      !Overrider.VirtualBase)
    return;

  // Otherwise, we need to use the dynamic offset of the final overrider
  // in order to get "this" adjustment right.
  TA.Virtual.Microsoft.VBPtrOffset =
      (OffsetOfVBaseWithVFPtr + WhichVFPtr.NonVirtualOffset -
       MostDerivedClassLayout.getVBPtrOffset()).getQuantity();
  // Each vbtable entry is int-sized; scale the vbtable index into a byte
  // offset within the vbtable.
  TA.Virtual.Microsoft.VBOffsetOffset =
      Context.getTypeSizeInChars(Context.IntTy).getQuantity() *
      VTables.getVBTableIndex(MostDerivedClass, Overrider.VirtualBase);

  TA.NonVirtual = (ThisOffset - Overrider.Offset).getQuantity();
}
static void GroupNewVirtualOverloads(
const CXXRecordDecl *RD,
SmallVector<const CXXMethodDecl *, 10> &VirtualMethods) {
// Put the virtual methods into VirtualMethods in the proper order:
// 1) Group overloads by declaration name. New groups are added to the
// vftable in the order of their first declarations in this class
// (including overrides and non-virtual methods).
// 2) In each group, new overloads appear in the reverse order of declaration.
typedef SmallVector<const CXXMethodDecl *, 1> MethodGroup;
SmallVector<MethodGroup, 10> Groups;
typedef llvm::DenseMap<DeclarationName, unsigned> VisitedGroupIndicesTy;
VisitedGroupIndicesTy VisitedGroupIndices;
for (const auto *MD : RD->methods()) {
MD = MD->getCanonicalDecl();
VisitedGroupIndicesTy::iterator J;
bool Inserted;
std::tie(J, Inserted) = VisitedGroupIndices.insert(
std::make_pair(MD->getDeclName(), Groups.size()));
if (Inserted)
Groups.push_back(MethodGroup());
if (MD->isVirtual())
Groups[J->second].push_back(MD);
}
for (unsigned I = 0, E = Groups.size(); I != E; ++I)
VirtualMethods.append(Groups[I].rbegin(), Groups[I].rend());
}
/// isDirectVBase - Returns true if Base appears among RD's direct bases as
/// a virtual base.
static bool isDirectVBase(const CXXRecordDecl *Base, const CXXRecordDecl *RD) {
  for (const auto &Spec : RD->bases()) {
    if (!Spec.isVirtual())
      continue;
    if (Spec.getType()->getAsCXXRecordDecl() == Base)
      return true;
  }
  return false;
}
void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
const CXXRecordDecl *LastVBase,
BasesSetVectorTy &VisitedBases) {
const CXXRecordDecl *RD = Base.getBase();
if (!RD->isPolymorphic())
return;
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
// See if this class expands a vftable of the base we look at, which is either
// the one defined by the vfptr base path or the primary base of the current
// class.
const CXXRecordDecl *NextBase = nullptr, *NextLastVBase = LastVBase;
CharUnits NextBaseOffset;
if (BaseDepth < WhichVFPtr.PathToBaseWithVPtr.size()) {
NextBase = WhichVFPtr.PathToBaseWithVPtr[BaseDepth];
if (isDirectVBase(NextBase, RD)) {
NextLastVBase = NextBase;
NextBaseOffset = MostDerivedClassLayout.getVBaseClassOffset(NextBase);
} else {
NextBaseOffset =
Base.getBaseOffset() + Layout.getBaseClassOffset(NextBase);
}
} else if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
assert(!Layout.isPrimaryBaseVirtual() &&
"No primary virtual bases in this ABI");
NextBase = PrimaryBase;
NextBaseOffset = Base.getBaseOffset();
}
if (NextBase) {
AddMethods(BaseSubobject(NextBase, NextBaseOffset), BaseDepth + 1,
NextLastVBase, VisitedBases);
if (!VisitedBases.insert(NextBase))
llvm_unreachable("Found a duplicate primary base!");
}
SmallVector<const CXXMethodDecl*, 10> VirtualMethods;
// Put virtual methods in the proper order.
GroupNewVirtualOverloads(RD, VirtualMethods);
// Now go through all virtual member functions and add them to the current
// vftable. This is done by
// - replacing overridden methods in their existing slots, as long as they
// don't require return adjustment; calculating This adjustment if needed.
// - adding new slots for methods of the current base not present in any
// sub-bases;
// - adding new slots for methods that require Return adjustment.
// We keep track of the methods visited in the sub-bases in MethodInfoMap.
for (unsigned I = 0, E = VirtualMethods.size(); I != E; ++I) {
const CXXMethodDecl *MD = VirtualMethods[I];
FinalOverriders::OverriderInfo FinalOverrider =
Overriders.getOverrider(MD, Base.getBaseOffset());
const CXXMethodDecl *FinalOverriderMD = FinalOverrider.Method;
const CXXMethodDecl *OverriddenMD =
FindNearestOverriddenMethod(MD, VisitedBases);
ThisAdjustment ThisAdjustmentOffset;
bool ReturnAdjustingThunk = false, ForceReturnAdjustmentMangling = false;
CharUnits ThisOffset = ComputeThisOffset(FinalOverrider);
ThisAdjustmentOffset.NonVirtual =
(ThisOffset - WhichVFPtr.FullOffsetInMDC).getQuantity();
if ((OverriddenMD || FinalOverriderMD != MD) &&
WhichVFPtr.getVBaseWithVPtr())
CalculateVtordispAdjustment(FinalOverrider, ThisOffset,
ThisAdjustmentOffset);
if (OverriddenMD) {
// If MD overrides anything in this vftable, we need to update the
// entries.
MethodInfoMapTy::iterator OverriddenMDIterator =
MethodInfoMap.find(OverriddenMD);
// If the overridden method went to a different vftable, skip it.
if (OverriddenMDIterator == MethodInfoMap.end())
continue;
MethodInfo &OverriddenMethodInfo = OverriddenMDIterator->second;
// Let's check if the overrider requires any return adjustments.
// We must create a new slot if the MD's return type is not trivially
// convertible to the OverriddenMD's one.
// Once a chain of method overrides adds a return adjusting vftable slot,
// all subsequent overrides will also use an extra method slot.
ReturnAdjustingThunk = !ComputeReturnAdjustmentBaseOffset(
Context, MD, OverriddenMD).isEmpty() ||
OverriddenMethodInfo.UsesExtraSlot;
if (!ReturnAdjustingThunk) {
// No return adjustment needed - just replace the overridden method info
// with the current info.
MethodInfo MI(OverriddenMethodInfo.VBTableIndex,
OverriddenMethodInfo.VFTableIndex);
MethodInfoMap.erase(OverriddenMDIterator);
assert(!MethodInfoMap.count(MD) &&
"Should not have method info for this method yet!");
MethodInfoMap.insert(std::make_pair(MD, MI));
continue;
}
// In case we need a return adjustment, we'll add a new slot for
// the overrider. Mark the overriden method as shadowed by the new slot.
OverriddenMethodInfo.Shadowed = true;
// Force a special name mangling for a return-adjusting thunk
// unless the method is the final overrider without this adjustment.
ForceReturnAdjustmentMangling =
!(MD == FinalOverriderMD && ThisAdjustmentOffset.isEmpty());
} else if (Base.getBaseOffset() != WhichVFPtr.FullOffsetInMDC ||
MD->size_overridden_methods()) {
// Skip methods that don't belong to the vftable of the current class,
// e.g. each method that wasn't seen in any of the visited sub-bases
// but overrides multiple methods of other sub-bases.
continue;
}
// If we got here, MD is a method not seen in any of the sub-bases or
// it requires return adjustment. Insert the method info for this method.
unsigned VBIndex =
LastVBase ? VTables.getVBTableIndex(MostDerivedClass, LastVBase) : 0;
MethodInfo MI(VBIndex,
HasRTTIComponent ? Components.size() - 1 : Components.size(),
ReturnAdjustingThunk);
assert(!MethodInfoMap.count(MD) &&
"Should not have method info for this method yet!");
MethodInfoMap.insert(std::make_pair(MD, MI));
// Check if this overrider needs a return adjustment.
// We don't want to do this for pure virtual member functions.
BaseOffset ReturnAdjustmentOffset;
ReturnAdjustment ReturnAdjustment;
if (!FinalOverriderMD->isPure()) {
ReturnAdjustmentOffset =
ComputeReturnAdjustmentBaseOffset(Context, FinalOverriderMD, MD);
}
if (!ReturnAdjustmentOffset.isEmpty()) {
ForceReturnAdjustmentMangling = true;
ReturnAdjustment.NonVirtual =
ReturnAdjustmentOffset.NonVirtualOffset.getQuantity();
if (ReturnAdjustmentOffset.VirtualBase) {
const ASTRecordLayout &DerivedLayout =
Context.getASTRecordLayout(ReturnAdjustmentOffset.DerivedClass);
ReturnAdjustment.Virtual.Microsoft.VBPtrOffset =
DerivedLayout.getVBPtrOffset().getQuantity();
ReturnAdjustment.Virtual.Microsoft.VBIndex =
VTables.getVBTableIndex(ReturnAdjustmentOffset.DerivedClass,
ReturnAdjustmentOffset.VirtualBase);
}
}
AddMethod(FinalOverriderMD,
ThunkInfo(ThisAdjustmentOffset, ReturnAdjustment,
ForceReturnAdjustmentMangling ? MD : nullptr));
}
}
/// Print the inheritance path to the base with the vfptr, walking the stored
/// path in reverse and emitting each step as "'Name' in ".
static void PrintBasePath(const VPtrInfo::BasePath &Path, raw_ostream &Out) {
  VPtrInfo::BasePath::const_reverse_iterator It = Path.rbegin();
  VPtrInfo::BasePath::const_reverse_iterator End = Path.rend();
  while (It != End) {
    Out << "'";
    (*It)->printQualifiedName(Out);
    Out << "' in ";
    ++It;
  }
}
/// Print a human-readable description of the return adjustment and the
/// this-adjustment stored in \p TI. When \p ContinueFirstLine is true, the
/// first adjustment continues the caller's current output line; otherwise each
/// adjustment starts on a fresh line using LinePrefix.
static void dumpMicrosoftThunkAdjustment(const ThunkInfo &TI, raw_ostream &Out,
                                         bool ContinueFirstLine) {
  const ReturnAdjustment &R = TI.Return;
  bool Multiline = false;
  const char *LinePrefix = "\n ";
  // TI.Method is only set for return-adjusting thunks, so either condition
  // indicates a return adjustment to describe.
  if (!R.isEmpty() || TI.Method) {
    if (!ContinueFirstLine)
      Out << LinePrefix;
    Out << "[return adjustment (to type '"
        << TI.Method->getReturnType().getCanonicalType().getAsString()
        << "'): ";
    if (R.Virtual.Microsoft.VBPtrOffset)
      Out << "vbptr at offset " << R.Virtual.Microsoft.VBPtrOffset << ", ";
    if (R.Virtual.Microsoft.VBIndex)
      Out << "vbase #" << R.Virtual.Microsoft.VBIndex << ", ";
    Out << R.NonVirtual << " non-virtual]";
    Multiline = true;
  }
  const ThisAdjustment &T = TI.This;
  if (!T.isEmpty()) {
    if (Multiline || !ContinueFirstLine)
      Out << LinePrefix;
    Out << "[this adjustment: ";
    if (!TI.This.Virtual.isEmpty()) {
      // The vtordisp field sits below the vbase in MS layout, hence negative.
      assert(T.Virtual.Microsoft.VtordispOffset < 0);
      Out << "vtordisp at " << T.Virtual.Microsoft.VtordispOffset << ", ";
      if (T.Virtual.Microsoft.VBPtrOffset) {
        Out << "vbptr at " << T.Virtual.Microsoft.VBPtrOffset
            << " to the left,";
        assert(T.Virtual.Microsoft.VBOffsetOffset > 0);
        Out << LinePrefix << " vboffset at "
            << T.Virtual.Microsoft.VBOffsetOffset << " in the vbtable, ";
      }
    }
    Out << T.NonVirtual << " non-virtual]";
  }
}
/// Dump a human-readable description of this vftable: a header naming the
/// table, one line per component (RTTI, function pointer, or scalar deleting
/// destructor), and finally the thunks required by the table's methods,
/// grouped per method in a stable (name-sorted) order.
void VFTableBuilder::dumpLayout(raw_ostream &Out) {
  Out << "VFTable for ";
  PrintBasePath(WhichVFPtr.PathToBaseWithVPtr, Out);
  Out << "'";
  MostDerivedClass->printQualifiedName(Out);
  Out << "' (" << Components.size()
      << (Components.size() == 1 ? " entry" : " entries") << ").\n";
  for (unsigned I = 0, E = Components.size(); I != E; ++I) {
    Out << llvm::format("%4d | ", I);
    const VTableComponent &Component = Components[I];
    // Dump the component.
    switch (Component.getKind()) {
    case VTableComponent::CK_RTTI:
      Component.getRTTIDecl()->printQualifiedName(Out);
      Out << " RTTI";
      break;
    case VTableComponent::CK_FunctionPointer: {
      const CXXMethodDecl *MD = Component.getFunctionDecl();
      // FIXME: Figure out how to print the real thunk type, since they can
      // differ in the return type.
      std::string Str = PredefinedExpr::ComputeName(
          PredefinedExpr::PrettyFunctionNoVirtual, MD);
      Out << Str;
      if (MD->isPure())
        Out << " [pure]";
      if (MD->isDeleted())
        Out << " [deleted]";
      // Describe any adjustment this slot's thunk performs.
      ThunkInfo Thunk = VTableThunks.lookup(I);
      if (!Thunk.isEmpty())
        dumpMicrosoftThunkAdjustment(Thunk, Out, /*ContinueFirstLine=*/false);
      break;
    }
    case VTableComponent::CK_DeletingDtorPointer: {
      const CXXDestructorDecl *DD = Component.getDestructorDecl();
      DD->printQualifiedName(Out);
      Out << "() [scalar deleting]";
      if (DD->isPure())
        Out << " [pure]";
      ThunkInfo Thunk = VTableThunks.lookup(I);
      if (!Thunk.isEmpty()) {
        // Destructors return void, so a return adjustment is impossible.
        assert(Thunk.Return.isEmpty() &&
               "No return adjustment needed for destructors!");
        dumpMicrosoftThunkAdjustment(Thunk, Out, /*ContinueFirstLine=*/false);
      }
      break;
    }
    default:
      DiagnosticsEngine &Diags = Context.getDiagnostics();
      unsigned DiagID = Diags.getCustomDiagID(
          DiagnosticsEngine::Error,
          "Unexpected vftable component type %0 for component number %1");
      Diags.Report(MostDerivedClass->getLocation(), DiagID)
          << I << Component.getKind();
    }
    Out << '\n';
  }
  Out << '\n';
  if (!Thunks.empty()) {
    // We store the method names in a map to get a stable order.
    std::map<std::string, const CXXMethodDecl *> MethodNamesAndDecls;
    for (ThunksMapTy::const_iterator I = Thunks.begin(), E = Thunks.end();
         I != E; ++I) {
      const CXXMethodDecl *MD = I->first;
      std::string MethodName = PredefinedExpr::ComputeName(
          PredefinedExpr::PrettyFunctionNoVirtual, MD);
      MethodNamesAndDecls.insert(std::make_pair(MethodName, MD));
    }
    for (std::map<std::string, const CXXMethodDecl *>::const_iterator
             I = MethodNamesAndDecls.begin(),
             E = MethodNamesAndDecls.end();
         I != E; ++I) {
      const std::string &MethodName = I->first;
      const CXXMethodDecl *MD = I->second;
      ThunkInfoVectorTy ThunksVector = Thunks[MD];
      std::stable_sort(ThunksVector.begin(), ThunksVector.end(),
                       [](const ThunkInfo &LHS, const ThunkInfo &RHS) {
        // Keep different thunks with the same adjustments in the order they
        // were put into the vector.
        return std::tie(LHS.This, LHS.Return) < std::tie(RHS.This, RHS.Return);
      });
      Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size();
      Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n";
      for (unsigned I = 0, E = ThunksVector.size(); I != E; ++I) {
        const ThunkInfo &Thunk = ThunksVector[I];
        Out << llvm::format("%4d | ", I);
        dumpMicrosoftThunkAdjustment(Thunk, Out, /*ContinueFirstLine=*/true);
        Out << '\n';
      }
      Out << '\n';
    }
  }
  Out.flush();
}
/// Return true if any element of \p B is also a member of the set \p A.
static bool setsIntersect(const llvm::SmallPtrSet<const CXXRecordDecl *, 4> &A,
                          ArrayRef<const CXXRecordDecl *> B) {
  for (const CXXRecordDecl *Elem : B)
    if (A.count(Elem))
      return true;
  return false;
}
static bool rebucketPaths(VPtrInfoVector &Paths);
/// Produces MSVC-compatible vbtable data. The symbols produced by this
/// algorithm match those produced by MSVC 2012 and newer, which is different
/// from MSVC 2010.
///
/// MSVC 2012 appears to minimize the vbtable names using the following
/// algorithm. First, walk the class hierarchy in the usual order, depth first,
/// left to right, to find all of the subobjects which contain a vbptr field.
/// Visiting each class node yields a list of inheritance paths to vbptrs. Each
/// record with a vbptr creates an initially empty path.
///
/// To combine paths from child nodes, the paths are compared to check for
/// ambiguity. Paths are "ambiguous" if multiple paths have the same set of
/// components in the same order. Each group of ambiguous paths is extended by
/// appending the class of the base from which it came. If the current class
/// node produced an ambiguous path, its path is extended with the current class.
/// After extending paths, MSVC again checks for ambiguity, and extends any
/// ambiguous path which wasn't already extended. Because each node yields an
/// unambiguous set of paths, MSVC doesn't need to extend any path more than once
/// to produce an unambiguous set of paths.
///
/// TODO: Presumably vftables use the same algorithm.
void MicrosoftVTableContext::computeVTablePaths(bool ForVBTables,
                                                const CXXRecordDecl *RD,
                                                VPtrInfoVector &Paths) {
  assert(Paths.empty());
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
  // Base case: this subobject has its own vptr.
  if (ForVBTables ? Layout.hasOwnVBPtr() : Layout.hasOwnVFPtr())
    Paths.push_back(new VPtrInfo(RD));
  // Recursive case: get all the vbtables from our bases and remove anything
  // that shares a virtual base.
  llvm::SmallPtrSet<const CXXRecordDecl*, 4> VBasesSeen;
  for (const auto &B : RD->bases()) {
    const CXXRecordDecl *Base = B.getType()->getAsCXXRecordDecl();
    // A virtual base contributes only once, no matter how many bases share it.
    if (B.isVirtual() && VBasesSeen.count(Base))
      continue;
    if (!Base->isDynamicClass())
      continue;
    const VPtrInfoVector &BasePaths =
        ForVBTables ? enumerateVBTables(Base) : getVFPtrOffsets(Base);
    for (VPtrInfo *BaseInfo : BasePaths) {
      // Don't include the path if it goes through a virtual base that we've
      // already included.
      if (setsIntersect(VBasesSeen, BaseInfo->ContainingVBases))
        continue;
      // Copy the path and adjust it as necessary.
      VPtrInfo *P = new VPtrInfo(*BaseInfo);
      // We mangle Base into the path if the path would've been ambiguous and it
      // wasn't already extended with Base.
      if (P->MangledPath.empty() || P->MangledPath.back() != Base)
        P->NextBaseToMangle = Base;
      // Keep track of which vtable the derived class is going to extend with
      // new methods or bases. We append to either the vftable of our primary
      // base, or the first non-virtual base that has a vbtable.
      if (P->ReusingBase == Base &&
          Base == (ForVBTables ? Layout.getBaseSharingVBPtr()
                               : Layout.getPrimaryBase()))
        P->ReusingBase = RD;
      // Keep track of the full adjustment from the MDC to this vtable. The
      // adjustment is captured by an optional vbase and a non-virtual offset.
      if (B.isVirtual())
        P->ContainingVBases.push_back(Base);
      else if (P->ContainingVBases.empty())
        P->NonVirtualOffset += Layout.getBaseClassOffset(Base);
      // Update the full offset in the MDC.
      P->FullOffsetInMDC = P->NonVirtualOffset;
      if (const CXXRecordDecl *VB = P->getVBaseWithVPtr())
        P->FullOffsetInMDC += Layout.getVBaseClassOffset(VB);
      Paths.push_back(P);
    }
    if (B.isVirtual())
      VBasesSeen.insert(Base);
    // After visiting any direct base, we've transitively visited all of its
    // morally virtual bases.
    for (const auto &VB : Base->vbases())
      VBasesSeen.insert(VB.getType()->getAsCXXRecordDecl());
  }
  // Sort the paths into buckets, and if any of them are ambiguous, extend all
  // paths in ambiguous buckets.
  bool Changed = true;
  while (Changed)
    Changed = rebucketPaths(Paths);
}
/// If P has a base queued for mangling, append it to the mangled path and
/// return true; otherwise leave P untouched and return false.
static bool extendPath(VPtrInfo *P) {
  const CXXRecordDecl *Pending = P->NextBaseToMangle;
  if (!Pending)
    return false;
  P->MangledPath.push_back(Pending);
  // Clear the queued base so the same path cannot be extended twice.
  P->NextBaseToMangle = nullptr;
  return true;
}
/// Bucket paths with identical mangled paths together and extend every member
/// of each multi-element (ambiguous) bucket. Returns true if any path was
/// extended, so callers iterate this to a fixed point.
static bool rebucketPaths(VPtrInfoVector &Paths) {
  // What we're essentially doing here is bucketing together ambiguous paths.
  // Any bucket with more than one path in it gets extended by NextBase, which
  // is usually the direct base of the inherited the vbptr. This code uses a
  // sorted vector to implement a multiset to form the buckets. Note that the
  // ordering is based on pointers, but it doesn't change our output order. The
  // current algorithm is designed to match MSVC 2012's names.
  VPtrInfoVector PathsSorted(Paths);
  std::sort(PathsSorted.begin(), PathsSorted.end(),
            [](const VPtrInfo *LHS, const VPtrInfo *RHS) {
    return LHS->MangledPath < RHS->MangledPath;
  });
  bool Changed = false;
  for (size_t I = 0, E = PathsSorted.size(); I != E;) {
    // Scan forward to find the end of the bucket.
    size_t BucketStart = I;
    do {
      ++I;
    } while (I != E && PathsSorted[BucketStart]->MangledPath ==
                           PathsSorted[I]->MangledPath);
    // If this bucket has multiple paths, extend them all.
    if (I - BucketStart > 1) {
      for (size_t II = BucketStart; II != I; ++II)
        Changed |= extendPath(PathsSorted[II]);
      assert(Changed && "no paths were extended to fix ambiguity");
    }
  }
  return Changed;
}
MicrosoftVTableContext::~MicrosoftVTableContext() {
  // VFPtrLocations owns both the vectors (map values) and the VPtrInfo
  // objects stored inside each vector, so free the inner pointers first.
  for (auto &P : VFPtrLocations)
    llvm::DeleteContainerPointers(*P.second);
  llvm::DeleteContainerSeconds(VFPtrLocations);
  llvm::DeleteContainerSeconds(VFTableLayouts);
  llvm::DeleteContainerSeconds(VBaseInfo);
}
namespace {
// A single inheritance path ordered from the most derived class toward the
// subobject of interest. SetVector preserves insertion order (the path) while
// also allowing fast membership tests via the DenseSet.
typedef llvm::SetVector<BaseSubobject, std::vector<BaseSubobject>,
                        llvm::DenseSet<BaseSubobject>> FullPathTy;
}
// This recursive function finds all paths from a subobject centered at
// (RD, Offset) to the subobject located at BaseWithVPtr. Every complete path
// is appended to Paths; FullPath is the in-progress path for this recursion
// and is restored before returning.
static void findPathsToSubobject(ASTContext &Context,
                                 const ASTRecordLayout &MostDerivedLayout,
                                 const CXXRecordDecl *RD, CharUnits Offset,
                                 BaseSubobject BaseWithVPtr,
                                 FullPathTy &FullPath,
                                 std::list<FullPathTy> &Paths) {
  if (BaseSubobject(RD, Offset) == BaseWithVPtr) {
    Paths.push_back(FullPath);
    return;
  }
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
  for (const CXXBaseSpecifier &BS : RD->bases()) {
    const CXXRecordDecl *Base = BS.getType()->getAsCXXRecordDecl();
    // Virtual bases are located relative to the most derived class; ordinary
    // bases are at a fixed offset within the current subobject.
    CharUnits NewOffset = BS.isVirtual()
                              ? MostDerivedLayout.getVBaseClassOffset(Base)
                              : Offset + Layout.getBaseClassOffset(Base);
    FullPath.insert(BaseSubobject(Base, NewOffset));
    findPathsToSubobject(Context, MostDerivedLayout, Base, NewOffset,
                         BaseWithVPtr, FullPath, Paths);
    FullPath.pop_back();
  }
}
// Return the paths which are not subsets of other paths.
// A path is redundant when every subobject it visits also appears in some
// other, distinct path; such a path adds no information for disambiguation.
static void removeRedundantPaths(std::list<FullPathTy> &FullPaths) {
  FullPaths.remove_if([&](const FullPathTy &SpecificPath) {
    for (const FullPathTy &OtherPath : FullPaths) {
      if (&SpecificPath == &OtherPath)
        continue;
      if (std::all_of(SpecificPath.begin(), SpecificPath.end(),
                      [&](const BaseSubobject &BSO) {
                        return OtherPath.count(BSO) != 0;
                      })) {
        return true;
      }
    }
    return false;
  });
}
// Compute the byte offset of the subobject at the end of FullPath by walking
// the path one base at a time starting from the most derived record RD.
static CharUnits getOffsetOfFullPath(ASTContext &Context,
                                     const CXXRecordDecl *RD,
                                     const FullPathTy &FullPath) {
  const ASTRecordLayout &MostDerivedLayout =
      Context.getASTRecordLayout(RD);
  // -1 is a sentinel meaning "haven't processed the path's first entry yet".
  CharUnits Offset = CharUnits::fromQuantity(-1);
  for (const BaseSubobject &BSO : FullPath) {
    const CXXRecordDecl *Base = BSO.getBase();
    // The first entry in the path is always the most derived record, skip it.
    if (Base == RD) {
      assert(Offset.getQuantity() == -1);
      Offset = CharUnits::Zero();
      continue;
    }
    assert(Offset.getQuantity() != -1);
    // RD is updated at the bottom of the loop, so this is the layout of the
    // class we are stepping out of (Base's immediate parent on the path).
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    // While we know which base has to be traversed, we don't know if that base
    // was a virtual base.
    const CXXBaseSpecifier *BaseBS = std::find_if(
        RD->bases_begin(), RD->bases_end(), [&](const CXXBaseSpecifier &BS) {
          return BS.getType()->getAsCXXRecordDecl() == Base;
        });
    // Virtual bases are positioned relative to the most derived class;
    // non-virtual bases accumulate onto the running offset.
    Offset = BaseBS->isVirtual() ? MostDerivedLayout.getVBaseClassOffset(Base)
                                 : Offset + Layout.getBaseClassOffset(Base);
    RD = Base;
  }
  return Offset;
}
// We want to select the path which introduces the most covariant overrides. If
// two paths introduce overrides which the other path doesn't contain, issue a
// diagnostic.
static const FullPathTy *selectBestPath(ASTContext &Context,
                                        const CXXRecordDecl *RD, VPtrInfo *Info,
                                        std::list<FullPathTy> &FullPaths) {
  // Handle some easy cases first.
  if (FullPaths.empty())
    return nullptr;
  if (FullPaths.size() == 1)
    return &FullPaths.front();
  const FullPathTy *BestPath = nullptr;
  typedef std::set<const CXXMethodDecl *> OverriderSetTy;
  OverriderSetTy LastOverrides;
  for (const FullPathTy &SpecificPath : FullPaths) {
    assert(!SpecificPath.empty());
    OverriderSetTy CurrentOverrides;
    const CXXRecordDecl *TopLevelRD = SpecificPath.begin()->getBase();
    // Find the distance from the start of the path to the subobject with the
    // VPtr.
    CharUnits BaseOffset =
        getOffsetOfFullPath(Context, TopLevelRD, SpecificPath);
    FinalOverriders Overriders(TopLevelRD, CharUnits::Zero(), TopLevelRD);
    // Collect the return-adjusting overriders this path introduces for the
    // virtual methods of the subobject with the vfptr.
    for (const CXXMethodDecl *MD : Info->BaseWithVPtr->methods()) {
      if (!MD->isVirtual())
        continue;
      FinalOverriders::OverriderInfo OI =
          Overriders.getOverrider(MD->getCanonicalDecl(), BaseOffset);
      const CXXMethodDecl *OverridingMethod = OI.Method;
      // Only overriders which have a return adjustment introduce problematic
      // thunks.
      if (ComputeReturnAdjustmentBaseOffset(Context, OverridingMethod, MD)
              .isEmpty())
        continue;
      // It's possible that the overrider isn't in this path. If so, skip it
      // because this path didn't introduce it.
      const CXXRecordDecl *OverridingParent = OverridingMethod->getParent();
      if (std::none_of(SpecificPath.begin(), SpecificPath.end(),
                       [&](const BaseSubobject &BSO) {
                         return BSO.getBase() == OverridingParent;
                       }))
        continue;
      CurrentOverrides.insert(OverridingMethod);
    }
    OverriderSetTy NewOverrides =
        llvm::set_difference(CurrentOverrides, LastOverrides);
    if (NewOverrides.empty())
      continue;
    OverriderSetTy MissingOverrides =
        llvm::set_difference(LastOverrides, CurrentOverrides);
    if (MissingOverrides.empty()) {
      // This path is a strict improvement over the last path, let's use it.
      BestPath = &SpecificPath;
      std::swap(CurrentOverrides, LastOverrides);
    } else {
      // This path introduces an overrider with a conflicting covariant thunk.
      DiagnosticsEngine &Diags = Context.getDiagnostics();
      const CXXMethodDecl *CovariantMD = *NewOverrides.begin();
      const CXXMethodDecl *ConflictMD = *MissingOverrides.begin();
      Diags.Report(RD->getLocation(), diag::err_vftable_ambiguous_component)
          << RD;
      Diags.Report(CovariantMD->getLocation(), diag::note_covariant_thunk)
          << CovariantMD;
      Diags.Report(ConflictMD->getLocation(), diag::note_covariant_thunk)
          << ConflictMD;
    }
  }
  // Go with the path that introduced the most covariant overrides. If there is
  // no such path, pick the first path.
  return BestPath ? BestPath : &FullPaths.front();
}
// For each vfptr in Paths, compute PathToBaseWithVPtr: the best inheritance
// path (as a sequence of bases) from the most derived class RD down to the
// subobject that holds the vfptr.
static void computeFullPathsForVFTables(ASTContext &Context,
                                        const CXXRecordDecl *RD,
                                        VPtrInfoVector &Paths) {
  const ASTRecordLayout &MostDerivedLayout = Context.getASTRecordLayout(RD);
  FullPathTy FullPath;
  std::list<FullPathTy> FullPaths;
  for (VPtrInfo *Info : Paths) {
    findPathsToSubobject(
        Context, MostDerivedLayout, RD, CharUnits::Zero(),
        BaseSubobject(Info->BaseWithVPtr, Info->FullOffsetInMDC), FullPath,
        FullPaths);
    FullPath.clear();
    // Drop paths that are subsets of others, then pick the best remaining one.
    removeRedundantPaths(FullPaths);
    Info->PathToBaseWithVPtr.clear();
    if (const FullPathTy *BestPath =
            selectBestPath(Context, RD, Info, FullPaths))
      for (const BaseSubobject &BSO : *BestPath)
        Info->PathToBaseWithVPtr.push_back(BSO.getBase());
    FullPaths.clear();
  }
}
/// Compute and cache all vftable-related data for RD: vfptr locations, the
/// per-vfptr table layouts, thunks, and each virtual method's vftable slot.
void MicrosoftVTableContext::computeVTableRelatedInformation(
    const CXXRecordDecl *RD) {
  assert(RD->isDynamicClass());
  // Check if we've computed this information before.
  if (VFPtrLocations.count(RD))
    return;
  const VTableLayout::AddressPointsMapTy EmptyAddressPointsMap;
  VPtrInfoVector *VFPtrs = new VPtrInfoVector();
  computeVTablePaths(/*ForVBTables=*/false, RD, *VFPtrs);
  computeFullPathsForVFTables(Context, RD, *VFPtrs);
  VFPtrLocations[RD] = VFPtrs;
  MethodVFTableLocationsTy NewMethodLocations;
  for (VPtrInfoVector::iterator I = VFPtrs->begin(), E = VFPtrs->end();
       I != E; ++I) {
    // Build the layout for the vftable at this vfptr offset.
    VFTableBuilder Builder(*this, RD, *I);
    VFTableIdTy id(RD, (*I)->FullOffsetInMDC);
    assert(VFTableLayouts.count(id) == 0);
    SmallVector<VTableLayout::VTableThunkTy, 1> VTableThunks(
        Builder.vtable_thunks_begin(), Builder.vtable_thunks_end());
    VFTableLayouts[id] = new VTableLayout(
        Builder.getNumVTableComponents(), Builder.vtable_component_begin(),
        VTableThunks.size(), VTableThunks.data(), EmptyAddressPointsMap, true);
    Thunks.insert(Builder.thunks_begin(), Builder.thunks_end());
    for (const auto &Loc : Builder.vtable_locations()) {
      GlobalDecl GD = Loc.first;
      MethodVFTableLocation NewLoc = Loc.second;
      auto M = NewMethodLocations.find(GD);
      // A method may appear in several vftables; keep the smallest location.
      if (M == NewMethodLocations.end() || NewLoc < M->second)
        NewMethodLocations[GD] = NewLoc;
    }
  }
  MethodVFTableLocations.insert(NewMethodLocations.begin(),
                                NewMethodLocations.end());
  if (Context.getLangOpts().DumpVTableLayouts)
    dumpMethodLocations(RD, NewMethodLocations, llvm::outs());
}
/// Dump the vftable slot index of every virtual method in NewMethods, grouped
/// by the vfptr (vbtable index + vfptr offset) through which it is reached.
void MicrosoftVTableContext::dumpMethodLocations(
    const CXXRecordDecl *RD, const MethodVFTableLocationsTy &NewMethods,
    raw_ostream &Out) {
  // Compute the vtable indices for all the member functions.
  // Store them in a map keyed by the location so we'll get a sorted table.
  std::map<MethodVFTableLocation, std::string> IndicesMap;
  bool HasNonzeroOffset = false;
  for (MethodVFTableLocationsTy::const_iterator I = NewMethods.begin(),
       E = NewMethods.end(); I != E; ++I) {
    const CXXMethodDecl *MD = cast<const CXXMethodDecl>(I->first.getDecl());
    assert(MD->isVirtual());
    std::string MethodName = PredefinedExpr::ComputeName(
        PredefinedExpr::PrettyFunctionNoVirtual, MD);
    if (isa<CXXDestructorDecl>(MD)) {
      IndicesMap[I->second] = MethodName + " [scalar deleting]";
    } else {
      IndicesMap[I->second] = MethodName;
    }
    // Remember whether any method needs a non-trivial vfptr description.
    if (!I->second.VFPtrOffset.isZero() || I->second.VBTableIndex != 0)
      HasNonzeroOffset = true;
  }
  // Print the vtable indices for all the member functions.
  if (!IndicesMap.empty()) {
    Out << "VFTable indices for ";
    Out << "'";
    RD->printQualifiedName(Out);
    Out << "' (" << IndicesMap.size()
        << (IndicesMap.size() == 1 ? " entry" : " entries") << ").\n";
    CharUnits LastVFPtrOffset = CharUnits::fromQuantity(-1);
    uint64_t LastVBIndex = 0;
    for (std::map<MethodVFTableLocation, std::string>::const_iterator
             I = IndicesMap.begin(),
             E = IndicesMap.end();
         I != E; ++I) {
      CharUnits VFPtrOffset = I->first.VFPtrOffset;
      uint64_t VBIndex = I->first.VBTableIndex;
      // Emit a separator line whenever we move to a different vfptr.
      if (HasNonzeroOffset &&
          (VFPtrOffset != LastVFPtrOffset || VBIndex != LastVBIndex)) {
        assert(VBIndex > LastVBIndex || VFPtrOffset > LastVFPtrOffset);
        Out << " -- accessible via ";
        if (VBIndex)
          Out << "vbtable index " << VBIndex << ", ";
        Out << "vfptr at offset " << VFPtrOffset.getQuantity() << " --\n";
        LastVFPtrOffset = VFPtrOffset;
        LastVBIndex = VBIndex;
      }
      uint64_t VTableIndex = I->first.Index;
      const std::string &MethodName = I->second;
      Out << llvm::format("%4" PRIu64 " | ", VTableIndex) << MethodName << '\n';
    }
    Out << '\n';
  }
  Out.flush();
}
/// Compute (and cache) the vbtable paths and the vbtable index of every
/// virtual base of RD. Returns the cached info on repeat calls.
const VirtualBaseInfo *MicrosoftVTableContext::computeVBTableRelatedInformation(
    const CXXRecordDecl *RD) {
  VirtualBaseInfo *VBI;
  {
    // Get or create a VBI for RD. Don't hold a reference to the DenseMap cell,
    // as it may be modified and rehashed under us.
    VirtualBaseInfo *&Entry = VBaseInfo[RD];
    if (Entry)
      return Entry;
    Entry = VBI = new VirtualBaseInfo();
  }
  computeVTablePaths(/*ForVBTables=*/true, RD, VBI->VBPtrPaths);
  // First, see if the Derived class shared the vbptr with a non-virtual base.
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
  if (const CXXRecordDecl *VBPtrBase = Layout.getBaseSharingVBPtr()) {
    // If the Derived class shares the vbptr with a non-virtual base, the shared
    // virtual bases come first so that the layout is the same.
    const VirtualBaseInfo *BaseInfo =
        computeVBTableRelatedInformation(VBPtrBase);
    VBI->VBTableIndices.insert(BaseInfo->VBTableIndices.begin(),
                               BaseInfo->VBTableIndices.end());
  }
  // New vbases are added to the end of the vbtable.
  // Skip the self entry and vbases visited in the non-virtual base, if any.
  unsigned VBTableIndex = 1 + VBI->VBTableIndices.size();
  for (const auto &VB : RD->vbases()) {
    const CXXRecordDecl *CurVBase = VB.getType()->getAsCXXRecordDecl();
    if (!VBI->VBTableIndices.count(CurVBase))
      VBI->VBTableIndices[CurVBase] = VBTableIndex++;
  }
  return VBI;
}
/// Return the vbtable slot index of \p VBase within \p Derived's vbtable.
/// \p VBase must be a (possibly indirect) virtual base of \p Derived.
unsigned MicrosoftVTableContext::getVBTableIndex(const CXXRecordDecl *Derived,
                                                 const CXXRecordDecl *VBase) {
  const VirtualBaseInfo *VBInfo = computeVBTableRelatedInformation(Derived);
  // Use a single find() instead of the previous count()-then-find() pair,
  // which looked the key up in the map twice.
  auto I = VBInfo->VBTableIndices.find(VBase);
  assert(I != VBInfo->VBTableIndices.end() &&
         "VBase must be a virtual base of Derived");
  return I->second;
}
/// Return the vbtable paths for \p RD, computing and caching them on first
/// use.
const VPtrInfoVector &
MicrosoftVTableContext::enumerateVBTables(const CXXRecordDecl *RD) {
  const VirtualBaseInfo *VBI = computeVBTableRelatedInformation(RD);
  return VBI->VBPtrPaths;
}
/// Return the vfptr locations for \p RD, lazily computing them first.
const VPtrInfoVector &
MicrosoftVTableContext::getVFPtrOffsets(const CXXRecordDecl *RD) {
  // Make sure the vftable data for RD has been computed and cached.
  computeVTableRelatedInformation(RD);
  assert(VFPtrLocations.count(RD) && "Couldn't find vfptr locations");
  VPtrInfoVector *Locations = VFPtrLocations[RD];
  return *Locations;
}
/// Return the layout of RD's vftable located at \p VFPtrOffset, lazily
/// computing all of RD's vftable layouts first.
const VTableLayout &
MicrosoftVTableContext::getVFTableLayout(const CXXRecordDecl *RD,
                                         CharUnits VFPtrOffset) {
  computeVTableRelatedInformation(RD);
  VFTableIdTy id(RD, VFPtrOffset);
  assert(VFTableLayouts.count(id) && "Couldn't find a VFTable at this offset");
  const VTableLayout *Layout = VFTableLayouts[id];
  return *Layout;
}
/// Return the vftable slot location for the virtual method (or deleting
/// destructor) \p GD, lazily computing its class's vtable data if needed.
const MicrosoftVTableContext::MethodVFTableLocation &
MicrosoftVTableContext::getMethodVFTableLocation(GlobalDecl GD) {
  assert(cast<CXXMethodDecl>(GD.getDecl())->isVirtual() &&
         "Only use this method for virtual methods or dtors");
  if (isa<CXXDestructorDecl>(GD.getDecl()))
    assert(GD.getDtorType() == Dtor_Deleting);
  // Fast path: the location was already computed.
  auto Found = MethodVFTableLocations.find(GD);
  if (Found == MethodVFTableLocations.end()) {
    // Slow path: compute the vtable data for the method's class, then retry.
    const CXXRecordDecl *Parent =
        cast<CXXMethodDecl>(GD.getDecl())->getParent();
    computeVTableRelatedInformation(Parent);
    Found = MethodVFTableLocations.find(GD);
    assert(Found != MethodVFTableLocations.end() && "Did not find index!");
  }
  return Found->second;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/InheritViz.cpp | //===- InheritViz.cpp - Graphviz visualization for inheritance --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements CXXRecordDecl::viewInheritance, which
// generates a GraphViz DOT file that depicts the class inheritance
// diagram and then calls Graphviz/dot+gv on it.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/TypeOrdering.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
#include <set>
using namespace clang;
namespace {
/// InheritanceHierarchyWriter - Helper class that writes out a
/// GraphViz file that diagrams the inheritance hierarchy starting at
/// a given C++ class type. Note that we do not use LLVM's
/// GraphWriter, because the interface does not permit us to properly
/// differentiate between uses of types as virtual bases
/// vs. non-virtual bases.
class InheritanceHierarchyWriter {
  ASTContext& Context;
  raw_ostream &Out;
  // Times each canonical type has been seen as a direct base; used to give
  // every occurrence a distinct node identifier.
  std::map<QualType, int, QualTypeOrdering> DirectBaseCount;
  // Canonical types already emitted as virtual bases (each appears once).
  std::set<QualType, QualTypeOrdering> KnownVirtualBases;
public:
  InheritanceHierarchyWriter(ASTContext& Context, raw_ostream& Out)
    : Context(Context), Out(Out) { }
  // Emit the complete DOT digraph for Type and its transitive bases.
  void WriteGraph(QualType Type) {
    Out << "digraph \"" << llvm::DOT::EscapeString(Type.getAsString())
        << "\" {\n";
    WriteNode(Type, false);
    Out << "}\n";
  }
protected:
  /// WriteNode - Write out the description of node in the inheritance
  /// diagram, which may be a base class or it may be the root node.
  void WriteNode(QualType Type, bool FromVirtual);
  /// WriteNodeReference - Write out a reference to the given node,
  /// using a unique identifier for each direct base and for the
  /// (only) virtual base.
  raw_ostream& WriteNodeReference(QualType Type, bool FromVirtual);
};
} // namespace
/// Emit the DOT node for Type (declaration + label), then recursively emit
/// its base-class nodes and the edges to them. Virtual bases are emitted at
/// most once; virtual edges are drawn dashed.
void InheritanceHierarchyWriter::WriteNode(QualType Type, bool FromVirtual) {
  QualType CanonType = Context.getCanonicalType(Type);
  if (FromVirtual) {
    // A virtual base is shared: emit it only the first time we reach it.
    if (KnownVirtualBases.find(CanonType) != KnownVirtualBases.end())
      return;
    // We haven't seen this virtual base before, so display it and
    // its bases.
    KnownVirtualBases.insert(CanonType);
  }
  // Declare the node itself.
  Out << " ";
  WriteNodeReference(Type, FromVirtual);
  // Give the node a label based on the name of the class.
  std::string TypeName = Type.getAsString();
  Out << " [ shape=\"box\", label=\"" << llvm::DOT::EscapeString(TypeName);
  // If the name of the class was a typedef or something different
  // from the "real" class name, show the real class name in
  // parentheses so we don't confuse ourselves.
  if (TypeName != CanonType.getAsString()) {
    Out << "\\n(" << CanonType.getAsString() << ")";
  }
  // Finished describing the node.
  Out << " \"];\n";
  // Display the base classes.
  const CXXRecordDecl *Decl
    = static_cast<const CXXRecordDecl *>(Type->getAs<RecordType>()->getDecl());
  for (const auto &Base : Decl->bases()) {
    QualType CanonBaseType = Context.getCanonicalType(Base.getType());
    // If this is not virtual inheritance, bump the direct base
    // count for the type.
    if (!Base.isVirtual())
      ++DirectBaseCount[CanonBaseType];
    // Write out the node (if we need to).
    WriteNode(Base.getType(), Base.isVirtual());
    // Write out the edge.
    Out << " ";
    WriteNodeReference(Type, FromVirtual);
    Out << " -> ";
    WriteNodeReference(Base.getType(), Base.isVirtual());
    // Write out edge attributes to show the kind of inheritance.
    if (Base.isVirtual()) {
      Out << " [ style=\"dashed\" ]";
    }
    Out << ";";
  }
}
/// WriteNodeReference - Write out a reference to the given node,
/// using a unique identifier for each direct base and for the
/// (only) virtual base.
raw_ostream&
InheritanceHierarchyWriter::WriteNodeReference(QualType Type,
bool FromVirtual) {
QualType CanonType = Context.getCanonicalType(Type);
Out << "Class_" << CanonType.getAsOpaquePtr();
if (!FromVirtual)
Out << "_" << DirectBaseCount[CanonType];
return Out;
}
/// viewInheritance - Display the inheritance hierarchy of this C++
/// class using GraphViz. Writes a DOT description to a temporary file and
/// then asks the system Graphviz viewer to display it.
void CXXRecordDecl::viewInheritance(ASTContext& Context) const {
  QualType Self = Context.getTypeDeclType(this);
  int FD;
  SmallString<128> Filename;
  // Create a uniquely named "<Self>....dot" temporary file to write into.
  if (std::error_code EC = llvm::sys::fs::createTemporaryFile(
          Self.getAsString(), "dot", FD, Filename)) {
    llvm::errs() << "Error: " << EC.message() << "\n";
    return;
  }
  llvm::errs() << "Writing '" << Filename << "'... ";
  llvm::raw_fd_ostream O(FD, true);
  InheritanceHierarchyWriter Writer(Context, O);
  Writer.WriteGraph(Self);
  llvm::errs() << " done. \n";
  O.close();
  // Display the graph
  DisplayGraph(Filename);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/AST/StmtPrinter.cpp | //===--- StmtPrinter.cpp - Printing implementation for Stmt ASTs ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Stmt::dumpPretty/Stmt::printPretty methods, which
// pretty print the AST back out to C code.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CharInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Format.h"
#include "clang/Sema/SemaHLSL.h" // HLSL Change
using namespace clang;
//===----------------------------------------------------------------------===//
// StmtPrinter Visitor
//===----------------------------------------------------------------------===//
namespace {
  /// StmtPrinter - Visitor that pretty-prints a statement/expression tree
  /// back out as C/C++-like source, honoring the given PrintingPolicy and
  /// maintaining an indentation level.
  class StmtPrinter : public StmtVisitor<StmtPrinter> {
    raw_ostream &OS;
    unsigned IndentLevel;
    // Optional hook that may take over printing of individual statements.
    clang::PrinterHelper* Helper;
    PrintingPolicy Policy;
  public:
    StmtPrinter(raw_ostream &os, PrinterHelper* helper,
                const PrintingPolicy &Policy,
                unsigned Indentation = 0)
      : OS(os), IndentLevel(Indentation), Helper(helper), Policy(Policy) {}
    void PrintStmt(Stmt *S) {
      PrintStmt(S, Policy.Indentation);
    }
    // Print S as a statement, temporarily increasing the indent by SubIndent.
    void PrintStmt(Stmt *S, int SubIndent) {
      IndentLevel += SubIndent;
      if (S && isa<Expr>(S)) {
        // If this is an expr used in a stmt context, indent and newline it.
        Indent();
        Visit(S);
        OS << ";\n";
      } else if (S) {
        Visit(S);
      } else {
        Indent() << "<<<NULL STATEMENT>>>\n";
      }
      IndentLevel -= SubIndent;
    }
    void PrintRawCompoundStmt(CompoundStmt *S);
    void PrintRawDecl(Decl *D);
    void PrintRawDeclStmt(const DeclStmt *S);
    void PrintRawIfStmt(IfStmt *If);
    void PrintRawCXXCatchStmt(CXXCatchStmt *Catch);
    void PrintCallArgs(CallExpr *E);
    void PrintRawSEHExceptHandler(SEHExceptStmt *S);
    void PrintRawSEHFinallyStmt(SEHFinallyStmt *S);
    void PrintOMPExecutableDirective(OMPExecutableDirective *S);
    void PrintExpr(Expr *E) {
      if (E)
        Visit(E);
      else
        OS << "<null expr>";
    }
    // Emit the current indentation (optionally adjusted by Delta) and return
    // the stream so callers can chain further output.
    raw_ostream &Indent(int Delta = 0) {
      for (int i = 0, e = IndentLevel+Delta; i < e; ++i)
        OS << " ";
      return OS;
    }
    void Visit(Stmt* S) {
      // Give the helper first refusal; fall back to normal dispatch.
      if (Helper && Helper->handledStmt(S,OS))
        return;
      else StmtVisitor<StmtPrinter>::Visit(S);
    }
    void VisitStmt(Stmt *Node) LLVM_ATTRIBUTE_UNUSED {
      Indent() << "<<unknown stmt type>>\n";
    }
    void VisitExpr(Expr *Node) LLVM_ATTRIBUTE_UNUSED {
      OS << "<<unknown expr type>>";
    }
    void VisitCXXNamedCastExpr(CXXNamedCastExpr *Node);
#define ABSTRACT_STMT(CLASS)
#define STMT(CLASS, PARENT) \
    void Visit##CLASS(CLASS *Node);
#include "clang/AST/StmtNodes.inc"
  };
}
//===----------------------------------------------------------------------===//
// Stmt printing methods.
//===----------------------------------------------------------------------===//
/// PrintRawCompoundStmt - Print a compound stmt without indenting the {, and
/// with no newline after the }.
/// PrintRawCompoundStmt - Print a compound stmt without indenting the {, and
/// with no newline after the }.
void StmtPrinter::PrintRawCompoundStmt(CompoundStmt *Node) {
  OS << "{\n";
  for (CompoundStmt::body_iterator I = Node->body_begin(),
                                   E = Node->body_end();
       I != E; ++I)
    PrintStmt(*I);
  Indent() << "}";
}
// Print a single declaration with the current policy; no statement framing.
void StmtPrinter::PrintRawDecl(Decl *D) {
  D->print(OS, Policy, IndentLevel);
}

// Print every declarator of a DeclStmt as one declaration group
// (e.g. "int a, b"), sharing a single type specifier.
void StmtPrinter::PrintRawDeclStmt(const DeclStmt *S) {
  SmallVector<Decl*, 2> Decls(S->decls());
  Decl::printGroup(Decls.data(), Decls.size(), OS, Policy, IndentLevel);
}
// An empty statement prints as a lone semicolon.
void StmtPrinter::VisitNullStmt(NullStmt *Node) {
  Indent() << ";\n";
}

// HLSL Change: adding support for HLSL discard stmt
void StmtPrinter::VisitDiscardStmt(DiscardStmt *Node) {
  Indent();
  OS << "discard;\n";
}

// A declaration statement prints its group followed by ";".
void StmtPrinter::VisitDeclStmt(DeclStmt *Node) {
  Indent();
  PrintRawDeclStmt(Node);
  OS << ";\n";
}

// A free-standing compound statement gets its own indentation and newline.
void StmtPrinter::VisitCompoundStmt(CompoundStmt *Node) {
  Indent();
  PrintRawCompoundStmt(Node);
  OS << "\n";
}
// Case labels are out-dented one level (Indent(-1)) relative to the
// statements they label. A GNU case range prints as "lo ... hi".
void StmtPrinter::VisitCaseStmt(CaseStmt *Node) {
  Indent(-1) << "case ";
  PrintExpr(Node->getLHS());
  if (Node->getRHS()) {
    OS << " ... ";
    PrintExpr(Node->getRHS());
  }
  OS << ":\n";
  // Sub-statement printed at the current level (0 extra indentation).
  PrintStmt(Node->getSubStmt(), 0);
}

void StmtPrinter::VisitDefaultStmt(DefaultStmt *Node) {
  Indent(-1) << "default:\n";
  PrintStmt(Node->getSubStmt(), 0);
}

// Goto labels are also out-dented one level, like case labels.
void StmtPrinter::VisitLabelStmt(LabelStmt *Node) {
  Indent(-1) << Node->getName() << ":\n";
  PrintStmt(Node->getSubStmt(), 0);
}
// Print an attributed statement. In HLSL mode, attributes are printed with
// the HLSL-specific syntax (e.g. [unroll]) via CustomPrintHLSLAttr, walking
// the attribute list in reverse; otherwise the generic pretty-printer is
// used for each attribute.
void StmtPrinter::VisitAttributedStmt(AttributedStmt *Node) {
  // HLSL Change Begin
  if (Policy.LangOpts.HLSL) {
    // NOTE(review): reverse iteration presumably restores the original
    // source order of the attributes — confirm against CustomPrintHLSLAttr.
    for (ArrayRef<const Attr*>::reverse_iterator it = Node->getAttrs().rbegin(),
           end = Node->getAttrs().rend(); it != end; ++it) {
      hlsl::CustomPrintHLSLAttr((*it), OS, Policy, IndentLevel);
    }
    PrintStmt(Node->getSubStmt(), 0);
    return;
  }
  // HLSL Change End
  for (const auto *Attr : Node->getAttrs()) {
    Attr->printPretty(OS, Policy);
  }

  PrintStmt(Node->getSubStmt(), 0);
}
// Print an if statement without leading indentation, so it can be spliced
// after "else " to form "else if". Compound branches are kept on the same
// line as the closing ')' / "else"; non-compound branches go on a new line.
void StmtPrinter::PrintRawIfStmt(IfStmt *If) {
  OS << "if (";
  // A condition variable declaration ("if (int x = f())") prints as a decl
  // group; otherwise print the condition expression.
  if (const DeclStmt *DS = If->getConditionVariableDeclStmt())
    PrintRawDeclStmt(DS);
  else
    PrintExpr(If->getCond());
  OS << ')';

  if (CompoundStmt *CS = dyn_cast<CompoundStmt>(If->getThen())) {
    OS << ' ';
    PrintRawCompoundStmt(CS);
    // Keep "else" on the same line as '}' when there is an else branch.
    OS << (If->getElse() ? ' ' : '\n');
  } else {
    OS << '\n';
    PrintStmt(If->getThen());
    if (If->getElse()) Indent();
  }

  if (Stmt *Else = If->getElse()) {
    OS << "else";

    if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Else)) {
      OS << ' ';
      PrintRawCompoundStmt(CS);
      OS << '\n';
    } else if (IfStmt *ElseIf = dyn_cast<IfStmt>(Else)) {
      // "else if" chains recurse without extra indentation.
      OS << ' ';
      PrintRawIfStmt(ElseIf);
    } else {
      OS << '\n';
      PrintStmt(If->getElse());
    }
  }
}

void StmtPrinter::VisitIfStmt(IfStmt *If) {
  Indent();
  PrintRawIfStmt(If);
}
// Print a switch statement. A condition variable declaration is printed as
// a decl group; a compound body stays on the same line as the ')'.
void StmtPrinter::VisitSwitchStmt(SwitchStmt *Node) {
  Indent() << "switch (";
  if (const DeclStmt *CondDecl = Node->getConditionVariableDeclStmt())
    PrintRawDeclStmt(CondDecl);
  else
    PrintExpr(Node->getCond());
  OS << ")";

  // Pretty print compoundstmt bodies (very common).
  Stmt *Body = Node->getBody();
  if (CompoundStmt *Block = dyn_cast<CompoundStmt>(Body)) {
    OS << " ";
    PrintRawCompoundStmt(Block);
    OS << "\n";
  } else {
    OS << "\n";
    PrintStmt(Body);
  }
}
// Print a while statement; the body is always placed on the following line.
void StmtPrinter::VisitWhileStmt(WhileStmt *Node) {
  Indent() << "while (";
  if (const DeclStmt *DS = Node->getConditionVariableDeclStmt())
    PrintRawDeclStmt(DS);
  else
    PrintExpr(Node->getCond());
  OS << ")\n";
  // NOTE(review): IndentLevel is unsigned, so this decrement wraps if it is
  // ever 0 here; presumably unreachable in practice, but the underlying
  // indentation bug referenced by the TODO is still outstanding — confirm.
  IndentLevel = IndentLevel - 1; // HLSL Change - TODO: Find the root cause of this issue.
  PrintStmt(Node->getBody());
  IndentLevel = IndentLevel + 1; // HLSL Change
}
// Print a do/while statement. A compound body keeps "while" on the same
// line as the closing '}'; any other body is printed on its own lines and
// "while" is re-indented afterwards.
void StmtPrinter::VisitDoStmt(DoStmt *Node) {
  Indent() << "do ";
  Stmt *Body = Node->getBody();
  if (CompoundStmt *Block = dyn_cast<CompoundStmt>(Body)) {
    PrintRawCompoundStmt(Block);
    OS << " ";
  } else {
    OS << "\n";
    PrintStmt(Body);
    Indent();
  }

  OS << "while (";
  PrintExpr(Node->getCond());
  OS << ");\n";
}
// Print a for statement. Each of init/cond/inc is optional and printed only
// when present; the init may itself be a declaration statement.
void StmtPrinter::VisitForStmt(ForStmt *Node) {
  Indent() << "for (";
  if (Node->getInit()) {
    if (DeclStmt *DS = dyn_cast<DeclStmt>(Node->getInit()))
      PrintRawDeclStmt(DS);
    else
      PrintExpr(cast<Expr>(Node->getInit()));
  }
  OS << ";";
  if (Node->getCond()) {
    OS << " ";
    PrintExpr(Node->getCond());
  }
  OS << ";";
  if (Node->getInc()) {
    OS << " ";
    PrintExpr(Node->getInc());
  }
  OS << ") ";

  // Compound bodies stay on the same line as ')'; others start a new line.
  if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
    PrintRawCompoundStmt(CS);
    OS << "\n";
  } else {
    OS << "\n";
    PrintStmt(Node->getBody());
  }
}
// Print an Objective-C fast-enumeration loop: "for (elem in collection)".
void StmtPrinter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *Node) {
  Indent() << "for (";
  if (DeclStmt *DS = dyn_cast<DeclStmt>(Node->getElement()))
    PrintRawDeclStmt(DS);
  else
    PrintExpr(cast<Expr>(Node->getElement()));
  OS << " in ";
  PrintExpr(Node->getCollection());
  OS << ") ";

  if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
    PrintRawCompoundStmt(CS);
    OS << "\n";
  } else {
    OS << "\n";
    PrintStmt(Node->getBody());
  }
}

// Print a C++11 range-based for. Initializers on the loop variable are
// suppressed because the range expression is printed after " : " instead.
void StmtPrinter::VisitCXXForRangeStmt(CXXForRangeStmt *Node) {
  Indent() << "for (";
  PrintingPolicy SubPolicy(Policy);
  SubPolicy.SuppressInitializers = true;
  Node->getLoopVariable()->print(OS, SubPolicy, IndentLevel);
  OS << " : ";
  PrintExpr(Node->getRangeInit());
  OS << ") {\n";
  PrintStmt(Node->getBody());
  Indent() << "}";
  if (Policy.IncludeNewlines) OS << "\n";
}
// Print a Microsoft __if_exists / __if_not_exists statement, including any
// nested-name qualifier on the queried name.
void StmtPrinter::VisitMSDependentExistsStmt(MSDependentExistsStmt *Node) {
  Indent();
  if (Node->isIfExists())
    OS << "__if_exists (";
  else
    OS << "__if_not_exists (";

  if (NestedNameSpecifier *Qualifier
        = Node->getQualifierLoc().getNestedNameSpecifier())
    Qualifier->print(OS, Policy);

  OS << Node->getNameInfo() << ") ";

  PrintRawCompoundStmt(Node->getSubStmt());
}
// Jump statements. Each honors Policy.IncludeNewlines so callers embedding
// a single statement in larger output can suppress the trailing newline.
void StmtPrinter::VisitGotoStmt(GotoStmt *Node) {
  Indent() << "goto " << Node->getLabel()->getName() << ";";
  if (Policy.IncludeNewlines) OS << "\n";
}

// GNU computed goto: "goto *target;".
void StmtPrinter::VisitIndirectGotoStmt(IndirectGotoStmt *Node) {
  Indent() << "goto *";
  PrintExpr(Node->getTarget());
  OS << ";";
  if (Policy.IncludeNewlines) OS << "\n";
}

void StmtPrinter::VisitContinueStmt(ContinueStmt *Node) {
  Indent() << "continue;";
  if (Policy.IncludeNewlines) OS << "\n";
}

void StmtPrinter::VisitBreakStmt(BreakStmt *Node) {
  Indent() << "break;";
  if (Policy.IncludeNewlines) OS << "\n";
}

void StmtPrinter::VisitReturnStmt(ReturnStmt *Node) {
  Indent() << "return";
  if (Node->getRetValue()) {
    OS << " ";
    PrintExpr(Node->getRetValue());
  }
  OS << ";";
  if (Policy.IncludeNewlines) OS << "\n";
}
// Print a GCC-style inline asm statement:
//   asm [volatile] (template : outputs : inputs : clobbers);
// The ':' separators are emitted only when a later section is non-empty.
void StmtPrinter::VisitGCCAsmStmt(GCCAsmStmt *Node) {
  Indent() << "asm ";

  if (Node->isVolatile())
    OS << "volatile ";

  OS << "(";
  VisitStringLiteral(Node->getAsmString());

  // Outputs
  if (Node->getNumOutputs() != 0 || Node->getNumInputs() != 0 ||
      Node->getNumClobbers() != 0)
    OS << " : ";

  for (unsigned i = 0, e = Node->getNumOutputs(); i != e; ++i) {
    if (i != 0)
      OS << ", ";

    // Symbolic operand name, e.g. "[result]".
    if (!Node->getOutputName(i).empty()) {
      OS << '[';
      OS << Node->getOutputName(i);
      OS << "] ";
    }

    VisitStringLiteral(Node->getOutputConstraintLiteral(i));
    OS << " (";
    Visit(Node->getOutputExpr(i));
    OS << ")";
  }

  // Inputs
  if (Node->getNumInputs() != 0 || Node->getNumClobbers() != 0)
    OS << " : ";

  for (unsigned i = 0, e = Node->getNumInputs(); i != e; ++i) {
    if (i != 0)
      OS << ", ";

    if (!Node->getInputName(i).empty()) {
      OS << '[';
      OS << Node->getInputName(i);
      OS << "] ";
    }

    VisitStringLiteral(Node->getInputConstraintLiteral(i));
    OS << " (";
    Visit(Node->getInputExpr(i));
    OS << ")";
  }

  // Clobbers
  if (Node->getNumClobbers() != 0)
    OS << " : ";

  for (unsigned i = 0, e = Node->getNumClobbers(); i != e; ++i) {
    if (i != 0)
      OS << ", ";

    VisitStringLiteral(Node->getClobberStringLiteral(i));
  }

  OS << ");";
  if (Policy.IncludeNewlines) OS << "\n";
}
void StmtPrinter::VisitMSAsmStmt(MSAsmStmt *Node) {
  // FIXME: Implement MS style inline asm statement printer.
  Indent() << "__asm ";
  if (Node->hasBraces())
    OS << "{\n";
  OS << Node->getAsmString() << "\n";
  if (Node->hasBraces())
    Indent() << "}\n";
}

// A captured statement prints only the body of its captured decl; the
// capture machinery itself has no source-level spelling.
void StmtPrinter::VisitCapturedStmt(CapturedStmt *Node) {
  PrintStmt(Node->getCapturedDecl()->getBody());
}
// Print an Objective-C @try statement with its @catch clauses and optional
// @finally block. @try and @catch bodies that are not CompoundStmts are
// silently skipped (matching the original behavior); the @finally body is
// asserted to be a CompoundStmt.
void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) {
  Indent() << "@try";
  if (CompoundStmt *TS = dyn_cast<CompoundStmt>(Node->getTryBody())) {
    PrintRawCompoundStmt(TS);
    OS << "\n";
  }

  for (unsigned I = 0, N = Node->getNumCatchStmts(); I != N; ++I) {
    ObjCAtCatchStmt *catchStmt = Node->getCatchStmt(I);
    Indent() << "@catch(";
    // The parameter is absent for a catch-all "@catch(...)".
    if (Decl *DS = catchStmt->getCatchParamDecl())
      PrintRawDecl(DS);
    OS << ")";
    if (CompoundStmt *CS = dyn_cast<CompoundStmt>(catchStmt->getCatchBody())) {
      PrintRawCompoundStmt(CS);
      OS << "\n";
    }
  }

  if (ObjCAtFinallyStmt *FS = static_cast<ObjCAtFinallyStmt *>(
        Node->getFinallyStmt())) {
    Indent() << "@finally";
    // Use cast<> rather than dyn_cast<>: a @finally body is always a
    // CompoundStmt, and the previous dyn_cast would have passed a null
    // pointer into PrintRawCompoundStmt on a malformed AST instead of
    // asserting. This matches PrintRawCXXCatchStmt below.
    PrintRawCompoundStmt(cast<CompoundStmt>(FS->getFinallyBody()));
    OS << "\n";
  }
}
// @finally is printed as part of VisitObjCAtTryStmt; nothing to do here.
void StmtPrinter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *Node) {
}

// Standalone @catch (only reached outside a @try context); placeholder.
void StmtPrinter::VisitObjCAtCatchStmt (ObjCAtCatchStmt *Node) {
  Indent() << "@catch (...) { /* todo */ } \n";
}

// "@throw;" rethrows; "@throw expr;" throws a new exception object.
void StmtPrinter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *Node) {
  Indent() << "@throw";
  if (Node->getThrowExpr()) {
    OS << " ";
    PrintExpr(Node->getThrowExpr());
  }
  OS << ";\n";
}

void StmtPrinter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *Node) {
  Indent() << "@synchronized (";
  PrintExpr(Node->getSynchExpr());
  OS << ")";
  PrintRawCompoundStmt(Node->getSynchBody());
  OS << "\n";
}
// Print an @autoreleasepool statement. The sub-statement is always a
// CompoundStmt, so assert that with cast<> instead of the previous
// dyn_cast<>, which would have handed a null pointer to
// PrintRawCompoundStmt on a malformed AST.
void StmtPrinter::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *Node) {
  Indent() << "@autoreleasepool";
  PrintRawCompoundStmt(cast<CompoundStmt>(Node->getSubStmt()));
  OS << "\n";
}
// Print a C++ catch clause without leading indentation; a null exception
// declaration means a catch-all "catch (...)".
void StmtPrinter::PrintRawCXXCatchStmt(CXXCatchStmt *Node) {
  OS << "catch (";
  if (Decl *ExDecl = Node->getExceptionDecl())
    PrintRawDecl(ExDecl);
  else
    OS << "...";
  OS << ") ";
  PrintRawCompoundStmt(cast<CompoundStmt>(Node->getHandlerBlock()));
}

void StmtPrinter::VisitCXXCatchStmt(CXXCatchStmt *Node) {
  Indent();
  PrintRawCXXCatchStmt(Node);
  OS << "\n";
}

// Print "try { ... } catch (...) { ... }" with all handlers on one logical
// statement line.
void StmtPrinter::VisitCXXTryStmt(CXXTryStmt *Node) {
  Indent() << "try ";
  PrintRawCompoundStmt(Node->getTryBlock());
  for (unsigned i = 0, e = Node->getNumHandlers(); i < e; ++i) {
    OS << " ";
    PrintRawCXXCatchStmt(Node->getHandler(i));
  }
  OS << "\n";
}
// Print a structured-exception-handling try: either "try"/"__try" followed
// by exactly one of an __except handler or a __finally block.
void StmtPrinter::VisitSEHTryStmt(SEHTryStmt *Node) {
  Indent() << (Node->getIsCXXTry() ? "try " : "__try ");
  PrintRawCompoundStmt(Node->getTryBlock());
  SEHExceptStmt *E = Node->getExceptHandler();
  SEHFinallyStmt *F = Node->getFinallyHandler();
  if(E)
    PrintRawSEHExceptHandler(E);
  else {
    assert(F && "Must have a finally block...");
    PrintRawSEHFinallyStmt(F);
  }
  OS << "\n";
}

void StmtPrinter::PrintRawSEHFinallyStmt(SEHFinallyStmt *Node) {
  OS << "__finally ";
  PrintRawCompoundStmt(Node->getBlock());
  OS << "\n";
}

// "__except (filter)" followed by the handler block.
void StmtPrinter::PrintRawSEHExceptHandler(SEHExceptStmt *Node) {
  OS << "__except (";
  VisitExpr(Node->getFilterExpr());
  OS << ")\n";
  PrintRawCompoundStmt(Node->getBlock());
  OS << "\n";
}

void StmtPrinter::VisitSEHExceptStmt(SEHExceptStmt *Node) {
  Indent();
  PrintRawSEHExceptHandler(Node);
  OS << "\n";
}

void StmtPrinter::VisitSEHFinallyStmt(SEHFinallyStmt *Node) {
  Indent();
  PrintRawSEHFinallyStmt(Node);
  OS << "\n";
}

void StmtPrinter::VisitSEHLeaveStmt(SEHLeaveStmt *Node) {
  Indent() << "__leave;";
  if (Policy.IncludeNewlines) OS << "\n";
}
//===----------------------------------------------------------------------===//
// OpenMP clauses printing methods
//===----------------------------------------------------------------------===//
namespace {
// OMPClausePrinter - Prints each OpenMP clause in its "#pragma omp ..."
// source form. One Visit method per clause class is declared from
// OpenMPKinds.def.
class OMPClausePrinter : public OMPClauseVisitor<OMPClausePrinter> {
  raw_ostream &OS;               // Destination stream.
  const PrintingPolicy &Policy;  // Formatting/language options.
  /// \brief Process clauses with list of variables.
  template <typename T>
  void VisitOMPClauseList(T *Node, char StartSym);
public:
  OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
    : OS(OS), Policy(Policy) { }
#define OPENMP_CLAUSE(Name, Class) \
  void Visit##Class(Class *S);
#include "clang/Basic/OpenMPKinds.def"
};
// Clauses carrying a single expression print as "name(expr)".
void OMPClausePrinter::VisitOMPIfClause(OMPIfClause *Node) {
  OS << "if(";
  Node->getCondition()->printPretty(OS, nullptr, Policy, 0);
  OS << ")";
}

void OMPClausePrinter::VisitOMPFinalClause(OMPFinalClause *Node) {
  OS << "final(";
  Node->getCondition()->printPretty(OS, nullptr, Policy, 0);
  OS << ")";
}

void OMPClausePrinter::VisitOMPNumThreadsClause(OMPNumThreadsClause *Node) {
  OS << "num_threads(";
  Node->getNumThreads()->printPretty(OS, nullptr, Policy, 0);
  OS << ")";
}

void OMPClausePrinter::VisitOMPSafelenClause(OMPSafelenClause *Node) {
  OS << "safelen(";
  Node->getSafelen()->printPretty(OS, nullptr, Policy, 0);
  OS << ")";
}

void OMPClausePrinter::VisitOMPCollapseClause(OMPCollapseClause *Node) {
  OS << "collapse(";
  Node->getNumForLoops()->printPretty(OS, nullptr, Policy, 0);
  OS << ")";
}
// Clauses with an enumerated argument print its canonical spelling.
void OMPClausePrinter::VisitOMPDefaultClause(OMPDefaultClause *Node) {
  OS << "default("
     << getOpenMPSimpleClauseTypeName(OMPC_default, Node->getDefaultKind())
     << ")";
}

void OMPClausePrinter::VisitOMPProcBindClause(OMPProcBindClause *Node) {
  OS << "proc_bind("
     << getOpenMPSimpleClauseTypeName(OMPC_proc_bind, Node->getProcBindKind())
     << ")";
}

// "schedule(kind[, chunk])" — the chunk expression is optional.
void OMPClausePrinter::VisitOMPScheduleClause(OMPScheduleClause *Node) {
  OS << "schedule("
     << getOpenMPSimpleClauseTypeName(OMPC_schedule, Node->getScheduleKind());
  if (Node->getChunkSize()) {
    OS << ", ";
    Node->getChunkSize()->printPretty(OS, nullptr, Policy);
  }
  OS << ")";
}

// Argument-less clauses print just their keyword.
void OMPClausePrinter::VisitOMPOrderedClause(OMPOrderedClause *) {
  OS << "ordered";
}

void OMPClausePrinter::VisitOMPNowaitClause(OMPNowaitClause *) {
  OS << "nowait";
}

void OMPClausePrinter::VisitOMPUntiedClause(OMPUntiedClause *) {
  OS << "untied";
}

void OMPClausePrinter::VisitOMPMergeableClause(OMPMergeableClause *) {
  OS << "mergeable";
}

void OMPClausePrinter::VisitOMPReadClause(OMPReadClause *) { OS << "read"; }

void OMPClausePrinter::VisitOMPWriteClause(OMPWriteClause *) { OS << "write"; }

void OMPClausePrinter::VisitOMPUpdateClause(OMPUpdateClause *) {
  OS << "update";
}

void OMPClausePrinter::VisitOMPCaptureClause(OMPCaptureClause *) {
  OS << "capture";
}

void OMPClausePrinter::VisitOMPSeqCstClause(OMPSeqCstClause *) {
  OS << "seq_cst";
}
template<typename T>
void OMPClausePrinter::VisitOMPClauseList(T *Node, char StartSym) {
for (typename T::varlist_iterator I = Node->varlist_begin(),
E = Node->varlist_end();
I != E; ++I) {
assert(*I && "Expected non-null Stmt");
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(*I)) {
OS << (I == Node->varlist_begin() ? StartSym : ',');
cast<NamedDecl>(DRE->getDecl())->printQualifiedName(OS);
} else {
OS << (I == Node->varlist_begin() ? StartSym : ',');
(*I)->printPretty(OS, nullptr, Policy, 0);
}
}
}
// Variable-list clauses print "name(v1,v2,...)"; an empty list prints
// nothing at all.
void OMPClausePrinter::VisitOMPPrivateClause(OMPPrivateClause *Node) {
  if (!Node->varlist_empty()) {
    OS << "private";
    VisitOMPClauseList(Node, '(');
    OS << ")";
  }
}

void OMPClausePrinter::VisitOMPFirstprivateClause(OMPFirstprivateClause *Node) {
  if (!Node->varlist_empty()) {
    OS << "firstprivate";
    VisitOMPClauseList(Node, '(');
    OS << ")";
  }
}

void OMPClausePrinter::VisitOMPLastprivateClause(OMPLastprivateClause *Node) {
  if (!Node->varlist_empty()) {
    OS << "lastprivate";
    VisitOMPClauseList(Node, '(');
    OS << ")";
  }
}

void OMPClausePrinter::VisitOMPSharedClause(OMPSharedClause *Node) {
  if (!Node->varlist_empty()) {
    OS << "shared";
    VisitOMPClauseList(Node, '(');
    OS << ")";
  }
}
// "reduction(op : v1,v2,...)". The reduction identifier is printed as a
// plain operator spelling in C, or as a possibly-qualified name in C++.
void OMPClausePrinter::VisitOMPReductionClause(OMPReductionClause *Node) {
  if (!Node->varlist_empty()) {
    OS << "reduction(";
    NestedNameSpecifier *QualifierLoc =
      Node->getQualifierLoc().getNestedNameSpecifier();
    OverloadedOperatorKind OOK =
      Node->getNameInfo().getName().getCXXOverloadedOperator();
    if (QualifierLoc == nullptr && OOK != OO_None) {
      // Print reduction identifier in C format
      OS << getOperatorSpelling(OOK);
    } else {
      // Use C++ format
      if (QualifierLoc != nullptr)
        QualifierLoc->print(OS, Policy);
      OS << Node->getNameInfo();
    }
    OS << ":";
    VisitOMPClauseList(Node, ' ');
    OS << ")";
  }
}
// "linear(v1,v2 : step)" — the step expression is optional.
void OMPClausePrinter::VisitOMPLinearClause(OMPLinearClause *Node) {
  if (!Node->varlist_empty()) {
    OS << "linear";
    VisitOMPClauseList(Node, '(');
    if (Node->getStep() != nullptr) {
      OS << ": ";
      Node->getStep()->printPretty(OS, nullptr, Policy, 0);
    }
    OS << ")";
  }
}

// "aligned(v1,v2 : alignment)" — the alignment expression is optional.
void OMPClausePrinter::VisitOMPAlignedClause(OMPAlignedClause *Node) {
  if (!Node->varlist_empty()) {
    OS << "aligned";
    VisitOMPClauseList(Node, '(');
    if (Node->getAlignment() != nullptr) {
      OS << ": ";
      Node->getAlignment()->printPretty(OS, nullptr, Policy, 0);
    }
    OS << ")";
  }
}

void OMPClausePrinter::VisitOMPCopyinClause(OMPCopyinClause *Node) {
  if (!Node->varlist_empty()) {
    OS << "copyin";
    VisitOMPClauseList(Node, '(');
    OS << ")";
  }
}

void OMPClausePrinter::VisitOMPCopyprivateClause(OMPCopyprivateClause *Node) {
  if (!Node->varlist_empty()) {
    OS << "copyprivate";
    VisitOMPClauseList(Node, '(');
    OS << ")";
  }
}

// The flush clause prints only its parenthesized list; the "flush" keyword
// is part of the directive itself.
void OMPClausePrinter::VisitOMPFlushClause(OMPFlushClause *Node) {
  if (!Node->varlist_empty()) {
    VisitOMPClauseList(Node, '(');
    OS << ")";
  }
}

// "depend(kind : v1,v2,...)".
void OMPClausePrinter::VisitOMPDependClause(OMPDependClause *Node) {
  if (!Node->varlist_empty()) {
    OS << "depend(";
    OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
                                        Node->getDependencyKind())
       << " :";
    VisitOMPClauseList(Node, ' ');
    OS << ")";
  }
}
}
//===----------------------------------------------------------------------===//
// OpenMP directives printing methods
//===----------------------------------------------------------------------===//
// Print the explicit clauses of an OpenMP directive (each followed by a
// space), then the associated captured statement body, if any. The
// directive keyword itself has already been emitted by the caller.
void StmtPrinter::PrintOMPExecutableDirective(OMPExecutableDirective *S) {
  OMPClausePrinter Printer(OS, Policy);
  for (OMPClause *C : S->clauses())
    if (C && !C->isImplicit()) {
      Printer.Visit(C);
      OS << ' ';
    }
  OS << "\n";
  if (S->hasAssociatedStmt()) {
    if (Stmt *AS = S->getAssociatedStmt()) {
      assert(isa<CapturedStmt>(AS) && "Expected captured statement!");
      PrintStmt(cast<CapturedStmt>(AS)->getCapturedStmt());
    }
  }
}
// OpenMP directive visitors: each prints its "#pragma omp ..." spelling and
// delegates clauses/body to PrintOMPExecutableDirective. Directives that can
// carry clauses end their keyword string with a trailing space; clause-less
// directives do not.
void StmtPrinter::VisitOMPParallelDirective(OMPParallelDirective *Node) {
  Indent() << "#pragma omp parallel ";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPSimdDirective(OMPSimdDirective *Node) {
  Indent() << "#pragma omp simd ";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPForDirective(OMPForDirective *Node) {
  Indent() << "#pragma omp for ";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPForSimdDirective(OMPForSimdDirective *Node) {
  Indent() << "#pragma omp for simd ";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPSectionsDirective(OMPSectionsDirective *Node) {
  Indent() << "#pragma omp sections ";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPSectionDirective(OMPSectionDirective *Node) {
  Indent() << "#pragma omp section";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPSingleDirective(OMPSingleDirective *Node) {
  Indent() << "#pragma omp single ";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPMasterDirective(OMPMasterDirective *Node) {
  Indent() << "#pragma omp master";
  PrintOMPExecutableDirective(Node);
}

// "critical" may carry an optional parenthesized name.
void StmtPrinter::VisitOMPCriticalDirective(OMPCriticalDirective *Node) {
  Indent() << "#pragma omp critical";
  if (Node->getDirectiveName().getName()) {
    OS << " (";
    Node->getDirectiveName().printName(OS);
    OS << ")";
  }
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPParallelForDirective(OMPParallelForDirective *Node) {
  Indent() << "#pragma omp parallel for ";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPParallelForSimdDirective(
    OMPParallelForSimdDirective *Node) {
  Indent() << "#pragma omp parallel for simd ";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPParallelSectionsDirective(
    OMPParallelSectionsDirective *Node) {
  Indent() << "#pragma omp parallel sections ";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPTaskDirective(OMPTaskDirective *Node) {
  Indent() << "#pragma omp task ";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPTaskyieldDirective(OMPTaskyieldDirective *Node) {
  Indent() << "#pragma omp taskyield";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPBarrierDirective(OMPBarrierDirective *Node) {
  Indent() << "#pragma omp barrier";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPTaskwaitDirective(OMPTaskwaitDirective *Node) {
  Indent() << "#pragma omp taskwait";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPTaskgroupDirective(OMPTaskgroupDirective *Node) {
  Indent() << "#pragma omp taskgroup";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPFlushDirective(OMPFlushDirective *Node) {
  Indent() << "#pragma omp flush ";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPOrderedDirective(OMPOrderedDirective *Node) {
  Indent() << "#pragma omp ordered";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPAtomicDirective(OMPAtomicDirective *Node) {
  Indent() << "#pragma omp atomic ";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPTargetDirective(OMPTargetDirective *Node) {
  Indent() << "#pragma omp target ";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPTeamsDirective(OMPTeamsDirective *Node) {
  Indent() << "#pragma omp teams ";
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPCancellationPointDirective(
    OMPCancellationPointDirective *Node) {
  Indent() << "#pragma omp cancellation point "
           << getOpenMPDirectiveName(Node->getCancelRegion());
  PrintOMPExecutableDirective(Node);
}

void StmtPrinter::VisitOMPCancelDirective(OMPCancelDirective *Node) {
  Indent() << "#pragma omp cancel "
           << getOpenMPDirectiveName(Node->getCancelRegion());
  PrintOMPExecutableDirective(Node);
}
//===----------------------------------------------------------------------===//
// Expr printing methods.
//===----------------------------------------------------------------------===//
// Print a reference to a declaration: optional nested-name qualifier,
// optional "template " keyword, the name, and any explicit template
// arguments. The three visitors below share this exact structure.
void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
  if (NestedNameSpecifier *Qualifier = Node->getQualifier())
    Qualifier->print(OS, Policy);
  if (Node->hasTemplateKeyword())
    OS << "template ";
  OS << Node->getNameInfo();
  if (Node->hasExplicitTemplateArgs())
    TemplateSpecializationType::PrintTemplateArgumentList(
      OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy);
}

void StmtPrinter::VisitDependentScopeDeclRefExpr(
    DependentScopeDeclRefExpr *Node) {
  if (NestedNameSpecifier *Qualifier = Node->getQualifier())
    Qualifier->print(OS, Policy);
  if (Node->hasTemplateKeyword())
    OS << "template ";
  OS << Node->getNameInfo();
  if (Node->hasExplicitTemplateArgs())
    TemplateSpecializationType::PrintTemplateArgumentList(
      OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy);
}

void StmtPrinter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node) {
  if (Node->getQualifier())
    Node->getQualifier()->print(OS, Policy);
  if (Node->hasTemplateKeyword())
    OS << "template ";
  OS << Node->getNameInfo();
  if (Node->hasExplicitTemplateArgs())
    TemplateSpecializationType::PrintTemplateArgumentList(
      OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy);
}
// Ivar reference: "base->ivar" / "base.ivar", or a bare name for implicit
// self access.
void StmtPrinter::VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node) {
  if (Node->getBase()) {
    PrintExpr(Node->getBase());
    OS << (Node->isArrow() ? "->" : ".");
  }
  OS << *Node->getDecl();
}

// Property reference: the receiver may be "super", an object expression, or
// a class name; implicit properties print their getter selector.
void StmtPrinter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
  if (Node->isSuperReceiver())
    OS << "super.";
  else if (Node->isObjectReceiver() && Node->getBase()) {
    PrintExpr(Node->getBase());
    OS << ".";
  } else if (Node->isClassReceiver() && Node->getClassReceiver()) {
    OS << Node->getClassReceiver()->getName() << ".";
  }

  if (Node->isImplicitProperty())
    Node->getImplicitPropertyGetter()->getSelector().print(OS);
  else
    OS << Node->getExplicitProperty()->getName();
}

// Objective-C subscripting: "base[key]".
void StmtPrinter::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node) {
  PrintExpr(Node->getBaseExpr());
  OS << "[";
  PrintExpr(Node->getKeyExpr());
  OS << "]";
}

// __func__ / __FUNCTION__ / __PRETTY_FUNCTION__ print as their keyword.
void StmtPrinter::VisitPredefinedExpr(PredefinedExpr *Node) {
  OS << PredefinedExpr::getIdentTypeName(Node->getIdentType());
}
// Print a character literal with its encoding prefix (L/u/U) and the value
// re-escaped: named escapes for control characters, \x / \u / \U for
// non-printable or wide values.
void StmtPrinter::VisitCharacterLiteral(CharacterLiteral *Node) {
  unsigned value = Node->getValue();

  switch (Node->getKind()) {
  case CharacterLiteral::Ascii: break; // no prefix.
  case CharacterLiteral::Wide:  OS << 'L'; break;
  case CharacterLiteral::UTF16: OS << 'u'; break;
  case CharacterLiteral::UTF32: OS << 'U'; break;
  }

  switch (value) {
  case '\\':
    OS << "'\\\\'";
    break;
  case '\'':
    OS << "'\\''";
    break;
  case '\a':
    // TODO: K&R: the meaning of '\\a' is different in traditional C
    OS << "'\\a'";
    break;
  case '\b':
    OS << "'\\b'";
    break;
  // Nonstandard escape sequence.
  /*case '\e':
    OS << "'\\e'";
    break;*/
  case '\f':
    OS << "'\\f'";
    break;
  case '\n':
    OS << "'\\n'";
    break;
  case '\r':
    OS << "'\\r'";
    break;
  case '\t':
    OS << "'\\t'";
    break;
  case '\v':
    OS << "'\\v'";
    break;
  default:
    // HLSL Change Begin
    // In HLSL mode, values above 255 are truncated to their low byte and
    // printed as a \x escape (no \u / \U escapes in HLSL).
    if (Policy.LangOpts.HLSL && value > 255) {
      unsigned int truncVal = value & (~0xffffff00);
      OS << "'\\x" << llvm::format("%02x", truncVal) << "'";
    }
    else {
    // HLSL Change End
      if (value < 256 && isPrintable((unsigned char)value))
        OS << "'" << (char)value << "'";
      else if (value < 256)
        OS << "'\\x" << llvm::format("%02x", value) << "'";
      else if (value <= 0xFFFF)
        OS << "'\\u" << llvm::format("%04x", value) << "'";
      else
        OS << "'\\U" << llvm::format("%08x", value) << "'";
    }
  }
}
// Print an integer literal with a suffix that round-trips its exact type.
// HLSL's min-precision and literal-int types are folded onto the closest
// standard suffix (or none for LitInt).
void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) {
  bool isSigned = Node->getType()->isSignedIntegerType();
  OS << Node->getValue().toString(10, isSigned);

  // Emit suffixes. Integer literals are always a builtin integer type.
  switch (Node->getType()->getAs<BuiltinType>()->getKind()) {
  default: llvm_unreachable("Unexpected type for integer literal!");
  case BuiltinType::Char_S:
  case BuiltinType::Char_U:    OS << "i8"; break;
  case BuiltinType::UChar:     OS << "Ui8"; break;
  case BuiltinType::Min16Int:  // HLSL Change
  case BuiltinType::Short:     OS << "i16"; break;
  case BuiltinType::Min16UInt: // HLSL Change
  case BuiltinType::UShort:    OS << "Ui16"; break;
  case BuiltinType::LitInt:    break; // HLSL Change
  case BuiltinType::Int:       break; // no suffix.
  case BuiltinType::UInt:      OS << 'U'; break;
  case BuiltinType::Long:      OS << 'L'; break;
  case BuiltinType::ULong:     OS << "UL"; break;
  case BuiltinType::LongLong:  OS << "LL"; break;
  case BuiltinType::ULongLong: OS << "ULL"; break;
  case BuiltinType::Int128:    OS << "i128"; break;
  case BuiltinType::UInt128:   OS << "Ui128"; break;
  case BuiltinType::Int8_4Packed:  OS << "i8_4pk"; break; // HLSL Change
  case BuiltinType::UInt8_4Packed: OS << "Ui8_4pk"; break; // HLSL Change
  }
}
// Print a floating-point literal. A trailing '.' is appended when the value
// printed as digits only, so it re-parses as a float rather than an int.
// When HLSLSuffix is set, HLSL suffixes (H/F/L, none for literal floats)
// are used instead of the C ones.
static void PrintFloatingLiteral(raw_ostream &OS, FloatingLiteral *Node,
                                 bool PrintSuffix,
                                 bool HLSLSuffix = false) { // HLSL Change - added HLSLSuffix
  SmallString<16> Str;
  Node->getValue().toString(Str);
  OS << Str;
  if (Str.find_first_not_of("-0123456789") == StringRef::npos)
    OS << '.'; // Trailing dot in order to separate from ints.

  if (!PrintSuffix)
    return;

  // Emit suffixes. Float literals are always a builtin float type.
  // HLSL Change Starts - override suffixes
  if (HLSLSuffix) {
    switch (Node->getType()->getAs<BuiltinType>()->getKind()) {
    default: llvm_unreachable("Unexpected type for float literal!");
    case BuiltinType::LitFloat: break; // HLSL Change -- no suffix
    case BuiltinType::Min10Float: break; // no suffix, as this is a literal and 'F' would pollute expression
    case BuiltinType::HalfFloat:
    case BuiltinType::Min16Float:
    case BuiltinType::Half: OS << 'H'; break; // HLSL Change -- added suffix
    case BuiltinType::Double: OS << 'L'; break; // HLSL Change -- added suffix
    case BuiltinType::Float: OS << 'F'; break;
    // Note absence of LongDouble - should not be possible in HLSL
    }
    return;
  }
  // HLSL Change Ends

  // Emit suffixes. Float literals are always a builtin float type.
  switch (Node->getType()->getAs<BuiltinType>()->getKind()) {
  default: llvm_unreachable("Unexpected type for float literal!");
  case BuiltinType::Half:       break; // FIXME: suffix?
  case BuiltinType::Double:     break; // no suffix.
  case BuiltinType::Float:      OS << 'F'; break;
  case BuiltinType::LongDouble: OS << 'L'; break;
  }
}
void StmtPrinter::VisitFloatingLiteral(FloatingLiteral *Node) {
  PrintFloatingLiteral(OS, Node, /*PrintSuffix=*/true, Policy.LangOpts.HLSL); // HLSL Change
}

// GNU imaginary literal: the sub-literal followed by 'i'.
void StmtPrinter::VisitImaginaryLiteral(ImaginaryLiteral *Node) {
  PrintExpr(Node->getSubExpr());
  OS << "i";
}

void StmtPrinter::VisitStringLiteral(StringLiteral *Str) {
  Str->outputString(OS);
}

void StmtPrinter::VisitParenExpr(ParenExpr *Node) {
  OS << "(";
  PrintExpr(Node->getSubExpr());
  OS << ")";
}
// Print a unary operator, placing the spelling before or after the operand
// depending on fixity, and inserting a space where omitting it would paste
// tokens together (e.g. "+ +x" must not become "++x").
void StmtPrinter::VisitUnaryOperator(UnaryOperator *Node) {
  if (!Node->isPostfix()) {
    OS << UnaryOperator::getOpcodeStr(Node->getOpcode());

    // Print a space if this is an "identifier operator" like __real, or if
    // it might be concatenated incorrectly like '+'.
    switch (Node->getOpcode()) {
    default: break;
    case UO_Real:
    case UO_Imag:
    case UO_Extension:
      OS << ' ';
      break;
    case UO_Plus:
    case UO_Minus:
      if (isa<UnaryOperator>(Node->getSubExpr()))
        OS << ' ';
      break;
    }
  }
  PrintExpr(Node->getSubExpr());

  if (Node->isPostfix())
    OS << UnaryOperator::getOpcodeStr(Node->getOpcode());
}
// Print "__builtin_offsetof(type, designator)" where the designator is a
// dotted path of field names interleaved with array subscripts; implicit
// base-class hops and unnamed components are skipped.
void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) {
  OS << "__builtin_offsetof(";
  Node->getTypeSourceInfo()->getType().print(OS, Policy);
  OS << ", ";
  bool PrintedSomething = false;
  for (unsigned i = 0, n = Node->getNumComponents(); i < n; ++i) {
    OffsetOfExpr::OffsetOfNode ON = Node->getComponent(i);
    if (ON.getKind() == OffsetOfExpr::OffsetOfNode::Array) {
      // Array node
      OS << "[";
      PrintExpr(Node->getIndexExpr(ON.getArrayExprIndex()));
      OS << "]";
      PrintedSomething = true;
      continue;
    }

    // Skip implicit base indirections.
    if (ON.getKind() == OffsetOfExpr::OffsetOfNode::Base)
      continue;

    // Field or identifier node.
    IdentifierInfo *Id = ON.getFieldName();
    if (!Id)
      continue;

    // '.' only between named components, not before the first one.
    if (PrintedSomething)
      OS << ".";
    else
      PrintedSomething = true;
    OS << Id->getName();
  }
  OS << ")";
}
// Print sizeof/alignof/vec_step-style traits. The keyword spelling depends
// on language mode; a type operand is parenthesized, an expression operand
// follows after a space. HLSL's array-length trait prints as "expr.Length".
void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node){
  // HLSL Change Begin
  if (Node->getKind() == UETT_ArrayLength) {
    PrintExpr(Node->getArgumentExpr());
    OS << ".Length";
    return;
  }
  // HLSL Change Ends
  switch(Node->getKind()) {
  case UETT_SizeOf:
    OS << "sizeof";
    break;
  case UETT_AlignOf:
    if (Policy.LangOpts.CPlusPlus)
      OS << "alignof";
    else if (Policy.LangOpts.C11)
      OS << "_Alignof";
    else
      OS << "__alignof";
    break;
  case UETT_VecStep:
    OS << "vec_step";
    break;
  case UETT_OpenMPRequiredSimdAlign:
    OS << "__builtin_omp_required_simd_align";
    break;
  }
  if (Node->isArgumentType()) {
    OS << '(';
    Node->getArgumentType().print(OS, Policy);
    OS << ')';
  } else {
    OS << " ";
    PrintExpr(Node->getArgumentExpr());
  }
}
// Print a C11 _Generic selection: the controlling expression followed by
// "type: expr" associations; a null association type is the default case.
void StmtPrinter::VisitGenericSelectionExpr(GenericSelectionExpr *Node) {
  OS << "_Generic(";
  PrintExpr(Node->getControllingExpr());
  for (unsigned Idx = 0, Count = Node->getNumAssocs(); Idx != Count; ++Idx) {
    OS << ", ";
    QualType AssocTy = Node->getAssocType(Idx);
    if (AssocTy.isNull())
      OS << "default";
    else
      AssocTy.print(OS, Policy);
    OS << ": ";
    PrintExpr(Node->getAssocExpr(Idx));
  }
  OS << ")";
}
void StmtPrinter::VisitArraySubscriptExpr(ArraySubscriptExpr *Node) {
  // base[index]
  PrintExpr(Node->getLHS());
  OS << '[';
  PrintExpr(Node->getRHS());
  OS << ']';
}
void StmtPrinter::PrintCallArgs(CallExpr *Call) {
  // Emit a comma-separated argument list; once a defaulted argument is
  // reached, it and everything after it is omitted.
  const unsigned NumArgs = Call->getNumArgs();
  for (unsigned ArgIdx = 0; ArgIdx != NumArgs; ++ArgIdx) {
    Expr *Arg = Call->getArg(ArgIdx);
    if (isa<CXXDefaultArgExpr>(Arg))
      break; // Don't print any defaulted arguments.
    if (ArgIdx != 0)
      OS << ", ";
    PrintExpr(Arg);
  }
}
void StmtPrinter::VisitCallExpr(CallExpr *Call) {
  // callee(arg0, arg1, ...)
  PrintExpr(Call->getCallee());
  OS << '(';
  PrintCallArgs(Call);
  OS << ')';
}
void StmtPrinter::VisitMemberExpr(MemberExpr *Node) {
  // Print the base expression, then the access operator, then the member
  // name with any qualifier / template arguments.
  // FIXME: Suppress printing implicit bases (like "this")
  PrintExpr(Node->getBase());
  // If the base is itself an access into an anonymous struct/union member,
  // that member has no spellable name, so no '.'/'->' is printed after it.
  MemberExpr *ParentMember = dyn_cast<MemberExpr>(Node->getBase());
  FieldDecl *ParentDecl = ParentMember
      ? dyn_cast<FieldDecl>(ParentMember->getMemberDecl()) : nullptr;
  if (!ParentDecl || !ParentDecl->isAnonymousStructOrUnion())
    // HLSL has no '->'; always use '.' in HLSL mode.
    OS << (!Policy.LangOpts.HLSL && Node->isArrow() ? "->" : "."); // HLSL Change
  // A member that is itself an anonymous struct/union has no printable name.
  if (FieldDecl *FD = dyn_cast<FieldDecl>(Node->getMemberDecl()))
    if (FD->isAnonymousStructOrUnion())
      return;
  if (NestedNameSpecifier *Qualifier = Node->getQualifier())
    Qualifier->print(OS, Policy);
  if (Node->hasTemplateKeyword())
    OS << "template ";
  OS << Node->getMemberNameInfo();
  if (Node->hasExplicitTemplateArgs())
    TemplateSpecializationType::PrintTemplateArgumentList(
        OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy);
}
void StmtPrinter::VisitObjCIsaExpr(ObjCIsaExpr *Node) {
PrintExpr(Node->getBase());
OS << (Node->isArrow() ? "->isa" : ".isa");
}
void StmtPrinter::VisitExtVectorElementExpr(ExtVectorElementExpr *Node) {
PrintExpr(Node->getBase());
OS << ".";
OS << Node->getAccessor().getName();
}
// HLSL Change Starts
void StmtPrinter::VisitExtMatrixElementExpr(ExtMatrixElementExpr *Node) {
PrintExpr(Node->getBase());
OS << ".";
OS << Node->getAccessor().getName();
}
void StmtPrinter::VisitHLSLVectorElementExpr(HLSLVectorElementExpr *Node) {
PrintExpr(Node->getBase());
OS << ".";
OS << Node->getAccessor().getName();
}
// HLSL Change Ends
void StmtPrinter::VisitCStyleCastExpr(CStyleCastExpr *Node) {
OS << '(';
Node->getTypeAsWritten().print(OS, Policy);
OS << ')';
PrintExpr(Node->getSubExpr());
}
void StmtPrinter::VisitCompoundLiteralExpr(CompoundLiteralExpr *Node) {
OS << '(';
Node->getType().print(OS, Policy);
OS << ')';
PrintExpr(Node->getInitializer());
}
void StmtPrinter::VisitImplicitCastExpr(ImplicitCastExpr *Node) {
// No need to print anything, simply forward to the subexpression.
PrintExpr(Node->getSubExpr());
}
void StmtPrinter::VisitBinaryOperator(BinaryOperator *Node) {
PrintExpr(Node->getLHS());
OS << " " << BinaryOperator::getOpcodeStr(Node->getOpcode()) << " ";
PrintExpr(Node->getRHS());
}
void StmtPrinter::VisitCompoundAssignOperator(CompoundAssignOperator *Node) {
PrintExpr(Node->getLHS());
OS << " " << BinaryOperator::getOpcodeStr(Node->getOpcode()) << " ";
PrintExpr(Node->getRHS());
}
void StmtPrinter::VisitConditionalOperator(ConditionalOperator *Node) {
PrintExpr(Node->getCond());
OS << " ? ";
PrintExpr(Node->getLHS());
OS << " : ";
PrintExpr(Node->getRHS());
}
// GNU extensions.
void
StmtPrinter::VisitBinaryConditionalOperator(BinaryConditionalOperator *Node) {
PrintExpr(Node->getCommon());
OS << " ?: ";
PrintExpr(Node->getFalseExpr());
}
void StmtPrinter::VisitAddrLabelExpr(AddrLabelExpr *Node) {
OS << "&&" << Node->getLabel()->getName();
}
void StmtPrinter::VisitStmtExpr(StmtExpr *E) {
OS << "(";
PrintRawCompoundStmt(E->getSubStmt());
OS << ")";
}
void StmtPrinter::VisitChooseExpr(ChooseExpr *Node) {
OS << "__builtin_choose_expr(";
PrintExpr(Node->getCond());
OS << ", ";
PrintExpr(Node->getLHS());
OS << ", ";
PrintExpr(Node->getRHS());
OS << ")";
}
void StmtPrinter::VisitGNUNullExpr(GNUNullExpr *) {
OS << "__null";
}
void StmtPrinter::VisitShuffleVectorExpr(ShuffleVectorExpr *Node) {
OS << "__builtin_shufflevector(";
for (unsigned i = 0, e = Node->getNumSubExprs(); i != e; ++i) {
if (i) OS << ", ";
PrintExpr(Node->getExpr(i));
}
OS << ")";
}
void StmtPrinter::VisitConvertVectorExpr(ConvertVectorExpr *Node) {
OS << "__builtin_convertvector(";
PrintExpr(Node->getSrcExpr());
OS << ", ";
Node->getType().print(OS, Policy);
OS << ")";
}
void StmtPrinter::VisitInitListExpr(InitListExpr* Node) {
  // Prefer the syntactic form, which reflects what the user actually wrote.
  if (Node->getSyntacticForm()) {
    Visit(Node->getSyntacticForm());
    return;
  }
  // HLSL vector initializers spelled as functional casts (e.g. float4(...))
  // must not be wrapped in braces.
  if (!Policy.LangOpts.HLSL ||
      !Node->isVectorInitWithCXXFunctionalCastExpr()) // HLSL Change
    OS << "{ ";
  for (unsigned i = 0, e = Node->getNumInits(); i != e; ++i) {
    if (i) OS << ", ";
    if (Node->getInit(i))
      PrintExpr(Node->getInit(i));
    else
      OS << "{}"; // A missing initializer prints as an empty braced list.
  }
  if (!Policy.LangOpts.HLSL ||
      !Node->isVectorInitWithCXXFunctionalCastExpr()) // HLSL Change
    OS << " }";
}
void StmtPrinter::VisitParenListExpr(ParenListExpr* Node) {
OS << "(";
for (unsigned i = 0, e = Node->getNumExprs(); i != e; ++i) {
if (i) OS << ", ";
PrintExpr(Node->getExpr(i));
}
OS << ")";
}
void StmtPrinter::VisitDesignatedInitExpr(DesignatedInitExpr *Node) {
  // Print each designator, then " = " (or just " " for the GNU
  // "fieldname:" syntax, which has no '=') and the initializer.
  bool NeedsEquals = true;
  for (DesignatedInitExpr::designators_iterator D = Node->designators_begin(),
                                      DEnd = Node->designators_end();
       D != DEnd; ++D) {
    if (D->isFieldDesignator()) {
      if (D->getDotLoc().isInvalid()) {
        // GNU-style "field: init" designator: no leading '.' was written.
        if (IdentifierInfo *II = D->getFieldName()) {
          OS << II->getName() << ":";
          NeedsEquals = false;
        }
      } else {
        OS << "." << D->getFieldName()->getName();
      }
    } else {
      OS << "[";
      if (D->isArrayDesignator()) {
        PrintExpr(Node->getArrayIndex(*D));
      } else {
        // GNU array-range designator: [start ... end].
        PrintExpr(Node->getArrayRangeStart(*D));
        OS << " ... ";
        PrintExpr(Node->getArrayRangeEnd(*D));
      }
      OS << "]";
    }
  }
  if (NeedsEquals)
    OS << " = ";
  else
    OS << " ";
  PrintExpr(Node->getInit());
}
void StmtPrinter::VisitDesignatedInitUpdateExpr(
DesignatedInitUpdateExpr *Node) {
OS << "{";
OS << "/*base*/";
PrintExpr(Node->getBase());
OS << ", ";
OS << "/*updater*/";
PrintExpr(Node->getUpdater());
OS << "}";
}
void StmtPrinter::VisitNoInitExpr(NoInitExpr *Node) {
OS << "/*no init*/";
}
void StmtPrinter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *Node) {
if (Policy.LangOpts.CPlusPlus) {
OS << "/*implicit*/";
Node->getType().print(OS, Policy);
OS << "()";
} else {
OS << "/*implicit*/(";
Node->getType().print(OS, Policy);
OS << ')';
if (Node->getType()->isRecordType())
OS << "{}";
else
OS << 0;
}
}
void StmtPrinter::VisitVAArgExpr(VAArgExpr *Node) {
OS << "__builtin_va_arg(";
PrintExpr(Node->getSubExpr());
OS << ", ";
Node->getType().print(OS, Policy);
OS << ")";
}
void StmtPrinter::VisitPseudoObjectExpr(PseudoObjectExpr *Node) {
PrintExpr(Node->getSyntacticForm());
}
void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
  const char *Name = nullptr;
  // Map the atomic opcode back to its builtin spelling (with the opening
  // paren appended, e.g. "__c11_atomic_load("). The cases are generated from
  // Builtins.def, so every AtomicExpr opcode is assigned a name.
  switch (Node->getOp()) {
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
  case AtomicExpr::AO ## ID: \
    Name = #ID "("; \
    break;
#include "clang/Basic/Builtins.def"
  }
  OS << Name;
  // AtomicExpr stores its subexpressions in a permuted order.
  PrintExpr(Node->getPtr());
  // Loads take no value operand.
  if (Node->getOp() != AtomicExpr::AO__c11_atomic_load &&
      Node->getOp() != AtomicExpr::AO__atomic_load_n) {
    OS << ", ";
    PrintExpr(Node->getVal1());
  }
  // Exchange and compare-exchange forms carry a second value operand.
  if (Node->getOp() == AtomicExpr::AO__atomic_exchange ||
      Node->isCmpXChg()) {
    OS << ", ";
    PrintExpr(Node->getVal2());
  }
  // GNU __atomic_compare_exchange* builtins take a "weak" flag.
  if (Node->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
      Node->getOp() == AtomicExpr::AO__atomic_compare_exchange_n) {
    OS << ", ";
    PrintExpr(Node->getWeak());
  }
  // All forms except __c11_atomic_init take a memory-order argument.
  if (Node->getOp() != AtomicExpr::AO__c11_atomic_init) {
    OS << ", ";
    PrintExpr(Node->getOrder());
  }
  // Compare-exchange additionally takes a failure memory order.
  if (Node->isCmpXChg()) {
    OS << ", ";
    PrintExpr(Node->getOrderFail());
  }
  OS << ")";
}
// C++
void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) {
  // Spelling table indexed by OverloadedOperatorKind; entry 0 (OO_None) is
  // the empty string, the rest come from OperatorKinds.def.
  const char *OpStrings[NUM_OVERLOADED_OPERATORS] = {
    "",
#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
    Spelling,
#include "clang/Basic/OperatorKinds.def"
  };
  OverloadedOperatorKind Kind = Node->getOperator();
  if (Kind == OO_PlusPlus || Kind == OO_MinusMinus) {
    // One argument means prefix form; two (with the dummy int) means postfix.
    if (Node->getNumArgs() == 1) {
      OS << OpStrings[Kind] << ' ';
      PrintExpr(Node->getArg(0));
    } else {
      PrintExpr(Node->getArg(0));
      OS << ' ' << OpStrings[Kind];
    }
  } else if (Kind == OO_Arrow) {
    PrintExpr(Node->getArg(0));
  } else if (Kind == OO_Call) {
    // Arg 0 is the callee object; remaining args are the call arguments.
    PrintExpr(Node->getArg(0));
    OS << '(';
    for (unsigned ArgIdx = 1; ArgIdx < Node->getNumArgs(); ++ArgIdx) {
      if (ArgIdx > 1)
        OS << ", ";
      if (!isa<CXXDefaultArgExpr>(Node->getArg(ArgIdx)))
        PrintExpr(Node->getArg(ArgIdx));
    }
    OS << ')';
  } else if (Kind == OO_Subscript) {
    PrintExpr(Node->getArg(0));
    OS << '[';
    PrintExpr(Node->getArg(1));
    OS << ']';
  } else if (Node->getNumArgs() == 1) {
    // Generic unary (prefix) operator.
    OS << OpStrings[Kind] << ' ';
    PrintExpr(Node->getArg(0));
  } else if (Node->getNumArgs() == 2) {
    // Generic binary (infix) operator.
    PrintExpr(Node->getArg(0));
    OS << ' ' << OpStrings[Kind] << ' ';
    PrintExpr(Node->getArg(1));
  } else {
    llvm_unreachable("unknown overloaded operator");
  }
}
void StmtPrinter::VisitCXXMemberCallExpr(CXXMemberCallExpr *Node) {
// If we have a conversion operator call only print the argument.
CXXMethodDecl *MD = Node->getMethodDecl();
if (MD && isa<CXXConversionDecl>(MD)) {
PrintExpr(Node->getImplicitObjectArgument());
return;
}
VisitCallExpr(cast<CallExpr>(Node));
}
void StmtPrinter::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *Node) {
PrintExpr(Node->getCallee());
OS << "<<<";
PrintCallArgs(Node->getConfig());
OS << ">>>(";
PrintCallArgs(Node);
OS << ")";
}
void StmtPrinter::VisitCXXNamedCastExpr(CXXNamedCastExpr *Node) {
OS << Node->getCastName() << '<';
Node->getTypeAsWritten().print(OS, Policy);
OS << ">(";
PrintExpr(Node->getSubExpr());
OS << ")";
}
void StmtPrinter::VisitCXXStaticCastExpr(CXXStaticCastExpr *Node) {
VisitCXXNamedCastExpr(Node);
}
void StmtPrinter::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *Node) {
VisitCXXNamedCastExpr(Node);
}
void StmtPrinter::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *Node) {
VisitCXXNamedCastExpr(Node);
}
void StmtPrinter::VisitCXXConstCastExpr(CXXConstCastExpr *Node) {
VisitCXXNamedCastExpr(Node);
}
void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) {
OS << "typeid(";
if (Node->isTypeOperand()) {
Node->getTypeOperandSourceInfo()->getType().print(OS, Policy);
} else {
PrintExpr(Node->getExprOperand());
}
OS << ")";
}
void StmtPrinter::VisitCXXUuidofExpr(CXXUuidofExpr *Node) {
OS << "__uuidof(";
if (Node->isTypeOperand()) {
Node->getTypeOperandSourceInfo()->getType().print(OS, Policy);
} else {
PrintExpr(Node->getExprOperand());
}
OS << ")";
}
void StmtPrinter::VisitMSPropertyRefExpr(MSPropertyRefExpr *Node) {
PrintExpr(Node->getBaseExpr());
if (Node->isArrow())
OS << "->";
else
OS << ".";
if (NestedNameSpecifier *Qualifier =
Node->getQualifierLoc().getNestedNameSpecifier())
Qualifier->print(OS, Policy);
OS << Node->getPropertyDecl()->getDeclName();
}
void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) {
  // Reconstruct the literal's token spelling, then append its ud-suffix.
  switch (Node->getLiteralOperatorKind()) {
  case UserDefinedLiteral::LOK_Raw:
    // Raw form: the original token text is the first (string) argument.
    OS << cast<StringLiteral>(Node->getArg(0)->IgnoreImpCasts())->getString();
    break;
  case UserDefinedLiteral::LOK_Template: {
    // Numeric literal operator template: the token was encoded as a pack of
    // character template arguments on the called specialization.
    DeclRefExpr *DRE = cast<DeclRefExpr>(Node->getCallee()->IgnoreImpCasts());
    const TemplateArgumentList *Args =
      cast<FunctionDecl>(DRE->getDecl())->getTemplateSpecializationArgs();
    assert(Args);
    if (Args->size() != 1) {
      // Not a single char pack: print an explicit operator "" call instead.
      OS << "operator \"\" " << Node->getUDSuffix()->getName();
      TemplateSpecializationType::PrintTemplateArgumentList(
          OS, Args->data(), Args->size(), Policy);
      OS << "()";
      return;
    }
    // Reassemble the token from the character pack.
    const TemplateArgument &Pack = Args->get(0);
    for (const auto &P : Pack.pack_elements()) {
      char C = (char)P.getAsIntegral().getZExtValue();
      OS << C;
    }
    break;
  }
  case UserDefinedLiteral::LOK_Integer: {
    // Print integer literal without suffix.
    IntegerLiteral *Int = cast<IntegerLiteral>(Node->getCookedLiteral());
    OS << Int->getValue().toString(10, /*isSigned*/false);
    break;
  }
  case UserDefinedLiteral::LOK_Floating: {
    // Print floating literal without suffix.
    FloatingLiteral *Float = cast<FloatingLiteral>(Node->getCookedLiteral());
    PrintFloatingLiteral(OS, Float, /*PrintSuffix=*/false, Policy.LangOpts.HLSL); // HLSL Change
    break;
  }
  case UserDefinedLiteral::LOK_String:
  case UserDefinedLiteral::LOK_Character:
    // Cooked string/character literals print via the normal literal printer.
    PrintExpr(Node->getCookedLiteral());
    break;
  }
  OS << Node->getUDSuffix()->getName();
}
void StmtPrinter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node) {
OS << (Node->getValue() ? "true" : "false");
}
void StmtPrinter::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *Node) {
OS << "nullptr";
}
void StmtPrinter::VisitCXXThisExpr(CXXThisExpr *Node) {
OS << "this";
}
void StmtPrinter::VisitCXXThrowExpr(CXXThrowExpr *Node) {
if (!Node->getSubExpr())
OS << "throw";
else {
OS << "throw ";
PrintExpr(Node->getSubExpr());
}
}
void StmtPrinter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *Node) {
// Nothing to print: we picked up the default argument.
}
void StmtPrinter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *Node) {
// Nothing to print: we picked up the default initializer.
}
void StmtPrinter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) {
Node->getType().print(OS, Policy);
// If there are no parens, this is list-initialization, and the braces are
// part of the syntax of the inner construct.
if (Node->getLParenLoc().isValid())
OS << "(";
PrintExpr(Node->getSubExpr());
if (Node->getLParenLoc().isValid())
OS << ")";
}
void StmtPrinter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) {
PrintExpr(Node->getSubExpr());
}
void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) {
Node->getType().print(OS, Policy);
if (Node->isStdInitListInitialization())
/* Nothing to do; braces are part of creating the std::initializer_list. */;
else if (Node->isListInitialization())
OS << "{";
else
OS << "(";
for (CXXTemporaryObjectExpr::arg_iterator Arg = Node->arg_begin(),
ArgEnd = Node->arg_end();
Arg != ArgEnd; ++Arg) {
if (Arg->isDefaultArgument())
break;
if (Arg != Node->arg_begin())
OS << ", ";
PrintExpr(*Arg);
}
if (Node->isStdInitListInitialization())
/* See above. */;
else if (Node->isListInitialization())
OS << "}";
else
OS << ")";
}
void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
  // Capture list: default capture first, then the explicit captures.
  OS << '[';
  bool NeedComma = false;
  switch (Node->getCaptureDefault()) {
  case LCD_None:
    break;
  case LCD_ByCopy:
    OS << '=';
    NeedComma = true;
    break;
  case LCD_ByRef:
    OS << '&';
    NeedComma = true;
    break;
  }
  for (LambdaExpr::capture_iterator C = Node->explicit_capture_begin(),
                                 CEnd = Node->explicit_capture_end();
       C != CEnd;
       ++C) {
    if (NeedComma)
      OS << ", ";
    NeedComma = true;
    switch (C->getCaptureKind()) {
    case LCK_This:
      OS << "this";
      break;
    case LCK_ByRef:
      // '&' is implied by a by-ref capture default, except for init-captures.
      if (Node->getCaptureDefault() != LCD_ByRef || Node->isInitCapture(C))
        OS << '&';
      OS << C->getCapturedVar()->getName();
      break;
    case LCK_ByCopy:
      OS << C->getCapturedVar()->getName();
      break;
    case LCK_VLAType:
      llvm_unreachable("VLA type in explicit captures.");
    }
    // Init-captures print their initializer right after the name.
    if (Node->isInitCapture(C))
      PrintExpr(C->getCapturedVar()->getInit());
  }
  OS << ']';
  // Parameter list is only printed when it was written in the source.
  if (Node->hasExplicitParameters()) {
    OS << " (";
    CXXMethodDecl *Method = Node->getCallOperator();
    NeedComma = false;
    for (auto P : Method->params()) {
      if (NeedComma) {
        OS << ", ";
      } else {
        NeedComma = true;
      }
      // Print the parameter's type with its name spliced in as a suffix.
      std::string ParamStr = P->getNameAsString();
      P->getOriginalType().print(OS, Policy, ParamStr);
    }
    if (Method->isVariadic()) {
      if (NeedComma)
        OS << ", ";
      OS << "...";
    }
    OS << ')';
    if (Node->isMutable())
      OS << " mutable";
    const FunctionProtoType *Proto
      = Method->getType()->getAs<FunctionProtoType>();
    Proto->printExceptionSpecification(OS, Policy);
    // FIXME: Attributes
    // Print the trailing return type if it was specified in the source.
    if (Node->hasExplicitResultType()) {
      OS << " -> ";
      Proto->getReturnType().print(OS, Policy);
    }
  }
  // Print the body.
  CompoundStmt *Body = Node->getBody();
  OS << ' ';
  PrintStmt(Body);
}
void StmtPrinter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *Node) {
if (TypeSourceInfo *TSInfo = Node->getTypeSourceInfo())
TSInfo->getType().print(OS, Policy);
else
Node->getType().print(OS, Policy);
OS << "()";
}
void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
  if (E->isGlobalNew())
    OS << "::";
  OS << "new ";
  // Placement arguments, if any: "new (p1, p2) T". Defaulted trailing
  // placement arguments are not printed.
  unsigned NumPlace = E->getNumPlacementArgs();
  if (NumPlace > 0 && !isa<CXXDefaultArgExpr>(E->getPlacementArg(0))) {
    OS << "(";
    PrintExpr(E->getPlacementArg(0));
    for (unsigned i = 1; i < NumPlace; ++i) {
      if (isa<CXXDefaultArgExpr>(E->getPlacementArg(i)))
        break;
      OS << ", ";
      PrintExpr(E->getPlacementArg(i));
    }
    OS << ") ";
  }
  if (E->isParenTypeId())
    OS << "(";
  // For array new, render "[size]" into a string and pass it as the
  // placeholder suffix so it lands in the right syntactic position within
  // the type (e.g. "int [n]").
  std::string TypeS;
  if (Expr *Size = E->getArraySize()) {
    llvm::raw_string_ostream s(TypeS);
    s << '[';
    Size->printPretty(s, Helper, Policy);
    s << ']';
  }
  E->getAllocatedType().print(OS, Policy, TypeS);
  if (E->isParenTypeId())
    OS << ")";
  // CallInit adds parentheses; ListInit braces come from the initializer
  // expression itself. NoInit prints nothing.
  CXXNewExpr::InitializationStyle InitStyle = E->getInitializationStyle();
  if (InitStyle) {
    if (InitStyle == CXXNewExpr::CallInit)
      OS << "(";
    PrintExpr(E->getInitializer());
    if (InitStyle == CXXNewExpr::CallInit)
      OS << ")";
  }
}
void StmtPrinter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
if (E->isGlobalDelete())
OS << "::";
OS << "delete ";
if (E->isArrayForm())
OS << "[] ";
PrintExpr(E->getArgument());
}
void StmtPrinter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
PrintExpr(E->getBase());
if (E->isArrow())
OS << "->";
else
OS << '.';
if (E->getQualifier())
E->getQualifier()->print(OS, Policy);
OS << "~";
if (IdentifierInfo *II = E->getDestroyedTypeIdentifier())
OS << II->getName();
else
E->getDestroyedType().print(OS, Policy);
}
void StmtPrinter::VisitCXXConstructExpr(CXXConstructExpr *E) {
if (E->isListInitialization() && !E->isStdInitListInitialization())
OS << "{";
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
if (isa<CXXDefaultArgExpr>(E->getArg(i))) {
// Don't print any defaulted arguments
break;
}
if (i) OS << ", ";
PrintExpr(E->getArg(i));
}
if (E->isListInitialization() && !E->isStdInitListInitialization())
OS << "}";
}
void StmtPrinter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
PrintExpr(E->getSubExpr());
}
void StmtPrinter::VisitExprWithCleanups(ExprWithCleanups *E) {
// Just forward to the subexpression.
PrintExpr(E->getSubExpr());
}
void
StmtPrinter::VisitCXXUnresolvedConstructExpr(
CXXUnresolvedConstructExpr *Node) {
Node->getTypeAsWritten().print(OS, Policy);
OS << "(";
for (CXXUnresolvedConstructExpr::arg_iterator Arg = Node->arg_begin(),
ArgEnd = Node->arg_end();
Arg != ArgEnd; ++Arg) {
if (Arg != Node->arg_begin())
OS << ", ";
PrintExpr(*Arg);
}
OS << ")";
}
void StmtPrinter::VisitCXXDependentScopeMemberExpr(
CXXDependentScopeMemberExpr *Node) {
if (!Node->isImplicitAccess()) {
PrintExpr(Node->getBase());
OS << (Node->isArrow() ? "->" : ".");
}
if (NestedNameSpecifier *Qualifier = Node->getQualifier())
Qualifier->print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
OS << Node->getMemberNameInfo();
if (Node->hasExplicitTemplateArgs())
TemplateSpecializationType::PrintTemplateArgumentList(
OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy);
}
void StmtPrinter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *Node) {
if (!Node->isImplicitAccess()) {
PrintExpr(Node->getBase());
OS << (Node->isArrow() ? "->" : ".");
}
if (NestedNameSpecifier *Qualifier = Node->getQualifier())
Qualifier->print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
OS << Node->getMemberNameInfo();
if (Node->hasExplicitTemplateArgs())
TemplateSpecializationType::PrintTemplateArgumentList(
OS, Node->getTemplateArgs(), Node->getNumTemplateArgs(), Policy);
}
static const char *getTypeTraitName(TypeTrait TT) {
switch (TT) {
#define TYPE_TRAIT_1(Spelling, Name, Key) \
case clang::UTT_##Name: return #Spelling;
#define TYPE_TRAIT_2(Spelling, Name, Key) \
case clang::BTT_##Name: return #Spelling;
#define TYPE_TRAIT_N(Spelling, Name, Key) \
case clang::TT_##Name: return #Spelling;
#include "clang/Basic/TokenKinds.def"
}
llvm_unreachable("Type trait not covered by switch");
}
static const char *getTypeTraitName(ArrayTypeTrait ATT) {
switch (ATT) {
case ATT_ArrayRank: return "__array_rank";
case ATT_ArrayExtent: return "__array_extent";
}
llvm_unreachable("Array type trait not covered by switch");
}
static const char *getExpressionTraitName(ExpressionTrait ET) {
switch (ET) {
case ET_IsLValueExpr: return "__is_lvalue_expr";
case ET_IsRValueExpr: return "__is_rvalue_expr";
}
llvm_unreachable("Expression type trait not covered by switch");
}
void StmtPrinter::VisitTypeTraitExpr(TypeTraitExpr *E) {
OS << getTypeTraitName(E->getTrait()) << "(";
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) {
if (I > 0)
OS << ", ";
E->getArg(I)->getType().print(OS, Policy);
}
OS << ")";
}
void StmtPrinter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
OS << getTypeTraitName(E->getTrait()) << '(';
E->getQueriedType().print(OS, Policy);
OS << ')';
}
void StmtPrinter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
OS << getExpressionTraitName(E->getTrait()) << '(';
PrintExpr(E->getQueriedExpression());
OS << ')';
}
void StmtPrinter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
OS << "noexcept(";
PrintExpr(E->getOperand());
OS << ")";
}
void StmtPrinter::VisitPackExpansionExpr(PackExpansionExpr *E) {
PrintExpr(E->getPattern());
OS << "...";
}
void StmtPrinter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
OS << "sizeof...(" << *E->getPack() << ")";
}
void StmtPrinter::VisitSubstNonTypeTemplateParmPackExpr(
SubstNonTypeTemplateParmPackExpr *Node) {
OS << *Node->getParameterPack();
}
void StmtPrinter::VisitSubstNonTypeTemplateParmExpr(
SubstNonTypeTemplateParmExpr *Node) {
Visit(Node->getReplacement());
}
void StmtPrinter::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) {
OS << *E->getParameterPack();
}
void StmtPrinter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *Node){
PrintExpr(Node->GetTemporaryExpr());
}
void StmtPrinter::VisitCXXFoldExpr(CXXFoldExpr *E) {
OS << "(";
if (E->getLHS()) {
PrintExpr(E->getLHS());
OS << " " << BinaryOperator::getOpcodeStr(E->getOperator()) << " ";
}
OS << "...";
if (E->getRHS()) {
OS << " " << BinaryOperator::getOpcodeStr(E->getOperator()) << " ";
PrintExpr(E->getRHS());
}
OS << ")";
}
// Obj-C
void StmtPrinter::VisitObjCStringLiteral(ObjCStringLiteral *Node) {
OS << "@";
VisitStringLiteral(Node->getString());
}
void StmtPrinter::VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
OS << "@";
Visit(E->getSubExpr());
}
void StmtPrinter::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
  // @[ elem0, elem1, ... ]
  OS << "@[ ";
  StmtRange ch = E->children();
  for (bool First = true; ch.first != ch.second; ++ch.first) {
    if (!First)
      OS << ", ";
    First = false;
    Visit(*ch.first);
  }
  OS << " ]";
}
void StmtPrinter::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
OS << "@{ ";
for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) {
if (I > 0)
OS << ", ";
ObjCDictionaryElement Element = E->getKeyValueElement(I);
Visit(Element.Key);
OS << " : ";
Visit(Element.Value);
if (Element.isPackExpansion())
OS << "...";
}
OS << " }";
}
void StmtPrinter::VisitObjCEncodeExpr(ObjCEncodeExpr *Node) {
OS << "@encode(";
Node->getEncodedType().print(OS, Policy);
OS << ')';
}
void StmtPrinter::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) {
OS << "@selector(";
Node->getSelector().print(OS);
OS << ')';
}
void StmtPrinter::VisitObjCProtocolExpr(ObjCProtocolExpr *Node) {
OS << "@protocol(" << *Node->getProtocol() << ')';
}
void StmtPrinter::VisitObjCMessageExpr(ObjCMessageExpr *Mess) {
  OS << "[";
  // Print the receiver: an instance expression, a class name, or "Super".
  switch (Mess->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    PrintExpr(Mess->getInstanceReceiver());
    break;
  case ObjCMessageExpr::Class:
    Mess->getClassReceiver().print(OS, Policy);
    break;
  case ObjCMessageExpr::SuperInstance:
  case ObjCMessageExpr::SuperClass:
    OS << "Super";
    break;
  }
  OS << ' ';
  Selector selector = Mess->getSelector();
  if (selector.isUnarySelector()) {
    OS << selector.getNameForSlot(0);
  } else {
    // Interleave selector slots with arguments: "slot0:arg0 slot1:arg1 ...".
    for (unsigned i = 0, e = Mess->getNumArgs(); i != e; ++i) {
      if (i < selector.getNumArgs()) {
        if (i > 0) OS << ' ';
        if (selector.getIdentifierInfoForSlot(i))
          OS << selector.getIdentifierInfoForSlot(i)->getName() << ':';
        else
           OS << ":";
      }
      else OS << ", "; // Handle variadic methods.
      PrintExpr(Mess->getArg(i));
    }
  }
  OS << "]";
}
void StmtPrinter::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Node) {
OS << (Node->getValue() ? "__objc_yes" : "__objc_no");
}
void
StmtPrinter::VisitObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
PrintExpr(E->getSubExpr());
}
void
StmtPrinter::VisitObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
OS << '(' << E->getBridgeKindName();
E->getType().print(OS, Policy);
OS << ')';
PrintExpr(E->getSubExpr());
}
void StmtPrinter::VisitBlockExpr(BlockExpr *Node) {
  BlockDecl *BD = Node->getBlockDecl();
  OS << "^";
  const FunctionType *AFT = Node->getFunctionType();
  if (isa<FunctionNoProtoType>(AFT)) {
    // No-prototype block: "^()".
    OS << "()";
  } else if (!BD->param_empty() || cast<FunctionProtoType>(AFT)->isVariadic()) {
    OS << '(';
    for (BlockDecl::param_iterator AI = BD->param_begin(),
         E = BD->param_end(); AI != E; ++AI) {
      if (AI != BD->param_begin()) OS << ", ";
      // Print each parameter type with its name spliced in as a suffix.
      std::string ParamStr = (*AI)->getNameAsString();
      (*AI)->getType().print(OS, Policy, ParamStr);
    }
    const FunctionProtoType *FT = cast<FunctionProtoType>(AFT);
    if (FT->isVariadic()) {
      if (!BD->param_empty()) OS << ", ";
      OS << "...";
    }
    OS << ')';
  }
  // The block body itself is not printed; a placeholder is emitted.
  OS << "{ }";
}
void StmtPrinter::VisitOpaqueValueExpr(OpaqueValueExpr *Node) {
PrintExpr(Node->getSourceExpr());
}
void StmtPrinter::VisitTypoExpr(TypoExpr *Node) {
// TODO: Print something reasonable for a TypoExpr, if necessary.
assert(false && "Cannot print TypoExpr nodes");
}
void StmtPrinter::VisitAsTypeExpr(AsTypeExpr *Node) {
OS << "__builtin_astype(";
PrintExpr(Node->getSrcExpr());
OS << ", ";
Node->getType().print(OS, Policy);
OS << ")";
}
//===----------------------------------------------------------------------===//
// Stmt method implementations
//===----------------------------------------------------------------------===//
/// Debugging aid: pretty-print this statement to stderr using a default
/// printing policy built from the context's language options.
void Stmt::dumpPretty(const ASTContext &Context) const {
  printPretty(llvm::errs(), nullptr, PrintingPolicy(Context.getLangOpts()));
}
/// Pretty-print this statement (and its children) to the given stream.
/// \param Helper optional hook that lets callers customize sub-expression
///        printing; may be null.
void Stmt::printPretty(raw_ostream &OS,
                       PrinterHelper *Helper,
                       const PrintingPolicy &Policy,
                       unsigned Indentation) const {
  StmtPrinter P(OS, Helper, Policy, Indentation);
  P.Visit(const_cast<Stmt*>(this));
}
//===----------------------------------------------------------------------===//
// PrinterHelper
//===----------------------------------------------------------------------===//
// Implement virtual destructor.
PrinterHelper::~PrinterHelper() {}
//===--- CommentToXML.cpp - Convert comments to XML representation --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/Index/CommentToXML.h"
#include "SimpleFormatContext.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Comment.h"
#include "clang/AST/CommentVisitor.h"
#include "clang/Format/Format.h"
#include "clang/Index/USRGeneration.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace clang::comments;
using namespace clang::index;
namespace {
/// This comparison will sort parameters with valid index by index, then vararg
/// parameters, and invalid (unresolved) parameters last.
class ParamCommandCommentCompareIndex {
public:
bool operator()(const ParamCommandComment *LHS,
const ParamCommandComment *RHS) const {
unsigned LHSIndex = UINT_MAX;
unsigned RHSIndex = UINT_MAX;
if (LHS->isParamIndexValid()) {
if (LHS->isVarArgParam())
LHSIndex = UINT_MAX - 1;
else
LHSIndex = LHS->getParamIndex();
}
if (RHS->isParamIndexValid()) {
if (RHS->isVarArgParam())
RHSIndex = UINT_MAX - 1;
else
RHSIndex = RHS->getParamIndex();
}
return LHSIndex < RHSIndex;
}
};
/// This comparison will sort template parameters in the following order:
/// \li real template parameters (depth = 1) in index order;
/// \li all other names (depth > 1);
/// \li unresolved names.
class TParamCommandCommentComparePosition {
public:
  // Comparator tiers (lowest sorts first): depth-1 template parameters by
  // index, then deeper names, then unresolved names. Intended for use with
  // std::stable_sort so equal-tier elements keep their source order.
  bool operator()(const TParamCommandComment *LHS,
                  const TParamCommandComment *RHS) const {
    // Sort unresolved names last.
    if (!LHS->isPositionValid())
      return false;
    if (!RHS->isPositionValid())
      return true;
    // Names deeper than the top level keep their relative order.
    if (LHS->getDepth() > 1)
      return false;
    if (RHS->getDepth() > 1)
      return true;
    // Sort template parameters in index order.
    if (LHS->getDepth() == 1 && RHS->getDepth() == 1)
      return LHS->getIndex(0) < RHS->getIndex(0);
    // Leave all other names in source order.
    return true;
  }
};
/// Separate parts of a FullComment.
/// Separate parts of a FullComment.
struct FullCommentParts {
  /// Take a full comment apart and initialize members accordingly.
  FullCommentParts(const FullComment *C,
                   const CommandTraits &Traits);

  /// The first \\brief block, or null.
  const BlockContentComment *Brief;
  /// The first \\headerfile block, or null.
  const BlockContentComment *Headerfile;
  /// The first non-whitespace paragraph, or null (also kept in MiscBlocks).
  const ParagraphComment *FirstParagraph;
  /// All \\returns-style blocks, in source order.
  SmallVector<const BlockCommandComment *, 4> Returns;
  /// \\param blocks, sorted by declared parameter index.
  SmallVector<const ParamCommandComment *, 8> Params;
  /// \\tparam blocks, sorted by template parameter position.
  SmallVector<const TParamCommandComment *, 4> TParams;
  /// \\throws-style blocks, in source order.
  llvm::TinyPtrVector<const BlockCommandComment *> Exceptions;
  /// All remaining block content, in source order.
  SmallVector<const BlockContentComment *, 8> MiscBlocks;
};
FullCommentParts::FullCommentParts(const FullComment *C,
                                   const CommandTraits &Traits) :
    Brief(nullptr), Headerfile(nullptr), FirstParagraph(nullptr) {
  // Walk the comment's top-level children once, routing each block into the
  // matching bucket (brief / headerfile / returns / params / tparams /
  // throws) and collecting everything else into MiscBlocks.
  for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
       I != E; ++I) {
    const Comment *Child = *I;
    if (!Child)
      continue;
    switch (Child->getCommentKind()) {
    case Comment::NoCommentKind:
      continue;
    case Comment::ParagraphCommentKind: {
      const ParagraphComment *PC = cast<ParagraphComment>(Child);
      if (PC->isWhitespace())
        break;
      // Remember the first non-whitespace paragraph; it is still added to
      // MiscBlocks so it gets emitted in document order.
      if (!FirstParagraph)
        FirstParagraph = PC;
      MiscBlocks.push_back(PC);
      break;
    }
    case Comment::BlockCommandCommentKind: {
      const BlockCommandComment *BCC = cast<BlockCommandComment>(Child);
      const CommandInfo *Info = Traits.getCommandInfo(BCC->getCommandID());
      // Only the first \brief and first \headerfile are captured; later ones
      // fall through to the buckets below.
      if (!Brief && Info->IsBriefCommand) {
        Brief = BCC;
        break;
      }
      if (!Headerfile && Info->IsHeaderfileCommand) {
        Headerfile = BCC;
        break;
      }
      if (Info->IsReturnsCommand) {
        Returns.push_back(BCC);
        break;
      }
      if (Info->IsThrowsCommand) {
        Exceptions.push_back(BCC);
        break;
      }
      MiscBlocks.push_back(BCC);
      break;
    }
    case Comment::ParamCommandCommentKind: {
      const ParamCommandComment *PCC = cast<ParamCommandComment>(Child);
      // Drop \param blocks that name no parameter or carry no content.
      if (!PCC->hasParamName())
        break;
      if (!PCC->isDirectionExplicit() && !PCC->hasNonWhitespaceParagraph())
        break;
      Params.push_back(PCC);
      break;
    }
    case Comment::TParamCommandCommentKind: {
      const TParamCommandComment *TPCC = cast<TParamCommandComment>(Child);
      // Drop \tparam blocks that name no parameter or carry no content.
      if (!TPCC->hasParamName())
        break;
      if (!TPCC->hasNonWhitespaceParagraph())
        break;
      TParams.push_back(TPCC);
      break;
    }
    case Comment::VerbatimBlockCommentKind:
      MiscBlocks.push_back(cast<BlockCommandComment>(Child));
      break;
    case Comment::VerbatimLineCommentKind: {
      const VerbatimLineComment *VLC = cast<VerbatimLineComment>(Child);
      const CommandInfo *Info = Traits.getCommandInfo(VLC->getCommandID());
      // Declaration commands (e.g. \fn) describe the decl, not the comment
      // body, so they are not kept.
      if (!Info->IsDeclarationCommand)
        MiscBlocks.push_back(VLC);
      break;
    }
    case Comment::TextCommentKind:
    case Comment::InlineCommandCommentKind:
    case Comment::HTMLStartTagCommentKind:
    case Comment::HTMLEndTagCommentKind:
    case Comment::VerbatimBlockLineCommentKind:
    case Comment::FullCommentKind:
      llvm_unreachable("AST node of this kind can't be a child of "
                       "a FullComment");
    }
  }
  // Sort params in order they are declared in the function prototype.
  // Unresolved parameters are put at the end of the list in the same order
  // they were seen in the comment.
  std::stable_sort(Params.begin(), Params.end(),
                   ParamCommandCommentCompareIndex());
  std::stable_sort(TParams.begin(), TParams.end(),
                   TParamCommandCommentComparePosition());
}
/// Render an HTML start tag (e.g. <a href="...">), including attributes and
/// self-closing syntax, into \p Result.
void printHTMLStartTagComment(const HTMLStartTagComment *C,
                              llvm::raw_svector_ostream &Result) {
  Result << "<" << C->getTagName();

  // The loop body naturally runs zero times when there are no attributes, so
  // no separate emptiness guard is needed.
  for (unsigned i = 0, e = C->getNumAttrs(); i != e; i++) {
    Result << " ";
    const HTMLStartTagComment::Attribute &Attr = C->getAttr(i);
    Result << Attr.Name;
    if (!Attr.Value.empty())
      Result << "=\"" << Attr.Value << "\"";
  }

  if (!C->isSelfClosing())
    Result << ">";
  else
    Result << "/>";
}
/// Visitor that renders a comment AST as HTML into a caller-provided buffer.
class CommentASTToHTMLConverter :
    public ConstCommentVisitor<CommentASTToHTMLConverter> {
public:
  /// \param FC the enclosing FullComment, used to resolve parameter names;
  ///        may be null when converting a detached HTML tag node.
  /// \param Str accumulator for HTML.
  CommentASTToHTMLConverter(const FullComment *FC,
                            SmallVectorImpl<char> &Str,
                            const CommandTraits &Traits) :
      FC(FC), Result(Str), Traits(Traits)
  { }

  // Inline content.
  void visitTextComment(const TextComment *C);
  void visitInlineCommandComment(const InlineCommandComment *C);
  void visitHTMLStartTagComment(const HTMLStartTagComment *C);
  void visitHTMLEndTagComment(const HTMLEndTagComment *C);

  // Block content.
  void visitParagraphComment(const ParagraphComment *C);
  void visitBlockCommandComment(const BlockCommandComment *C);
  void visitParamCommandComment(const ParamCommandComment *C);
  void visitTParamCommandComment(const TParamCommandComment *C);
  void visitVerbatimBlockComment(const VerbatimBlockComment *C);
  void visitVerbatimBlockLineComment(const VerbatimBlockLineComment *C);
  void visitVerbatimLineComment(const VerbatimLineComment *C);

  void visitFullComment(const FullComment *C);

  // Helpers.

  /// Convert a paragraph that is not a block by itself (an argument to some
  /// command).
  void visitNonStandaloneParagraphComment(const ParagraphComment *C);

  /// Append \p S to the output, escaping HTML-special characters.
  void appendToResultWithHTMLEscaping(StringRef S);

private:
  const FullComment *FC;
  /// Output stream for HTML.
  llvm::raw_svector_ostream Result;
  const CommandTraits &Traits;
};
} // end unnamed namespace
/// A text node renders as its text, HTML-escaped.
void CommentASTToHTMLConverter::visitTextComment(const TextComment *C) {
  appendToResultWithHTMLEscaping(C->getText());
}
void CommentASTToHTMLConverter::visitInlineCommandComment(
const InlineCommandComment *C) {
// Nothing to render if no arguments supplied.
if (C->getNumArgs() == 0)
return;
// Nothing to render if argument is empty.
StringRef Arg0 = C->getArgText(0);
if (Arg0.empty())
return;
switch (C->getRenderKind()) {
case InlineCommandComment::RenderNormal:
for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i) {
appendToResultWithHTMLEscaping(C->getArgText(i));
Result << " ";
}
return;
case InlineCommandComment::RenderBold:
assert(C->getNumArgs() == 1);
Result << "<b>";
appendToResultWithHTMLEscaping(Arg0);
Result << "</b>";
return;
case InlineCommandComment::RenderMonospaced:
assert(C->getNumArgs() == 1);
Result << "<tt>";
appendToResultWithHTMLEscaping(Arg0);
Result<< "</tt>";
return;
case InlineCommandComment::RenderEmphasized:
assert(C->getNumArgs() == 1);
Result << "<em>";
appendToResultWithHTMLEscaping(Arg0);
Result << "</em>";
return;
}
}
/// HTML in the comment is passed through to the HTML output verbatim.
void CommentASTToHTMLConverter::visitHTMLStartTagComment(
                                  const HTMLStartTagComment *C) {
  printHTMLStartTagComment(C, Result);
}
/// An HTML end tag is reproduced literally; the output itself is HTML.
void CommentASTToHTMLConverter::visitHTMLEndTagComment(
                                  const HTMLEndTagComment *C) {
  Result << "</" << C->getTagName() << ">";
}
void CommentASTToHTMLConverter::visitParagraphComment(
const ParagraphComment *C) {
if (C->isWhitespace())
return;
Result << "<p>";
for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
I != E; ++I) {
visit(*I);
}
Result << "</p>";
}
/// Render a block command.  \brief and \returns get dedicated paragraph
/// CSS classes; any other command degrades to its plain paragraph.
void CommentASTToHTMLConverter::visitBlockCommandComment(
                                  const BlockCommandComment *C) {
  const CommandInfo *Info = Traits.getCommandInfo(C->getCommandID());
  if (Info->IsBriefCommand) {
    Result << "<p class=\"para-brief\">";
    visitNonStandaloneParagraphComment(C->getParagraph());
    Result << "</p>";
    return;
  }
  if (Info->IsReturnsCommand) {
    Result << "<p class=\"para-returns\">"
              "<span class=\"word-returns\">Returns</span> ";
    visitNonStandaloneParagraphComment(C->getParagraph());
    Result << "</p>";
    return;
  }
  // We don't know anything about this command.  Just render the paragraph.
  visit(C->getParagraph());
}
/// Render a \param command as a <dt>/<dd> pair whose CSS classes encode
/// the resolved parameter index ("vararg", a number, or "invalid" when the
/// name could not be matched to a declared parameter).
void CommentASTToHTMLConverter::visitParamCommandComment(
                                  const ParamCommandComment *C) {
  if (C->isParamIndexValid()) {
    if (C->isVarArgParam()) {
      Result << "<dt class=\"param-name-index-vararg\">";
      appendToResultWithHTMLEscaping(C->getParamNameAsWritten());
    } else {
      Result << "<dt class=\"param-name-index-"
             << C->getParamIndex()
             << "\">";
      // Resolved parameters print the canonical name from the declaration.
      appendToResultWithHTMLEscaping(C->getParamName(FC));
    }
  } else {
    Result << "<dt class=\"param-name-index-invalid\">";
    appendToResultWithHTMLEscaping(C->getParamNameAsWritten());
  }
  Result << "</dt>";

  if (C->isParamIndexValid()) {
    if (C->isVarArgParam())
      Result << "<dd class=\"param-descr-index-vararg\">";
    else
      Result << "<dd class=\"param-descr-index-"
             << C->getParamIndex()
             << "\">";
  } else
    Result << "<dd class=\"param-descr-index-invalid\">";
  visitNonStandaloneParagraphComment(C->getParagraph());
  Result << "</dd>";
}
/// Render a \tparam command as a <dt>/<dd> pair.  Only depth-1 template
/// parameters get a numeric index class; nested ones are marked "other".
void CommentASTToHTMLConverter::visitTParamCommandComment(
                                  const TParamCommandComment *C) {
  if (C->isPositionValid()) {
    if (C->getDepth() == 1)
      Result << "<dt class=\"tparam-name-index-"
             << C->getIndex(0)
             << "\">";
    else
      Result << "<dt class=\"tparam-name-index-other\">";
    appendToResultWithHTMLEscaping(C->getParamName(FC));
  } else {
    Result << "<dt class=\"tparam-name-index-invalid\">";
    appendToResultWithHTMLEscaping(C->getParamNameAsWritten());
  }

  Result << "</dt>";

  if (C->isPositionValid()) {
    if (C->getDepth() == 1)
      Result << "<dd class=\"tparam-descr-index-"
             << C->getIndex(0)
             << "\">";
    else
      Result << "<dd class=\"tparam-descr-index-other\">";
  } else
    Result << "<dd class=\"tparam-descr-index-invalid\">";

  visitNonStandaloneParagraphComment(C->getParagraph());
  Result << "</dd>";
}
/// Render a verbatim block (\verbatim, \code, ...) as a <pre> element,
/// escaping each line and separating lines with '\n' (no trailing newline).
void CommentASTToHTMLConverter::visitVerbatimBlockComment(
                                  const VerbatimBlockComment *C) {
  unsigned NumLines = C->getNumLines();
  if (NumLines == 0)
    return;

  Result << "<pre>";
  for (unsigned Line = 0; Line != NumLines; ++Line) {
    appendToResultWithHTMLEscaping(C->getText(Line));
    if (Line + 1 != NumLines)
      Result << '\n';
  }
  Result << "</pre>";
}
/// Individual verbatim lines are consumed by visitVerbatimBlockComment;
/// they must never be visited directly.
void CommentASTToHTMLConverter::visitVerbatimBlockLineComment(
                                  const VerbatimBlockLineComment *C) {
  llvm_unreachable("should not see this AST node");
}
/// A single verbatim line renders as its own <pre> element.
void CommentASTToHTMLConverter::visitVerbatimLineComment(
                                  const VerbatimLineComment *C) {
  Result << "<pre>";
  appendToResultWithHTMLEscaping(C->getText());
  Result << "</pre>";
}
/// Render the whole comment: brief/abstract first, then misc blocks,
/// then template parameters, parameters, and the returns discussion.
void CommentASTToHTMLConverter::visitFullComment(const FullComment *C) {
  FullCommentParts Parts(C, Traits);

  bool FirstParagraphIsBrief = false;
  if (Parts.Headerfile)
    visit(Parts.Headerfile);
  if (Parts.Brief)
    visit(Parts.Brief);
  else if (Parts.FirstParagraph) {
    // No explicit \brief: promote the first paragraph to the brief.
    Result << "<p class=\"para-brief\">";
    visitNonStandaloneParagraphComment(Parts.FirstParagraph);
    Result << "</p>";
    FirstParagraphIsBrief = true;
  }

  for (unsigned i = 0, e = Parts.MiscBlocks.size(); i != e; ++i) {
    const Comment *C = Parts.MiscBlocks[i];
    // Avoid emitting the first paragraph twice if it became the brief.
    if (FirstParagraphIsBrief && C == Parts.FirstParagraph)
      continue;
    visit(C);
  }

  if (Parts.TParams.size() != 0) {
    Result << "<dl>";
    for (unsigned i = 0, e = Parts.TParams.size(); i != e; ++i)
      visit(Parts.TParams[i]);
    Result << "</dl>";
  }

  if (Parts.Params.size() != 0) {
    Result << "<dl>";
    for (unsigned i = 0, e = Parts.Params.size(); i != e; ++i)
      visit(Parts.Params[i]);
    Result << "</dl>";
  }

  if (Parts.Returns.size() != 0) {
    Result << "<div class=\"result-discussion\">";
    for (unsigned i = 0, e = Parts.Returns.size(); i != e; ++i)
      visit(Parts.Returns[i]);
    Result << "</div>";
  }

  Result.flush();
}
void CommentASTToHTMLConverter::visitNonStandaloneParagraphComment(
const ParagraphComment *C) {
if (!C)
return;
for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
I != E; ++I) {
visit(*I);
}
}
/// Append \p S to the output with HTML-special characters replaced by
/// character references, so user-provided comment text cannot inject
/// markup.  (The previous body emitted each character unescaped, and the
/// '"' case was not even a well-formed string literal.)
void CommentASTToHTMLConverter::appendToResultWithHTMLEscaping(StringRef S) {
  for (StringRef::iterator I = S.begin(), E = S.end(); I != E; ++I) {
    const char C = *I;
    switch (C) {
    case '&':
      Result << "&amp;";
      break;
    case '<':
      Result << "&lt;";
      break;
    case '>':
      Result << "&gt;";
      break;
    case '"':
      Result << "&quot;";
      break;
    case '\'':
      // Numeric reference: &apos; is not defined in HTML 4.
      Result << "&#39;";
      break;
    case '/':
      // Escaped defensively to avoid closing a surrounding tag context.
      Result << "&#47;";
      break;
    default:
      Result << C;
      break;
    }
  }
}
namespace {
/// Visitor that renders a comment AST as the libclang XML document format
/// into a caller-provided buffer.
class CommentASTToXMLConverter :
    public ConstCommentVisitor<CommentASTToXMLConverter> {
public:
  /// \param Str accumulator for XML.
  /// \param SFC clang-format context used to pretty-print declarations.
  /// \param FUID unique id used to name the in-memory buffer given to
  ///        clang-format.
  CommentASTToXMLConverter(const FullComment *FC,
                           SmallVectorImpl<char> &Str,
                           const CommandTraits &Traits,
                           const SourceManager &SM,
                           SimpleFormatContext &SFC,
                           unsigned FUID) :
      FC(FC), Result(Str), Traits(Traits), SM(SM),
      FormatRewriterContext(SFC),
      FormatInMemoryUniqueId(FUID) { }

  // Inline content.
  void visitTextComment(const TextComment *C);
  void visitInlineCommandComment(const InlineCommandComment *C);
  void visitHTMLStartTagComment(const HTMLStartTagComment *C);
  void visitHTMLEndTagComment(const HTMLEndTagComment *C);

  // Block content.
  void visitParagraphComment(const ParagraphComment *C);

  /// Render a paragraph as a <Para> element, optionally tagged with a
  /// "kind" attribute.
  void appendParagraphCommentWithKind(const ParagraphComment *C,
                                      StringRef Kind);

  void visitBlockCommandComment(const BlockCommandComment *C);
  void visitParamCommandComment(const ParamCommandComment *C);
  void visitTParamCommandComment(const TParamCommandComment *C);
  void visitVerbatimBlockComment(const VerbatimBlockComment *C);
  void visitVerbatimBlockLineComment(const VerbatimBlockLineComment *C);
  void visitVerbatimLineComment(const VerbatimLineComment *C);

  void visitFullComment(const FullComment *C);

  // Helpers.
  void appendToResultWithXMLEscaping(StringRef S);
  void appendToResultWithCDATAEscaping(StringRef S);

  /// Run the pretty-printed declaration text through clang-format.
  void formatTextOfDeclaration(const DeclInfo *DI,
                               SmallString<128> &Declaration);

private:
  const FullComment *FC;

  /// Output stream for XML.
  llvm::raw_svector_ostream Result;

  const CommandTraits &Traits;
  const SourceManager &SM;
  SimpleFormatContext &FormatRewriterContext;
  unsigned FormatInMemoryUniqueId;
};
void getSourceTextOfDeclaration(const DeclInfo *ThisDecl,
SmallVectorImpl<char> &Str) {
ASTContext &Context = ThisDecl->CurrentDecl->getASTContext();
const LangOptions &LangOpts = Context.getLangOpts();
llvm::raw_svector_ostream OS(Str);
PrintingPolicy PPolicy(LangOpts);
PPolicy.PolishForDeclaration = true;
PPolicy.TerseOutput = true;
ThisDecl->CurrentDecl->print(OS, PPolicy,
/*Indentation*/0, /*PrintInstantiation*/false);
}
/// Reformat \p Declaration in place with clang-format (LLVM style) by
/// materializing it as an in-memory file, reformatting that file, and
/// reading the rewritten text back.
void CommentASTToXMLConverter::formatTextOfDeclaration(
    const DeclInfo *DI, SmallString<128> &Declaration) {
  // FIXME. formatting API expects null terminated input string.
  // There might be more efficient way of doing this.
  std::string StringDecl = Declaration.str();

  // Formatter specific code.
  // Form a unique in memory buffer name.
  SmallString<128> filename;
  filename += "xmldecl";
  filename += llvm::utostr(FormatInMemoryUniqueId);
  filename += ".xd";
  FileID ID = FormatRewriterContext.createInMemoryFile(filename, StringDecl);
  SourceLocation Start = FormatRewriterContext.Sources.getLocForStartOfFile(ID)
      .getLocWithOffset(0);
  unsigned Length = Declaration.size();

  // Reformat the whole buffer and apply the edits to the rewriter.
  tooling::Replacements Replace = reformat(
      format::getLLVMStyle(), FormatRewriterContext.Sources, ID,
      CharSourceRange::getCharRange(Start, Start.getLocWithOffset(Length)));
  applyAllReplacements(Replace, FormatRewriterContext.Rewrite);
  Declaration = FormatRewriterContext.getRewrittenText(ID);
}
} // end unnamed namespace
/// A text node renders as its text, XML-escaped.
void CommentASTToXMLConverter::visitTextComment(const TextComment *C) {
  appendToResultWithXMLEscaping(C->getText());
}
void CommentASTToXMLConverter::visitInlineCommandComment(
const InlineCommandComment *C) {
// Nothing to render if no arguments supplied.
if (C->getNumArgs() == 0)
return;
// Nothing to render if argument is empty.
StringRef Arg0 = C->getArgText(0);
if (Arg0.empty())
return;
switch (C->getRenderKind()) {
case InlineCommandComment::RenderNormal:
for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i) {
appendToResultWithXMLEscaping(C->getArgText(i));
Result << " ";
}
return;
case InlineCommandComment::RenderBold:
assert(C->getNumArgs() == 1);
Result << "<bold>";
appendToResultWithXMLEscaping(Arg0);
Result << "</bold>";
return;
case InlineCommandComment::RenderMonospaced:
assert(C->getNumArgs() == 1);
Result << "<monospaced>";
appendToResultWithXMLEscaping(Arg0);
Result << "</monospaced>";
return;
case InlineCommandComment::RenderEmphasized:
assert(C->getNumArgs() == 1);
Result << "<emphasized>";
appendToResultWithXMLEscaping(Arg0);
Result << "</emphasized>";
return;
}
}
/// Wrap a raw HTML start tag in a <rawHTML> element, emitting the tag text
/// as CDATA so it cannot be confused with the surrounding XML markup.
void CommentASTToXMLConverter::visitHTMLStartTagComment(
    const HTMLStartTagComment *C) {
  Result << "<rawHTML";
  if (C->isMalformed())
    Result << " isMalformed=\"1\"";
  Result << ">";

  // Render the tag into a temporary buffer first; the stream must go out
  // of scope before the buffer is read.
  SmallString<32> Tag;
  {
    llvm::raw_svector_ostream TagOS(Tag);
    printHTMLStartTagComment(C, TagOS);
  }
  appendToResultWithCDATAEscaping(Tag);

  Result << "</rawHTML>";
}
/// Wrap a raw HTML end tag in a <rawHTML> element.  The literal "</tag>"
/// text is character data of that element, so its angle brackets must be
/// XML-escaped; emitting them verbatim (as the previous body did) yields
/// malformed XML.
void
CommentASTToXMLConverter::visitHTMLEndTagComment(const HTMLEndTagComment *C) {
  Result << "<rawHTML";
  if (C->isMalformed())
    Result << " isMalformed=\"1\"";
  Result << ">&lt;/" << C->getTagName() << "&gt;</rawHTML>";
}
/// A free-standing paragraph renders as a <Para> with no kind attribute.
void
CommentASTToXMLConverter::visitParagraphComment(const ParagraphComment *C) {
  appendParagraphCommentWithKind(C, StringRef());
}
void CommentASTToXMLConverter::appendParagraphCommentWithKind(
const ParagraphComment *C,
StringRef ParagraphKind) {
if (C->isWhitespace())
return;
if (ParagraphKind.empty())
Result << "<Para>";
else
Result << "<Para kind=\"" << ParagraphKind << "\">";
for (Comment::child_iterator I = C->child_begin(), E = C->child_end();
I != E; ++I) {
visit(*I);
}
Result << "</Para>";
}
/// Render a block command as a <Para>.  A fixed set of well-known commands
/// (\note, \warning, \todo, ...) is surfaced via the paragraph's "kind"
/// attribute; everything else renders as a plain paragraph.
void CommentASTToXMLConverter::visitBlockCommandComment(
    const BlockCommandComment *C) {
  StringRef ParagraphKind;

  switch (C->getCommandID()) {
  case CommandTraits::KCI_attention:
  case CommandTraits::KCI_author:
  case CommandTraits::KCI_authors:
  case CommandTraits::KCI_bug:
  case CommandTraits::KCI_copyright:
  case CommandTraits::KCI_date:
  case CommandTraits::KCI_invariant:
  case CommandTraits::KCI_note:
  case CommandTraits::KCI_post:
  case CommandTraits::KCI_pre:
  case CommandTraits::KCI_remark:
  case CommandTraits::KCI_remarks:
  case CommandTraits::KCI_sa:
  case CommandTraits::KCI_see:
  case CommandTraits::KCI_since:
  case CommandTraits::KCI_todo:
  case CommandTraits::KCI_version:
  case CommandTraits::KCI_warning:
    ParagraphKind = C->getCommandName(Traits);
    break;
  default:
    break;
  }

  appendParagraphCommentWithKind(C->getParagraph(), ParagraphKind);
}
/// Render a \param command as a <Parameter> element carrying the name,
/// resolved index (or <IsVarArg/>), explicit/implicit direction, and the
/// description paragraph.
void CommentASTToXMLConverter::visitParamCommandComment(
    const ParamCommandComment *C) {
  Result << "<Parameter><Name>";
  // Use the canonical declared name when the index resolved, otherwise
  // whatever the user wrote.
  appendToResultWithXMLEscaping(C->isParamIndexValid()
                                    ? C->getParamName(FC)
                                    : C->getParamNameAsWritten());
  Result << "</Name>";

  if (C->isParamIndexValid()) {
    if (C->isVarArgParam())
      Result << "<IsVarArg />";
    else
      Result << "<Index>" << C->getParamIndex() << "</Index>";
  }

  // isDirectionExplicit() streams as "0"/"1".
  Result << "<Direction isExplicit=\"" << C->isDirectionExplicit() << "\">";
  switch (C->getDirection()) {
  case ParamCommandComment::In:
    Result << "in";
    break;
  case ParamCommandComment::Out:
    Result << "out";
    break;
  case ParamCommandComment::InOut:
    Result << "in,out";
    break;
  }
  Result << "</Direction><Discussion>";
  visit(C->getParagraph());
  Result << "</Discussion></Parameter>";
}
/// Render a \tparam command as a <Parameter> element.  Only depth-1
/// template parameters get an <Index>; nested ones have none.
void CommentASTToXMLConverter::visitTParamCommandComment(
    const TParamCommandComment *C) {
  Result << "<Parameter><Name>";
  appendToResultWithXMLEscaping(C->isPositionValid() ? C->getParamName(FC)
                                : C->getParamNameAsWritten());
  Result << "</Name>";

  if (C->isPositionValid() && C->getDepth() == 1) {
    Result << "<Index>" << C->getIndex(0) << "</Index>";
  }

  Result << "<Discussion>";
  visit(C->getParagraph());
  Result << "</Discussion></Parameter>";
}
/// Render a verbatim block as a whitespace-preserving <Verbatim> element.
/// \code blocks are tagged kind="code"; every other verbatim command is
/// tagged kind="verbatim".
void CommentASTToXMLConverter::visitVerbatimBlockComment(
    const VerbatimBlockComment *C) {
  unsigned NumLines = C->getNumLines();
  if (NumLines == 0)
    return;

  Result << (C->getCommandID() == CommandTraits::KCI_code
                 ? "<Verbatim xml:space=\"preserve\" kind=\"code\">"
                 : "<Verbatim xml:space=\"preserve\" kind=\"verbatim\">");

  // Lines are separated by '\n' with no trailing newline.
  for (unsigned Line = 0; Line != NumLines; ++Line) {
    appendToResultWithXMLEscaping(C->getText(Line));
    if (Line + 1 != NumLines)
      Result << '\n';
  }
  Result << "</Verbatim>";
}
/// Individual verbatim lines are consumed by visitVerbatimBlockComment;
/// they must never be visited directly.
void CommentASTToXMLConverter::visitVerbatimBlockLineComment(
    const VerbatimBlockLineComment *C) {
  llvm_unreachable("should not see this AST node");
}
/// A single verbatim line renders as its own <Verbatim> element.
void CommentASTToXMLConverter::visitVerbatimLineComment(
    const VerbatimLineComment *C) {
  Result << "<Verbatim xml:space=\"preserve\" kind=\"verbatim\">";
  appendToResultWithXMLEscaping(C->getText());
  Result << "</Verbatim>";
}
/// Render a complete comment as the XML document consumed by libclang
/// clients: a root element keyed on the declaration kind, then location,
/// name, USR, pretty-printed declaration, abstract, template parameters,
/// parameters, exceptions, result discussion, availability attributes and
/// the remaining discussion blocks.
///
/// Fix: the fallback name placeholder is now XML-escaped
/// ("&lt;anonymous&gt;"); emitting literal angle brackets produced
/// malformed XML.
void CommentASTToXMLConverter::visitFullComment(const FullComment *C) {
  FullCommentParts Parts(C, Traits);

  const DeclInfo *DI = C->getDeclInfo();
  StringRef RootEndTag;
  if (DI) {
    switch (DI->getKind()) {
    case DeclInfo::OtherKind:
      RootEndTag = "</Other>";
      Result << "<Other";
      break;
    case DeclInfo::FunctionKind:
      RootEndTag = "</Function>";
      Result << "<Function";
      switch (DI->TemplateKind) {
      case DeclInfo::NotTemplate:
        break;
      case DeclInfo::Template:
        Result << " templateKind=\"template\"";
        break;
      case DeclInfo::TemplateSpecialization:
        Result << " templateKind=\"specialization\"";
        break;
      case DeclInfo::TemplatePartialSpecialization:
        llvm_unreachable("partial specializations of functions "
                         "are not allowed in C++");
      }
      if (DI->IsInstanceMethod)
        Result << " isInstanceMethod=\"1\"";
      if (DI->IsClassMethod)
        Result << " isClassMethod=\"1\"";
      break;
    case DeclInfo::ClassKind:
      RootEndTag = "</Class>";
      Result << "<Class";
      switch (DI->TemplateKind) {
      case DeclInfo::NotTemplate:
        break;
      case DeclInfo::Template:
        Result << " templateKind=\"template\"";
        break;
      case DeclInfo::TemplateSpecialization:
        Result << " templateKind=\"specialization\"";
        break;
      case DeclInfo::TemplatePartialSpecialization:
        Result << " templateKind=\"partialSpecialization\"";
        break;
      }
      break;
    case DeclInfo::VariableKind:
      RootEndTag = "</Variable>";
      Result << "<Variable";
      break;
    case DeclInfo::NamespaceKind:
      RootEndTag = "</Namespace>";
      Result << "<Namespace";
      break;
    case DeclInfo::TypedefKind:
      RootEndTag = "</Typedef>";
      Result << "<Typedef";
      break;
    case DeclInfo::EnumKind:
      RootEndTag = "</Enum>";
      Result << "<Enum";
      break;
    }

    {
      // Print line and column number.
      SourceLocation Loc = DI->CurrentDecl->getLocation();
      std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
      FileID FID = LocInfo.first;
      unsigned FileOffset = LocInfo.second;

      if (!FID.isInvalid()) {
        if (const FileEntry *FE = SM.getFileEntryForID(FID)) {
          Result << " file=\"";
          appendToResultWithXMLEscaping(FE->getName());
          Result << "\"";
        }
        Result << " line=\"" << SM.getLineNumber(FID, FileOffset)
               << "\" column=\"" << SM.getColumnNumber(FID, FileOffset)
               << "\"";
      }
    }

    // Finish the root tag.
    Result << ">";

    bool FoundName = false;
    if (const NamedDecl *ND = dyn_cast<NamedDecl>(DI->CommentDecl)) {
      if (DeclarationName DeclName = ND->getDeclName()) {
        Result << "<Name>";
        std::string Name = DeclName.getAsString();
        appendToResultWithXMLEscaping(Name);
        FoundName = true;
        Result << "</Name>";
      }
    }
    if (!FoundName)
      Result << "<Name>&lt;anonymous&gt;</Name>";

    {
      // Print USR.
      SmallString<128> USR;
      generateUSRForDecl(DI->CommentDecl, USR);
      if (!USR.empty()) {
        Result << "<USR>";
        appendToResultWithXMLEscaping(USR);
        Result << "</USR>";
      }
    }
  } else {
    // No DeclInfo -- just emit some root tag and name tag.
    RootEndTag = "</Other>";
    Result << "<Other><Name>unknown</Name>";
  }

  if (Parts.Headerfile) {
    Result << "<Headerfile>";
    visit(Parts.Headerfile);
    Result << "</Headerfile>";
  }

  {
    // Pretty-print the declaration.
    // NOTE(review): DI is dereferenced here (and again for attributes
    // below) even though a null-DI fallback exists above -- confirm that
    // all callers supply DeclInfo, or guard these uses.
    Result << "<Declaration>";
    SmallString<128> Declaration;
    getSourceTextOfDeclaration(DI, Declaration);
    formatTextOfDeclaration(DI, Declaration);
    appendToResultWithXMLEscaping(Declaration);
    Result << "</Declaration>";
  }

  bool FirstParagraphIsBrief = false;
  if (Parts.Brief) {
    Result << "<Abstract>";
    visit(Parts.Brief);
    Result << "</Abstract>";
  } else if (Parts.FirstParagraph) {
    // No explicit \brief: promote the first paragraph to the abstract.
    Result << "<Abstract>";
    visit(Parts.FirstParagraph);
    Result << "</Abstract>";
    FirstParagraphIsBrief = true;
  }

  if (Parts.TParams.size() != 0) {
    Result << "<TemplateParameters>";
    for (unsigned i = 0, e = Parts.TParams.size(); i != e; ++i)
      visit(Parts.TParams[i]);
    Result << "</TemplateParameters>";
  }

  if (Parts.Params.size() != 0) {
    Result << "<Parameters>";
    for (unsigned i = 0, e = Parts.Params.size(); i != e; ++i)
      visit(Parts.Params[i]);
    Result << "</Parameters>";
  }

  if (Parts.Exceptions.size() != 0) {
    Result << "<Exceptions>";
    for (unsigned i = 0, e = Parts.Exceptions.size(); i != e; ++i)
      visit(Parts.Exceptions[i]);
    Result << "</Exceptions>";
  }

  if (Parts.Returns.size() != 0) {
    Result << "<ResultDiscussion>";
    for (unsigned i = 0, e = Parts.Returns.size(); i != e; ++i)
      visit(Parts.Returns[i]);
    Result << "</ResultDiscussion>";
  }

  if (DI->CommentDecl->hasAttrs()) {
    const AttrVec &Attrs = DI->CommentDecl->getAttrs();
    for (unsigned i = 0, e = Attrs.size(); i != e; i++) {
      const AvailabilityAttr *AA = dyn_cast<AvailabilityAttr>(Attrs[i]);
      if (!AA) {
        // Deprecated/unavailable attributes without availability info.
        if (const DeprecatedAttr *DA = dyn_cast<DeprecatedAttr>(Attrs[i])) {
          if (DA->getMessage().empty())
            Result << "<Deprecated/>";
          else {
            Result << "<Deprecated>";
            appendToResultWithXMLEscaping(DA->getMessage());
            Result << "</Deprecated>";
          }
        }
        else if (const UnavailableAttr *UA = dyn_cast<UnavailableAttr>(Attrs[i])) {
          if (UA->getMessage().empty())
            Result << "<Unavailable/>";
          else {
            Result << "<Unavailable>";
            appendToResultWithXMLEscaping(UA->getMessage());
            Result << "</Unavailable>";
          }
        }
        continue;
      }

      // 'availability' attribute.
      Result << "<Availability";
      StringRef Distribution;
      if (AA->getPlatform()) {
        Distribution = AvailabilityAttr::getPrettyPlatformName(
                                        AA->getPlatform()->getName());
        if (Distribution.empty())
          Distribution = AA->getPlatform()->getName();
      }
      Result << " distribution=\"" << Distribution << "\">";
      VersionTuple IntroducedInVersion = AA->getIntroduced();
      if (!IntroducedInVersion.empty()) {
        Result << "<IntroducedInVersion>"
               << IntroducedInVersion.getAsString()
               << "</IntroducedInVersion>";
      }
      VersionTuple DeprecatedInVersion = AA->getDeprecated();
      if (!DeprecatedInVersion.empty()) {
        Result << "<DeprecatedInVersion>"
               << DeprecatedInVersion.getAsString()
               << "</DeprecatedInVersion>";
      }
      VersionTuple RemovedAfterVersion = AA->getObsoleted();
      if (!RemovedAfterVersion.empty()) {
        Result << "<RemovedAfterVersion>"
               << RemovedAfterVersion.getAsString()
               << "</RemovedAfterVersion>";
      }
      StringRef DeprecationSummary = AA->getMessage();
      if (!DeprecationSummary.empty()) {
        Result << "<DeprecationSummary>";
        appendToResultWithXMLEscaping(DeprecationSummary);
        Result << "</DeprecationSummary>";
      }
      if (AA->getUnavailable())
        Result << "<Unavailable/>";
      Result << "</Availability>";
    }
  }

  {
    // Emit the <Discussion> wrapper lazily: only if at least one misc
    // block survives the brief-promotion filter.
    bool StartTagEmitted = false;
    for (unsigned i = 0, e = Parts.MiscBlocks.size(); i != e; ++i) {
      const Comment *C = Parts.MiscBlocks[i];
      if (FirstParagraphIsBrief && C == Parts.FirstParagraph)
        continue;
      if (!StartTagEmitted) {
        Result << "<Discussion>";
        StartTagEmitted = true;
      }
      visit(C);
    }
    if (StartTagEmitted)
      Result << "</Discussion>";
  }

  Result << RootEndTag;

  Result.flush();
}
/// Append \p S to the output with XML-special characters replaced by the
/// five predefined XML entities, so comment text cannot break the
/// document structure.  (The previous body emitted each character
/// unescaped, and the '"' case was not even a well-formed string literal.)
void CommentASTToXMLConverter::appendToResultWithXMLEscaping(StringRef S) {
  for (StringRef::iterator I = S.begin(), E = S.end(); I != E; ++I) {
    const char C = *I;
    switch (C) {
    case '&':
      Result << "&amp;";
      break;
    case '<':
      Result << "&lt;";
      break;
    case '>':
      Result << "&gt;";
      break;
    case '"':
      Result << "&quot;";
      break;
    case '\'':
      Result << "&apos;";
      break;
    default:
      Result << C;
      break;
    }
  }
}
/// Append \p S wrapped in a CDATA section, splitting the section wherever
/// the forbidden "]]>" terminator sequence occurs inside the data.
void CommentASTToXMLConverter::appendToResultWithCDATAEscaping(StringRef S) {
  if (S.empty())
    return;

  Result << "<![CDATA[";
  while (!S.empty()) {
    size_t Pos = S.find("]]>");
    if (Pos == 0) {
      // Emit "]]", then close and reopen the CDATA section before the
      // ">", so the output never contains a literal "]]>" as data.
      Result << "]]]]><![CDATA[>";
      S = S.drop_front(3);
      continue;
    }
    if (Pos == StringRef::npos)
      Pos = S.size();

    // Everything up to the (possible) "]]>" occurrence is safe as-is.
    Result << S.substr(0, Pos);

    S = S.drop_front(Pos);
  }
  Result << "]]>";
}
// FormatInMemoryUniqueId seeds the names of the in-memory buffers handed to
// clang-format; it is incremented on every XML conversion.
CommentToXMLConverter::CommentToXMLConverter() : FormatInMemoryUniqueId(0) {}
CommentToXMLConverter::~CommentToXMLConverter() {}
/// Convert the full comment \p FC to HTML, appending to \p HTML.
void CommentToXMLConverter::convertCommentToHTML(const FullComment *FC,
                                                 SmallVectorImpl<char> &HTML,
                                                 const ASTContext &Context) {
  CommentASTToHTMLConverter Converter(FC, HTML,
                                      Context.getCommentCommandTraits());
  Converter.visit(FC);
}
/// Render a single detached HTML tag node as text; no enclosing
/// FullComment is available, so the converter is built with a null FC.
void CommentToXMLConverter::convertHTMLTagNodeToText(
    const comments::HTMLTagComment *HTC, SmallVectorImpl<char> &Text,
    const ASTContext &Context) {
  CommentASTToHTMLConverter Converter(nullptr, Text,
                                      Context.getCommentCommandTraits());
  Converter.visit(HTC);
}
/// Convert the full comment \p FC to the libclang XML format, appending to
/// \p XML.
void CommentToXMLConverter::convertCommentToXML(const FullComment *FC,
                                                SmallVectorImpl<char> &XML,
                                                const ASTContext &Context) {
  if (!FormatContext || (FormatInMemoryUniqueId % 1000) == 0) {
    // Create a new format context, or re-create it after some number of
    // iterations, so the buffers don't grow too large.
    FormatContext.reset(new SimpleFormatContext(Context.getLangOpts()));
  }

  CommentASTToXMLConverter Converter(FC, XML, Context.getCommentCommandTraits(),
                                     Context.getSourceManager(), *FormatContext,
                                     FormatInMemoryUniqueId++);
  Converter.visit(FC);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Index/SimpleFormatContext.h | //===--- SimpleFormatContext.h ----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// \brief Defines a utility class for use of clang-format in libclang
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_INDEX_SIMPLEFORMATCONTEXT_H
#define LLVM_CLANG_LIB_INDEX_SIMPLEFORMATCONTEXT_H
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Rewrite/Core/Rewriter.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
namespace clang {
namespace index {
/// \brief A small class to be used by libclang clients to format
/// a declaration string in memory. This object is instantiated once
/// and used each time a formatting is needed.
class SimpleFormatContext {
public:
  // NOTE(review): Options is taken by value and a reference to this
  // parameter is passed to the Rewriter constructor -- verify Rewriter
  // copies the options rather than retaining a pointer to them, otherwise
  // Rewrite would reference a dead object after construction.
  SimpleFormatContext(LangOptions Options)
      : DiagOpts(new DiagnosticOptions()),
        Diagnostics(new DiagnosticsEngine(new DiagnosticIDs,
                                          DiagOpts.get())),
        Files((FileSystemOptions())),
        Sources(*Diagnostics, Files),
        Rewrite(Sources, Options) {
    // Formatting diagnostics are of no interest to libclang clients.
    Diagnostics->setClient(new IgnoringDiagConsumer, true);
  }

  /// Register \p Content as an in-memory file named \p Name and return its
  /// FileID in this context's SourceManager.
  FileID createInMemoryFile(StringRef Name, StringRef Content) {
    std::unique_ptr<llvm::MemoryBuffer> Source =
        llvm::MemoryBuffer::getMemBuffer(Content);
    const FileEntry *Entry =
        Files.getVirtualFile(Name, Source->getBufferSize(), 0);
    // Check the entry before it is used; the previous code asserted only
    // after Entry had already been passed to overrideFileContents.
    assert(Entry != nullptr);
    Sources.overrideFileContents(Entry, std::move(Source));
    return Sources.createFileID(Entry, SourceLocation(), SrcMgr::C_User);
  }

  /// Return the current (post-rewrite) text of file \p ID.
  std::string getRewrittenText(FileID ID) {
    std::string Result;
    llvm::raw_string_ostream OS(Result);
    Rewrite.getEditBuffer(ID).write(OS);
    OS.flush();
    return Result;
  }

  IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
  IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics;
  FileManager Files;
  SourceManager Sources;
  Rewriter Rewrite;
};
} // end namespace index
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Index/CMakeLists.txt | set(LLVM_LINK_COMPONENTS
Support
)
add_clang_library(clangIndex
CommentToXML.cpp
USRGeneration.cpp
ADDITIONAL_HEADERS
SimpleFormatContext.h
LINK_LIBS
clangAST
clangBasic
clangFormat
clangRewrite
clangToolingCore
)
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Index/USRGeneration.cpp | //===- USRGeneration.cpp - Routines for USR generation --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/Index/USRGeneration.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclVisitor.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace clang::index;
//===----------------------------------------------------------------------===//
// USR generation.
//===----------------------------------------------------------------------===//
/// \returns true on error.
static bool printLoc(llvm::raw_ostream &OS, SourceLocation Loc,
const SourceManager &SM, bool IncludeOffset) {
if (Loc.isInvalid()) {
return true;
}
Loc = SM.getExpansionLoc(Loc);
const std::pair<FileID, unsigned> &Decomposed = SM.getDecomposedLoc(Loc);
const FileEntry *FE = SM.getFileEntryForID(Decomposed.first);
if (FE) {
OS << llvm::sys::path::filename(FE->getName());
} else {
// This case really isn't interesting.
return true;
}
if (IncludeOffset) {
// Use the offest into the FileID to represent the location. Using
// a line/column can cause us to look back at the original source file,
// which is expensive.
OS << '@' << Decomposed.second;
}
return false;
}
namespace {
/// Visitor that computes a USR (Unified Symbol Resolution string) for a
/// declaration by appending fragments to the caller-provided buffer.
class USRGenerator : public ConstDeclVisitor<USRGenerator> {
  SmallVectorImpl<char> &Buf; // Underlying storage for the USR text.
  llvm::raw_svector_ostream Out; // Stream over Buf.
  bool IgnoreResults; // Set when no meaningful USR can be produced.
  ASTContext *Context;
  bool generatedLoc; // Whether a location fragment was emitted already.

  llvm::DenseMap<const Type *, unsigned> TypeSubstitutions;

public:
  explicit USRGenerator(ASTContext *Ctx, SmallVectorImpl<char> &Buf)
  : Buf(Buf),
    Out(Buf),
    IgnoreResults(false),
    Context(Ctx),
    generatedLoc(false)
  {
    // Add the USR space prefix.
    Out << getUSRSpacePrefix();
  }

  /// True when visitation decided this decl cannot have a useful USR.
  bool ignoreResults() const { return IgnoreResults; }

  // Visitation methods from generating USRs from AST elements.
  void VisitDeclContext(const DeclContext *D);
  void VisitFieldDecl(const FieldDecl *D);
  void VisitFunctionDecl(const FunctionDecl *D);
  void VisitNamedDecl(const NamedDecl *D);
  void VisitNamespaceDecl(const NamespaceDecl *D);
  void VisitNamespaceAliasDecl(const NamespaceAliasDecl *D);
  void VisitFunctionTemplateDecl(const FunctionTemplateDecl *D);
  void VisitClassTemplateDecl(const ClassTemplateDecl *D);
  void VisitObjCContainerDecl(const ObjCContainerDecl *CD);
  void VisitObjCMethodDecl(const ObjCMethodDecl *MD);
  void VisitObjCPropertyDecl(const ObjCPropertyDecl *D);
  void VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D);
  void VisitTagDecl(const TagDecl *D);
  void VisitTypedefDecl(const TypedefDecl *D);
  void VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D);
  void VisitVarDecl(const VarDecl *D);
  void VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D);
  void VisitTemplateTemplateParmDecl(const TemplateTemplateParmDecl *D);

  // Using-style declarations have no stable USR of their own.
  void VisitLinkageSpecDecl(const LinkageSpecDecl *D) {
    IgnoreResults = true;
  }
  void VisitUsingDirectiveDecl(const UsingDirectiveDecl *D) {
    IgnoreResults = true;
  }
  void VisitUsingDecl(const UsingDecl *D) {
    IgnoreResults = true;
  }
  void VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D) {
    IgnoreResults = true;
  }
  void VisitUnresolvedUsingTypenameDecl(const UnresolvedUsingTypenameDecl *D) {
    IgnoreResults = true;
  }

  /// Whether a location fragment is needed to disambiguate \p D's USR.
  bool ShouldGenerateLocation(const NamedDecl *D);

  bool isLocal(const NamedDecl *D) {
    return D->getParentFunctionOrMethod() != nullptr;
  }

  /// Generate the string component containing the location of the
  /// declaration.
  bool GenLoc(const Decl *D, bool IncludeOffset);

  /// String generation methods used both by the visitation methods
  /// and from other clients that want to directly generate USRs.  These
  /// methods do not construct complete USRs (which incorporate the parents
  /// of an AST element), but only the fragments concerning the AST element
  /// itself.

  /// Generate a USR for an Objective-C class.
  void GenObjCClass(StringRef cls) {
    generateUSRForObjCClass(cls, Out);
  }
  /// Generate a USR for an Objective-C class category.
  void GenObjCCategory(StringRef cls, StringRef cat) {
    generateUSRForObjCCategory(cls, cat, Out);
  }
  /// Generate a USR fragment for an Objective-C property.
  void GenObjCProperty(StringRef prop) {
    generateUSRForObjCProperty(prop, Out);
  }
  /// Generate a USR for an Objective-C protocol.
  void GenObjCProtocol(StringRef prot) {
    generateUSRForObjCProtocol(prot, Out);
  }

  void VisitType(QualType T);
  void VisitTemplateParameterList(const TemplateParameterList *Params);
  void VisitTemplateName(TemplateName Name);
  void VisitTemplateArgument(const TemplateArgument &Arg);

  /// Emit a Decl's name using NamedDecl::printName() and return true if
  /// the decl had no name.
  bool EmitDeclName(const NamedDecl *D);
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Generating USRs from ASTS.
//===----------------------------------------------------------------------===//
/// Print the declaration's name into the USR stream and report whether the
/// declaration turned out to be nameless (nothing was written).
bool USRGenerator::EmitDeclName(const NamedDecl *D) {
  Out.flush();
  const unsigned SizeBefore = Buf.size();
  D->printName(Out);
  Out.flush();
  // If the buffer did not grow, the declaration has no name.
  return Buf.size() == SizeBefore;
}
bool USRGenerator::ShouldGenerateLocation(const NamedDecl *D) {
if (D->isExternallyVisible())
return false;
if (D->getParentFunctionOrMethod())
return true;
const SourceManager &SM = Context->getSourceManager();
return !SM.isInSystemHeader(D->getLocation());
}
void USRGenerator::VisitDeclContext(const DeclContext *DC) {
if (const NamedDecl *D = dyn_cast<NamedDecl>(DC))
Visit(D);
}
void USRGenerator::VisitFieldDecl(const FieldDecl *D) {
// The USR for an ivar declared in a class extension is based on the
// ObjCInterfaceDecl, not the ObjCCategoryDecl.
if (const ObjCInterfaceDecl *ID = Context->getObjContainingInterface(D))
Visit(ID);
else
VisitDeclContext(D->getDeclContext());
Out << (isa<ObjCIvarDecl>(D) ? "@" : "@FI@");
if (EmitDeclName(D)) {
// Bit fields can be anonymous.
IgnoreResults = true;
return;
}
}
// Generate the USR fragment for a function: context prefix, "@F@"/"@FT@"
// marker, name, then (for C++ only) template arguments, parameter types,
// variadic marker, return type (templates only), and CV/ref qualifiers.
// The emission order here defines the USR format — do not reorder.
void USRGenerator::VisitFunctionDecl(const FunctionDecl *D) {
  if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
    return;
  VisitDeclContext(D->getDeclContext());
  bool IsTemplate = false;
  if (FunctionTemplateDecl *FunTmpl = D->getDescribedFunctionTemplate()) {
    IsTemplate = true;
    Out << "@FT@";
    VisitTemplateParameterList(FunTmpl->getTemplateParameters());
  } else
    Out << "@F@";
  D->printName(Out);
  // C (and extern "C") functions are identified by name alone, since they
  // cannot be overloaded; stop before mangling any type information.
  ASTContext &Ctx = *Context;
  if (!Ctx.getLangOpts().CPlusPlus || D->isExternC())
    return;
  // Explicit specialization arguments, if any.
  if (const TemplateArgumentList *
        SpecArgs = D->getTemplateSpecializationArgs()) {
    Out << '<';
    for (unsigned I = 0, N = SpecArgs->size(); I != N; ++I) {
      Out << '#';
      VisitTemplateArgument(SpecArgs->get(I));
    }
    Out << '>';
  }
  // Mangle in type information for the arguments.
  for (auto PD : D->params()) {
    Out << '#';
    VisitType(PD->getType());
  }
  if (D->isVariadic())
    Out << '.';
  if (IsTemplate) {
    // Function templates can be overloaded by return type, for example:
    // \code
    //   template <class T> typename T::A foo() {}
    //   template <class T> typename T::B foo() {}
    // \endcode
    Out << '#';
    VisitType(D->getReturnType());
  }
  Out << '#';
  // Method-only distinguishers: staticness, cv-qualifiers, ref-qualifier.
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
    if (MD->isStatic())
      Out << 'S';
    if (unsigned quals = MD->getTypeQualifiers())
      Out << (char)('0' + quals);
    switch (MD->getRefQualifier()) {
    case RQ_None: break;
    case RQ_LValue: Out << '&'; break;
    case RQ_RValue: Out << "&&"; break;
    }
  }
}
// Generic fallback for named declarations: parent-context prefix, '@',
// then the declaration's own name.
void USRGenerator::VisitNamedDecl(const NamedDecl *D) {
  VisitDeclContext(D->getDeclContext());
  Out << "@";
  if (EmitDeclName(D)) {
    // The string can be empty if the declaration has no name; e.g., it is
    // the ParmDecl with no name for declaration of a function pointer type,
    // e.g.: void (*f)(void *);
    // In this case, don't generate a USR.
    IgnoreResults = true;
  }
}
void USRGenerator::VisitVarDecl(const VarDecl *D) {
// VarDecls can be declared 'extern' within a function or method body,
// but their enclosing DeclContext is the function, not the TU. We need
// to check the storage class to correctly generate the USR.
if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
return;
VisitDeclContext(D->getDeclContext());
// Variables always have simple names.
StringRef s = D->getName();
// The string can be empty if the declaration has no name; e.g., it is
// the ParmDecl with no name for declaration of a function pointer type, e.g.:
// void (*f)(void *);
// In this case, don't generate a USR.
if (s.empty())
IgnoreResults = true;
else
Out << '@' << s;
}
// Non-type template parameters are identified purely by source location.
// (Dropped a redundant trailing 'return;' at the end of this void function.)
void USRGenerator::VisitNonTypeTemplateParmDecl(
    const NonTypeTemplateParmDecl *D) {
  GenLoc(D, /*IncludeOffset=*/true);
}
// Template template parameters are identified purely by source location.
// (Dropped a redundant trailing 'return;' at the end of this void function.)
void USRGenerator::VisitTemplateTemplateParmDecl(
    const TemplateTemplateParmDecl *D) {
  GenLoc(D, /*IncludeOffset=*/true);
}
/// Generate the USR fragment for a namespace.
void USRGenerator::VisitNamespaceDecl(const NamespaceDecl *D) {
  // All anonymous namespaces share one fixed fragment.
  if (D->isAnonymousNamespace()) {
    Out << "@aN";
    return;
  }
  VisitDeclContext(D->getDeclContext());
  if (IgnoreResults)
    return;
  Out << "@N@" << D->getName();
}
// A function template is identified via its templated function.
void USRGenerator::VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) {
  VisitFunctionDecl(D->getTemplatedDecl());
}
// A class template is identified via its templated record.
void USRGenerator::VisitClassTemplateDecl(const ClassTemplateDecl *D) {
  VisitTagDecl(D->getTemplatedDecl());
}
/// Generate the USR fragment for a namespace alias.
void USRGenerator::VisitNamespaceAliasDecl(const NamespaceAliasDecl *D) {
  VisitDeclContext(D->getDeclContext());
  if (IgnoreResults)
    return;
  Out << "@NA@" << D->getName();
}
// Generate the USR fragment for an Objective-C method: owning container
// fragment followed by "(im)"/"(cm)" and the selector.
void USRGenerator::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
  const DeclContext *container = D->getDeclContext();
  if (const ObjCProtocolDecl *pd = dyn_cast<ObjCProtocolDecl>(container)) {
    Visit(pd);
  }
  else {
    // The USR for a method declared in a class extension or category is based on
    // the ObjCInterfaceDecl, not the ObjCCategoryDecl.
    const ObjCInterfaceDecl *ID = D->getClassInterface();
    if (!ID) {
      // Invalid code: method without a resolvable interface.
      IgnoreResults = true;
      return;
    }
    Visit(ID);
  }
  // Ideally we would use 'GenObjCMethod', but this is such a hot path
  // for Objective-C code that we don't want to use
  // DeclarationName::getAsString().
  Out << (D->isInstanceMethod() ? "(im)" : "(cm)")
      << DeclarationName(D->getSelector());
}
// Dispatch over the concrete Objective-C container kind (interface,
// implementation, category, category impl, protocol) and emit the
// corresponding USR fragment.
void USRGenerator::VisitObjCContainerDecl(const ObjCContainerDecl *D) {
  switch (D->getKind()) {
    default:
      llvm_unreachable("Invalid ObjC container.");
    case Decl::ObjCInterface:
    case Decl::ObjCImplementation:
      GenObjCClass(D->getName());
      break;
    case Decl::ObjCCategory: {
      const ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(D);
      const ObjCInterfaceDecl *ID = CD->getClassInterface();
      if (!ID) {
        // Handle invalid code where the @interface might not
        // have been specified.
        // FIXME: We should be able to generate this USR even if the
        // @interface isn't available.
        IgnoreResults = true;
        return;
      }
      // Specially handle class extensions, which are anonymous categories.
      // We want to mangle in the location to uniquely distinguish them.
      if (CD->IsClassExtension()) {
        Out << "objc(ext)" << ID->getName() << '@';
        GenLoc(CD, /*IncludeOffset=*/true);
      }
      else
        GenObjCCategory(ID->getName(), CD->getName());
      break;
    }
    case Decl::ObjCCategoryImpl: {
      const ObjCCategoryImplDecl *CD = cast<ObjCCategoryImplDecl>(D);
      const ObjCInterfaceDecl *ID = CD->getClassInterface();
      if (!ID) {
        // Handle invalid code where the @interface might not
        // have been specified.
        // FIXME: We should be able to generate this USR even if the
        // @interface isn't available.
        IgnoreResults = true;
        return;
      }
      GenObjCCategory(ID->getName(), CD->getName());
      break;
    }
    case Decl::ObjCProtocol:
      GenObjCProtocol(cast<ObjCProtocolDecl>(D)->getName());
      break;
  }
}
// Generate the USR fragment for an Objective-C property.
void USRGenerator::VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
  // The USR for a property declared in a class extension or category is based
  // on the ObjCInterfaceDecl, not the ObjCCategoryDecl.
  if (const ObjCInterfaceDecl *ID = Context->getObjContainingInterface(D))
    Visit(ID);
  else
    Visit(cast<Decl>(D->getDeclContext()));
  GenObjCProperty(D->getName());
}
void USRGenerator::VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D) {
if (ObjCPropertyDecl *PD = D->getPropertyDecl()) {
VisitObjCPropertyDecl(PD);
return;
}
IgnoreResults = true;
}
// Generate the USR fragment for a struct/class/union/enum. Templates and
// partial specializations get distinct markers; anonymous tags are patched
// in-place in the output buffer (see the Buf[off] writes below), so the
// emission order here is load-bearing.
void USRGenerator::VisitTagDecl(const TagDecl *D) {
  // Add the location of the tag decl to handle resolution across
  // translation units.
  if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
    return;
  D = D->getCanonicalDecl();
  VisitDeclContext(D->getDeclContext());
  bool AlreadyStarted = false;
  if (const CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(D)) {
    if (ClassTemplateDecl *ClassTmpl = CXXRecord->getDescribedClassTemplate()) {
      AlreadyStarted = true;
      switch (D->getTagKind()) {
      case TTK_Interface:
      case TTK_Class:
      case TTK_Struct: Out << "@ST"; break;
      case TTK_Union:  Out << "@UT"; break;
      case TTK_Enum: llvm_unreachable("enum template");
      }
      VisitTemplateParameterList(ClassTmpl->getTemplateParameters());
    } else if (const ClassTemplatePartialSpecializationDecl *PartialSpec
                = dyn_cast<ClassTemplatePartialSpecializationDecl>(CXXRecord)) {
      AlreadyStarted = true;
      switch (D->getTagKind()) {
      case TTK_Interface:
      case TTK_Class:
      case TTK_Struct: Out << "@SP"; break;
      case TTK_Union:  Out << "@UP"; break;
      case TTK_Enum: llvm_unreachable("enum partial specialization");
      }
      VisitTemplateParameterList(PartialSpec->getTemplateParameters());
    }
  }
  if (!AlreadyStarted) {
    switch (D->getTagKind()) {
    case TTK_Interface:
    case TTK_Class:
    case TTK_Struct: Out << "@S"; break;
    case TTK_Union:  Out << "@U"; break;
    case TTK_Enum:   Out << "@E"; break;
    }
  }
  Out << '@';
  Out.flush();
  assert(Buf.size() > 0);
  // Remember where the '@' landed so it can be rewritten below if the tag
  // turns out to be anonymous.
  const unsigned off = Buf.size() - 1;
  if (EmitDeclName(D)) {
    if (const TypedefNameDecl *TD = D->getTypedefNameForAnonDecl()) {
      // Anonymous tag with a typedef name: mark with 'A' and use that name.
      Buf[off] = 'A';
      Out << '@' << *TD;
    }
    else {
      if (D->isEmbeddedInDeclarator() && !D->isFreeStanding()) {
        printLoc(Out, D->getLocation(), Context->getSourceManager(), true);
      } else
        // Fully anonymous tag: mark with 'a'.
        Buf[off] = 'a';
    }
  }
  // For a class template specialization, mangle the template arguments.
  if (const ClassTemplateSpecializationDecl *Spec
                              = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    const TemplateArgumentList &Args = Spec->getTemplateInstantiationArgs();
    Out << '>';
    for (unsigned I = 0, N = Args.size(); I != N; ++I) {
      Out << '#';
      VisitTemplateArgument(Args.get(I));
    }
  }
}
void USRGenerator::VisitTypedefDecl(const TypedefDecl *D) {
if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
return;
const DeclContext *DC = D->getDeclContext();
if (const NamedDecl *DCN = dyn_cast<NamedDecl>(DC))
Visit(DCN);
Out << "@T@";
Out << D->getName();
}
// Template type parameters are identified purely by source location.
// (Dropped a redundant trailing 'return;' at the end of this void function.)
void USRGenerator::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
  GenLoc(D, /*IncludeOffset=*/true);
}
// Emit the source-location component for 'D' (at most once per USR).
// Returns true when generation failed and the USR should be discarded.
bool USRGenerator::GenLoc(const Decl *D, bool IncludeOffset) {
  // Only one location component per USR.
  if (generatedLoc)
    return IgnoreResults;
  generatedLoc = true;
  // Guard against null declarations in invalid code.
  if (!D) {
    IgnoreResults = true;
    return true;
  }
  // Use the location of canonical decl.
  D = D->getCanonicalDecl();
  IgnoreResults =
      IgnoreResults || printLoc(Out, D->getLocStart(),
                                Context->getSourceManager(), IncludeOffset);
  return IgnoreResults;
}
// Mangle type information into the USR. Builtins map to single characters;
// compound types emit a marker and then loop on the wrapped type; previously
// seen non-builtin types are emitted as substitutions ("S<n>_"). The exact
// characters chosen here ARE the USR format — changing any of them breaks
// cross-TU USR stability.
void USRGenerator::VisitType(QualType T) {
  // This method mangles in USR information for types.  It can possibly
  // just reuse the naming-mangling logic used by codegen, although the
  // requirements for USRs might not be the same.
  ASTContext &Ctx = *Context;
  do {
    T = Ctx.getCanonicalType(T);
    Qualifiers Q = T.getQualifiers();
    // Encode const/volatile/restrict as a bitmask character '1'..'7'.
    unsigned qVal = 0;
    if (Q.hasConst())
      qVal |= 0x1;
    if (Q.hasVolatile())
      qVal |= 0x2;
    if (Q.hasRestrict())
      qVal |= 0x4;
    if(qVal)
      Out << ((char) ('0' + qVal));

    // Mangle in ObjC GC qualifiers?

    if (const PackExpansionType *Expansion = T->getAs<PackExpansionType>()) {
      Out << 'P';
      T = Expansion->getPattern();
    }

    if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
      unsigned char c = '\0';
      switch (BT->getKind()) {
        case BuiltinType::Void:
          c = 'v'; break;
        case BuiltinType::Bool:
          c = 'b'; break;
        case BuiltinType::UChar:
          c = 'c'; break;
        case BuiltinType::Char16:
          c = 'q'; break;
        case BuiltinType::Char32:
          c = 'w'; break;
        case BuiltinType::Min16UInt: // HLSL Change
        case BuiltinType::UShort:
          c = 's'; break;
        case BuiltinType::UInt:
          c = 'i'; break;
        case BuiltinType::ULong:
          c = 'l'; break;
        case BuiltinType::ULongLong:
          c = 'k'; break;
        case BuiltinType::UInt128:
          c = 'j'; break;
        case BuiltinType::Char_U:
        case BuiltinType::Char_S:
          c = 'C'; break;
        case BuiltinType::SChar:
          c = 'r'; break;
        case BuiltinType::WChar_S:
        case BuiltinType::WChar_U:
          c = 'W'; break;
        case BuiltinType::Min16Int: // HLSL Change
        case BuiltinType::Short:
          c = 'S'; break;
        case BuiltinType::Int:
          c = 'I'; break;
        case BuiltinType::Long:
          c = 'L'; break;
        case BuiltinType::LongLong:
          c = 'K'; break;
        case BuiltinType::Int128:
          c = 'J'; break;
        case BuiltinType::HalfFloat: // HLSL Change
        case BuiltinType::Half:
          c = 'h'; break;
        case BuiltinType::Float:
          c = 'f'; break;
        case BuiltinType::Double:
          c = 'd'; break;
        case BuiltinType::LongDouble:
          c = 'D'; break;
        // HLSL Change Starts
        case BuiltinType::Min10Float:
        case BuiltinType::Min16Float:
          c = 'r'; break;
        case BuiltinType::Min12Int:
          c = 'R'; break;
        case BuiltinType::LitFloat:
          c = '?'; break;
        case BuiltinType::LitInt:
          c = '?'; break;
        case BuiltinType::Int8_4Packed:
          c = '?'; break;
        case BuiltinType::UInt8_4Packed:
          c = '?'; break;
        // HLSL Change Ends
        case BuiltinType::NullPtr:
          c = 'n'; break;
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
        case BuiltinType::Dependent:
        case BuiltinType::OCLImage1d:
        case BuiltinType::OCLImage1dArray:
        case BuiltinType::OCLImage1dBuffer:
        case BuiltinType::OCLImage2d:
        case BuiltinType::OCLImage2dArray:
        case BuiltinType::OCLImage3d:
        case BuiltinType::OCLEvent:
        case BuiltinType::OCLSampler:
          // Dependent/placeholder/OpenCL types have no stable USR encoding.
          IgnoreResults = true;
          return;
        case BuiltinType::ObjCId:
          c = 'o'; break;
        case BuiltinType::ObjCClass:
          c = 'O'; break;
        case BuiltinType::ObjCSel:
          c = 'e'; break;
      }
      Out << c;
      return;
    }

    // If we have already seen this (non-built-in) type, use a substitution
    // encoding.
    llvm::DenseMap<const Type *, unsigned>::iterator Substitution
      = TypeSubstitutions.find(T.getTypePtr());
    if (Substitution != TypeSubstitutions.end()) {
      Out << 'S' << Substitution->second << '_';
      return;
    } else {
      // Record this as a substitution.
      unsigned Number = TypeSubstitutions.size();
      TypeSubstitutions[T.getTypePtr()] = Number;
    }

    // Compound types: emit a marker, then continue on the inner type.
    if (const PointerType *PT = T->getAs<PointerType>()) {
      Out << '*';
      T = PT->getPointeeType();
      continue;
    }
    // Note: rvalue references must be checked before ReferenceType, since
    // RValueReferenceType derives from it.
    if (const RValueReferenceType *RT = T->getAs<RValueReferenceType>()) {
      Out << "&&";
      T = RT->getPointeeType();
      continue;
    }
    if (const ReferenceType *RT = T->getAs<ReferenceType>()) {
      Out << '&';
      T = RT->getPointeeType();
      continue;
    }
    if (const FunctionProtoType *FT = T->getAs<FunctionProtoType>()) {
      Out << 'F';
      VisitType(FT->getReturnType());
      for (const auto &I : FT->param_types())
        VisitType(I);
      if (FT->isVariadic())
        Out << '.';
      return;
    }
    if (const BlockPointerType *BT = T->getAs<BlockPointerType>()) {
      Out << 'B';
      T = BT->getPointeeType();
      continue;
    }
    if (const ComplexType *CT = T->getAs<ComplexType>()) {
      Out << '<';
      T = CT->getElementType();
      continue;
    }
    if (const TagType *TT = T->getAs<TagType>()) {
      Out << '$';
      VisitTagDecl(TT->getDecl());
      return;
    }
    if (const TemplateTypeParmType *TTP = T->getAs<TemplateTypeParmType>()) {
      Out << 't' << TTP->getDepth() << '.' << TTP->getIndex();
      return;
    }
    if (const TemplateSpecializationType *Spec
                                    = T->getAs<TemplateSpecializationType>()) {
      Out << '>';
      VisitTemplateName(Spec->getTemplateName());
      Out << Spec->getNumArgs();
      for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
        VisitTemplateArgument(Spec->getArg(I));
      return;
    }
    if (const DependentNameType *DNT = T->getAs<DependentNameType>()) {
      Out << '^';
      // FIXME: Encode the qualifier, don't just print it.
      PrintingPolicy PO(Ctx.getLangOpts());
      PO.SuppressTagKeyword = true;
      PO.SuppressUnwrittenScope = true;
      PO.ConstantArraySizeAsWritten = false;
      PO.AnonymousTagLocations = false;
      DNT->getQualifier()->print(Out, PO);
      Out << ':' << DNT->getIdentifier()->getName();
      return;
    }
    if (const InjectedClassNameType *InjT = T->getAs<InjectedClassNameType>()) {
      T = InjT->getInjectedSpecializationType();
      continue;
    }

    // Unhandled type.
    Out << ' ';
    break;
  } while (true);
}
void USRGenerator::VisitTemplateParameterList(
const TemplateParameterList *Params) {
if (!Params)
return;
Out << '>' << Params->size();
for (TemplateParameterList::const_iterator P = Params->begin(),
PEnd = Params->end();
P != PEnd; ++P) {
Out << '#';
if (isa<TemplateTypeParmDecl>(*P)) {
if (cast<TemplateTypeParmDecl>(*P)->isParameterPack())
Out<< 'p';
Out << 'T';
continue;
}
if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
if (NTTP->isParameterPack())
Out << 'p';
Out << 'N';
VisitType(NTTP->getType());
continue;
}
TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
if (TTP->isParameterPack())
Out << 'p';
Out << 't';
VisitTemplateParameterList(TTP->getTemplateParameters());
}
}
/// Mangle a template name. Template template parameters are encoded by
/// depth/index; other template declarations are visited normally.
void USRGenerator::VisitTemplateName(TemplateName Name) {
  TemplateDecl *Template = Name.getAsTemplateDecl();
  if (!Template) {
    // FIXME: Visit dependent template names.
    return;
  }
  if (TemplateTemplateParmDecl *TTP =
          dyn_cast<TemplateTemplateParmDecl>(Template)) {
    Out << 't' << TTP->getDepth() << '.' << TTP->getIndex();
    return;
  }
  Visit(Template);
}
// Mangle a single template argument according to its kind. Null, nullptr,
// and expression arguments currently contribute nothing to the USR.
void USRGenerator::VisitTemplateArgument(const TemplateArgument &Arg) {
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    break;

  case TemplateArgument::Declaration:
    Visit(Arg.getAsDecl());
    break;

  case TemplateArgument::NullPtr:
    break;

  case TemplateArgument::TemplateExpansion:
    Out << 'P'; // pack expansion of...
    LLVM_FALLTHROUGH; // HLSL Change
  case TemplateArgument::Template:
    VisitTemplateName(Arg.getAsTemplateOrTemplatePattern());
    break;

  case TemplateArgument::Expression:
    // FIXME: Visit expressions.
    break;

  case TemplateArgument::Pack:
    Out << 'p' << Arg.pack_size();
    for (const auto &P : Arg.pack_elements())
      VisitTemplateArgument(P);
    break;

  case TemplateArgument::Type:
    VisitType(Arg.getAsType());
    break;

  case TemplateArgument::Integral:
    // Integral arguments need both their type and value to be unique.
    Out << 'V';
    VisitType(Arg.getIntegralType());
    Out << Arg.getAsIntegral();
    break;
  }
}
//===----------------------------------------------------------------------===//
// USR generation functions.
//===----------------------------------------------------------------------===//
// Write the USR fragment for an Objective-C class: "objc(cs)<Cls>".
void clang::index::generateUSRForObjCClass(StringRef Cls, raw_ostream &OS) {
  OS << "objc(cs)" << Cls;
}
// Write the USR fragment for a category: "objc(cy)<Cls>@<Cat>".
void clang::index::generateUSRForObjCCategory(StringRef Cls, StringRef Cat,
                                              raw_ostream &OS) {
  OS << "objc(cy)" << Cls << '@' << Cat;
}
// Write the USR fragment for an ivar: "@<Ivar>" (appended to its class).
void clang::index::generateUSRForObjCIvar(StringRef Ivar, raw_ostream &OS) {
  OS << '@' << Ivar;
}
// Write the USR fragment for a method: "(im)"/"(cm)" plus the selector.
void clang::index::generateUSRForObjCMethod(StringRef Sel,
                                            bool IsInstanceMethod,
                                            raw_ostream &OS) {
  OS << (IsInstanceMethod ? "(im)" : "(cm)") << Sel;
}
// Write the USR fragment for a property: "(py)<Prop>".
void clang::index::generateUSRForObjCProperty(StringRef Prop, raw_ostream &OS) {
  OS << "(py)" << Prop;
}
// Write the USR fragment for a protocol: "objc(pl)<Prot>".
void clang::index::generateUSRForObjCProtocol(StringRef Prot, raw_ostream &OS) {
  OS << "objc(pl)" << Prot;
}
// Public entry point: generate the USR for 'D' into 'Buf'. Returns true on
// failure (null decl, invalid location, or a construct with no USR).
bool clang::index::generateUSRForDecl(const Decl *D,
                                      SmallVectorImpl<char> &Buf) {
  // Don't generate USRs for things with invalid locations.
  if (!D || D->getLocStart().isInvalid())
    return true;

  USRGenerator UG(&D->getASTContext(), Buf);
  UG.Visit(D);
  return UG.ignoreResults();
}
// Public entry point: generate the USR for a macro definition into 'Buf'.
// Returns true on failure (null record or invalid location).
bool clang::index::generateUSRForMacro(const MacroDefinitionRecord *MD,
                                       const SourceManager &SM,
                                       SmallVectorImpl<char> &Buf) {
  // Don't generate USRs for things with invalid locations.
  if (!MD || MD->getLocation().isInvalid())
    return true;

  llvm::raw_svector_ostream Out(Buf);

  // Assume that system headers are sane.  Don't put source location
  // information into the USR if the macro comes from a system header.
  SourceLocation Loc = MD->getLocation();
  bool ShouldGenerateLocation = !SM.isInSystemHeader(Loc);

  Out << getUSRSpacePrefix();
  if (ShouldGenerateLocation)
    printLoc(Out, Loc, SM, /*IncludeOffset=*/true);
  Out << "@macro@";
  Out << MD->getName()->getName();
  return false;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/CFGStmtMap.cpp | //===--- CFGStmtMap.h - Map from Stmt* to CFGBlock* -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the CFGStmtMap class, which defines a mapping from
// Stmt* to CFGBlock*
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/DenseMap.h"
#include "clang/AST/ParentMap.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
using namespace clang;
typedef llvm::DenseMap<const Stmt*, CFGBlock*> SMap;
// The map is stored behind an opaque void* in the header; recover the
// concrete DenseMap type. Uses static_cast rather than a C-style cast.
static SMap *AsMap(void *m) { return static_cast<SMap *>(m); }
// Destroy the underlying Stmt* -> CFGBlock* map owned through 'M'.
CFGStmtMap::~CFGStmtMap() { delete AsMap(M); }
// Look up the CFGBlock containing 'S'. If 'S' itself is not in the map,
// walk up the ParentMap until an ancestor is found, memoizing the result
// for the original statement so repeated queries are O(1).
CFGBlock *CFGStmtMap::getBlock(Stmt *S) {
  SMap *SM = AsMap(M);
  Stmt *X = S;

  // If 'S' isn't in the map, walk the ParentMap to see if one of its ancestors
  // is in the map.
  while (X) {
    SMap::iterator I = SM->find(X);
    if (I != SM->end()) {
      CFGBlock *B = I->second;
      // Memoize this lookup.
      if (X != S)
        (*SM)[X] = B;
      return B;
    }

    X = PM->getParentIgnoreParens(X);
  }

  return nullptr;
}
// Record into 'SM' every statement owned by block 'B': its block-level
// expressions (first writer wins), its label, and its terminator (which
// deliberately overwrites any earlier mapping).
static void Accumulate(SMap &SM, CFGBlock *B) {
  // First walk the block-level expressions.
  for (CFGBlock::iterator I = B->begin(), E = B->end(); I != E; ++I) {
    const CFGElement &CE = *I;
    Optional<CFGStmt> CS = CE.getAs<CFGStmt>();
    if (!CS)
      continue;

    CFGBlock *&Entry = SM[CS->getStmt()];
    // If 'Entry' is already initialized (e.g., a terminator was already),
    // skip.
    if (Entry)
      continue;

    Entry = B;
  }

  // Look at the label of the block.
  if (Stmt *Label = B->getLabel())
    SM[Label] = B;

  // Finally, look at the terminator.  If the terminator was already added
  // because it is a block-level expression in another block, overwrite
  // that mapping.
  if (Stmt *Term = B->getTerminator())
    SM[Term] = B;
}
/// Construct a CFGStmtMap for the given CFG and ParentMap, or null if
/// either input is missing. Ownership of the internal map passes to the
/// returned CFGStmtMap.
CFGStmtMap *CFGStmtMap::Build(CFG *C, ParentMap *PM) {
  if (!C || !PM)
    return nullptr;

  SMap *SM = new SMap();

  // Walk all blocks, accumulating block-level expressions, labels, and
  // terminators into the map.
  for (CFGBlock *B : *C)
    Accumulate(*SM, B);

  return new CFGStmtMap(PM, SM);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/UninitializedValues.cpp | //==- UninitializedValues.cpp - Find Uninitialized Values -------*- C++ --*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements uninitialized values analysis for source-level CFGs.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/UninitializedValues.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/DomainSpecific/ObjCNoReturn.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PackedVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/SaveAndRestore.h"
#include <utility>
using namespace clang;
#define DEBUG_LOGGING 0
// Returns true if 'vd' is a variable this analysis should track: HLSL
// scalar-typed `out` parameters, or ordinary non-static locals of scalar,
// vector, or record type declared directly in 'dc'.
static bool isTrackedVar(const VarDecl *vd, const DeclContext *dc) {
  // HLSL Change Begin - Treat `out` parameters as uninitialized values.
  if (vd->hasAttr<HLSLOutAttr>() && !vd->hasAttr<HLSLInAttr>()) {
    QualType ty = vd->getType().getNonReferenceType();
    // FIXME: HLSL doesn't model parameter passing of structs correctly in the
    // AST. Struct types are passed by value in the AST regardless of whether
    // they are out or inout, which results in LValueToRValue casts in the AST
    // which look like uses (because they are). For now this analysis can
    // generate too many false positives for structs.
    return ty->isScalarType();
  }
  // HLSL Change End - Treat `out` parameters as uninitialized values.
  if (vd->isLocalVarDecl() && !vd->hasGlobalStorage() &&
      !vd->isExceptionVariable() && !vd->isInitCapture() &&
      !vd->isImplicit() && vd->getDeclContext() == dc) {
    QualType ty = vd->getType();
    return ty->isScalarType() || ty->isVectorType() || ty->isRecordType();
  }
  return false;
}
//------------------------------------------------------------------------====//
// DeclToIndex: a mapping from Decls we track to value indices.
//====------------------------------------------------------------------------//
namespace {
/// Maps each tracked VarDecl in a DeclContext to a dense bit-vector index,
/// and keeps a side list of HLSL `out` parameters.
class DeclToIndex {
  llvm::DenseMap<const VarDecl *, unsigned> map;
  // HLSL Change Begin - Treat `out` parameters as uninitialized values.
  // Tracked `out` (non-`in`) parameters, in declaration order.
  llvm::SmallVector<const VarDecl *, 4> hlslOutParams;
  // HLSL Change End - Treat `out` parameters as uninitialized values.

public:
  DeclToIndex() {}

  /// Compute the actual mapping from declarations to bits.
  void computeMap(const DeclContext &dc);

  /// Return the number of declarations in the map.
  unsigned size() const { return map.size(); }

  /// Returns the bit vector index for a given declaration.
  Optional<unsigned> getValueIndex(const VarDecl *d) const;

  // HLSL Change Begin - Treat `out` parameters as uninitialized values.
  const llvm::SmallVector<const VarDecl *, 4> &getHLSLOutParams() {
    return hlslOutParams;
  }
  // HLSL Change End - Treat `out` parameters as uninitialized values.
};
}
/// Assign a dense index to every tracked variable declared in 'dc', and
/// collect tracked HLSL `out` parameters into their own list.
void DeclToIndex::computeMap(const DeclContext &dc) {
  unsigned nextIndex = 0;
  DeclContext::specific_decl_iterator<VarDecl> I(dc.decls_begin()),
                                               E(dc.decls_end());
  for ( ; I != E; ++I) {
    const VarDecl *vd = *I;
    if (!isTrackedVar(vd, &dc))
      continue;
    map[vd] = nextIndex++;
    // HLSL Change Begin - Treat `out` parameters as uninitialized values.
    // Keep HLSL parameters in a separate index.
    if (vd->hasAttr<HLSLOutAttr>() && !vd->hasAttr<HLSLInAttr>())
      hlslOutParams.push_back(vd);
    // HLSL Change End - Treat `out` parameters as uninitialized values.
  }
}
/// Return the dense index for 'd', or None if 'd' is not tracked.
Optional<unsigned> DeclToIndex::getValueIndex(const VarDecl *d) const {
  auto It = map.find(d);
  if (It != map.end())
    return It->second;
  return None;
}
//------------------------------------------------------------------------====//
// CFGBlockValues: dataflow values for CFG blocks.
//====------------------------------------------------------------------------//
// These values are defined in such a way that a merge can be done using
// a bitwise OR.
enum Value { Unknown = 0x0, /* 00 */
Initialized = 0x1, /* 01 */
Uninitialized = 0x2, /* 10 */
MayUninitialized = 0x3 /* 11 */ };
// True when the high bit is set, i.e. Uninitialized or MayUninitialized.
static bool isUninitialized(const Value v) {
  return v >= Uninitialized;
}
// True only for definitely-uninitialized (not the "maybe" lattice point).
static bool isAlwaysUninit(const Value v) {
  return v == Uninitialized;
}
namespace {
typedef llvm::PackedVector<Value, 2, llvm::SmallBitVector> ValueVector;
/// Per-block dataflow state: one ValueVector of per-variable lattice values
/// for each CFG block, plus a shared scratch vector used while merging
/// predecessor states.
class CFGBlockValues {
  const CFG &cfg;
  SmallVector<ValueVector, 8> vals;
  // Working vector: the merge of predecessor states for the current block.
  ValueVector scratch;
  DeclToIndex declToIndex;
public:
  CFGBlockValues(const CFG &cfg);

  unsigned getNumEntries() const { return declToIndex.size(); }

  void computeSetOfDeclarations(const DeclContext &dc);
  ValueVector &getValueVector(const CFGBlock *block) {
    return vals[block->getBlockID()];
  }

  void setAllScratchValues(Value V);
  void mergeIntoScratch(ValueVector const &source, bool isFirst);
  bool updateValueVectorWithScratch(const CFGBlock *block);

  bool hasNoDeclarations() const {
    return declToIndex.size() == 0;
  }

  void resetScratch();

  ValueVector::reference operator[](const VarDecl *vd);

  Value getValue(const CFGBlock *block, const CFGBlock *dstBlock,
                 const VarDecl *vd) {
    const Optional<unsigned> &idx = declToIndex.getValueIndex(vd);
    assert(idx.hasValue());
    return getValueVector(block)[idx.getValue()];
  }

  // HLSL Change Begin - Treat `out` parameters as uninitialized values.
  const llvm::SmallVector<const VarDecl *, 4> &getHLSLOutParams() {
    return declToIndex.getHLSLOutParams();
  }
  // HLSL Change End - Treat `out` parameters as uninitialized values.
};
} // end anonymous namespace
// 'vals' is sized later by computeSetOfDeclarations().
CFGBlockValues::CFGBlockValues(const CFG &c) : cfg(c), vals(0) {}
void CFGBlockValues::computeSetOfDeclarations(const DeclContext &dc) {
declToIndex.computeMap(dc);
unsigned decls = declToIndex.size();
scratch.resize(decls);
unsigned n = cfg.getNumBlockIDs();
if (!n)
return;
vals.resize(n);
for (unsigned i = 0; i < n; ++i)
vals[i].resize(decls);
}
#if DEBUG_LOGGING
// Debug helper (compiled only under DEBUG_LOGGING): dump a block's value
// vector to stderr as "<blockID> : v0 v1 ... : num".
static void printVector(const CFGBlock *block, ValueVector &bv,
                        unsigned num) {
  llvm::errs() << block->getBlockID() << " :";
  for (unsigned i = 0; i < bv.size(); ++i) {
    llvm::errs() << ' ' << bv[i];
  }
  llvm::errs() << " : " << num << '\n';
}
#endif
// Overwrite every entry of the scratch vector with 'V' (e.g. to model a
// no-return call killing all state).
void CFGBlockValues::setAllScratchValues(Value V) {
  for (unsigned I = 0, E = scratch.size(); I != E; ++I)
    scratch[I] = V;
}
// Merge a predecessor's state into scratch. The lattice values are encoded
// so that join is bitwise OR; the first predecessor simply overwrites.
void CFGBlockValues::mergeIntoScratch(ValueVector const &source,
                                      bool isFirst) {
  if (isFirst)
    scratch = source;
  else
    scratch |= source;
}
// Copy scratch into the block's stored state; returns true if the state
// changed (i.e. the dataflow iteration has not yet converged for it).
bool CFGBlockValues::updateValueVectorWithScratch(const CFGBlock *block) {
  ValueVector &dst = getValueVector(block);
  bool changed = (dst != scratch);
  if (changed)
    dst = scratch;
#if DEBUG_LOGGING
  printVector(block, scratch, 0);
#endif
  return changed;
}
// Clear the scratch vector (all entries back to Unknown).
void CFGBlockValues::resetScratch() {
  scratch.reset();
}
// Access (read/write) the scratch entry for a tracked variable; asserts
// that 'vd' is actually tracked.
ValueVector::reference CFGBlockValues::operator[](const VarDecl *vd) {
  const Optional<unsigned> &idx = declToIndex.getValueIndex(vd);
  assert(idx.hasValue());
  return scratch[idx.getValue()];
}
//------------------------------------------------------------------------====//
// Worklist: worklist for dataflow analysis.
//====------------------------------------------------------------------------//
namespace {
/// Worklist for the forward dataflow analysis. Blocks are drained first
/// from an explicit worklist (back-edge updates), then from the reverse
/// post-order sequence; 'enqueuedBlocks' dedupes pending blocks.
class DataflowWorklist {
  PostOrderCFGView::iterator PO_I, PO_E;
  SmallVector<const CFGBlock *, 20> worklist;
  llvm::BitVector enqueuedBlocks;
public:
  DataflowWorklist(const CFG &cfg, PostOrderCFGView &view)
    : PO_I(view.begin()), PO_E(view.end()),
      enqueuedBlocks(cfg.getNumBlockIDs(), true) {
        // Treat the first block as already analyzed.
        if (PO_I != PO_E) {
          assert(*PO_I == &cfg.getEntry());
          enqueuedBlocks[(*PO_I)->getBlockID()] = false;
          ++PO_I;
        }
      }

  void enqueueSuccessors(const CFGBlock *block);
  const CFGBlock *dequeue();
};
}
/// Add every not-yet-pending successor of 'block' to the worklist.
void DataflowWorklist::enqueueSuccessors(const clang::CFGBlock *block) {
  for (CFGBlock::const_succ_iterator SI = block->succ_begin(),
                                     SE = block->succ_end(); SI != SE; ++SI) {
    const CFGBlock *Succ = *SI;
    // Skip unreachable edges and blocks that are already pending.
    if (!Succ)
      continue;
    if (enqueuedBlocks[Succ->getBlockID()])
      continue;
    enqueuedBlocks[Succ->getBlockID()] = true;
    worklist.push_back(Succ);
  }
}
// Return the next block to analyze, or null when fully drained. Explicit
// worklist entries (back-edge updates) take priority over the RPO sweep.
const CFGBlock *DataflowWorklist::dequeue() {
  const CFGBlock *B = nullptr;

  // First dequeue from the worklist.  This can represent
  // updates along backedges that we want propagated as quickly as possible.
  if (!worklist.empty())
    B = worklist.pop_back_val();

  // Next dequeue from the initial reverse post order.  This is the
  // theoretical ideal in the presence of no back edges.
  else if (PO_I != PO_E) {
    B = *PO_I;
    ++PO_I;
  }
  else {
    return nullptr;
  }

  assert(enqueuedBlocks[B->getBlockID()] == true);
  enqueuedBlocks[B->getBlockID()] = false;
  return B;
}
//------------------------------------------------------------------------====//
// Classification of DeclRefExprs as use or initialization.
//====------------------------------------------------------------------------//
namespace {
/// Pair of (variable, referencing expression) produced by findVar();
/// both are null when no tracked variable was found.
class FindVarResult {
  const VarDecl *vd;
  const DeclRefExpr *dr;
public:
  FindVarResult(const VarDecl *vd, const DeclRefExpr *dr) : vd(vd), dr(dr) {}

  const DeclRefExpr *getDeclRefExpr() const { return dr; }
  const VarDecl *getDecl() const { return vd; }
};
/// Peel parentheses, no-op casts, and lvalue bitcasts off 'Ex' until a
/// fixed point is reached.
static const Expr *stripCasts(ASTContext &C, const Expr *Ex) {
  while (Ex) {
    Ex = Ex->IgnoreParenNoopCasts(C);
    const CastExpr *CE = dyn_cast<CastExpr>(Ex);
    if (!CE || CE->getCastKind() != CK_LValueBitCast)
      break;
    Ex = CE->getSubExpr();
  }
  return Ex;
}
/// If E is an expression comprising a reference to a single variable, find that
/// variable.
static FindVarResult findVar(const Expr *E, const DeclContext *DC) {
if (const DeclRefExpr *DRE =
dyn_cast<DeclRefExpr>(stripCasts(DC->getParentASTContext(), E)))
if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
if (isTrackedVar(VD, DC))
return FindVarResult(VD, DRE);
return FindVarResult(nullptr, nullptr);
}
/// \brief Classify each DeclRefExpr as an initialization or a use. Any
/// DeclRefExpr which isn't explicitly classified will be assumed to have
/// escaped the analysis and will be treated as an initialization.
/// \brief Classify each DeclRefExpr as an initialization or a use. Any
/// DeclRefExpr which isn't explicitly classified will be assumed to have
/// escaped the analysis and will be treated as an initialization.
class ClassifyRefs : public StmtVisitor<ClassifyRefs> {
public:
  enum Class {
    Init,
    Use,
    SelfInit,
    Ignore
  };

private:
  const DeclContext *DC;
  // Explicit classifications recorded by the Visit* methods; anything
  // absent from this map defaults to Init (see get()).
  llvm::DenseMap<const DeclRefExpr*, Class> Classification;

  bool isTrackedVar(const VarDecl *VD) const {
    return ::isTrackedVar(VD, DC);
  }

  void classify(const Expr *E, Class C);

public:
  ClassifyRefs(AnalysisDeclContext &AC) : DC(cast<DeclContext>(AC.getDecl())) {}

  void VisitDeclStmt(DeclStmt *DS);
  void VisitUnaryOperator(UnaryOperator *UO);
  void VisitBinaryOperator(BinaryOperator *BO);
  void VisitCallExpr(CallExpr *CE);
  void VisitCastExpr(CastExpr *CE);

  void operator()(Stmt *S) { Visit(S); }

  // Returns the recorded classification of DRE, Ignore for untracked
  // variables, and Init for tracked-but-unclassified references.
  Class get(const DeclRefExpr *DRE) const {
    llvm::DenseMap<const DeclRefExpr*, Class>::const_iterator I
        = Classification.find(DRE);
    if (I != Classification.end())
      return I->second;

    const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl());
    if (!VD || !isTrackedVar(VD))
      return Ignore;

    return Init;
  }
};
}
/// If VD's initializer is a direct reference to VD itself (the 'int x = x;'
/// idiom), return that DeclRefExpr; otherwise return null. Record-typed
/// variables never qualify.
static const DeclRefExpr *getSelfInitExpr(VarDecl *VD) {
  if (VD->getType()->isRecordType())
    return nullptr;
  Expr *Init = VD->getInit();
  if (!Init)
    return nullptr;
  const DeclRefExpr *Ref =
      dyn_cast<DeclRefExpr>(stripCasts(VD->getASTContext(), Init));
  return (Ref && Ref->getDecl() == VD) ? Ref : nullptr;
}
/// Record classification C for every variable reference reachable through E,
/// looking through conditional operators, opaque values, member accesses on
/// non-static data members, pointer-to-member operators, and comma operators.
void ClassifyRefs::classify(const Expr *E, Class C) {
  // The result of a ?: could also be an lvalue.
  E = E->IgnoreParens();
  if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
    classify(CO->getTrueExpr(), C);
    classify(CO->getFalseExpr(), C);
    return;
  }

  if (const BinaryConditionalOperator *BCO =
          dyn_cast<BinaryConditionalOperator>(E)) {
    // The true branch of 'a ?: b' is the (opaque) condition itself; only the
    // false expression is a distinct operand here.
    classify(BCO->getFalseExpr(), C);
    return;
  }

  if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
    classify(OVE->getSourceExpr(), C);
    return;
  }

  if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
    // Accessing a field of a local aggregate classifies the aggregate itself;
    // static data members are not tracked per-object.
    if (VarDecl *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
      if (!VD->isStaticDataMember())
        classify(ME->getBase(), C);
    }
    return;
  }

  if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
    switch (BO->getOpcode()) {
    case BO_PtrMemD:
    case BO_PtrMemI:
      classify(BO->getLHS(), C);
      return;
    case BO_Comma:
      classify(BO->getRHS(), C);
      return;
    default:
      return;
    }
  }

  FindVarResult Var = findVar(E, DC);
  if (const DeclRefExpr *DRE = Var.getDeclRefExpr())
    // Combine with any earlier classification; the Class enumerators are
    // ordered so std::max prefers Ignore over SelfInit over Use over Init.
    Classification[DRE] = std::max(Classification[DRE], C);
}
void ClassifyRefs::VisitDeclStmt(DeclStmt *DS) {
  // Mark 'int x = x;' self-references so they are reported specially rather
  // than as ordinary uses.
  for (auto *D : DS->decls()) {
    auto *VD = dyn_cast<VarDecl>(D);
    if (!VD || !isTrackedVar(VD))
      continue;
    if (const DeclRefExpr *SelfRef = getSelfInitExpr(VD))
      Classification[SelfRef] = SelfInit;
  }
}
void ClassifyRefs::VisitBinaryOperator(BinaryOperator *BO) {
  // A compound assignment reads the old value, so its LHS counts as a use.
  if (BO->isCompoundAssignmentOp()) {
    classify(BO->getLHS(), Use);
    return;
  }
  // The LHS of a plain assignment (or a comma operand) is not a use: a plain
  // assignment is treated as initializing the variable when TransferFunctions
  // visits it, and it does not affect whether the variable is uninitialized,
  // so there is no point counting it as a use.
  BinaryOperatorKind Op = BO->getOpcode();
  if (Op == BO_Assign || Op == BO_Comma)
    classify(BO->getLHS(), Ignore);
}
void ClassifyRefs::VisitUnaryOperator(UnaryOperator *UO) {
  // ++/-- read the old value even though no lvalue-to-rvalue conversion
  // appears in the AST, so classify the operand as a use.
  if (!UO->isIncrementDecrementOp())
    return;
  classify(UO->getSubExpr(), Use);
}
/// Returns true if QT is a pointer type whose pointee is const-qualified.
static bool isPointerToConst(const QualType &QT) {
  if (!QT->isAnyPointerType())
    return false;
  return QT->getPointeeType().isConstQualified();
}
/// Classify call arguments: std::move's argument is a use; arguments passed
/// by const reference or const pointer are ignored (the callee can neither
/// initialize nor meaningfully use them for this analysis); HLSL `in`/`inout`
/// parameters are uses.
void ClassifyRefs::VisitCallExpr(CallExpr *CE) {
  // Classify arguments to std::move as used.
  if (CE->getNumArgs() == 1) {
    if (FunctionDecl *FD = CE->getDirectCallee()) {
      if (FD->isInStdNamespace() && FD->getIdentifier() &&
          FD->getIdentifier()->isStr("move")) {
        // RecordTypes are handled in SemaDeclCXX.cpp.
        if (!CE->getArg(0)->getType()->isRecordType())
          classify(CE->getArg(0), Use);
        return;
      }
    }
  }

  // If a value is passed by const pointer or by const reference to a function,
  // we should not assume that it is initialized by the call, and we
  // conservatively do not assume that it is used.
  unsigned ParamIdx = 0; // HLSL Change
  for (CallExpr::arg_iterator I = CE->arg_begin(), E = CE->arg_end();
       I != E; ++I, ++ParamIdx) { // HLSL Change - add ParamIdx
    // HLSL Change Begin - Treat `out` parameters as uninitialized values.
    if (auto FD = CE->getDirectCallee()) {
      // Guard against variadic-style mismatches between args and params.
      if (FD->getNumParams() > ParamIdx) {
        ParmVarDecl *PD = FD->getParamDecl(ParamIdx);
        bool HasIn = PD->hasAttr<HLSLInAttr>();
        bool HasInOut = PD->hasAttr<HLSLInOutAttr>();
        // If we have an explicit `in` or `inout` annotation we should treat
        // this as a use, otherwise leave it up to C/C++ rules.
        if(HasIn || HasInOut)
          classify(*I, Use);
      }
    }
    // HLSL Change End - Treat `out` parameters as uninitialized values.
    if ((*I)->isGLValue()) {
      // Passing a const glvalue: the callee cannot write through it.
      if ((*I)->getType().isConstQualified())
        classify((*I), Ignore);
    } else if (isPointerToConst((*I)->getType())) {
      // For '&x' passed as const T*, ignore x itself rather than the
      // address-of expression.
      const Expr *Ex = stripCasts(DC->getParentASTContext(), *I);
      const UnaryOperator *UO = dyn_cast<UnaryOperator>(Ex);
      if (UO && UO->getOpcode() == UO_AddrOf)
        Ex = UO->getSubExpr();
      classify(Ex, Ignore);
    }
  }
}
void ClassifyRefs::VisitCastExpr(CastExpr *CE) {
  // A load of the value is a genuine use.
  if (CE->getCastKind() == CK_LValueToRValue) {
    classify(CE->getSubExpr(), Use);
    return;
  }
  // A C-style cast to void, e.g. "(void) x;", is the idiom for deliberately
  // discarding a value: squelch any detected load of an uninitialized value.
  if (const CStyleCastExpr *CSE = dyn_cast<CStyleCastExpr>(CE))
    if (CSE->getType()->isVoidType())
      classify(CSE->getSubExpr(), Ignore);
}
//------------------------------------------------------------------------====//
// Transfer function for uninitialized values analysis.
//====------------------------------------------------------------------------//
namespace {
/// Per-block transfer function for the uninitialized-values dataflow: visits
/// each statement of a CFG block, updating the scratch value vector in
/// 'vals' and reporting uses of uninitialized variables through 'handler'.
class TransferFunctions : public StmtVisitor<TransferFunctions> {
  CFGBlockValues &vals;                  // Per-block dataflow values.
  const CFG &cfg;
  const CFGBlock *block;                 // Block currently being processed.
  AnalysisDeclContext &ac;
  const ClassifyRefs &classification;    // Precomputed use/init classification.
  ObjCNoReturn objCNoRet;
  UninitVariablesHandler &handler;       // Diagnostic sink.

public:
  TransferFunctions(CFGBlockValues &vals, const CFG &cfg,
                    const CFGBlock *block, AnalysisDeclContext &ac,
                    const ClassifyRefs &classification,
                    UninitVariablesHandler &handler)
      : vals(vals), cfg(cfg), block(block), ac(ac),
        classification(classification), objCNoRet(ac.getASTContext()),
        handler(handler) {}

  void reportUse(const Expr *ex, const VarDecl *vd);

  void VisitBinaryOperator(BinaryOperator *bo);
  void VisitBlockExpr(BlockExpr *be);
  void VisitCallExpr(CallExpr *ce);
  void VisitDeclRefExpr(DeclRefExpr *dr);
  void VisitDeclStmt(DeclStmt *ds);
  void VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS);
  void VisitObjCMessageExpr(ObjCMessageExpr *ME);
  void VisitReturnStmt(ReturnStmt *RS);          // HLSL Change
  void HandleHLSLImplicitUse(SourceLocation Loc); // HLSL Change

  bool isTrackedVar(const VarDecl *vd) {
    return ::isTrackedVar(vd, cast<DeclContext>(ac.getDecl()));
  }

  FindVarResult findVar(const Expr *ex) {
    return ::findVar(ex, cast<DeclContext>(ac.getDecl()));
  }

  /// Build the UninitUse describing a (possibly conditional) use of 'vd' at
  /// 'ex' given its current dataflow value 'v', including the CFG edges on
  /// which the variable is definitely uninitialized.
  UninitUse getUninitUse(const Expr *ex, const VarDecl *vd, Value v) {
    UninitUse Use(ex, isAlwaysUninit(v));

    assert(isUninitialized(v));
    if (Use.getKind() == UninitUse::Always)
      return Use;

    // If an edge which leads unconditionally to this use did not initialize
    // the variable, we can say something stronger than 'may be uninitialized':
    // we can say 'either it's used uninitialized or you have dead code'.
    //
    // We track the number of successors of a node which have been visited, and
    // visit a node once we have visited all of its successors. Only edges where
    // the variable might still be uninitialized are followed. Since a variable
    // can't transfer from being initialized to being uninitialized, this will
    // trace out the subgraph which inevitably leads to the use and does not
    // initialize the variable. We do not want to skip past loops, since their
    // non-termination might be correlated with the initialization condition.
    //
    // For example:
    //
    //         void f(bool a, bool b) {
    // block1:   int n;
    //           if (a) {
    // block2:     if (b)
    // block3:       n = 1;
    // block4:   } else if (b) {
    // block5:     while (!a) {
    // block6:       do_work(&a);
    //               n = 2;
    //             }
    //           }
    // block7:   if (a)
    // block8:     g();
    // block9:   return n;
    //         }
    //
    // Starting from the maybe-uninitialized use in block 9:
    //  * Block 7 is not visited because we have only visited one of its two
    //    successors.
    //  * Block 8 is visited because we've visited its only successor.
    // From block 8:
    //  * Block 7 is visited because we've now visited both of its successors.
    // From block 7:
    //  * Blocks 1, 2, 4, 5, and 6 are not visited because we didn't visit all
    //    of their successors (we didn't visit 4, 3, 5, 6, and 5, respectively).
    //  * Block 3 is not visited because it initializes 'n'.
    // Now the algorithm terminates, having visited blocks 7 and 8, and having
    // found the frontier is blocks 2, 4, and 5.
    //
    // 'n' is definitely uninitialized for two edges into block 7 (from blocks 2
    // and 4), so we report that any time either of those edges is taken (in
    // each case when 'b == false'), 'n' is used uninitialized.
    SmallVector<const CFGBlock*, 32> Queue;
    SmallVector<unsigned, 32> SuccsVisited(cfg.getNumBlockIDs(), 0);
    Queue.push_back(block);
    // Specify that we've already visited all successors of the starting block.
    // This has the dual purpose of ensuring we never add it to the queue, and
    // of marking it as not being a candidate element of the frontier.
    SuccsVisited[block->getBlockID()] = block->succ_size();
    while (!Queue.empty()) {
      const CFGBlock *B = Queue.pop_back_val();

      // If the use is always reached from the entry block, make a note of that.
      if (B == &cfg.getEntry())
        Use.setUninitAfterCall();

      for (CFGBlock::const_pred_iterator I = B->pred_begin(), E = B->pred_end();
           I != E; ++I) {
        const CFGBlock *Pred = *I;
        if (!Pred)
          continue;

        Value AtPredExit = vals.getValue(Pred, B, vd);
        if (AtPredExit == Initialized)
          // This block initializes the variable.
          continue;
        if (AtPredExit == MayUninitialized &&
            vals.getValue(B, nullptr, vd) == Uninitialized) {
          // This block declares the variable (uninitialized), and is reachable
          // from a block that initializes the variable. We can't guarantee to
          // give an earlier location for the diagnostic (and it appears that
          // this code is intended to be reachable) so give a diagnostic here
          // and go no further down this path.
          Use.setUninitAfterDecl();
          continue;
        }

        unsigned &SV = SuccsVisited[Pred->getBlockID()];
        if (!SV) {
          // When visiting the first successor of a block, mark all NULL
          // successors as having been visited.
          for (CFGBlock::const_succ_iterator SI = Pred->succ_begin(),
                                             SE = Pred->succ_end();
               SI != SE; ++SI)
            if (!*SI)
              ++SV;
        }

        if (++SV == Pred->succ_size())
          // All paths from this block lead to the use and don't initialize the
          // variable.
          Queue.push_back(Pred);
      }
    }

    // Scan the frontier, looking for blocks where the variable was
    // uninitialized.
    for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
      const CFGBlock *Block = *BI;
      unsigned BlockID = Block->getBlockID();
      const Stmt *Term = Block->getTerminator();
      if (SuccsVisited[BlockID] && SuccsVisited[BlockID] < Block->succ_size() &&
          Term) {
        // This block inevitably leads to the use. If we have an edge from here
        // to a post-dominator block, and the variable is uninitialized on that
        // edge, we have found a bug.
        for (CFGBlock::const_succ_iterator I = Block->succ_begin(),
             E = Block->succ_end(); I != E; ++I) {
          const CFGBlock *Succ = *I;
          if (Succ && SuccsVisited[Succ->getBlockID()] >= Succ->succ_size() &&
              vals.getValue(Block, Succ, vd) == Uninitialized) {
            // Switch cases are a special case: report the label to the caller
            // as the 'terminator', not the switch statement itself. Suppress
            // situations where no label matched: we can't be sure that's
            // possible.
            if (isa<SwitchStmt>(Term)) {
              const Stmt *Label = Succ->getLabel();
              if (!Label || !isa<SwitchCase>(Label))
                // Might not be possible.
                continue;

              UninitUse::Branch Branch;
              Branch.Terminator = Label;
              Branch.Output = 0; // Ignored.
              Use.addUninitBranch(Branch);
            } else {
              UninitUse::Branch Branch;
              Branch.Terminator = Term;
              Branch.Output = I - Block->succ_begin();
              Use.addUninitBranch(Branch);
            }
          }
        }
      }
    }

    return Use;
  }
};
}
void TransferFunctions::reportUse(const Expr *ex, const VarDecl *vd) {
Value v = vals[vd];
if (isUninitialized(v))
handler.handleUseOfUninitVariable(vd, getUninitUse(ex, vd, v));
}
void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS) {
  // The loop element declaration is initialized by the collection iteration.
  DeclStmt *ElementDS = dyn_cast<DeclStmt>(FS->getElement());
  if (!ElementDS)
    return;
  const VarDecl *ElementVD = cast<VarDecl>(ElementDS->getSingleDecl());
  if (isTrackedVar(ElementVD))
    vals[ElementVD] = Initialized;
}
void TransferFunctions::VisitBlockExpr(BlockExpr *be) {
  // A by-reference capture is treated as initializing the variable; a by-copy
  // capture reads the value, so it is reported as a use if uninitialized.
  const BlockDecl *BlockD = be->getBlockDecl();
  for (const auto &Capture : BlockD->captures()) {
    const VarDecl *Captured = Capture.getVariable();
    if (!isTrackedVar(Captured))
      continue;
    if (!Capture.isByRef()) {
      reportUse(be, Captured);
      continue;
    }
    vals[Captured] = Initialized;
  }
}
/// Model the effect of a call on the tracked values: returns-twice callees
/// (setjmp-like) conservatively initialize everything; analyzer-noreturn
/// callees reset everything to Unknown.
void TransferFunctions::VisitCallExpr(CallExpr *ce) {
  if (Decl *Callee = ce->getCalleeDecl()) {
    if (Callee->hasAttr<ReturnsTwiceAttr>()) {
      // After a call to a function like setjmp or vfork, any variable which is
      // initialized anywhere within this function may now be initialized. For
      // now, just assume such a call initializes all variables.  FIXME: Only
      // mark variables as initialized if they have an initializer which is
      // reachable from here.
      vals.setAllScratchValues(Initialized);
    }
    else if (Callee->hasAttr<AnalyzerNoReturnAttr>()) {
      // Functions labeled like "analyzer_noreturn" are often used to denote
      // "panic" functions that in special debug situations can still return,
      // but for the most part should not be treated as returning.  This is a
      // useful annotation borrowed from the static analyzer that is useful for
      // suppressing branch-specific false positives when we call one of these
      // functions but keep pretending the path continues (when in reality the
      // user doesn't care).
      vals.setAllScratchValues(Unknown);
    }
  }
}
/// Act on the classification precomputed by ClassifyRefs for this reference.
/// Note: the decl is only cast to VarDecl in the non-Ignore cases — an
/// Ignore-classified reference may name a non-variable decl.
void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *dr) {
  switch (classification.get(dr)) {
  case ClassifyRefs::Ignore:
    break;
  case ClassifyRefs::Use:
    reportUse(dr, cast<VarDecl>(dr->getDecl()));
    break;
  case ClassifyRefs::Init:
    vals[cast<VarDecl>(dr->getDecl())] = Initialized;
    break;
  case ClassifyRefs::SelfInit:
    handler.handleSelfInit(cast<VarDecl>(dr->getDecl()));
    break;
  }
}
void TransferFunctions::VisitBinaryOperator(BinaryOperator *BO) {
  // A simple assignment initializes the tracked variable named by its LHS.
  if (BO->getOpcode() != BO_Assign)
    return;
  if (const VarDecl *Assigned = findVar(BO->getLHS()).getDecl())
    vals[Assigned] = Initialized;
}
/// Update dataflow values for newly declared variables: initialized by their
/// initializer, explicitly uninitialized for the 'int x = x;' idiom, and
/// uninitialized when no initializer is present.
void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
  for (auto *DI : DS->decls()) {
    VarDecl *VD = dyn_cast<VarDecl>(DI);
    if (VD && isTrackedVar(VD)) {
      if (getSelfInitExpr(VD)) {
        // If the initializer consists solely of a reference to itself, we
        // explicitly mark the variable as uninitialized. This allows code
        // like the following:
        //
        //   int x = x;
        //
        // to deliberately leave a variable uninitialized. Different analysis
        // clients can detect this pattern and adjust their reporting
        // appropriately, but we need to continue to analyze subsequent uses
        // of the variable.
        vals[VD] = Uninitialized;
      } else if (VD->getInit()) {
        // Treat the new variable as initialized.
        vals[VD] = Initialized;
      } else {
        // No initializer: the variable is now uninitialized. This matters
        // for cases like:
        //   while (...) {
        //     int n;
        //     use(n);
        //     n = 0;
        //   }
        // FIXME: Mark the variable as uninitialized whenever its scope is
        // left, since its scope could be re-entered by a jump over the
        // declaration.
        vals[VD] = Uninitialized;
      }
    }
  }
}
void TransferFunctions::VisitObjCMessageExpr(ObjCMessageExpr *ME) {
  // An implicit no-return message that is not modeled in the CFG invalidates
  // all tracked dataflow values.
  if (!objCNoRet.isImplicitNoReturn(ME))
    return;
  vals.setAllScratchValues(Unknown);
}
// HLSL Change Begin - Treat `out` parameters as uninitialized values.
void TransferFunctions::VisitReturnStmt(ReturnStmt *RS) {
  // Visit the statement normally first so that its expression can be
  // processed, then model the return as an implicit use of every HLSL `out`
  // parameter that is still uninitialized (see HandleHLSLImplicitUse).
  VisitStmt(RS);
  HandleHLSLImplicitUse(RS->getLocStart());
}
/// Report each still-uninitialized HLSL `out` parameter as if it were used at
/// \p Loc. Invoked for return statements (VisitReturnStmt) and for blocks
/// that fall off the end of the function (see runOnBlock).
void TransferFunctions::HandleHLSLImplicitUse(SourceLocation Loc) {
  // Create a dummy use DeclRefExpr for all the `out` params.
  for (auto *P : vals.getHLSLOutParams()) {
    Value v = vals[P];
    if (!isUninitialized(v))
      continue;
    // Skip diagnostics for always uninitialized values if they are marked maybe
    // unused. This allows us to continue emitting other diagnostics for
    // sometimes uninitialized values.
    if (P->hasAttr<HLSLMaybeUnusedAttr>() && isAlwaysUninit(v))
      continue;
    // The DeclRefExpr only anchors the diagnostic location; VoidTy/VK_RValue
    // are placeholders, not the parameter's real type.
    auto *DRE = DeclRefExpr::Create(
        P->getASTContext(), NestedNameSpecifierLoc(), SourceLocation(),
        const_cast<VarDecl *>(P), false,
        DeclarationNameInfo(P->getDeclName(), Loc),
        P->getASTContext().VoidTy, ExprValueKind::VK_RValue);
    reportUse(DRE, P);
  }
}
// HLSL Change End - Treat `out` parameters as uninitialized values.
//------------------------------------------------------------------------====//
// High-level "driver" logic for uninitialized values analysis.
//====------------------------------------------------------------------------//
/// Run the transfer functions over a single CFG block: merge the values of
/// already-analyzed predecessors into the scratch vector, apply the transfer
/// function to each statement, then commit the scratch values. Returns true
/// if the block's value vector changed (i.e. successors must be revisited).
static bool runOnBlock(const CFGBlock *block, const CFG &cfg,
                       AnalysisDeclContext &ac, CFGBlockValues &vals,
                       const ClassifyRefs &classification,
                       llvm::BitVector &wasAnalyzed,
                       UninitVariablesHandler &handler,
                       const DeclContext &dc) { // HLSL Change - Add dc
  wasAnalyzed[block->getBlockID()] = true;
  vals.resetScratch();
  // Merge in values of predecessor blocks.
  bool isFirst = true;
  for (CFGBlock::const_pred_iterator I = block->pred_begin(),
       E = block->pred_end(); I != E; ++I) {
    const CFGBlock *pred = *I;
    if (!pred)
      continue;
    if (wasAnalyzed[pred->getBlockID()]) {
      vals.mergeIntoScratch(vals.getValueVector(pred), isFirst);
      isFirst = false;
    }
  }
  // Apply the transfer function.
  TransferFunctions tf(vals, cfg, block, ac, classification, handler);
  for (CFGBlock::const_iterator I = block->begin(), E = block->end();
       I != E; ++I) {
    if (Optional<CFGStmt> cs = I->getAs<CFGStmt>())
      tf.Visit(const_cast<Stmt*>(cs->getStmt()));
  }
  // HLSL Change Begin - Treat `out` parameters as uninitialized values.
  // If this block has no successors and does not have a terminator stmt which
  // we would have handled above.
  if (block->succ_size() == 0 && !block->getTerminator())
    tf.HandleHLSLImplicitUse(cast<Decl>(&dc)->getBody()->getLocEnd());
  // HLSL Change End - Treat `out` parameters as uninitialized values.
  return vals.updateValueVectorWithScratch(block);
}
/// PruneBlocksHandler is a special UninitVariablesHandler that is used
/// to detect when a CFGBlock has any *potential* use of an uninitialized
/// variable. It is mainly used to prune out work during the final
/// reporting pass.
namespace {
/// UninitVariablesHandler used during the fixed-point pass to record which
/// blocks had any potential uninitialized use, so the reporting pass only
/// revisits those blocks.
struct PruneBlocksHandler : public UninitVariablesHandler {
  PruneBlocksHandler(unsigned numBlocks)
    : hadUse(numBlocks, false), hadAnyUse(false),
      currentBlock(0) {}

  ~PruneBlocksHandler() override {}

  /// Records if a CFGBlock had a potential use of an uninitialized variable.
  llvm::BitVector hadUse;

  /// Records if any CFGBlock had a potential use of an uninitialized variable.
  bool hadAnyUse;

  /// The current block to scribble use information.
  unsigned currentBlock;

  void handleUseOfUninitVariable(const VarDecl *vd,
                                 const UninitUse &use) override {
    hadUse[currentBlock] = true;
    hadAnyUse = true;
  }

  /// Called when the uninitialized variable analysis detects the
  /// idiom 'int x = x'.  All other uses of 'x' within the initializer
  /// are handled by handleUseOfUninitVariable.
  void handleSelfInit(const VarDecl *vd) override {
    hadUse[currentBlock] = true;
    hadAnyUse = true;
  }
};
}
/// Entry point of the uninitialized-values analysis: classify references,
/// iterate the dataflow to a fixed point over the CFG, then re-run the
/// transfer functions on blocks that had potential uses to emit diagnostics.
void clang::runUninitializedVariablesAnalysis(
    const DeclContext &dc,
    const CFG &cfg,
    AnalysisDeclContext &ac,
    UninitVariablesHandler &handler,
    UninitVariablesAnalysisStats &stats) {
  CFGBlockValues vals(cfg);
  vals.computeSetOfDeclarations(dc);
  if (vals.hasNoDeclarations())
    return;

  stats.NumVariablesAnalyzed = vals.getNumEntries();

  // Precompute which expressions are uses and which are initializations.
  ClassifyRefs classification(ac);
  cfg.VisitBlockStmts(classification);

  // Mark all variables uninitialized at the entry.
  const CFGBlock &entry = cfg.getEntry();
  ValueVector &vec = vals.getValueVector(&entry);
  const unsigned n = vals.getNumEntries();
  for (unsigned j = 0; j < n ; ++j) {
    vec[j] = Uninitialized;
  }

  // Proceed with the worklist.
  DataflowWorklist worklist(cfg, *ac.getAnalysis<PostOrderCFGView>());
  llvm::BitVector previouslyVisited(cfg.getNumBlockIDs());
  worklist.enqueueSuccessors(&cfg.getEntry());
  llvm::BitVector wasAnalyzed(cfg.getNumBlockIDs(), false);
  wasAnalyzed[cfg.getEntry().getBlockID()] = true;
  PruneBlocksHandler PBH(cfg.getNumBlockIDs());

  while (const CFGBlock *block = worklist.dequeue()) {
    PBH.currentBlock = block->getBlockID();

    // Did the block change?
    // HLSL Change Begin - Add dc
    bool changed = runOnBlock(block, cfg, ac, vals, classification, wasAnalyzed,
                              PBH, dc);
    // HLSL Change End - Add dc
    ++stats.NumBlockVisits;
    if (changed || !previouslyVisited[block->getBlockID()])
      worklist.enqueueSuccessors(block);
    previouslyVisited[block->getBlockID()] = true;
  }

  if (!PBH.hadAnyUse)
    return;

  // Run through the blocks one more time, and report uninitialized variables.
  for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
    const CFGBlock *block = *BI;
    if (PBH.hadUse[block->getBlockID()]) {
      // HLSL Change Begin - Add dc
      runOnBlock(block, cfg, ac, vals, classification, wasAnalyzed, handler,
                 dc);
      // HLSL Change End - Add dc
      ++stats.NumBlockVisits;
    }
  }
}
UninitVariablesHandler::~UninitVariablesHandler() {}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp | //== PseudoConstantAnalysis.cpp - Find Pseudoconstants in the AST-*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file tracks the usage of variables in a Decl body to see if they are
// never written to, implying that they constant. This is useful in static
// analysis to see if a developer might have intended a variable to be const.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/PseudoConstantAnalysis.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <deque>
using namespace clang;
// The number of ValueDecls we want to keep track of by default (per-function)
#define VARDECL_SET_SIZE 256
typedef llvm::SmallPtrSet<const VarDecl*, VARDECL_SET_SIZE> VarDeclSet;
// The analysis is lazy: the result sets are allocated here but only populated
// by RunAnalysis() on the first query (see isPseudoConstant/wasReferenced).
PseudoConstantAnalysis::PseudoConstantAnalysis(const Stmt *DeclBody) :
  DeclBody(DeclBody), Analyzed(false) {
  NonConstantsImpl = new VarDeclSet;
  UsedVarsImpl = new VarDeclSet;
}
PseudoConstantAnalysis::~PseudoConstantAnalysis() {
  // The impl pointers are stored type-erased in the header; recover the
  // concrete set type before deleting.
  delete static_cast<VarDeclSet *>(NonConstantsImpl);
  delete static_cast<VarDeclSet *>(UsedVarsImpl);
}
// Returns true if the given ValueDecl is never written to in the given DeclBody
bool PseudoConstantAnalysis::isPseudoConstant(const VarDecl *VD) {
// Only local and static variables can be pseudoconstants
if (!VD->hasLocalStorage() && !VD->isStaticLocal())
return false;
if (!Analyzed) {
RunAnalysis();
Analyzed = true;
}
VarDeclSet *NonConstants = (VarDeclSet*)NonConstantsImpl;
return !NonConstants->count(VD);
}
// Returns true if the variable was used (self assignments don't count)
bool PseudoConstantAnalysis::wasReferenced(const VarDecl *VD) {
if (!Analyzed) {
RunAnalysis();
Analyzed = true;
}
VarDeclSet *UsedVars = (VarDeclSet*)UsedVarsImpl;
return UsedVars->count(VD);
}
// Returns the Decl named by a DeclRefExpr, or null for any other expression.
const Decl *PseudoConstantAnalysis::getDecl(const Expr *E) {
  const DeclRefExpr *Ref = dyn_cast<DeclRefExpr>(E);
  return Ref ? Ref->getDecl() : nullptr;
}
/// Worklist walk over DeclBody that populates the NonConstants set (variables
/// that are written to, incremented, address-taken, or bound to a reference)
/// and the UsedVars set (variables that are read, excluding self-assignment).
void PseudoConstantAnalysis::RunAnalysis() {
  std::deque<const Stmt *> WorkList;
  VarDeclSet *NonConstants = (VarDeclSet*)NonConstantsImpl;
  VarDeclSet *UsedVars = (VarDeclSet*)UsedVarsImpl;

  // Start with the top level statement of the function
  WorkList.push_back(DeclBody);

  while (!WorkList.empty()) {
    const Stmt *Head = WorkList.front();
    WorkList.pop_front();

    if (const Expr *Ex = dyn_cast<Expr>(Head))
      Head = Ex->IgnoreParenCasts();

    // Note: a 'continue' inside this switch resumes the while loop and
    // deliberately skips the generic child-enqueue at the bottom.
    switch (Head->getStmtClass()) {
    // Case 1: Assignment operators modifying VarDecls
    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(Head);
      // Look for a Decl on the LHS
      const Decl *LHSDecl = getDecl(BO->getLHS()->IgnoreParenCasts());
      if (!LHSDecl)
        break;

      // We found a binary operator with a DeclRefExpr on the LHS. We now check
      // for any of the assignment operators, implying that this Decl is being
      // written to.
      switch (BO->getOpcode()) {
      // Self-assignments don't count as use of a variable
      case BO_Assign: {
        // Look for a DeclRef on the RHS
        const Decl *RHSDecl = getDecl(BO->getRHS()->IgnoreParenCasts());

        // If the Decls match, we have self-assignment
        if (LHSDecl == RHSDecl)
          // Do not visit the children
          continue;

        LLVM_FALLTHROUGH; // HLSL Change
      }
      case BO_AddAssign:
      case BO_SubAssign:
      case BO_MulAssign:
      case BO_DivAssign:
      case BO_AndAssign:
      case BO_OrAssign:
      case BO_XorAssign:
      case BO_ShlAssign:
      case BO_ShrAssign: {
        const VarDecl *VD = dyn_cast<VarDecl>(LHSDecl);
        // The DeclRefExpr is being assigned to - mark it as non-constant
        if (VD)
          NonConstants->insert(VD);
        break;
      }

      default:
        break;
      }
      break;
    }

    // Case 2: Pre/post increment/decrement and address of
    case Stmt::UnaryOperatorClass: {
      const UnaryOperator *UO = cast<UnaryOperator>(Head);

      // Look for a DeclRef in the subexpression
      const Decl *D = getDecl(UO->getSubExpr()->IgnoreParenCasts());
      if (!D)
        break;

      // We found a unary operator with a DeclRef as a subexpression. We now
      // check for any of the increment/decrement operators, as well as
      // addressOf.
      switch (UO->getOpcode()) {
      case UO_PostDec:
      case UO_PostInc:
      case UO_PreDec:
      case UO_PreInc:
        // The DeclRef is being changed - mark it as non-constant
      case UO_AddrOf: {
        // If we are taking the address of the DeclRefExpr, assume it is
        // non-constant.
        const VarDecl *VD = dyn_cast<VarDecl>(D);
        if (VD)
          NonConstants->insert(VD);
        break;
      }

      default:
        break;
      }
      break;
    }

    // Case 3: Reference Declarations
    case Stmt::DeclStmtClass: {
      const DeclStmt *DS = cast<DeclStmt>(Head);
      // Iterate over each decl and see if any of them contain reference decls
      for (const auto *I : DS->decls()) {
        // We only care about VarDecls
        const VarDecl *VD = dyn_cast<VarDecl>(I);
        if (!VD)
          continue;

        // We found a VarDecl; make sure it is a reference type
        if (!VD->getType().getTypePtr()->isReferenceType())
          continue;

        // Try to find a Decl in the initializer
        // NOTE(review): getInit() is dereferenced without a null check —
        // presumably references always have initializers here; confirm.
        const Decl *D = getDecl(VD->getInit()->IgnoreParenCasts());
        if (!D)
          // NOTE(review): 'break' stops scanning the remaining decls of this
          // DeclStmt; 'continue' may have been intended — confirm.
          break;

        // If the reference is to another var, add the var to the non-constant
        // list
        if (const VarDecl *RefVD = dyn_cast<VarDecl>(D)) {
          NonConstants->insert(RefVD);
          continue;
        }
      }
      break;
    }

    // Case 4: Variable references
    case Stmt::DeclRefExprClass: {
      const DeclRefExpr *DR = cast<DeclRefExpr>(Head);
      if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
        // Add the Decl to the used list
        UsedVars->insert(VD);
        continue;
      }
      break;
    }

    // Case 5: Block expressions
    case Stmt::BlockExprClass: {
      const BlockExpr *B = cast<BlockExpr>(Head);
      // Add the body of the block to the list
      WorkList.push_back(B->getBody());
      continue;
    }

    default:
      break;
    } // switch (head->getStmtClass())

    // Add all substatements to the worklist
    for (const Stmt *SubStmt : Head->children())
      if (SubStmt)
        WorkList.push_back(SubStmt);
  } // while (!WorkList.empty())
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/AnalysisDeclContext.cpp | //== AnalysisDeclContext.cpp - Analysis context for Path Sens analysis -*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines AnalysisDeclContext, a class that manages the analysis context
// data for path sensitive analysis.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/AnalysisContext.h"
#include "BodyFarm.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/Analyses/PseudoConstantAnalysis.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Analysis/Support/BumpVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
typedef llvm::DenseMap<const void *, ManagedAnalysis *> ManagedAnalysisMap;
/// Construct an analysis context for decl \p d using the caller-supplied CFG
/// build options.
AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
                                         const Decl *d,
                                         const CFG::BuildOptions &buildOptions)
  : Manager(Mgr),
    D(d),
    cfgBuildOptions(buildOptions),
    forcedBlkExprs(nullptr),
    builtCFG(false),
    builtCompleteCFG(false),
    ReferencedBlockVars(nullptr),
    ManagedAnalyses(nullptr)
{
  // Point the build options at this context's forced-block-expression map.
  cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
}
/// Construct an analysis context for decl \p d with default-constructed CFG
/// build options.
AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
                                         const Decl *d)
  : Manager(Mgr),
    D(d),
    forcedBlkExprs(nullptr),
    builtCFG(false),
    builtCompleteCFG(false),
    ReferencedBlockVars(nullptr),
    ManagedAnalyses(nullptr)
{
  // Point the build options at this context's forced-block-expression map.
  cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
}
/// Configure the manager's shared CFG build options from the individual
/// feature flags; these options are used for every context it creates.
AnalysisDeclContextManager::AnalysisDeclContextManager(bool useUnoptimizedCFG,
                                                       bool addImplicitDtors,
                                                       bool addInitializers,
                                                       bool addTemporaryDtors,
                                                       bool synthesizeBodies,
                                                       bool addStaticInitBranch,
                                                       bool addCXXNewAllocator,
                                                       CodeInjector *injector)
  : Injector(injector), SynthesizeBodies(synthesizeBodies)
{
  // "Unoptimized" CFG == keep trivially-false edges.
  cfgBuildOptions.PruneTriviallyFalseEdges = !useUnoptimizedCFG;
  cfgBuildOptions.AddImplicitDtors = addImplicitDtors;
  cfgBuildOptions.AddInitializers = addInitializers;
  cfgBuildOptions.AddTemporaryDtors = addTemporaryDtors;
  cfgBuildOptions.AddStaticInitBranches = addStaticInitBranch;
  cfgBuildOptions.AddCXXNewAllocator = addCXXNewAllocator;
}
void AnalysisDeclContextManager::clear() {
  // Frees the mapped AnalysisDeclContext objects; the Decl keys are not owned.
  llvm::DeleteContainerSeconds(Contexts);
}
/// Lazily-created process-wide BodyFarm singleton; created on first use and
/// never destroyed. Note: C and injector are only honored on the first call.
static BodyFarm &getBodyFarm(ASTContext &C, CodeInjector *injector = nullptr) {
  static BodyFarm *BF = new BodyFarm(C, injector);
  return *BF;
}
/// Return the body of the underlying decl, consulting the BodyFarm for
/// functions and Objective-C methods when body synthesis is enabled.
/// \p IsAutosynthesized is set to true only when the returned body came from
/// the BodyFarm rather than from source.
Stmt *AnalysisDeclContext::getBody(bool &IsAutosynthesized) const {
  IsAutosynthesized = false;
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    Stmt *Body = FD->getBody();
    if (!Body && Manager && Manager->synthesizeBodies()) {
      Body = getBodyFarm(getASTContext(), Manager->Injector.get()).getBody(FD);
      if (Body)
        IsAutosynthesized = true;
    }
    return Body;
  }
  else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
    Stmt *Body = MD->getBody();
    if (!Body && Manager && Manager->synthesizeBodies()) {
      Body = getBodyFarm(getASTContext(), Manager->Injector.get()).getBody(MD);
      if (Body)
        IsAutosynthesized = true;
    }
    return Body;
  } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
    return BD->getBody();
  else if (const FunctionTemplateDecl *FunTmpl
           = dyn_cast_or_null<FunctionTemplateDecl>(D))
    return FunTmpl->getTemplatedDecl()->getBody();

  llvm_unreachable("unknown code decl");
}
// Convenience overload that discards the autosynthesized flag.
Stmt *AnalysisDeclContext::getBody() const {
  bool IsAutosynthesized;
  return getBody(IsAutosynthesized);
}
// Returns whether the body returned by getBody() was synthesized rather than
// written in source.
bool AnalysisDeclContext::isBodyAutosynthesized() const {
  bool IsAutosynthesized;
  (void)getBody(IsAutosynthesized);
  return IsAutosynthesized;
}
// True when the body was autosynthesized AND has a valid source location.
// NOTE(review): presumably bodies synthesized from a model file carry valid
// locations while internally synthesized ones do not — confirm.
bool AnalysisDeclContext::isBodyAutosynthesizedFromModelFile() const {
  bool Tmp;
  Stmt *Body = getBody(Tmp);
  return Tmp && Body->getLocStart().isValid();
}
const ImplicitParamDecl *AnalysisDeclContext::getSelfDecl() const {
  // Objective-C methods carry 'self' explicitly.
  if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
    return MD->getSelfDecl();
  const BlockDecl *BD = dyn_cast<BlockDecl>(D);
  if (!BD)
    return nullptr;
  // See if 'self' was captured by the block.
  for (const auto &Capture : BD->captures()) {
    const VarDecl *VD = Capture.getVariable();
    if (VD->getName() == "self")
      return dyn_cast<ImplicitParamDecl>(VD);
  }
  return nullptr;
}
void AnalysisDeclContext::registerForcedBlockExpression(const Stmt *stmt) {
if (!forcedBlkExprs)
forcedBlkExprs = new CFG::BuildOptions::ForcedBlkExprs();
// Default construct an entry for 'stmt'.
if (const Expr *e = dyn_cast<Expr>(stmt))
stmt = e->IgnoreParens();
(void) (*forcedBlkExprs)[stmt];
}
// Look up the CFG block recorded for a previously registered expression.
const CFGBlock *
AnalysisDeclContext::getBlockForRegisteredExpression(const Stmt *stmt) {
  assert(forcedBlkExprs);
  // Registration stored the paren-stripped form; mirror that here.
  if (const Expr *E = dyn_cast<Expr>(stmt))
    stmt = E->IgnoreParens();
  CFG::BuildOptions::ForcedBlkExprs::const_iterator Entry =
      forcedBlkExprs->find(stmt);
  assert(Entry != forcedBlkExprs->end());
  return Entry->second;
}
/// Add each synthetic statement in the CFG to the parent map, using the
/// source statement's parent.
static void addParentsForSyntheticStmts(const CFG *TheCFG, ParentMap &PM) {
  if (!TheCFG)
    return;
  CFG::synthetic_stmt_iterator I = TheCFG->synthetic_stmt_begin();
  CFG::synthetic_stmt_iterator E = TheCFG->synthetic_stmt_end();
  for (; I != E; ++I)
    PM.setParent(I->first, PM.getParent(I->second));
}
/// Lazily build (and cache) the pruned CFG for this context's body. When
/// trivially-false-edge pruning is disabled, delegates to the unoptimized
/// CFG so only one flavor is built.
CFG *AnalysisDeclContext::getCFG() {
  if (!cfgBuildOptions.PruneTriviallyFalseEdges)
    return getUnoptimizedCFG();
  if (!builtCFG) {
    cfg = CFG::buildCFG(D, getBody(), &D->getASTContext(), cfgBuildOptions);
    // Even when the cfg is not successfully built, we don't
    // want to try building it again.
    builtCFG = true;
    // Keep an existing parent map in sync with statements the CFG invented.
    if (PM)
      addParentsForSyntheticStmts(cfg.get(), *PM);
    // The Observer should only observe one build of the CFG.
    getCFGBuildOptions().Observer = nullptr;
  }
  return cfg.get();
}
/// Lazily build (and cache) the CFG with trivially-false edges preserved.
CFG *AnalysisDeclContext::getUnoptimizedCFG() {
  if (!builtCompleteCFG) {
    // Disable pruning for the duration of this build only.
    SaveAndRestore<bool> NotPrune(cfgBuildOptions.PruneTriviallyFalseEdges,
                                  false);
    completeCFG =
        CFG::buildCFG(D, getBody(), &D->getASTContext(), cfgBuildOptions);
    // Even when the cfg is not successfully built, we don't
    // want to try building it again.
    builtCompleteCFG = true;
    // Keep an existing parent map in sync with statements the CFG invented.
    if (PM)
      addParentsForSyntheticStmts(completeCFG.get(), *PM);
    // The Observer should only observe one build of the CFG.
    getCFGBuildOptions().Observer = nullptr;
  }
  return completeCFG.get();
}
// Lazily build the statement-to-CFG-block map. Returns null (uncached) when
// the CFG itself could not be built.
CFGStmtMap *AnalysisDeclContext::getCFGStmtMap() {
  if (!cfgStmtMap) {
    CFG *TheCFG = getCFG();
    if (!TheCFG)
      return nullptr;
    cfgStmtMap.reset(CFGStmtMap::Build(TheCFG, &getParentMap()));
  }
  return cfgStmtMap.get();
}
// Lazily construct the reverse-reachability analysis. Returns null
// (uncached) when the CFG itself could not be built.
CFGReverseBlockReachabilityAnalysis *AnalysisDeclContext::getCFGReachablityAnalysis() {
  if (!CFA) {
    CFG *TheCFG = getCFG();
    if (!TheCFG)
      return nullptr;
    CFA.reset(new CFGReverseBlockReachabilityAnalysis(*TheCFG));
  }
  return CFA.get();
}
// Debug helper: print the CFG to the default dump stream.
// NOTE(review): dereferences getCFG() unchecked; crashes if the CFG could
// not be built.
void AnalysisDeclContext::dumpCFG(bool ShowColors) {
  getCFG()->dump(getASTContext().getLangOpts(), ShowColors);
}
/// Lazily build the parent map over this context's body. Constructor member
/// initializers and synthetic statements from any already-built CFGs are
/// folded in so lookups on them succeed too.
ParentMap &AnalysisDeclContext::getParentMap() {
  if (!PM) {
    PM.reset(new ParentMap(getBody()));
    if (const CXXConstructorDecl *C = dyn_cast<CXXConstructorDecl>(getDecl())) {
      // Initializers are not part of the body; register them explicitly.
      for (const auto *I : C->inits()) {
        PM->addStmt(I->getInit());
      }
    }
    // Only consult CFGs that have already been built -- this avoids forcing
    // a CFG build just to populate the parent map.
    if (builtCFG)
      addParentsForSyntheticStmts(getCFG(), *PM);
    if (builtCompleteCFG)
      addParentsForSyntheticStmts(getUnoptimizedCFG(), *PM);
  }
  return *PM;
}
// Lazily construct the pseudo-constant analysis over this context's body.
PseudoConstantAnalysis *AnalysisDeclContext::getPseudoConstantAnalysis() {
  if (!PCA) {
    PCA.reset(new PseudoConstantAnalysis(getBody()));
  }
  return PCA.get();
}
/// Return the cached AnalysisDeclContext for \p D, creating it on first
/// request. For functions, the context is keyed on the redeclaration that
/// actually carries the body.
AnalysisDeclContext *AnalysisDeclContextManager::getContext(const Decl *D) {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // Calling 'hasBody' replaces 'FD' in place with the FunctionDecl
    // that has the body.
    FD->hasBody(FD);
    D = FD;
  }
  AnalysisDeclContext *&AC = Contexts[D];
  if (!AC)
    AC = new AnalysisDeclContext(this, D, cfgBuildOptions);
  return AC;
}
// Convenience forwarder: intern a StackFrameContext for this decl context
// via the manager's LocationContextManager.
const StackFrameContext *
AnalysisDeclContext::getStackFrame(LocationContext const *Parent, const Stmt *S,
                                   const CFGBlock *Blk, unsigned Idx) {
  return getLocationContextManager().getStackFrame(this, Parent, S, Blk, Idx);
}
// Convenience forwarder: intern a BlockInvocationContext for this decl
// context via the manager's LocationContextManager.
const BlockInvocationContext *
AnalysisDeclContext::getBlockInvocationContext(const LocationContext *parent,
                                               const clang::BlockDecl *BD,
                                               const void *ContextData) {
  return getLocationContextManager().getBlockInvocationContext(this, parent,
                                                               BD, ContextData);
}
// Access the manager's shared LocationContextManager; only valid for
// contexts created through an AnalysisDeclContextManager.
LocationContextManager & AnalysisDeclContext::getLocationContextManager() {
  assert(Manager &&
         "Cannot create LocationContexts without an AnalysisDeclContextManager!");
  return Manager->getLocationContextManager();
}
//===----------------------------------------------------------------------===//
// FoldingSet profiling.
//===----------------------------------------------------------------------===//
/// Fold the fields common to every LocationContext kind into \p ID for
/// FoldingSet lookup. The kind goes first so contexts of different kinds
/// with otherwise identical fields never collide.
void LocationContext::ProfileCommon(llvm::FoldingSetNodeID &ID,
                                    ContextKind ck,
                                    AnalysisDeclContext *ctx,
                                    const LocationContext *parent,
                                    const void *data) {
  ID.AddInteger(ck);
  ID.AddPointer(ctx);
  ID.AddPointer(parent);
  ID.AddPointer(data);
}
// Member Profile methods: fold each context's identity (including its
// kind-specific data) for FoldingSet lookup. These must stay in sync with
// the static Profile overloads used at creation time.
void StackFrameContext::Profile(llvm::FoldingSetNodeID &ID) {
  Profile(ID, getAnalysisDeclContext(), getParent(), CallSite, Block, Index);
}
void ScopeContext::Profile(llvm::FoldingSetNodeID &ID) {
  Profile(ID, getAnalysisDeclContext(), getParent(), Enter);
}
void BlockInvocationContext::Profile(llvm::FoldingSetNodeID &ID) {
  Profile(ID, getAnalysisDeclContext(), getParent(), BD, ContextData);
}
//===----------------------------------------------------------------------===//
// LocationContext creation.
//===----------------------------------------------------------------------===//
/// Generic uniquing helper: return the existing context keyed on
/// (ctx, parent, d), or allocate and register a new one in the FoldingSet.
template <typename LOC, typename DATA>
const LOC*
LocationContextManager::getLocationContext(AnalysisDeclContext *ctx,
                                           const LocationContext *parent,
                                           const DATA *d) {
  llvm::FoldingSetNodeID ID;
  LOC::Profile(ID, ctx, parent, d);
  void *InsertPos;
  LOC *L = cast_or_null<LOC>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
  if (!L) {
    L = new LOC(ctx, parent, d);
    Contexts.InsertNode(L, InsertPos);
  }
  return L;
}
/// Intern a StackFrameContext. Cannot reuse the getLocationContext template
/// because stack frames carry two extra key fields (block and index).
const StackFrameContext*
LocationContextManager::getStackFrame(AnalysisDeclContext *ctx,
                                      const LocationContext *parent,
                                      const Stmt *s,
                                      const CFGBlock *blk, unsigned idx) {
  llvm::FoldingSetNodeID ID;
  StackFrameContext::Profile(ID, ctx, parent, s, blk, idx);
  void *InsertPos;
  StackFrameContext *L =
   cast_or_null<StackFrameContext>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
  if (!L) {
    L = new StackFrameContext(ctx, parent, s, blk, idx);
    Contexts.InsertNode(L, InsertPos);
  }
  return L;
}
// Intern a ScopeContext via the generic uniquing helper.
const ScopeContext *
LocationContextManager::getScope(AnalysisDeclContext *ctx,
                                 const LocationContext *parent,
                                 const Stmt *s) {
  return getLocationContext<ScopeContext, Stmt>(ctx, parent, s);
}
/// Intern a BlockInvocationContext. Like getStackFrame, this has an extra
/// key field (ContextData) and so bypasses the getLocationContext template.
const BlockInvocationContext *
LocationContextManager::getBlockInvocationContext(AnalysisDeclContext *ctx,
                                                  const LocationContext *parent,
                                                  const BlockDecl *BD,
                                                  const void *ContextData) {
  llvm::FoldingSetNodeID ID;
  BlockInvocationContext::Profile(ID, ctx, parent, BD, ContextData);
  void *InsertPos;
  BlockInvocationContext *L =
    cast_or_null<BlockInvocationContext>(Contexts.FindNodeOrInsertPos(ID,
                                                                    InsertPos));
  if (!L) {
    L = new BlockInvocationContext(ctx, parent, BD, ContextData);
    Contexts.InsertNode(L, InsertPos);
  }
  return L;
}
//===----------------------------------------------------------------------===//
// LocationContext methods.
//===----------------------------------------------------------------------===//
// Walk outward through the parent chain and return the innermost enclosing
// stack frame, or null if there is none.
const StackFrameContext *LocationContext::getCurrentStackFrame() const {
  for (const LocationContext *Ctx = this; Ctx; Ctx = Ctx->getParent())
    if (const StackFrameContext *Frame = dyn_cast<StackFrameContext>(Ctx))
      return Frame;
  return nullptr;
}
// Whether this context lives in the analysis entry point's stack frame.
// NOTE(review): assumes a stack frame exists on the parent chain; a missing
// frame would dereference null here.
bool LocationContext::inTopFrame() const {
  return getCurrentStackFrame()->inTopFrame();
}
bool LocationContext::isParentOf(const LocationContext *LC) const {
do {
const LocationContext *Parent = LC->getParent();
if (Parent == this)
return true;
else
LC = Parent;
} while (LC);
return false;
}
/// Print one line per location context from this context outward: stack
/// frames are numbered (#0 innermost) and printed tersely; scope and block
/// contexts get a short annotation.
void LocationContext::dumpStack(raw_ostream &OS, StringRef Indent) const {
  ASTContext &Ctx = getAnalysisDeclContext()->getASTContext();
  PrintingPolicy PP(Ctx.getLangOpts());
  PP.TerseOutput = 1;   // print declarations without their bodies
  unsigned Frame = 0;
  for (const LocationContext *LCtx = this; LCtx; LCtx = LCtx->getParent()) {
    switch (LCtx->getKind()) {
    case StackFrame:
      OS << Indent << '#' << Frame++ << ' ';
      cast<StackFrameContext>(LCtx)->getDecl()->print(OS, PP);
      OS << '\n';
      break;
    case Scope:
      OS << Indent << " (scope)\n";
      break;
    case Block:
      OS << Indent << " (block context: "
                   << cast<BlockInvocationContext>(LCtx)->getContextData()
                   << ")\n";
      break;
    }
  }
}
// Debugger entry point: dump to stderr with no indent.
LLVM_DUMP_METHOD void LocationContext::dumpStack() const {
  dumpStack(llvm::errs());
}
//===----------------------------------------------------------------------===//
// Lazily generated map to query the external variables referenced by a Block.
//===----------------------------------------------------------------------===//
namespace {
/// Visitor that collects the non-local VarDecls referenced (directly or via
/// nested blocks) inside a block's body, appending each one to the supplied
/// vector exactly once.
/// Fix: removed the IgnoredContexts set -- it was inserted into in
/// VisitBlockExpr but never read anywhere, i.e. dead state.
class FindBlockDeclRefExprsVals : public StmtVisitor<FindBlockDeclRefExprsVals>{
  BumpVector<const VarDecl*> &BEVals;   // output: referenced variables
  BumpVectorContext &BC;                // allocator context for BEVals
  llvm::SmallPtrSet<const VarDecl*, 4> Visited;  // de-duplication set
public:
  FindBlockDeclRefExprsVals(BumpVector<const VarDecl*> &bevals,
                            BumpVectorContext &bc)
  : BEVals(bevals), BC(bc) {}
  // Generic traversal: recurse into all non-null children.
  void VisitStmt(Stmt *S) {
    for (Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }
  void VisitDeclRefExpr(DeclRefExpr *DR) {
    // Non-local variables are also directly modified.
    if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
      if (!VD->hasLocalStorage()) {
        // Record each variable only on its first sighting.
        if (Visited.insert(VD).second)
          BEVals.push_back(VD, BC);
      }
    }
  }
  void VisitBlockExpr(BlockExpr *BR) {
    // Blocks containing blocks can transitively capture more variables.
    Visit(BR->getBlockDecl()->getBody());
  }
  void VisitPseudoObjectExpr(PseudoObjectExpr *PE) {
    // Walk the semantic form, unwrapping opaque values to their source
    // expressions so references inside them are seen.
    for (PseudoObjectExpr::semantics_iterator it = PE->semantics_begin(),
         et = PE->semantics_end(); it != et; ++it) {
      Expr *Semantic = *it;
      if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Semantic))
        Semantic = OVE->getSourceExpr();
      Visit(Semantic);
    }
  }
};
} // end anonymous namespace
typedef BumpVector<const VarDecl*> DeclVec;

/// Compute (once) the variables referenced by block \p BD and stash the
/// result in the type-erased cache slot \p Vec, allocating from \p A.
static DeclVec* LazyInitializeReferencedDecls(const BlockDecl *BD,
                                              void *&Vec,
                                              llvm::BumpPtrAllocator &A) {
  if (Vec)
    return (DeclVec*) Vec;
  BumpVectorContext BC(A);
  // Placement-new the vector into bump-allocated storage; it is released
  // wholesale with the allocator, never destroyed individually.
  DeclVec *BV = (DeclVec*) A.Allocate<DeclVec>();
  new (BV) DeclVec(BC, 10);
  // Go through the capture list.
  for (const auto &CI : BD->captures()) {
    BV->push_back(CI.getVariable(), BC);
  }
  // Find the referenced global/static variables.
  FindBlockDeclRefExprsVals F(*BV, BC);
  F.Visit(BD->getBody());
  Vec = BV;
  return BV;
}
/// Return the externally referenced variables for \p BD: its explicit
/// captures plus globals/statics referenced in its body. Computed once per
/// block and cached in ReferencedBlockVars.
llvm::iterator_range<AnalysisDeclContext::referenced_decls_iterator>
AnalysisDeclContext::getReferencedBlockVars(const BlockDecl *BD) {
  if (!ReferencedBlockVars)
    ReferencedBlockVars = new llvm::DenseMap<const BlockDecl*,void*>();
  // The map slot is created here and filled lazily by the helper.
  const DeclVec *V =
      LazyInitializeReferencedDecls(BD, (*ReferencedBlockVars)[BD], A);
  return llvm::make_range(V->begin(), V->end());
}
/// Return (creating if needed) the cache slot for the analysis identified
/// by \p tag. ManagedAnalyses is stored type-erased as void* in the header,
/// so it is cast back to the concrete map type here.
ManagedAnalysis *&AnalysisDeclContext::getAnalysisImpl(const void *tag) {
  if (!ManagedAnalyses)
    ManagedAnalyses = new ManagedAnalysisMap();
  ManagedAnalysisMap *M = (ManagedAnalysisMap*) ManagedAnalyses;
  return (*M)[tag];
}
//===----------------------------------------------------------------------===//
// Cleanup.
//===----------------------------------------------------------------------===//
// Out-of-line so the vtable is emitted in this translation unit.
ManagedAnalysis::~ManagedAnalysis() {}
AnalysisDeclContext::~AnalysisDeclContext() {
  delete forcedBlkExprs;
  delete ReferencedBlockVars;
  // Release the managed analyses.
  if (ManagedAnalyses) {
    // Stored type-erased; cast back before deleting values and the map.
    ManagedAnalysisMap *M = (ManagedAnalysisMap*) ManagedAnalyses;
    llvm::DeleteContainerSeconds(*M);
    delete M;
  }
}
AnalysisDeclContextManager::~AnalysisDeclContextManager() {
  // Delete all cached AnalysisDeclContexts (map values).
  llvm::DeleteContainerSeconds(Contexts);
}
LocationContext::~LocationContext() {}
LocationContextManager::~LocationContextManager() {
  clear();
}
void LocationContextManager::clear() {
for (llvm::FoldingSet<LocationContext>::iterator I = Contexts.begin(),
E = Contexts.end(); I != E; ) {
LocationContext *LC = &*I;
++I;
delete LC;
}
Contexts.clear();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/ProgramPoint.cpp | //==- ProgramPoint.cpp - Program Points for Path-Sensitive Analysis -*- C++ -*-/
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface ProgramPoint, which identifies a
// distinct location in a function.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/ProgramPoint.h"
using namespace clang;
// Out-of-line definition; presumably anchors the class's vtable in this TU.
ProgramPointTag::~ProgramPointTag() {}
/// Factory: build the concrete ProgramPoint subclass matching kind \p K for
/// statement \p S. Only statement-based kinds are handled; any other kind
/// is a programming error.
ProgramPoint ProgramPoint::getProgramPoint(const Stmt *S, ProgramPoint::Kind K,
                                           const LocationContext *LC,
                                           const ProgramPointTag *tag){
  switch (K) {
    default:
      llvm_unreachable("Unhandled ProgramPoint kind");
    case ProgramPoint::PreStmtKind:
      return PreStmt(S, LC, tag);
    case ProgramPoint::PostStmtKind:
      return PostStmt(S, LC, tag);
    case ProgramPoint::PreLoadKind:
      return PreLoad(S, LC, tag);
    case ProgramPoint::PostLoadKind:
      return PostLoad(S, LC, tag);
    case ProgramPoint::PreStoreKind:
      return PreStore(S, LC, tag);
    case ProgramPoint::PostLValueKind:
      return PostLValue(S, LC, tag);
    case ProgramPoint::PostStmtPurgeDeadSymbolsKind:
      return PostStmtPurgeDeadSymbols(S, LC, tag);
    case ProgramPoint::PreStmtPurgeDeadSymbolsKind:
      return PreStmtPurgeDeadSymbols(S, LC, tag);
  }
}
// A tag whose description is "<provider> : <message>", precomputed at
// construction time.
SimpleProgramPointTag::SimpleProgramPointTag(StringRef MsgProvider,
                                             StringRef Msg)
  : Desc((MsgProvider + " : " + Msg).str()) {}
StringRef SimpleProgramPointTag::getTagDescription() const {
  return Desc;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/CodeInjector.cpp | //===-- CodeInjector.cpp ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/CodeInjector.h"
using namespace clang;
// Out-of-line definitions; presumably anchor CodeInjector's vtable here.
CodeInjector::CodeInjector() {}
CodeInjector::~CodeInjector() {}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/FormatString.cpp | // FormatString.cpp - Common stuff for handling printf/scanf formats -*- C++ -*-
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Shared details for processing format strings of printf and scanf
// (and friends).
//
//===----------------------------------------------------------------------===//
#include "FormatStringParsing.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetInfo.h"
using clang::analyze_format_string::ArgType;
using clang::analyze_format_string::FormatStringHandler;
using clang::analyze_format_string::FormatSpecifier;
using clang::analyze_format_string::LengthModifier;
using clang::analyze_format_string::OptionalAmount;
using clang::analyze_format_string::PositionContext;
using clang::analyze_format_string::ConversionSpecifier;
using namespace clang;
// Key function to FormatStringHandler.
FormatStringHandler::~FormatStringHandler() {}
//===----------------------------------------------------------------------===//
// Functions for parsing format strings components in both printf and
// scanf format strings.
//===----------------------------------------------------------------------===//
/// Parse an unsigned decimal amount at \p Beg. On return, \p Beg is updated
/// to point at the first unconsumed character (via UpdateOnReturn).
OptionalAmount
clang::analyze_format_string::ParseAmount(const char *&Beg, const char *E) {
  const char *I = Beg;
  UpdateOnReturn <const char*> UpdateBeg(Beg, I);
  // Scan a run of decimal digits, accumulating the value.
  unsigned Value = 0;
  bool SawDigit = false;
  while (I != E && *I >= '0' && *I <= '9') {
    SawDigit = true;
    Value = (Value * 10) + (*I - '0');
    ++I;
  }
  // Only a digit run *followed by another character* forms a constant
  // amount; digits running to the end of the buffer yield no amount, since
  // the enclosing specifier is necessarily incomplete.
  if (SawDigit && I != E)
    return OptionalAmount(OptionalAmount::Constant, Value, Beg, I - Beg,
                          false);
  return OptionalAmount();
}
/// Parse a field width / precision in non-positional form: either '*'
/// (amount supplied by the next vararg, consuming \p argIndex) or an
/// inline decimal constant.
/// NOTE(review): dereferences *Beg unconditionally -- callers presumably
/// guarantee Beg != E; confirm at call sites.
OptionalAmount
clang::analyze_format_string::ParseNonPositionAmount(const char *&Beg,
                                                     const char *E,
                                                     unsigned &argIndex) {
  if (*Beg == '*') {
    ++Beg;
    return OptionalAmount(OptionalAmount::Arg, argIndex++, Beg, 0, false);
  }
  return ParseAmount(Beg, E);
}
/// Parse a field width / precision that may use the positional form
/// "*<n>$" (amount taken from argument n). Plain constants fall through to
/// ParseAmount. Invalid or incomplete positional forms are reported via
/// \p H and yield an invalid amount.
OptionalAmount
clang::analyze_format_string::ParsePositionAmount(FormatStringHandler &H,
                                                  const char *Start,
                                                  const char *&Beg,
                                                  const char *E,
                                                  PositionContext p) {
  if (*Beg == '*') {
    const char *I = Beg + 1;
    const OptionalAmount &Amt = ParseAmount(I, E);
    // '*' must be followed by a position number.
    if (Amt.getHowSpecified() == OptionalAmount::NotSpecified) {
      H.HandleInvalidPosition(Beg, I - Beg, p);
      return OptionalAmount(false);
    }
    if (I == E) {
      // No more characters left?
      H.HandleIncompleteSpecifier(Start, E - Start);
      return OptionalAmount(false);
    }
    assert(Amt.getHowSpecified() == OptionalAmount::Constant);
    if (*I == '$') {
      // Handle positional arguments
      // Special case: '*0$', since this is an easy mistake.
      if (Amt.getConstantAmount() == 0) {
        H.HandleZeroPosition(Beg, I - Beg + 1);
        return OptionalAmount(false);
      }
      const char *Tmp = Beg;
      Beg = ++I;
      // Positions are 1-based in the format string, 0-based internally.
      return OptionalAmount(OptionalAmount::Arg, Amt.getConstantAmount() - 1,
                            Tmp, 0, true);
    }
    // Digits after '*' but no '$': not a valid positional amount.
    H.HandleInvalidPosition(Beg, I - Beg, p);
    return OptionalAmount(false);
  }
  return ParseAmount(Beg, E);
}
/// Parse the optional field width of a specifier into \p CS. Positional
/// forms are only accepted when \p argIndex is null (printf-style parsing).
/// Returns true on a hard parse error.
bool
clang::analyze_format_string::ParseFieldWidth(FormatStringHandler &H,
                                              FormatSpecifier &CS,
                                              const char *Start,
                                              const char *&Beg, const char *E,
                                              unsigned *argIndex) {
  // FIXME: Support negative field widths.
  if (argIndex) {
    CS.setFieldWidth(ParseNonPositionAmount(Beg, E, *argIndex));
    return false;
  }
  const OptionalAmount Amt = ParsePositionAmount(
      H, Start, Beg, E, analyze_format_string::FieldWidthPos);
  if (Amt.isInvalid())
    return true;
  CS.setFieldWidth(Amt);
  return false;
}
/// Parse an optional "%<n>$" argument position prefix into \p FS. Returns
/// true on a hard error; returns false both on success and when no
/// positional prefix is present (in which case \p Beg is left untouched).
bool
clang::analyze_format_string::ParseArgPosition(FormatStringHandler &H,
                                               FormatSpecifier &FS,
                                               const char *Start,
                                               const char *&Beg,
                                               const char *E) {
  const char *I = Beg;
  const OptionalAmount &Amt = ParseAmount(I, E);
  if (I == E) {
    // No more characters left?
    H.HandleIncompleteSpecifier(Start, E - Start);
    return true;
  }
  // NOTE: '&&' short-circuits, so I is only advanced past the candidate '$'
  // when a constant amount was actually parsed.
  if (Amt.getHowSpecified() == OptionalAmount::Constant && *(I++) == '$') {
    // Warn that positional arguments are non-standard.
    H.HandlePosition(Start, I - Start);
    // Special case: '%0$', since this is an easy mistake.
    if (Amt.getConstantAmount() == 0) {
      H.HandleZeroPosition(Start, I - Start);
      return true;
    }
    // Positions are 1-based in the string, 0-based internally.
    FS.setArgIndex(Amt.getConstantAmount() - 1);
    FS.setUsesPositionalArg();
    // Update the caller's pointer if we decided to consume
    // these characters.
    Beg = I;
    return false;
  }
  return false;
}
/// Parse an optional length modifier ('hh', 'h', 'l', 'll', 'j', 'z', 't',
/// 'L', 'q', scanf 'a'/'m', MS 'I'/'I32'/'I64', 'w') at \p I, advancing
/// \p I past it. Returns true and records the modifier on \p FS when one
/// was consumed; returns false otherwise (leaving \p I where it started).
bool
clang::analyze_format_string::ParseLengthModifier(FormatSpecifier &FS,
                                                  const char *&I,
                                                  const char *E,
                                                  const LangOptions &LO,
                                                  bool IsScanf) {
  LengthModifier::Kind lmKind = LengthModifier::None;
  const char *lmPosition = I;
  switch (*I) {
    default:
      return false;
    case 'h':
      ++I;
      // 'hh' if a second 'h' follows, else 'h'.
      lmKind = (I != E && *I == 'h') ? (++I, LengthModifier::AsChar)
                                     : LengthModifier::AsShort;
      break;
    case 'l':
      ++I;
      // 'll' if a second 'l' follows, else 'l'.
      lmKind = (I != E && *I == 'l') ? (++I, LengthModifier::AsLongLong)
                                     : LengthModifier::AsLong;
      break;
    case 'j': lmKind = LengthModifier::AsIntMax;     ++I; break;
    case 'z': lmKind = LengthModifier::AsSizeT;      ++I; break;
    case 't': lmKind = LengthModifier::AsPtrDiff;    ++I; break;
    case 'L': lmKind = LengthModifier::AsLongDouble; ++I; break;
    case 'q': lmKind = LengthModifier::AsQuad;       ++I; break;
    case 'a':
      if (IsScanf && !LO.C99 && !LO.CPlusPlus11) {
        // For scanf in C90, look at the next character to see if this should
        // be parsed as the GNU extension 'a' length modifier. If not, this
        // will be parsed as a conversion specifier.
        ++I;
        if (I != E && (*I == 's' || *I == 'S' || *I == '[')) {
          lmKind = LengthModifier::AsAllocate;
          break;
        }
        --I;
      }
      return false;
    case 'm':
      if (IsScanf) {
        lmKind = LengthModifier::AsMAllocate;
        ++I;
        break;
      }
      return false;
    // printf: AsInt64, AsInt32, AsInt3264
    // scanf:  AsInt64
    case 'I':
      if (I + 1 != E && I + 2 != E) {
        if (I[1] == '6' && I[2] == '4') {
          I += 3;
          lmKind = LengthModifier::AsInt64;
          break;
        }
        if (IsScanf)
          return false;

        if (I[1] == '3' && I[2] == '2') {
          I += 3;
          lmKind = LengthModifier::AsInt32;
          break;
        }
      }
      // Bare 'I' (MS extension): 32 bits on 32-bit targets, 64 on 64-bit.
      ++I;
      lmKind = LengthModifier::AsInt3264;
      break;
    case 'w':
      lmKind = LengthModifier::AsWide; ++I; break;
  }
  LengthModifier lm(lmPosition, lmKind);
  FS.setLengthModifier(lm);
  return true;
}
//===----------------------------------------------------------------------===//
// Methods on ArgType.
//===----------------------------------------------------------------------===//
/// Check whether \p argTy is an acceptable argument for this expected
/// ArgType. For scanf-style arguments (Ptr set) the check is applied to the
/// pointee, which must also be writable (non-const).
/// Fix: the CPointerTy case read "} if (...)" -- the intended "else" was
/// missing. Behavior was unchanged (the preceding branch returns), but the
/// chain is now spelled as the else-if it always was.
clang::analyze_format_string::ArgType::MatchKind
ArgType::matchesType(ASTContext &C, QualType argTy) const {
  if (Ptr) {
    // It has to be a pointer.
    const PointerType *PT = argTy->getAs<PointerType>();
    if (!PT)
      return NoMatch;

    // We cannot write through a const qualified pointer.
    if (PT->getPointeeType().isConstQualified())
      return NoMatch;

    argTy = PT->getPointeeType();
  }

  switch (K) {
    case InvalidTy:
      llvm_unreachable("ArgType must be valid");

    case UnknownTy:
      return Match;

    case AnyCharTy: {
      // Enums compare via their underlying integer type.
      if (const EnumType *ETy = argTy->getAs<EnumType>())
        argTy = ETy->getDecl()->getIntegerType();

      if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
        switch (BT->getKind()) {
          default:
            break;
          case BuiltinType::Char_S:
          case BuiltinType::SChar:
          case BuiltinType::UChar:
          case BuiltinType::Char_U:
            return Match;
        }
      return NoMatch;
    }

    case SpecificTy: {
      if (const EnumType *ETy = argTy->getAs<EnumType>())
        argTy = ETy->getDecl()->getIntegerType();
      argTy = C.getCanonicalType(argTy).getUnqualifiedType();

      if (T == argTy)
        return Match;
      // Check for "compatible types": same-width signed/unsigned pairs.
      if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
        switch (BT->getKind()) {
          default:
            break;
          case BuiltinType::Char_S:
          case BuiltinType::SChar:
          case BuiltinType::Char_U:
          case BuiltinType::UChar:
            return T == C.UnsignedCharTy || T == C.SignedCharTy ? Match
                                                                : NoMatch;
          case BuiltinType::Short:
            return T == C.UnsignedShortTy ? Match : NoMatch;
          case BuiltinType::UShort:
            return T == C.ShortTy ? Match : NoMatch;
          case BuiltinType::Int:
            return T == C.UnsignedIntTy ? Match : NoMatch;
          case BuiltinType::UInt:
            return T == C.IntTy ? Match : NoMatch;
          case BuiltinType::Long:
            return T == C.UnsignedLongTy ? Match : NoMatch;
          case BuiltinType::ULong:
            return T == C.LongTy ? Match : NoMatch;
          case BuiltinType::LongLong:
            return T == C.UnsignedLongLongTy ? Match : NoMatch;
          case BuiltinType::ULongLong:
            return T == C.LongLongTy ? Match : NoMatch;
        }
      return NoMatch;
    }

    case CStrTy: {
      const PointerType *PT = argTy->getAs<PointerType>();
      if (!PT)
        return NoMatch;
      QualType pointeeTy = PT->getPointeeType();
      if (const BuiltinType *BT = pointeeTy->getAs<BuiltinType>())
        switch (BT->getKind()) {
          case BuiltinType::Void:
          case BuiltinType::Char_U:
          case BuiltinType::UChar:
          case BuiltinType::Char_S:
          case BuiltinType::SChar:
            return Match;
          default:
            break;
        }

      return NoMatch;
    }

    case WCStrTy: {
      const PointerType *PT = argTy->getAs<PointerType>();
      if (!PT)
        return NoMatch;
      QualType pointeeTy =
        C.getCanonicalType(PT->getPointeeType()).getUnqualifiedType();
      return pointeeTy == C.getWideCharType() ? Match : NoMatch;
    }

    case WIntTy: {
      // Compare against the promoted form of the argument.
      QualType PromoArg =
        argTy->isPromotableIntegerType()
          ? C.getPromotedIntegerType(argTy) : argTy;

      QualType WInt = C.getCanonicalType(C.getWIntType()).getUnqualifiedType();
      PromoArg = C.getCanonicalType(PromoArg).getUnqualifiedType();

      // If the promoted argument is the corresponding signed type of the
      // wint_t type, then it should match.
      if (PromoArg->hasSignedIntegerRepresentation() &&
          C.getCorrespondingUnsignedType(PromoArg) == WInt)
        return Match;

      return WInt == PromoArg ? Match : NoMatch;
    }

    case CPointerTy:
      if (argTy->isVoidPointerType()) {
        return Match;
      } else if (argTy->isPointerType() || argTy->isObjCObjectPointerType() ||
                 argTy->isBlockPointerType() || argTy->isNullPtrType()) {
        // Other pointers are only a pedantic mismatch for 'void *'.
        return NoMatchPedantic;
      } else {
        return NoMatch;
      }

    case ObjCPointerTy: {
      if (argTy->getAs<ObjCObjectPointerType>() ||
          argTy->getAs<BlockPointerType>())
        return Match;

      // Handle implicit toll-free bridging.
      if (const PointerType *PT = argTy->getAs<PointerType>()) {
        // Things such as CFTypeRef are really just opaque pointers
        // to C structs representing CF types that can often be bridged
        // to Objective-C objects. Since the compiler doesn't know which
        // structs can be toll-free bridged, we just accept them all.
        QualType pointee = PT->getPointeeType();
        if (pointee->getAsStructureType() || pointee->isVoidType())
          return Match;
      }
      return NoMatch;
    }
  }

  llvm_unreachable("Invalid ArgType Kind!");
}
/// Return a concrete QualType that represents this ArgType for use in
/// diagnostics; wrapped in a pointer when Ptr is set (scanf arguments).
QualType ArgType::getRepresentativeType(ASTContext &C) const {
  QualType Res;
  switch (K) {
    case InvalidTy:
      llvm_unreachable("No representative type for Invalid ArgType");
    case UnknownTy:
      llvm_unreachable("No representative type for Unknown ArgType");
    case AnyCharTy:
      Res = C.CharTy;
      break;
    case SpecificTy:
      Res = T;
      break;
    case CStrTy:
      Res = C.getPointerType(C.CharTy);
      break;
    case WCStrTy:
      Res = C.getPointerType(C.getWideCharType());
      break;
    case ObjCPointerTy:
      Res = C.ObjCBuiltinIdTy;
      break;
    case CPointerTy:
      Res = C.VoidPtrTy;
      break;
    case WIntTy: {
      Res = C.getWIntType();
      break;
    }
  }

  if (Ptr)
    Res = C.getPointerType(Res);
  return Res;
}
/// Render this ArgType's name for a diagnostic: "'alias' (aka 'type')" when
/// a distinct specific name (e.g. "size_t") exists, otherwise "'type'".
std::string ArgType::getRepresentativeTypeName(ASTContext &C) const {
  std::string S = getRepresentativeType(C).getAsString();

  std::string Alias;
  if (Name) {
    // Use a specific name for this type, e.g. "size_t".
    Alias = Name;
    if (Ptr)
      // If ArgType is actually a pointer to T, append an asterisk.
      Alias += (Alias.back() == '*') ? "*" : " *";
    // If Alias is the same as the underlying type, e.g. wchar_t, then drop it.
    if (S == Alias)
      Alias.clear();
  }

  if (Alias.empty())
    return std::string("'") + S + "'";
  return std::string("'") + Alias + "' (aka '" + S + "')";
}
//===----------------------------------------------------------------------===//
// Methods on OptionalAmount.
//===----------------------------------------------------------------------===//
// A '*' width/precision always consumes an 'int' argument.
ArgType
analyze_format_string::OptionalAmount::getArgType(ASTContext &Ctx) const {
  return Ctx.IntTy;
}
//===----------------------------------------------------------------------===//
// Methods on LengthModifier.
//===----------------------------------------------------------------------===//
/// Render the length modifier exactly as it is spelled in a format string.
const char *
analyze_format_string::LengthModifier::toString() const {
  switch (kind) {
  case AsChar:
    return "hh";
  case AsShort:
    return "h";
  case AsLong: // or AsWideChar
    return "l";
  case AsLongLong:
    return "ll";
  case AsQuad:
    return "q";
  case AsIntMax:
    return "j";
  case AsSizeT:
    return "z";
  case AsPtrDiff:
    return "t";
  case AsInt32:
    return "I32";
  case AsInt3264:
    return "I";
  case AsInt64:
    return "I64";
  case AsLongDouble:
    return "L";
  case AsAllocate:
    return "a";
  case AsMAllocate:
    return "m";
  case AsWide:
    return "w";
  case None:
    return "";
  }
  // Unreachable for in-range kinds; keeps compilers without exhaustive
  // switch analysis quiet.
  return nullptr;
}
//===----------------------------------------------------------------------===//
// Methods on ConversionSpecifier.
//===----------------------------------------------------------------------===//
/// Render the conversion character as written in a format string, or null
/// for an invalid specifier.
const char *ConversionSpecifier::toString() const {
  switch (kind) {
  case dArg: return "d";
  case DArg: return "D";
  case iArg: return "i";
  case oArg: return "o";
  case OArg: return "O";
  case uArg: return "u";
  case UArg: return "U";
  case xArg: return "x";
  case XArg: return "X";
  case fArg: return "f";
  case FArg: return "F";
  case eArg: return "e";
  case EArg: return "E";
  case gArg: return "g";
  case GArg: return "G";
  case aArg: return "a";
  case AArg: return "A";
  case cArg: return "c";
  case sArg: return "s";
  case pArg: return "p";
  case nArg: return "n";
  case PercentArg:  return "%";
  case ScanListArg: return "[";
  case InvalidSpecifier: return nullptr;

  // POSIX unicode extensions.
  case CArg: return "C";
  case SArg: return "S";

  // Objective-C specific specifiers.
  case ObjCObjArg: return "@";

  // FreeBSD kernel specific specifiers.
  case FreeBSDbArg: return "b";
  case FreeBSDDArg: return "D";
  case FreeBSDrArg: return "r";
  case FreeBSDyArg: return "y";

  // GlibC specific specifiers.
  case PrintErrno: return "m";

  // MS specific specifiers.
  case ZArg: return "Z";
  }
  // Unreachable for in-range kinds.
  return nullptr;
}
/// Map a non-standard specifier to its standard equivalent ('D'->'d',
/// 'U'->'u', 'O'->'o'); returns None for specifiers with no such mapping.
Optional<ConversionSpecifier>
ConversionSpecifier::getStandardSpecifier() const {
  ConversionSpecifier::Kind NewKind;

  switch (getKind()) {
  case DArg: NewKind = dArg; break;
  case UArg: NewKind = uArg; break;
  case OArg: NewKind = oArg; break;
  default:
    return None;
  }

  // Clone this specifier, swapping in the standard kind.
  ConversionSpecifier Fixed(*this);
  Fixed.setKind(NewKind);
  return Fixed;
}
//===----------------------------------------------------------------------===//
// Methods on OptionalAmount.
//===----------------------------------------------------------------------===//
/// Print this amount as it would appear in a format string: nothing for
/// invalid/unspecified, "*" or "*<n>$" for argument-supplied amounts, the
/// literal value for constants; a '.' prefix is emitted for precisions.
void OptionalAmount::toString(raw_ostream &os) const {
  if (hs == Invalid || hs == NotSpecified)
    return;

  if (UsesDotPrefix)
    os << ".";

  if (hs == Arg) {
    if (usesPositionalArg())
      os << "*" << getPositionalArgIndex() << "$";
    else
      os << "*";
  } else { // Constant
    os << amt;
  }
}
/// Check whether this specifier's length modifier is compatible with its
/// conversion specifier, honoring target-specific extensions (MSVCRT,
/// FreeBSD kernel %r/%y, GNU libc %Ld).
bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target) const {
  switch (LM.getKind()) {
    case LengthModifier::None:
      return true;

    // Handle most integer flags
    case LengthModifier::AsShort:
      // MSVCRT accepts 'h' on character/string conversions as well.
      if (Target.getTriple().isOSMSVCRT()) {
        switch (CS.getKind()) {
          case ConversionSpecifier::cArg:
          case ConversionSpecifier::CArg:
          case ConversionSpecifier::sArg:
          case ConversionSpecifier::SArg:
          case ConversionSpecifier::ZArg:
            return true;
          default:
            break;
        }
      }
      // Fall through.
    case LengthModifier::AsChar:
    case LengthModifier::AsLongLong:
    case LengthModifier::AsQuad:
    case LengthModifier::AsIntMax:
    case LengthModifier::AsSizeT:
    case LengthModifier::AsPtrDiff:
      switch (CS.getKind()) {
        case ConversionSpecifier::dArg:
        case ConversionSpecifier::DArg:
        case ConversionSpecifier::iArg:
        case ConversionSpecifier::oArg:
        case ConversionSpecifier::OArg:
        case ConversionSpecifier::uArg:
        case ConversionSpecifier::UArg:
        case ConversionSpecifier::xArg:
        case ConversionSpecifier::XArg:
        case ConversionSpecifier::nArg:
          return true;
        case ConversionSpecifier::FreeBSDrArg:
        case ConversionSpecifier::FreeBSDyArg:
          return Target.getTriple().isOSFreeBSD();
        default:
          return false;
      }

    // Handle 'l' flag
    case LengthModifier::AsLong: // or AsWideChar
      switch (CS.getKind()) {
        case ConversionSpecifier::dArg:
        case ConversionSpecifier::DArg:
        case ConversionSpecifier::iArg:
        case ConversionSpecifier::oArg:
        case ConversionSpecifier::OArg:
        case ConversionSpecifier::uArg:
        case ConversionSpecifier::UArg:
        case ConversionSpecifier::xArg:
        case ConversionSpecifier::XArg:
        case ConversionSpecifier::aArg:
        case ConversionSpecifier::AArg:
        case ConversionSpecifier::fArg:
        case ConversionSpecifier::FArg:
        case ConversionSpecifier::eArg:
        case ConversionSpecifier::EArg:
        case ConversionSpecifier::gArg:
        case ConversionSpecifier::GArg:
        case ConversionSpecifier::nArg:
        case ConversionSpecifier::cArg:
        case ConversionSpecifier::sArg:
        case ConversionSpecifier::ScanListArg:
        case ConversionSpecifier::ZArg:
          return true;
        case ConversionSpecifier::FreeBSDrArg:
        case ConversionSpecifier::FreeBSDyArg:
          return Target.getTriple().isOSFreeBSD();
        default:
          return false;
      }

    case LengthModifier::AsLongDouble:
      switch (CS.getKind()) {
        case ConversionSpecifier::aArg:
        case ConversionSpecifier::AArg:
        case ConversionSpecifier::fArg:
        case ConversionSpecifier::FArg:
        case ConversionSpecifier::eArg:
        case ConversionSpecifier::EArg:
        case ConversionSpecifier::gArg:
        case ConversionSpecifier::GArg:
          return true;
        // GNU libc extension.
        case ConversionSpecifier::dArg:
        case ConversionSpecifier::iArg:
        case ConversionSpecifier::oArg:
        case ConversionSpecifier::uArg:
        case ConversionSpecifier::xArg:
        case ConversionSpecifier::XArg:
          return !Target.getTriple().isOSDarwin() &&
                 !Target.getTriple().isOSWindows();
        default:
          return false;
      }

    case LengthModifier::AsAllocate:
      switch (CS.getKind()) {
        case ConversionSpecifier::sArg:
        case ConversionSpecifier::SArg:
        case ConversionSpecifier::ScanListArg:
          return true;
        default:
          return false;
      }

    case LengthModifier::AsMAllocate:
      switch (CS.getKind()) {
        case ConversionSpecifier::cArg:
        case ConversionSpecifier::CArg:
        case ConversionSpecifier::sArg:
        case ConversionSpecifier::SArg:
        case ConversionSpecifier::ScanListArg:
          return true;
        default:
          return false;
      }
    case LengthModifier::AsInt32:
    case LengthModifier::AsInt3264:
    case LengthModifier::AsInt64:
      // Microsoft I/I32/I64 modifiers: integer conversions on MSVCRT only.
      switch (CS.getKind()) {
        case ConversionSpecifier::dArg:
        case ConversionSpecifier::iArg:
        case ConversionSpecifier::oArg:
        case ConversionSpecifier::uArg:
        case ConversionSpecifier::xArg:
        case ConversionSpecifier::XArg:
          return Target.getTriple().isOSMSVCRT();
        default:
          return false;
      }
    case LengthModifier::AsWide:
      // Microsoft 'w': character/string conversions on MSVCRT only.
      switch (CS.getKind()) {
        case ConversionSpecifier::cArg:
        case ConversionSpecifier::CArg:
        case ConversionSpecifier::sArg:
        case ConversionSpecifier::SArg:
        case ConversionSpecifier::ZArg:
          return Target.getTriple().isOSMSVCRT();
        default:
          return false;
      }
  }
  llvm_unreachable("Invalid LengthModifier Kind!");
}
// Returns true when the length modifier is one defined by the C standard.
// Extension modifiers (POSIX allocation, BSD quad, and the Microsoft
// fixed-width/wide variants) are not standard.
bool FormatSpecifier::hasStandardLengthModifier() const {
  switch (LM.getKind()) {
  // Extensions.
  case LengthModifier::AsAllocate:
  case LengthModifier::AsMAllocate:
  case LengthModifier::AsQuad:
  case LengthModifier::AsInt32:
  case LengthModifier::AsInt3264:
  case LengthModifier::AsInt64:
  case LengthModifier::AsWide:
    return false;
  // Everything the C standard defines, including no modifier at all.
  case LengthModifier::None:
  case LengthModifier::AsChar:
  case LengthModifier::AsShort:
  case LengthModifier::AsLong:
  case LengthModifier::AsLongLong:
  case LengthModifier::AsIntMax:
  case LengthModifier::AsSizeT:
  case LengthModifier::AsPtrDiff:
  case LengthModifier::AsLongDouble:
    return true;
  }
  llvm_unreachable("Invalid LengthModifier Kind!");
}
// Returns true when the conversion specifier is defined by the C standard.
// %C/%S count as standard only in the Objective-C dialects; the FreeBSD
// kernel specifiers and legacy %D/%O/%U/%Z forms never do.
bool FormatSpecifier::hasStandardConversionSpecifier(
    const LangOptions &LangOpt) const {
  switch (CS.getKind()) {
  // Non-standard specifiers.
  case ConversionSpecifier::InvalidSpecifier:
  case ConversionSpecifier::FreeBSDbArg:
  case ConversionSpecifier::FreeBSDDArg:
  case ConversionSpecifier::FreeBSDrArg:
  case ConversionSpecifier::FreeBSDyArg:
  case ConversionSpecifier::PrintErrno:
  case ConversionSpecifier::DArg:
  case ConversionSpecifier::OArg:
  case ConversionSpecifier::UArg:
  case ConversionSpecifier::ZArg:
    return false;
  // Standard only when Objective-C is enabled.
  case ConversionSpecifier::CArg:
  case ConversionSpecifier::SArg:
    return LangOpt.ObjC1 || LangOpt.ObjC2;
  // Standard specifiers.
  case ConversionSpecifier::cArg:
  case ConversionSpecifier::dArg:
  case ConversionSpecifier::iArg:
  case ConversionSpecifier::oArg:
  case ConversionSpecifier::uArg:
  case ConversionSpecifier::xArg:
  case ConversionSpecifier::XArg:
  case ConversionSpecifier::fArg:
  case ConversionSpecifier::FArg:
  case ConversionSpecifier::eArg:
  case ConversionSpecifier::EArg:
  case ConversionSpecifier::gArg:
  case ConversionSpecifier::GArg:
  case ConversionSpecifier::aArg:
  case ConversionSpecifier::AArg:
  case ConversionSpecifier::sArg:
  case ConversionSpecifier::pArg:
  case ConversionSpecifier::nArg:
  case ConversionSpecifier::ObjCObjArg:
  case ConversionSpecifier::ScanListArg:
  case ConversionSpecifier::PercentArg:
    return true;
  }
  llvm_unreachable("Invalid ConversionSpecifier Kind!");
}
// The 'L' length modifier is only standard with floating-point conversions;
// %Ld/%Li/%Lo/%Lu/%Lx/%LX are a GNU extension. Every other combination is
// accepted here (and vetted elsewhere by the other hasStandard* checks).
bool FormatSpecifier::hasStandardLengthConversionCombination() const {
  if (LM.getKind() != LengthModifier::AsLongDouble)
    return true;
  switch (CS.getKind()) {
  case ConversionSpecifier::dArg:
  case ConversionSpecifier::iArg:
  case ConversionSpecifier::oArg:
  case ConversionSpecifier::uArg:
  case ConversionSpecifier::xArg:
  case ConversionSpecifier::XArg:
    return false;
  default:
    return true;
  }
}
// Suggests a portable replacement for a non-standard length modifier on an
// integer conversion: both %L... (GNU) and %q... (BSD) should be spelled
// with 'll'. Returns None when no correction applies.
Optional<LengthModifier> FormatSpecifier::getCorrectedLengthModifier() const {
  if (!CS.isAnyIntArg() && CS.getKind() != ConversionSpecifier::nArg)
    return None;
  if (LM.getKind() != LengthModifier::AsLongDouble &&
      LM.getKind() != LengthModifier::AsQuad)
    return None;
  LengthModifier FixedLM(LM);
  FixedLM.setKind(LengthModifier::AsLongLong);
  return FixedLM;
}
// Walks a chain of typedefs looking for one of the well-known C integer
// typedef names and, when one is found, rewrites LM to the matching length
// modifier (e.g. size_t -> 'z'). Returns false if no known name appears
// anywhere in the chain.
bool FormatSpecifier::namedTypeToLengthModifier(QualType QT,
                                                LengthModifier &LM) {
  assert(isa<TypedefType>(QT) && "Expected a TypedefType");
  static const struct {
    const char *Name;
    LengthModifier::Kind Kind;
  } KnownTypes[] = {
      {"size_t", LengthModifier::AsSizeT},
      // ssize_t is not C99, but is common in Unix.
      {"ssize_t", LengthModifier::AsSizeT},
      {"intmax_t", LengthModifier::AsIntMax},
      {"uintmax_t", LengthModifier::AsIntMax},
      {"ptrdiff_t", LengthModifier::AsPtrDiff},
  };
  for (const TypedefNameDecl *Typedef = cast<TypedefType>(QT)->getDecl();;) {
    StringRef Name = Typedef->getIdentifier()->getName();
    for (const auto &KT : KnownTypes) {
      if (Name == KT.Name) {
        LM.setKind(KT.Kind);
        return true;
      }
    }
    // Look through one more level of typedef, if any.
    QualType Underlying = Typedef->getUnderlyingType();
    if (!isa<TypedefType>(Underlying))
      return false;
    Typedef = cast<TypedefType>(Underlying)->getDecl();
  }
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/ThreadSafetyTIL.cpp | //===- ThreadSafetyTIL.cpp -------------------------------------*- C++ --*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT in the llvm repository for details.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
using namespace clang;
using namespace threadSafety;
using namespace til;
// Returns the printable spelling of a unary TIL opcode. Returns the empty
// string (rather than asserting) for an out-of-range value.
StringRef til::getUnaryOpcodeString(TIL_UnaryOpcode Op) {
  switch (Op) {
    case UOP_Minus: return "-";
    case UOP_BitNot: return "~";
    case UOP_LogicNot: return "!";
  }
  return "";
}
// Returns the printable spelling of a binary TIL opcode. Returns the empty
// string (rather than asserting) for an out-of-range value.
StringRef til::getBinaryOpcodeString(TIL_BinaryOpcode Op) {
  switch (Op) {
    case BOP_Mul: return "*";
    case BOP_Div: return "/";
    case BOP_Rem: return "%";
    case BOP_Add: return "+";
    case BOP_Sub: return "-";
    case BOP_Shl: return "<<";
    case BOP_Shr: return ">>";
    case BOP_BitAnd: return "&";
    case BOP_BitXor: return "^";
    case BOP_BitOr: return "|";
    case BOP_Eq: return "==";
    case BOP_Neq: return "!=";
    case BOP_Lt: return "<";
    case BOP_Leq: return "<=";
    case BOP_LogicAnd: return "&&";
    case BOP_LogicOr: return "||";
  }
  return "";
}
// Forces evaluation of this lazily-computed expression: runs compute() once,
// caches the result, and returns it. Status is FS_evaluating for the
// duration of compute() and FS_done afterwards.
SExpr* Future::force() {
  Status = FS_evaluating;
  Result = compute();
  Status = FS_done;
  return Result;
}
// Registers Pred as a new predecessor of this block and returns its index in
// the predecessor list. Every Phi argument grows a matching (initially null)
// value slot so Phi operands stay aligned with the predecessor array.
unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
  unsigned Idx = Predecessors.size();
  Predecessors.reserveCheck(1, Arena);
  Predecessors.push_back(Pred);
  for (SExpr *E : Args) {
    auto *Ph = dyn_cast<Phi>(E);
    if (!Ph)
      continue;
    Ph->values().reserveCheck(1, Arena);
    Ph->values().push_back(nullptr);
  }
  return Idx;
}
// Pre-allocates room for NumPreds predecessors, including the value arrays
// of all Phi arguments, so subsequent addPredecessor calls don't reallocate.
void BasicBlock::reservePredecessors(unsigned NumPreds) {
  Predecessors.reserve(NumPreds, Arena);
  for (SExpr *E : Args) {
    auto *Ph = dyn_cast<Phi>(E);
    if (Ph)
      Ph->values().reserve(NumPreds, Arena);
  }
}
// If E is a variable, then trace back through any aliases or redundant
// Phi nodes to find the canonical definition. Const version: never mutates
// the graph (compare simplifyToCanonicalVal below).
const SExpr *til::getCanonicalVal(const SExpr *E) {
  while (true) {
    // Let-bound variables are aliases for their definition.
    if (auto *V = dyn_cast<Variable>(E)) {
      if (V->kind() == Variable::VK_Let) {
        E = V->definition();
        continue;
      }
    }
    // A Phi already known to have a single distinct value is redundant.
    if (const Phi *Ph = dyn_cast<Phi>(E)) {
      if (Ph->status() == Phi::PH_SingleVal) {
        E = Ph->values()[0];
        continue;
      }
    }
    break;
  }
  return E;
}
// If E is a variable, then trace back through any aliases or redundant
// Phi nodes to find the canonical definition.
// The non-const version will simplify incomplete Phi nodes.
SExpr *til::simplifyToCanonicalVal(SExpr *E) {
  while (true) {
    if (auto *V = dyn_cast<Variable>(E)) {
      if (V->kind() != Variable::VK_Let)
        return V;
      // Eliminate redundant variables, e.g. x = y, or x = 5,
      // but keep anything more complicated.
      if (til::ThreadSafetyTIL::isTrivial(V->definition())) {
        E = V->definition();
        continue;
      }
      return V;
    }
    if (auto *Ph = dyn_cast<Phi>(E)) {
      // Resolve the Phi's status first, so PH_SingleVal below is accurate.
      if (Ph->status() == Phi::PH_Incomplete)
        simplifyIncompleteArg(Ph);
      // Eliminate redundant Phi nodes.
      if (Ph->status() == Phi::PH_SingleVal) {
        E = Ph->values()[0];
        continue;
      }
    }
    return E;
  }
}
// Trace the arguments of an incomplete Phi node to see if they have the same
// canonical definition. If so, mark the Phi node as redundant.
// getCanonicalVal() will recursively call simplifyIncompletePhi().
void til::simplifyIncompleteArg(til::Phi *Ph) {
  assert(Ph && Ph->status() == Phi::PH_Incomplete);
  // eliminate infinite recursion -- assume that this node is not redundant.
  // (simplifyToCanonicalVal below may reach Ph again through a cycle.)
  Ph->setStatus(Phi::PH_MultiVal);
  SExpr *E0 = simplifyToCanonicalVal(Ph->values()[0]);
  for (unsigned i=1, n=Ph->values().size(); i<n; ++i) {
    SExpr *Ei = simplifyToCanonicalVal(Ph->values()[i]);
    if (Ei == Ph)
      continue;  // Recursive reference to itself.  Don't count.
    if (Ei != E0) {
      return;    // Status is already set to MultiVal.
    }
  }
  // All (non-self) arguments resolved to the same value.
  Ph->setStatus(Phi::PH_SingleVal);
}
// Renumbers the arguments and instructions to have unique, sequential IDs.
int BasicBlock::renumberInstrs(int ID) {
for (auto *Arg : Args)
Arg->setID(this, ID++);
for (auto *Instr : Instrs)
Instr->setID(this, ID++);
TermInstr->setID(this, ID++);
return ID;
}
// Sorts the CFGs blocks using a reverse post-order depth-first traversal.
// Each block will be written into the Blocks array in order, and its BlockID
// will be set to the index in the array.  Sorting should start from the entry
// block, and ID should be the total number of blocks.
int BasicBlock::topologicalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
  if (Visited) return ID;
  Visited = true;
  for (auto *Block : successors())
    ID = Block->topologicalSort(Blocks, ID);
  // set ID and update block array in place.
  // We may lose pointers to unreachable blocks.
  assert(ID > 0);
  BlockID = --ID;
  Blocks[BlockID] = this;
  // The returned value is the number of IDs still unassigned, i.e. the
  // number of blocks not reached by this traversal (see computeNormalForm).
  return ID;
}
// Performs a reverse topological traversal, starting from the exit block and
// following back-edges.  The dominator is serialized before any predecessors,
// which guarantees that all blocks are serialized after their dominator and
// before their post-dominator (because it's a reverse topological traversal).
// ID should be initially set to 0.
//
// This sort assumes that (1) dominators have been computed, (2) there are no
// critical edges, and (3) the entry block is reachable from the exit block
// and no blocks are accessable via traversal of back-edges from the exit that
// weren't accessable via forward edges from the entry.
int BasicBlock::topologicalFinalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
  // Visited is assumed to have been set by the topologicalSort.  This pass
  // assumes !Visited means that we've visited this node before.
  if (!Visited) return ID;
  Visited = false;
  // Serialize the immediate dominator and all predecessors first.
  if (DominatorNode.Parent)
    ID = DominatorNode.Parent->topologicalFinalSort(Blocks, ID);
  for (auto *Pred : Predecessors)
    ID = Pred->topologicalFinalSort(Blocks, ID);
  assert(static_cast<size_t>(ID) < Blocks.size());
  BlockID = ID++;
  Blocks[BlockID] = this;
  return ID;
}
// Computes the immediate dominator of the current block.  Assumes that all of
// its predecessors have already computed their dominators.  This is achieved
// by visiting the nodes in topological order.
void BasicBlock::computeDominator() {
  BasicBlock *Candidate = nullptr;
  // Walk backwards from each predecessor to find the common dominator node.
  for (auto *Pred : Predecessors) {
    // Skip back-edges (predecessors with a later or equal BlockID).
    if (Pred->BlockID >= BlockID) continue;
    // If we don't have a candidate for the dominator yet, take this one.
    if (Candidate == nullptr) {
      Candidate = Pred;
      continue;
    }
    // Walk the alternate and current candidate back to find a common ancestor.
    auto *Alternate = Pred;
    while (Alternate != Candidate) {
      if (Candidate->BlockID > Alternate->BlockID)
        Candidate = Candidate->DominatorNode.Parent;
      else
        Alternate = Alternate->DominatorNode.Parent;
    }
  }
  DominatorNode.Parent = Candidate;
  DominatorNode.SizeOfSubTree = 1;   // Subtree sizes accumulated later.
}
// Computes the immediate post-dominator of the current block.  Assumes that all
// of its successors have already computed their post-dominators.  This is
// achieved visiting the nodes in reverse topological order.
void BasicBlock::computePostDominator() {
  BasicBlock *Candidate = nullptr;
  // Walk forward from each successor to find the common post-dominator node.
  for (auto *Succ : successors()) {
    // Skip back-edges (successors with an earlier or equal BlockID).
    if (Succ->BlockID <= BlockID) continue;
    // If we don't have a candidate for the post-dominator yet, take this one.
    if (Candidate == nullptr) {
      Candidate = Succ;
      continue;
    }
    // Walk the alternate and current candidate back to find a common ancestor.
    auto *Alternate = Succ;
    while (Alternate != Candidate) {
      if (Candidate->BlockID < Alternate->BlockID)
        Candidate = Candidate->PostDominatorNode.Parent;
      else
        Alternate = Alternate->PostDominatorNode.Parent;
    }
  }
  PostDominatorNode.Parent = Candidate;
  PostDominatorNode.SizeOfSubTree = 1;   // Subtree sizes accumulated later.
}
// Renumber instructions in all blocks, sequentially across the whole CFG,
// starting from zero.
void SCFG::renumberInstrs() {
  int NextID = 0;
  for (auto *Block : Blocks)
    NextID = Block->renumberInstrs(NextID);
}
// First pass of the two-pass tree numbering: folds each node's subtree size
// into its parent and records the node's ID *relative to its parent* (made
// absolute later by computeNodeID).  TN selects which topology tree
// (DominatorNode or PostDominatorNode) is being processed.
static inline void computeNodeSize(BasicBlock *B,
                                   BasicBlock::TopologyNode BasicBlock::*TN) {
  BasicBlock::TopologyNode *N = &(B->*TN);
  if (N->Parent) {
    BasicBlock::TopologyNode *P = &(N->Parent->*TN);
    // Initially set ID relative to the (as yet uncomputed) parent ID
    N->NodeID = P->SizeOfSubTree;
    P->SizeOfSubTree += N->SizeOfSubTree;
  }
}
// Second pass of the two-pass tree numbering: converts the parent-relative
// IDs recorded by computeNodeSize into absolute IDs by adding the (already
// finalized) parent ID.  Must run after all parents have absolute IDs.
static inline void computeNodeID(BasicBlock *B,
                                 BasicBlock::TopologyNode BasicBlock::*TN) {
  BasicBlock::TopologyNode *N = &(B->*TN);
  if (N->Parent) {
    BasicBlock::TopologyNode *P = &(N->Parent->*TN);
    N->NodeID += P->NodeID;    // Fix NodeIDs relative to starting node.
  }
}
// Normalizes a CFG.  Normalization has a few major components:
// 1) Removing unreachable blocks.
// 2) Computing dominators and post-dominators
// 3) Topologically sorting the blocks into the "Blocks" array.
void SCFG::computeNormalForm() {
  // Topologically sort the blocks starting from the entry block.
  // topologicalSort returns the number of IDs it did not assign, i.e. the
  // number of blocks it could not reach from the entry.
  int NumUnreachableBlocks = Entry->topologicalSort(Blocks, Blocks.size());
  if (NumUnreachableBlocks > 0) {
    // If there were unreachable blocks shift everything down, and delete them.
    for (size_t I = NumUnreachableBlocks, E = Blocks.size(); I < E; ++I) {
      size_t NI = I - NumUnreachableBlocks;
      Blocks[NI] = Blocks[I];
      Blocks[NI]->BlockID = NI;
      // FIXME: clean up predecessor pointers to unreachable blocks?
    }
    Blocks.drop(NumUnreachableBlocks);
  }
  // Compute dominators.  Requires topological order so each block's
  // predecessors are processed first (back-edges excepted).
  for (auto *Block : Blocks)
    Block->computeDominator();
  // Once dominators have been computed, the final sort may be performed.
  int NumBlocks = Exit->topologicalFinalSort(Blocks, 0);
  assert(static_cast<size_t>(NumBlocks) == Blocks.size());
  (void) NumBlocks;
  // Renumber the instructions now that we have a final sort.
  renumberInstrs();
  // Compute post-dominators and compute the sizes of each node in the
  // dominator tree.
  for (auto *Block : Blocks.reverse()) {
    Block->computePostDominator();
    computeNodeSize(Block, &BasicBlock::DominatorNode);
  }
  // Compute the sizes of each node in the post-dominator tree and assign IDs in
  // the dominator tree.
  for (auto *Block : Blocks) {
    computeNodeID(Block, &BasicBlock::DominatorNode);
    computeNodeSize(Block, &BasicBlock::PostDominatorNode);
  }
  // Assign IDs in the post-dominator tree.
  for (auto *Block : Blocks.reverse()) {
    computeNodeID(Block, &BasicBlock::PostDominatorNode);
  }
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/CocoaConventions.cpp | //===- CocoaConventions.h - Special handling of Cocoa conventions -*- C++ -*--//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements cocoa naming convention analysis.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Type.h"
#include "clang/Basic/CharInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
using namespace ento;
// Returns true if RetTy looks like a CF-style reference type: either some
// typedef in its typedef chain is named "<Prefix>...Ref", or (when a
// function name is supplied) the type is void* and the function name starts
// with Prefix.
bool cocoa::isRefType(QualType RetTy, StringRef Prefix,
                      StringRef Name) {
  // Recursively walk the typedef stack, allowing typedefs of reference types.
  while (const TypedefType *TD = RetTy->getAs<TypedefType>()) {
    StringRef TDName = TD->getDecl()->getIdentifier()->getName();
    if (TDName.startswith(Prefix) && TDName.endswith("Ref"))
      return true;
    // XPC unfortunately uses CF-style function names, but aren't CF types.
    if (TDName.startswith("xpc_"))
      return false;
    RetTy = TD->getDecl()->getUnderlyingType();
  }
  if (Name.empty())
    return false;
  // Is the type void*?  Guard against non-pointer types: getAs returns null
  // when RetTy is not a PointerType, and the original unconditional
  // dereference would crash here.
  const PointerType* PT = RetTy->getAs<PointerType>();
  if (!PT || !PT->getPointeeType().getUnqualifiedType()->isVoidType())
    return false;
  // Does the name start with the prefix?
  return Name.startswith(Prefix);
}
// Returns true if T is a CF-style reference type from one of the frameworks
// whose objects follow Core Foundation retain/release conventions.
bool coreFoundation::isCFObjectRef(QualType T) {
  static const char *const Prefixes[] = {
      "CF",           // Core Foundation.
      "CG",           // Core Graphics.
      "DADisk",       // Disk Arbitration API.
      "DADissenter",
      "DASessionRef",
  };
  for (const char *const Prefix : Prefixes)
    if (cocoa::isRefType(T, Prefix))
      return true;
  return false;
}
// Returns true if Ty is an Objective-C object pointer that the retain-count
// machinery should treat as a tracked Cocoa object: id/Class (qualified or
// not), any interface descending from NSObject, or any forward-declared
// interface (assumed to subclass NSObject).
bool cocoa::isCocoaObjectRef(QualType Ty) {
  if (!Ty->isObjCObjectPointerType())
    return false;
  const ObjCObjectPointerType *PT = Ty->getAs<ObjCObjectPointerType>();
  // Can be true for objects with the 'NSObject' attribute.
  if (!PT)
    return true;
  // We assume that id<..>, id, Class, and Class<..> all represent tracked
  // objects.
  if (PT->isObjCIdType() || PT->isObjCQualifiedIdType() ||
      PT->isObjCClassType() || PT->isObjCQualifiedClassType())
    return true;
  // Does the interface subclass NSObject?
  // FIXME: We can memoize here if this gets too expensive.
  // NOTE(review): ID is dereferenced without a null check below; this assumes
  // getInterfaceDecl() is non-null once the id/Class cases are excluded —
  // TODO confirm.
  const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
  // Assume that anything declared with a forward declaration and no
  // @interface subclasses NSObject.
  if (!ID->hasDefinition())
    return true;
  for ( ; ID ; ID = ID->getSuperClass())
    if (ID->getIdentifier()->getName() == "NSObject")
      return true;
  return false;
}
// Implements the Core Foundation "Create Rule" name check: returns true if
// the function name contains a camel-case "Create" or "Copy" word (e.g.
// CFArrayCreate, fooCopyBar), which by convention transfers ownership of the
// returned reference to the caller.
bool coreFoundation::followsCreateRule(const FunctionDecl *fn) {
  // For now, *just* base this on the function name, not on anything else.
  const IdentifierInfo *ident = fn->getIdentifier();
  if (!ident) return false;
  StringRef functionName = ident->getName();
  StringRef::iterator it = functionName.begin();
  StringRef::iterator start = it;
  StringRef::iterator endI = functionName.end();
  while (true) {
    // Scan for the start of 'create' or 'copy'.
    for ( ; it != endI ; ++it) {
      // Search for the first character.  It can either be 'C' or 'c'.
      char ch = *it;
      if (ch == 'C' || ch == 'c') {
        // Make sure this isn't something like 'recreate' or 'Scopy':
        // lowercase 'c' only starts a word if preceded by a non-letter.
        if (ch == 'c' && it != start && isLetter(*(it - 1)))
          continue;
        ++it;
        break;
      }
    }
    // Did we hit the end of the string?  If so, we didn't find a match.
    if (it == endI)
      return false;
    // Scan for *lowercase* 'reate' or 'opy', followed by no lowercase
    // character.
    StringRef suffix = functionName.substr(it - start);
    if (suffix.startswith("reate")) {
      it += 5;
    }
    else if (suffix.startswith("opy")) {
      it += 3;
    } else {
      // Keep scanning.
      continue;
    }
    // A trailing lowercase letter means we matched a prefix of a longer
    // word (e.g. "Copying"), not the word itself.
    if (it == endI || !isLowercase(*it))
      return true;
    // If we matched a lowercase character, it isn't the end of the
    // word. Keep scanning.
  }
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/Consumed.cpp | //===- Consumed.cpp --------------------------------------------*- C++ --*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// A intra-procedural analysis for checking consumed properties. This is based,
// in part, on research on linear types.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/Consumed.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
// TODO: Adjust states of args to constructors in the same way that arguments to
// function calls are handled.
// TODO: Use information from tests in for- and while-loop conditional.
// TODO: Add notes about the actual and expected state for
// TODO: Correctly identify unreachable blocks when chaining boolean operators.
// TODO: Adjust the parser and AttributesList class to support lists of
// identifiers.
// TODO: Warn about unreachable code.
// TODO: Switch to using a bitmap to track unreachable blocks.
// TODO: Handle variable definitions, e.g. bool valid = x.isValid();
// if (valid) ...; (Deferred)
// TODO: Take notes on state transitions to provide better warning messages.
// (Deferred)
// TODO: Test nested conditionals: A) Checking the same value multiple times,
// and 2) Checking different values. (Deferred)
using namespace clang;
using namespace consumed;
// Key method definition: the out-of-line virtual destructor anchors the
// class's vtable in this translation unit.
ConsumedWarningsHandlerBase::~ConsumedWarningsHandlerBase() {}
// Returns the location of the first statement in Block, recursing forward
// through an empty block's single successor.  Returns an invalid
// SourceLocation when no statement is found.
static SourceLocation getFirstStmtLoc(const CFGBlock *Block) {
  // Find the source location of the first statement in the block, if the block
  // is not empty.
  for (const auto &B : *Block)
    if (Optional<CFGStmt> CS = B.getAs<CFGStmt>())
      return CS->getStmt()->getLocStart();
  // Block is empty.
  // If we have one successor, return the first statement in that block
  if (Block->succ_size() == 1 && *Block->succ_begin())
    return getFirstStmtLoc(*Block->succ_begin());
  return SourceLocation();
}
// Returns the best location for the "end" of Block, preferring, in order:
// the terminator, the last statement, a location borrowed from the single
// successor, then the single predecessor.  May return an invalid location.
static SourceLocation getLastStmtLoc(const CFGBlock *Block) {
  // Find the source location of the last statement in the block, if the block
  // is not empty.
  if (const Stmt *StmtNode = Block->getTerminator()) {
    return StmtNode->getLocStart();
  } else {
    for (CFGBlock::const_reverse_iterator BI = Block->rbegin(),
         BE = Block->rend(); BI != BE; ++BI) {
      if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>())
        return CS->getStmt()->getLocStart();
    }
  }
  // If we have one successor, return the first statement in that block
  SourceLocation Loc;
  if (Block->succ_size() == 1 && *Block->succ_begin())
    Loc = getFirstStmtLoc(*Block->succ_begin());
  if (Loc.isValid())
    return Loc;
  // If we have one predecessor, return the last statement in that block
  if (Block->pred_size() == 1 && *Block->pred_begin())
    return getLastStmtLoc(*Block->pred_begin());
  return Loc;
}
// Swaps CS_Consumed <-> CS_Unconsumed.  CS_None and CS_Unknown are fixed
// points, since neither carries a definite state that could be inverted.
static ConsumedState invertConsumedUnconsumed(ConsumedState State) {
  switch (State) {
  case CS_Consumed:
    return CS_Unconsumed;
  case CS_Unconsumed:
    return CS_Consumed;
  case CS_None:
    return CS_None;
  case CS_Unknown:
    return CS_Unknown;
  }
  llvm_unreachable("invalid enum");
}
// Returns true if State matches one of the states listed in the
// callable_when attribute.  CS_None can never match, since the attribute
// only expresses Unknown/Unconsumed/Consumed.
static bool isCallableInState(const CallableWhenAttr *CWAttr,
                              ConsumedState State) {
  for (const auto &S : CWAttr->callableStates()) {
    // Translate the attribute's enum into the analyzer's ConsumedState.
    ConsumedState MappedAttrState = CS_None;
    switch (S) {
    case CallableWhenAttr::Unknown:
      MappedAttrState = CS_Unknown;
      break;
    case CallableWhenAttr::Unconsumed:
      MappedAttrState = CS_Unconsumed;
      break;
    case CallableWhenAttr::Consumed:
      MappedAttrState = CS_Consumed;
      break;
    }
    if (MappedAttrState == State)
      return true;
  }
  return false;
}
static bool isConsumableType(const QualType &QT) {
if (QT->isPointerType() || QT->isReferenceType())
return false;
if (const CXXRecordDecl *RD = QT->getAsCXXRecordDecl())
return RD->hasAttr<ConsumableAttr>();
return false;
}
static bool isAutoCastType(const QualType &QT) {
if (QT->isPointerType() || QT->isReferenceType())
return false;
if (const CXXRecordDecl *RD = QT->getAsCXXRecordDecl())
return RD->hasAttr<ConsumableAutoCastAttr>();
return false;
}
static bool isSetOnReadPtrType(const QualType &QT) {
if (const CXXRecordDecl *RD = QT->getPointeeCXXRecordDecl())
return RD->hasAttr<ConsumableSetOnReadAttr>();
return false;
}
// A state is "known" when it is definitely consumed or unconsumed;
// CS_None (untracked) and CS_Unknown are not.
static bool isKnownState(ConsumedState State) {
  switch (State) {
  case CS_None:
  case CS_Unknown:
    return false;
  case CS_Unconsumed:
  case CS_Consumed:
    return true;
  }
  llvm_unreachable("invalid enum");
}
// True for rvalue-reference parameter types.
static bool isRValueRef(QualType ParamType) {
  return ParamType->isRValueReferenceType();
}
// True for functions carrying the test_typestate attribute.
static bool isTestingFunction(const FunctionDecl *FunDecl) {
  return FunDecl->hasAttr<TestTypestateAttr>();
}
// True for parameters passed by pointer or by (lvalue or rvalue) reference.
static bool isPointerOrRef(QualType ParamType) {
  return ParamType->isPointerType() || ParamType->isReferenceType();
}
// Returns the default state declared by the consumable attribute on QT's
// class — the state a fresh object of the type starts in.  QT must satisfy
// isConsumableType().
static ConsumedState mapConsumableAttrState(const QualType QT) {
  assert(isConsumableType(QT));
  const ConsumableAttr *CAttr =
      QT->getAsCXXRecordDecl()->getAttr<ConsumableAttr>();
  switch (CAttr->getDefaultState()) {
  case ConsumableAttr::Unknown:
    return CS_Unknown;
  case ConsumableAttr::Unconsumed:
    return CS_Unconsumed;
  case ConsumableAttr::Consumed:
    return CS_Consumed;
  }
  llvm_unreachable("invalid enum");
}
// Translates a param_typestate attribute value into the analyzer's
// ConsumedState.
static ConsumedState
mapParamTypestateAttrState(const ParamTypestateAttr *PTAttr) {
  switch (PTAttr->getParamState()) {
  case ParamTypestateAttr::Unknown:
    return CS_Unknown;
  case ParamTypestateAttr::Unconsumed:
    return CS_Unconsumed;
  case ParamTypestateAttr::Consumed:
    return CS_Consumed;
  }
  // Message normalized from "invalid_enum" to match the sibling mappers.
  llvm_unreachable("invalid enum");
}
// Translates a return_typestate attribute value into the analyzer's
// ConsumedState.
static ConsumedState
mapReturnTypestateAttrState(const ReturnTypestateAttr *RTSAttr) {
  switch (RTSAttr->getState()) {
  case ReturnTypestateAttr::Unknown:
    return CS_Unknown;
  case ReturnTypestateAttr::Unconsumed:
    return CS_Unconsumed;
  case ReturnTypestateAttr::Consumed:
    return CS_Consumed;
  }
  llvm_unreachable("invalid enum");
}
// Translates a set_typestate attribute value into the analyzer's
// ConsumedState.
static ConsumedState mapSetTypestateAttrState(const SetTypestateAttr *STAttr) {
  switch (STAttr->getNewState()) {
  case SetTypestateAttr::Unknown:
    return CS_Unknown;
  case SetTypestateAttr::Unconsumed:
    return CS_Unconsumed;
  case SetTypestateAttr::Consumed:
    return CS_Consumed;
  }
  // Message normalized from "invalid_enum" to match the sibling mappers.
  llvm_unreachable("invalid enum");
}
// Returns the human-readable name of a state, for use in diagnostics.
static StringRef stateToString(ConsumedState State) {
  switch (State) {
  case consumed::CS_None:
    return "none";
  case consumed::CS_Unknown:
    return "unknown";
  case consumed::CS_Unconsumed:
    return "unconsumed";
  case consumed::CS_Consumed:
    return "consumed";
  }
  llvm_unreachable("invalid enum");
}
// Returns the state that a test_typestate function tests for; the attribute
// only permits Unconsumed and Consumed.  FunDecl must satisfy
// isTestingFunction().
static ConsumedState testsFor(const FunctionDecl *FunDecl) {
  assert(isTestingFunction(FunDecl));
  switch (FunDecl->getAttr<TestTypestateAttr>()->getTestState()) {
  case TestTypestateAttr::Unconsumed:
    return CS_Unconsumed;
  case TestTypestateAttr::Consumed:
    return CS_Consumed;
  }
  llvm_unreachable("invalid enum");
}
namespace {
// A single "is Var in state TestsFor?" test extracted from a condition.
struct VarTestResult {
  const VarDecl *Var;       // The variable being tested.
  ConsumedState TestsFor;   // The state for which the test returns true.
};
} // end anonymous namespace
namespace clang {
namespace consumed {
// The effective boolean operator joining two variable tests; inverting a
// compound test swaps EO_And and EO_Or (see PropagationInfo::invertTest).
enum EffectiveOp {
  EO_And,
  EO_Or
};
/// Information propagated from an expression during the CFG walk.  A tagged
/// union: it may hold a concrete state, a variable or temporary whose state
/// lives in the ConsumedStateMap, or a (possibly compound) state test used
/// to split state along branches.
class PropagationInfo {
  enum {
    IT_None,     // No information.
    IT_State,    // A concrete ConsumedState.
    IT_VarTest,  // A test of one variable's state.
    IT_BinTest,  // Two variable tests joined by && or ||.
    IT_Var,      // A variable; its state is looked up in the state map.
    IT_Tmp       // A temporary; its state is looked up in the state map.
  } InfoType;
  /// A pair of variable tests joined by a binary logical operator.
  struct BinTestTy {
    const BinaryOperator *Source;
    EffectiveOp EOp;
    VarTestResult LTest;
    VarTestResult RTest;
  };
  // The active member is selected by InfoType.
  union {
    ConsumedState State;
    VarTestResult VarTest;
    const VarDecl *Var;
    const CXXBindTemporaryExpr *Tmp;
    BinTestTy BinTest;
  };
public:
  PropagationInfo() : InfoType(IT_None) {}
  PropagationInfo(const VarTestResult &VarTest)
    : InfoType(IT_VarTest), VarTest(VarTest) {}
  PropagationInfo(const VarDecl *Var, ConsumedState TestsFor)
    : InfoType(IT_VarTest) {
    VarTest.Var = Var;
    VarTest.TestsFor = TestsFor;
  }
  PropagationInfo(const BinaryOperator *Source, EffectiveOp EOp,
                  const VarTestResult &LTest, const VarTestResult &RTest)
    : InfoType(IT_BinTest) {
    BinTest.Source = Source;
    BinTest.EOp = EOp;
    BinTest.LTest = LTest;
    BinTest.RTest = RTest;
  }
  PropagationInfo(const BinaryOperator *Source, EffectiveOp EOp,
                  const VarDecl *LVar, ConsumedState LTestsFor,
                  const VarDecl *RVar, ConsumedState RTestsFor)
    : InfoType(IT_BinTest) {
    BinTest.Source = Source;
    BinTest.EOp = EOp;
    BinTest.LTest.Var = LVar;
    BinTest.LTest.TestsFor = LTestsFor;
    BinTest.RTest.Var = RVar;
    BinTest.RTest.TestsFor = RTestsFor;
  }
  PropagationInfo(ConsumedState State)
    : InfoType(IT_State), State(State) {}
  PropagationInfo(const VarDecl *Var) : InfoType(IT_Var), Var(Var) {}
  PropagationInfo(const CXXBindTemporaryExpr *Tmp)
    : InfoType(IT_Tmp), Tmp(Tmp) {}
  // Accessors below assert that the matching union member is active.
  const ConsumedState & getState() const {
    assert(InfoType == IT_State);
    return State;
  }
  const VarTestResult & getVarTest() const {
    assert(InfoType == IT_VarTest);
    return VarTest;
  }
  const VarTestResult & getLTest() const {
    assert(InfoType == IT_BinTest);
    return BinTest.LTest;
  }
  const VarTestResult & getRTest() const {
    assert(InfoType == IT_BinTest);
    return BinTest.RTest;
  }
  const VarDecl * getVar() const {
    assert(InfoType == IT_Var);
    return Var;
  }
  const CXXBindTemporaryExpr * getTmp() const {
    assert(InfoType == IT_Tmp);
    return Tmp;
  }
  /// Resolves this info to a concrete state, consulting StateMap for
  /// variables and temporaries.
  ConsumedState getAsState(const ConsumedStateMap *StateMap) const {
    assert(isVar() || isTmp() || isState());
    if (isVar())
      return StateMap->getState(Var);
    else if (isTmp())
      return StateMap->getState(Tmp);
    else if (isState())
      return State;
    else
      return CS_None;
  }
  EffectiveOp testEffectiveOp() const {
    assert(InfoType == IT_BinTest);
    return BinTest.EOp;
  }
  const BinaryOperator * testSourceNode() const {
    assert(InfoType == IT_BinTest);
    return BinTest.Source;
  }
  inline bool isValid() const { return InfoType != IT_None; }
  inline bool isState() const { return InfoType == IT_State; }
  inline bool isVarTest() const { return InfoType == IT_VarTest; }
  inline bool isBinTest() const { return InfoType == IT_BinTest; }
  inline bool isVar() const { return InfoType == IT_Var; }
  inline bool isTmp() const { return InfoType == IT_Tmp; }
  bool isTest() const {
    return InfoType == IT_VarTest || InfoType == IT_BinTest;
  }
  // True when the state is stored indirectly in the ConsumedStateMap.
  bool isPointerToValue() const {
    return InfoType == IT_Var || InfoType == IT_Tmp;
  }
  /// Returns the logically negated form of this test: each tested-for state
  /// is inverted and, for compound tests, && and || are swapped (De Morgan).
  PropagationInfo invertTest() const {
    assert(InfoType == IT_VarTest || InfoType == IT_BinTest);
    if (InfoType == IT_VarTest) {
      return PropagationInfo(VarTest.Var,
                             invertConsumedUnconsumed(VarTest.TestsFor));
    } else if (InfoType == IT_BinTest) {
      return PropagationInfo(BinTest.Source,
        BinTest.EOp == EO_And ? EO_Or : EO_And,
        BinTest.LTest.Var, invertConsumedUnconsumed(BinTest.LTest.TestsFor),
        BinTest.RTest.Var, invertConsumedUnconsumed(BinTest.RTest.TestsFor));
    } else {
      return PropagationInfo();
    }
  }
};
// Writes State into StateMap for whichever entity (variable or temporary)
// PInfo refers to.  PInfo must be IT_Var or IT_Tmp.
static inline void
setStateForVarOrTmp(ConsumedStateMap *StateMap, const PropagationInfo &PInfo,
                    ConsumedState State) {
  assert(PInfo.isVar() || PInfo.isTmp());
  if (PInfo.isVar())
    StateMap->setState(PInfo.getVar(), State);
  else
    StateMap->setState(PInfo.getTmp(), State);
}
/// Walks the statements of a CFG block, updating StateMap to reflect how
/// each expression changes the consumed-state of tracked variables and
/// temporaries, and attaching PropagationInfo to visited expressions.
class ConsumedStmtVisitor : public ConstStmtVisitor<ConsumedStmtVisitor> {
  typedef llvm::DenseMap<const Stmt *, PropagationInfo> MapType;
  typedef std::pair<const Stmt *, PropagationInfo> PairType;
  typedef MapType::iterator InfoEntry;
  typedef MapType::const_iterator ConstInfoEntry;
  AnalysisDeclContext &AC;
  ConsumedAnalyzer &Analyzer;
  ConsumedStateMap *StateMap;   // Current per-block state; replaceable via reset().
  MapType PropagationMap;       // Info attached to visited expressions.
  // Lookups/inserts strip parentheses so (x) and x share a single entry.
  InfoEntry findInfo(const Expr *E) {
    return PropagationMap.find(E->IgnoreParens());
  }
  ConstInfoEntry findInfo(const Expr *E) const {
    return PropagationMap.find(E->IgnoreParens());
  }
  void insertInfo(const Expr *E, const PropagationInfo &PI) {
    PropagationMap.insert(PairType(E->IgnoreParens(), PI));
  }
  void forwardInfo(const Expr *From, const Expr *To);
  void copyInfo(const Expr *From, const Expr *To, ConsumedState CS);
  ConsumedState getInfo(const Expr *From);
  void setInfo(const Expr *To, ConsumedState NS);
  void propagateReturnType(const Expr *Call, const FunctionDecl *Fun);
public:
  void checkCallability(const PropagationInfo &PInfo,
                        const FunctionDecl *FunDecl,
                        SourceLocation BlameLoc);
  bool handleCall(const CallExpr *Call, const Expr *ObjArg,
                  const FunctionDecl *FunD);
  void VisitBinaryOperator(const BinaryOperator *BinOp);
  void VisitCallExpr(const CallExpr *Call);
  void VisitCastExpr(const CastExpr *Cast);
  void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *Temp);
  void VisitCXXConstructExpr(const CXXConstructExpr *Call);
  void VisitCXXMemberCallExpr(const CXXMemberCallExpr *Call);
  void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *Call);
  void VisitDeclRefExpr(const DeclRefExpr *DeclRef);
  void VisitDeclStmt(const DeclStmt *DelcS);
  void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Temp);
  void VisitMemberExpr(const MemberExpr *MExpr);
  void VisitParmVarDecl(const ParmVarDecl *Param);
  void VisitReturnStmt(const ReturnStmt *Ret);
  void VisitUnaryOperator(const UnaryOperator *UOp);
  void VisitVarDecl(const VarDecl *Var);
  ConsumedStmtVisitor(AnalysisDeclContext &AC, ConsumedAnalyzer &Analyzer,
                      ConsumedStateMap *StateMap)
      : AC(AC), Analyzer(Analyzer), StateMap(StateMap) {}
  // Returns the info attached to StmtNode, or an empty (IT_None) info.
  PropagationInfo getInfo(const Expr *StmtNode) const {
    ConstInfoEntry Entry = findInfo(StmtNode);
    if (Entry != PropagationMap.end())
      return Entry->second;
    else
      return PropagationInfo();
  }
  // Points the visitor at a different per-block state map (not owned).
  void reset(ConsumedStateMap *NewStateMap) {
    StateMap = NewStateMap;
  }
};
// Attaches to To the same propagation info already recorded for From, if
// any; otherwise does nothing.
void ConsumedStmtVisitor::forwardInfo(const Expr *From, const Expr *To) {
  InfoEntry Entry = findInfo(From);
  if (Entry == PropagationMap.end())
    return;
  insertInfo(To, Entry->second);
}
// Create a new state for To, which is initialized to the state of From.
// If NS is not CS_None, sets the state of From to NS.
void ConsumedStmtVisitor::copyInfo(const Expr *From, const Expr *To,
                                   ConsumedState NS) {
  InfoEntry Entry = findInfo(From);
  if (Entry != PropagationMap.end()) {
    PropagationInfo& PInfo = Entry->second;
    ConsumedState CS = PInfo.getAsState(StateMap);
    // Only propagate a tracked (non-CS_None) state onto To.
    if (CS != CS_None)
      insertInfo(To, PropagationInfo(CS));
    // Writing NS back only makes sense when From names a variable or
    // temporary whose state lives in the state map.
    if (NS != CS_None && PInfo.isPointerToValue())
      setStateForVarOrTmp(StateMap, PInfo, NS);
  }
}
// Get the ConsumedState recorded for From, or CS_None when untracked.
ConsumedState ConsumedStmtVisitor::getInfo(const Expr *From) {
  InfoEntry Entry = findInfo(From);
  return Entry == PropagationMap.end() ? CS_None
                                       : Entry->second.getAsState(StateMap);
}
// If we already have info for To then update its backing state, otherwise
// create a new entry (but only for a meaningful state).
void ConsumedStmtVisitor::setInfo(const Expr *To, ConsumedState NS) {
  InfoEntry Entry = findInfo(To);
  if (Entry == PropagationMap.end()) {
    if (NS != CS_None)
      insertInfo(To, PropagationInfo(NS));
    return;
  }
  PropagationInfo &PInfo = Entry->second;
  if (PInfo.isPointerToValue())
    setStateForVarOrTmp(StateMap, PInfo, NS);
}
// Emit a warning when FunDecl carries a callable_when attribute and the
// tracked object described by PInfo is not in one of the allowed states.
void ConsumedStmtVisitor::checkCallability(const PropagationInfo &PInfo,
                                           const FunctionDecl *FunDecl,
                                           SourceLocation BlameLoc) {
  assert(!PInfo.isTest());
  const CallableWhenAttr *CWAttr = FunDecl->getAttr<CallableWhenAttr>();
  if (!CWAttr)
    return;
  if (PInfo.isVar()) {
    // Named variable: the diagnostic can include the variable's name.
    ConsumedState VarState = StateMap->getState(PInfo.getVar());
    if (VarState == CS_None || isCallableInState(CWAttr, VarState))
      return;
    Analyzer.WarningsHandler.warnUseInInvalidState(
      FunDecl->getNameAsString(), PInfo.getVar()->getNameAsString(),
      stateToString(VarState), BlameLoc);
  } else {
    // Temporary (or other tracked value): diagnosed without a name.
    ConsumedState TmpState = PInfo.getAsState(StateMap);
    if (TmpState == CS_None || isCallableInState(CWAttr, TmpState))
      return;
    Analyzer.WarningsHandler.warnUseOfTempInInvalidState(
      FunDecl->getNameAsString(), stateToString(TmpState), BlameLoc);
  }
}
// Factors out common behavior for function, method, and operator calls.
// Check parameters and set parameter state if necessary.
// Returns true if the state of ObjArg is set, or false otherwise.
bool ConsumedStmtVisitor::handleCall(const CallExpr *Call, const Expr *ObjArg,
                                     const FunctionDecl *FunD) {
  unsigned Offset = 0;
  if (isa<CXXOperatorCallExpr>(Call) && isa<CXXMethodDecl>(FunD))
    Offset = 1;  // first argument is 'this'
  // check explicit parameters
  for (unsigned Index = Offset; Index < Call->getNumArgs(); ++Index) {
    // Skip variable argument lists.
    if (Index - Offset >= FunD->getNumParams())
      break;
    const ParmVarDecl *Param = FunD->getParamDecl(Index - Offset);
    QualType ParamType = Param->getType();
    InfoEntry Entry = findInfo(Call->getArg(Index));
    // Untracked arguments and test results need no parameter checking.
    if (Entry == PropagationMap.end() || Entry->second.isTest())
      continue;
    PropagationInfo PInfo = Entry->second;
    // Check that the parameter is in the correct state.
    if (ParamTypestateAttr *PTA = Param->getAttr<ParamTypestateAttr>()) {
      ConsumedState ParamState = PInfo.getAsState(StateMap);
      ConsumedState ExpectedState = mapParamTypestateAttrState(PTA);
      if (ParamState != ExpectedState)
        Analyzer.WarningsHandler.warnParamTypestateMismatch(
          Call->getArg(Index)->getExprLoc(),
          stateToString(ExpectedState), stateToString(ParamState));
    }
    if (!(Entry->second.isVar() || Entry->second.isTmp()))
      continue;
    // Adjust state on the caller side.
    if (isRValueRef(ParamType))
      // Passing by rvalue reference consumes the argument.
      setStateForVarOrTmp(StateMap, PInfo, consumed::CS_Consumed);
    else if (ReturnTypestateAttr *RT = Param->getAttr<ReturnTypestateAttr>())
      // The annotation dictates the argument's post-call state.
      setStateForVarOrTmp(StateMap, PInfo, mapReturnTypestateAttrState(RT));
    else if (isPointerOrRef(ParamType) &&
             (!ParamType->getPointeeType().isConstQualified() ||
              isSetOnReadPtrType(ParamType)))
      // A mutable (or set-on-read) indirection may change the argument in
      // ways we cannot track, so its state becomes unknown.
      setStateForVarOrTmp(StateMap, PInfo, consumed::CS_Unknown);
  }
  if (!ObjArg)
    return false;
  // check implicit 'self' parameter, if present
  InfoEntry Entry = findInfo(ObjArg);
  if (Entry != PropagationMap.end()) {
    PropagationInfo PInfo = Entry->second;
    checkCallability(PInfo, FunD, Call->getExprLoc());
    if (SetTypestateAttr *STA = FunD->getAttr<SetTypestateAttr>()) {
      if (PInfo.isVar()) {
        StateMap->setState(PInfo.getVar(), mapSetTypestateAttrState(STA));
        return true;
      }
      else if (PInfo.isTmp()) {
        StateMap->setState(PInfo.getTmp(), mapSetTypestateAttrState(STA));
        return true;
      }
    }
    else if (isTestingFunction(FunD) && PInfo.isVar()) {
      // A testing function's result is a var-test used for branch splitting.
      PropagationMap.insert(PairType(Call,
        PropagationInfo(PInfo.getVar(), testsFor(FunD))));
    }
  }
  return false;
}
void ConsumedStmtVisitor::propagateReturnType(const Expr *Call,
const FunctionDecl *Fun) {
QualType RetType = Fun->getCallResultType();
if (RetType->isReferenceType())
RetType = RetType->getPointeeType();
if (isConsumableType(RetType)) {
ConsumedState ReturnState;
if (ReturnTypestateAttr *RTA = Fun->getAttr<ReturnTypestateAttr>())
ReturnState = mapReturnTypestateAttrState(RTA);
else
ReturnState = mapConsumableAttrState(RetType);
PropagationMap.insert(PairType(Call, PropagationInfo(ReturnState)));
}
}
// Record &&/|| conditions built from var tests so the branch splitter can
// later specialize states; pointer-to-member ops forward the object info.
void ConsumedStmtVisitor::VisitBinaryOperator(const BinaryOperator *BinOp) {
  switch (BinOp->getOpcode()) {
  case BO_LAnd:
  case BO_LOr : {
    InfoEntry LEntry = findInfo(BinOp->getLHS()),
              REntry = findInfo(BinOp->getRHS());
    VarTestResult LTest, RTest;
    if (LEntry != PropagationMap.end() && LEntry->second.isVarTest()) {
      LTest = LEntry->second.getVarTest();
    } else {
      // Null Var marks "this side tests nothing".
      LTest.Var = nullptr;
      LTest.TestsFor = CS_None;
    }
    if (REntry != PropagationMap.end() && REntry->second.isVarTest()) {
      RTest = REntry->second.getVarTest();
    } else {
      RTest.Var = nullptr;
      RTest.TestsFor = CS_None;
    }
    // Only record a binary test when at least one side tests a variable.
    if (!(LTest.Var == nullptr && RTest.Var == nullptr))
      PropagationMap.insert(PairType(BinOp, PropagationInfo(BinOp,
        static_cast<EffectiveOp>(BinOp->getOpcode() == BO_LOr), LTest, RTest)));
    break;
  }
  case BO_PtrMemD:
  case BO_PtrMemI:
    forwardInfo(BinOp->getLHS(), BinOp);
    break;
  default:
    break;
  }
}
// Handle an ordinary call. std::move is special-cased: it forwards its
// argument's state to the call result and consumes the argument.
void ConsumedStmtVisitor::VisitCallExpr(const CallExpr *Call) {
  const FunctionDecl *FunDecl = Call->getDirectCallee();
  if (!FunDecl)
    return;
  // Special case for the std::move function.
  // TODO: Make this more specific. (Deferred)
  if (Call->getNumArgs() == 1 && FunDecl->getNameAsString() == "move" &&
      FunDecl->isInStdNamespace()) {
    copyInfo(Call->getArg(0), Call, CS_Consumed);
    return;
  }
  handleCall(Call, nullptr, FunDecl);
  propagateReturnType(Call, FunDecl);
}
// Casts are transparent to typestate: forward the operand's info.
void ConsumedStmtVisitor::VisitCastExpr(const CastExpr *Cast) {
  forwardInfo(Cast->getSubExpr(), Cast);
}
// Start tracking a bound temporary, seeding it with the state of the
// expression it wraps.
void ConsumedStmtVisitor::VisitCXXBindTemporaryExpr(
    const CXXBindTemporaryExpr *Temp) {
  InfoEntry Entry = findInfo(Temp->getSubExpr());
  if (Entry != PropagationMap.end() && !Entry->second.isTest()) {
    StateMap->setState(Temp, Entry->second.getAsState(StateMap));
    PropagationMap.insert(PairType(Temp, PropagationInfo(Temp)));
  }
}
// Seed the typestate of a newly constructed consumable object based on the
// constructor kind and any return_typestate annotation.
void ConsumedStmtVisitor::VisitCXXConstructExpr(const CXXConstructExpr *Call) {
  CXXConstructorDecl *Constructor = Call->getConstructor();
  ASTContext &CurrContext = AC.getASTContext();
  QualType ThisType = Constructor->getThisType(CurrContext)->getPointeeType();
  if (!isConsumableType(ThisType))
    return;
  // FIXME: What should happen if someone annotates the move constructor?
  if (ReturnTypestateAttr *RTA = Constructor->getAttr<ReturnTypestateAttr>()) {
    // TODO: Adjust state of args appropriately.
    ConsumedState RetState = mapReturnTypestateAttrState(RTA);
    PropagationMap.insert(PairType(Call, PropagationInfo(RetState)));
  } else if (Constructor->isDefaultConstructor()) {
    // Default construction yields a consumed (empty) object.
    PropagationMap.insert(PairType(Call,
      PropagationInfo(consumed::CS_Consumed)));
  } else if (Constructor->isMoveConstructor()) {
    // Moving transfers the source's state and consumes the source.
    copyInfo(Call->getArg(0), Call, CS_Consumed);
  } else if (Constructor->isCopyConstructor()) {
    // Copy state from arg.  If setStateOnRead then set arg to CS_Unknown.
    ConsumedState NS =
      isSetOnReadPtrType(Constructor->getThisType(CurrContext)) ?
      CS_Unknown : CS_None;
    copyInfo(Call->getArg(0), Call, NS);
  } else {
    // TODO: Adjust state of args appropriately.
    ConsumedState RetState = mapConsumableAttrState(ThisType);
    PropagationMap.insert(PairType(Call, PropagationInfo(RetState)));
  }
}
void ConsumedStmtVisitor::VisitCXXMemberCallExpr(
const CXXMemberCallExpr *Call) {
CXXMethodDecl* MD = Call->getMethodDecl();
if (!MD)
return;
handleCall(Call, Call->getImplicitObjectArgument(), MD);
propagateReturnType(Call, MD);
}
// Operator calls. Assignment copies the RHS state onto the LHS unless the
// call handler already set the LHS state; other operators are treated like
// member (or free-function) calls.
void ConsumedStmtVisitor::VisitCXXOperatorCallExpr(
    const CXXOperatorCallExpr *Call) {
  const FunctionDecl *FunDecl =
    dyn_cast_or_null<FunctionDecl>(Call->getDirectCallee());
  if (!FunDecl) return;
  if (Call->getOperator() == OO_Equal) {
    // Read the RHS state before handleCall can mutate the map.
    ConsumedState CS = getInfo(Call->getArg(1));
    if (!handleCall(Call, Call->getArg(0), FunDecl))
      setInfo(Call->getArg(0), CS);
    return;
  }
  if (const CXXMemberCallExpr *MCall = dyn_cast<CXXMemberCallExpr>(Call))
    handleCall(MCall, MCall->getImplicitObjectArgument(), FunDecl);
  else
    handleCall(Call, Call->getArg(0), FunDecl);
  propagateReturnType(Call, FunDecl);
}
void ConsumedStmtVisitor::VisitDeclRefExpr(const DeclRefExpr *DeclRef) {
if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(DeclRef->getDecl()))
if (StateMap->getState(Var) != consumed::CS_None)
PropagationMap.insert(PairType(DeclRef, PropagationInfo(Var)));
}
// Visit each declared variable; a single-declaration statement also gets
// the variable recorded as its own propagation info.
void ConsumedStmtVisitor::VisitDeclStmt(const DeclStmt *DeclS) {
  for (const auto *DI : DeclS->decls())
    if (isa<VarDecl>(DI))
      VisitVarDecl(cast<VarDecl>(DI));
  if (DeclS->isSingleDecl())
    if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(DeclS->getSingleDecl()))
      PropagationMap.insert(PairType(DeclS, PropagationInfo(Var)));
}
// Materialized temporaries inherit the wrapped expression's info.
void ConsumedStmtVisitor::VisitMaterializeTemporaryExpr(
    const MaterializeTemporaryExpr *Temp) {
  forwardInfo(Temp->GetTemporaryExpr(), Temp);
}
// Member accesses inherit the base object's info.
void ConsumedStmtVisitor::VisitMemberExpr(const MemberExpr *MExpr) {
  forwardInfo(MExpr->getBase(), MExpr);
}
// Determine a parameter's initial typestate from (in priority order) its
// param_typestate annotation, its consumable type, or its reference kind,
// and record it when trackable.
void ConsumedStmtVisitor::VisitParmVarDecl(const ParmVarDecl *Param) {
  QualType ParamType = Param->getType();
  ConsumedState ParamState = consumed::CS_None;
  if (const ParamTypestateAttr *PTA = Param->getAttr<ParamTypestateAttr>())
    ParamState = mapParamTypestateAttrState(PTA);
  else if (isConsumableType(ParamType))
    ParamState = mapConsumableAttrState(ParamType);
  else if (isRValueRef(ParamType) &&
           isConsumableType(ParamType->getPointeeType()))
    ParamState = mapConsumableAttrState(ParamType->getPointeeType());
  else if (ParamType->isReferenceType() &&
           isConsumableType(ParamType->getPointeeType()))
    // An lvalue reference aliases caller state we cannot see.
    ParamState = consumed::CS_Unknown;
  if (ParamState != CS_None)
    StateMap->setState(Param, ParamState);
}
// Check the returned value, and all annotated parameters, against the
// function's expected return typestate.
void ConsumedStmtVisitor::VisitReturnStmt(const ReturnStmt *Ret) {
  ConsumedState ExpectedState = Analyzer.getExpectedReturnState();
  if (ExpectedState != CS_None) {
    InfoEntry Entry = findInfo(Ret->getRetValue());
    if (Entry != PropagationMap.end()) {
      ConsumedState RetState = Entry->second.getAsState(StateMap);
      if (RetState != ExpectedState)
        Analyzer.WarningsHandler.warnReturnTypestateMismatch(
          Ret->getReturnLoc(), stateToString(ExpectedState),
          stateToString(RetState));
    }
  }
  StateMap->checkParamsForReturnTypestate(Ret->getLocStart(),
                                          Analyzer.WarningsHandler);
}
void ConsumedStmtVisitor::VisitUnaryOperator(const UnaryOperator *UOp) {
InfoEntry Entry = findInfo(UOp->getSubExpr());
if (Entry == PropagationMap.end()) return;
switch (UOp->getOpcode()) {
case UO_AddrOf:
PropagationMap.insert(PairType(UOp, Entry->second));
break;
case UO_LNot:
if (Entry->second.isTest())
PropagationMap.insert(PairType(UOp, Entry->second.invertTest()));
break;
default:
break;
}
}
// TODO: See if I need to check for reference types here.
// Seed a consumable local variable's state from its initializer when that
// state is known; otherwise fall back to CS_Unknown.
void ConsumedStmtVisitor::VisitVarDecl(const VarDecl *Var) {
  if (isConsumableType(Var->getType())) {
    if (Var->hasInit()) {
      MapType::iterator VIT = findInfo(Var->getInit()->IgnoreImplicit());
      if (VIT != PropagationMap.end()) {
        PropagationInfo PInfo = VIT->second;
        ConsumedState St = PInfo.getAsState(StateMap);
        if (St != consumed::CS_None) {
          StateMap->setState(Var, St);
          return;
        }
      }
    }
    // Otherwise
    StateMap->setState(Var, consumed::CS_Unknown);
  }
}
}} // end clang::consumed::ConsumedStmtVisitor
namespace clang {
namespace consumed {
// Specialize a variable's state along the two branches of an if whose
// condition is a single var test; mark a branch unreachable when the test
// outcome is already decided by the current state.
// NOTE(review): IfNode is currently unused; presumably kept for interface
// symmetry — confirm before removing.
static void splitVarStateForIf(const IfStmt *IfNode, const VarTestResult &Test,
                               ConsumedStateMap *ThenStates,
                               ConsumedStateMap *ElseStates) {
  ConsumedState VarState = ThenStates->getState(Test.Var);
  if (VarState == CS_Unknown) {
    // Unknown before the test: each branch learns the test's outcome.
    ThenStates->setState(Test.Var, Test.TestsFor);
    ElseStates->setState(Test.Var, invertConsumedUnconsumed(Test.TestsFor));
  } else if (VarState == invertConsumedUnconsumed(Test.TestsFor)) {
    ThenStates->markUnreachable();
  } else if (VarState == Test.TestsFor) {
    ElseStates->markUnreachable();
  }
}
// Specialize variable states along the branches of an if whose condition
// is a && / || of var tests (at most one test per side). Short-circuit
// semantics decide which branch learns which outcome, and branches whose
// outcome is already decided are marked unreachable.
static void splitVarStateForIfBinOp(const PropagationInfo &PInfo,
                                    ConsumedStateMap *ThenStates,
                                    ConsumedStateMap *ElseStates) {
  const VarTestResult &LTest = PInfo.getLTest(),
                      &RTest = PInfo.getRTest();
  ConsumedState LState = LTest.Var ? ThenStates->getState(LTest.Var) : CS_None,
                RState = RTest.Var ? ThenStates->getState(RTest.Var) : CS_None;
  if (LTest.Var) {
    if (PInfo.testEffectiveOp() == EO_And) {
      // 'L && R': the then-branch implies L passed.
      if (LState == CS_Unknown) {
        ThenStates->setState(LTest.Var, LTest.TestsFor);
      } else if (LState == invertConsumedUnconsumed(LTest.TestsFor)) {
        ThenStates->markUnreachable();
      } else if (LState == LTest.TestsFor && isKnownState(RState)) {
        // L is known true; the whole condition reduces to R.
        if (RState == RTest.TestsFor)
          ElseStates->markUnreachable();
        else
          ThenStates->markUnreachable();
      }
    } else {
      // 'L || R': the else-branch implies L failed.
      if (LState == CS_Unknown) {
        ElseStates->setState(LTest.Var,
                             invertConsumedUnconsumed(LTest.TestsFor));
      } else if (LState == LTest.TestsFor) {
        ElseStates->markUnreachable();
      } else if (LState == invertConsumedUnconsumed(LTest.TestsFor) &&
                 isKnownState(RState)) {
        // L is known false; the whole condition reduces to R.
        if (RState == RTest.TestsFor)
          ElseStates->markUnreachable();
        else
          ThenStates->markUnreachable();
      }
    }
  }
  if (RTest.Var) {
    if (PInfo.testEffectiveOp() == EO_And) {
      if (RState == CS_Unknown)
        ThenStates->setState(RTest.Var, RTest.TestsFor);
      else if (RState == invertConsumedUnconsumed(RTest.TestsFor))
        ThenStates->markUnreachable();
    } else {
      if (RState == CS_Unknown)
        ElseStates->setState(RTest.Var,
                             invertConsumedUnconsumed(RTest.TestsFor));
      else if (RState == RTest.TestsFor)
        ElseStates->markUnreachable();
    }
  }
}
bool ConsumedBlockInfo::allBackEdgesVisited(const CFGBlock *CurrBlock,
const CFGBlock *TargetBlock) {
assert(CurrBlock && "Block pointer must not be NULL");
assert(TargetBlock && "TargetBlock pointer must not be NULL");
unsigned int CurrBlockOrder = VisitOrder[CurrBlock->getBlockID()];
for (CFGBlock::const_pred_iterator PI = TargetBlock->pred_begin(),
PE = TargetBlock->pred_end(); PI != PE; ++PI) {
if (*PI && CurrBlockOrder < VisitOrder[(*PI)->getBlockID()] )
return false;
}
return true;
}
// Merge StateMap into Block's stored state. AlreadyOwned tracks whether a
// previous call stored StateMap itself: the first empty slot takes the
// pointer directly, later empty slots receive a fresh copy.
void ConsumedBlockInfo::addInfo(const CFGBlock *Block,
                                ConsumedStateMap *StateMap,
                                bool &AlreadyOwned) {
  assert(Block && "Block pointer must not be NULL");
  ConsumedStateMap *Entry = StateMapsArray[Block->getBlockID()];
  if (Entry) {
    Entry->intersect(StateMap);
  } else if (AlreadyOwned) {
    StateMapsArray[Block->getBlockID()] = new ConsumedStateMap(*StateMap);
  } else {
    StateMapsArray[Block->getBlockID()] = StateMap;
    AlreadyOwned = true;
  }
}
void ConsumedBlockInfo::addInfo(const CFGBlock *Block,
ConsumedStateMap *StateMap) {
assert(Block && "Block pointer must not be NULL");
ConsumedStateMap *Entry = StateMapsArray[Block->getBlockID()];
if (Entry) {
Entry->intersect(StateMap);
delete StateMap;
} else {
StateMapsArray[Block->getBlockID()] = StateMap;
}
}
// Return Block's state map without transferring ownership.
ConsumedStateMap* ConsumedBlockInfo::borrowInfo(const CFGBlock *Block) {
  assert(Block && "Block pointer must not be NULL");
  ConsumedStateMap *Info = StateMapsArray[Block->getBlockID()];
  assert(Info && "Block has no block info");
  return Info;
}
// Free and clear the state map stored for Block.
void ConsumedBlockInfo::discardInfo(const CFGBlock *Block) {
  const unsigned BlockID = Block->getBlockID();
  delete StateMapsArray[BlockID];
  StateMapsArray[BlockID] = nullptr;
}
// Hand out the state map for Block. A back-edge target keeps its stored
// map (the caller gets a copy) because the loop's back edges still need
// it; otherwise ownership transfers to the caller and the slot is cleared.
ConsumedStateMap* ConsumedBlockInfo::getInfo(const CFGBlock *Block) {
  assert(Block && "Block pointer must not be NULL");
  ConsumedStateMap *StateMap = StateMapsArray[Block->getBlockID()];
  if (isBackEdgeTarget(Block)) {
    return new ConsumedStateMap(*StateMap);
  } else {
    StateMapsArray[Block->getBlockID()] = nullptr;
    return StateMap;
  }
}
// Returns true if the edge From -> To runs against the visit order,
// i.e. it is a back edge in the CFG.
bool ConsumedBlockInfo::isBackEdge(const CFGBlock *From, const CFGBlock *To) {
  assert(From && "From block must not be NULL");
  // Fixed copy-paste error: this assert previously claimed the *From*
  // block was null when it is actually checking To.
  assert(To && "To block must not be NULL");
  return VisitOrder[From->getBlockID()] > VisitOrder[To->getBlockID()];
}
bool ConsumedBlockInfo::isBackEdgeTarget(const CFGBlock *Block) {
assert(Block && "Block pointer must not be NULL");
// Anything with less than two predecessors can't be the target of a back
// edge.
if (Block->pred_size() < 2)
return false;
unsigned int BlockVisitOrder = VisitOrder[Block->getBlockID()];
for (CFGBlock::const_pred_iterator PI = Block->pred_begin(),
PE = Block->pred_end(); PI != PE; ++PI) {
if (*PI && BlockVisitOrder < VisitOrder[(*PI)->getBlockID()])
return true;
}
return false;
}
// For each tracked parameter carrying a return_typestate annotation,
// verify that its state at function exit matches the annotated state,
// warning at BlameLoc otherwise.
void ConsumedStateMap::checkParamsForReturnTypestate(SourceLocation BlameLoc,
  ConsumedWarningsHandlerBase &WarningsHandler) const {
  for (const auto &DM : VarMap) {
    // dyn_cast replaces the previous isa<> + cast<> pair (LLVM idiom) and
    // removes one nesting level.
    const ParmVarDecl *Param = dyn_cast<ParmVarDecl>(DM.first);
    if (!Param)
      continue;
    const ReturnTypestateAttr *RTA = Param->getAttr<ReturnTypestateAttr>();
    if (!RTA)
      continue;
    ConsumedState ExpectedState = mapReturnTypestateAttrState(RTA);
    if (DM.second != ExpectedState)
      WarningsHandler.warnParamReturnTypestateMismatch(BlameLoc,
        Param->getNameAsString(), stateToString(ExpectedState),
        stateToString(DM.second));
  }
}
// Drop all temporary-object tracking.
void ConsumedStateMap::clearTemporaries() {
  TmpMap.clear();
}
// State lookup for a tracked variable; CS_None when untracked.
ConsumedState ConsumedStateMap::getState(const VarDecl *Var) const {
  VarMapType::const_iterator I = VarMap.find(Var);
  return I == VarMap.end() ? CS_None : I->second;
}
// State lookup for a tracked temporary; CS_None when untracked.
ConsumedState
ConsumedStateMap::getState(const CXXBindTemporaryExpr *Tmp) const {
  TmpMapType::const_iterator I = TmpMap.find(Tmp);
  return I == TmpMap.end() ? CS_None : I->second;
}
// Pairwise merge with Other. If both maps were split from the same source
// condition and Other's branch is unreachable, this branch must be too.
// Otherwise, any variable whose states disagree collapses to CS_Unknown.
void ConsumedStateMap::intersect(const ConsumedStateMap *Other) {
  ConsumedState LocalState;
  if (this->From && this->From == Other->From && !Other->Reachable) {
    this->markUnreachable();
    return;
  }
  for (const auto &DM : Other->VarMap) {
    LocalState = this->getState(DM.first);
    if (LocalState == CS_None)
      continue;
    if (LocalState != DM.second)
      VarMap[DM.first] = CS_Unknown;
  }
}
// Merge the states flowing around a loop's back edge into the loop-head
// map. A variable whose state changed across an iteration becomes
// CS_Unknown and triggers a loop-state-mismatch warning, blamed on the
// last statement of the back-edge block.
void ConsumedStateMap::intersectAtLoopHead(const CFGBlock *LoopHead,
  const CFGBlock *LoopBack, const ConsumedStateMap *LoopBackStates,
  ConsumedWarningsHandlerBase &WarningsHandler) {
  ConsumedState LocalState;
  SourceLocation BlameLoc = getLastStmtLoc(LoopBack);
  for (const auto &DM : LoopBackStates->VarMap) {
    LocalState = this->getState(DM.first);
    if (LocalState == CS_None)
      continue;
    if (LocalState != DM.second) {
      VarMap[DM.first] = CS_Unknown;
      WarningsHandler.warnLoopStateMismatch(BlameLoc,
                                            DM.first->getNameAsString());
    }
  }
}
// Flag this branch as dead and drop all tracked state.
void ConsumedStateMap::markUnreachable() {
  this->Reachable = false;
  VarMap.clear();
  TmpMap.clear();
}
// Record the state of a tracked variable.
void ConsumedStateMap::setState(const VarDecl *Var, ConsumedState State) {
  VarMap[Var] = State;
}
// Record the state of a tracked temporary.
void ConsumedStateMap::setState(const CXXBindTemporaryExpr *Tmp,
                                ConsumedState State) {
  TmpMap[Tmp] = State;
}
// Stop tracking a temporary.
void ConsumedStateMap::remove(const CXXBindTemporaryExpr *Tmp) {
  TmpMap.erase(Tmp);
}
// True when any variable tracked by Other has a different state here.
// Note: only Other's keys are iterated, so this is not symmetric in the
// two maps' key sets.
bool ConsumedStateMap::operator!=(const ConsumedStateMap *Other) const {
  for (const auto &DM : Other->VarMap)
    if (this->getState(DM.first) != DM.second)
      return true;
  return false;
}
// Compute the typestate the analyzed function is expected to leave its
// return value (or, for a constructor, the constructed object) in,
// consulting any return_typestate annotation first.
void ConsumedAnalyzer::determineExpectedReturnState(AnalysisDeclContext &AC,
                                                    const FunctionDecl *D) {
  QualType ReturnType;
  if (const CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
    // For constructors the "return value" is the constructed object.
    ASTContext &CurrContext = AC.getASTContext();
    ReturnType = Constructor->getThisType(CurrContext)->getPointeeType();
  } else
    ReturnType = D->getCallResultType();
  if (const ReturnTypestateAttr *RTSAttr = D->getAttr<ReturnTypestateAttr>()) {
    const CXXRecordDecl *RD = ReturnType->getAsCXXRecordDecl();
    if (!RD || !RD->hasAttr<ConsumableAttr>()) {
      // FIXME: This should be removed when template instantiation propagates
      //        attributes at template specialization definition, not
      //        declaration. When it is removed the test needs to be enabled
      //        in SemaDeclAttr.cpp.
      WarningsHandler.warnReturnTypestateForUnconsumableType(
          RTSAttr->getLocation(), ReturnType.getAsString());
      ExpectedReturnState = CS_None;
    } else
      ExpectedReturnState = mapReturnTypestateAttrState(RTSAttr);
  } else if (isConsumableType(ReturnType)) {
    if (isAutoCastType(ReturnType))   // We can auto-cast the state to the
      ExpectedReturnState = CS_None;  // expected state.
    else
      ExpectedReturnState = mapConsumableAttrState(ReturnType);
  }
  else
    ExpectedReturnState = CS_None;
}
// Split CurrStates at a conditional terminator whose condition involves
// var tests, producing specialized maps for the true and false successors.
// Returns true when a split was performed; CurrStates is consumed and
// nulled in that case.
bool ConsumedAnalyzer::splitState(const CFGBlock *CurrBlock,
                                  const ConsumedStmtVisitor &Visitor) {
  std::unique_ptr<ConsumedStateMap> FalseStates(
      new ConsumedStateMap(*CurrStates));
  PropagationInfo PInfo;
  if (const IfStmt *IfNode =
      dyn_cast_or_null<IfStmt>(CurrBlock->getTerminator().getStmt())) {
    const Expr *Cond = IfNode->getCond();
    PInfo = Visitor.getInfo(Cond);
    if (!PInfo.isValid() && isa<BinaryOperator>(Cond))
      // The test may live on the RHS of a short-circuit condition.
      PInfo = Visitor.getInfo(cast<BinaryOperator>(Cond)->getRHS());
    if (PInfo.isVarTest()) {
      CurrStates->setSource(Cond);
      FalseStates->setSource(Cond);
      splitVarStateForIf(IfNode, PInfo.getVarTest(), CurrStates,
                         FalseStates.get());
    } else if (PInfo.isBinTest()) {
      CurrStates->setSource(PInfo.testSourceNode());
      FalseStates->setSource(PInfo.testSourceNode());
      splitVarStateForIfBinOp(PInfo, CurrStates, FalseStates.get());
    } else {
      return false;
    }
  } else if (const BinaryOperator *BinOp =
    dyn_cast_or_null<BinaryOperator>(CurrBlock->getTerminator().getStmt())) {
    // Short-circuit operator as the block terminator.
    PInfo = Visitor.getInfo(BinOp->getLHS());
    if (!PInfo.isVarTest()) {
      if ((BinOp = dyn_cast_or_null<BinaryOperator>(BinOp->getLHS()))) {
        PInfo = Visitor.getInfo(BinOp->getRHS());
        if (!PInfo.isVarTest())
          return false;
      } else {
        return false;
      }
    }
    CurrStates->setSource(BinOp);
    FalseStates->setSource(BinOp);
    const VarTestResult &Test = PInfo.getVarTest();
    ConsumedState VarState = CurrStates->getState(Test.Var);
    if (BinOp->getOpcode() == BO_LAnd) {
      // True successor implies the LHS test passed.
      if (VarState == CS_Unknown)
        CurrStates->setState(Test.Var, Test.TestsFor);
      else if (VarState == invertConsumedUnconsumed(Test.TestsFor))
        CurrStates->markUnreachable();
    } else if (BinOp->getOpcode() == BO_LOr) {
      // False successor implies the LHS test failed.
      if (VarState == CS_Unknown)
        FalseStates->setState(Test.Var,
                              invertConsumedUnconsumed(Test.TestsFor));
      else if (VarState == Test.TestsFor)
        FalseStates->markUnreachable();
    }
  } else {
    return false;
  }
  // Hand the specialized maps to the true and false successors.
  CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin();
  if (*SI)
    BlockInfo.addInfo(*SI, CurrStates);
  else
    delete CurrStates;
  if (*++SI)
    BlockInfo.addInfo(*SI, FalseStates.release());
  CurrStates = nullptr;
  return true;
}
// Entry point: run the consumed-object analysis over D's CFG, visiting
// blocks in sorted post order and propagating per-block typestate maps.
void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
  const FunctionDecl *D = dyn_cast_or_null<FunctionDecl>(AC.getDecl());
  if (!D)
    return;
  CFG *CFGraph = AC.getCFG();
  if (!CFGraph)
    return;
  determineExpectedReturnState(AC, D);
  PostOrderCFGView *SortedGraph = AC.getAnalysis<PostOrderCFGView>();
  // AC.getCFG()->viewCFG(LangOptions());
  BlockInfo = ConsumedBlockInfo(CFGraph->getNumBlockIDs(), SortedGraph);
  CurrStates = new ConsumedStateMap();
  ConsumedStmtVisitor Visitor(AC, *this, CurrStates);
  // Add all trackable parameters to the state map.
  for (const auto *PI : D->params())
    Visitor.VisitParmVarDecl(PI);
  // Visit all of the function's basic blocks.
  for (const auto *CurrBlock : *SortedGraph) {
    // Obtain the state merged from this block's predecessors, unless the
    // previous iteration already left a map in CurrStates.
    if (!CurrStates)
      CurrStates = BlockInfo.getInfo(CurrBlock);
    if (!CurrStates) {
      continue;
    } else if (!CurrStates->isReachable()) {
      // Dead blocks are skipped entirely.
      delete CurrStates;
      CurrStates = nullptr;
      continue;
    }
    Visitor.reset(CurrStates);
    // Visit all of the basic block's statements.
    for (const auto &B : *CurrBlock) {
      switch (B.getKind()) {
      case CFGElement::Statement:
        Visitor.Visit(B.castAs<CFGStmt>().getStmt());
        break;
      case CFGElement::TemporaryDtor: {
        // A temporary's destructor may carry callable_when; check it,
        // then drop the temporary from the map.
        const CFGTemporaryDtor &DTor = B.castAs<CFGTemporaryDtor>();
        const CXXBindTemporaryExpr *BTE = DTor.getBindTemporaryExpr();
        Visitor.checkCallability(PropagationInfo(BTE),
                                 DTor.getDestructorDecl(AC.getASTContext()),
                                 BTE->getExprLoc());
        CurrStates->remove(BTE);
        break;
      }
      case CFGElement::AutomaticObjectDtor: {
        // Locals are checked against the destructor's callable_when at
        // end of scope.
        const CFGAutomaticObjDtor &DTor = B.castAs<CFGAutomaticObjDtor>();
        SourceLocation Loc = DTor.getTriggerStmt()->getLocEnd();
        const VarDecl *Var = DTor.getVarDecl();
        Visitor.checkCallability(PropagationInfo(Var),
                                 DTor.getDestructorDecl(AC.getASTContext()),
                                 Loc);
        break;
      }
      default:
        break;
      }
    }
    // TODO: Handle other forms of branching with precision, including while-
    //       and for-loops. (Deferred)
    if (!splitState(CurrBlock, Visitor)) {
      CurrStates->setSource(nullptr);
      if (CurrBlock->succ_size() > 1 ||
          (CurrBlock->succ_size() == 1 &&
           (*CurrBlock->succ_begin())->pred_size() > 1)) {
        bool OwnershipTaken = false;
        for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
             SE = CurrBlock->succ_end(); SI != SE; ++SI) {
          if (*SI == nullptr) continue;
          if (BlockInfo.isBackEdge(CurrBlock, *SI)) {
            // Loop head: merge states across the back edge, warning on
            // mismatches; discard the head's info once every back edge
            // has been processed.
            BlockInfo.borrowInfo(*SI)->intersectAtLoopHead(*SI, CurrBlock,
                                                           CurrStates,
                                                           WarningsHandler);
            if (BlockInfo.allBackEdgesVisited(CurrBlock, *SI))
              BlockInfo.discardInfo(*SI);
          } else {
            BlockInfo.addInfo(*SI, CurrStates, OwnershipTaken);
          }
        }
        if (!OwnershipTaken)
          delete CurrStates;
        CurrStates = nullptr;
      }
    }
    // Void functions check annotated parameters at the exit block, since
    // no return statement is required.
    if (CurrBlock == &AC.getCFG()->getExit() &&
        D->getCallResultType()->isVoidType())
      CurrStates->checkParamsForReturnTypestate(D->getLocation(),
                                                WarningsHandler);
  } // End of block iterator.
  // Delete the last existing state map.
  delete CurrStates;
  WarningsHandler.emitDiagnostics();
}
}} // end namespace clang::consumed
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/LiveVariables.cpp | //=- LiveVariables.cpp - Live Variable Analysis for Source CFGs ----------*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements Live Variables analysis for source-level CFGs.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <vector>
using namespace clang;
namespace {
// Worklist of CFG blocks for the backward liveness fixpoint. The list is
// kept ordered via the post-order view's comparator.
class DataflowWorklist {
  SmallVector<const CFGBlock *, 20> worklist;
  // One bit per block ID; prevents duplicate worklist entries.
  llvm::BitVector enqueuedBlocks;
  PostOrderCFGView *POV;
public:
  DataflowWorklist(const CFG &cfg, AnalysisDeclContext &Ctx)
    : enqueuedBlocks(cfg.getNumBlockIDs()),
      POV(Ctx.getAnalysis<PostOrderCFGView>()) {}
  void enqueueBlock(const CFGBlock *block);
  void enqueuePredecessors(const CFGBlock *block);
  const CFGBlock *dequeue();
  void sortWorklist();
};
}
// Add block to the worklist unless it is null or already enqueued.
void DataflowWorklist::enqueueBlock(const clang::CFGBlock *block) {
  if (!block)
    return;
  const unsigned ID = block->getBlockID();
  if (enqueuedBlocks[ID])
    return;
  enqueuedBlocks[ID] = true;
  worklist.push_back(block);
}
// Enqueue every predecessor of block, then restore the worklist ordering.
void DataflowWorklist::enqueuePredecessors(const clang::CFGBlock *block) {
  const unsigned OldWorklistSize = worklist.size();
  for (CFGBlock::const_pred_iterator I = block->pred_begin(),
       E = block->pred_end(); I != E; ++I) {
    enqueueBlock(*I);
  }
  // Skip the re-sort when nothing was added, or when the worklist was
  // empty beforehand (presumably already in an acceptable order then —
  // see callers to confirm).
  if (OldWorklistSize == 0 || OldWorklistSize == worklist.size())
    return;
  sortWorklist();
}
// Order the worklist with the post-order view's block comparator.
void DataflowWorklist::sortWorklist() {
  std::sort(worklist.begin(), worklist.end(), POV->getComparator());
}
// Pop the next block to process, or null when the worklist is exhausted;
// clears the block's enqueued bit so it can be re-added later.
const CFGBlock *DataflowWorklist::dequeue() {
  if (worklist.empty())
    return nullptr;
  const CFGBlock *block = worklist.pop_back_val();
  enqueuedBlocks[block->getBlockID()] = false;
  return block;
}
namespace {
// Implementation state for the liveness analysis, hidden behind the
// opaque LiveVariables::impl pointer.
class LiveVariablesImpl {
public:
  AnalysisDeclContext &analysisContext;
  // Factories for the persistent (immutable) liveness sets.
  llvm::ImmutableSet<const Stmt *>::Factory SSetFact;
  llvm::ImmutableSet<const VarDecl *>::Factory DSetFact;
  // Liveness at the end / beginning of each block, and per statement.
  llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksEndToLiveness;
  llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksBeginToLiveness;
  llvm::DenseMap<const Stmt *, LiveVariables::LivenessValues> stmtsToLiveness;
  llvm::DenseMap<const DeclRefExpr *, unsigned> inAssignment;
  // Whether an assignment kills liveness of the assigned-to variable.
  const bool killAtAssign;
  LiveVariables::LivenessValues
  merge(LiveVariables::LivenessValues valsA,
        LiveVariables::LivenessValues valsB);
  LiveVariables::LivenessValues
  runOnBlock(const CFGBlock *block, LiveVariables::LivenessValues val,
             LiveVariables::Observer *obs = nullptr);
  void dumpBlockLiveness(const SourceManager& M);
  LiveVariablesImpl(AnalysisDeclContext &ac, bool KillAtAssign)
    : analysisContext(ac),
      SSetFact(false), // Do not canonicalize ImmutableSets by default.
      DSetFact(false), // This is a *major* performance win.
      killAtAssign(KillAtAssign) {}
};
}
// Recover the implementation object from the opaque pointer stored in
// LiveVariables::impl. static_cast replaces the previous C-style cast,
// which is discouraged in C++ because it can silently perform any cast.
static LiveVariablesImpl &getImpl(void *x) {
  return *static_cast<LiveVariablesImpl *>(x);
}
//===----------------------------------------------------------------------===//
// Operations and queries on LivenessValues.
//===----------------------------------------------------------------------===//
// Membership query: is statement S in the live-statement set?
bool LiveVariables::LivenessValues::isLive(const Stmt *S) const {
  return liveStmts.contains(S);
}
// Membership query: is variable D in the live-declaration set?
bool LiveVariables::LivenessValues::isLive(const VarDecl *D) const {
  return liveDecls.contains(D);
}
namespace {
// Compute the union of two immutable sets, reusing B wholesale when A is
// empty to avoid rebuilding the tree.
template <typename SET>
SET mergeSets(SET A, SET B) {
  if (A.isEmpty())
    return B;
  typename SET::iterator I = B.begin(), E = B.end();
  for (; I != E; ++I)
    A = A.add(*I);
  return A;
}
}
// Out-of-line virtual method anchor: pins the Observer vtable to this file.
void LiveVariables::Observer::anchor() { }
// Union the statement and declaration liveness sets of two values. The
// merge works on ImmutableSetRefs so intermediate trees are not
// canonicalized; only the final result is.
LiveVariables::LivenessValues
LiveVariablesImpl::merge(LiveVariables::LivenessValues valsA,
                         LiveVariables::LivenessValues valsB) {
  llvm::ImmutableSetRef<const Stmt *>
    SSetRefA(valsA.liveStmts.getRootWithoutRetain(), SSetFact.getTreeFactory()),
    SSetRefB(valsB.liveStmts.getRootWithoutRetain(), SSetFact.getTreeFactory());
  llvm::ImmutableSetRef<const VarDecl *>
    DSetRefA(valsA.liveDecls.getRootWithoutRetain(), DSetFact.getTreeFactory()),
    DSetRefB(valsB.liveDecls.getRootWithoutRetain(), DSetFact.getTreeFactory());
  SSetRefA = mergeSets(SSetRefA, SSetRefB);
  DSetRefA = mergeSets(DSetRefA, DSetRefB);
  // asImmutableSet() canonicalizes the tree, allowing us to do an easy
  // comparison afterwards.
  return LiveVariables::LivenessValues(SSetRefA.asImmutableSet(),
                                       DSetRefA.asImmutableSet());
}
// Equality of the canonicalized liveness sets (used for fixpoint checks).
bool LiveVariables::LivenessValues::equals(const LivenessValues &V) const {
  return liveStmts == V.liveStmts && liveDecls == V.liveDecls;
}
//===----------------------------------------------------------------------===//
// Query methods.
//===----------------------------------------------------------------------===//
// Globals are conservatively treated as always live.
static bool isAlwaysAlive(const VarDecl *D) {
  return D->hasGlobalStorage();
}
// Liveness of variable D at the end of block B.
bool LiveVariables::isLive(const CFGBlock *B, const VarDecl *D) {
  return isAlwaysAlive(D) || getImpl(impl).blocksEndToLiveness[B].isLive(D);
}
// Liveness of variable D at statement S.
bool LiveVariables::isLive(const Stmt *S, const VarDecl *D) {
  return isAlwaysAlive(D) || getImpl(impl).stmtsToLiveness[S].isLive(D);
}
// Liveness of statement/expression S at statement Loc.
bool LiveVariables::isLive(const Stmt *Loc, const Stmt *S) {
  return getImpl(impl).stmtsToLiveness[Loc].isLive(S);
}
//===----------------------------------------------------------------------===//
// Dataflow computation.
//===----------------------------------------------------------------------===//
namespace {
// Backward transfer functions: visiting a statement updates the current
// liveness value in place (kill definitions, gen uses).
class TransferFunctions : public StmtVisitor<TransferFunctions> {
  LiveVariablesImpl &LV;
  // The liveness value being threaded backward through the block.
  LiveVariables::LivenessValues &val;
  // Optional client callback, notified as each statement is processed.
  LiveVariables::Observer *observer;
  const CFGBlock *currentBlock;
public:
  TransferFunctions(LiveVariablesImpl &im,
                    LiveVariables::LivenessValues &Val,
                    LiveVariables::Observer *Observer,
                    const CFGBlock *CurrentBlock)
    : LV(im), val(Val), observer(Observer), currentBlock(CurrentBlock) {}
  void VisitBinaryOperator(BinaryOperator *BO);
  void VisitBlockExpr(BlockExpr *BE);
  void VisitDeclRefExpr(DeclRefExpr *DR);
  void VisitDeclStmt(DeclStmt *DS);
  void VisitObjCForCollectionStmt(ObjCForCollectionStmt *OS);
  void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *UE);
  void VisitUnaryOperator(UnaryOperator *UO);
  void Visit(Stmt *S);
};
}
// Walk an array type's element chain and return the first variable-length
// array that carries a size expression, or null when there is none.
static const VariableArrayType *FindVA(QualType Ty) {
  for (const Type *T = Ty.getTypePtr();
       const ArrayType *AT = dyn_cast<ArrayType>(T);
       T = AT->getElementType().getTypePtr()) {
    const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT);
    if (VAT && VAT->getSizeExpr())
      return VAT;
  }
  return nullptr;
}
// Canonicalize a statement for liveness tracking: repeatedly strip
// parentheses, ExprWithCleanups wrappers, and opaque-value indirection.
static const Stmt *LookThroughStmt(const Stmt *S) {
  for (;;) {
    if (!S)
      break;
    if (const Expr *E = dyn_cast<Expr>(S))
      S = E->IgnoreParens();
    if (const ExprWithCleanups *Cleanups = dyn_cast<ExprWithCleanups>(S)) {
      S = Cleanups->getSubExpr();
    } else if (const OpaqueValueExpr *Opaque = dyn_cast<OpaqueValueExpr>(S)) {
      S = Opaque->getSourceExpr();
    } else {
      break;
    }
  }
  return S;
}
// Insert the canonical form of S (see LookThroughStmt) into the
// live-statement set.
static void AddLiveStmt(llvm::ImmutableSet<const Stmt *> &Set,
                        llvm::ImmutableSet<const Stmt *>::Factory &F,
                        const Stmt *S) {
  const Stmt *Canonical = LookThroughStmt(S);
  Set = F.add(Set, Canonical);
}
/// Backward transfer function for a single statement: notify the observer,
/// dispatch to the Visit* handlers, remove the statement itself from the
/// live-statement set (walking backwards, this is where it is "born"), and
/// finally mark the appropriate subexpressions live.
void TransferFunctions::Visit(Stmt *S) {
  if (observer)
    observer->observeStmt(S, currentBlock, val);
  StmtVisitor<TransferFunctions>::Visit(S);
  if (isa<Expr>(S)) {
    val.liveStmts = LV.SSetFact.remove(val.liveStmts, S);
  }
  // Mark all children expressions live.
  switch (S->getStmtClass()) {
  default:
    break;
  case Stmt::StmtExprClass: {
    // For statement expressions, look through the compound statement.
    S = cast<StmtExpr>(S)->getSubStmt();
    break;
  }
  case Stmt::CXXMemberCallExprClass: {
    // Include the implicit "this" pointer as being live.
    CXXMemberCallExpr *CE = cast<CXXMemberCallExpr>(S);
    if (Expr *ImplicitObj = CE->getImplicitObjectArgument()) {
      AddLiveStmt(val.liveStmts, LV.SSetFact, ImplicitObj);
    }
    break;
  }
  case Stmt::ObjCMessageExprClass: {
    // In calls to super, include the implicit "self" pointer as being live.
    ObjCMessageExpr *CE = cast<ObjCMessageExpr>(S);
    if (CE->getReceiverKind() == ObjCMessageExpr::SuperInstance)
      val.liveDecls = LV.DSetFact.add(val.liveDecls,
                                      LV.analysisContext.getSelfDecl());
    break;
  }
  case Stmt::DeclStmtClass: {
    // VLA size expressions are evaluated at the declaration, so keep one
    // live for each variable-length array dimension.
    const DeclStmt *DS = cast<DeclStmt>(S);
    if (const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl())) {
      for (const VariableArrayType* VA = FindVA(VD->getType());
           VA != nullptr; VA = FindVA(VA->getElementType())) {
        AddLiveStmt(val.liveStmts, LV.SSetFact, VA->getSizeExpr());
      }
    }
    break;
  }
  case Stmt::PseudoObjectExprClass: {
    // A pseudo-object operation only directly consumes its result
    // expression.
    Expr *child = cast<PseudoObjectExpr>(S)->getResultExpr();
    if (!child) return;
    if (OpaqueValueExpr *OV = dyn_cast<OpaqueValueExpr>(child))
      child = OV->getSourceExpr();
    child = child->IgnoreParens();
    val.liveStmts = LV.SSetFact.add(val.liveStmts, child);
    return;
  }
  // FIXME: These cases eventually shouldn't be needed.
  case Stmt::ExprWithCleanupsClass: {
    S = cast<ExprWithCleanups>(S)->getSubExpr();
    break;
  }
  case Stmt::CXXBindTemporaryExprClass: {
    S = cast<CXXBindTemporaryExpr>(S)->getSubExpr();
    break;
  }
  case Stmt::UnaryExprOrTypeTraitExprClass: {
    // No need to unconditionally visit subexpressions.
    return;
  }
  }
  // Everything that falls through: all (non-null) children become live.
  for (Stmt *Child : S->children()) {
    if (Child)
      AddLiveStmt(val.liveStmts, LV.SSetFact, Child);
  }
}
/// When killAtAssign is enabled, an assignment to a plain (non-reference,
/// non-global) variable kills its liveness; the observer is notified of the
/// kill regardless of whether the variable is tracked.
void TransferFunctions::VisitBinaryOperator(BinaryOperator *B) {
  if (B->isAssignmentOp()) {
    if (!LV.killAtAssign)
      return;
    // Assigning to a variable?
    Expr *LHS = B->getLHS()->IgnoreParens();
    if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS))
      if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
        // Assignments to references don't kill the ref's address
        if (VD->getType()->isReferenceType())
          return;
        if (!isAlwaysAlive(VD)) {
          // The variable is now dead.
          val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
        }
        if (observer)
          observer->observerKill(DR);
      }
  }
}
/// Every variable captured by a block literal is a use, so mark each
/// referenced variable live (always-alive globals are not tracked).
void TransferFunctions::VisitBlockExpr(BlockExpr *BE) {
  for (const VarDecl *VD :
       LV.analysisContext.getReferencedBlockVars(BE->getBlockDecl())) {
    if (!isAlwaysAlive(VD))
      val.liveDecls = LV.DSetFact.add(val.liveDecls, VD);
  }
}
/// A reference to a variable is a use, making it live — unless it is
/// always-alive, or was pre-recorded in inAssignment as the LHS of an
/// assignment (which is a write, not a use).
void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *DR) {
  const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
  if (!VD || isAlwaysAlive(VD))
    return;
  if (LV.inAssignment.find(DR) != LV.inAssignment.end())
    return;
  val.liveDecls = LV.DSetFact.add(val.liveDecls, VD);
}
/// Running backwards, a declaration kills the declared variables: nothing
/// before the declaration can observe them (always-alive globals excepted).
void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
  for (const auto *D : DS->decls()) {
    const auto *VD = dyn_cast<VarDecl>(D);
    if (VD && !isAlwaysAlive(VD))
      val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
  }
}
/// The for-in element is written on every iteration, so (running backwards)
/// it is killed here — whether it is declared inline or is an existing
/// lvalue referenced through a DeclRefExpr.
void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *OS) {
  // Kill the iteration variable.
  DeclRefExpr *DR = nullptr;
  const VarDecl *VD = nullptr;
  Stmt *element = OS->getElement();
  if (DeclStmt *DS = dyn_cast<DeclStmt>(element)) {
    VD = cast<VarDecl>(DS->getSingleDecl());
  }
  else if ((DR = dyn_cast<DeclRefExpr>(cast<Expr>(element)->IgnoreParens()))) {
    VD = cast<VarDecl>(DR->getDecl());
  }
  if (VD) {
    val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
    // A kill event needs a DeclRefExpr to point at; the DeclStmt form
    // has none.
    if (observer && DR)
      observer->observerKill(DR);
  }
}
void TransferFunctions::
VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *UE)
{
  // While sizeof(var) doesn't technically extend the liveness of 'var', it
  // does extend the liveness of metadata if 'var' is a VariableArrayType.
  // We handle that special case here.
  if (UE->getKind() != UETT_SizeOf || UE->isArgumentType())
    return;
  const Expr *subEx = UE->getArgumentExpr();
  if (subEx->getType()->isVariableArrayType()) {
    // sizeof on a VLA reads its runtime size, so the operand (an lvalue
    // naming the VLA) stays live.
    assert(subEx->isLValue());
    val.liveStmts = LV.SSetFact.add(val.liveStmts, subEx->IgnoreParens());
  }
}
/// ++/-- both uses and kills its operand, so liveness itself is unchanged;
/// the only work is reporting the kill to an observer, if one is attached.
void TransferFunctions::VisitUnaryOperator(UnaryOperator *UO) {
  if (!observer)
    return;
  auto Op = UO->getOpcode();
  bool IsIncDec = Op == UO_PostInc || Op == UO_PostDec ||
                  Op == UO_PreInc || Op == UO_PreDec;
  if (!IsIncDec)
    return;
  DeclRefExpr *DR = dyn_cast<DeclRefExpr>(UO->getSubExpr()->IgnoreParens());
  if (DR && isa<VarDecl>(DR->getDecl())) {
    // Treat ++/-- as a kill.
    observer->observerKill(DR);
  }
}
/// Apply the backward transfer function to every element of 'block',
/// starting from the given block-exit value 'val'; records the liveness
/// value after each statement in stmtsToLiveness and returns the value at
/// block entry.
LiveVariables::LivenessValues
LiveVariablesImpl::runOnBlock(const CFGBlock *block,
                              LiveVariables::LivenessValues val,
                              LiveVariables::Observer *obs) {
  TransferFunctions TF(*this, val, obs, block);
  // Visit the terminator (if any).
  if (const Stmt *term = block->getTerminator())
    TF.Visit(const_cast<Stmt*>(term));
  // Apply the transfer function for all Stmts in the block.
  for (CFGBlock::const_reverse_iterator it = block->rbegin(),
       ei = block->rend(); it != ei; ++it) {
    const CFGElement &elem = *it;
    // An automatic-object destructor is a use: the variable must be alive
    // to be destroyed.
    if (Optional<CFGAutomaticObjDtor> Dtor =
            elem.getAs<CFGAutomaticObjDtor>()) {
      val.liveDecls = DSetFact.add(val.liveDecls, Dtor->getVarDecl());
      continue;
    }
    if (!elem.getAs<CFGStmt>())
      continue;
    const Stmt *S = elem.castAs<CFGStmt>().getStmt();
    TF.Visit(const_cast<Stmt*>(S));
    stmtsToLiveness[S] = val;
  }
  return val;
}
/// Re-run the transfer functions over every block with the given observer
/// attached, using the already-computed block-exit values.
void LiveVariables::runOnAllBlocks(LiveVariables::Observer &obs) {
  LiveVariablesImpl &LV = getImpl(impl);
  const CFG *cfg = LV.analysisContext.getCFG();
  for (CFG::const_iterator I = cfg->begin(), E = cfg->end(); I != E; ++I)
    LV.runOnBlock(*I, LV.blocksEndToLiveness[*I], &obs);
}
// Takes ownership of the pimpl'd LiveVariablesImpl; released in ~LiveVariables.
LiveVariables::LiveVariables(void *im) : impl(im) {}
LiveVariables::~LiveVariables() {
  // impl is stored type-erased as void*; cast back to delete it.
  delete (LiveVariablesImpl*) impl;
}
/// Run the backward liveness dataflow over AC's CFG to a fixed point.
/// Returns nullptr when there is no CFG or it is too large to analyze;
/// otherwise the returned LiveVariables owns the computed results.
LiveVariables *
LiveVariables::computeLiveness(AnalysisDeclContext &AC,
                               bool killAtAssign) {
  // No CFG? Bail out.
  CFG *cfg = AC.getCFG();
  if (!cfg)
    return nullptr;
  // The analysis currently has scalability issues for very large CFGs.
  // Bail out if it looks too large.
  if (cfg->getNumBlockIDs() > 300000)
    return nullptr;
  LiveVariablesImpl *LV = new LiveVariablesImpl(AC, killAtAssign);
  // Construct the dataflow worklist. Enqueue the exit block as the
  // start of the analysis.
  DataflowWorklist worklist(*cfg, AC);
  llvm::BitVector everAnalyzedBlock(cfg->getNumBlockIDs());
  // FIXME: we should enqueue using post order.
  for (CFG::const_iterator it = cfg->begin(), ei = cfg->end(); it != ei; ++it) {
    const CFGBlock *block = *it;
    worklist.enqueueBlock(block);
    // FIXME: Scan for DeclRefExprs using in the LHS of an assignment.
    // We need to do this because we lack context in the reverse analysis
    // to determine if a DeclRefExpr appears in such a context, and thus
    // doesn't constitute a "use".
    if (killAtAssign)
      for (CFGBlock::const_iterator bi = block->begin(), be = block->end();
           bi != be; ++bi) {
        if (Optional<CFGStmt> cs = bi->getAs<CFGStmt>()) {
          if (const BinaryOperator *BO =
                  dyn_cast<BinaryOperator>(cs->getStmt())) {
            if (BO->getOpcode() == BO_Assign) {
              if (const DeclRefExpr *DR =
                      dyn_cast<DeclRefExpr>(BO->getLHS()->IgnoreParens())) {
                LV->inAssignment[DR] = 1;
              }
            }
          }
        }
      }
  }
  worklist.sortWorklist();
  // Iterate to a fixed point: propagate liveness backwards from each
  // block's successors until no block's entry value changes.
  while (const CFGBlock *block = worklist.dequeue()) {
    // Determine if the block's end value has changed. If not, we
    // have nothing left to do for this block.
    LivenessValues &prevVal = LV->blocksEndToLiveness[block];
    // Merge the values of all successor blocks.
    LivenessValues val;
    for (CFGBlock::const_succ_iterator it = block->succ_begin(),
         ei = block->succ_end(); it != ei; ++it) {
      if (const CFGBlock *succ = *it) {
        val = LV->merge(val, LV->blocksBeginToLiveness[succ]);
      }
    }
    // Always process a block at least once; afterwards only when its
    // merged exit value actually changed.
    if (!everAnalyzedBlock[block->getBlockID()])
      everAnalyzedBlock[block->getBlockID()] = true;
    else if (prevVal.equals(val))
      continue;
    prevVal = val;
    // Update the dataflow value for the start of this block.
    LV->blocksBeginToLiveness[block] = LV->runOnBlock(block, val);
    // Enqueue the value to the predecessors.
    worklist.enqueuePredecessors(block);
  }
  return new LiveVariables(LV);
}
// Public debug entry point; forwards to the pimpl implementation below.
void LiveVariables::dumpBlockLiveness(const SourceManager &M) {
  getImpl(impl).dumpBlockLiveness(M);
}
/// Print to stderr the set of live variables at the exit of every CFG block.
/// Blocks are emitted in block-ID order and each block's variables are
/// sorted by source location, so the output is deterministic.
void LiveVariablesImpl::dumpBlockLiveness(const SourceManager &M) {
  std::vector<const CFGBlock *> vec;
  for (llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues>::iterator
       it = blocksEndToLiveness.begin(), ei = blocksEndToLiveness.end();
       it != ei; ++it) {
    vec.push_back(it->first);
  }
  std::sort(vec.begin(), vec.end(), [](const CFGBlock *A, const CFGBlock *B) {
    return A->getBlockID() < B->getBlockID();
  });
  // Reused across blocks; cleared each iteration.
  std::vector<const VarDecl*> declVec;
  for (std::vector<const CFGBlock *>::iterator
       it = vec.begin(), ei = vec.end(); it != ei; ++it) {
    llvm::errs() << "\n[ B" << (*it)->getBlockID()
                 << " (live variables at block exit) ]\n";
    LiveVariables::LivenessValues vals = blocksEndToLiveness[*it];
    declVec.clear();
    for (llvm::ImmutableSet<const VarDecl *>::iterator si =
           vals.liveDecls.begin(),
           se = vals.liveDecls.end(); si != se; ++si) {
      declVec.push_back(*si);
    }
    std::sort(declVec.begin(), declVec.end(), [](const Decl *A, const Decl *B) {
      return A->getLocStart() < B->getLocStart();
    });
    for (std::vector<const VarDecl*>::iterator di = declVec.begin(),
         de = declVec.end(); di != de; ++di) {
      llvm::errs() << " " << (*di)->getDeclName().getAsString()
                   << " <";
      (*di)->getLocation().dump(M);
      llvm::errs() << ">\n";
    }
  }
  llvm::errs() << "\n";
}
// The address of a function-local static provides a unique analysis identity.
const void *LiveVariables::getTag() { static int x; return &x; }
// Distinct static, so the relaxed variant has its own analysis identity.
const void *RelaxedLiveVariables::getTag() { static int x; return &x; }
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/CFG.cpp |
#include "clang/Analysis/CFG.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "llvm/ADT/DenseMap.h"
#include <memory>
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
namespace {
// For an initialized variable, report the end of its initializer;
// otherwise fall back to the declaration's own location.
static SourceLocation GetEndLoc(Decl *D) {
  VarDecl *VD = dyn_cast<VarDecl>(D);
  if (VD) {
    if (Expr *Init = VD->getInit())
      return Init->getSourceRange().getEnd();
  }
  return D->getLocation();
}
class CFGBuilder;
/// The CFG builder uses a recursive algorithm to build the CFG. When
/// we process an expression, sometimes we know that we must add the
/// subexpressions as block-level expressions. For example:
///
/// exp1 || exp2
///
/// When processing the '||' expression, we know that exp1 and exp2
/// need to be added as block-level expressions, even though they
/// might not normally need to be. AddStmtChoice records this
/// contextual information. If AddStmtChoice is 'NotAlwaysAdd', then
/// the builder has an option not to add a subexpression as a
/// block-level expression.
///
class AddStmtChoice {
public:
  enum Kind { NotAlwaysAdd = 0, AlwaysAdd = 1 };
  AddStmtChoice(Kind a_kind = NotAlwaysAdd) : kind(a_kind) {}
  // True when 'stmt' must be added as a block-level expression — either
  // because this choice is AlwaysAdd, or because the build options force
  // it for this particular statement (defined out of line).
  bool alwaysAdd(CFGBuilder &builder,
                 const Stmt *stmt) const;
  /// Return a copy of this object, except with the 'always-add' bit
  /// set as specified.
  AddStmtChoice withAlwaysAdd(bool alwaysAdd) const {
    return AddStmtChoice(alwaysAdd ? AlwaysAdd : NotAlwaysAdd);
  }
private:
  Kind kind;
};
/// LocalScope - Node in tree of local scopes created for C++ implicit
/// destructor calls generation. It contains list of automatic variables
/// declared in the scope and link to position in previous scope this scope
/// began in.
///
/// The process of creating local scopes is as follows:
/// - Init CFGBuilder::ScopePos with invalid position (equivalent for null),
/// - Before processing statements in scope (e.g. CompoundStmt) create
/// LocalScope object using CFGBuilder::ScopePos as link to previous scope
/// and set CFGBuilder::ScopePos to the end of new scope,
/// - On every occurrence of VarDecl increase CFGBuilder::ScopePos if it points
/// at this VarDecl,
/// - For every normal (without jump) end of scope add to CFGBlock destructors
/// for objects in the current scope,
/// - For every jump add to CFGBlock destructors for objects
/// between CFGBuilder::ScopePos and local scope position saved for jump
/// target. Thanks to C++ restrictions on goto jumps we can be sure that
/// jump target position will be on the path to root from CFGBuilder::ScopePos
/// (adding any variable that doesn't need constructor to be called to
/// LocalScope can break this assumption),
///
class LocalScope {
public:
  typedef BumpVector<VarDecl*> AutomaticVarsTy;
  /// const_iterator - Iterates local scope backwards and jumps to previous
  /// scope on reaching the beginning of currently iterated scope.
  class const_iterator {
    const LocalScope* Scope;
    /// VarIter is guaranteed to be greater then 0 for every valid iterator.
    /// Invalid iterator (with null Scope) has VarIter equal to 0.
    unsigned VarIter;
  public:
    /// Create invalid iterator. Dereferencing invalid iterator is not allowed.
    /// Incrementing invalid iterator is allowed and will result in invalid
    /// iterator.
    const_iterator()
        : Scope(nullptr), VarIter(0) {}
    /// Create valid iterator. In case when S.Prev is an invalid iterator and
    /// I is equal to 0, this will create invalid iterator.
    const_iterator(const LocalScope& S, unsigned I)
        : Scope(&S), VarIter(I) {
      // Iterator to "end" of scope is not allowed. Handle it by going up
      // in scopes tree possibly up to invalid iterator in the root.
      if (VarIter == 0 && Scope)
        *this = Scope->Prev;
    }
    // VarIter is a 1-based index, so element access uses VarIter - 1.
    VarDecl *const* operator->() const {
      assert (Scope && "Dereferencing invalid iterator is not allowed");
      assert (VarIter != 0 && "Iterator has invalid value of VarIter member");
      return &Scope->Vars[VarIter - 1];
    }
    VarDecl *operator*() const {
      return *this->operator->();
    }
    // Pre-increment: step backwards through this scope's variables and hop
    // to the enclosing scope when this one is exhausted.
    const_iterator &operator++() {
      if (!Scope)
        return *this;
      assert (VarIter != 0 && "Iterator has invalid value of VarIter member");
      --VarIter;
      if (VarIter == 0)
        *this = Scope->Prev;
      return *this;
    }
    const_iterator operator++(int) {
      const_iterator P = *this;
      ++*this;
      return P;
    }
    bool operator==(const const_iterator &rhs) const {
      return Scope == rhs.Scope && VarIter == rhs.VarIter;
    }
    bool operator!=(const const_iterator &rhs) const {
      return !(*this == rhs);
    }
    // True for any valid (non-default-constructed) iterator.
    explicit operator bool() const {
      return *this != const_iterator();
    }
    int distance(const_iterator L);
  };
  friend class const_iterator;
private:
  BumpVectorContext ctx;
  /// Automatic variables in order of declaration.
  AutomaticVarsTy Vars;
  /// Iterator to variable in previous scope that was declared just before
  /// begin of this scope.
  const_iterator Prev;
public:
  /// Constructs empty scope linked to previous scope in specified place.
  LocalScope(BumpVectorContext &ctx, const_iterator P)
      : ctx(ctx), Vars(ctx, 4), Prev(P) {}
  /// Begin of scope in direction of CFG building (backwards).
  const_iterator begin() const { return const_iterator(*this, Vars.size()); }
  void addVar(VarDecl *VD) {
    Vars.push_back(VD, ctx);
  }
};
/// distance - Number of automatic variables between this iterator and L.
/// L must be reachable from this iterator via repeated ++; the walk visits
/// each intervening scope once, so the cost is linear in the number of
/// scopes between the two positions.
int LocalScope::const_iterator::distance(LocalScope::const_iterator L) {
  int Count = 0;
  const_iterator Cur = *this;
  while (Cur.Scope != L.Scope) {
    assert (Cur != const_iterator()
        && "L iterator is not reachable from F iterator.");
    Count += Cur.VarIter;
    Cur = Cur.Scope->Prev;
  }
  return Count + (Cur.VarIter - L.VarIter);
}
/// Structure for specifying position in CFG during its build process. It
/// consists of CFGBlock that specifies position in CFG and
/// LocalScope::const_iterator that specifies position in LocalScope graph.
struct BlockScopePosPair {
  BlockScopePosPair() : block(nullptr) {}
  BlockScopePosPair(CFGBlock *b, LocalScope::const_iterator scopePos)
      : block(b), scopePosition(scopePos) {}
  CFGBlock *block;                          // Position in the CFG.
  LocalScope::const_iterator scopePosition; // Position in the scope graph.
};
/// TryResult - a class representing a variant over the values
/// 'true', 'false', or 'unknown'. This is returned by tryEvaluateBool,
/// and is used by the CFGBuilder to decide if a branch condition
/// can be decided up front during CFG construction.
/// TryResult - a tri-state boolean: 'true', 'false', or 'unknown'. This is
/// returned by tryEvaluateBool, and is used by the CFGBuilder to decide if
/// a branch condition can be decided up front during CFG construction.
class TryResult {
  int X; // 1 = true, 0 = false, -1 = unknown.
public:
  TryResult(bool b) : X(b ? 1 : 0) {}
  TryResult() : X(-1) {}
  bool isTrue() const { return X > 0; }
  bool isFalse() const { return X == 0; }
  bool isKnown() const { return !(X < 0); }
  // Flip a known value in place; calling this on 'unknown' is a bug.
  void negate() {
    assert(isKnown());
    X = 1 - X;
  }
};
/// Conjunction over tri-state booleans: unknown if either operand is
/// unknown, otherwise the logical AND of the two known values.
TryResult bothKnownTrue(TryResult R1, TryResult R2) {
  if (R1.isKnown() && R2.isKnown())
    return TryResult(R1.isTrue() && R2.isTrue());
  return TryResult();
}
/// Materializes the children of a statement for reverse-order traversal,
/// which is the order the CFG builder consumes them. For statement kinds
/// whose child array can be referenced directly, no copy is made; otherwise
/// the children are buffered locally (see the out-of-line constructor).
class reverse_children {
  llvm::SmallVector<Stmt *, 12> childrenBuf;
  ArrayRef<Stmt*> children; // Views either childrenBuf or an AST-owned array.
public:
  reverse_children(Stmt *S);
  typedef ArrayRef<Stmt*>::reverse_iterator iterator;
  iterator begin() const { return children.rbegin(); }
  iterator end() const { return children.rend(); }
};
/// Populate 'children' for S, avoiding a copy when the AST already stores
/// the children contiguously (calls and init lists).
reverse_children::reverse_children(Stmt *S) {
  if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
    children = CE->getRawSubExprs();
    return;
  }
  switch (S->getStmtClass()) {
  // Note: Fill in this switch with more cases we want to optimize.
  case Stmt::InitListExprClass: {
    InitListExpr *IE = cast<InitListExpr>(S);
    children = llvm::makeArrayRef(reinterpret_cast<Stmt**>(IE->getInits()),
                                  IE->getNumInits());
    return;
  }
  default:
    break;
  }
  // Default case for all other statements.
  for (Stmt *SubStmt : S->children())
    childrenBuf.push_back(SubStmt);
  // This needs to be done *after* childrenBuf has been populated.
  children = childrenBuf;
}
/// CFGBuilder - This class implements CFG construction from an AST.
/// The builder is stateful: an instance of the builder should be used to only
/// construct a single CFG.
///
/// Example usage:
///
/// CFGBuilder builder;
/// CFG* cfg = builder.BuildAST(stmt1);
///
/// CFG construction is done via a recursive walk of an AST. We actually parse
/// the AST in reverse order so that the successor of a basic block is
/// constructed prior to its predecessor. This allows us to nicely capture
/// implicit fall-throughs without extra basic blocks.
///
class CFGBuilder {
typedef BlockScopePosPair JumpTarget;
typedef BlockScopePosPair JumpSource;
ASTContext *Context;
std::unique_ptr<CFG> cfg;
CFGBlock *Block;
CFGBlock *Succ;
JumpTarget ContinueJumpTarget;
JumpTarget BreakJumpTarget;
CFGBlock *SwitchTerminatedBlock;
CFGBlock *DefaultCaseBlock;
CFGBlock *TryTerminatedBlock;
// Current position in local scope.
LocalScope::const_iterator ScopePos;
// LabelMap records the mapping from Label expressions to their jump targets.
typedef llvm::DenseMap<LabelDecl*, JumpTarget> LabelMapTy;
LabelMapTy LabelMap;
// A list of blocks that end with a "goto" that must be backpatched to their
// resolved targets upon completion of CFG construction.
typedef std::vector<JumpSource> BackpatchBlocksTy;
BackpatchBlocksTy BackpatchBlocks;
// A list of labels whose address has been taken (for indirect gotos).
typedef llvm::SmallPtrSet<LabelDecl*, 5> LabelSetTy;
LabelSetTy AddressTakenLabels;
bool badCFG;
const CFG::BuildOptions &BuildOpts;
// State to track for building switch statements.
bool switchExclusivelyCovered;
Expr::EvalResult *switchCond;
CFG::BuildOptions::ForcedBlkExprs::value_type *cachedEntry;
const Stmt *lastLookup;
// Caches boolean evaluations of expressions to avoid multiple re-evaluations
// during construction of branches for chained logical operators.
typedef llvm::DenseMap<Expr *, TryResult> CachedBoolEvalsTy;
CachedBoolEvalsTy CachedBoolEvals;
public:
/// Construct a builder for a single CFG. Block/Succ and all jump-related
/// state start out empty and are populated during buildCFG().
explicit CFGBuilder(ASTContext *astContext,
                    const CFG::BuildOptions &buildOpts)
    : Context(astContext), cfg(new CFG()), // create a new CFG
      Block(nullptr), Succ(nullptr),
      SwitchTerminatedBlock(nullptr), DefaultCaseBlock(nullptr),
      TryTerminatedBlock(nullptr), badCFG(false), BuildOpts(buildOpts),
      switchExclusivelyCovered(false), switchCond(nullptr),
      cachedEntry(nullptr), lastLookup(nullptr) {}
// buildCFG - Used by external clients to construct the CFG.
std::unique_ptr<CFG> buildCFG(const Decl *D, Stmt *Statement);
bool alwaysAdd(const Stmt *stmt);
private:
// Visitors to walk an AST and construct the CFG.
CFGBlock *VisitAddrLabelExpr(AddrLabelExpr *A, AddStmtChoice asc);
CFGBlock *VisitBinaryOperator(BinaryOperator *B, AddStmtChoice asc);
CFGBlock *VisitBreakStmt(BreakStmt *B);
CFGBlock *VisitCallExpr(CallExpr *C, AddStmtChoice asc);
CFGBlock *VisitCaseStmt(CaseStmt *C);
CFGBlock *VisitChooseExpr(ChooseExpr *C, AddStmtChoice asc);
CFGBlock *VisitCompoundStmt(CompoundStmt *C);
CFGBlock *VisitConditionalOperator(AbstractConditionalOperator *C,
AddStmtChoice asc);
CFGBlock *VisitContinueStmt(ContinueStmt *C);
CFGBlock *VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
AddStmtChoice asc);
CFGBlock *VisitCXXCatchStmt(CXXCatchStmt *S);
CFGBlock *VisitCXXConstructExpr(CXXConstructExpr *C, AddStmtChoice asc);
CFGBlock *VisitCXXNewExpr(CXXNewExpr *DE, AddStmtChoice asc);
CFGBlock *VisitCXXDeleteExpr(CXXDeleteExpr *DE, AddStmtChoice asc);
CFGBlock *VisitCXXForRangeStmt(CXXForRangeStmt *S);
CFGBlock *VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
AddStmtChoice asc);
CFGBlock *VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *C,
AddStmtChoice asc);
CFGBlock *VisitCXXThrowExpr(CXXThrowExpr *T);
CFGBlock *VisitCXXTryStmt(CXXTryStmt *S);
CFGBlock *VisitDeclStmt(DeclStmt *DS);
CFGBlock *VisitDeclSubExpr(DeclStmt *DS);
CFGBlock *VisitDefaultStmt(DefaultStmt *D);
CFGBlock *VisitDoStmt(DoStmt *D);
CFGBlock *VisitExprWithCleanups(ExprWithCleanups *E, AddStmtChoice asc);
CFGBlock *VisitForStmt(ForStmt *F);
CFGBlock *VisitGotoStmt(GotoStmt *G);
CFGBlock *VisitIfStmt(IfStmt *I);
CFGBlock *VisitImplicitCastExpr(ImplicitCastExpr *E, AddStmtChoice asc);
CFGBlock *VisitIndirectGotoStmt(IndirectGotoStmt *I);
CFGBlock *VisitLabelStmt(LabelStmt *L);
CFGBlock *VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc);
CFGBlock *VisitLogicalOperator(BinaryOperator *B);
std::pair<CFGBlock *, CFGBlock *> VisitLogicalOperator(BinaryOperator *B,
Stmt *Term,
CFGBlock *TrueBlock,
CFGBlock *FalseBlock);
CFGBlock *VisitMemberExpr(MemberExpr *M, AddStmtChoice asc);
CFGBlock *VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
CFGBlock *VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
CFGBlock *VisitObjCAtThrowStmt(ObjCAtThrowStmt *S);
CFGBlock *VisitObjCAtTryStmt(ObjCAtTryStmt *S);
CFGBlock *VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
CFGBlock *VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
CFGBlock *VisitPseudoObjectExpr(PseudoObjectExpr *E);
CFGBlock *VisitReturnStmt(ReturnStmt *R);
CFGBlock *VisitStmtExpr(StmtExpr *S, AddStmtChoice asc);
CFGBlock *VisitSwitchStmt(SwitchStmt *S);
CFGBlock *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E,
AddStmtChoice asc);
CFGBlock *VisitUnaryOperator(UnaryOperator *U, AddStmtChoice asc);
CFGBlock *VisitWhileStmt(WhileStmt *W);
CFGBlock *Visit(Stmt *S, AddStmtChoice asc = AddStmtChoice::NotAlwaysAdd);
CFGBlock *VisitStmt(Stmt *S, AddStmtChoice asc);
CFGBlock *VisitChildren(Stmt *S);
CFGBlock *VisitNoRecurse(Expr *E, AddStmtChoice asc);
/// When creating the CFG for temporary destructors, we want to mirror the
/// branch structure of the corresponding constructor calls.
/// Thus, while visiting a statement for temporary destructors, we keep a
/// context to keep track of the following information:
/// - whether a subexpression is executed unconditionally
/// - if a subexpression is executed conditionally, the first
/// CXXBindTemporaryExpr we encounter in that subexpression (which
/// corresponds to the last temporary destructor we have to call for this
/// subexpression) and the CFG block at that point (which will become the
/// successor block when inserting the decision point).
///
/// That way, we can build the branch structure for temporary destructors as
/// follows:
/// 1. If a subexpression is executed unconditionally, we add the temporary
/// destructor calls to the current block.
/// 2. If a subexpression is executed conditionally, when we encounter a
/// CXXBindTemporaryExpr:
/// a) If it is the first temporary destructor call in the subexpression,
/// we remember the CXXBindTemporaryExpr and the current block in the
/// TempDtorContext; we start a new block, and insert the temporary
/// destructor call.
/// b) Otherwise, add the temporary destructor call to the current block.
/// 3. When we finished visiting a conditionally executed subexpression,
/// and we found at least one temporary constructor during the visitation
/// (2.a has executed), we insert a decision block that uses the
/// CXXBindTemporaryExpr as terminator, and branches to the current block
/// if the CXXBindTemporaryExpr was marked executed, and otherwise
/// branches to the stored successor.
struct TempDtorContext {
  // Unconditional context: temporary destructors go in the current block.
  TempDtorContext()
      : IsConditional(false), KnownExecuted(true), Succ(nullptr),
        TerminatorExpr(nullptr) {}
  // Conditional context: KnownExecuted records whether the branch is
  // statically known to run.
  TempDtorContext(TryResult KnownExecuted)
      : IsConditional(true), KnownExecuted(KnownExecuted), Succ(nullptr),
        TerminatorExpr(nullptr) {}
  /// Returns whether we need to start a new branch for a temporary destructor
  /// call. This is the case when the temporary destructor is
  /// conditionally executed, and it is the first one we encounter while
  /// visiting a subexpression - other temporary destructors at the same level
  /// will be added to the same block and are executed under the same
  /// condition.
  bool needsTempDtorBranch() const {
    return IsConditional && !TerminatorExpr;
  }
  /// Remember the successor S of a temporary destructor decision branch for
  /// the corresponding CXXBindTemporaryExpr E.
  void setDecisionPoint(CFGBlock *S, CXXBindTemporaryExpr *E) {
    Succ = S;
    TerminatorExpr = E;
  }
  const bool IsConditional;        // True if created for a conditional branch.
  const TryResult KnownExecuted;   // Static truth value of that branch.
  CFGBlock *Succ;                  // Successor of the pending decision block.
  CXXBindTemporaryExpr *TerminatorExpr; // First bind-temporary seen, if any.
};
// Visitors to walk an AST and generate destructors of temporaries in
// full expression.
CFGBlock *VisitForTemporaryDtors(Stmt *E, bool BindToTemporary,
TempDtorContext &Context);
CFGBlock *VisitChildrenForTemporaryDtors(Stmt *E, TempDtorContext &Context);
CFGBlock *VisitBinaryOperatorForTemporaryDtors(BinaryOperator *E,
TempDtorContext &Context);
CFGBlock *VisitCXXBindTemporaryExprForTemporaryDtors(
CXXBindTemporaryExpr *E, bool BindToTemporary, TempDtorContext &Context);
CFGBlock *VisitConditionalOperatorForTemporaryDtors(
AbstractConditionalOperator *E, bool BindToTemporary,
TempDtorContext &Context);
void InsertTempDtorDecisionBlock(const TempDtorContext &Context,
CFGBlock *FalseSucc = nullptr);
// NYS == Not Yet Supported
// NYS == Not Yet Supported: mark the CFG as bad and bail out with the
// current block unchanged.
CFGBlock *NYS() {
  badCFG = true;
  return Block;
}
// Lazily start a new block if none is currently in progress.
void autoCreateBlock() { if (!Block) Block = createBlock(); }
CFGBlock *createBlock(bool add_successor = true);
CFGBlock *createNoReturnBlock();
// Visit S, forcing it to be retained as a block-level expression.
CFGBlock *addStmt(Stmt *S) {
  return Visit(S, AddStmtChoice::AlwaysAdd);
}
CFGBlock *addInitializer(CXXCtorInitializer *I);
void addAutomaticObjDtors(LocalScope::const_iterator B,
LocalScope::const_iterator E, Stmt *S);
void addImplicitDtorsForDestructor(const CXXDestructorDecl *DD);
// Local scopes creation.
LocalScope* createOrReuseLocalScope(LocalScope* Scope);
void addLocalScopeForStmt(Stmt *S);
LocalScope* addLocalScopeForDeclStmt(DeclStmt *DS,
LocalScope* Scope = nullptr);
LocalScope* addLocalScopeForVarDecl(VarDecl *VD, LocalScope* Scope = nullptr);
void addLocalScopeAndDtors(Stmt *S);
// Interface to CFGBlock - adding CFGElements.
void appendStmt(CFGBlock *B, const Stmt *S) {
  // For statements the client forced to be block-level, record which block
  // they ended up in so ForcedBlkExprs lookups can find them later.
  if (alwaysAdd(S) && cachedEntry)
    cachedEntry->second = B;
  // All block-level expressions should have already been IgnoreParens()ed.
  assert(!isa<Expr>(S) || cast<Expr>(S)->IgnoreParens() == S);
  B->appendStmt(const_cast<Stmt*>(S), cfg->getBumpVectorContext());
}
// Add a C++ constructor-initializer element to block B.
void appendInitializer(CFGBlock *B, CXXCtorInitializer *I) {
  B->appendInitializer(I, cfg->getBumpVectorContext());
}
// Add a new-allocator element (for a CXXNewExpr) to block B.
void appendNewAllocator(CFGBlock *B, CXXNewExpr *NE) {
  B->appendNewAllocator(NE, cfg->getBumpVectorContext());
}
// Add an implicit base-class destructor element to block B.
void appendBaseDtor(CFGBlock *B, const CXXBaseSpecifier *BS) {
  B->appendBaseDtor(BS, cfg->getBumpVectorContext());
}
// Add an implicit member-field destructor element to block B.
void appendMemberDtor(CFGBlock *B, FieldDecl *FD) {
  B->appendMemberDtor(FD, cfg->getBumpVectorContext());
}
// Add a temporary-object destructor element to block B.
void appendTemporaryDtor(CFGBlock *B, CXXBindTemporaryExpr *E) {
  B->appendTemporaryDtor(E, cfg->getBumpVectorContext());
}
// Add an automatic-object destructor element for VD, triggered by S.
void appendAutomaticObjDtor(CFGBlock *B, VarDecl *VD, Stmt *S) {
  B->appendAutomaticObjDtor(VD, S, cfg->getBumpVectorContext());
}
// Add a destructor element for an object freed via a delete-expression.
void appendDeleteDtor(CFGBlock *B, CXXRecordDecl *RD, CXXDeleteExpr *DE) {
  B->appendDeleteDtor(RD, DE, cfg->getBumpVectorContext());
}
void prependAutomaticObjDtorsWithTerminator(CFGBlock *Blk,
LocalScope::const_iterator B, LocalScope::const_iterator E);
// Link S as a successor of B, optionally marking the edge unreachable.
void addSuccessor(CFGBlock *B, CFGBlock *S, bool IsReachable = true) {
  B->addSuccessor(CFGBlock::AdjacentBlock(S, IsReachable),
                  cfg->getBumpVectorContext());
}
/// Add a reachable successor to a block, with the alternate variant that is
/// unreachable.
void addSuccessor(CFGBlock *B, CFGBlock *ReachableBlock, CFGBlock *AltBlock) {
  B->addSuccessor(CFGBlock::AdjacentBlock(ReachableBlock, AltBlock),
                  cfg->getBumpVectorContext());
}
/// \brief Find a relational comparison with an expression evaluating to a
/// boolean and a constant other than 0 and 1.
/// e.g. if ((x < y) == 10)
TryResult checkIncorrectRelationalOperator(const BinaryOperator *B) {
  const Expr *LHSExpr = B->getLHS()->IgnoreParens();
  const Expr *RHSExpr = B->getRHS()->IgnoreParens();
  // Locate the integer-literal side; the other side must be known to
  // produce a boolean (0 or 1) value.
  const IntegerLiteral *IntLiteral = dyn_cast<IntegerLiteral>(LHSExpr);
  const Expr *BoolExpr = RHSExpr;
  bool IntFirst = true;
  if (!IntLiteral) {
    IntLiteral = dyn_cast<IntegerLiteral>(RHSExpr);
    BoolExpr = LHSExpr;
    IntFirst = false;
  }
  if (!IntLiteral || !BoolExpr->isKnownToHaveBooleanValue())
    return TryResult();
  llvm::APInt IntValue = IntLiteral->getValue();
  // Comparisons against 0 or 1 are meaningful for booleans; skip them.
  if ((IntValue == 1) || (IntValue == 0))
    return TryResult();
  // True when the literal is strictly greater than every boolean value.
  bool IntLarger = IntLiteral->getType()->isUnsignedIntegerType() ||
                   !IntValue.isNegative();
  BinaryOperatorKind Bok = B->getOpcode();
  if (Bok == BO_GT || Bok == BO_GE) {
    // Always true for 10 > bool and bool > -1
    // Always false for -1 > bool and bool > 10
    return TryResult(IntFirst == IntLarger);
  } else {
    // Always true for -1 < bool and bool < 10
    // Always false for 10 < bool and bool < -1
    return TryResult(IntFirst != IntLarger);
  }
}
/// Find an incorrect equality comparison. Either with an expression
/// evaluating to a boolean and a constant other than 0 and 1,
/// e.g. if (!x == 10), or a bitwise and/or operation that always evaluates
/// to true/false, e.g. (x & 8) == 4. Returns the comparison's known truth
/// value, or "unknown" when nothing can be concluded.
TryResult checkIncorrectEqualityOperator(const BinaryOperator *B) {
  const Expr *LHSExpr = B->getLHS()->IgnoreParens();
  const Expr *RHSExpr = B->getRHS()->IgnoreParens();
  // Locate the integer-literal side; the other side is the candidate
  // boolean (or bitwise) expression.
  const IntegerLiteral *IntLiteral = dyn_cast<IntegerLiteral>(LHSExpr);
  const Expr *BoolExpr = RHSExpr;
  if (!IntLiteral) {
    IntLiteral = dyn_cast<IntegerLiteral>(RHSExpr);
    BoolExpr = LHSExpr;
  }
  if (!IntLiteral)
    return TryResult();
  const BinaryOperator *BitOp = dyn_cast<BinaryOperator>(BoolExpr);
  if (BitOp && (BitOp->getOpcode() == BO_And ||
                BitOp->getOpcode() == BO_Or)) {
    const Expr *LHSExpr2 = BitOp->getLHS()->IgnoreParens();
    const Expr *RHSExpr2 = BitOp->getRHS()->IgnoreParens();
    const IntegerLiteral *IntLiteral2 = dyn_cast<IntegerLiteral>(LHSExpr2);
    if (!IntLiteral2)
      IntLiteral2 = dyn_cast<IntegerLiteral>(RHSExpr2);
    if (!IntLiteral2)
      return TryResult();
    llvm::APInt L1 = IntLiteral->getValue();
    llvm::APInt L2 = IntLiteral2->getValue();
    // (x & M) == C can only hold when C's set bits are a subset of M's;
    // (x | M) == C can only hold when M's set bits are a subset of C's.
    // Otherwise the comparison has a fixed truth value.
    if ((BitOp->getOpcode() == BO_And && (L2 & L1) != L1) ||
        (BitOp->getOpcode() == BO_Or && (L2 | L1) != L1)) {
      if (BuildOpts.Observer)
        BuildOpts.Observer->compareBitwiseEquality(B,
                                                   B->getOpcode() != BO_EQ);
      // Fixed: this result was previously constructed but never returned,
      // silently discarding the known truth value.
      return TryResult(B->getOpcode() != BO_EQ);
    }
  } else if (BoolExpr->isKnownToHaveBooleanValue()) {
    llvm::APInt IntValue = IntLiteral->getValue();
    // Comparing a boolean against 0 or 1 is meaningful; anything else can
    // never compare equal, so the result is decided by the operator.
    if ((IntValue == 1) || (IntValue == 0)) {
      return TryResult();
    }
    return TryResult(B->getOpcode() != BO_EQ);
  }
  return TryResult();
}
/// Evaluate a relational/equality opcode applied to two known constants.
/// Non-comparison opcodes yield an unknown TryResult.
TryResult analyzeLogicOperatorCondition(BinaryOperatorKind Relation,
                                        const llvm::APSInt &Value1,
                                        const llvm::APSInt &Value2) {
  assert(Value1.isSigned() == Value2.isSigned());
  bool Holds;
  switch (Relation) {
  case BO_EQ:
    Holds = (Value1 == Value2);
    break;
  case BO_NE:
    Holds = (Value1 != Value2);
    break;
  case BO_LT:
    Holds = (Value1 < Value2);
    break;
  case BO_LE:
    Holds = (Value1 <= Value2);
    break;
  case BO_GT:
    Holds = (Value1 > Value2);
    break;
  case BO_GE:
    Holds = (Value1 >= Value2);
    break;
  default:
    return TryResult();
  }
  return TryResult(Holds);
}
/// \brief Find a pair of comparison expressions with or without parentheses
/// with a shared variable and constants and a logical operator between them
/// that always evaluates to either true or false.
/// e.g. if (x != 3 || x != 4)
TryResult checkIncorrectLogicOperator(const BinaryOperator *B) {
  assert(B->isLogicalOp());
  const BinaryOperator *LHS =
      dyn_cast<BinaryOperator>(B->getLHS()->IgnoreParens());
  const BinaryOperator *RHS =
      dyn_cast<BinaryOperator>(B->getRHS()->IgnoreParens());
  if (!LHS || !RHS)
    return TryResult();
  if (!LHS->isComparisonOp() || !RHS->isComparisonOp())
    return TryResult();
  // Normalize the left comparison to "var <op> literal". When the literal is
  // on the left (e.g. 3 < x), the relational opcode must be flipped.
  BinaryOperatorKind BO1 = LHS->getOpcode();
  const DeclRefExpr *Decl1 =
      dyn_cast<DeclRefExpr>(LHS->getLHS()->IgnoreParenImpCasts());
  const IntegerLiteral *Literal1 =
      dyn_cast<IntegerLiteral>(LHS->getRHS()->IgnoreParens());
  if (!Decl1 && !Literal1) {
    if (BO1 == BO_GT)
      BO1 = BO_LT;
    else if (BO1 == BO_GE)
      BO1 = BO_LE;
    else if (BO1 == BO_LT)
      BO1 = BO_GT;
    else if (BO1 == BO_LE)
      BO1 = BO_GE;
    Decl1 = dyn_cast<DeclRefExpr>(LHS->getRHS()->IgnoreParenImpCasts());
    Literal1 = dyn_cast<IntegerLiteral>(LHS->getLHS()->IgnoreParens());
  }
  if (!Decl1 || !Literal1)
    return TryResult();
  // Same normalization for the right comparison.
  BinaryOperatorKind BO2 = RHS->getOpcode();
  const DeclRefExpr *Decl2 =
      dyn_cast<DeclRefExpr>(RHS->getLHS()->IgnoreParenImpCasts());
  const IntegerLiteral *Literal2 =
      dyn_cast<IntegerLiteral>(RHS->getRHS()->IgnoreParens());
  if (!Decl2 && !Literal2) {
    if (BO2 == BO_GT)
      BO2 = BO_LT;
    else if (BO2 == BO_GE)
      BO2 = BO_LE;
    else if (BO2 == BO_LT)
      BO2 = BO_GT;
    else if (BO2 == BO_LE)
      BO2 = BO_GE;
    Decl2 = dyn_cast<DeclRefExpr>(RHS->getRHS()->IgnoreParenImpCasts());
    Literal2 = dyn_cast<IntegerLiteral>(RHS->getLHS()->IgnoreParens());
  }
  if (!Decl2 || !Literal2)
    return TryResult();
  // Check that it is the same variable on both sides.
  if (Decl1->getDecl() != Decl2->getDecl())
    return TryResult();
  llvm::APSInt L1, L2;
  if (!Literal1->EvaluateAsInt(L1, *Context) ||
      !Literal2->EvaluateAsInt(L2, *Context))
    return TryResult();
  // Can't compare signed with unsigned or with different bit width.
  if (L1.isSigned() != L2.isSigned() || L1.getBitWidth() != L2.getBitWidth())
    return TryResult();
  // Values that will be used to determine if result of logical
  // operator is always true/false
  const llvm::APSInt Values[] = {
      // Value less than both Value1 and Value2
      llvm::APSInt::getMinValue(L1.getBitWidth(), L1.isUnsigned()),
      // L1
      L1,
      // Value between Value1 and Value2
      ((L1 < L2) ? L1 : L2) + llvm::APSInt(llvm::APInt(L1.getBitWidth(), 1),
                                           L1.isUnsigned()),
      // L2
      L2,
      // Value greater than both Value1 and Value2
      llvm::APSInt::getMaxValue(L1.getBitWidth(), L1.isUnsigned()),
  };
  // Check whether expression is always true/false by evaluating the following
  // * variable x is less than the smallest literal.
  // * variable x is equal to the smallest literal.
  // * Variable x is between smallest and largest literal.
  // * Variable x is equal to the largest literal.
  // * Variable x is greater than largest literal.
  // These five probes partition the variable's range; if the combined
  // condition has the same truth value for all of them, it is constant.
  bool AlwaysTrue = true, AlwaysFalse = true;
  for (unsigned int ValueIndex = 0;
       ValueIndex < sizeof(Values) / sizeof(Values[0]);
       ++ValueIndex) {
    llvm::APSInt Value = Values[ValueIndex];
    TryResult Res1, Res2;
    Res1 = analyzeLogicOperatorCondition(BO1, Value, L1);
    Res2 = analyzeLogicOperatorCondition(BO2, Value, L2);
    if (!Res1.isKnown() || !Res2.isKnown())
      return TryResult();
    if (B->getOpcode() == BO_LAnd) {
      AlwaysTrue &= (Res1.isTrue() && Res2.isTrue());
      AlwaysFalse &= !(Res1.isTrue() && Res2.isTrue());
    } else {
      AlwaysTrue &= (Res1.isTrue() || Res2.isTrue());
      AlwaysFalse &= !(Res1.isTrue() || Res2.isTrue());
    }
  }
  if (AlwaysTrue || AlwaysFalse) {
    if (BuildOpts.Observer)
      BuildOpts.Observer->compareAlwaysTrue(B, AlwaysTrue);
    return TryResult(AlwaysTrue);
  }
  return TryResult();
}
/// Try and evaluate an expression to an integer constant.
/// Returns false (without touching outResult) when edge pruning is disabled
/// or the expression is type- or value-dependent.
bool tryEvaluate(Expr *S, Expr::EvalResult &outResult) {
  if (!BuildOpts.PruneTriviallyFalseEdges)
    return false;
  if (S->isTypeDependent() || S->isValueDependent())
    return false;
  return S->EvaluateAsRValue(outResult, *Context);
}
/// tryEvaluateBool - Try and evaluate the Stmt and return 0 or 1
/// if we can evaluate to a known value, otherwise return -1.
///
/// Only results for logical operators are cached; other expressions are
/// re-evaluated on each query.
TryResult tryEvaluateBool(Expr *S) {
  if (!BuildOpts.PruneTriviallyFalseEdges ||
      S->isTypeDependent() || S->isValueDependent())
    return TryResult();
  if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(S)) {
    if (Bop->isLogicalOp()) {
      // Check the cache first.
      CachedBoolEvalsTy::iterator I = CachedBoolEvals.find(S);
      if (I != CachedBoolEvals.end())
        return I->second; // already in map;
      // Retrieve result at first, or the map might be updated.
      TryResult Result = evaluateAsBooleanConditionNoCache(S);
      CachedBoolEvals[S] = Result; // update or insert
      return Result;
    }
    else {
      switch (Bop->getOpcode()) {
        default: break;
        // For 'x & 0' and 'x * 0', we can determine that
        // the value is always false.
        case BO_Mul:
        case BO_And: {
          // If either operand is zero, we know the value
          // must be false.
          llvm::APSInt IntVal;
          if (Bop->getLHS()->EvaluateAsInt(IntVal, *Context)) {
            if (!IntVal.getBoolValue()) {
              return TryResult(false);
            }
          }
          if (Bop->getRHS()->EvaluateAsInt(IntVal, *Context)) {
            if (!IntVal.getBoolValue()) {
              return TryResult(false);
            }
          }
        }
        break;
      }
    }
  }
  // Not a binary operator (or no shortcut applied): evaluate directly.
  return evaluateAsBooleanConditionNoCache(S);
}
/// \brief Evaluate as boolean \param E without using the cache.
TryResult evaluateAsBooleanConditionNoCache(Expr *E) {
  if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(E)) {
    if (Bop->isLogicalOp()) {
      TryResult LHS = tryEvaluateBool(Bop->getLHS());
      if (LHS.isKnown()) {
        // We were able to evaluate the LHS, see if we can get away with not
        // evaluating the RHS: 0 && X -> 0, 1 || X -> 1
        if (LHS.isTrue() == (Bop->getOpcode() == BO_LOr))
          return LHS.isTrue();
        TryResult RHS = tryEvaluateBool(Bop->getRHS());
        if (RHS.isKnown()) {
          if (Bop->getOpcode() == BO_LOr)
            return LHS.isTrue() || RHS.isTrue();
          else
            return LHS.isTrue() && RHS.isTrue();
        }
      } else {
        TryResult RHS = tryEvaluateBool(Bop->getRHS());
        if (RHS.isKnown()) {
          // We can't evaluate the LHS; however, sometimes the result
          // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
          if (RHS.isTrue() == (Bop->getOpcode() == BO_LOr))
            return RHS.isTrue();
        } else {
          // Neither side is a constant; check for same-variable comparisons
          // that are constant anyway, e.g. (x != 3 || x != 4).
          TryResult BopRes = checkIncorrectLogicOperator(Bop);
          if (BopRes.isKnown())
            return BopRes.isTrue();
        }
      }
      return TryResult();
    } else if (Bop->isEqualityOp()) {
      TryResult BopRes = checkIncorrectEqualityOperator(Bop);
      if (BopRes.isKnown())
        return BopRes.isTrue();
    } else if (Bop->isRelationalOp()) {
      TryResult BopRes = checkIncorrectRelationalOperator(Bop);
      if (BopRes.isKnown())
        return BopRes.isTrue();
    }
  }
  // Fall back to the general constant evaluator.
  bool Result;
  if (E->EvaluateAsBooleanCondition(Result, *Context))
    return Result;
  return TryResult();
}
};
/// Returns true when the statement must be materialized in the CFG, either
/// because the builder says so or because this choice is AlwaysAdd.
inline bool AddStmtChoice::alwaysAdd(CFGBuilder &builder,
                                     const Stmt *stmt) const {
  // The builder query must run unconditionally: it also primes the
  // forced-block-expr lookup cache as a side effect.
  const bool BuilderSaysAdd = builder.alwaysAdd(stmt);
  return BuilderSaysAdd || kind == AlwaysAdd;
}
/// Decide whether 'stmt' must appear in the CFG, consulting (and updating)
/// the one-entry cache over the forced-block-expression map.
bool CFGBuilder::alwaysAdd(const Stmt *stmt) {
  bool shouldAdd = BuildOpts.alwaysAdd(stmt);
  if (!BuildOpts.forcedBlkExprs)
    return shouldAdd;
  // Fast path: a repeated query for the same statement reuses the result of
  // the previous map lookup.
  if (lastLookup == stmt) {
    if (cachedEntry) {
      assert(cachedEntry->first == stmt);
      return true;
    }
    return shouldAdd;
  }
  lastLookup = stmt;
  // Perform the lookup!
  CFG::BuildOptions::ForcedBlkExprs *fb = *BuildOpts.forcedBlkExprs;
  if (!fb) {
    // No need to update 'cachedEntry', since it will always be null.
    assert(!cachedEntry);
    return shouldAdd;
  }
  CFG::BuildOptions::ForcedBlkExprs::iterator itr = fb->find(stmt);
  if (itr == fb->end()) {
    cachedEntry = nullptr;
    return shouldAdd;
  }
  cachedEntry = &*itr;
  return true;
}
// FIXME: Add support for dependent-sized array types in C++?
// Does it even make sense to build a CFG for an uninstantiated template?
//
// Walk down nested array types and return the first variable-length array
// that carries a size expression, or null if none is found.
static const VariableArrayType *FindVA(const Type *t) {
  for (const ArrayType *ArrTy = dyn_cast<ArrayType>(t); ArrTy;
       ArrTy = dyn_cast<ArrayType>(t)) {
    if (const VariableArrayType *VLA = dyn_cast<VariableArrayType>(ArrTy))
      if (VLA->getSizeExpr())
        return VLA;
    t = ArrTy->getElementType().getTypePtr();
  }
  return nullptr;
}
/// BuildCFG - Constructs a CFG from an AST (a Stmt*). The AST can represent an
/// arbitrary statement. Examples include a single expression or a function
/// body (compound statement). The ownership of the returned CFG is
/// transferred to the caller. If CFG construction fails, this method returns
/// NULL.
std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
  assert(cfg.get());
  if (!Statement)
    return nullptr;
  // Create an empty block that will serve as the exit block for the CFG. Since
  // this is the first block added to the CFG, it will be implicitly registered
  // as the exit block.
  Succ = createBlock();
  assert(Succ == &cfg->getExit());
  Block = nullptr; // the EXIT block is empty. Create all other blocks lazily.
  if (BuildOpts.AddImplicitDtors)
    if (const CXXDestructorDecl *DD = dyn_cast_or_null<CXXDestructorDecl>(D))
      addImplicitDtorsForDestructor(DD);
  // Visit the statements and create the CFG.
  CFGBlock *B = addStmt(Statement);
  if (badCFG)
    return nullptr;
  // For C++ constructor add initializers to CFG. The CFG is built bottom-up,
  // so initializers are processed in reverse source order.
  if (const CXXConstructorDecl *CD = dyn_cast_or_null<CXXConstructorDecl>(D)) {
    for (CXXConstructorDecl::init_const_reverse_iterator I = CD->init_rbegin(),
         E = CD->init_rend(); I != E; ++I) {
      B = addInitializer(*I);
      if (badCFG)
        return nullptr;
    }
  }
  if (B)
    Succ = B;
  // Backpatch the gotos whose label -> block mappings we didn't know when we
  // encountered them.
  for (BackpatchBlocksTy::iterator I = BackpatchBlocks.begin(),
                                   E = BackpatchBlocks.end(); I != E; ++I ) {
    CFGBlock *B = I->block;
    const GotoStmt *G = cast<GotoStmt>(B->getTerminator());
    LabelMapTy::iterator LI = LabelMap.find(G->getLabel());
    // If there is no target for the goto, then we are looking at an
    // incomplete AST. Handle this by not registering a successor.
    if (LI == LabelMap.end()) continue;
    JumpTarget JT = LI->second;
    // Run destructors for objects going out of scope along this goto edge.
    prependAutomaticObjDtorsWithTerminator(B, I->scopePosition,
                                           JT.scopePosition);
    addSuccessor(B, JT.block);
  }
  // Add successors to the Indirect Goto Dispatch block (if we have one).
  if (CFGBlock *B = cfg->getIndirectGotoBlock())
    for (LabelSetTy::iterator I = AddressTakenLabels.begin(),
                              E = AddressTakenLabels.end(); I != E; ++I ) {
      // Lookup the target block.
      LabelMapTy::iterator LI = LabelMap.find(*I);
      // If there is no target block that contains label, then we are looking
      // at an incomplete AST. Handle this by not registering a successor.
      if (LI == LabelMap.end()) continue;
      addSuccessor(B, LI->second.block);
    }
  // Create an empty entry block that has no predecessors.
  cfg->setEntry(createBlock());
  return std::move(cfg);
}
/// createBlock - Used to lazily create blocks that are connected
/// to the current (global) succcessor.
CFGBlock *CFGBuilder::createBlock(bool add_successor) {
  CFGBlock *NewBlock = cfg->createBlock();
  // Wire the fresh block to the current successor unless the caller asked
  // for an unconnected block (or no successor exists yet).
  if (add_successor && Succ)
    addSuccessor(NewBlock, Succ);
  return NewBlock;
}
/// createNoReturnBlock - Used to create a block is a 'noreturn' point in the
/// CFG. It is *not* connected to the current (global) successor, and instead
/// directly tied to the exit block in order to be reachable.
CFGBlock *CFGBuilder::createNoReturnBlock() {
  CFGBlock *B = createBlock(false);
  B->setHasNoReturnElement();
  // Tie the block to the exit; Succ is recorded as the (unreachable)
  // alternate successor so clients can still see the would-be target.
  addSuccessor(B, &cfg->getExit(), Succ);
  return B;
}
/// addInitializer - Add C++ base or member initializer element to CFG.
CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) {
  if (!BuildOpts.AddInitializers)
    return Block;
  bool HasTemporaries = false;
  // Destructors of temporaries in initialization expression should be called
  // after initialization finishes.
  Expr *Init = I->getInit();
  if (Init) {
    HasTemporaries = isa<ExprWithCleanups>(Init);
    if (BuildOpts.AddTemporaryDtors && HasTemporaries) {
      // Generate destructors for temporaries in initialization expression.
      TempDtorContext Context;
      VisitForTemporaryDtors(cast<ExprWithCleanups>(Init)->getSubExpr(),
                             /*BindToTemporary=*/false, Context);
    }
  }
  autoCreateBlock();
  appendInitializer(Block, I);
  if (Init) {
    if (HasTemporaries) {
      // For expression with temporaries go directly to subexpression to omit
      // generating destructors for the second time.
      return Visit(cast<ExprWithCleanups>(Init)->getSubExpr());
    }
    if (BuildOpts.AddCXXDefaultInitExprInCtors) {
      if (CXXDefaultInitExpr *Default = dyn_cast<CXXDefaultInitExpr>(Init)) {
        // In general, appending the expression wrapped by a CXXDefaultInitExpr
        // may cause the same Expr to appear more than once in the CFG. Doing it
        // here is safe because there's only one initializer per field.
        autoCreateBlock();
        appendStmt(Block, Default);
        if (Stmt *Child = Default->getExpr())
          if (CFGBlock *R = Visit(Child))
            Block = R;
        return Block;
      }
    }
    return Visit(Init);
  }
  return Block;
}
/// \brief Retrieve the type of the temporary object whose lifetime was
/// extended by a local reference with the given initializer.
///
/// Peels syntactic layers off the initializer (parens, cleanups,
/// materializations, derived-to-base/no-op casts, rvalue member accesses)
/// until reaching the expression whose type is the actual temporary.
static QualType getReferenceInitTemporaryType(ASTContext &Context,
                                              const Expr *Init) {
  while (true) {
    // Skip parentheses.
    Init = Init->IgnoreParens();
    // Skip through cleanups.
    if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Init)) {
      Init = EWC->getSubExpr();
      continue;
    }
    // Skip through the temporary-materialization expression.
    if (const MaterializeTemporaryExpr *MTE
          = dyn_cast<MaterializeTemporaryExpr>(Init)) {
      Init = MTE->GetTemporaryExpr();
      continue;
    }
    // Skip derived-to-base and no-op casts.
    if (const CastExpr *CE = dyn_cast<CastExpr>(Init)) {
      if ((CE->getCastKind() == CK_DerivedToBase ||
           CE->getCastKind() == CK_UncheckedDerivedToBase ||
           CE->getCastKind() == CK_NoOp) &&
          Init->getType()->isRecordType()) {
        Init = CE->getSubExpr();
        continue;
      }
    }
    // Skip member accesses into rvalues.
    if (const MemberExpr *ME = dyn_cast<MemberExpr>(Init)) {
      if (!ME->isArrow() && ME->getBase()->isRValue()) {
        Init = ME->getBase();
        continue;
      }
    }
    // No more layers to strip.
    break;
  }
  return Init->getType();
}
/// addAutomaticObjDtors - Add to current block automatic objects destructors
/// for objects in range of local scope positions. Use S as trigger statement
/// for destructors.
void CFGBuilder::addAutomaticObjDtors(LocalScope::const_iterator B,
                                      LocalScope::const_iterator E, Stmt *S) {
  if (!BuildOpts.AddImplicitDtors)
    return;
  if (B == E)
    return;
  // We need to append the destructors in reverse order, but any one of them
  // may be a no-return destructor which changes the CFG. As a result, buffer
  // this sequence up and replay them in reverse order when appending onto the
  // CFGBlock(s).
  SmallVector<VarDecl*, 10> Decls;
  Decls.reserve(B.distance(E));
  for (LocalScope::const_iterator I = B; I != E; ++I)
    Decls.push_back(*I);
  for (SmallVectorImpl<VarDecl*>::reverse_iterator I = Decls.rbegin(),
                                                   E = Decls.rend();
       I != E; ++I) {
    // If this destructor is marked as a no-return destructor, we need to
    // create a new block for the destructor which does not have as a successor
    // anything built thus far: control won't flow out of this block.
    QualType Ty = (*I)->getType();
    if (Ty->isReferenceType()) {
      // For a reference, destroy the lifetime-extended temporary it binds.
      Ty = getReferenceInitTemporaryType(*Context, (*I)->getInit());
    }
    Ty = Context->getBaseElementType(Ty);
    if (Ty->getAsCXXRecordDecl()->isAnyDestructorNoReturn())
      Block = createNoReturnBlock();
    else
      autoCreateBlock();
    appendAutomaticObjDtor(Block, *I, S);
  }
}
/// addImplicitDtorsForDestructor - Add implicit destructors generated for
/// base and member objects in destructor.
///
/// Elements are appended in reverse destruction order (the CFG is built
/// bottom-up): virtual bases first, then direct bases, then members.
void CFGBuilder::addImplicitDtorsForDestructor(const CXXDestructorDecl *DD) {
  assert (BuildOpts.AddImplicitDtors
          && "Can be called only when dtors should be added");
  const CXXRecordDecl *RD = DD->getParent();
  // At the end destroy virtual base objects.
  for (const auto &VI : RD->vbases()) {
    const CXXRecordDecl *CD = VI.getType()->getAsCXXRecordDecl();
    if (!CD->hasTrivialDestructor()) {
      autoCreateBlock();
      appendBaseDtor(Block, &VI);
    }
  }
  // Before virtual bases destroy direct base objects.
  for (const auto &BI : RD->bases()) {
    if (!BI.isVirtual()) {
      const CXXRecordDecl *CD = BI.getType()->getAsCXXRecordDecl();
      if (!CD->hasTrivialDestructor()) {
        autoCreateBlock();
        appendBaseDtor(Block, &BI);
      }
    }
  }
  // First destroy member objects.
  for (auto *FI : RD->fields()) {
    // Check for constant size array. Set type to array element type.
    QualType QT = FI->getType();
    if (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
      // Zero-length arrays have nothing to destroy.
      if (AT->getSize() == 0)
        continue;
      QT = AT->getElementType();
    }
    if (const CXXRecordDecl *CD = QT->getAsCXXRecordDecl())
      if (!CD->hasTrivialDestructor()) {
        autoCreateBlock();
        appendMemberDtor(Block, FI);
      }
  }
}
/// createOrReuseLocalScope - If Scope is NULL create new LocalScope. Either
/// way return valid LocalScope object.
LocalScope* CFGBuilder::createOrReuseLocalScope(LocalScope* Scope) {
  if (Scope)
    return Scope;
  // Allocate the scope from the CFG's bump allocator; it lives as long as
  // the CFG itself.
  llvm::BumpPtrAllocator &alloc = cfg->getAllocator();
  LocalScope *Mem = alloc.Allocate<LocalScope>();
  BumpVectorContext ctx(alloc);
  return new (Mem) LocalScope(ctx, ScopePos);
}
/// addLocalScopeForStmt - Add LocalScope to local scopes tree for statement
/// that should create implicit scope (e.g. if/else substatements).
void CFGBuilder::addLocalScopeForStmt(Stmt *S) {
  if (!BuildOpts.AddImplicitDtors)
    return;
  LocalScope *Scope = nullptr;
  // For compound statement we will be creating explicit scope.
  if (CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    for (auto *BI : CS->body()) {
      // Look through labels to the underlying statement.
      Stmt *SI = BI->stripLabelLikeStatements();
      if (DeclStmt *DS = dyn_cast<DeclStmt>(SI))
        // All DeclStmts of the compound share a single LocalScope.
        Scope = addLocalScopeForDeclStmt(DS, Scope);
    }
    return;
  }
  // For any other statement scope will be implicit and as such will be
  // interesting only for DeclStmt.
  if (DeclStmt *DS = dyn_cast<DeclStmt>(S->stripLabelLikeStatements()))
    addLocalScopeForDeclStmt(DS);
}
/// addLocalScopeForDeclStmt - Add LocalScope for declaration statement. Will
/// reuse Scope if not NULL.
LocalScope* CFGBuilder::addLocalScopeForDeclStmt(DeclStmt *DS,
                                                 LocalScope* Scope) {
  if (!BuildOpts.AddImplicitDtors)
    return Scope;
  // Only variable declarations can introduce objects needing destruction.
  for (auto *DI : DS->decls()) {
    VarDecl *VD = dyn_cast<VarDecl>(DI);
    if (VD)
      Scope = addLocalScopeForVarDecl(VD, Scope);
  }
  return Scope;
}
/// addLocalScopeForVarDecl - Add LocalScope for variable declaration. It will
/// create add scope for automatic objects and temporary objects bound to
/// const reference. Will reuse Scope if not NULL.
LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl *VD,
                                                LocalScope* Scope) {
  if (!BuildOpts.AddImplicitDtors)
    return Scope;
  // Check if variable is local.
  switch (VD->getStorageClass()) {
  case SC_None:
  case SC_Auto:
  case SC_Register:
    break;
  default: return Scope;
  }
  // Check for const references bound to temporary. Set type to pointee.
  QualType QT = VD->getType();
  if (QT.getTypePtr()->isReferenceType()) {
    // Attempt to determine whether this declaration lifetime-extends a
    // temporary.
    //
    // FIXME: This is incorrect. Non-reference declarations can lifetime-extend
    // temporaries, and a single declaration can extend multiple temporaries.
    // We should look at the storage duration on each nested
    // MaterializeTemporaryExpr instead.
    const Expr *Init = VD->getInit();
    if (!Init)
      return Scope;
    if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Init))
      Init = EWC->getSubExpr();
    if (!isa<MaterializeTemporaryExpr>(Init))
      return Scope;
    // Lifetime-extending a temporary.
    QT = getReferenceInitTemporaryType(*Context, Init);
  }
  // Check for constant size array. Set type to array element type.
  while (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
    // Zero-length arrays have nothing to destroy.
    if (AT->getSize() == 0)
      return Scope;
    QT = AT->getElementType();
  }
  // Check if type is a C++ class with non-trivial destructor.
  if (const CXXRecordDecl *CD = QT->getAsCXXRecordDecl())
    if (!CD->hasTrivialDestructor()) {
      // Add the variable to scope
      Scope = createOrReuseLocalScope(Scope);
      Scope->addVar(VD);
      ScopePos = Scope->begin();
    }
  return Scope;
}
/// addLocalScopeAndDtors - For given statement add local scope for it and
/// add destructors that will cleanup the scope. Will reuse Scope if not NULL.
void CFGBuilder::addLocalScopeAndDtors(Stmt *S) {
  if (!BuildOpts.AddImplicitDtors)
    return;
  // Remember where the enclosing scope ended, add the statement's scope,
  // then emit destructors for everything between the two positions.
  LocalScope::const_iterator EnclosingScopePos = ScopePos;
  addLocalScopeForStmt(S);
  addAutomaticObjDtors(ScopePos, EnclosingScopePos, S);
}
/// prependAutomaticObjDtorsWithTerminator - Prepend destructor CFGElements for
/// variables with automatic storage duration to CFGBlock's elements vector.
/// Elements will be prepended to physical beginning of the vector which
/// happens to be logical end. Use blocks terminator as statement that specifies
/// destructors call site.
/// FIXME: This mechanism for adding automatic destructors doesn't handle
/// no-return destructors properly.
void CFGBuilder::prependAutomaticObjDtorsWithTerminator(CFGBlock *Blk,
    LocalScope::const_iterator B, LocalScope::const_iterator E) {
  BumpVectorContext &C = cfg->getBumpVectorContext();
  // Reserve room for all the destructor elements up front, then fill.
  CFGBlock::iterator InsertPos
    = Blk->beginAutomaticObjDtorsInsert(Blk->end(), B.distance(E), C);
  for (LocalScope::const_iterator I = B; I != E; ++I)
    InsertPos = Blk->insertAutomaticObjDtor(InsertPos, *I,
                                            Blk->getTerminator());
}
/// Visit - Walk the subtree of a statement and add extra
/// blocks for ternary operators, &&, and ||. We also process "," and
/// DeclStmts (which may contain nested control-flow).
///
/// Dispatches on the statement class; unknown classes fall through to the
/// generic VisitStmt. A null statement marks the CFG as bad.
CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
  if (!S) {
    badCFG = true;
    return nullptr;
  }
  // Parentheses never affect control flow; strip them up front.
  if (Expr *E = dyn_cast<Expr>(S))
    S = E->IgnoreParens();
  switch (S->getStmtClass()) {
    default:
      return VisitStmt(S, asc);
    case Stmt::AddrLabelExprClass:
      return VisitAddrLabelExpr(cast<AddrLabelExpr>(S), asc);
    case Stmt::BinaryConditionalOperatorClass:
      return VisitConditionalOperator(cast<BinaryConditionalOperator>(S), asc);
    case Stmt::BinaryOperatorClass:
      return VisitBinaryOperator(cast<BinaryOperator>(S), asc);
    case Stmt::BlockExprClass:
      return VisitNoRecurse(cast<Expr>(S), asc);
    case Stmt::BreakStmtClass:
      return VisitBreakStmt(cast<BreakStmt>(S));
    case Stmt::CallExprClass:
    case Stmt::CXXOperatorCallExprClass:
    case Stmt::CXXMemberCallExprClass:
    case Stmt::UserDefinedLiteralClass:
      return VisitCallExpr(cast<CallExpr>(S), asc);
    case Stmt::CaseStmtClass:
      return VisitCaseStmt(cast<CaseStmt>(S));
    case Stmt::ChooseExprClass:
      return VisitChooseExpr(cast<ChooseExpr>(S), asc);
    case Stmt::CompoundStmtClass:
      return VisitCompoundStmt(cast<CompoundStmt>(S));
    case Stmt::ConditionalOperatorClass:
      return VisitConditionalOperator(cast<ConditionalOperator>(S), asc);
    case Stmt::ContinueStmtClass:
      return VisitContinueStmt(cast<ContinueStmt>(S));
    case Stmt::CXXCatchStmtClass:
      return VisitCXXCatchStmt(cast<CXXCatchStmt>(S));
    case Stmt::ExprWithCleanupsClass:
      return VisitExprWithCleanups(cast<ExprWithCleanups>(S), asc);
    case Stmt::CXXDefaultArgExprClass:
    case Stmt::CXXDefaultInitExprClass:
      // FIXME: The expression inside a CXXDefaultArgExpr is owned by the
      // called function's declaration, not by the caller. If we simply add
      // this expression to the CFG, we could end up with the same Expr
      // appearing multiple times.
      // PR13385 / <rdar://problem/12156507>
      //
      // It's likewise possible for multiple CXXDefaultInitExprs for the same
      // expression to be used in the same function (through aggregate
      // initialization).
      return VisitStmt(S, asc);
    case Stmt::CXXBindTemporaryExprClass:
      return VisitCXXBindTemporaryExpr(cast<CXXBindTemporaryExpr>(S), asc);
    case Stmt::CXXConstructExprClass:
      return VisitCXXConstructExpr(cast<CXXConstructExpr>(S), asc);
    case Stmt::CXXNewExprClass:
      return VisitCXXNewExpr(cast<CXXNewExpr>(S), asc);
    case Stmt::CXXDeleteExprClass:
      return VisitCXXDeleteExpr(cast<CXXDeleteExpr>(S), asc);
    case Stmt::CXXFunctionalCastExprClass:
      return VisitCXXFunctionalCastExpr(cast<CXXFunctionalCastExpr>(S), asc);
    case Stmt::CXXTemporaryObjectExprClass:
      return VisitCXXTemporaryObjectExpr(cast<CXXTemporaryObjectExpr>(S), asc);
    case Stmt::CXXThrowExprClass:
      return VisitCXXThrowExpr(cast<CXXThrowExpr>(S));
    case Stmt::CXXTryStmtClass:
      return VisitCXXTryStmt(cast<CXXTryStmt>(S));
    case Stmt::CXXForRangeStmtClass:
      return VisitCXXForRangeStmt(cast<CXXForRangeStmt>(S));
    case Stmt::DeclStmtClass:
      return VisitDeclStmt(cast<DeclStmt>(S));
    case Stmt::DefaultStmtClass:
      return VisitDefaultStmt(cast<DefaultStmt>(S));
    case Stmt::DoStmtClass:
      return VisitDoStmt(cast<DoStmt>(S));
    case Stmt::ForStmtClass:
      return VisitForStmt(cast<ForStmt>(S));
    case Stmt::GotoStmtClass:
      return VisitGotoStmt(cast<GotoStmt>(S));
    case Stmt::IfStmtClass:
      return VisitIfStmt(cast<IfStmt>(S));
    case Stmt::ImplicitCastExprClass:
      return VisitImplicitCastExpr(cast<ImplicitCastExpr>(S), asc);
    case Stmt::IndirectGotoStmtClass:
      return VisitIndirectGotoStmt(cast<IndirectGotoStmt>(S));
    case Stmt::LabelStmtClass:
      return VisitLabelStmt(cast<LabelStmt>(S));
    case Stmt::LambdaExprClass:
      return VisitLambdaExpr(cast<LambdaExpr>(S), asc);
    case Stmt::MemberExprClass:
      return VisitMemberExpr(cast<MemberExpr>(S), asc);
    case Stmt::NullStmtClass:
      // Null statements contribute nothing to the CFG.
      return Block;
    case Stmt::ObjCAtCatchStmtClass:
      return VisitObjCAtCatchStmt(cast<ObjCAtCatchStmt>(S));
    case Stmt::ObjCAutoreleasePoolStmtClass:
      return VisitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(S));
    case Stmt::ObjCAtSynchronizedStmtClass:
      return VisitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(S));
    case Stmt::ObjCAtThrowStmtClass:
      return VisitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(S));
    case Stmt::ObjCAtTryStmtClass:
      return VisitObjCAtTryStmt(cast<ObjCAtTryStmt>(S));
    case Stmt::ObjCForCollectionStmtClass:
      return VisitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(S));
    case Stmt::OpaqueValueExprClass:
      // Opaque values are placeholders handled by their enclosing construct.
      return Block;
    case Stmt::PseudoObjectExprClass:
      return VisitPseudoObjectExpr(cast<PseudoObjectExpr>(S));
    case Stmt::ReturnStmtClass:
      return VisitReturnStmt(cast<ReturnStmt>(S));
    case Stmt::UnaryExprOrTypeTraitExprClass:
      return VisitUnaryExprOrTypeTraitExpr(cast<UnaryExprOrTypeTraitExpr>(S),
                                           asc);
    case Stmt::StmtExprClass:
      return VisitStmtExpr(cast<StmtExpr>(S), asc);
    case Stmt::SwitchStmtClass:
      return VisitSwitchStmt(cast<SwitchStmt>(S));
    case Stmt::UnaryOperatorClass:
      return VisitUnaryOperator(cast<UnaryOperator>(S), asc);
    case Stmt::WhileStmtClass:
      return VisitWhileStmt(cast<WhileStmt>(S));
  }
}
/// Generic fallback visitor: optionally materialize the statement in the
/// current block, then recurse into its children.
CFGBlock *CFGBuilder::VisitStmt(Stmt *S, AddStmtChoice asc) {
  const bool ShouldAppend = asc.alwaysAdd(*this, S);
  if (ShouldAppend) {
    autoCreateBlock();
    appendStmt(Block, S);
  }
  return VisitChildren(S);
}
/// VisitChildren - Visit the children of a Stmt.
CFGBlock *CFGBuilder::VisitChildren(Stmt *S) {
  CFGBlock *LastBlock = Block;
  // Children are visited in reverse so that they appear in left-to-right
  // (natural) order in the resulting CFG.
  reverse_children RChildren(S);
  for (reverse_children::iterator CI = RChildren.begin(),
                                  CE = RChildren.end();
       CI != CE; ++CI) {
    Stmt *Child = *CI;
    if (!Child)
      continue;
    if (CFGBlock *R = Visit(Child))
      LastBlock = R;
  }
  return LastBlock;
}
/// Record a label whose address is taken, so the indirect-goto dispatch
/// block can later be wired to it, then optionally append the expression.
CFGBlock *CFGBuilder::VisitAddrLabelExpr(AddrLabelExpr *A,
                                         AddStmtChoice asc) {
  AddressTakenLabels.insert(A->getLabel());
  if (!asc.alwaysAdd(*this, A))
    return Block;
  autoCreateBlock();
  appendStmt(Block, A);
  return Block;
}
/// Append the unary operator when requested, then descend into its operand
/// with a default (non-forcing) choice.
CFGBlock *CFGBuilder::VisitUnaryOperator(UnaryOperator *U,
                                         AddStmtChoice asc) {
  const bool AppendHere = asc.alwaysAdd(*this, U);
  if (AppendHere) {
    autoCreateBlock();
    appendStmt(Block, U);
  }
  return Visit(U->getSubExpr(), AddStmtChoice());
}
/// Entry point for a && / || used as a plain value (not as a branch
/// condition): the current block doubles as both the true and the false
/// confluence target.
CFGBlock *CFGBuilder::VisitLogicalOperator(BinaryOperator *B) {
  CFGBlock *ConfluenceBlock = Block ? Block : createBlock();
  appendStmt(ConfluenceBlock, B);
  if (badCFG)
    return nullptr;
  return VisitLogicalOperator(B, nullptr, ConfluenceBlock,
                              ConfluenceBlock).first;
}
/// Build the CFG for a logical operator, branching to TrueBlock/FalseBlock.
/// \param Term the statement that acts as the branch terminator, or null when
///        the operator's value is merely consumed (TrueBlock == FalseBlock).
/// \returns the entry block (for the LHS) and the exit block (for the RHS).
std::pair<CFGBlock*, CFGBlock*>
CFGBuilder::VisitLogicalOperator(BinaryOperator *B,
                                 Stmt *Term,
                                 CFGBlock *TrueBlock,
                                 CFGBlock *FalseBlock) {
  // Introspect the RHS. If it is a nested logical operation, we recursively
  // build the CFG using this function. Otherwise, resort to default
  // CFG construction behavior.
  Expr *RHS = B->getRHS()->IgnoreParens();
  CFGBlock *RHSBlock, *ExitBlock;
  // do/while(false) only provides a 'break' target for the nested case.
  do {
    if (BinaryOperator *B_RHS = dyn_cast<BinaryOperator>(RHS))
      if (B_RHS->isLogicalOp()) {
        std::tie(RHSBlock, ExitBlock) =
          VisitLogicalOperator(B_RHS, Term, TrueBlock, FalseBlock);
        break;
      }
    // The RHS is not a nested logical operation. Don't push the terminator
    // down further, but instead visit RHS and construct the respective
    // pieces of the CFG, and link up the RHSBlock with the terminator
    // we have been provided.
    ExitBlock = RHSBlock = createBlock(false);
    if (!Term) {
      assert(TrueBlock == FalseBlock);
      addSuccessor(RHSBlock, TrueBlock);
    }
    else {
      RHSBlock->setTerminator(Term);
      TryResult KnownVal = tryEvaluateBool(RHS);
      if (!KnownVal.isKnown())
        KnownVal = tryEvaluateBool(B);
      // A known constant prunes the opposite edge (marked unreachable).
      addSuccessor(RHSBlock, TrueBlock, !KnownVal.isFalse());
      addSuccessor(RHSBlock, FalseBlock, !KnownVal.isTrue());
    }
    Block = RHSBlock;
    RHSBlock = addStmt(RHS);
  }
  while (false);
  if (badCFG)
    return std::make_pair(nullptr, nullptr);
  // Generate the blocks for evaluating the LHS.
  Expr *LHS = B->getLHS()->IgnoreParens();
  if (BinaryOperator *B_LHS = dyn_cast<BinaryOperator>(LHS))
    if (B_LHS->isLogicalOp()) {
      // Short-circuit: for ||, a false LHS falls through to the RHS;
      // for &&, a true LHS falls through to the RHS.
      if (B->getOpcode() == BO_LOr)
        FalseBlock = RHSBlock;
      else
        TrueBlock = RHSBlock;
      // For the LHS, treat 'B' as the terminator that we want to sink
      // into the nested branch. The RHS always gets the top-most
      // terminator.
      return VisitLogicalOperator(B_LHS, B, TrueBlock, FalseBlock);
    }
  // Create the block evaluating the LHS.
  // This contains the '&&' or '||' as the terminator.
  CFGBlock *LHSBlock = createBlock(false);
  LHSBlock->setTerminator(B);
  Block = LHSBlock;
  CFGBlock *EntryLHSBlock = addStmt(LHS);
  if (badCFG)
    return std::make_pair(nullptr, nullptr);
  // See if this is a known constant.
  TryResult KnownVal = tryEvaluateBool(LHS);
  // Now link the LHSBlock with RHSBlock.
  if (B->getOpcode() == BO_LOr) {
    addSuccessor(LHSBlock, TrueBlock, !KnownVal.isFalse());
    addSuccessor(LHSBlock, RHSBlock, !KnownVal.isTrue());
  } else {
    assert(B->getOpcode() == BO_LAnd);
    addSuccessor(LHSBlock, RHSBlock, !KnownVal.isFalse());
    addSuccessor(LHSBlock, FalseBlock, !KnownVal.isTrue());
  }
  return std::make_pair(EntryLHSBlock, ExitBlock);
}
/// Build the CFG for a binary operator, dispatching on its kind:
/// logical ops get branch structure, comma and assignment get fixed
/// evaluation order, everything else is handled generically.
CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B,
                                          AddStmtChoice asc) {
  // && or ||
  if (B->isLogicalOp())
    return VisitLogicalOperator(B);
  if (B->getOpcode() == BO_Comma) { // ,
    autoCreateBlock();
    appendStmt(Block, B);
    // CFG is built bottom-up, so RHS is added first (it executes last).
    addStmt(B->getRHS());
    return addStmt(B->getLHS());
  }
  if (B->isAssignmentOp()) {
    if (asc.alwaysAdd(*this, B)) {
      autoCreateBlock();
      appendStmt(Block, B);
    }
    Visit(B->getLHS());
    return Visit(B->getRHS());
  }
  if (asc.alwaysAdd(*this, B)) {
    autoCreateBlock();
    appendStmt(Block, B);
  }
  CFGBlock *RBlock = Visit(B->getRHS());
  CFGBlock *LBlock = Visit(B->getLHS());
  // If visiting RHS causes us to finish 'Block', e.g. the RHS is a StmtExpr
  // containing a DoStmt, and the LHS doesn't create a new block, then we should
  // return RBlock. Otherwise we'll incorrectly return NULL.
  return (LBlock ? LBlock : RBlock);
}
CFGBlock *CFGBuilder::VisitNoRecurse(Expr *E, AddStmtChoice asc) {
  // Record the expression itself (when the caller requests it) without
  // descending into any of its subexpressions.
  if (!asc.alwaysAdd(*this, E))
    return Block;

  autoCreateBlock();
  appendStmt(Block, E);
  return Block;
}
CFGBlock *CFGBuilder::VisitBreakStmt(BreakStmt *B) {
  // "break" is a control-flow statement, so it ends the block currently
  // being built.
  if (badCFG)
    return nullptr;

  // Start a fresh block whose terminator is the break statement itself.
  Block = createBlock(false);
  Block->setTerminator(B);

  // A break with no enclosing target means we are looking at an incomplete
  // AST; no CFG can be constructed for it.
  if (!BreakJumpTarget.block) {
    badCFG = true;
    return Block;
  }

  // Run destructors for objects going out of scope, then branch to the code
  // that follows the enclosing loop/switch.
  addAutomaticObjDtors(ScopePos, BreakJumpTarget.scopePosition, B);
  addSuccessor(Block, BreakJumpTarget.block);
  return Block;
}
/// Return true unless the callee expression's function type carries a
/// resolved, provably non-throwing exception specification.
static bool CanThrow(Expr *E, ASTContext &Ctx) {
  // Look through pointer-to-function and pointer-to-block types to reach
  // the underlying function type.
  QualType Ty = E->getType();
  if (Ty->isFunctionPointerType())
    Ty = Ty->getAs<PointerType>()->getPointeeType();
  else if (Ty->isBlockPointerType())
    Ty = Ty->getAs<BlockPointerType>()->getPointeeType();

  // Without a prototype we must conservatively assume the call can throw.
  const FunctionType *FT = Ty->getAs<FunctionType>();
  if (!FT)
    return true;
  const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT);
  if (!Proto)
    return true;

  // An unresolved exception spec proves nothing either way.
  if (isUnresolvedExceptionSpec(Proto->getExceptionSpecType()))
    return true;

  return !Proto->isNothrow(Ctx);
}
/// Build CFG elements for a call expression.  Handles no-return callees
/// (which terminate the block), optional exceptional edges when exceptions
/// are enabled, and builtins that do not evaluate their arguments.
CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
  // Compute the callee type.
  QualType calleeType = C->getCallee()->getType();
  if (calleeType == Context->BoundMemberTy) {
    QualType boundType = Expr::findBoundMemberType(C->getCallee());

    // We should only get a null bound type if processing a dependent
    // CFG.  Recover by assuming nothing.
    if (!boundType.isNull()) calleeType = boundType;
  }

  // If this is a call to a no-return function, this stops the block here.
  bool NoReturn = getFunctionExtInfo(*calleeType).getNoReturn();

  bool AddEHEdge = false;

  // Languages without exceptions are assumed to not throw.
  if (Context->getLangOpts().Exceptions) {
    if (BuildOpts.AddEHEdges)
      AddEHEdge = true;
  }

  // If this is a call to a builtin function, it might not actually evaluate
  // its arguments.  Don't add them to the CFG if this is the case.
  bool OmitArguments = false;

  // Refine the flags using the direct callee's declaration when available.
  if (FunctionDecl *FD = C->getDirectCallee()) {
    if (FD->isNoReturn())
      NoReturn = true;
    if (FD->hasAttr<NoThrowAttr>())
      AddEHEdge = false;
    if (FD->getBuiltinID() == Builtin::BI__builtin_object_size)
      OmitArguments = true;
  }

  // A provably non-throwing callee needs no exceptional edge.
  if (!CanThrow(C->getCallee(), *Context))
    AddEHEdge = false;

  if (OmitArguments) {
    assert(!NoReturn && "noreturn calls with unevaluated args not implemented");
    assert(!AddEHEdge && "EH calls with unevaluated args not implemented");
    autoCreateBlock();
    appendStmt(Block, C);
    // Only the callee expression is evaluated; arguments are skipped.
    return Visit(C->getCallee());
  }

  if (!NoReturn && !AddEHEdge) {
    // Ordinary call: treat it like any other expression.
    return VisitStmt(C, asc.withAlwaysAdd(true));
  }

  if (Block) {
    Succ = Block;
    if (badCFG)
      return nullptr;
  }

  if (NoReturn)
    Block = createNoReturnBlock();
  else
    Block = createBlock();

  appendStmt(Block, C);

  if (AddEHEdge) {
    // Add exceptional edges.
    if (TryTerminatedBlock)
      addSuccessor(Block, TryTerminatedBlock);
    else
      addSuccessor(Block, &cfg->getExit());
  }

  return VisitChildren(C);
}
/// Build CFG elements for a GNU __builtin_choose_expr.  Both arms are built
/// in their own blocks feeding a common confluence block, and the branch's
/// reachability is pruned when the condition is a known constant.
CFGBlock *CFGBuilder::VisitChooseExpr(ChooseExpr *C,
                                      AddStmtChoice asc) {
  CFGBlock *ConfluenceBlock = Block ? Block : createBlock();
  appendStmt(ConfluenceBlock, C);
  if (badCFG)
    return nullptr;

  AddStmtChoice alwaysAdd = asc.withAlwaysAdd(true);
  // Build the LHS arm with the confluence block as its successor.
  Succ = ConfluenceBlock;
  Block = nullptr;
  CFGBlock *LHSBlock = Visit(C->getLHS(), alwaysAdd);
  if (badCFG)
    return nullptr;

  // Likewise for the RHS arm.
  Succ = ConfluenceBlock;
  Block = nullptr;
  CFGBlock *RHSBlock = Visit(C->getRHS(), alwaysAdd);
  if (badCFG)
    return nullptr;

  // The branch block: its terminator is the ChooseExpr itself.
  Block = createBlock(false);
  // See if this is a known constant.
  const TryResult& KnownVal = tryEvaluateBool(C->getCond());
  // A constant condition makes the untaken arm unreachable (null successor).
  addSuccessor(Block, KnownVal.isFalse() ? nullptr : LHSBlock);
  addSuccessor(Block, KnownVal.isTrue() ? nullptr : RHSBlock);
  Block->setTerminator(C);
  return addStmt(C->getCond());
}
CFGBlock *CFGBuilder::VisitCompoundStmt(CompoundStmt *C) {
  // Open a scope for the compound statement and schedule destructors for
  // the variables it declares.
  addLocalScopeAndDtors(C);

  // Walk the children in reverse source order, since the CFG is built
  // backwards.  Statements that contribute no block (e.g. a lone ';')
  // simply leave the last real block in place.
  CFGBlock *Result = Block;
  for (CompoundStmt::reverse_body_iterator It = C->body_rbegin(),
                                           End = C->body_rend();
       It != End; ++It) {
    if (CFGBlock *NewBlock = addStmt(*It))
      Result = NewBlock;
    if (badCFG)
      return nullptr;
  }
  return Result;
}
/// Build CFG elements for a conditional operator (?:), including the GNU
/// binary form (x ?: y) where the condition and true branch share an opaque
/// value and must not be evaluated twice.
CFGBlock *CFGBuilder::VisitConditionalOperator(AbstractConditionalOperator *C,
                                               AddStmtChoice asc) {
  const BinaryConditionalOperator *BCO = dyn_cast<BinaryConditionalOperator>(C);
  const OpaqueValueExpr *opaqueValue = (BCO ? BCO->getOpaqueValue() : nullptr);

  // Create the confluence block that will "merge" the results of the ternary
  // expression.
  CFGBlock *ConfluenceBlock = Block ? Block : createBlock();
  appendStmt(ConfluenceBlock, C);
  if (badCFG)
    return nullptr;

  AddStmtChoice alwaysAdd = asc.withAlwaysAdd(true);

  // Create a block for the LHS expression if there is an LHS expression.  A
  // GCC extension allows LHS to be NULL, causing the condition to be the
  // value that is returned instead.
  //  e.g: x ?: y is shorthand for: x ? x : y;
  Succ = ConfluenceBlock;
  Block = nullptr;
  CFGBlock *LHSBlock = nullptr;
  const Expr *trueExpr = C->getTrueExpr();
  if (trueExpr != opaqueValue) {
    LHSBlock = Visit(C->getTrueExpr(), alwaysAdd);
    if (badCFG)
      return nullptr;
    Block = nullptr;
  }
  else
    // Binary form: the "true" result is the condition itself, so the true
    // edge flows straight to the confluence block.
    LHSBlock = ConfluenceBlock;

  // Create the block for the RHS expression.
  Succ = ConfluenceBlock;
  CFGBlock *RHSBlock = Visit(C->getFalseExpr(), alwaysAdd);
  if (badCFG)
    return nullptr;

  // If the condition is a logical '&&' or '||', build a more accurate CFG.
  if (BinaryOperator *Cond =
        dyn_cast<BinaryOperator>(C->getCond()->IgnoreParens()))
    if (Cond->isLogicalOp())
      return VisitLogicalOperator(Cond, C, LHSBlock, RHSBlock).first;

  // Create the block that will contain the condition.
  Block = createBlock(false);

  // See if this is a known constant.
  const TryResult& KnownVal = tryEvaluateBool(C->getCond());
  addSuccessor(Block, LHSBlock, !KnownVal.isFalse());
  addSuccessor(Block, RHSBlock, !KnownVal.isTrue());
  Block->setTerminator(C);
  Expr *condExpr = C->getCond();

  if (opaqueValue) {
    // Run the condition expression if it's not trivially expressed in
    // terms of the opaque value (or if there is no opaque value).
    if (condExpr != opaqueValue)
      addStmt(condExpr);

    // Before that, run the common subexpression if there was one.
    // At least one of this or the above will be run.
    return addStmt(BCO->getCommon());
  }

  return addStmt(condExpr);
}
/// Build CFG elements for a declaration statement.  A DeclStmt declaring
/// several variables is split into one synthetic single-decl DeclStmt per
/// variable so each initializer becomes its own CFG element.
CFGBlock *CFGBuilder::VisitDeclStmt(DeclStmt *DS) {
  // Check if the Decl is for an __label__.  If so, elide it from the
  // CFG entirely.
  if (isa<LabelDecl>(*DS->decl_begin()))
    return Block;

  // This case also handles static_asserts.
  if (DS->isSingleDecl())
    return VisitDeclSubExpr(DS);

  CFGBlock *B = nullptr;

  // Build an individual DeclStmt for each decl.  Iterate in reverse since
  // the CFG is constructed backwards.
  for (DeclStmt::reverse_decl_iterator I = DS->decl_rbegin(),
                                       E = DS->decl_rend();
       I != E; ++I) {
    // Get the alignment of the new DeclStmt, padding out to >=8 bytes.
    unsigned A = llvm::AlignOf<DeclStmt>::Alignment < 8
               ? 8 : llvm::AlignOf<DeclStmt>::Alignment;

    // Allocate the DeclStmt using the BumpPtrAllocator.  It will get
    // automatically freed with the CFG.
    DeclGroupRef DG(*I);
    Decl *D = *I;
    void *Mem = cfg->getAllocator().Allocate(sizeof(DeclStmt), A);
    DeclStmt *DSNew = new (Mem) DeclStmt(DG, D->getLocation(), GetEndLoc(D));
    // Record the synthetic statement so clients can map it back to DS.
    cfg->addSyntheticDeclStmt(DSNew, DS);

    // Append the fake DeclStmt to block.
    B = VisitDeclSubExpr(DSNew);
  }

  return B;
}
/// VisitDeclSubExpr - Utility method to add block-level expressions for
/// DeclStmts and initializers in them.
///
/// Handles only single-declaration DeclStmts.  For static locals (when
/// BuildOpts.AddStaticInitBranches is set) the initialization is guarded by
/// a branch; temporaries in the initializer get destructor elements, and VLA
/// size expressions are added to the CFG as well.
CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
  assert(DS->isSingleDecl() && "Can handle single declarations only.");
  VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());

  if (!VD) {
    // Of everything that can be declared in a DeclStmt, only VarDecls impact
    // runtime semantics.
    return Block;
  }

  bool HasTemporaries = false;

  // Guard static initializers under a branch.
  CFGBlock *blockAfterStaticInit = nullptr;

  if (BuildOpts.AddStaticInitBranches && VD->isStaticLocal()) {
    // For static variables, we need to create a branch to track
    // whether or not they are initialized.
    if (Block) {
      Succ = Block;
      Block = nullptr;
      if (badCFG)
        return nullptr;
    }
    blockAfterStaticInit = Succ;
  }

  // Destructors of temporaries in initialization expression should be called
  // after initialization finishes.
  Expr *Init = VD->getInit();
  if (Init) {
    HasTemporaries = isa<ExprWithCleanups>(Init);

    if (BuildOpts.AddTemporaryDtors && HasTemporaries) {
      // Generate destructors for temporaries in initialization expression.
      TempDtorContext Context;
      VisitForTemporaryDtors(cast<ExprWithCleanups>(Init)->getSubExpr(),
                             /*BindToTemporary=*/false, Context);
    }
  }

  autoCreateBlock();
  appendStmt(Block, DS);

  // Keep track of the last non-null block, as 'Block' can be nulled out
  // if the initializer expression is something like a 'while' in a
  // statement-expression.
  CFGBlock *LastBlock = Block;

  if (Init) {
    if (HasTemporaries) {
      // For expression with temporaries go directly to subexpression to omit
      // generating destructors for the second time.
      ExprWithCleanups *EC = cast<ExprWithCleanups>(Init);
      if (CFGBlock *newBlock = Visit(EC->getSubExpr()))
        LastBlock = newBlock;
    }
    else {
      if (CFGBlock *newBlock = Visit(Init))
        LastBlock = newBlock;
    }
  }

  // If the type of VD is a VLA, then we must process its size expressions.
  for (const VariableArrayType* VA = FindVA(VD->getType().getTypePtr());
       VA != nullptr; VA = FindVA(VA->getElementType().getTypePtr())) {
    if (CFGBlock *newBlock = addStmt(VA->getSizeExpr()))
      LastBlock = newBlock;
  }

  // Remove variable from local scope.
  if (ScopePos && VD == *ScopePos)
    ++ScopePos;

  CFGBlock *B = LastBlock;
  if (blockAfterStaticInit) {
    // Insert the "already initialized?" branch: one edge skips the init
    // code, the other runs it.
    Succ = B;
    Block = createBlock(false);
    Block->setTerminator(DS);
    addSuccessor(Block, blockAfterStaticInit);
    addSuccessor(Block, B);
    B = Block;
  }

  return B;
}
/// Build CFG elements for an if statement: then/else branch blocks, a
/// terminator block for the condition, constant-condition reachability
/// pruning, and optional condition-variable handling.
CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
  // We may see an if statement in the middle of a basic block, or it may be the
  // first statement we are processing.  In either case, we create a new basic
  // block.  First, we create the blocks for the then...else statements, and
  // then we create the block containing the if statement.  If we were in the
  // middle of a block, we stop processing that block.  That block is then the
  // implicit successor for the "then" and "else" clauses.

  // Save local scope position because in case of condition variable ScopePos
  // won't be restored when traversing AST.
  SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);

  // Create local scope for possible condition variable.
  // Store scope position.  Add implicit destructor.
  if (VarDecl *VD = I->getConditionVariable()) {
    LocalScope::const_iterator BeginScopePos = ScopePos;
    addLocalScopeForVarDecl(VD);
    addAutomaticObjDtors(ScopePos, BeginScopePos, I);
  }

  // The block we were processing is now finished.  Make it the successor
  // block.
  if (Block) {
    Succ = Block;
    if (badCFG)
      return nullptr;
  }

  // Process the false branch.
  CFGBlock *ElseBlock = Succ;

  if (Stmt *Else = I->getElse()) {
    SaveAndRestore<CFGBlock*> sv(Succ);

    // NULL out Block so that the recursive call to Visit will
    // create a new basic block.
    Block = nullptr;

    // If branch is not a compound statement create implicit scope
    // and add destructors.
    if (!isa<CompoundStmt>(Else))
      addLocalScopeAndDtors(Else);

    ElseBlock = addStmt(Else);

    if (!ElseBlock) // Can occur when the Else body has all NullStmts.
      ElseBlock = sv.get();
    else if (Block) {
      if (badCFG)
        return nullptr;
    }
  }

  // Process the true branch.
  CFGBlock *ThenBlock;
  {
    Stmt *Then = I->getThen();
    assert(Then);
    SaveAndRestore<CFGBlock*> sv(Succ);
    Block = nullptr;

    // If branch is not a compound statement create implicit scope
    // and add destructors.
    if (!isa<CompoundStmt>(Then))
      addLocalScopeAndDtors(Then);

    ThenBlock = addStmt(Then);

    if (!ThenBlock) {
      // We can reach here if the "then" body has all NullStmts.
      // Create an empty block so we can distinguish between true and false
      // branches in path-sensitive analyses.
      ThenBlock = createBlock(false);
      addSuccessor(ThenBlock, sv.get());
    } else if (Block) {
      if (badCFG)
        return nullptr;
    }
  }

  // Specially handle "if (expr1 || ...)" and "if (expr1 && ...)" by
  // having these handle the actual control-flow jump.  Note that
  // if we introduce a condition variable, e.g. "if (int x = exp1 || exp2)"
  // we resort to the old control-flow behavior.  This special handling
  // removes infeasible paths from the control-flow graph by having the
  // control-flow transfer of '&&' or '||' go directly into the then/else
  // blocks directly.
  if (!I->getConditionVariable())
    if (BinaryOperator *Cond =
            dyn_cast<BinaryOperator>(I->getCond()->IgnoreParens()))
      if (Cond->isLogicalOp())
        return VisitLogicalOperator(Cond, I, ThenBlock, ElseBlock).first;

  // Now create a new block containing the if statement.
  Block = createBlock(false);

  // Set the terminator of the new block to the If statement.
  Block->setTerminator(I);

  // See if this is a known constant.
  const TryResult &KnownVal = tryEvaluateBool(I->getCond());

  // Add the successors.  If we know that specific branches are
  // unreachable, inform addSuccessor() of that knowledge.
  addSuccessor(Block, ThenBlock, /* isReachable = */ !KnownVal.isFalse());
  addSuccessor(Block, ElseBlock, /* isReachable = */ !KnownVal.isTrue());

  // Add the condition as the last statement in the new block.  This may create
  // new blocks as the condition may contain control-flow.  Any newly created
  // blocks will be pointed to be "Block".
  CFGBlock *LastBlock = addStmt(I->getCond());

  // Finally, if the IfStmt contains a condition variable, add it and its
  // initializer to the CFG.
  if (const DeclStmt* DS = I->getConditionVariableDeclStmt()) {
    autoCreateBlock();
    LastBlock = addStmt(const_cast<DeclStmt *>(DS));
  }

  return LastBlock;
}
/// Build CFG elements for a return statement.  The return ends the current
/// block; destructors for all in-scope automatic objects run before the edge
/// to the function exit block.
CFGBlock *CFGBuilder::VisitReturnStmt(ReturnStmt *R) {
  // If we were in the middle of a block we stop processing that block.
  //
  // NOTE: If a "return" appears in the middle of a block, this means that the
  //       code afterwards is DEAD (unreachable).  We still keep a basic block
  //       for that code; a simple "mark-and-sweep" from the entry block will be
  //       able to report such dead blocks.

  // Create the new block.
  Block = createBlock(false);

  // Run destructors for every automatic object in scope at the return.
  addAutomaticObjDtors(ScopePos, LocalScope::const_iterator(), R);

  // If the one of the destructors does not return, we already have the Exit
  // block as a successor.
  if (!Block->hasNoReturnElement())
    addSuccessor(Block, &cfg->getExit());

  // Add the return statement to the block.  This may create new blocks if R
  // contains control-flow (short-circuit operations).
  return VisitStmt(R, AddStmtChoice::AlwaysAdd);
}
/// Build CFG elements for a label statement.  The labeled substatement's
/// block becomes the label's block and is registered in LabelMap so gotos
/// (including backpatched forward gotos) can target it.
CFGBlock *CFGBuilder::VisitLabelStmt(LabelStmt *L) {
  // Get the block of the labeled statement.  Add it to our map.
  addStmt(L->getSubStmt());
  CFGBlock *LabelBlock = Block;

  if (!LabelBlock)              // This can happen when the body is empty, i.e.
    LabelBlock = createBlock(); // scopes that only contains NullStmts.

  assert(LabelMap.find(L->getDecl()) == LabelMap.end() &&
         "label already in map");
  LabelMap[L->getDecl()] = JumpTarget(LabelBlock, ScopePos);

  // Labels partition blocks, so this is the end of the basic block we were
  // processing (L is the block's label).  Because this is label (and we have
  // already processed the substatement) there is no extra control-flow to worry
  // about.
  LabelBlock->setLabel(L);
  if (badCFG)
    return nullptr;

  // We set Block to NULL to allow lazy creation of a new block (if necessary);
  Block = nullptr;

  // This block is now the implicit successor of other blocks.
  Succ = LabelBlock;

  return LabelBlock;
}
CFGBlock *CFGBuilder::VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc) {
  // The lambda body is opaque at this point; only the capture initializers
  // are evaluated where the lambda expression appears.
  CFGBlock *Result = VisitNoRecurse(E, asc);
  for (LambdaExpr::capture_init_iterator It = E->capture_init_begin(),
                                         End = E->capture_init_end();
       It != End; ++It) {
    Expr *Init = *It;
    if (!Init)
      continue;
    if (CFGBlock *NewBlock = Visit(Init))
      Result = NewBlock;
  }
  return Result;
}
CFGBlock *CFGBuilder::VisitGotoStmt(GotoStmt *G) {
  // A goto is a control-flow statement: it ends the current block, and the
  // new block's terminator is the goto itself.
  Block = createBlock(false);
  Block->setTerminator(G);

  LabelMapTy::iterator Entry = LabelMap.find(G->getLabel());
  if (Entry != LabelMap.end()) {
    // The label's block is already known: emit destructors for objects
    // that go out of scope on the jump, then wire up the edge.
    JumpTarget Target = Entry->second;
    addAutomaticObjDtors(ScopePos, Target.scopePosition, G);
    addSuccessor(Block, Target.block);
  } else {
    // Forward goto: remember this block so the edge can be added once the
    // label's block has been created.
    BackpatchBlocks.push_back(JumpSource(Block, ScopePos));
  }

  return Block;
}
/// Build CFG elements for a for statement: init block, condition entry/exit
/// blocks (possibly split by short-circuit operators), body, increment /
/// transition block, and break/continue targets.
CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
  CFGBlock *LoopSuccessor = nullptr;

  // Save local scope position because in case of condition variable ScopePos
  // won't be restored when traversing AST.
  SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);

  // Create local scope for init statement and possible condition variable.
  // Add destructor for init statement and condition variable.
  // Store scope position for continue statement.
  if (Stmt *Init = F->getInit())
    addLocalScopeForStmt(Init);
  LocalScope::const_iterator LoopBeginScopePos = ScopePos;

  if (VarDecl *VD = F->getConditionVariable())
    addLocalScopeForVarDecl(VD);
  LocalScope::const_iterator ContinueScopePos = ScopePos;

  addAutomaticObjDtors(ScopePos, save_scope_pos.get(), F);

  // "for" is a control-flow statement.  Thus we stop processing the current
  // block.
  if (Block) {
    if (badCFG)
      return nullptr;
    LoopSuccessor = Block;
  } else
    LoopSuccessor = Succ;

  // Save the current value for the break targets.
  // All breaks should go to the code following the loop.
  SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
  BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);

  CFGBlock *BodyBlock = nullptr, *TransitionBlock = nullptr;

  // Now create the loop body.
  {
    assert(F->getBody());

    // Save the current values for Block, Succ, continue and break targets.
    SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
    SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget);

    // Create an empty block to represent the transition block for looping back
    // to the head of the loop.  If we have increment code, it will
    // go in this block as well.
    Block = Succ = TransitionBlock = createBlock(false);
    TransitionBlock->setLoopTarget(F);

    if (Stmt *I = F->getInc()) {
      // Generate increment code in its own basic block.  This is the target of
      // continue statements.
      Succ = addStmt(I);
    }

    // Finish up the increment (or empty) block if it hasn't been already.
    if (Block) {
      assert(Block == Succ);
      if (badCFG)
        return nullptr;
      Block = nullptr;
    }

    // The starting block for the loop increment is the block that should
    // represent the 'loop target' for looping back to the start of the loop.
    ContinueJumpTarget = JumpTarget(Succ, ContinueScopePos);
    ContinueJumpTarget.block->setLoopTarget(F);

    // Loop body should end with destructor of Condition variable (if any).
    addAutomaticObjDtors(ScopePos, LoopBeginScopePos, F);

    // If body is not a compound statement create implicit scope
    // and add destructors.
    if (!isa<CompoundStmt>(F->getBody()))
      addLocalScopeAndDtors(F->getBody());

    // Now populate the body block, and in the process create new blocks as we
    // walk the body of the loop.
    BodyBlock = addStmt(F->getBody());

    if (!BodyBlock) {
      // In the case of "for (...;...;...);" we can have a null BodyBlock.
      // Use the continue jump target as the proxy for the body.
      BodyBlock = ContinueJumpTarget.block;
    }
    else if (badCFG)
      return nullptr;
  }

  // Because of short-circuit evaluation, the condition of the loop can span
  // multiple basic blocks.  Thus we need the "Entry" and "Exit" blocks that
  // evaluate the condition.
  CFGBlock *EntryConditionBlock = nullptr, *ExitConditionBlock = nullptr;

  // 'do { ... } while (false)' is used so 'break' can skip the default case.
  do {
    Expr *C = F->getCond();

    // Specially handle logical operators, which have a slightly
    // more optimal CFG representation.
    if (BinaryOperator *Cond =
            dyn_cast_or_null<BinaryOperator>(C ? C->IgnoreParens() : nullptr))
      if (Cond->isLogicalOp()) {
        std::tie(EntryConditionBlock, ExitConditionBlock) =
            VisitLogicalOperator(Cond, F, BodyBlock, LoopSuccessor);
        break;
      }

    // The default case when not handling logical operators.
    EntryConditionBlock = ExitConditionBlock = createBlock(false);
    ExitConditionBlock->setTerminator(F);

    // See if this is a known constant.  A missing condition counts as 'true'.
    TryResult KnownVal(true);

    if (C) {
      // Now add the actual condition to the condition block.
      // Because the condition itself may contain control-flow, new blocks may
      // be created.  Thus we update "Succ" after adding the condition.
      Block = ExitConditionBlock;
      EntryConditionBlock = addStmt(C);

      // If this block contains a condition variable, add both the condition
      // variable and initializer to the CFG.
      if (VarDecl *VD = F->getConditionVariable()) {
        if (Expr *Init = VD->getInit()) {
          autoCreateBlock();
          appendStmt(Block, F->getConditionVariableDeclStmt());
          EntryConditionBlock = addStmt(Init);
          assert(Block == EntryConditionBlock);
        }
      }

      if (Block && badCFG)
        return nullptr;

      KnownVal = tryEvaluateBool(C);
    }

    // Add the loop body entry as a successor to the condition.
    addSuccessor(ExitConditionBlock, KnownVal.isFalse() ? nullptr : BodyBlock);
    // Link up the condition block with the code that follows the loop.  (the
    // false branch).
    addSuccessor(ExitConditionBlock,
                 KnownVal.isTrue() ? nullptr : LoopSuccessor);
  } while (false);

  // Link up the loop-back block to the entry condition block.
  addSuccessor(TransitionBlock, EntryConditionBlock);

  // The condition block is the implicit successor for any code above the loop.
  Succ = EntryConditionBlock;

  // If the loop contains initialization, create a new block for those
  // statements.  This block can also contain statements that precede the loop.
  if (Stmt *I = F->getInit()) {
    Block = createBlock();
    return addStmt(I);
  }

  // There is no loop initialization.  We are thus basically a while loop.
  // NULL out Block to force lazy block construction.
  Block = nullptr;
  Succ = EntryConditionBlock;
  return EntryConditionBlock;
}
CFGBlock *CFGBuilder::VisitMemberExpr(MemberExpr *M, AddStmtChoice asc) {
  // Record the member access itself when the caller requests it...
  const bool AddToBlock = asc.alwaysAdd(*this, M);
  if (AddToBlock) {
    autoCreateBlock();
    appendStmt(Block, M);
  }
  // ...then continue into the base object expression.
  return Visit(M->getBase());
}
/// Build CFG elements for an Objective-C fast-enumeration loop
/// (for-in).  See the diagram below for the block layout produced.
CFGBlock *CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
  // Objective-C fast enumeration 'for' statements:
  //  http://developer.apple.com/documentation/Cocoa/Conceptual/ObjectiveC
  //
  //  for ( Type newVariable in collection_expression ) { statements }
  //
  //  becomes:
  //
  //   prologue:
  //     1. collection_expression
  //     T. jump to loop_entry
  //   loop_entry:
  //     1. side-effects of element expression
  //     1. ObjCForCollectionStmt [performs binding to newVariable]
  //     T. ObjCForCollectionStmt  TB, FB  [jumps to TB if newVariable != nil]
  //   TB:
  //     statements
  //     T. jump to loop_entry
  //   FB:
  //     what comes after
  //
  //  and
  //
  //  Type existingItem;
  //  for ( existingItem in expression ) { statements }
  //
  //  becomes:
  //
  //   the same with newVariable replaced with existingItem; the binding works
  //   the same except that for one ObjCForCollectionStmt::getElement() returns
  //   a DeclStmt and the other returns a DeclRefExpr.
  //

  CFGBlock *LoopSuccessor = nullptr;

  // The for-in ends the block currently being built.
  if (Block) {
    if (badCFG)
      return nullptr;
    LoopSuccessor = Block;
    Block = nullptr;
  } else
    LoopSuccessor = Succ;

  // Build the condition blocks.
  CFGBlock *ExitConditionBlock = createBlock(false);

  // Set the terminator for the "exit" condition block.
  ExitConditionBlock->setTerminator(S);

  // The last statement in the block should be the ObjCForCollectionStmt, which
  // performs the actual binding to 'element' and determines if there are any
  // more items in the collection.
  appendStmt(ExitConditionBlock, S);
  Block = ExitConditionBlock;

  // Walk the 'element' expression to see if there are any side-effects.  We
  // generate new blocks as necessary.  We DON'T add the statement by default to
  // the CFG unless it contains control-flow.
  CFGBlock *EntryConditionBlock = Visit(S->getElement(),
                                        AddStmtChoice::NotAlwaysAdd);
  if (Block) {
    if (badCFG)
      return nullptr;
    Block = nullptr;
  }

  // The condition block is the implicit successor for the loop body as well as
  // any code above the loop.
  Succ = EntryConditionBlock;

  // Now create the true branch.
  {
    // Save the current values for Succ, continue and break targets.
    SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
    SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
                               save_break(BreakJumpTarget);

    // Add an intermediate block between the BodyBlock and the
    // EntryConditionBlock to represent the "loop back" transition, for looping
    // back to the head of the loop.
    CFGBlock *LoopBackBlock = nullptr;
    Succ = LoopBackBlock = createBlock();
    LoopBackBlock->setLoopTarget(S);

    BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
    ContinueJumpTarget = JumpTarget(Succ, ScopePos);

    CFGBlock *BodyBlock = addStmt(S->getBody());

    if (!BodyBlock)
      BodyBlock = ContinueJumpTarget.block; // can happen for "for (X in Y) ;"
    else if (Block) {
      if (badCFG)
        return nullptr;
    }

    // This new body block is a successor to our "exit" condition block.
    addSuccessor(ExitConditionBlock, BodyBlock);
  }

  // Link up the condition block with the code that follows the loop.
  // (the false branch).
  addSuccessor(ExitConditionBlock, LoopSuccessor);

  // Now create a prologue block to contain the collection expression.
  Block = createBlock();
  return addStmt(S->getCollection());
}
/// Build CFG elements for an @autoreleasepool statement.  Currently the
/// pool itself contributes no CFG elements; only the body is visited.
CFGBlock *CFGBuilder::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
  // Inline the body.
  return addStmt(S->getSubStmt());
  // TODO: consider adding cleanups for the end of @autoreleasepool scope.
}
/// Build CFG elements for an @synchronized statement: the body is inlined,
/// the @synchronized itself is appended, and finally the lock expression is
/// evaluated.  Lock/unlock primitives are not yet modeled.
CFGBlock *CFGBuilder::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
  // FIXME: Add locking 'primitives' to CFG for @synchronized.

  // Inline the body.
  CFGBlock *SyncBlock = addStmt(S->getSynchBody());

  // The sync body starts its own basic block.  This makes it a little easier
  // for diagnostic clients.
  if (SyncBlock) {
    if (badCFG)
      return nullptr;

    Block = nullptr;
    Succ = SyncBlock;
  }

  // Add the @synchronized to the CFG.
  autoCreateBlock();
  appendStmt(Block, S);

  // Inline the sync expression.
  return addStmt(S->getSynchExpr());
}
/// @try is not yet modeled in the CFG; report "not yet supported".
CFGBlock *CFGBuilder::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
  // FIXME
  return NYS();
}
/// Build CFG elements for a pseudo-object expression (e.g. Objective-C
/// property access).  The semantic-form expressions are what actually
/// execute, so they are visited; opaque values are rebound to their source
/// expressions.
CFGBlock *CFGBuilder::VisitPseudoObjectExpr(PseudoObjectExpr *E) {
  autoCreateBlock();

  // Add the PseudoObject as the last thing.
  appendStmt(Block, E);

  CFGBlock *lastBlock = Block;

  // Before that, evaluate all of the semantics in order.  In
  // CFG-land, that means appending them in reverse order.
  for (unsigned i = E->getNumSemanticExprs(); i != 0; ) {
    Expr *Semantic = E->getSemanticExpr(--i);

    // If the semantic is an opaque value, we're being asked to bind
    // it to its source expression.
    if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Semantic))
      Semantic = OVE->getSourceExpr();

    if (CFGBlock *B = Visit(Semantic))
      lastBlock = B;
  }

  return lastBlock;
}
/// Build CFG elements for a while statement: condition entry/exit blocks
/// (possibly split by short-circuit operators), loop body, transition block
/// for looping back, and break/continue targets.
CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) {
  CFGBlock *LoopSuccessor = nullptr;

  // Save local scope position because in case of condition variable ScopePos
  // won't be restored when traversing AST.
  SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);

  // Create local scope for possible condition variable.
  // Store scope position for continue statement.
  LocalScope::const_iterator LoopBeginScopePos = ScopePos;
  if (VarDecl *VD = W->getConditionVariable()) {
    addLocalScopeForVarDecl(VD);
    addAutomaticObjDtors(ScopePos, LoopBeginScopePos, W);
  }

  // "while" is a control-flow statement.  Thus we stop processing the current
  // block.
  if (Block) {
    if (badCFG)
      return nullptr;
    LoopSuccessor = Block;
    Block = nullptr;
  } else {
    LoopSuccessor = Succ;
  }

  CFGBlock *BodyBlock = nullptr, *TransitionBlock = nullptr;

  // Process the loop body.
  {
    assert(W->getBody());

    // Save the current values for Block, Succ, continue and break targets.
    SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
    SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
                               save_break(BreakJumpTarget);

    // Create an empty block to represent the transition block for looping back
    // to the head of the loop.
    Succ = TransitionBlock = createBlock(false);
    TransitionBlock->setLoopTarget(W);
    ContinueJumpTarget = JumpTarget(Succ, LoopBeginScopePos);

    // All breaks should go to the code following the loop.
    BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);

    // Loop body should end with destructor of Condition variable (if any).
    addAutomaticObjDtors(ScopePos, LoopBeginScopePos, W);

    // If body is not a compound statement create implicit scope
    // and add destructors.
    if (!isa<CompoundStmt>(W->getBody()))
      addLocalScopeAndDtors(W->getBody());

    // Create the body.  The returned block is the entry to the loop body.
    BodyBlock = addStmt(W->getBody());

    if (!BodyBlock)
      BodyBlock = ContinueJumpTarget.block; // can happen for "while(...) ;"
    else if (Block && badCFG)
      return nullptr;
  }

  // Because of short-circuit evaluation, the condition of the loop can span
  // multiple basic blocks.  Thus we need the "Entry" and "Exit" blocks that
  // evaluate the condition.
  CFGBlock *EntryConditionBlock = nullptr, *ExitConditionBlock = nullptr;

  // 'do { ... } while(false)' is used so 'break' can skip the default case.
  do {
    Expr *C = W->getCond();

    // Specially handle logical operators, which have a slightly
    // more optimal CFG representation.
    if (BinaryOperator *Cond = dyn_cast<BinaryOperator>(C->IgnoreParens()))
      if (Cond->isLogicalOp()) {
        std::tie(EntryConditionBlock, ExitConditionBlock) =
            VisitLogicalOperator(Cond, W, BodyBlock, LoopSuccessor);
        break;
      }

    // The default case when not handling logical operators.
    ExitConditionBlock = createBlock(false);
    ExitConditionBlock->setTerminator(W);

    // Now add the actual condition to the condition block.
    // Because the condition itself may contain control-flow, new blocks may
    // be created.  Thus we update "Succ" after adding the condition.
    Block = ExitConditionBlock;
    Block = EntryConditionBlock = addStmt(C);

    // If this block contains a condition variable, add both the condition
    // variable and initializer to the CFG.
    if (VarDecl *VD = W->getConditionVariable()) {
      if (Expr *Init = VD->getInit()) {
        autoCreateBlock();
        appendStmt(Block, W->getConditionVariableDeclStmt());
        EntryConditionBlock = addStmt(Init);
        assert(Block == EntryConditionBlock);
      }
    }

    if (Block && badCFG)
      return nullptr;

    // See if this is a known constant.
    const TryResult& KnownVal = tryEvaluateBool(C);

    // Add the loop body entry as a successor to the condition.
    addSuccessor(ExitConditionBlock, KnownVal.isFalse() ? nullptr : BodyBlock);
    // Link up the condition block with the code that follows the loop.  (the
    // false branch).
    addSuccessor(ExitConditionBlock,
                 KnownVal.isTrue() ? nullptr : LoopSuccessor);
  } while(false);

  // Link up the loop-back block to the entry condition block.
  addSuccessor(TransitionBlock, EntryConditionBlock);

  // There can be no more statements in the condition block since we loop back
  // to this block.  NULL out Block to force lazy creation of another block.
  Block = nullptr;

  // Return the condition block, which is the dominating block for the loop.
  Succ = EntryConditionBlock;
  return EntryConditionBlock;
}
/// @catch is not yet modeled: the handler and its body contribute nothing
/// to the CFG, so the current block is returned unchanged.
CFGBlock *CFGBuilder::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
  // FIXME: For now we pretend that @catch and the code it contains does not
  //  exit.
  return Block;
}
/// Build CFG elements for an Objective-C @throw, approximated as a jump
/// straight to the function exit block.
CFGBlock *CFGBuilder::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
  // FIXME: This isn't complete.  We basically treat @throw like a return
  //  statement.

  // If we were in the middle of a block we stop processing that block.
  if (badCFG)
    return nullptr;

  // Create the new block.
  Block = createBlock(false);

  // The Exit block is the only successor.
  addSuccessor(Block, &cfg->getExit());

  // Add the statement to the block.  This may create new blocks if S contains
  // control-flow (short-circuit operations).
  return VisitStmt(S, AddStmtChoice::AlwaysAdd);
}
/// Build CFG elements for a C++ throw expression.  Control transfers to the
/// innermost enclosing try's terminator block if there is one, otherwise to
/// the function exit block.
CFGBlock *CFGBuilder::VisitCXXThrowExpr(CXXThrowExpr *T) {
  // If we were in the middle of a block we stop processing that block.
  if (badCFG)
    return nullptr;

  // Create the new block.
  Block = createBlock(false);

  if (TryTerminatedBlock)
    // The current try statement is the only successor.
    addSuccessor(Block, TryTerminatedBlock);
  else
    // otherwise the Exit block is the only successor.
    addSuccessor(Block, &cfg->getExit());

  // Add the statement to the block.  This may create new blocks if S contains
  // control-flow (short-circuit operations).
  return VisitStmt(T, AddStmtChoice::AlwaysAdd);
}
/// VisitDoStmt - Build the CFG subgraph for a do/while loop and return the
/// entry block of the loop body, which dominates the loop. The condition is
/// modeled with an "entry"/"exit" pair of blocks because short-circuit
/// evaluation can split it across several blocks; if the condition folds to
/// a constant, the corresponding edges are added as unreachable (nullptr).
CFGBlock *CFGBuilder::VisitDoStmt(DoStmt *D) {
  CFGBlock *LoopSuccessor = nullptr;
  // "do...while" is a control-flow statement. Thus we stop processing the
  // current block.
  if (Block) {
    if (badCFG)
      return nullptr;
    LoopSuccessor = Block;
  } else
    LoopSuccessor = Succ;
  // Because of short-circuit evaluation, the condition of the loop can span
  // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
  // evaluate the condition.
  CFGBlock *ExitConditionBlock = createBlock(false);
  CFGBlock *EntryConditionBlock = ExitConditionBlock;
  // Set the terminator for the "exit" condition block.
  ExitConditionBlock->setTerminator(D);
  // Now add the actual condition to the condition block. Because the condition
  // itself may contain control-flow, new blocks may be created.
  if (Stmt *C = D->getCond()) {
    Block = ExitConditionBlock;
    EntryConditionBlock = addStmt(C);
    if (Block) {
      if (badCFG)
        return nullptr;
    }
  }
  // The condition block is the implicit successor for the loop body.
  Succ = EntryConditionBlock;
  // See if this is a known constant.
  const TryResult &KnownVal = tryEvaluateBool(D->getCond());
  // Process the loop body.
  CFGBlock *BodyBlock = nullptr;
  {
    assert(D->getBody());
    // Save the current values for Block, Succ, and continue and break targets
    SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
    SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
        save_break(BreakJumpTarget);
    // All continues within this loop should go to the condition block
    ContinueJumpTarget = JumpTarget(EntryConditionBlock, ScopePos);
    // All breaks should go to the code following the loop.
    BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
    // NULL out Block to force lazy instantiation of blocks for the body.
    Block = nullptr;
    // If body is not a compound statement create implicit scope
    // and add destructors.
    if (!isa<CompoundStmt>(D->getBody()))
      addLocalScopeAndDtors(D->getBody());
    // Create the body. The returned block is the entry to the loop body.
    BodyBlock = addStmt(D->getBody());
    if (!BodyBlock)
      BodyBlock = EntryConditionBlock; // can happen for "do ; while(...)"
    else if (Block) {
      if (badCFG)
        return nullptr;
    }
    if (!KnownVal.isFalse()) {
      // Add an intermediate block between the BodyBlock and the
      // ExitConditionBlock to represent the "loop back" transition. Create an
      // empty block to represent the transition block for looping back to the
      // head of the loop.
      // FIXME: Can we do this more efficiently without adding another block?
      Block = nullptr;
      Succ = BodyBlock;
      CFGBlock *LoopBackBlock = createBlock();
      LoopBackBlock->setLoopTarget(D);
      // Add the loop body entry as a successor to the condition.
      addSuccessor(ExitConditionBlock, LoopBackBlock);
    }
    else
      // Condition is known false: mark the back-edge unreachable.
      addSuccessor(ExitConditionBlock, nullptr);
  }
  // Link up the condition block with the code that follows the loop.
  // (the false branch).
  addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? nullptr : LoopSuccessor);
  // There can be no more statements in the body block(s) since we loop back to
  // the body. NULL out Block to force lazy creation of another block.
  Block = nullptr;
  // Return the loop body, which is the dominating block for the loop.
  Succ = BodyBlock;
  return BodyBlock;
}
/// VisitContinueStmt - Handle 'continue'. Terminates the current block and
/// creates a new block whose successor is the enclosing loop's continue
/// target, adding destructors for automatic objects that go out of scope on
/// the jump. A missing target means the AST is incomplete and the CFG is
/// marked bad.
CFGBlock *CFGBuilder::VisitContinueStmt(ContinueStmt *C) {
  // "continue" is a control-flow statement. Thus we stop processing the
  // current block.
  if (badCFG)
    return nullptr;
  // Now create a new block that ends with the continue statement.
  Block = createBlock(false);
  Block->setTerminator(C);
  // If there is no target for the continue, then we are looking at an
  // incomplete AST. This means the CFG cannot be constructed.
  if (ContinueJumpTarget.block) {
    addAutomaticObjDtors(ScopePos, ContinueJumpTarget.scopePosition, C);
    addSuccessor(Block, ContinueJumpTarget.block);
  } else
    badCFG = true;
  return Block;
}
/// VisitUnaryExprOrTypeTraitExpr - Handle sizeof/alignof/vec_step. When the
/// operand is a variably-modified type, the VLA size expressions must still
/// be evaluated at runtime, so they are added to the CFG.
CFGBlock *CFGBuilder::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E,
                                                    AddStmtChoice asc) {
  // Append the trait expression itself if the caller requires it.
  if (asc.alwaysAdd(*this, E)) {
    autoCreateBlock();
    appendStmt(Block, E);
  }
  CFGBlock *LastBlock = Block;
  // Walk down through nested VLA element types, emitting each size
  // expression; these are the only subexpressions that get evaluated.
  if (E->isArgumentType()) {
    const VariableArrayType *VAT = FindVA(E->getArgumentType().getTypePtr());
    while (VAT != nullptr) {
      LastBlock = addStmt(VAT->getSizeExpr());
      VAT = FindVA(VAT->getElementType().getTypePtr());
    }
  }
  return LastBlock;
}
/// VisitStmtExpr - Utility method to handle (nested) statement
/// expressions (a GCC extension). The statement expression itself is
/// optionally appended, then its compound-statement body is visited; the
/// body's entry block is returned.
CFGBlock *CFGBuilder::VisitStmtExpr(StmtExpr *SE, AddStmtChoice asc) {
  if (asc.alwaysAdd(*this, SE)) {
    autoCreateBlock();
    appendStmt(Block, SE);
  }
  return VisitCompoundStmt(SE->getSubStmt());
}
/// VisitSwitchStmt - Build the CFG subgraph for a 'switch'. The switch gets
/// a dedicated terminator block; case/default labels link themselves to it
/// while the body is visited. If the condition folds to a constant, edges to
/// cases that cannot match are pruned via switchCond /
/// switchExclusivelyCovered (see shouldAddCase).
CFGBlock *CFGBuilder::VisitSwitchStmt(SwitchStmt *Terminator) {
  // "switch" is a control-flow statement. Thus we stop processing the current
  // block.
  CFGBlock *SwitchSuccessor = nullptr;
  // Save local scope position because in case of condition variable ScopePos
  // won't be restored when traversing AST.
  SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
  // Create local scope for possible condition variable.
  // Store scope position. Add implicit destructor.
  if (VarDecl *VD = Terminator->getConditionVariable()) {
    LocalScope::const_iterator SwitchBeginScopePos = ScopePos;
    addLocalScopeForVarDecl(VD);
    addAutomaticObjDtors(ScopePos, SwitchBeginScopePos, Terminator);
  }
  if (Block) {
    if (badCFG)
      return nullptr;
    SwitchSuccessor = Block;
  } else SwitchSuccessor = Succ;
  // Save the current "switch" context.
  SaveAndRestore<CFGBlock*> save_switch(SwitchTerminatedBlock),
                            save_default(DefaultCaseBlock);
  SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
  // Set the "default" case to be the block after the switch statement. If the
  // switch statement contains a "default:", this value will be overwritten with
  // the block for that code.
  DefaultCaseBlock = SwitchSuccessor;
  // Create a new block that will contain the switch statement.
  SwitchTerminatedBlock = createBlock(false);
  // Now process the switch body. The code after the switch is the implicit
  // successor.
  Succ = SwitchSuccessor;
  BreakJumpTarget = JumpTarget(SwitchSuccessor, ScopePos);
  // When visiting the body, the case statements should automatically get linked
  // up to the switch. We also don't keep a pointer to the body, since all
  // control-flow from the switch goes to case/default statements.
  assert(Terminator->getBody() && "switch must contain a non-NULL body");
  Block = nullptr;
  // For pruning unreachable case statements, save the current state
  // for tracking the condition value.
  SaveAndRestore<bool> save_switchExclusivelyCovered(switchExclusivelyCovered,
                                                     false);
  // Determine if the switch condition can be explicitly evaluated.
  assert(Terminator->getCond() && "switch condition must be non-NULL");
  Expr::EvalResult result;
  bool b = tryEvaluate(Terminator->getCond(), result);
  SaveAndRestore<Expr::EvalResult*> save_switchCond(switchCond,
                                                    b ? &result : nullptr);
  // If body is not a compound statement create implicit scope
  // and add destructors.
  if (!isa<CompoundStmt>(Terminator->getBody()))
    addLocalScopeAndDtors(Terminator->getBody());
  addStmt(Terminator->getBody());
  if (Block) {
    if (badCFG)
      return nullptr;
  }
  // If we have no "default:" case, the default transition is to the code
  // following the switch body. Moreover, take into account if all the
  // cases of a switch are covered (e.g., switching on an enum value).
  //
  // Note: We add a successor to a switch that is considered covered yet has no
  // case statements if the enumeration has no enumerators.
  bool SwitchAlwaysHasSuccessor = false;
  SwitchAlwaysHasSuccessor |= switchExclusivelyCovered;
  SwitchAlwaysHasSuccessor |= Terminator->isAllEnumCasesCovered() &&
                              Terminator->getSwitchCaseList();
  addSuccessor(SwitchTerminatedBlock, DefaultCaseBlock,
               !SwitchAlwaysHasSuccessor);
  // Add the terminator and condition in the switch block.
  SwitchTerminatedBlock->setTerminator(Terminator);
  Block = SwitchTerminatedBlock;
  CFGBlock *LastBlock = addStmt(Terminator->getCond());
  // Finally, if the SwitchStmt contains a condition variable, add both the
  // SwitchStmt and the condition variable initialization to the CFG.
  if (VarDecl *VD = Terminator->getConditionVariable()) {
    if (Expr *Init = VD->getInit()) {
      autoCreateBlock();
      appendStmt(Block, Terminator->getConditionVariableDeclStmt());
      LastBlock = addStmt(Init);
    }
  }
  return LastBlock;
}
/// shouldAddCase - Decide whether the edge from a switch terminator to the
/// block for case statement \p CS should be considered reachable, given a
/// switch condition that may have been constant-folded.
///
/// \param switchExclusivelyCovered In/out: set to true once some case is
///        known to match the constant condition; every later case is then
///        unreachable and its edge is pruned.
/// \param switchCond The evaluated switch condition, or null when it could
///        not be evaluated (in which case every case is kept).
/// \param CS The case statement, possibly a GNU range 'case lo ... hi'.
/// \param Ctx ASTContext used to fold the case label expressions.
/// \returns true if the case's edge should be added as reachable.
static bool shouldAddCase(bool &switchExclusivelyCovered,
                          const Expr::EvalResult *switchCond,
                          const CaseStmt *CS,
                          ASTContext &Ctx) {
  // Without a known condition value no pruning is possible.
  if (!switchCond)
    return true;
  bool addCase = false;
  if (!switchExclusivelyCovered) {
    if (switchCond->Val.isInt()) {
      // Evaluate the LHS of the case value.
      const llvm::APSInt &lhsInt = CS->getLHS()->EvaluateKnownConstInt(Ctx);
      const llvm::APSInt &condInt = switchCond->Val.getInt();
      if (condInt == lhsInt) {
        addCase = true;
        switchExclusivelyCovered = true;
      }
      else if (condInt > lhsInt) {
        // GNU case range 'case lhs ... rhs' matches when
        // lhs <= cond <= rhs, so with cond > lhs we still need rhs >= cond.
        // (The previous code tested cond < lhs and rhs <= cond, which can
        // never hold for a valid ascending range and wrongly pruned
        // reachable cases.)
        if (const Expr *RHS = CS->getRHS()) {
          // Evaluate the RHS of the case value.
          const llvm::APSInt &V2 = RHS->EvaluateKnownConstInt(Ctx);
          if (V2 >= condInt) {
            addCase = true;
            switchExclusivelyCovered = true;
          }
        }
      }
    }
    else
      // Non-integer condition value: keep the case.
      addCase = true;
  }
  return addCase;
}
/// VisitCaseStmt - Handle a 'case' label. Case labels start a new block;
/// chains of directly-nested CaseStmts ("case 1: case 2: ...") are unrolled
/// iteratively to avoid deep recursion, and each label is linked to the
/// enclosing switch's terminator block (as unreachable if shouldAddCase
/// proves the constant condition cannot match it).
CFGBlock *CFGBuilder::VisitCaseStmt(CaseStmt *CS) {
  // CaseStmts are essentially labels, so they are the first statement in a
  // block.
  CFGBlock *TopBlock = nullptr, *LastBlock = nullptr;
  if (Stmt *Sub = CS->getSubStmt()) {
    // For deeply nested chains of CaseStmts, instead of doing a recursion
    // (which can blow out the stack), manually unroll and create blocks
    // along the way.
    while (isa<CaseStmt>(Sub)) {
      CFGBlock *currentBlock = createBlock(false);
      currentBlock->setLabel(CS);
      if (TopBlock)
        addSuccessor(LastBlock, currentBlock);
      else
        TopBlock = currentBlock;
      addSuccessor(SwitchTerminatedBlock,
                   shouldAddCase(switchExclusivelyCovered, switchCond,
                                 CS, *Context)
                   ? currentBlock : nullptr);
      LastBlock = currentBlock;
      CS = cast<CaseStmt>(Sub);
      Sub = CS->getSubStmt();
    }
    addStmt(Sub);
  }
  CFGBlock *CaseBlock = Block;
  if (!CaseBlock)
    CaseBlock = createBlock();
  // Cases statements partition blocks, so this is the top of the basic block we
  // were processing (the "case XXX:" is the label).
  CaseBlock->setLabel(CS);
  if (badCFG)
    return nullptr;
  // Add this block to the list of successors for the block with the switch
  // statement.
  assert(SwitchTerminatedBlock);
  addSuccessor(SwitchTerminatedBlock, CaseBlock,
               shouldAddCase(switchExclusivelyCovered, switchCond,
                             CS, *Context));
  // We set Block to NULL to allow lazy creation of a new block (if necessary)
  Block = nullptr;
  if (TopBlock) {
    addSuccessor(LastBlock, CaseBlock);
    Succ = TopBlock;
  } else {
    // This block is now the implicit successor of other blocks.
    Succ = CaseBlock;
  }
  return Succ;
}
/// VisitDefaultStmt - Handle a 'default' label. Like case labels it starts
/// a new block, but the edge from the switch terminator is added later, in
/// VisitSwitchStmt, so that the default transition is always the last
/// successor of the switch-terminated block.
CFGBlock *CFGBuilder::VisitDefaultStmt(DefaultStmt *Terminator) {
  if (Terminator->getSubStmt())
    addStmt(Terminator->getSubStmt());
  DefaultCaseBlock = Block;
  if (!DefaultCaseBlock)
    DefaultCaseBlock = createBlock();
  // Default statements partition blocks, so this is the top of the basic block
  // we were processing (the "default:" is the label).
  DefaultCaseBlock->setLabel(Terminator);
  if (badCFG)
    return nullptr;
  // Unlike case statements, we don't add the default block to the successors
  // for the switch statement immediately. This is done when we finish
  // processing the switch statement. This allows for the default case
  // (including a fall-through to the code after the switch statement) to always
  // be the last successor of a switch-terminated block.
  // We set Block to NULL to allow lazy creation of a new block (if necessary)
  Block = nullptr;
  // This block is now the implicit successor of other blocks.
  Succ = DefaultCaseBlock;
  return DefaultCaseBlock;
}
/// VisitCXXTryStmt - Build the CFG subgraph for a C++ 'try' statement. A
/// dedicated try-terminator block fans out to every handler; without a
/// catch-all handler an extra edge models escape to the enclosing try (or
/// the exit block). The try body is visited with TryTerminatedBlock set so
/// throw expressions inside it target this statement.
CFGBlock *CFGBuilder::VisitCXXTryStmt(CXXTryStmt *Terminator) {
  // "try"/"catch" is a control-flow statement. Thus we stop processing the
  // current block.
  CFGBlock *TrySuccessor = nullptr;
  if (Block) {
    if (badCFG)
      return nullptr;
    TrySuccessor = Block;
  } else TrySuccessor = Succ;
  CFGBlock *PrevTryTerminatedBlock = TryTerminatedBlock;
  // Create a new block that will contain the try statement.
  CFGBlock *NewTryTerminatedBlock = createBlock(false);
  // Add the terminator in the try block.
  NewTryTerminatedBlock->setTerminator(Terminator);
  bool HasCatchAll = false;
  for (unsigned h = 0; h <Terminator->getNumHandlers(); ++h) {
    // The code after the try is the implicit successor.
    Succ = TrySuccessor;
    CXXCatchStmt *CS = Terminator->getHandler(h);
    // A handler with no exception declaration is "catch (...)".
    if (CS->getExceptionDecl() == nullptr) {
      HasCatchAll = true;
    }
    Block = nullptr;
    CFGBlock *CatchBlock = VisitCXXCatchStmt(CS);
    if (!CatchBlock)
      return nullptr;
    // Add this block to the list of successors for the block with the try
    // statement.
    addSuccessor(NewTryTerminatedBlock, CatchBlock);
  }
  if (!HasCatchAll) {
    // An exception may escape this try; it propagates to the enclosing try
    // if there is one, otherwise out of the function.
    if (PrevTryTerminatedBlock)
      addSuccessor(NewTryTerminatedBlock, PrevTryTerminatedBlock);
    else
      addSuccessor(NewTryTerminatedBlock, &cfg->getExit());
  }
  // The code after the try is the implicit successor.
  Succ = TrySuccessor;
  // Save the current "try" context.
  SaveAndRestore<CFGBlock*> save_try(TryTerminatedBlock, NewTryTerminatedBlock);
  cfg->addTryDispatchBlock(TryTerminatedBlock);
  assert(Terminator->getTryBlock() && "try must contain a non-NULL body");
  Block = nullptr;
  return addStmt(Terminator->getTryBlock());
}
/// VisitCXXCatchStmt - Build the block for a C++ 'catch' handler. The catch
/// acts both as a label (the handler's entry) and as a CFGElement, since it
/// implicitly initializes the exception variable; a local scope with an
/// implicit destructor is created for that variable.
CFGBlock *CFGBuilder::VisitCXXCatchStmt(CXXCatchStmt *CS) {
  // CXXCatchStmt are treated like labels, so they are the first statement in a
  // block.
  // Save local scope position because in case of exception variable ScopePos
  // won't be restored when traversing AST.
  SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
  // Create local scope for possible exception variable.
  // Store scope position. Add implicit destructor.
  if (VarDecl *VD = CS->getExceptionDecl()) {
    LocalScope::const_iterator BeginScopePos = ScopePos;
    addLocalScopeForVarDecl(VD);
    addAutomaticObjDtors(ScopePos, BeginScopePos, CS);
  }
  if (CS->getHandlerBlock())
    addStmt(CS->getHandlerBlock());
  CFGBlock *CatchBlock = Block;
  if (!CatchBlock)
    CatchBlock = createBlock();
  // CXXCatchStmt is more than just a label. They have semantic meaning
  // as well, as they implicitly "initialize" the catch variable. Add
  // it to the CFG as a CFGElement so that the control-flow of these
  // semantics gets captured.
  appendStmt(CatchBlock, CS);
  // Also add the CXXCatchStmt as a label, to mirror handling of regular
  // labels.
  CatchBlock->setLabel(CS);
  // Bail out if the CFG is bad.
  if (badCFG)
    return nullptr;
  // We set Block to NULL to allow lazy creation of a new block (if necessary)
  Block = nullptr;
  return CatchBlock;
}
/// VisitCXXForRangeStmt - Build the CFG subgraph for a C++11 range-based
/// for. The desugared form (below) drives the construction: condition block
/// with the __begin != __end test, an increment block that is the continue
/// target and loop-back target, the body preceded by the loop-variable
/// binding, and finally the range/begin-end initialization statements.
CFGBlock *CFGBuilder::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
  // C++0x for-range statements are specified as [stmt.ranged]:
  //
  // {
  //   auto && __range = range-init;
  //   for ( auto __begin = begin-expr,
  //         __end = end-expr;
  //         __begin != __end;
  //         ++__begin ) {
  //     for-range-declaration = *__begin;
  //     statement
  //   }
  // }
  // Save local scope position before the addition of the implicit variables.
  SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
  // Create local scopes and destructors for range, begin and end variables.
  if (Stmt *Range = S->getRangeStmt())
    addLocalScopeForStmt(Range);
  if (Stmt *BeginEnd = S->getBeginEndStmt())
    addLocalScopeForStmt(BeginEnd);
  addAutomaticObjDtors(ScopePos, save_scope_pos.get(), S);
  LocalScope::const_iterator ContinueScopePos = ScopePos;
  // "for" is a control-flow statement. Thus we stop processing the current
  // block.
  CFGBlock *LoopSuccessor = nullptr;
  if (Block) {
    if (badCFG)
      return nullptr;
    LoopSuccessor = Block;
  } else
    LoopSuccessor = Succ;
  // Save the current value for the break targets.
  // All breaks should go to the code following the loop.
  SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
  BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
  // The block for the __begin != __end expression.
  CFGBlock *ConditionBlock = createBlock(false);
  ConditionBlock->setTerminator(S);
  // Now add the actual condition to the condition block.
  if (Expr *C = S->getCond()) {
    Block = ConditionBlock;
    CFGBlock *BeginConditionBlock = addStmt(C);
    if (badCFG)
      return nullptr;
    assert(BeginConditionBlock == ConditionBlock &&
           "condition block in for-range was unexpectedly complex");
    (void)BeginConditionBlock;
  }
  // The condition block is the implicit successor for the loop body as well as
  // any code above the loop.
  Succ = ConditionBlock;
  // See if this is a known constant.
  TryResult KnownVal(true);
  if (S->getCond())
    KnownVal = tryEvaluateBool(S->getCond());
  // Now create the loop body.
  {
    assert(S->getBody());
    // Save the current values for Block, Succ, and continue targets.
    SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
    SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget);
    // Generate increment code in its own basic block. This is the target of
    // continue statements.
    Block = nullptr;
    Succ = addStmt(S->getInc());
    ContinueJumpTarget = JumpTarget(Succ, ContinueScopePos);
    // The starting block for the loop increment is the block that should
    // represent the 'loop target' for looping back to the start of the loop.
    ContinueJumpTarget.block->setLoopTarget(S);
    // Finish up the increment block and prepare to start the loop body.
    assert(Block);
    if (badCFG)
      return nullptr;
    Block = nullptr;
    // Add implicit scope and dtors for loop variable.
    addLocalScopeAndDtors(S->getLoopVarStmt());
    // Populate a new block to contain the loop body and loop variable.
    addStmt(S->getBody());
    if (badCFG)
      return nullptr;
    CFGBlock *LoopVarStmtBlock = addStmt(S->getLoopVarStmt());
    if (badCFG)
      return nullptr;
    // This new body block is a successor to our condition block.
    addSuccessor(ConditionBlock,
                 KnownVal.isFalse() ? nullptr : LoopVarStmtBlock);
  }
  // Link up the condition block with the code that follows the loop (the
  // false branch).
  addSuccessor(ConditionBlock, KnownVal.isTrue() ? nullptr : LoopSuccessor);
  // Add the initialization statements.
  Block = createBlock();
  addStmt(S->getBeginEndStmt());
  return addStmt(S->getRangeStmt());
}
/// VisitExprWithCleanups - Handle a full expression that owns temporaries.
/// When temporary destructors are modeled, destructor elements are emitted
/// first (the CFG is built bottom-up), then the wrapped subexpression is
/// visited with AlwaysAdd so the full expression is sequenced before them.
CFGBlock *CFGBuilder::VisitExprWithCleanups(ExprWithCleanups *E,
                                            AddStmtChoice asc) {
  if (BuildOpts.AddTemporaryDtors) {
    // Walk the full expression and emit destructors for its temporaries.
    // (Named TDContext to avoid shadowing the ASTContext member 'Context'.)
    TempDtorContext TDContext;
    VisitForTemporaryDtors(E->getSubExpr(), false, TDContext);
    // The full expression must appear as a CFGStmt so that it is sequenced
    // before the destructors of its temporaries.
    asc = asc.withAlwaysAdd(true);
  }
  return Visit(E->getSubExpr(), asc);
}
/// VisitCXXBindTemporaryExpr - Append the bind-temporary node when required
/// and then visit its subexpression without forcing it to be re-added.
CFGBlock *CFGBuilder::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
                                                AddStmtChoice asc) {
  AddStmtChoice SubExprChoice = asc;
  if (asc.alwaysAdd(*this, E)) {
    autoCreateBlock();
    appendStmt(Block, E);
    // The bind-temporary node itself is now in the CFG; do not propagate
    // the AlwaysAdd property to the subexpression.
    SubExprChoice = asc.withAlwaysAdd(false);
  }
  return Visit(E->getSubExpr(), SubExprChoice);
}
/// VisitCXXConstructExpr - Append the constructor call itself, then visit
/// its children (arguments); since the CFG is built bottom-up, the
/// arguments end up sequenced before the construction.
CFGBlock *CFGBuilder::VisitCXXConstructExpr(CXXConstructExpr *C,
                                            AddStmtChoice asc) {
  autoCreateBlock();
  appendStmt(Block, C);
  return VisitChildren(C);
}
/// VisitCXXNewExpr - Handle 'new'. The new-expression itself is appended
/// first; because the CFG is built bottom-up, the subsequently visited
/// initializer, optional allocator element, array size, and placement
/// arguments are all sequenced before it in the resulting CFG.
CFGBlock *CFGBuilder::VisitCXXNewExpr(CXXNewExpr *NE,
                                      AddStmtChoice asc) {
  autoCreateBlock();
  appendStmt(Block, NE);
  if (NE->getInitializer())
    Block = Visit(NE->getInitializer());
  // Optionally model the allocator call as its own CFG element.
  if (BuildOpts.AddCXXNewAllocator)
    appendNewAllocator(Block, NE);
  if (NE->isArray())
    Block = Visit(NE->getArraySize());
  for (CXXNewExpr::arg_iterator I = NE->placement_arg_begin(),
                                E = NE->placement_arg_end(); I != E; ++I)
    Block = Visit(*I);
  return Block;
}
/// VisitCXXDeleteExpr - Handle 'delete'. Appends the delete expression and,
/// when the destroyed type is a complete class with a non-trivial
/// destructor, an implicit delete-destructor element as well, before
/// visiting the operand.
CFGBlock *CFGBuilder::VisitCXXDeleteExpr(CXXDeleteExpr *DE,
                                         AddStmtChoice asc) {
  autoCreateBlock();
  appendStmt(Block, DE);
  // Strip references and array dimensions to find the class being destroyed.
  QualType Destroyed = DE->getDestroyedType().getNonReferenceType();
  CXXRecordDecl *Record =
      Context->getBaseElementType(Destroyed)->getAsCXXRecordDecl();
  if (Record && Record->isCompleteDefinition() &&
      !Record->hasTrivialDestructor())
    appendDeleteDtor(Block, Record, DE);
  return VisitChildren(DE);
}
/// VisitCXXFunctionalCastExpr - Append the functional cast when required and
/// visit its subexpression without forcing it to be re-added.
CFGBlock *CFGBuilder::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
                                                 AddStmtChoice asc) {
  AddStmtChoice SubExprChoice = asc;
  if (asc.alwaysAdd(*this, E)) {
    autoCreateBlock();
    appendStmt(Block, E);
    // The cast itself is now in the CFG; do not propagate the AlwaysAdd
    // property to the subexpression.
    SubExprChoice = asc.withAlwaysAdd(false);
  }
  return Visit(E->getSubExpr(), SubExprChoice);
}
/// VisitCXXTemporaryObjectExpr - Append the temporary-object construction
/// itself and then visit its children (the constructor arguments).
CFGBlock *CFGBuilder::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *C,
                                                  AddStmtChoice asc) {
  autoCreateBlock();
  appendStmt(Block, C);
  return VisitChildren(C);
}
/// VisitImplicitCastExpr - Append the implicit cast when required; the
/// subexpression is visited with a default AddStmtChoice (no AlwaysAdd).
CFGBlock *CFGBuilder::VisitImplicitCastExpr(ImplicitCastExpr *E,
                                            AddStmtChoice asc) {
  if (asc.alwaysAdd(*this, E)) {
    autoCreateBlock();
    appendStmt(Block, E);
  }
  return Visit(E->getSubExpr(), AddStmtChoice());
}
/// VisitIndirectGotoStmt - Handle 'goto *expr'. All indirect gotos funnel
/// through a single per-CFG dispatch block, created lazily on first use;
/// the goto terminates the current block and targets that dispatch block.
CFGBlock *CFGBuilder::VisitIndirectGotoStmt(IndirectGotoStmt *I) {
  // Lazily create the shared dispatch block for indirect gotos.
  CFGBlock *DispatchBlock = cfg->getIndirectGotoBlock();
  if (!DispatchBlock) {
    DispatchBlock = createBlock(false);
    cfg->setIndirectGotoBlock(DispatchBlock);
  }
  // An indirect goto is a control-flow statement: stop the current block
  // and start a new one terminated by it.
  if (badCFG)
    return nullptr;
  Block = createBlock(false);
  Block->setTerminator(I);
  addSuccessor(Block, DispatchBlock);
  return addStmt(I->getTarget());
}
/// VisitForTemporaryDtors - Walk expression \p E looking for temporaries
/// whose destructors must appear in the CFG. Dispatches on the statement
/// class; transparent wrappers (casts, parens, default args) are unwrapped
/// iteratively via the tryAgain label to avoid recursion. \p BindToTemporary
/// is true when the nearest CXXBindTemporaryExpr's lifetime is extended and
/// thus needs no destructor here.
CFGBlock *CFGBuilder::VisitForTemporaryDtors(Stmt *E, bool BindToTemporary,
                                             TempDtorContext &Context) {
  assert(BuildOpts.AddImplicitDtors && BuildOpts.AddTemporaryDtors);
tryAgain:
  if (!E) {
    badCFG = true;
    return nullptr;
  }
  switch (E->getStmtClass()) {
    default:
      return VisitChildrenForTemporaryDtors(E, Context);
    case Stmt::BinaryOperatorClass:
      return VisitBinaryOperatorForTemporaryDtors(cast<BinaryOperator>(E),
                                                  Context);
    case Stmt::CXXBindTemporaryExprClass:
      return VisitCXXBindTemporaryExprForTemporaryDtors(
          cast<CXXBindTemporaryExpr>(E), BindToTemporary, Context);
    case Stmt::BinaryConditionalOperatorClass:
    case Stmt::ConditionalOperatorClass:
      return VisitConditionalOperatorForTemporaryDtors(
          cast<AbstractConditionalOperator>(E), BindToTemporary, Context);
    case Stmt::ImplicitCastExprClass:
      // For implicit cast we want BindToTemporary to be passed further.
      E = cast<CastExpr>(E)->getSubExpr();
      goto tryAgain;
    case Stmt::CXXFunctionalCastExprClass:
      // For functional cast we want BindToTemporary to be passed further.
      E = cast<CXXFunctionalCastExpr>(E)->getSubExpr();
      goto tryAgain;
    case Stmt::ParenExprClass:
      E = cast<ParenExpr>(E)->getSubExpr();
      goto tryAgain;
    case Stmt::MaterializeTemporaryExprClass: {
      // A materialized temporary with a storage duration other than
      // full-expression has its lifetime extended; bind instead of
      // destroying at the end of the full expression.
      const MaterializeTemporaryExpr* MTE = cast<MaterializeTemporaryExpr>(E);
      BindToTemporary = (MTE->getStorageDuration() != SD_FullExpression);
      SmallVector<const Expr *, 2> CommaLHSs;
      SmallVector<SubobjectAdjustment, 2> Adjustments;
      // Find the expression whose lifetime needs to be extended.
      E = const_cast<Expr *>(
          cast<MaterializeTemporaryExpr>(E)
              ->GetTemporaryExpr()
              ->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments));
      // Visit the skipped comma operator left-hand sides for other temporaries.
      for (const Expr *CommaLHS : CommaLHSs) {
        VisitForTemporaryDtors(const_cast<Expr *>(CommaLHS),
                               /*BindToTemporary=*/false, Context);
      }
      goto tryAgain;
    }
    case Stmt::BlockExprClass:
      // Don't recurse into blocks; their subexpressions don't get evaluated
      // here.
      return Block;
    case Stmt::LambdaExprClass: {
      // For lambda expressions, only recurse into the capture initializers,
      // and not the body.
      auto *LE = cast<LambdaExpr>(E);
      CFGBlock *B = Block;
      for (Expr *Init : LE->capture_inits()) {
        if (CFGBlock *R = VisitForTemporaryDtors(
                Init, /*BindToTemporary=*/false, Context))
          B = R;
      }
      return B;
    }
    case Stmt::CXXDefaultArgExprClass:
      E = cast<CXXDefaultArgExpr>(E)->getExpr();
      goto tryAgain;
    case Stmt::CXXDefaultInitExprClass:
      E = cast<CXXDefaultInitExpr>(E)->getExpr();
      goto tryAgain;
  }
}
/// VisitChildrenForTemporaryDtors - Default traversal for temporary
/// destructors: visit every child of \p E in natural order (the bottom-up
/// CFG construction reverses that order in the final graph). Lambdas are
/// not descended into because they carry their own CFGs.
CFGBlock *CFGBuilder::VisitChildrenForTemporaryDtors(Stmt *E,
                                                     TempDtorContext &Context) {
  if (isa<LambdaExpr>(E)) {
    // Do not visit the children of lambdas; they have their own CFGs.
    return Block;
  }
  CFGBlock *LastBlock = Block;
  for (Stmt *SubStmt : E->children()) {
    if (!SubStmt)
      continue;
    if (CFGBlock *NewBlock = VisitForTemporaryDtors(SubStmt, false, Context))
      LastBlock = NewBlock;
  }
  return LastBlock;
}
/// VisitBinaryOperatorForTemporaryDtors - Emit temporary destructors for a
/// binary operator. Logical && / || need a conditional destructor branch
/// because the RHS may not execute; assignments visit RHS before LHS
/// (reverse of evaluation order, since the CFG is built bottom-up); all
/// other operators visit LHS before RHS for the same reason.
CFGBlock *CFGBuilder::VisitBinaryOperatorForTemporaryDtors(
    BinaryOperator *E, TempDtorContext &Context) {
  if (E->isLogicalOp()) {
    VisitForTemporaryDtors(E->getLHS(), false, Context);
    // Whether the RHS runs is decided by the LHS's value: for '&&' the RHS
    // runs when the LHS is true, for '||' when it is false (hence negate).
    TryResult RHSExecuted = tryEvaluateBool(E->getLHS());
    if (RHSExecuted.isKnown() && E->getOpcode() == BO_LOr)
      RHSExecuted.negate();
    // We do not know at CFG-construction time whether the right-hand-side was
    // executed, thus we add a branch node that depends on the temporary
    // constructor call.
    TempDtorContext RHSContext(
        bothKnownTrue(Context.KnownExecuted, RHSExecuted));
    VisitForTemporaryDtors(E->getRHS(), false, RHSContext);
    InsertTempDtorDecisionBlock(RHSContext);
    return Block;
  }
  if (E->isAssignmentOp()) {
    // For assignment operator (=) LHS expression is visited
    // before RHS expression. For destructors visit them in reverse order.
    CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS(), false, Context);
    CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS(), false, Context);
    return LHSBlock ? LHSBlock : RHSBlock;
  }
  // For any other binary operator RHS expression is visited before
  // LHS expression (order of children). For destructors visit them in reverse
  // order.
  CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS(), false, Context);
  CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS(), false, Context);
  return RHSBlock ? RHSBlock : LHSBlock;
}
/// VisitCXXBindTemporaryExprForTemporaryDtors - Emit the destructor element
/// for a bound temporary (unless its lifetime is extended). No-return
/// destructors get a block with no flow out; conditionally-executed
/// temporaries get a fresh block that is later wired to a decision block
/// via the context's decision point.
CFGBlock *CFGBuilder::VisitCXXBindTemporaryExprForTemporaryDtors(
    CXXBindTemporaryExpr *E, bool BindToTemporary, TempDtorContext &Context) {
  // First add destructors for temporaries in subexpression.
  CFGBlock *B = VisitForTemporaryDtors(E->getSubExpr(), false, Context);
  if (!BindToTemporary) {
    // If lifetime of temporary is not prolonged (by assigning to constant
    // reference) add destructor for it.
    const CXXDestructorDecl *Dtor = E->getTemporary()->getDestructor();
    if (Dtor->getParent()->isAnyDestructorNoReturn()) {
      // If the destructor is marked as a no-return destructor, we need to
      // create a new block for the destructor which does not have as a
      // successor anything built thus far. Control won't flow out of this
      // block.
      if (B) Succ = B;
      Block = createNoReturnBlock();
    } else if (Context.needsTempDtorBranch()) {
      // If we need to introduce a branch, we add a new block that we will hook
      // up to a decision block later.
      if (B) Succ = B;
      Block = createBlock();
    } else {
      autoCreateBlock();
    }
    if (Context.needsTempDtorBranch()) {
      // Remember where the conditional destructor chain starts so that
      // InsertTempDtorDecisionBlock can branch around it.
      Context.setDecisionPoint(Succ, E);
    }
    appendTemporaryDtor(Block, E);
    B = Block;
  }
  return B;
}
/// InsertTempDtorDecisionBlock - If \p Context recorded a conditionally
/// executed temporary, insert a decision block that branches on whether the
/// temporary's constructor actually ran: one edge to the destructor chain
/// just built (current Block), the other past it to \p FalseSucc (or the
/// context's saved successor). Edges known impossible are added as
/// unreachable. No-op when no temporary was recorded.
void CFGBuilder::InsertTempDtorDecisionBlock(const TempDtorContext &Context,
                                             CFGBlock *FalseSucc) {
  // If no temporary was found, we do not need to insert a decision point.
  // (The old code re-asserted Context.TerminatorExpr right after this
  // check, which was redundant.)
  if (!Context.TerminatorExpr)
    return;
  CFGBlock *Decision = createBlock(false);
  Decision->setTerminator(CFGTerminator(Context.TerminatorExpr, true));
  addSuccessor(Decision, Block, !Context.KnownExecuted.isFalse());
  addSuccessor(Decision, FalseSucc ? FalseSucc : Context.Succ,
               !Context.KnownExecuted.isTrue());
  Block = Decision;
}
/// VisitConditionalOperatorForTemporaryDtors - Emit temporary destructors
/// for a (binary) conditional operator. Each arm is visited in its own
/// TempDtorContext seeded with whether that arm is known to execute; arms
/// that created temporaries are then guarded by decision blocks.
CFGBlock *CFGBuilder::VisitConditionalOperatorForTemporaryDtors(
    AbstractConditionalOperator *E, bool BindToTemporary,
    TempDtorContext &Context) {
  VisitForTemporaryDtors(E->getCond(), false, Context);
  CFGBlock *ConditionBlock = Block;
  CFGBlock *ConditionSucc = Succ;
  TryResult ConditionVal = tryEvaluateBool(E->getCond());
  TryResult NegatedVal = ConditionVal;
  if (NegatedVal.isKnown()) NegatedVal.negate();
  // True arm: executes when the enclosing context executes AND the
  // condition is true.
  TempDtorContext TrueContext(
      bothKnownTrue(Context.KnownExecuted, ConditionVal));
  VisitForTemporaryDtors(E->getTrueExpr(), BindToTemporary, TrueContext);
  CFGBlock *TrueBlock = Block;
  // Rewind to the condition and build the false arm from the same point.
  Block = ConditionBlock;
  Succ = ConditionSucc;
  TempDtorContext FalseContext(
      bothKnownTrue(Context.KnownExecuted, NegatedVal));
  VisitForTemporaryDtors(E->getFalseExpr(), BindToTemporary, FalseContext);
  if (TrueContext.TerminatorExpr && FalseContext.TerminatorExpr) {
    InsertTempDtorDecisionBlock(FalseContext, TrueBlock);
  } else if (TrueContext.TerminatorExpr) {
    Block = TrueBlock;
    InsertTempDtorDecisionBlock(TrueContext);
  } else {
    InsertTempDtorDecisionBlock(FalseContext);
  }
  return Block;
}
} // end anonymous namespace
/// createBlock - Constructs and adds a new CFGBlock to the CFG. The block has
/// no successors or predecessors. If this is the first block created in the
/// CFG, it is automatically set to be the Entry and Exit of the CFG.
CFGBlock *CFG::createBlock() {
  const bool IsFirstBlock = (begin() == end());
  // Allocate the block from the CFG's allocator and construct it in place.
  CFGBlock *NewBlock = getAllocator().Allocate<CFGBlock>();
  new (NewBlock) CFGBlock(NumBlockIDs++, BlkBVC, this);
  Blocks.push_back(NewBlock, BlkBVC);
  // The very first block doubles as both Entry and Exit.
  if (IsFirstBlock)
    Entry = Exit = &back();
  return &back();
}
/// buildCFG - Constructs a CFG from an AST. Thin entry point that delegates
/// to a freshly-constructed CFGBuilder.
std::unique_ptr<CFG> CFG::buildCFG(const Decl *D, Stmt *Statement,
                                   ASTContext *C, const BuildOptions &BO) {
  CFGBuilder B(C, BO);
  return B.buildCFG(D, Statement);
}
/// getDestructorDecl - Return the destructor that this implicit-destructor
/// CFG element invokes, or null for element kinds where it is not yet
/// supported (base/member dtors). Must not be called on non-dtor elements.
const CXXDestructorDecl *
CFGImplicitDtor::getDestructorDecl(ASTContext &astContext) const {
  switch (getKind()) {
    case CFGElement::Statement:
    case CFGElement::Initializer:
    case CFGElement::NewAllocator:
      llvm_unreachable("getDestructorDecl should only be used with "
                       "ImplicitDtors");
    case CFGElement::AutomaticObjectDtor: {
      const VarDecl *var = castAs<CFGAutomaticObjDtor>().getVarDecl();
      QualType ty = var->getType();
      // Strip references and array dimensions to reach the element type.
      ty = ty.getNonReferenceType();
      while (const ArrayType *arrayType = astContext.getAsArrayType(ty)) {
        ty = arrayType->getElementType();
      }
      // NOTE(review): the cast assumes the stripped type is a C++ class
      // (these elements are only created for class-typed objects).
      const RecordType *recordType = ty->getAs<RecordType>();
      const CXXRecordDecl *classDecl =
      cast<CXXRecordDecl>(recordType->getDecl());
      return classDecl->getDestructor();
    }
    case CFGElement::DeleteDtor: {
      const CXXDeleteExpr *DE = castAs<CFGDeleteDtor>().getDeleteExpr();
      QualType DTy = DE->getDestroyedType();
      DTy = DTy.getNonReferenceType();
      const CXXRecordDecl *classDecl =
          astContext.getBaseElementType(DTy)->getAsCXXRecordDecl();
      return classDecl->getDestructor();
    }
    case CFGElement::TemporaryDtor: {
      const CXXBindTemporaryExpr *bindExpr =
          castAs<CFGTemporaryDtor>().getBindTemporaryExpr();
      const CXXTemporary *temp = bindExpr->getTemporary();
      return temp->getDestructor();
    }
    case CFGElement::BaseDtor:
    case CFGElement::MemberDtor:
      // Not yet supported.
      return nullptr;
  }
  llvm_unreachable("getKind() returned bogus value");
}
/// isNoReturn - Whether the destructor invoked by this element never
/// returns. Elements without a resolvable destructor (base/member dtors)
/// are conservatively treated as returning.
bool CFGImplicitDtor::isNoReturn(ASTContext &astContext) const {
  const CXXDestructorDecl *DD = getDestructorDecl(astContext);
  return DD && DD->isNoReturn();
}
//===----------------------------------------------------------------------===//
// CFGBlock operations.
//===----------------------------------------------------------------------===//
/// Construct an edge to \p B whose reachability is \p IsReachable: the block
/// pointer is stored in exactly one of the two slots (reachable vs.
/// possibly-unreachable), with the unreachable slot tagged accordingly.
CFGBlock::AdjacentBlock::AdjacentBlock(CFGBlock *B, bool IsReachable)
  : ReachableBlock(IsReachable ? B : nullptr),
    UnreachableBlock(!IsReachable ? B : nullptr,
                     B && IsReachable ? AB_Normal : AB_Unreachable) {}
/// Construct a reachable edge to \p B that also records \p AlternateBlock as
/// the "alternate" (optimized-away) successor when it differs from \p B.
CFGBlock::AdjacentBlock::AdjacentBlock(CFGBlock *B, CFGBlock *AlternateBlock)
  : ReachableBlock(B),
    UnreachableBlock(B == AlternateBlock ? nullptr : AlternateBlock,
                     B == AlternateBlock ? AB_Alternate : AB_Normal) {}
/// addSuccessor - Append \p Succ to this block's successor list, mirroring
/// the edge on the target side: a reachable target gets a predecessor edge
/// carrying this edge's reachability, and a possibly-unreachable target
/// gets a predecessor edge marked unreachable.
void CFGBlock::addSuccessor(AdjacentBlock Succ,
                            BumpVectorContext &C) {
  if (CFGBlock *Reachable = Succ.getReachableBlock())
    Reachable->Preds.push_back(AdjacentBlock(this, Succ.isReachable()), C);
  if (CFGBlock *Unreachable = Succ.getPossiblyUnreachableBlock())
    Unreachable->Preds.push_back(AdjacentBlock(this, false), C);
  Succs.push_back(Succ, C);
}
/// FilterEdge - Return true if the edge \p From -> \p To should be filtered
/// out of iteration under options \p F: null predecessors can be ignored,
/// and the implicit default edge of a switch whose enum cases are all
/// covered can be suppressed (any edge to a non-case-labeled block).
bool CFGBlock::FilterEdge(const CFGBlock::FilterOptions &F,
                          const CFGBlock *From, const CFGBlock *To) {
  if (F.IgnoreNullPredecessors && !From)
    return true;
  if (!To || !From || !F.IgnoreDefaultsWithCoveredEnums)
    return false;
  // Only edges leaving a fully-covered switch qualify for filtering.
  const SwitchStmt *SW =
      dyn_cast_or_null<SwitchStmt>(From->getTerminator().getStmt());
  if (!SW || !SW->isAllEnumCasesCovered())
    return false;
  // Filter when the target has no label, or a label that isn't a CaseStmt.
  const Stmt *Label = To->getLabel();
  return !Label || !isa<CaseStmt>(Label);
}
//===----------------------------------------------------------------------===//
// CFG pretty printing
//===----------------------------------------------------------------------===//
namespace {
/// Maps every Stmt and Decl appearing in a CFG to its printed position
/// "[B<block>.<stmt>]" so that later occurrences can be emitted as a short
/// cross-reference instead of being re-printed in full.
class StmtPrinterHelper : public PrinterHelper {
  typedef llvm::DenseMap<const Stmt*,std::pair<unsigned,unsigned> > StmtMapTy;
  typedef llvm::DenseMap<const Decl*,std::pair<unsigned,unsigned> > DeclMapTy;
  StmtMap StmtMap_unused_placeholder; // (see below)
  const LangOptions &LangOpts;
public:
} // end anonymous namespace
namespace {
/// StmtVisitor that prints a block terminator in abbreviated form: only the
/// controlling condition is printed in full, with "..." standing in for the
/// bodies/branches that live in other CFG blocks.
class CFGBlockTerminatorPrint
    : public StmtVisitor<CFGBlockTerminatorPrint,void> {
  raw_ostream &OS;
  StmtPrinterHelper* Helper;      // may be null (see printTerminator)
  PrintingPolicy Policy;
public:
  CFGBlockTerminatorPrint(raw_ostream &os, StmtPrinterHelper* helper,
                          const PrintingPolicy &Policy)
      : OS(os), Helper(helper), Policy(Policy) {
    // Terminators are printed on a single line.
    this->Policy.IncludeNewlines = false;
  }

  void VisitIfStmt(IfStmt *I) {
    OS << "if ";
    if (Stmt *C = I->getCond())
      C->printPretty(OS, Helper, Policy);
  }

  // Default case.
  void VisitStmt(Stmt *Terminator) {
    Terminator->printPretty(OS, Helper, Policy);
  }

  // A DeclStmt terminator corresponds to a conditionally-executed
  // initialization; printed as "static init <name>".
  void VisitDeclStmt(DeclStmt *DS) {
    VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
    OS << "static init " << VD->getName();
  }

  void VisitForStmt(ForStmt *F) {
    OS << "for (" ;
    if (F->getInit())
      OS << "...";
    OS << "; ";
    if (Stmt *C = F->getCond())
      C->printPretty(OS, Helper, Policy);
    OS << "; ";
    if (F->getInc())
      OS << "...";
    OS << ")";
  }

  void VisitWhileStmt(WhileStmt *W) {
    OS << "while " ;
    if (Stmt *C = W->getCond())
      C->printPretty(OS, Helper, Policy);
  }

  void VisitDoStmt(DoStmt *D) {
    OS << "do ... while ";
    if (Stmt *C = D->getCond())
      C->printPretty(OS, Helper, Policy);
  }

  void VisitSwitchStmt(SwitchStmt *Terminator) {
    OS << "switch ";
    Terminator->getCond()->printPretty(OS, Helper, Policy);
  }

  void VisitCXXTryStmt(CXXTryStmt *CS) {
    OS << "try ...";
  }

  void VisitAbstractConditionalOperator(AbstractConditionalOperator* C) {
    if (Stmt *Cond = C->getCond())
      Cond->printPretty(OS, Helper, Policy);
    OS << " ? ... : ...";
  }

  void VisitChooseExpr(ChooseExpr *C) {
    OS << "__builtin_choose_expr( ";
    if (Stmt *Cond = C->getCond())
      Cond->printPretty(OS, Helper, Policy);
    OS << " )";
  }

  void VisitIndirectGotoStmt(IndirectGotoStmt *I) {
    OS << "goto *";
    if (Stmt *T = I->getTarget())
      T->printPretty(OS, Helper, Policy);
  }

  // Only '&&' and '||' act as terminators; other binary operators fall
  // through to the generic expression printer.
  void VisitBinaryOperator(BinaryOperator* B) {
    if (!B->isLogicalOp()) {
      VisitExpr(B);
      return;
    }
    if (B->getLHS())
      B->getLHS()->printPretty(OS, Helper, Policy);
    switch (B->getOpcode()) {
      case BO_LOr:
        OS << " || ...";
        return;
      case BO_LAnd:
        OS << " && ...";
        return;
      default:
        llvm_unreachable("Invalid logical operator.");
    }
  }

  void VisitExpr(Expr *E) {
    E->printPretty(OS, Helper, Policy);
  }

public:
  /// Entry point: print the terminator, flagging temporary-destructor
  /// branches before dispatching on the statement kind.
  void print(CFGTerminator T) {
    if (T.isTemporaryDtorsBranch())
      OS << "(Temp Dtor) ";
    Visit(T.getStmt());
  }
};
} // end anonymous namespace
/// Print a single CFG element (statement, initializer, or one of the
/// implicit destructor/allocator elements) to OS, annotating the element
/// kind where it would otherwise be invisible in the pretty-printed output.
static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
                       const CFGElement &E) {
  if (Optional<CFGStmt> CS = E.getAs<CFGStmt>()) {
    const Stmt *S = CS->getStmt();
    assert(S != nullptr && "Expecting non-null Stmt");

    // special printing for statement-expressions: elide the body and
    // cross-reference only the final (value-producing) statement.
    if (const StmtExpr *SE = dyn_cast<StmtExpr>(S)) {
      const CompoundStmt *Sub = SE->getSubStmt();

      if (Sub->children()) {
        OS << "({ ... ; ";
        Helper.handledStmt(*SE->getSubStmt()->body_rbegin(),OS);
        OS << " })\n";
        return;
      }
    }
    // special printing for comma expressions: only the RHS is shown, as a
    // cross-reference to where it was printed.
    if (const BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
      if (B->getOpcode() == BO_Comma) {
        OS << "... , ";
        Helper.handledStmt(B->getRHS(),OS);
        OS << '\n';
        return;
      }
    }
    S->printPretty(OS, &Helper, PrintingPolicy(Helper.getLangOpts()));

    // Tag statement kinds that are ambiguous in pretty-printed form.
    if (isa<CXXOperatorCallExpr>(S)) {
      OS << " (OperatorCall)";
    }
    else if (isa<CXXBindTemporaryExpr>(S)) {
      OS << " (BindTemporary)";
    }
    else if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(S)) {
      OS << " (CXXConstructExpr, " << CCE->getType().getAsString() << ")";
    }
    else if (const CastExpr *CE = dyn_cast<CastExpr>(S)) {
      OS << " (" << CE->getStmtClassName() << ", "
         << CE->getCastKindName()
         << ", " << CE->getType().getAsString()
         << ")";
    }

    // Expressions need a newline.
    if (isa<Expr>(S))
      OS << '\n';

  } else if (Optional<CFGInitializer> IE = E.getAs<CFGInitializer>()) {
    // Constructor initializer: print "<target>(<init expr>)" plus a tag
    // saying whether it initializes a base, a delegate, or a member.
    const CXXCtorInitializer *I = IE->getInitializer();
    if (I->isBaseInitializer())
      OS << I->getBaseClass()->getAsCXXRecordDecl()->getName();
    else if (I->isDelegatingInitializer())
      OS << I->getTypeSourceInfo()->getType()->getAsCXXRecordDecl()->getName();
    else OS << I->getAnyMember()->getName();

    OS << "(";
    if (Expr *IE = I->getInit())
      IE->printPretty(OS, &Helper, PrintingPolicy(Helper.getLangOpts()));
    OS << ")";

    if (I->isBaseInitializer())
      OS << " (Base initializer)\n";
    else if (I->isDelegatingInitializer())
      OS << " (Delegating initializer)\n";
    else OS << " (Member initializer)\n";

  } else if (Optional<CFGAutomaticObjDtor> DE =
                 E.getAs<CFGAutomaticObjDtor>()) {
    // Implicit destructor of an automatic variable; strip references and
    // array dimensions to name the class whose destructor runs.
    const VarDecl *VD = DE->getVarDecl();
    Helper.handleDecl(VD, OS);

    const Type* T = VD->getType().getTypePtr();
    if (const ReferenceType* RT = T->getAs<ReferenceType>())
      T = RT->getPointeeType().getTypePtr();
    T = T->getBaseElementTypeUnsafe();

    OS << ".~" << T->getAsCXXRecordDecl()->getName().str() << "()";
    OS << " (Implicit destructor)\n";

  } else if (Optional<CFGNewAllocator> NE = E.getAs<CFGNewAllocator>()) {
    OS << "CFGNewAllocator(";
    if (const CXXNewExpr *AllocExpr = NE->getAllocatorExpr())
      AllocExpr->getType().print(OS, PrintingPolicy(Helper.getLangOpts()));
    OS << ")\n";
  } else if (Optional<CFGDeleteDtor> DE = E.getAs<CFGDeleteDtor>()) {
    // Destructor implied by a delete-expression.
    const CXXRecordDecl *RD = DE->getCXXRecordDecl();
    if (!RD)
      return;
    CXXDeleteExpr *DelExpr =
        const_cast<CXXDeleteExpr*>(DE->getDeleteExpr());
    Helper.handledStmt(cast<Stmt>(DelExpr->getArgument()), OS);
    OS << "->~" << RD->getName().str() << "()";
    OS << " (Implicit destructor)\n";
  } else if (Optional<CFGBaseDtor> BE = E.getAs<CFGBaseDtor>()) {
    const CXXBaseSpecifier *BS = BE->getBaseSpecifier();
    OS << "~" << BS->getType()->getAsCXXRecordDecl()->getName() << "()";
    OS << " (Base object destructor)\n";
  } else if (Optional<CFGMemberDtor> ME = E.getAs<CFGMemberDtor>()) {
    const FieldDecl *FD = ME->getFieldDecl();
    const Type *T = FD->getType()->getBaseElementTypeUnsafe();
    OS << "this->" << FD->getName();
    OS << ".~" << T->getAsCXXRecordDecl()->getName() << "()";
    OS << " (Member object destructor)\n";
  } else if (Optional<CFGTemporaryDtor> TE = E.getAs<CFGTemporaryDtor>()) {
    const CXXBindTemporaryExpr *BT = TE->getBindTemporaryExpr();
    OS << "~";
    BT->getType().print(OS, PrintingPolicy(Helper.getLangOpts()));
    OS << "() (Temporary object destructor)\n";
  }
}
/// Print one CFG block: header line, optional label, numbered elements,
/// terminator, and (when print_edges is set) predecessor/successor lists.
static void print_block(raw_ostream &OS, const CFG* cfg,
                        const CFGBlock &B,
                        StmtPrinterHelper &Helper, bool print_edges,
                        bool ShowColors) {
  Helper.setBlockID(B.getBlockID());

  // Print the header.
  if (ShowColors)
    OS.changeColor(raw_ostream::YELLOW, true);

  OS << "\n [B" << B.getBlockID();

  if (&B == &cfg->getEntry())
    OS << " (ENTRY)]\n";
  else if (&B == &cfg->getExit())
    OS << " (EXIT)]\n";
  else if (&B == cfg->getIndirectGotoBlock())
    OS << " (INDIRECT GOTO DISPATCH)]\n";
  else if (B.hasNoReturnElement())
    OS << " (NORETURN)]\n";
  else
    OS << "]\n";

  if (ShowColors)
    OS.resetColor();

  // Print the label of this block (case/default/catch/goto label).
  if (Stmt *Label = const_cast<Stmt*>(B.getLabel())) {

    if (print_edges)
      OS << "  ";

    if (LabelStmt *L = dyn_cast<LabelStmt>(Label))
      OS << L->getName();
    else if (CaseStmt *C = dyn_cast<CaseStmt>(Label)) {
      OS << "case ";
      if (C->getLHS())
        C->getLHS()->printPretty(OS, &Helper,
                                 PrintingPolicy(Helper.getLangOpts()));
      if (C->getRHS()) {
        // GNU case range: "case lo ... hi".
        OS << " ... ";
        C->getRHS()->printPretty(OS, &Helper,
                                 PrintingPolicy(Helper.getLangOpts()));
      }
    } else if (isa<DefaultStmt>(Label))
      OS << "default";
    else if (CXXCatchStmt *CS = dyn_cast<CXXCatchStmt>(Label)) {
      OS << "catch (";
      if (CS->getExceptionDecl())
        CS->getExceptionDecl()->print(OS, PrintingPolicy(Helper.getLangOpts()),
                                      0);
      else
        OS << "...";
      OS << ")";
    } else
      llvm_unreachable("Invalid label statement in CFGBlock.");

    OS << ":\n";
  }

  // Iterate through the statements in the block and print them.
  unsigned j = 1;
  for (CFGBlock::const_iterator I = B.begin(), E = B.end() ;
       I != E ; ++I, ++j ) {

    // Print the statement # in the basic block and the statement itself.
    if (print_edges)
      OS << " ";

    OS << llvm::format("%3d", j) << ": ";

    Helper.setStmtID(j);

    print_elem(OS, Helper, *I);
  }

  // Print the terminator of this block.  Block ID -1 disables the
  // self-reference suppression in the helper while printing it.
  if (B.getTerminator()) {
    if (ShowColors)
      OS.changeColor(raw_ostream::GREEN);

    OS << "   T: ";

    Helper.setBlockID(-1);

    PrintingPolicy PP(Helper.getLangOpts());
    CFGBlockTerminatorPrint TPrinter(OS, &Helper, PP);
    TPrinter.print(B.getTerminator());
    OS << '\n';

    if (ShowColors)
      OS.resetColor();
  }

  if (print_edges) {
    // Print the predecessors of this block.
    if (!B.pred_empty()) {
      const raw_ostream::Colors Color = raw_ostream::BLUE;
      if (ShowColors)
        OS.changeColor(Color);
      OS << "   Preds " ;
      if (ShowColors)
        OS.resetColor();
      OS << '(' << B.pred_size() << "):";
      unsigned i = 0;

      if (ShowColors)
        OS.changeColor(Color);

      for (CFGBlock::const_pred_iterator I = B.pred_begin(), E = B.pred_end();
           I != E; ++I, ++i) {
        // Wrap the list every 10 entries.
        if (i % 10 == 8)
          OS << "\n     ";

        // NOTE: this local intentionally shadows the 'B' parameter.
        CFGBlock *B = *I;
        bool Reachable = true;
        if (!B) {
          // A null reachable pointer means the edge is an unreachable one.
          Reachable = false;
          B = I->getPossiblyUnreachableBlock();
        }

        OS << " B" << B->getBlockID();
        if (!Reachable)
          OS << "(Unreachable)";
      }

      if (ShowColors)
        OS.resetColor();

      OS << '\n';
    }

    // Print the successors of this block.
    if (!B.succ_empty()) {
      const raw_ostream::Colors Color = raw_ostream::MAGENTA;
      if (ShowColors)
        OS.changeColor(Color);
      OS << "   Succs ";
      if (ShowColors)
        OS.resetColor();
      OS << '(' << B.succ_size() << "):";
      unsigned i = 0;

      if (ShowColors)
        OS.changeColor(Color);

      for (CFGBlock::const_succ_iterator I = B.succ_begin(), E = B.succ_end();
           I != E; ++I, ++i) {
        if (i % 10 == 8)
          OS << "\n    ";

        // Shadows the 'B' parameter, as in the predecessor loop above.
        CFGBlock *B = *I;

        bool Reachable = true;
        if (!B) {
          Reachable = false;
          B = I->getPossiblyUnreachableBlock();
        }

        if (B) {
          OS << " B" << B->getBlockID();
          if (!Reachable)
            OS << "(Unreachable)";
        }
        else {
          OS << " NULL";
        }
      }

      if (ShowColors)
        OS.resetColor();
      OS << '\n';
    }
  }
}
/// dump - A simple pretty printer of a CFG that outputs to stderr.
/// Convenience wrapper around CFG::print with llvm::errs() as the stream.
void CFG::dump(const LangOptions &LO, bool ShowColors) const {
  print(llvm::errs(), LO, ShowColors);
}
/// print - A simple pretty printer of a CFG that outputs to an ostream.
/// The entry block is printed first and the exit block last; all other
/// blocks appear in between, in their order within the CFG's block list.
void CFG::print(raw_ostream &OS, const LangOptions &LO, bool ShowColors) const {
  StmtPrinterHelper Helper(this, LO);

  // Print the entry block.
  print_block(OS, this, getEntry(), Helper, true, ShowColors);

  // Print every ordinary block, skipping entry and exit (printed
  // explicitly before and after this loop).
  for (CFGBlock *B : Blocks) {
    if (B == &getEntry() || B == &getExit())
      continue;
    print_block(OS, this, *B, Helper, true, ShowColors);
  }

  // Print the exit block.
  print_block(OS, this, getExit(), Helper, true, ShowColors);
  OS << '\n';
  OS.flush();
}
/// dump - A simple pretty printer of a CFGBlock that outputs to stderr.
void CFGBlock::dump(const CFG* cfg, const LangOptions &LO,
                    bool ShowColors) const {
  print(llvm::errs(), cfg, LO, ShowColors);
}
// Zero-argument debugger convenience: dump with default language options
// and no color.
void CFGBlock::dump() const {
  dump(getParent(), LangOptions(), false);
}
/// print - A simple pretty printer of a CFGBlock that outputs to an ostream.
/// Generally this will only be called from CFG::print.  Builds a fresh
/// StmtPrinterHelper over the whole CFG so cross-references resolve.
void CFGBlock::print(raw_ostream &OS, const CFG* cfg,
                     const LangOptions &LO, bool ShowColors) const {
  StmtPrinterHelper Helper(cfg, LO);
  print_block(OS, cfg, *this, Helper, true, ShowColors);
  OS << '\n';
}
/// printTerminator - A simple pretty printer of the terminator of a CFGBlock.
/// Passes a null helper, so statements are printed in full rather than as
/// cross-references.
void CFGBlock::printTerminator(raw_ostream &OS,
                               const LangOptions &LO) const {
  CFGBlockTerminatorPrint TPrinter(OS, nullptr, PrintingPolicy(LO));
  TPrinter.print(getTerminator());
}
/// Return the expression that controls which successor of this block is
/// taken, or null when the terminator has no condition.  For '&&'/'||'
/// terminators the LHS is the condition; ObjC for-collection statements are
/// returned whole.  When StripParens is set, surrounding parentheses are
/// removed from the result.
Stmt *CFGBlock::getTerminatorCondition(bool StripParens) {
  Stmt *Terminator = this->Terminator;
  if (!Terminator)
    return nullptr;

  Expr *E = nullptr;

  switch (Terminator->getStmtClass()) {
    default:
      break;

    case Stmt::CXXForRangeStmtClass:
      E = cast<CXXForRangeStmt>(Terminator)->getCond();
      break;

    case Stmt::ForStmtClass:
      E = cast<ForStmt>(Terminator)->getCond();
      break;

    case Stmt::WhileStmtClass:
      E = cast<WhileStmt>(Terminator)->getCond();
      break;

    case Stmt::DoStmtClass:
      E = cast<DoStmt>(Terminator)->getCond();
      break;

    case Stmt::IfStmtClass:
      E = cast<IfStmt>(Terminator)->getCond();
      break;

    case Stmt::ChooseExprClass:
      E = cast<ChooseExpr>(Terminator)->getCond();
      break;

    case Stmt::IndirectGotoStmtClass:
      E = cast<IndirectGotoStmt>(Terminator)->getTarget();
      break;

    case Stmt::SwitchStmtClass:
      E = cast<SwitchStmt>(Terminator)->getCond();
      break;

    case Stmt::BinaryConditionalOperatorClass:
      E = cast<BinaryConditionalOperator>(Terminator)->getCond();
      break;

    case Stmt::ConditionalOperatorClass:
      E = cast<ConditionalOperator>(Terminator)->getCond();
      break;

    case Stmt::BinaryOperatorClass: // '&&' and '||'
      E = cast<BinaryOperator>(Terminator)->getLHS();
      break;

    case Stmt::ObjCForCollectionStmtClass:
      return Terminator;
  }

  if (!StripParens)
    return E;

  return E ? E->IgnoreParens() : nullptr;
}
//===----------------------------------------------------------------------===//
// CFG Graphviz Visualization
//===----------------------------------------------------------------------===//
#ifndef NDEBUG
// Shared between viewCFG() and DOTGraphTraits::getNodeLabel(): the helper
// cannot be threaded through llvm::ViewGraph, so it is stashed here for the
// duration of the call.  Debug builds only.
static StmtPrinterHelper* GraphHelper;
#endif
/// Display this CFG with Graphviz (debug builds only; a no-op when NDEBUG
/// is defined).  The printer helper is published through the file-static
/// GraphHelper for the duration of the ViewGraph call.
void CFG::viewCFG(const LangOptions &LO) const {
#ifndef NDEBUG
  StmtPrinterHelper H(this, LO);
  GraphHelper = &H;
  llvm::ViewGraph(this,"CFG");
  GraphHelper = nullptr;
#endif
}
namespace llvm {
/// Graphviz label generation for CFG nodes: each node's label is the
/// textual print_block output, post-processed for DOT ("\l" left-justified
/// line breaks).
template<>
struct DOTGraphTraits<const CFG*> : public DefaultDOTGraphTraits {

  DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getNodeLabel(const CFGBlock *Node, const CFG* Graph) {

#ifndef NDEBUG
    std::string OutSStr;
    llvm::raw_string_ostream Out(OutSStr);
    // Relies on GraphHelper having been set by CFG::viewCFG().
    print_block(Out,Graph, *Node, *GraphHelper, false, false);
    std::string& OutStr = Out.str();

    if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());

    // Process string output to make it nicer...
    // (rewrite '\n' as the DOT escape "\l"; note the insert keeps the
    // length check of the loop in sync with the growing string)
    for (unsigned i = 0; i != OutStr.length(); ++i)
      if (OutStr[i] == '\n') {                            // Left justify
        OutStr[i] = '\\';
        OutStr.insert(OutStr.begin()+i+1, 'l');
      }

    return OutStr;
#else
    return "";
#endif
  }
};
} // end namespace llvm
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/CFGReachabilityAnalysis.cpp | //==- CFGReachabilityAnalysis.cpp - Basic reachability analysis --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a flow-sensitive, (mostly) path-insensitive reachability
// analysis based on Clang's CFGs. Clients can query if a given basic block
// is reachable within the CFG.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/SmallVector.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
#include "clang/Analysis/CFG.h"
using namespace clang;
// Size the 'analyzed' bitvector with one entry per CFG block; all entries
// start false, meaning no destination block has been analyzed yet.
CFGReverseBlockReachabilityAnalysis::CFGReverseBlockReachabilityAnalysis(const CFG &cfg)
  : analyzed(cfg.getNumBlockIDs(), false) {}
/// Return true when Src can reach Dst in the CFG.  Reachability for a given
/// destination is computed once (lazily, by mapReachability) and then
/// answered from the cache.
bool CFGReverseBlockReachabilityAnalysis::isReachable(const CFGBlock *Src,
                                                      const CFGBlock *Dst) {

  const unsigned DstBlockID = Dst->getBlockID();

  // If we haven't analyzed the destination node, run the analysis now
  if (!analyzed[DstBlockID]) {
    mapReachability(Dst);
    analyzed[DstBlockID] = true;
  }

  // Return the cached result
  return reachable[DstBlockID][Src->getBlockID()];
}
// Maps reachability to a common node by walking the predecessors of the
// destination node.  This is a reverse DFS from Dst: every block popped off
// the worklist (other than the initial Dst itself) can reach Dst.
void CFGReverseBlockReachabilityAnalysis::mapReachability(const CFGBlock *Dst) {
  SmallVector<const CFGBlock *, 11> worklist;
  llvm::BitVector visited(analyzed.size());

  ReachableSet &DstReachability = reachable[Dst->getBlockID()];
  DstReachability.resize(analyzed.size(), false);

  // Start searching from the destination node, since we commonly will perform
  // multiple queries relating to a destination node.
  worklist.push_back(Dst);
  bool firstRun = true;

  while (!worklist.empty()) {
    const CFGBlock *block = worklist.pop_back_val();

    if (visited[block->getBlockID()])
      continue;
    visited[block->getBlockID()] = true;

    // Update reachability information for this node -> Dst
    if (!firstRun) {
      // Don't insert Dst -> Dst unless it was a predecessor of itself
      // (i.e. Dst appears again later via a cycle through its preds).
      DstReachability[block->getBlockID()] = true;
    }
    else
      firstRun = false;

    // Add the predecessors to the worklist.  Null predecessors (pruned
    // unreachable edges) are skipped.
    for (CFGBlock::const_pred_iterator i = block->pred_begin(),
         e = block->pred_end(); i != e; ++i) {
      if (*i)
        worklist.push_back(*i);
    }
  }
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/ThreadSafety.cpp | //===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// A intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based off of an annotation system.
//
// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
// for more information.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/Analyses/ThreadSafetyLogical.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <ostream>
#include <sstream>
#include <utility>
#include <vector>
using namespace clang;
using namespace threadSafety;
// Key method definition: the out-of-line destructor anchors
// ThreadSafetyHandler's vtable to this translation unit.
ThreadSafetyHandler::~ThreadSafetyHandler() {}
namespace {
/// Pretty-printer for TIL (typed intermediate language) expressions,
/// writing to an llvm::raw_ostream.
class TILPrinter :
    public til::PrettyPrinter<TILPrinter, llvm::raw_ostream> {};
/// Issue a warning about an invalid lock expression.  The warning is
/// attached to DeclExp's location; when no usable location exists, nothing
/// is reported.
static void warnInvalidLock(ThreadSafetyHandler &Handler,
                            const Expr *MutexExp, const NamedDecl *D,
                            const Expr *DeclExp, StringRef Kind) {
  const SourceLocation Loc =
      DeclExp ? DeclExp->getExprLoc() : SourceLocation();

  // FIXME: add a note about the attribute location in MutexExp or D
  if (Loc.isValid())
    Handler.handleInvalidLockExp(Kind, Loc);
}
/// \brief A set of CapabilityInfo objects, which are compiled from the
/// requires attributes on a function.
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
  /// \brief Push M onto list, but discard duplicates.
  void push_back_nodup(const CapabilityExpr &CapE) {
    // Capture CapE by reference: the previous default copy capture ([=])
    // made a needless copy of the CapabilityExpr on every call just to run
    // the comparisons.
    bool AlreadyPresent =
        std::any_of(begin(), end(), [&CapE](const CapabilityExpr &CapE2) {
          return CapE.equals(CapE2);
        });
    if (!AlreadyPresent)
      push_back(CapE);
  }
};
class FactManager;
class FactSet;

/// \brief This is a helper class that stores a fact that is known at a
/// particular point in program execution.  Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
///
/// FIXME: this analysis does not currently support either re-entrant
/// locking or lock "upgrading" and "downgrading" between exclusive and
/// shared.
class FactEntry : public CapabilityExpr {
private:
  LockKind          LKind;            ///< exclusive or shared
  SourceLocation    AcquireLoc;       ///< where it was acquired.
  bool              Asserted;         ///< true if the lock was asserted
  bool              Declared;         ///< true if the lock was declared

public:
  FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
            bool Asrt, bool Declrd = false)
      : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt),
        Declared(Declrd) {}
  virtual ~FactEntry() {}

  LockKind          kind()       const { return LKind;      }
  SourceLocation    loc()        const { return AcquireLoc; }
  bool              asserted()   const { return Asserted;   }
  bool              declared()   const { return Declared;   }

  void setDeclared(bool D) { Declared = D; }

  /// Invoked when this fact is present on one branch of a CFG join but
  /// absent on another; subclasses decide whether/how to diagnose.
  virtual void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const = 0;
  /// Invoked when the capability is released; subclasses update FSet and
  /// report errors through Handler.
  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                            bool FullyRemove, ThreadSafetyHandler &Handler,
                            StringRef DiagKind) const = 0;

  // Return true if LKind >= LK, where exclusive > shared
  bool isAtLeast(LockKind LK) {
    return  (LKind == LK_Exclusive) || (LK == LK_Shared);
  }
};
/// Compact index of a FactEntry within a FactManager; FactSets store these
/// instead of pointers to stay small and cheap to copy.
typedef unsigned short FactID;

/// \brief FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
  std::vector<std::unique_ptr<FactEntry>> Facts;

public:
  /// Take ownership of Entry and return its index.  Note the index is
  /// truncated to unsigned short, so a routine producing more than 65535
  /// facts would wrap — presumably never reached in practice (TODO confirm).
  FactID newFact(std::unique_ptr<FactEntry> Entry) {
    Facts.push_back(std::move(Entry));
    return static_cast<unsigned short>(Facts.size() - 1);
  }

  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
  FactEntry &operator[](FactID F) { return *Facts[F]; }
};
/// \brief A FactSet is the set of facts that are known to be true at a
/// particular program point.  FactSets must be small, because they are
/// frequently copied, and are thus implemented as a set of indices into a
/// table maintained by a FactManager.  A typical FactSet only holds 1 or 2
/// locks, so we can get away with doing a linear search for lookup.  Note
/// that a hashtable or map is inappropriate in this case, because lookups
/// may involve partial pattern matches, rather than exact matches.
class FactSet {
private:
  typedef SmallVector<FactID, 4> FactVec;

  FactVec FactIDs;

public:
  typedef FactVec::iterator iterator;
  typedef FactVec::const_iterator const_iterator;

  iterator begin() { return FactIDs.begin(); }
  const_iterator begin() const { return FactIDs.begin(); }

  iterator end() { return FactIDs.end(); }
  const_iterator end() const { return FactIDs.end(); }

  bool isEmpty() const { return FactIDs.size() == 0; }

  // Return true if the set contains only negative facts
  bool isEmpty(FactManager &FactMan) const {
    for (FactID FID : *this) {
      if (!FactMan[FID].negative())
        return false;
    }
    return true;
  }

  void addLockByID(FactID ID) { FactIDs.push_back(ID); }

  /// Register Entry with FM and add its ID to this set.
  FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
    FactID F = FM.newFact(std::move(Entry));
    FactIDs.push_back(F);
    return F;
  }

  /// Remove the first fact matching CapE.  Order is not preserved: the
  /// matching entry is overwritten with the last element, which is then
  /// popped (the final element needs its own check since it has no
  /// successor to swap with).
  bool removeLock(FactManager& FM, const CapabilityExpr &CapE) {
    unsigned n = FactIDs.size();
    if (n == 0)
      return false;

    for (unsigned i = 0; i < n-1; ++i) {
      if (FM[FactIDs[i]].matches(CapE)) {
        FactIDs[i] = FactIDs[n-1];
        FactIDs.pop_back();
        return true;
      }
    }
    if (FM[FactIDs[n-1]].matches(CapE)) {
      FactIDs.pop_back();
      return true;
    }
    return false;
  }

  iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
    return std::find_if(begin(), end(), [&](FactID ID) {
      return FM[ID].matches(CapE);
    });
  }

  /// Find a fact that exactly matches CapE, or null.
  FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) {
      return FM[ID].matches(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  /// As findLock, but using the universal-quantifier-aware match.
  FactEntry *findLockUniv(FactManager &FM, const CapabilityExpr &CapE) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
      return FM[ID].matchesUniv(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  /// Find a fact that partially matches CapE (used for diagnostics when no
  /// exact match exists), or null.
  FactEntry *findPartialMatch(FactManager &FM,
                              const CapabilityExpr &CapE) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
      return FM[ID].partiallyMatches(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const {
    auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
      return FM[ID].valueDecl() == Vd;
    });
    return I != end();
  }
};
class ThreadSafetyAnalyzer;
} // namespace
namespace clang {
namespace threadSafety {
/// Tracks the user-declared "acquired before/after" ordering between
/// capabilities, used to diagnose lock-ordering violations.
class BeforeSet {
private:
  typedef SmallVector<const ValueDecl*, 4>  BeforeVect;

  struct BeforeInfo {
    BeforeInfo() : Vect(nullptr), Visited(0) { }
    // noexcept lets containers move BeforeInfo on reallocation rather than
    // fall back to copying (which is deleted anyway via the unique_ptr
    // member); the unique_ptr move cannot throw.
    BeforeInfo(BeforeInfo &&O) noexcept
        : Vect(std::move(O.Vect)), Visited(O.Visited)
    {}

    std::unique_ptr<BeforeVect> Vect;  // Capabilities ordered before this one.
    int Visited;  // Traversal mark; starts at 0.  Semantics are defined by
                  // checkBeforeAfter (not visible here — TODO confirm).
  };

  typedef llvm::DenseMap<const ValueDecl*, BeforeInfo>  BeforeMap;
  typedef llvm::DenseMap<const ValueDecl*, bool>        CycleMap;

public:
  BeforeSet() { }

  /// Build (and cache) the BeforeInfo for Vd from its attributes.
  BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
                              ThreadSafetyAnalyzer& Analyzer);

  /// Check the before/after ordering of Vd against the currently held
  /// facts, reporting violations at Loc.
  void checkBeforeAfter(const ValueDecl* Vd,
                        const FactSet& FSet,
                        ThreadSafetyAnalyzer& Analyzer,
                        SourceLocation Loc, StringRef CapKind);

private:
  BeforeMap BMap;
  CycleMap  CycMap;
};
} // end namespace threadSafety
} // end namespace clang
namespace {
// Per-program-point map from local variables to the index of their current
// SSA-style definition in LocalVariableMap::VarDefinitions.
typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext;
class LocalVariableMap;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };
/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG.  See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  FactSet EntrySet;             // Lockset held at entry to block
  FactSet ExitSet;              // Lockset held at exit from block
  LocalVarContext EntryContext; // Context held at entry to block
  LocalVarContext ExitContext;  // Context held at exit from block
  SourceLocation EntryLoc;      // Location of first statement in block
  SourceLocation ExitLoc;       // Location of last statement in block.
  unsigned EntryIndex;          // Used to replay contexts later
  bool Reachable;               // Is this block reachable?

  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }

  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  // EntryIndex was previously left uninitialized here, making any read
  // before the analysis assigned it indeterminate; zero-initialize it
  // alongside Reachable.
  CFGBlockInfo(LocalVarContext EmptyCtx)
    : EntryContext(EmptyCtx), ExitContext(EmptyCtx),
      EntryIndex(0), Reachable(false)
  { }

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};
// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions. It provides SSA-like functionality when traversing the
// CFG. Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace local variables declrefs
// with their SSA-names. Instead, we compute a Context for each point in the
// code, which maps local variables to the appropriate SSA-name. This map
// changes with each assignment.
//
// The map is computed in a single pass over the CFG. Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
typedef LocalVarContext Context;
/// A VarDefinition consists of an expression, representing the value of the
/// variable, along with the context in which that expression should be
/// interpreted.  A reference VarDefinition does not itself contain this
/// information, but instead contains a pointer to a previous VarDefinition.
struct VarDefinition {
public:
  friend class LocalVariableMap;

  const NamedDecl *Dec;  // The original declaration for this variable.
  const Expr *Exp;       // The expression for this variable, OR
  unsigned Ref;          // Reference to another VarDefinition
  Context Ctx;           // The map with which Exp should be interpreted.

  // A definition with a null expression is a reference to another
  // definition (via Ref).
  bool isReference() { return !Exp; }

private:
  // Create ordinary variable definition
  VarDefinition(const NamedDecl *D, const Expr *E, Context C)
    : Dec(D), Exp(E), Ref(0), Ctx(C)
  { }

  // Create reference to previous definition
  VarDefinition(const NamedDecl *D, unsigned R, Context C)
    : Dec(D), Exp(nullptr), Ref(R), Ctx(C)
  { }
};
private:
  Context::Factory ContextFactory;            // Allocator for immutable maps.
  std::vector<VarDefinition> VarDefinitions;  // All definitions, indexed by ID.
  std::vector<unsigned> CtxIndices;
  std::vector<std::pair<Stmt*, Context> > SavedContexts;  // Replay log.

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    // (The 0u argument selects the reference-style constructor.)
    VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
  }
/// Look up a definition, within the given context.  Returns null when D has
/// no definition in Ctx.
const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
  const unsigned *i = Ctx.lookup(D);
  if (!i)
    return nullptr;
  assert(*i < VarDefinitions.size());
  return &VarDefinitions[*i];
}
/// Look up the definition for D within the given context.  Returns
/// NULL if the expression is not statically known.  If successful, also
/// modifies Ctx to hold the context of the return Expr.
const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
  const unsigned *P = Ctx.lookup(D);
  if (!P)
    return nullptr;

  // Follow the chain of reference definitions until an actual expression
  // is found; index 0 is the "undefined" placeholder and terminates the
  // walk with failure.
  unsigned i = *P;
  while (i > 0) {
    if (VarDefinitions[i].Exp) {
      Ctx = VarDefinitions[i].Ctx;
      return VarDefinitions[i].Exp;
    }
    i = VarDefinitions[i].Ref;
  }
  return nullptr;
}
// An empty (no bindings) immutable context.
Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

/// Return the next context after processing S.  This function is used by
/// clients of the class to get the appropriate context when traversing the
/// CFG.  It must be called for every assignment or DeclStmt.  CtxIndex is
/// advanced only when S matches the next saved context entry.
Context getNextContext(unsigned &CtxIndex, Stmt *S, Context C) {
  if (SavedContexts[CtxIndex+1].first == S) {
    CtxIndex++;
    Context Result = SavedContexts[CtxIndex].second;
    return Result;
  }
  return C;
}
// Print "<name>.<id> <decl-ptr>" for definition i to llvm::errs();
// index 0 (the phi-node placeholder) prints as "Undefined".
void dumpVarDefinitionName(unsigned i) {
  if (i == 0) {
    llvm::errs() << "Undefined";
    return;
  }
  const NamedDecl *Dec = VarDefinitions[i].Dec;
  if (!Dec) {
    llvm::errs() << "<<NULL>>";
    return;
  }
  Dec->printName(llvm::errs());
  llvm::errs() << "." << i << " " << ((const void*) Dec);
}
/// Dumps an ASCII representation of the variable map to llvm::errs()
void dump() {
for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
const Expr *Exp = VarDefinitions[i].Exp;
unsigned Ref = VarDefinitions[i].Ref;
dumpVarDefinitionName(i);
llvm::errs() << " = ";
if (Exp) Exp->dump();
else {
dumpVarDefinitionName(Ref);
llvm::errs() << "\n";
}
}
}
/// Dump an ASCII representation of a Context to llvm::errs().
/// Uses the value already produced by the iterator instead of re-looking up
/// each key (the original did a redundant per-entry C.lookup(D) and
/// dereferenced the result without a null check).
void dumpContext(Context C) {
  for (const auto &P : C) {
    const NamedDecl *D = P.first;
    D->printName(llvm::errs());
    llvm::errs() << " -> ";
    dumpVarDefinitionName(P.second);
    llvm::errs() << "\n";
  }
}
/// Builds the variable map by walking the CFG; defined out of line below.
void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
std::vector<CFGBlockInfo> &BlockInfo);
protected:
// Get the current context index (index of the most recently saved pair).
unsigned getContextIndex() { return SavedContexts.size()-1; }
// Save the current context for later replay by getNextContext().
void saveContext(Stmt *S, Context C) {
SavedContexts.push_back(std::make_pair(S,C));
}
// Add a new definition to the given context and return the updated context.
// Call this when declaring a new variable.
Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
  assert(!Ctx.contains(D));
  unsigned NewID = VarDefinitions.size();
  // The definition records the *previous* context: the initializer must be
  // interpreted before D itself becomes visible.
  VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
  return ContextFactory.add(Ctx, D, NewID);
}
// Add a new reference to an existing definition (index \p i).
Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
  unsigned NewID = VarDefinitions.size();
  VarDefinitions.push_back(VarDefinition(D, i, Ctx));
  return ContextFactory.add(Ctx, D, NewID);
}
// Update a definition, but only if that definition is already in the map.
// Call this when assigning to an existing variable.
Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
  if (!Ctx.contains(D))
    return Ctx;
  unsigned NewID = VarDefinitions.size();
  // Record the new definition against the *old* context, then rebind D.
  VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
  return ContextFactory.add(ContextFactory.remove(Ctx, D), D, NewID);
}
// Remove a definition from the context while keeping the variable name
// valid: the name is rebound to index 0, the "cleared definition" marker.
Context clearDefinition(const NamedDecl *D, Context Ctx) {
  if (!Ctx.contains(D))
    return Ctx;
  return ContextFactory.add(ContextFactory.remove(Ctx, D), D, 0);
}
// Remove a definition entirely from the context.
Context removeDefinition(const NamedDecl *D, Context Ctx) {
Context NewCtx = Ctx;
if (NewCtx.contains(D)) {
NewCtx = ContextFactory.remove(NewCtx, D);
}
return NewCtx;
}
// Intersection of two contexts: keep only variables whose definitions agree.
Context intersectContexts(Context C1, Context C2);
// For each variable in C, create a fresh definition that refers to the one
// in C (used for loop back-edges).
Context createReferenceContext(Context C);
// Second pass over a back-edge: invalidate references that changed.
void intersectBackEdge(Context C1, Context C2);
friend class VarMapBuilder;
};
// This has to be defined after LocalVariableMap.
/// Create a CFGBlockInfo whose contexts are all empty.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
return CFGBlockInfo(M.getEmptyContext());
}
/// Visitor which builds a LocalVariableMap.  Visits the statements of one
/// CFG block, threading the current Context through each definition or
/// assignment it encounters.
class VarMapBuilder : public StmtVisitor<VarMapBuilder> {
public:
// The map being built (not owned).
LocalVariableMap* VMap;
// Current context; updated as statements are visited.
LocalVariableMap::Context Ctx;
VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
: VMap(VM), Ctx(C) {}
void VisitDeclStmt(DeclStmt *S);
void VisitBinaryOperator(BinaryOperator *BO);
};
// Record newly declared local variables in the variable map.
void VarMapBuilder::VisitDeclStmt(DeclStmt *S) {
  bool ModifiedCtx = false;
  for (const auto *D : S->getDeclGroup()) {
    const auto *VD = dyn_cast_or_null<VarDecl>(D);
    if (!VD)
      continue;
    // Only variables of trivial type are tracked.
    if (!VD->getType().isTrivialType(VD->getASTContext()))
      continue;
    Ctx = VMap->addDefinition(VD, VD->getInit(), Ctx);
    ModifiedCtx = true;
  }
  if (ModifiedCtx)
    VMap->saveContext(S, Ctx);
}
// Update local variable definitions in the variable map on assignment.
void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;
  auto *DRE = dyn_cast<DeclRefExpr>(BO->getLHS()->IgnoreParenCasts());
  if (!DRE)
    return;
  ValueDecl *VDec = DRE->getDecl();
  if (!Ctx.lookup(VDec))
    return;
  if (BO->getOpcode() == BO_Assign)
    Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
  else
    // FIXME -- handle compound assignment operators
    Ctx = VMap->clearDefinition(VDec, Ctx);
  VMap->saveContext(BO, Ctx);
}
// Compute the intersection of two contexts: the set of variables that have
// the same definition in both.  Variables present on only one path are
// removed; variables with conflicting definitions are cleared (rebound to
// index 0).
LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
  Context Result = C1;
  for (const auto &Entry : C1) {
    const unsigned *I2 = C2.lookup(Entry.first);
    if (!I2) {
      // Variable doesn't exist on the second path.
      Result = removeDefinition(Entry.first, Result);
      continue;
    }
    if (*I2 != Entry.second)
      // Variable exists on both paths but with different definitions.
      Result = clearDefinition(Entry.first, Result);
  }
  return Result;
}
// Build a context in which every variable of C is bound to a *new*
// definition that merely references the one in C.  This is our naive SSA
// phi-node stand-in for loop back-edges.
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
  Context Result = getEmptyContext();
  for (const auto &Entry : C)
    Result = addReference(Entry.first, Entry.second, Result);
  return Result;
}
// Intersect C1 and C2 in place by editing VarDefinitions.  C1 must be the
// result of an earlier createReferenceContext call; any reference whose
// target changed along the back edge is reset to 0 ("undefined").
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
  for (const auto &Entry : C1) {
    const unsigned RefIdx = Entry.second;
    VarDefinition *VDef = &VarDefinitions[RefIdx];
    assert(VDef->isReference());
    const unsigned *I2 = C2.lookup(Entry.first);
    if (!I2 || *I2 != RefIdx)
      VDef->Ref = 0; // The variable changed around the loop: mark undefined.
  }
}
// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself. At
// each point in the code, we calculate a Context, which holds the set of
// variable definitions which are visible at that point in execution.
// Visible variables are mapped to their definitions using an array that
// contains all definitions.
//
// At join points in the CFG, the set is computed as the intersection of
// the incoming sets along each edge, E.g.
//
// { Context | VarDefinitions }
// int x = 0; { x -> x1 | x1 = 0 }
// int y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
// if (b) x = 1; { x -> x2, y -> y1 | x2 = 1, y1 = 0, ... }
// else x = 2; { x -> x3, y -> y1 | x3 = 2, x2 = 1, ... }
// ... { y -> y1 (x is unknown) | x3 = 2, x2 = 1, ... }
//
// This is essentially a simpler and more naive version of the standard SSA
// algorithm. Those definitions that remain in the intersection are from blocks
// that strictly dominate the current block. We do not bother to insert proper
// phi nodes, because they are not used in our analysis; instead, wherever
// a phi node would be required, we simply remove that definition from the
// context (E.g. x above).
//
// The initial traversal does not capture back-edges, so those need to be
// handled on a separate pass. Whenever the first pass encounters an
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals. (These correspond to places where SSA
// might have to insert a phi node.) On the second pass, these definitions are
// set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.) E.g.
//
// { Context | VarDefinitions }
// int x = 0, y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
// while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; }
// x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... }
// ... { y -> y1 | x3 = 2, x2 = 1, ... }
//
/// Build the variable map by traversing the CFG in topological order, so
/// that all forward-edge predecessors of a block are visited before the
/// block itself.  See the long comment above for the algorithm.
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   const PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
  CtxIndices.resize(CFGraph->getNumBlockIDs());
  for (const auto *CurrBlock : *SortedGraph) {
    int CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
    VisitedBlocks.insert(CurrBlock);
    // Calculate the entry context for the current block: intersect the exit
    // contexts of all already-visited predecessors.
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // An unvisited predecessor means *PI -> CurrBlock is a back edge;
      // skip it on this pass.
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }
      int PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
      if (CtxInit) {
        // First visited predecessor: start from its exit context.
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        CurrBlockInfo->EntryContext =
          intersectContexts(CurrBlockInfo->EntryContext,
                            PrevBlockInfo->ExitContext);
      }
    }
    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdge later (second SSA-like pass).
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
        createReferenceContext(CurrBlockInfo->EntryContext);
    // Create a starting context index for the current block.
    saveContext(nullptr, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();
    // Visit all the statements in the basic block, threading the context
    // through each definition/assignment.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      switch (BI->getKind()) {
        case CFGElement::Statement: {
          CFGStmt CS = BI->castAs<CFGStmt>();
          VMapBuilder.Visit(const_cast<Stmt*>(CS.getStmt()));
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;
    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // Skip unless CurrBlock -> *SI is a back edge (successor already
      // visited).
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;
      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }
  // Put an extra entry at the end of the indexed context array.
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(nullptr, BlockInfo[exitID].ExitContext);
}
/// Find the appropriate source locations to use when producing diagnostics
/// for each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
                               const PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (const auto *CurrBlock : *SortedGraph) {
    CFGBlockInfo *Info = &BlockInfo[CurrBlock->getBlockID()];
    if (const Stmt *TermStmt = CurrBlock->getTerminator()) {
      // A terminator serves as both entry and exit location.
      Info->EntryLoc = Info->ExitLoc = TermStmt->getLocStart();
    } else {
      // No terminator: scan backwards for the last statement in the block.
      for (CFGBlock::const_reverse_iterator RI = CurrBlock->rbegin(),
           RE = CurrBlock->rend(); RI != RE; ++RI) {
        // FIXME: Handle other CFGElement kinds.
        if (Optional<CFGStmt> CS = RI->getAs<CFGStmt>()) {
          Info->ExitLoc = CS->getStmt()->getLocStart();
          break;
        }
      }
    }
    if (Info->ExitLoc.isValid()) {
      // The block contains at least one statement; scan forwards for the
      // location of the first one.
      for (CFGBlock::const_iterator I = CurrBlock->begin(),
           E = CurrBlock->end(); I != E; ++I) {
        // FIXME: Handle other CFGElement kinds.
        if (Optional<CFGStmt> CS = I->getAs<CFGStmt>()) {
          Info->EntryLoc = CS->getStmt()->getLocStart();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty and has a single predecessor; inherit that
      // predecessor's exit location.
      Info->EntryLoc = Info->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    }
  }
}
/// Fact entry for an ordinary lockable capability (e.g. a mutex).
class LockableFactEntry : public FactEntry {
private:
bool Managed; ///< managed by ScopedLockable object
public:
LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
bool Mng = false, bool Asrt = false)
: FactEntry(CE, LK, Loc, Asrt), Managed(Mng) {}
/// Called when this fact drops out of the lockset at a join point (end of
/// scope, branch merge, ...).  Warns unless the capability is managed by a
/// scoped object, asserted, negative, or universal.
void
handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
SourceLocation JoinLoc, LockErrorKind LEK,
ThreadSafetyHandler &Handler) const override {
if (!Managed && !asserted() && !negative() && !isUniversal()) {
Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc,
LEK);
}
}
/// Remove this lock from the set.  For a positive capability, also record
/// the corresponding negative capability ("!mu") as now held.
void handleUnlock(FactSet &FSet, FactManager &FactMan,
const CapabilityExpr &Cp, SourceLocation UnlockLoc,
bool FullyRemove, ThreadSafetyHandler &Handler,
StringRef DiagKind) const override {
FSet.removeLock(FactMan, Cp);
if (!Cp.negative()) {
FSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
!Cp, LK_Exclusive, UnlockLoc));
}
}
};
/// Fact entry for a scoped lockable object (e.g. a lock guard) that manages
/// one or more underlying mutexes.
class ScopedLockableFactEntry : public FactEntry {
private:
// The mutexes (exclusive and shared) acquired/released via this object.
SmallVector<const til::SExpr *, 4> UnderlyingMutexes;
public:
ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
const CapExprSet &Excl, const CapExprSet &Shrd)
: FactEntry(CE, LK_Exclusive, Loc, false) {
for (const auto &M : Excl)
UnderlyingMutexes.push_back(M.sexpr());
for (const auto &M : Shrd)
UnderlyingMutexes.push_back(M.sexpr());
}
/// At a join point, warn for each underlying mutex that is still held.
void
handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
SourceLocation JoinLoc, LockErrorKind LEK,
ThreadSafetyHandler &Handler) const override {
for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
if (FSet.findLock(FactMan, CapabilityExpr(UnderlyingMutex, false))) {
// If this scoped lock manages another mutex, and if the underlying
// mutex is still held, then warn about the underlying mutex.
Handler.handleMutexHeldEndOfScope(
"mutex", sx::toString(UnderlyingMutex), loc(), JoinLoc, LEK);
}
}
}
/// Release the underlying mutexes.  \p FullyRemove distinguishes destroying
/// the scoped object (remove silently) from an explicit early release
/// (warn on double release).
void handleUnlock(FactSet &FSet, FactManager &FactMan,
const CapabilityExpr &Cp, SourceLocation UnlockLoc,
bool FullyRemove, ThreadSafetyHandler &Handler,
StringRef DiagKind) const override {
assert(!Cp.negative() && "Managing object cannot be negative.");
for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
CapabilityExpr UnderCp(UnderlyingMutex, false);
// Negative capability recorded once the underlying mutex is released.
auto UnderEntry = llvm::make_unique<LockableFactEntry>(
!UnderCp, LK_Exclusive, UnlockLoc);
if (FullyRemove) {
// We're destroying the managing object.
// Remove the underlying mutex if it exists; but don't warn.
if (FSet.findLock(FactMan, UnderCp)) {
FSet.removeLock(FactMan, UnderCp);
FSet.addLock(FactMan, std::move(UnderEntry));
}
} else {
// We're releasing the underlying mutex, but not destroying the
// managing object. Warn on dual release.
if (!FSet.findLock(FactMan, UnderCp)) {
Handler.handleUnmatchedUnlock(DiagKind, UnderCp.toString(),
UnlockLoc);
}
FSet.removeLock(FactMan, UnderCp);
FSet.addLock(FactMan, std::move(UnderEntry));
}
}
if (FullyRemove)
FSet.removeLock(FactMan, Cp);
}
};
/// \brief Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
friend class BuildLockset;
friend class threadSafety::BeforeSet;
// Arena backing all TIL (typed intermediate language) expressions.
llvm::BumpPtrAllocator Bpa;
threadSafety::til::MemRegionRef Arena;
// Translates attribute/AST expressions into canonical TIL expressions.
threadSafety::SExprBuilder SxBuilder;
// Client callback interface that receives all diagnostics.
ThreadSafetyHandler &Handler;
// Method currently being analyzed (used for scope checks); may be null.
const CXXMethodDecl *CurrentMethod;
LocalVariableMap LocalVarMap;
FactManager FactMan;
std::vector<CFGBlockInfo> BlockInfo;
// Shared acquired_before/acquired_after graph (not owned).
BeforeSet* GlobalBeforeSet;
public:
ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset)
: Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}
bool inCurrentScope(const CapabilityExpr &CapE);
void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
StringRef DiagKind, bool ReqAttr = false);
void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind,
StringRef DiagKind);
template <typename AttrType>
void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, Expr *Exp,
const NamedDecl *D, VarDecl *SelfDecl = nullptr);
template <class AttrType>
void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, Expr *Exp,
const NamedDecl *D,
const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
Expr *BrE, bool Neg);
const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
bool &Negate);
void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
const CFGBlock* PredBlock,
const CFGBlock *CurrBlock);
void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
SourceLocation JoinLoc,
LockErrorKind LEK1, LockErrorKind LEK2,
bool Modify=true);
void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
SourceLocation JoinLoc, LockErrorKind LEK1,
bool Modify=true) {
intersectAndWarn(FSet1, FSet2, JoinLoc, LEK1, LEK1, Modify);
}
void runAnalysis(AnalysisDeclContext &AC);
};
} // namespace
/// Process acquired_before and acquired_after attributes on Vd, creating a
/// BeforeInfo entry for Vd in BMap and propagating edges: acquired_before
/// adds Vd's targets to Vd's own vector; acquired_after adds Vd to each
/// listed mutex's vector.  Returns the (possibly freshly created) entry.
BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
ThreadSafetyAnalyzer& Analyzer) {
// Create a new entry for Vd.
auto& Entry = BMap.FindAndConstruct(Vd);
BeforeInfo* Info = &Entry.second;
// NOTE(review): Info (and ArgInfo below) point into BMap; the recursive
// insertAttrExprs calls below insert into BMap and, if BMap is a
// DenseMap-style container, growth could invalidate these pointers —
// confirm BMap's reference-stability guarantees.
BeforeVect* Bv = nullptr;
for (Attr* At : Vd->attrs()) {
switch (At->getKind()) {
case attr::AcquiredBefore: {
auto *A = cast<AcquiredBeforeAttr>(At);
// Create a new BeforeVect for Vd if necessary.
if (!Bv) {
Bv = new BeforeVect;
Info->Vect.reset(Bv);
}
// Read exprs from the attribute, and add them to BeforeVect.
for (const auto *Arg : A->args()) {
CapabilityExpr Cp =
Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
if (const ValueDecl *Cpvd = Cp.valueDecl()) {
Bv->push_back(Cpvd);
// Recursively process the target so its own attributes are known.
auto It = BMap.find(Cpvd);
if (It == BMap.end())
insertAttrExprs(Cpvd, Analyzer);
}
}
break;
}
case attr::AcquiredAfter: {
auto *A = cast<AcquiredAfterAttr>(At);
// Read exprs from the attribute, and add them to BeforeVect.
for (const auto *Arg : A->args()) {
CapabilityExpr Cp =
Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
if (const ValueDecl *ArgVd = Cp.valueDecl()) {
// Get entry for mutex listed in attribute
BeforeInfo* ArgInfo;
auto It = BMap.find(ArgVd);
if (It == BMap.end())
ArgInfo = insertAttrExprs(ArgVd, Analyzer);
else
ArgInfo = &It->second;
// Create a new BeforeVect if necessary.
BeforeVect* ArgBv = ArgInfo->Vect.get();
if (!ArgBv) {
ArgBv = new BeforeVect;
ArgInfo->Vect.reset(ArgBv);
}
// "Vd acquired_after Arg" is recorded as "Arg acquired_before Vd".
ArgBv->push_back(Vd);
}
}
break;
}
default:
break;
}
}
return Info;
}
/// Warn if any mutexes in FSet appear in the (transitive) acquired_before
/// set of StartVd, i.e. the program is acquiring locks out of the declared
/// order.  Also warns once per declaration on acquired_before/after cycles.
void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
const FactSet& FSet,
ThreadSafetyAnalyzer& Analyzer,
SourceLocation Loc, StringRef CapKind) {
// Nodes visited by the DFS, so Visited can be reset afterwards.
SmallVector<BeforeInfo*, 8> InfoVect;
// Do a depth-first traversal of Vd.
// Return true if there are cycles.
// Visited coloring: 1 = on the current DFS stack, 2 = fully processed.
std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
if (!Vd)
return false;
BeforeSet::BeforeInfo* Info;
auto It = BMap.find(Vd);
if (It == BMap.end())
Info = insertAttrExprs(Vd, Analyzer);
else
Info = &It->second;
if (Info->Visited == 1)
return true;
if (Info->Visited == 2)
return false;
BeforeVect* Bv = Info->Vect.get();
if (!Bv)
return false;
InfoVect.push_back(Info);
Info->Visited = 1;
for (auto *Vdb : *Bv) {
// Exclude mutexes in our immediate before set.
if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
StringRef L1 = StartVd->getName();
StringRef L2 = Vdb->getName();
Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
}
// Transitively search other before sets, and warn on cycles.
if (traverse(Vdb)) {
// Report each cycle only once per declaration.
if (CycMap.find(Vd) == CycMap.end()) {
CycMap.insert(std::make_pair(Vd, true));
StringRef L1 = Vd->getName();
Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
}
}
}
Info->Visited = 2;
return false;
};
traverse(StartVd);
// Reset the visitation markers for the next query.
for (auto* Info : InfoVect)
Info->Visited = 0;
}
/// \brief Get the ValueDecl named by a DeclRefExpr or MemberExpr, looking
/// through implicit casts; returns null for any other expression form.
static const ValueDecl *getValueDecl(const Expr *Exp) {
  // Strip any chain of implicit casts first.
  while (const auto *ICE = dyn_cast<ImplicitCastExpr>(Exp))
    Exp = ICE->getSubExpr();
  if (const auto *DRE = dyn_cast<DeclRefExpr>(Exp))
    return DRE->getDecl();
  if (const auto *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();
  return nullptr;
}
namespace {
/// Compile-time detection of whether \c Ty exposes an \c args() member
/// (attributes carrying an argument range).  Classic sizeof-based SFINAE
/// probe: the first overload is viable only when \c I->args() is a valid
/// expression.
template <typename Ty>
class has_arg_iterator_range {
  typedef char SmallType;
  struct BigType { char Pad[2]; };

  template <typename Inner>
  static SmallType& probe(Inner *I, decltype(I->args()) * = nullptr);
  template <typename>
  static BigType& probe(...);

public:
  static const bool value = sizeof(probe<Ty>(nullptr)) == sizeof(SmallType);
};
} // namespace
/// Use the capability's declared name (e.g. "mutex", "role") as the
/// diagnostic kind string.
static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
return A->getName();
}
/// Derive the diagnostic kind from a value's type: look for a
/// CapabilityAttr on the underlying record or typedef declaration,
/// following pointers and references; fall back to "mutex".
static StringRef ClassifyDiagnostic(QualType VDT) {
  if (const auto *RecT = VDT->getAs<RecordType>()) {
    if (const RecordDecl *RD = RecT->getDecl())
      if (const auto *CA = RD->getAttr<CapabilityAttr>())
        return ClassifyDiagnostic(CA);
    return "mutex";
  }
  if (const auto *TdT = VDT->getAs<TypedefType>()) {
    if (const TypedefNameDecl *TD = TdT->getDecl())
      if (const auto *CA = TD->getAttr<CapabilityAttr>())
        return ClassifyDiagnostic(CA);
    return "mutex";
  }
  if (VDT->isPointerType() || VDT->isReferenceType())
    return ClassifyDiagnostic(VDT->getPointeeType());
  return "mutex";
}
/// Classify by the declared type of \p VD.
static StringRef ClassifyDiagnostic(const ValueDecl *VD) {
assert(VD && "No ValueDecl passed");
// The ValueDecl is the declaration of a mutex or role (hopefully).
return ClassifyDiagnostic(VD->getType());
}
/// Classify for attributes with a single argument (no args() range):
/// delegate to the classification of that one argument's declaration.
template <typename AttrTy>
static typename std::enable_if<!has_arg_iterator_range<AttrTy>::value,
StringRef>::type
ClassifyDiagnostic(const AttrTy *A) {
if (const ValueDecl *VD = getValueDecl(A->getArg()))
return ClassifyDiagnostic(VD);
return "mutex";
}
/// Classify for attributes that carry an args() range: use the first
/// argument that resolves to a declaration; otherwise fall back to "mutex".
template <typename AttrTy>
static typename std::enable_if<has_arg_iterator_range<AttrTy>::value,
StringRef>::type
ClassifyDiagnostic(const AttrTy *A) {
for (const auto *Arg : A->args()) {
if (const ValueDecl *VD = getValueDecl(Arg))
return ClassifyDiagnostic(VD);
}
return "mutex";
}
/// Return true if \p CapE refers to a member of the same class (declaration
/// context) as the method currently being analyzed.
inline bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
  if (!CurrentMethod)
    return false;
  const auto *P = dyn_cast_or_null<til::Project>(CapE.sexpr());
  if (!P)
    return false;
  if (const auto *VD = P->clangDecl())
    return VD->getDeclContext() == CurrentMethod->getDeclContext();
  return false;
}
/// \brief Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
std::unique_ptr<FactEntry> Entry,
StringRef DiagKind, bool ReqAttr) {
if (Entry->shouldIgnore())
return;
if (!ReqAttr && !Entry->negative()) {
// look for the negative capability, and remove it from the fact set.
CapabilityExpr NegC = !*Entry;
FactEntry *Nen = FSet.findLock(FactMan, NegC);
if (Nen) {
FSet.removeLock(FactMan, NegC);
}
else {
// Acquiring without holding the negative capability means we could not
// prove the lock was free; warn (only for in-scope, non-asserted locks).
if (inCurrentScope(*Entry) && !Entry->asserted())
Handler.handleNegativeNotHeld(DiagKind, Entry->toString(),
NegC.toString(), Entry->loc());
}
}
// Check before/after constraints
if (Handler.issueBetaWarnings() &&
!Entry->asserted() && !Entry->declared()) {
GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
Entry->loc(), DiagKind);
}
// FIXME: Don't always warn when we have support for reentrant locks.
if (FSet.findLock(FactMan, *Entry)) {
if (!Entry->asserted())
Handler.handleDoubleLock(DiagKind, Entry->toString(), Entry->loc());
} else {
FSet.addLock(FactMan, std::move(Entry));
}
}
/// \brief Remove a lock from the lockset, warning if the lock is not there.
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
SourceLocation UnlockLoc,
bool FullyRemove, LockKind ReceivedKind,
StringRef DiagKind) {
if (Cp.shouldIgnore())
return;
const FactEntry *LDat = FSet.findLock(FactMan, Cp);
if (!LDat) {
// Releasing a lock we never recorded as held.
Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc);
return;
}
// Generic lock removal doesn't care about lock kind mismatches, but
// otherwise diagnose when the lock kinds are mismatched.
if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
Handler.handleIncorrectUnlockKind(DiagKind, Cp.toString(),
LDat->kind(), ReceivedKind, UnlockLoc);
}
// Delegate to the fact entry (plain vs. scoped behave differently).
LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler,
DiagKind);
}
/// \brief Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       Expr *Exp, const NamedDecl *D,
                                       VarDecl *SelfDecl) {
  // Translate one attribute argument (null means the implicit "this"
  // object) and append it unless it is invalid or should be ignored.
  auto TranslateAndAdd = [&](const Expr *Arg) {
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
      return;
    }
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
  };

  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    TranslateAndAdd(nullptr);
    return;
  }
  for (const auto *Arg : Attr->args())
    TranslateAndAdd(Arg);
}
/// \brief Extract the list of mutexIDs from a trylock attribute.  If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
Expr *Exp, const NamedDecl *D,
const CFGBlock *PredBlock,
const CFGBlock *CurrBlock,
Expr *BrE, bool Neg) {
// Find out which branch has the lock: BrE is the attribute's declared
// success value (a bool or integer literal).
bool branch = false;
if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
branch = BLE->getValue();
else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
branch = ILE->getValue().getBoolValue();
// Successor 0 is the "true" edge, successor 1 the "false" edge.
int branchnum = branch ? 0 : 1;
if (Neg)
// The condition was negated, so the success edge is flipped.
branchnum = !branchnum;
// If we've taken the trylock branch, then add the lock
int i = 0;
for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
if (*SI == CurrBlock && i == branchnum)
getMutexIDs(Mtxs, Attr, Exp, D);
}
}
/// Try to evaluate \p E as a compile-time boolean constant.  On success,
/// store the value in \p TCond and return true.
static bool getStaticBooleanValue(Expr *E, bool &TCond) {
  // nullptr / GNU __null literals are statically false.
  if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
    TCond = false;
    return true;
  }
  if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
    TCond = BLE->getValue();
    return true;
  }
  if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
    TCond = ILE->getValue().getBoolValue();
    return true;
  }
  // Look through implicit casts.
  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E))
    return getStaticBooleanValue(ICE->getSubExpr(), TCond);
  return false;
}
// If Cond can be traced back to a function call, return the call expression.
// The negate variable should be called with false, and will be set to true
// if the function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
LocalVarContext C,
bool &Negate) {
if (!Cond)
return nullptr;
if (const CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
// Found the call itself.
return CallExp;
}
else if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond)) {
return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
}
else if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
}
else if (const ExprWithCleanups* EWC = dyn_cast<ExprWithCleanups>(Cond)) {
return getTrylockCallExpr(EWC->getSubExpr(), C, Negate);
}
else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
// A local variable: chase its last known definition in the variable map.
const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
return getTrylockCallExpr(E, C, Negate);
}
else if (const UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
// Logical not flips the sense of the branch.
if (UOP->getOpcode() == UO_LNot) {
Negate = !Negate;
return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
}
return nullptr;
}
else if (const BinaryOperator *BOP = dyn_cast<BinaryOperator>(Cond)) {
// Comparisons against a static boolean, e.g. (mu.tryLock() == true):
// fold the constant side into Negate and recurse on the other side.
if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
if (BOP->getOpcode() == BO_NE)
Negate = !Negate;
bool TCond = false;
if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
if (!TCond) Negate = !Negate;
return getTrylockCallExpr(BOP->getLHS(), C, Negate);
}
TCond = false;
if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
if (!TCond) Negate = !Negate;
return getTrylockCallExpr(BOP->getRHS(), C, Negate);
}
return nullptr;
}
if (BOP->getOpcode() == BO_LAnd) {
// LHS must have been evaluated in a different block.
return getTrylockCallExpr(BOP->getRHS(), C, Negate);
}
if (BOP->getOpcode() == BO_LOr) {
return getTrylockCallExpr(BOP->getRHS(), C, Negate);
}
return nullptr;
}
return nullptr;
}
/// \brief Find the lockset that holds on the edge between PredBlock
/// and CurrBlock. The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
const FactSet &ExitSet,
const CFGBlock *PredBlock,
const CFGBlock *CurrBlock) {
Result = ExitSet;
const Stmt *Cond = PredBlock->getTerminatorCondition();
if (!Cond)
return;
bool Negate = false;
const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
StringRef CapDiagKind = "mutex";
// Trace the branch condition back to a trylock-style call, if any.
CallExpr *Exp =
const_cast<CallExpr*>(getTrylockCallExpr(Cond, LVarCtx, Negate));
if (!Exp)
return;
NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
if(!FunDecl || !FunDecl->hasAttrs())
return;
CapExprSet ExclusiveLocksToAdd;
CapExprSet SharedLocksToAdd;
// If the condition is a call to a Trylock function, then grab the attributes
for (auto *Attr : FunDecl->attrs()) {
switch (Attr->getKind()) {
case attr::ExclusiveTrylockFunction: {
ExclusiveTrylockFunctionAttr *A =
cast<ExclusiveTrylockFunctionAttr>(Attr);
// Only adds the locks if this edge is the trylock's success branch.
getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
PredBlock, CurrBlock, A->getSuccessValue(), Negate);
CapDiagKind = ClassifyDiagnostic(A);
break;
}
case attr::SharedTrylockFunction: {
SharedTrylockFunctionAttr *A =
cast<SharedTrylockFunctionAttr>(Attr);
getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
PredBlock, CurrBlock, A->getSuccessValue(), Negate);
CapDiagKind = ClassifyDiagnostic(A);
break;
}
default:
break;
}
}
// Add and remove locks.
SourceLocation Loc = Exp->getExprLoc();
for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
addLock(Result, llvm::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
LK_Exclusive, Loc),
CapDiagKind);
for (const auto &SharedLockToAdd : SharedLocksToAdd)
addLock(Result, llvm::make_unique<LockableFactEntry>(SharedLockToAdd,
LK_Shared, Loc),
CapDiagKind);
}
namespace {
/// \brief We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public StmtVisitor<BuildLockset> {
friend class ThreadSafetyAnalyzer;
// Owning analyzer (diagnostics, fact manager, variable map); not owned.
ThreadSafetyAnalyzer *Analyzer;
// The lockset being built for the current block.
FactSet FSet;
// Current local-variable context, advanced as statements are visited.
LocalVariableMap::Context LVarCtx;
// Replay cursor into the analyzer's saved contexts.
unsigned CtxIndex;
// helper functions
void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
Expr *MutexExp, ProtectedOperationKind POK,
StringRef DiagKind, SourceLocation Loc);
void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp,
StringRef DiagKind);
void checkAccess(const Expr *Exp, AccessKind AK,
ProtectedOperationKind POK = POK_VarAccess);
void checkPtAccess(const Expr *Exp, AccessKind AK,
ProtectedOperationKind POK = POK_VarAccess);
void handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr);
public:
BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
: StmtVisitor<BuildLockset>(),
Analyzer(Anlzr),
FSet(Info.EntrySet),
LVarCtx(Info.EntryContext),
CtxIndex(Info.EntryIndex)
{}
void VisitUnaryOperator(UnaryOperator *UO);
void VisitBinaryOperator(BinaryOperator *BO);
void VisitCastExpr(CastExpr *CE);
void VisitCallExpr(CallExpr *Exp);
void VisitCXXConstructExpr(CXXConstructExpr *Exp);
void VisitDeclStmt(DeclStmt *S);
};
} // namespace
/// \brief Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed in AccessKind.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
                                      AccessKind AK, Expr *MutexExp,
                                      ProtectedOperationKind POK,
                                      StringRef DiagKind, SourceLocation Loc) {
  // A read needs at least a shared lock; a write needs an exclusive one.
  LockKind LK = getLockKindFromAccessKind(AK);
  // Translate the attribute argument into a canonical capability expression.
  CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
  if (Cp.isInvalid()) {
    warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
    return;
  } else if (Cp.shouldIgnore()) {
    return;
  }
  if (Cp.negative()) {
    // Negative capabilities act like locks excluded
    FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp);
    if (LDat) {
      // Holding the positive capability contradicts the negative requirement.
      Analyzer->Handler.handleFunExcludesLock(
          DiagKind, D->getNameAsString(), (!Cp).toString(), Loc);
      return;
    }
    // If this does not refer to a negative capability in the same class,
    // then stop here.
    if (!Analyzer->inCurrentScope(Cp))
      return;
    // Otherwise the negative requirement must be propagated to the caller.
    LDat = FSet.findLock(Analyzer->FactMan, Cp);
    if (!LDat) {
      Analyzer->Handler.handleMutexNotHeld("", D, POK, Cp.toString(),
                                           LK_Shared, Loc);
    }
    return;
  }
  // Positive capability: look for an exact (or universal) match first.
  FactEntry* LDat = FSet.findLockUniv(Analyzer->FactMan, Cp);
  bool NoError = true;
  if (!LDat) {
    // No exact match found.  Look for a partial match.
    LDat = FSet.findPartialMatch(Analyzer->FactMan, Cp);
    if (LDat) {
      // Warn that there's no precise match.
      std::string PartMatchStr = LDat->toString();
      StringRef PartMatchName(PartMatchStr);
      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
                                           LK, Loc, &PartMatchName);
    } else {
      // Warn that there's no match at all.
      Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
                                           LK, Loc);
    }
    NoError = false;
  }
  // Make sure the mutex we found is the right kind.
  if (NoError && LDat && !LDat->isAtLeast(LK)) {
    Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
                                         LK, Loc);
  }
}
/// \brief Warn if the LSet contains the given lock.
void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
Expr *MutexExp, StringRef DiagKind) {
CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
if (Cp.isInvalid()) {
warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
return;
} else if (Cp.shouldIgnore()) {
return;
}
FactEntry* LDat = FSet.findLock(Analyzer->FactMan, Cp);
if (LDat) {
Analyzer->Handler.handleFunExcludesLock(
DiagKind, D->getNameAsString(), Cp.toString(), Exp->getExprLoc());
}
}
/// \brief Checks guarded_by and pt_guarded_by attributes.
/// Whenever we identify an access (read or write) to a DeclRefExpr that is
/// marked with guarded_by, we must ensure the appropriate mutexes are held.
/// Similarly, we check if the access is to an expression that dereferences
/// a pointer marked with pt_guarded_by.
void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
                               ProtectedOperationKind POK) {
  Exp = Exp->IgnoreParenCasts();
  SourceLocation Loc = Exp->getExprLoc();
  // Local variables of reference type cannot be re-assigned;
  // map them to their initializer.
  while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
    const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
    if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
      if (const auto *E = VD->getInit()) {
        Exp = E;
        continue;
      }
    }
    break;
  }
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp)) {
    // For dereferences
    if (UO->getOpcode() == clang::UO_Deref)
      checkPtAccess(UO->getSubExpr(), AK, POK);
    return;
  }
  if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
    // Array subscripting is a dereference of the array/pointer base.
    checkPtAccess(AE->getLHS(), AK, POK);
    return;
  }
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
    // Check the base object, then deliberately fall through so the member
    // itself is also checked against its own guarded_by attributes below.
    if (ME->isArrow())
      checkPtAccess(ME->getBase(), AK, POK);
    else
      checkAccess(ME->getBase(), AK, POK);
  }
  const ValueDecl *D = getValueDecl(Exp);
  if (!D || !D->hasAttrs())
    return;
  // guarded_var requires *some* lock to be held for any access.
  if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) {
    Analyzer->Handler.handleNoMutexHeld("mutex", D, POK, AK, Loc);
  }
  // Each guarded_by names a specific capability that must be held.
  for (const auto *I : D->specific_attrs<GuardedByAttr>())
    warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK,
                       ClassifyDiagnostic(I), Loc);
}
/// \brief Checks pt_guarded_by and pt_guarded_var attributes.
/// POK is the same operationKind that was passed to checkAccess.
void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
ProtectedOperationKind POK) {
while (true) {
if (const ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
Exp = PE->getSubExpr();
continue;
}
if (const CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
if (CE->getCastKind() == CK_ArrayToPointerDecay) {
// If it's an actual array, and not a pointer, then it's elements
// are protected by GUARDED_BY, not PT_GUARDED_BY;
checkAccess(CE->getSubExpr(), AK, POK);
return;
}
Exp = CE->getSubExpr();
continue;
}
break;
}
// Pass by reference warnings are under a different flag.
ProtectedOperationKind PtPOK = POK_VarDereference;
if (POK == POK_PassByRef) PtPOK = POK_PtPassByRef;
const ValueDecl *D = getValueDecl(Exp);
if (!D || !D->hasAttrs())
return;
if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan))
Analyzer->Handler.handleNoMutexHeld("mutex", D, PtPOK, AK,
Exp->getExprLoc());
for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
warnIfMutexNotHeld(D, Exp, AK, I->getArg(), PtPOK,
ClassifyDiagnostic(I), Exp->getExprLoc());
}
/// \brief Process a function call, method call, constructor call,
/// or destructor call.  This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
///
/// \param Exp The call expression (or a dummy expression for destructors).
/// \param D   The declaration whose thread-safety attributes are processed.
/// \param VD  The variable being constructed, when this is a constructor
///            call from a DeclStmt; used to detect scoped-lockable objects.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can be also treated as reads.
///
void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
  SourceLocation Loc = Exp->getExprLoc();
  CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
  CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
  CapExprSet ScopedExclusiveReqs, ScopedSharedReqs;
  StringRef CapDiagKind = "mutex";
  // Figure out if we're calling the constructor of scoped lockable class
  bool isScopedVar = false;
  if (VD) {
    if (const CXXConstructorDecl *CD = dyn_cast<const CXXConstructorDecl>(D)) {
      const CXXRecordDecl* PD = CD->getParent();
      if (PD && PD->hasAttr<ScopedLockableAttr>())
        isScopedVar = true;
    }
  }
  // First pass: collect the locks to add/remove from the declaration's
  // attributes; the lockset itself is only mutated after the loop (except
  // for assert attributes, which take effect immediately).
  for(Attr *Atconst : D->attrs()) {
    Attr* At = const_cast<Attr*>(Atconst);
    switch (At->getKind()) {
      // When we encounter a lock function, we need to add the lock to our
      // lockset.
      case attr::AcquireCapability: {
        auto *A = cast<AcquireCapabilityAttr>(At);
        Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
                                            : ExclusiveLocksToAdd,
                              A, Exp, D, VD);
        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }
      // An assert will add a lock to the lockset, but will not generate
      // a warning if it is already there, and will not generate a warning
      // if it is not removed.
      case attr::AssertExclusiveLock: {
        AssertExclusiveLockAttr *A = cast<AssertExclusiveLockAttr>(At);
        CapExprSet AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(FSet,
                            llvm::make_unique<LockableFactEntry>(
                                AssertLock, LK_Exclusive, Loc, false, true),
                            ClassifyDiagnostic(A));
        break;
      }
      case attr::AssertSharedLock: {
        AssertSharedLockAttr *A = cast<AssertSharedLockAttr>(At);
        CapExprSet AssertLocks;
        Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
        for (const auto &AssertLock : AssertLocks)
          Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
                                      AssertLock, LK_Shared, Loc, false, true),
                            ClassifyDiagnostic(A));
        break;
      }
      // When we encounter an unlock function, we need to remove unlocked
      // mutexes from the lockset, and flag a warning if they are not there.
      case attr::ReleaseCapability: {
        auto *A = cast<ReleaseCapabilityAttr>(At);
        if (A->isGeneric())
          Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, VD);
        else if (A->isShared())
          Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, VD);
        else
          Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, VD);
        CapDiagKind = ClassifyDiagnostic(A);
        break;
      }
      case attr::RequiresCapability: {
        RequiresCapabilityAttr *A = cast<RequiresCapabilityAttr>(At);
        for (auto *Arg : A->args()) {
          warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
                             POK_FunctionCall, ClassifyDiagnostic(A),
                             Exp->getExprLoc());
          // use for adopting a lock
          if (isScopedVar) {
            Analyzer->getMutexIDs(A->isShared() ? ScopedSharedReqs
                                                : ScopedExclusiveReqs,
                                  A, Exp, D, VD);
          }
        }
        break;
      }
      case attr::LocksExcluded: {
        LocksExcludedAttr *A = cast<LocksExcludedAttr>(At);
        for (auto *Arg : A->args())
          warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A));
        break;
      }
      // Ignore attributes unrelated to thread-safety
      default:
        break;
    }
  }
  // Add locks.
  for (const auto &M : ExclusiveLocksToAdd)
    Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
                                M, LK_Exclusive, Loc, isScopedVar),
                      CapDiagKind);
  for (const auto &M : SharedLocksToAdd)
    Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
                                M, LK_Shared, Loc, isScopedVar),
                      CapDiagKind);
  if (isScopedVar) {
    // Add the managing object as a dummy mutex, mapped to the underlying mutex.
    SourceLocation MLoc = VD->getLocation();
    DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
    // FIXME: does this store a pointer to DRE?
    CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr);
    // Capabilities required by the constructor are adopted by the scoped
    // object, so fold them into the sets it manages.
    std::copy(ScopedExclusiveReqs.begin(), ScopedExclusiveReqs.end(),
              std::back_inserter(ExclusiveLocksToAdd));
    std::copy(ScopedSharedReqs.begin(), ScopedSharedReqs.end(),
              std::back_inserter(SharedLocksToAdd));
    Analyzer->addLock(FSet,
                      llvm::make_unique<ScopedLockableFactEntry>(
                          Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd),
                      CapDiagKind);
  }
  // Remove locks.
  // FIXME -- should only fully remove if the attribute refers to 'this'.
  bool Dtor = isa<CXXDestructorDecl>(D);
  for (const auto &M : ExclusiveLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
  for (const auto &M : SharedLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
  for (const auto &M : GenericLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);
}
/// \brief For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes.  Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
  const auto Op = UO->getOpcode();
  // Only increment/decrement both read and write their operand; everything
  // else is handled elsewhere (reads via the LValueToRValue cast).
  const bool IsIncDec = Op == clang::UO_PostDec || Op == clang::UO_PostInc ||
                        Op == clang::UO_PreDec || Op == clang::UO_PreInc;
  if (IsIncDec)
    checkAccess(UO->getSubExpr(), AK_Written);
}
/// For binary operations which assign to a variable (writes), we need to check
/// whether we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
  if (BO->isAssignmentOp()) {
    // Advance the local-variable context before checking the write.
    LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
    checkAccess(BO->getLHS(), AK_Written);
  }
}
/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
void BuildLockset::VisitCastExpr(CastExpr *CE) {
  // Only lvalue-to-rvalue conversions constitute a read of the operand.
  if (CE->getCastKind() == CK_LValueToRValue)
    checkAccess(CE->getSubExpr(), AK_Read);
}
/// Handle a call: check accesses to the implicit object argument (for member
/// and operator calls), check reference arguments as pass-by-reference reads,
/// and finally process the callee's thread-safety attributes via handleCall.
void BuildLockset::VisitCallExpr(CallExpr *Exp) {
  bool ExamineArgs = true;
  bool OperatorFun = false;
  if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
    MemberExpr *ME = dyn_cast<MemberExpr>(CE->getCallee());
    // ME can be null when calling a method pointer
    CXXMethodDecl *MD = CE->getMethodDecl();
    if (ME && MD) {
      if (ME->isArrow()) {
        if (MD->isConst()) {
          checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
        } else {  // FIXME -- should be AK_Written
          checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
        }
      } else {
        if (MD->isConst())
          checkAccess(CE->getImplicitObjectArgument(), AK_Read);
        else     // FIXME -- should be AK_Written
          checkAccess(CE->getImplicitObjectArgument(), AK_Read);
      }
    }
  } else if (CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
    OperatorFun = true;
    auto OEop = OE->getOperator();
    switch (OEop) {
      case OO_Equal: {
        // Assignment: arg 0 is the target (written), arg 1 the source (read).
        ExamineArgs = false;
        const Expr *Target = OE->getArg(0);
        const Expr *Source = OE->getArg(1);
        checkAccess(Target, AK_Written);
        checkAccess(Source, AK_Read);
        break;
      }
      case OO_Star:
      case OO_Arrow:
      case OO_Subscript: {
        const Expr *Obj = OE->getArg(0);
        checkAccess(Obj, AK_Read);
        if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
          // Grrr.  operator* can be multiplication...
          checkPtAccess(Obj, AK_Read);
        }
        break;
      }
      default: {
        // TODO: get rid of this, and rely on pass-by-ref instead.
        const Expr *Obj = OE->getArg(0);
        checkAccess(Obj, AK_Read);
        break;
      }
    }
  }
  if (ExamineArgs) {
    if (FunctionDecl *FD = Exp->getDirectCallee()) {
      unsigned Fn = FD->getNumParams();
      unsigned Cn = Exp->getNumArgs();
      unsigned Skip = 0;
      unsigned i = 0;
      if (OperatorFun) {
        if (isa<CXXMethodDecl>(FD)) {
          // First arg in operator call is implicit self argument,
          // and doesn't appear in the FunctionDecl.
          Skip = 1;
          Cn--;
        } else {
          // Ignore the first argument of operators; it's been checked above.
          i = 1;
        }
      }
      // Ignore default arguments
      unsigned n = (Fn < Cn) ? Fn : Cn;
      for (; i < n; ++i) {
        ParmVarDecl* Pvd = FD->getParamDecl(i);
        Expr* Arg = Exp->getArg(i+Skip);
        QualType Qt = Pvd->getType();
        // Passing by reference may let the callee read the guarded value.
        if (Qt->isReferenceType())
          checkAccess(Arg, AK_Read, POK_PassByRef);
      }
    }
  }
  NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if(!D || !D->hasAttrs())
    return;
  handleCall(Exp, D);
}
/// A copy constructor reads its source argument; check that access.
/// FIXME -- only handles constructors in DeclStmt below.
void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) {
  const CXXConstructorDecl *Ctor = Exp->getConstructor();
  if (!Ctor || !Ctor->isCopyConstructor())
    return;
  checkAccess(Exp->getArg(0), AK_Read);
}
/// Handle variable declarations: a constructor call in an initializer may
/// acquire locks (e.g. constructing a scoped-lockable object), so route it
/// through handleCall with the declared variable.
void BuildLockset::VisitDeclStmt(DeclStmt *S) {
  // adjust the context
  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
  for (auto *D : S->getDeclGroup()) {
    if (VarDecl *VD = dyn_cast_or_null<VarDecl>(D)) {
      Expr *E = VD->getInit();
      // handle constructors that involve temporaries
      if (ExprWithCleanups *EWC = dyn_cast_or_null<ExprWithCleanups>(E))
        E = EWC->getSubExpr();
      if (CXXConstructExpr *CE = dyn_cast_or_null<CXXConstructExpr>(E)) {
        NamedDecl *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
        // Was 'return': that aborted processing of the *remaining*
        // declarations in a multi-decl statement (e.g. 'X a, b;') whenever
        // one constructor had no attributes. Skip just this decl instead.
        if (!CtorD || !CtorD->hasAttrs())
          continue;
        handleCall(CE, CtorD, VD);
      }
    }
  }
}
/// \brief Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
/// of each branch being merged. For example, given the following sequence:
/// A; if () then B; else C; D; we need to check that the lockset after B and C
/// are the same. In the event of a difference, we use the intersection of these
/// two locksets at the start of D.
///
/// \param FSet1 The first lockset.
/// \param FSet2 The second lockset.
/// \param JoinLoc The location of the join point for error reporting
/// \param LEK1 The error message to report if a mutex is missing from LSet1
/// \param LEK2 The error message to report if a mutex is missing from Lset2
/// \param Modify If true, FSet1 is updated in place to the intersection
///               (with exclusive/non-asserted entries preferred on conflict).
void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
                                            const FactSet &FSet2,
                                            SourceLocation JoinLoc,
                                            LockErrorKind LEK1,
                                            LockErrorKind LEK2,
                                            bool Modify) {
  // Snapshot FSet1 before any in-place modification so the second loop
  // iterates over the original contents.
  FactSet FSet1Orig = FSet1;
  // Find locks in FSet2 that conflict or are not in FSet1, and warn.
  for (const auto &Fact : FSet2) {
    const FactEntry *LDat1 = nullptr;
    const FactEntry *LDat2 = &FactMan[Fact];
    FactSet::iterator Iter1 = FSet1.findLockIter(FactMan, *LDat2);
    if (Iter1 != FSet1.end()) LDat1 = &FactMan[*Iter1];
    if (LDat1) {
      if (LDat1->kind() != LDat2->kind()) {
        // Held as exclusive on one branch and shared on the other.
        Handler.handleExclusiveAndShared("mutex", LDat2->toString(),
                                         LDat2->loc(), LDat1->loc());
        if (Modify && LDat1->kind() != LK_Exclusive) {
          // Take the exclusive lock, which is the one in FSet2.
          *Iter1 = Fact;
        }
      }
      else if (Modify && LDat1->asserted() && !LDat2->asserted()) {
        // The non-asserted lock in FSet2 is the one we want to track.
        *Iter1 = Fact;
      }
    } else {
      // Held in FSet2 but not in FSet1: report via LEK1.
      LDat2->handleRemovalFromIntersection(FSet2, FactMan, JoinLoc, LEK1,
                                           Handler);
    }
  }
  // Find locks in FSet1 that are not in FSet2, and remove them.
  for (const auto &Fact : FSet1Orig) {
    const FactEntry *LDat1 = &FactMan[Fact];
    const FactEntry *LDat2 = FSet2.findLock(FactMan, *LDat1);
    if (!LDat2) {
      LDat1->handleRemovalFromIntersection(FSet1Orig, FactMan, JoinLoc, LEK2,
                                           Handler);
      if (Modify)
        FSet1.removeLock(FactMan, *LDat1);
    }
  }
}
// Return true if block B never continues to its successors.
static bool neverReturns(const CFGBlock *B) {
if (B->hasNoReturnElement())
return true;
if (B->empty())
return false;
CFGElement Last = B->back();
if (Optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
if (isa<CXXThrowExpr>(S->getStmt()))
return true;
}
return false;
}
/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
  // TODO: this whole function needs be rewritten as a visitor for CFGWalker.
  // For now, we just use the walker to set things up.
  threadSafety::CFGWalker walker;
  if (!walker.init(AC))
    return;
  // AC.dumpCFG(true);
  // threadSafety::printSCFG(walker);
  CFG *CFGraph = walker.getGraph();
  const NamedDecl *D = walker.getDecl();
  const FunctionDecl *CurrentFunction = dyn_cast<FunctionDecl>(D);
  CurrentMethod = dyn_cast<CXXMethodDecl>(D);
  // Respect the opt-out attribute.
  if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
    return;
  // FIXME: Do something a bit more intelligent inside constructor and
  // destructor code.  Constructors and destructors must assume unique access
  // to 'this', so checks on member variable access is disabled, but we should
  // still enable checks on other objects.
  if (isa<CXXConstructorDecl>(D))
    return;  // Don't check inside constructors.
  if (isa<CXXDestructorDecl>(D))
    return;  // Don't check inside destructors.
  Handler.enterFunction(CurrentFunction);
  BlockInfo.resize(CFGraph->getNumBlockIDs(),
                   CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));
  // We need to explore the CFG via a "topological" ordering.
  // That way, we will be guaranteed to have information about required
  // predecessor locksets when exploring a new block.
  const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
  // Mark entry block as reachable
  BlockInfo[CFGraph->getEntry().getBlockID()].Reachable = true;
  // Compute SSA names for local variables
  LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);
  // Fill in source locations for all CFGBlocks.
  findBlockLocations(CFGraph, SortedGraph, BlockInfo);
  CapExprSet ExclusiveLocksAcquired;
  CapExprSet SharedLocksAcquired;
  CapExprSet LocksReleased;
  // Add locks from exclusive_locks_required and shared_locks_required
  // to initial lockset. Also turn off checking for lock and unlock functions.
  // FIXME: is there a more intelligent way to check lock/unlock functions?
  if (!SortedGraph->empty() && D->hasAttrs()) {
    const CFGBlock *FirstBlock = *SortedGraph->begin();
    FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
    CapExprSet ExclusiveLocksToAdd;
    CapExprSet SharedLocksToAdd;
    StringRef CapDiagKind = "mutex";
    SourceLocation Loc = D->getLocation();
    for (const auto *Attr : D->attrs()) {
      Loc = Attr->getLocation();
      if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
        // Required capabilities are assumed held on entry.
        getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
                    nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
        // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
        // We must ignore such methods.
        if (A->args_size() == 0)
          return;
        // FIXME -- deal with exclusive vs. shared unlock functions?
        // An unlock function is assumed to hold its lock on entry and to
        // release it before exit.
        getMutexIDs(ExclusiveLocksToAdd, A, nullptr, D);
        getMutexIDs(LocksReleased, A, nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
        if (A->args_size() == 0)
          return;
        // A lock function acquires its capability somewhere in its body.
        getMutexIDs(A->isShared() ? SharedLocksAcquired
                                  : ExclusiveLocksAcquired,
                    A, nullptr, D);
        CapDiagKind = ClassifyDiagnostic(A);
      } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now
        return;
      } else if (isa<SharedTrylockFunctionAttr>(Attr)) {
        // Don't try to check trylock functions for now
        return;
      }
    }
    // FIXME -- Loc can be wrong here.
    for (const auto &Mu : ExclusiveLocksToAdd) {
      auto Entry = llvm::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc);
      Entry->setDeclared(true);
      addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
    }
    for (const auto &Mu : SharedLocksToAdd) {
      auto Entry = llvm::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc);
      Entry->setDeclared(true);
      addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
    }
  }
  // Main dataflow loop: visit blocks in topological order, merging
  // predecessor exit locksets into each block's entry lockset.
  for (const auto *CurrBlock : *SortedGraph) {
    int CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
    // Use the default initial lockset in case there are no predecessors.
    VisitedBlocks.insert(CurrBlock);
    // Iterate through the predecessor blocks and warn if the lockset for all
    // predecessors is not the same. We take the entry lockset of the current
    // block to be the intersection of all previous locksets.
    // FIXME: By keeping the intersection, we may output more errors in future
    // for a lock which is not in the intersection, but was in the union. We
    // may want to also keep the union in future. As an example, let's say
    // the intersection contains Mutex L, and the union contains L and M.
    // Later we unlock M. At this point, we would output an error because we
    // never locked M; although the real error is probably that we forgot to
    // lock M on all code paths. Conversely, let's say that later we lock M.
    // In this case, we should compare against the intersection instead of the
    // union because the real error is probably that we forgot to unlock M on
    // all code paths.
    bool LocksetInitialized = false;
    SmallVector<CFGBlock *, 8> SpecialBlocks;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE  = CurrBlock->pred_end(); PI != PE; ++PI) {
      // if *PI -> CurrBlock is a back edge
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
        continue;
      int PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
      // Ignore edges from blocks that can't return.
      if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
        continue;
      // Okay, we can reach this block from the entry.
      CurrBlockInfo->Reachable = true;
      // If the previous block ended in a 'continue' or 'break' statement, then
      // a difference in locksets is probably due to a bug in that block, rather
      // than in some other predecessor. In that case, keep the other
      // predecessor's lockset.
      if (const Stmt *Terminator = (*PI)->getTerminator()) {
        if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
          SpecialBlocks.push_back(*PI);
          continue;
        }
      }
      FactSet PrevLockset;
      getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);
      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevLockset;
        LocksetInitialized = true;
      } else {
        intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
                         CurrBlockInfo->EntryLoc,
                         LEK_LockedSomePredecessors);
      }
    }
    // Skip rest of block if it's not reachable.
    if (!CurrBlockInfo->Reachable)
      continue;
    // Process continue and break blocks. Assume that the lockset for the
    // resulting block is unaffected by any discrepancies in them.
    for (const auto *PrevBlock : SpecialBlocks) {
      int PrevBlockID = PrevBlock->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
      if (!LocksetInitialized) {
        CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
        LocksetInitialized = true;
      } else {
        // Determine whether this edge is a loop terminator for diagnostic
        // purposes. FIXME: A 'break' statement might be a loop terminator, but
        // it might also be part of a switch. Also, a subsequent destructor
        // might add to the lockset, in which case the real issue might be a
        // double lock on the other path.
        const Stmt *Terminator = PrevBlock->getTerminator();
        bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);
        FactSet PrevLockset;
        getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet,
                       PrevBlock, CurrBlock);
        // Do not update EntrySet.
        intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
                         PrevBlockInfo->ExitLoc,
                         IsLoop ? LEK_LockedSomeLoopIterations
                                : LEK_LockedSomePredecessors,
                         false);
      }
    }
    BuildLockset LocksetBuilder(this, *CurrBlockInfo);
    // Visit all the statements in the basic block.
    for (CFGBlock::const_iterator BI = CurrBlock->begin(),
         BE = CurrBlock->end(); BI != BE; ++BI) {
      switch (BI->getKind()) {
        case CFGElement::Statement: {
          CFGStmt CS = BI->castAs<CFGStmt>();
          LocksetBuilder.Visit(const_cast<Stmt*>(CS.getStmt()));
          break;
        }
        // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
        case CFGElement::AutomaticObjectDtor: {
          CFGAutomaticObjDtor AD = BI->castAs<CFGAutomaticObjDtor>();
          CXXDestructorDecl *DD = const_cast<CXXDestructorDecl *>(
              AD.getDestructorDecl(AC.getASTContext()));
          if (!DD->hasAttrs())
            break;
          // Create a dummy expression so the implicit destructor call can be
          // routed through the same attribute processing as explicit calls.
          VarDecl *VD = const_cast<VarDecl*>(AD.getVarDecl());
          DeclRefExpr DRE(VD, false, VD->getType().getNonReferenceType(),
                          VK_LValue, AD.getTriggerStmt()->getLocEnd());
          LocksetBuilder.handleCall(&DRE, DD);
          break;
        }
        default:
          break;
      }
    }
    CurrBlockInfo->ExitSet = LocksetBuilder.FSet;
    // For every back edge from CurrBlock (the end of the loop) to another block
    // (FirstLoopBlock) we need to check that the Lockset of Block is equal to
    // the one held at the beginning of FirstLoopBlock. We can look up the
    // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE  = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;
      CFGBlock *FirstLoopBlock = *SI;
      CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
      CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
      intersectAndWarn(LoopEnd->ExitSet, PreLoop->EntrySet,
                       PreLoop->EntryLoc,
                       LEK_LockedSomeLoopIterations,
                       false);
    }
  }
  CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
  CFGBlockInfo *Final   = &BlockInfo[CFGraph->getExit().getBlockID()];
  // Skip the final check if the exit block is unreachable.
  if (!Final->Reachable)
    return;
  // By default, we expect all locks held on entry to be held on exit.
  FactSet ExpectedExitSet = Initial->EntrySet;
  // Adjust the expected exit set by adding or removing locks, as declared
  // by *-LOCK_FUNCTION and UNLOCK_FUNCTION.  The intersect below will then
  // issue the appropriate warning.
  // FIXME: the location here is not quite right.
  for (const auto &Lock : ExclusiveLocksAcquired)
    ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
                                         Lock, LK_Exclusive, D->getLocation()));
  for (const auto &Lock : SharedLocksAcquired)
    ExpectedExitSet.addLock(FactMan, llvm::make_unique<LockableFactEntry>(
                                         Lock, LK_Shared, D->getLocation()));
  for (const auto &Lock : LocksReleased)
    ExpectedExitSet.removeLock(FactMan, Lock);
  // FIXME: Should we call this function for all blocks which exit the function?
  intersectAndWarn(ExpectedExitSet, Final->ExitSet,
                   Final->ExitLoc,
                   LEK_LockedAtEndOfFunction,
                   LEK_NotLockedAtEndOfFunction,
                   false);
  Handler.leaveFunction(CurrentFunction);
}
/// \brief Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
/// Each block in the CFG is traversed exactly once.
///
/// \param AC      Analysis context for the function being checked.
/// \param Handler Receives the diagnostics emitted by the analysis.
/// \param BSet    Caller-owned cache, lazily allocated here and reused across
///                calls; release it with threadSafetyCleanup().
void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
                                           ThreadSafetyHandler &Handler,
                                           BeforeSet **BSet) {
  if (!*BSet)
    *BSet = new BeforeSet;
  ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
  Analyzer.runAnalysis(AC);
}
/// \brief Free the cache allocated by runThreadSafetyAnalysis.
void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }
/// \brief Helper function that returns a LockKind required for the given level
/// of access: reads need at least a shared lock, writes an exclusive one.
LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
  if (AK == AK_Read)
    return LK_Shared;
  if (AK == AK_Written)
    return LK_Exclusive;
  llvm_unreachable("Unknown AccessKind");
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/CMakeLists.txt | set(LLVM_LINK_COMPONENTS
Support
)
# HLSL Change - add ignored sources
# NOTE(review): these checker-support sources are excluded from the HLSL
# build — presumably unused by the DXC toolchain; confirm before re-adding.
set(HLSL_IGNORE_SOURCES
  CocoaConventions.cpp
  FormatString.cpp
  PrintfFormatString.cpp
  ScanfFormatString.cpp
  )
# Static-analysis support library: CFG construction, dataflow analyses
# (liveness, uninitialized values, thread safety), and the AST call graph.
add_clang_library(clangAnalysis
  AnalysisDeclContext.cpp
  BodyFarm.cpp
  CFG.cpp
  CFGReachabilityAnalysis.cpp
  CFGStmtMap.cpp
  CallGraph.cpp
  Consumed.cpp
  CodeInjector.cpp
  Dominators.cpp
  LiveVariables.cpp
  ObjCNoReturn.cpp
  PostOrderCFGView.cpp
  ProgramPoint.cpp
  PseudoConstantAnalysis.cpp
  ReachableCode.cpp
  ThreadSafety.cpp
  ThreadSafetyCommon.cpp
  ThreadSafetyLogical.cpp
  ThreadSafetyTIL.cpp
  UninitializedValues.cpp
  LINK_LIBS
  clangAST
  clangBasic
  clangLex
  )
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/CallGraph.cpp | //== CallGraph.cpp - AST-based Call graph ----------------------*- C++ -*--==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the AST-based CallGraph.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/CallGraph.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/GraphWriter.h"
// //
///////////////////////////////////////////////////////////////////////////////
using namespace clang;
#define DEBUG_TYPE "CallGraph"
STATISTIC(NumObjCCallEdges, "Number of Objective-C method call edges");
STATISTIC(NumBlockCallEdges, "Number of block call edges");
namespace {
/// A helper class, which walks the AST and locates all the call sites in the
/// given function body.
class CGBuilder : public StmtVisitor<CGBuilder> {
  // The graph being populated.
  CallGraph *G;
  // The node for the function whose body is being walked; all discovered
  // call sites become callees of this node.
  CallGraphNode *CallerNode;
public:
  CGBuilder(CallGraph *g, CallGraphNode *N)
    : G(g), CallerNode(N) {}
  void VisitStmt(Stmt *S) { VisitChildren(S); }
  /// Resolve a call expression to its callee declaration, handling direct
  /// calls and direct calls through a block; returns null otherwise.
  Decl *getDeclFromCall(CallExpr *CE) {
    if (FunctionDecl *CalleeDecl = CE->getDirectCallee())
      return CalleeDecl;
    // Simple detection of a call through a block.
    Expr *CEE = CE->getCallee()->IgnoreParenImpCasts();
    if (BlockExpr *Block = dyn_cast<BlockExpr>(CEE)) {
      NumBlockCallEdges++;
      return Block->getBlockDecl();
    }
    return nullptr;
  }
  /// Record an edge from the caller to D, if D qualifies for the graph.
  void addCalledDecl(Decl *D) {
    if (G->includeInGraph(D)) {
      CallGraphNode *CalleeNode = G->getOrInsertNode(D);
      CallerNode->addCallee(CalleeNode, G);
    }
  }
  void VisitCallExpr(CallExpr *CE) {
    if (Decl *D = getDeclFromCall(CE))
      addCalledDecl(D);
  }
  // Adds may-call edges for the ObjC message sends.
  void VisitObjCMessageExpr(ObjCMessageExpr *ME) {
    if (ObjCInterfaceDecl *IDecl = ME->getReceiverInterface()) {
      Selector Sel = ME->getSelector();
      // Find the callee definition within the same translation unit.
      Decl *D = nullptr;
      if (ME->isInstanceMessage())
        D = IDecl->lookupPrivateMethod(Sel);
      else
        D = IDecl->lookupPrivateClassMethod(Sel);
      if (D) {
        addCalledDecl(D);
        NumObjCCallEdges++;
      }
    }
  }
  /// Recurse into every non-null child statement.
  void VisitChildren(Stmt *S) {
    for (Stmt *SubStmt : S->children())
      if (SubStmt)
        this->Visit(SubStmt);
  }
};
} // end anonymous namespace
void CallGraph::addNodesForBlocks(DeclContext *D) {
  // If this context is itself a block, give it a node reachable from the
  // root.
  if (BlockDecl *BD = dyn_cast<BlockDecl>(D))
    addNodeForDecl(BD, true);

  // Recurse into every nested declaration context looking for more blocks.
  for (auto *Child : D->decls())
    if (auto *Ctx = dyn_cast<DeclContext>(Child))
      addNodesForBlocks(Ctx);
}
// The root node has a null Decl and acts as a synthetic caller of every
// function, ensuring all nodes are reachable during graph traversals.
CallGraph::CallGraph() {
  Root = getOrInsertNode(nullptr);
}
CallGraph::~CallGraph() {
  // FunctionMap owns the CallGraphNode objects; delete every mapped value.
  llvm::DeleteContainerSeconds(FunctionMap);
}
bool CallGraph::includeInGraph(const Decl *D) {
assert(D);
if (!D->hasBody())
return false;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// We skip function template definitions, as their semantics is
// only determined when they are instantiated.
if (FD->isDependentContext())
return false;
IdentifierInfo *II = FD->getIdentifier();
if (II && II->getName().startswith("__inline"))
return false;
}
return true;
}
// Create (or reuse) the node for \p D and record all calls made from its
// body.  NOTE(review): the IsGlobal flag is currently unused here.
void CallGraph::addNodeForDecl(Decl* D, bool IsGlobal) {
  assert(D);
  // Allocate a new node for this declaration and process its calls.
  CallGraphNode *Node = getOrInsertNode(D);
  // Process all the calls by this function as well.
  CGBuilder builder(this, Node);
  if (Stmt *Body = D->getBody())
    builder.Visit(Body);
}
// Look up the node for \p F without creating one; returns null when the
// declaration is not in the graph.
CallGraphNode *CallGraph::getNode(const Decl *F) const {
  auto It = FunctionMap.find(F);
  return It == FunctionMap.end() ? nullptr : It->second;
}
// Return the node for \p F, creating it on first use.  Redeclarations are
// folded onto one node by canonicalizing the key (ObjC methods are kept
// as-is).
CallGraphNode *CallGraph::getOrInsertNode(Decl *F) {
  if (F && !isa<ObjCMethodDecl>(F))
    F = F->getCanonicalDecl();

  CallGraphNode *&Slot = FunctionMap[F];
  if (!Slot) {
    Slot = new CallGraphNode(F);
    // Make Root node a parent of all functions to make sure all are
    // reachable.
    if (F)
      Root->addCallee(Slot, this);
  }
  return Slot;
}
void CallGraph::print(raw_ostream &OS) const {
  OS << " --- Call graph Dump --- \n";

  // We are going to print the graph in reverse post order, partially, to make
  // sure the output is deterministic.
  llvm::ReversePostOrderTraversal<const clang::CallGraph *> RPOT(this);
  for (const CallGraphNode *N : RPOT) {
    OS << " Function: ";
    if (N == Root)
      OS << "< root >";
    else
      N->print(OS);

    OS << " calls: ";
    for (CallGraphNode::const_iterator CI = N->begin(), CE = N->end();
         CI != CE; ++CI) {
      assert(*CI != Root && "No one can call the root node.");
      (*CI)->print(OS);
      OS << " ";
    }
    OS << '\n';
  }
  OS.flush();
}
// Convenience wrapper that prints the graph to stderr (handy from a
// debugger).
void CallGraph::dump() const {
  print(llvm::errs());
}
// Render the call graph via llvm::ViewGraph using the DOTGraphTraits
// specialization defined at the bottom of this file.
void CallGraph::viewGraph() const {
  llvm::ViewGraph(this, "CallGraph");
}
// Print the name of the declaration this node represents, or a placeholder
// when the declaration is null or unnamed (e.g. a block).
void CallGraphNode::print(raw_ostream &os) const {
  const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(FD);
  if (!ND) {
    os << "< >";
    return;
  }
  ND->printName(os);
}
// Convenience wrapper that prints this node to stderr.
void CallGraphNode::dump() const {
  print(llvm::errs());
}
namespace llvm {

// Teach the GraphWriter how to label call-graph nodes: the synthetic root
// and unnamed declarations get placeholder labels, everything else uses the
// declaration's name.
template <>
struct DOTGraphTraits<const CallGraph *> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getNodeLabel(const CallGraphNode *Node,
                                  const CallGraph *CG) {
    if (CG->getRoot() == Node)
      return "< root >";
    const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Node->getDecl());
    return ND ? ND->getNameAsString() : "< >";
  }
};

} // end namespace llvm
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/ObjCNoReturn.cpp | //= ObjCNoReturn.cpp - Handling of Cocoa APIs known not to return --*- C++ -*---
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements special handling of recognizing ObjC API hooks that
// do not return but aren't marked as such in API headers.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Analysis/DomainSpecific/ObjCNoReturn.h"
using namespace clang;
// Returns true if \p Class, or any class on its superclass chain, has the
// identifier \p II.
static bool isSubclass(const ObjCInterfaceDecl *Class, IdentifierInfo *II) {
  for (; Class; Class = Class->getSuperClass())
    if (Class->getIdentifier() == II)
      return true;
  return false;
}
// Precompute the selectors and the NSException identifier used by
// isImplicitNoReturn(), so each query is a cheap comparison.
ObjCNoReturn::ObjCNoReturn(ASTContext &C)
  : RaiseSel(GetNullarySelector("raise", C)),
    NSExceptionII(&C.Idents.get("NSException"))
{
  // Generate selectors.
  SmallVector<IdentifierInfo*, 3> II;
  // raise:format:
  II.push_back(&C.Idents.get("raise"));
  II.push_back(&C.Idents.get("format"));
  NSExceptionInstanceRaiseSelectors[0] =
    C.Selectors.getSelector(II.size(), &II[0]);
  // raise:format:arguments: (reuses the first two identifiers above)
  II.push_back(&C.Idents.get("arguments"));
  NSExceptionInstanceRaiseSelectors[1] =
    C.Selectors.getSelector(II.size(), &II[0]);
}
bool ObjCNoReturn::isImplicitNoReturn(const ObjCMessageExpr *ME) {
Selector S = ME->getSelector();
if (ME->isInstanceMessage()) {
// Check for the "raise" message.
return S == RaiseSel;
}
if (const ObjCInterfaceDecl *ID = ME->getReceiverInterface()) {
if (isSubclass(ID, NSExceptionII)) {
for (unsigned i = 0; i < NUM_RAISE_SELECTORS; ++i) {
if (S == NSExceptionInstanceRaiseSelectors[i])
return true;
}
}
}
return false;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/PrintfFormatString.cpp | //== PrintfFormatString.cpp - Analysis of printf format strings --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Handling of format string in printf and friends. The structure of format
// strings for fprintf() are described in C99 7.19.6.1.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/FormatString.h"
#include "FormatStringParsing.h"
#include "clang/Basic/TargetInfo.h"
using clang::analyze_format_string::ArgType;
using clang::analyze_format_string::FormatStringHandler;
using clang::analyze_format_string::LengthModifier;
using clang::analyze_format_string::OptionalAmount;
using clang::analyze_format_string::ConversionSpecifier;
using clang::analyze_printf::PrintfSpecifier;
using namespace clang;
typedef clang::analyze_format_string::SpecifierResult<PrintfSpecifier>
PrintfSpecifierResult;
//===----------------------------------------------------------------------===//
// Methods for parsing format strings.
//===----------------------------------------------------------------------===//
using analyze_format_string::ParseNonPositionAmount;
// Parse the precision of a printf specifier and install it on \p FS.
// A null \p argIndex means positional-argument mode; otherwise the amount is
// parsed non-positionally and may consume an argument slot.  Returns true on
// a hard parse error.
static bool ParsePrecision(FormatStringHandler &H, PrintfSpecifier &FS,
                           const char *Start, const char *&Beg, const char *E,
                           unsigned *argIndex) {
  if (!argIndex) {
    const OptionalAmount Amt =
        ParsePositionAmount(H, Start, Beg, E,
                            analyze_format_string::PrecisionPos);
    if (Amt.isInvalid())
      return true;
    FS.setPrecision(Amt);
    return false;
  }
  FS.setPrecision(ParseNonPositionAmount(Beg, E, *argIndex));
  return false;
}
// Interpret the contents of an Objective-C modifier-flag block, i.e. the
// text between '[' and ']' in "%[tt]@".  Returns false when a known flag was
// installed on \p FS, true (optionally diagnosing) otherwise.
static bool ParseObjCFlags(FormatStringHandler &H, PrintfSpecifier &FS,
                           const char *FlagBeg, const char *E, bool Warn) {
  StringRef Flag(FlagBeg, E - FlagBeg);

  // Currently there is only one flag.
  if (Flag == "tt") {
    FS.setHasObjCTechnicalTerm(FlagBeg);
    return false;
  }

  // Handle either the case of no flag or an invalid flag.
  if (Warn) {
    if (Flag.empty())
      H.HandleEmptyObjCModifierFlag(FlagBeg, E - FlagBeg);
    else
      H.HandleInvalidObjCModifierFlag(FlagBeg, E - FlagBeg);
  }
  return true;
}
// Parse a single printf conversion specifier out of [Beg, E).  On every
// return path, Beg is advanced past everything that was consumed (via
// UpdateOnReturn).  The result carries either a complete PrintfSpecifier,
// "no specifier found" (only plain text remained), or a fail-stop request
// after an unrecoverable error was reported through \p H.
//
// Fix over the previous version: after parsing the Objective-C modifier
// flags ("%[...]"), the string may end immediately after the closing ']';
// the old code unconditionally dereferenced *I at that point, reading one
// past the end of the buffer.  We now report an incomplete specifier, the
// same way every other premature-end point in this function does.
static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
                                                  const char *&Beg,
                                                  const char *E,
                                                  unsigned &argIndex,
                                                  const LangOptions &LO,
                                                  const TargetInfo &Target,
                                                  bool Warn,
                                                  bool isFreeBSDKPrintf) {
  using namespace clang::analyze_format_string;
  using namespace clang::analyze_printf;

  const char *I = Beg;
  const char *Start = nullptr;
  UpdateOnReturn <const char*> UpdateBeg(Beg, I);

  // Look for a '%' character that indicates the start of a format specifier.
  for ( ; I != E ; ++I) {
    char c = *I;
    if (c == '\0') {
      // Detect spurious null characters, which are likely errors.
      H.HandleNullChar(I);
      return true;
    }
    if (c == '%') {
      Start = I++;  // Record the start of the format specifier.
      break;
    }
  }

  // No format specifier found?
  if (!Start)
    return false;

  if (I == E) {
    // No more characters left?
    if (Warn)
      H.HandleIncompleteSpecifier(Start, E - Start);
    return true;
  }

  PrintfSpecifier FS;
  if (ParseArgPosition(H, FS, Start, I, E))
    return true;

  if (I == E) {
    // No more characters left?
    if (Warn)
      H.HandleIncompleteSpecifier(Start, E - Start);
    return true;
  }

  // Look for flags (if any).
  bool hasMore = true;
  for ( ; I != E; ++I) {
    switch (*I) {
      default: hasMore = false; break;
      case '\'':
        // FIXME: POSIX specific.  Always accept?
        FS.setHasThousandsGrouping(I);
        break;
      case '-': FS.setIsLeftJustified(I); break;
      case '+': FS.setHasPlusPrefix(I); break;
      case ' ': FS.setHasSpacePrefix(I); break;
      case '#': FS.setHasAlternativeForm(I); break;
      case '0': FS.setHasLeadingZeros(I); break;
    }
    if (!hasMore)
      break;
  }

  if (I == E) {
    // No more characters left?
    if (Warn)
      H.HandleIncompleteSpecifier(Start, E - Start);
    return true;
  }

  // Look for the field width (if any).
  if (ParseFieldWidth(H, FS, Start, I, E,
                      FS.usesPositionalArg() ? nullptr : &argIndex))
    return true;

  if (I == E) {
    // No more characters left?
    if (Warn)
      H.HandleIncompleteSpecifier(Start, E - Start);
    return true;
  }

  // Look for the precision (if any).
  if (*I == '.') {
    ++I;
    if (I == E) {
      if (Warn)
        H.HandleIncompleteSpecifier(Start, E - Start);
      return true;
    }

    if (ParsePrecision(H, FS, Start, I, E,
                       FS.usesPositionalArg() ? nullptr : &argIndex))
      return true;

    if (I == E) {
      // No more characters left?
      if (Warn)
        H.HandleIncompleteSpecifier(Start, E - Start);
      return true;
    }
  }

  // Look for the length modifier.
  if (ParseLengthModifier(FS, I, E, LO) && I == E) {
    // No more characters left?
    if (Warn)
      H.HandleIncompleteSpecifier(Start, E - Start);
    return true;
  }

  // Look for the Objective-C modifier flags, if any.
  // We parse these here, even if they don't apply to
  // the conversion specifier, and then emit an error
  // later if the conversion specifier isn't '@'.  This
  // enables better recovery, and we don't know if
  // these flags are applicable until later.
  const char *ObjCModifierFlagsStart = nullptr,
             *ObjCModifierFlagsEnd = nullptr;
  if (*I == '[') {
    ObjCModifierFlagsStart = I;
    ++I;
    auto flagStart = I;
    for (;; ++I) {
      ObjCModifierFlagsEnd = I;
      if (I == E) {
        if (Warn)
          H.HandleIncompleteSpecifier(Start, E - Start);
        return true;
      }
      // Did we find the closing ']'?
      if (*I == ']') {
        if (ParseObjCFlags(H, FS, flagStart, I, Warn))
          return true;
        ++I;
        break;
      }
      // There are no separators defined yet for multiple
      // Objective-C modifier flags.  When those are
      // defined, this is the place to check.
    }
  }

  if (I == E) {
    // The string ended right after the ']' of the Objective-C flags.
    // Report an incomplete specifier rather than reading past the end of
    // the buffer below.
    if (Warn)
      H.HandleIncompleteSpecifier(Start, E - Start);
    return true;
  }

  if (*I == '\0') {
    // Detect spurious null characters, which are likely errors.
    H.HandleNullChar(I);
    return true;
  }

  // Finally, look for the conversion specifier.
  const char *conversionPosition = I++;
  ConversionSpecifier::Kind k = ConversionSpecifier::InvalidSpecifier;
  switch (*conversionPosition) {
    default:
      break;
    // C99: 7.19.6.1 (section 8).
    case '%': k = ConversionSpecifier::PercentArg;   break;
    case 'A': k = ConversionSpecifier::AArg; break;
    case 'E': k = ConversionSpecifier::EArg; break;
    case 'F': k = ConversionSpecifier::FArg; break;
    case 'G': k = ConversionSpecifier::GArg; break;
    case 'X': k = ConversionSpecifier::XArg; break;
    case 'a': k = ConversionSpecifier::aArg; break;
    case 'c': k = ConversionSpecifier::cArg; break;
    case 'd': k = ConversionSpecifier::dArg; break;
    case 'e': k = ConversionSpecifier::eArg; break;
    case 'f': k = ConversionSpecifier::fArg; break;
    case 'g': k = ConversionSpecifier::gArg; break;
    case 'i': k = ConversionSpecifier::iArg; break;
    case 'n': k = ConversionSpecifier::nArg; break;
    case 'o': k = ConversionSpecifier::oArg; break;
    case 'p': k = ConversionSpecifier::pArg; break;
    case 's': k = ConversionSpecifier::sArg; break;
    case 'u': k = ConversionSpecifier::uArg; break;
    case 'x': k = ConversionSpecifier::xArg; break;
    // POSIX specific.
    case 'C': k = ConversionSpecifier::CArg; break;
    case 'S': k = ConversionSpecifier::SArg; break;
    // Objective-C.
    case '@': k = ConversionSpecifier::ObjCObjArg; break;
    // Glibc specific.
    case 'm': k = ConversionSpecifier::PrintErrno; break;
    // FreeBSD kernel specific.
    case 'b':
      if (isFreeBSDKPrintf)
        k = ConversionSpecifier::FreeBSDbArg; // int followed by char *
      break;
    case 'r':
      if (isFreeBSDKPrintf)
        k = ConversionSpecifier::FreeBSDrArg; // int
      break;
    case 'y':
      if (isFreeBSDKPrintf)
        k = ConversionSpecifier::FreeBSDyArg; // int
      break;
    // Apple-specific.
    case 'D':
      if (isFreeBSDKPrintf)
        k = ConversionSpecifier::FreeBSDDArg; // void * followed by char *
      else if (Target.getTriple().isOSDarwin())
        k = ConversionSpecifier::DArg;
      break;
    case 'O':
      if (Target.getTriple().isOSDarwin())
        k = ConversionSpecifier::OArg;
      break;
    case 'U':
      if (Target.getTriple().isOSDarwin())
        k = ConversionSpecifier::UArg;
      break;
    // MS specific.
    case 'Z':
      if (Target.getTriple().isOSMSVCRT())
        k = ConversionSpecifier::ZArg;
  }

  // Check to see if we used the Objective-C modifier flags with
  // a conversion specifier other than '@'.
  if (k != ConversionSpecifier::ObjCObjArg &&
      k != ConversionSpecifier::InvalidSpecifier &&
      ObjCModifierFlagsStart) {
    H.HandleObjCFlagsWithNonObjCConversion(ObjCModifierFlagsStart,
                                           ObjCModifierFlagsEnd + 1,
                                           conversionPosition);
    return true;
  }

  PrintfConversionSpecifier CS(conversionPosition, k);
  FS.setConversionSpecifier(CS);
  if (CS.consumesDataArgument() && !FS.usesPositionalArg())
    FS.setArgIndex(argIndex++);
  // FreeBSD kernel specific.
  if (k == ConversionSpecifier::FreeBSDbArg ||
      k == ConversionSpecifier::FreeBSDDArg)
    argIndex++;

  if (k == ConversionSpecifier::InvalidSpecifier) {
    // Assume the conversion takes one argument.
    return !H.HandleInvalidPrintfConversionSpecifier(FS, Start, I - Start);
  }
  return PrintfSpecifierResult(Start, FS);
}
// Drive the specifier parser over the whole format string [I, E), handing
// each successfully parsed specifier to \p H.  Returns true if parsing was
// aborted (fail-stop error or the handler requested a stop).
bool clang::analyze_format_string::ParsePrintfString(FormatStringHandler &H,
                                                     const char *I,
                                                     const char *E,
                                                     const LangOptions &LO,
                                                     const TargetInfo &Target,
                                                     bool isFreeBSDKPrintf) {
  unsigned argIndex = 0;

  // Keep looking for a format specifier until we have exhausted the string.
  while (I != E) {
    const PrintfSpecifierResult &FSR =
        ParsePrintfSpecifier(H, I, E, argIndex, LO, Target, true,
                             isFreeBSDKPrintf);
    // A fail-stop error of any kind aborts all further processing.
    if (FSR.shouldStop())
      return true;
    // Recoverable error, or the string was exhausted: keep scanning.
    if (!FSR.hasValue())
      continue;
    // We have a format specifier.  Pass it to the callback.
    if (!H.HandlePrintfSpecifier(FSR.getValue(), FSR.getStart(),
                                 I - FSR.getStart()))
      return true;
  }
  assert(I == E && "Format string not exhausted");
  return false;
}
// Scan [I, E) and report whether it contains a %s conversion.  Diagnostics
// are suppressed (a throw-away handler with Warn=false is used).
bool clang::analyze_format_string::ParseFormatStringHasSArg(
    const char *I, const char *E, const LangOptions &LO,
    const TargetInfo &Target) {
  unsigned argIndex = 0;
  FormatStringHandler H;

  while (I != E) {
    const PrintfSpecifierResult &FSR =
        ParsePrintfSpecifier(H, I, E, argIndex, LO, Target, false, false);
    // A fail-stop error means no %s can be reliably found.
    if (FSR.shouldStop())
      return false;
    // Recoverable error or string exhausted: keep scanning.
    if (!FSR.hasValue())
      continue;
    if (FSR.getValue().getConversionSpecifier().getKind() ==
        ConversionSpecifier::Kind::sArg)
      return true;
  }
  return false;
}
//===----------------------------------------------------------------------===//
// Methods on PrintfSpecifier.
//===----------------------------------------------------------------------===//
// Map this specifier's conversion + length modifier to the argument type it
// consumes, following C99 7.19.6.1 plus the POSIX/Darwin/MS/FreeBSD
// extensions recognized by the parser.  Returns ArgType::Invalid() for
// combinations that are never valid, and a default-constructed (unknown)
// ArgType where the correct type cannot be named (see FIXMEs).
ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
                                    bool IsObjCLiteral) const {
  const PrintfConversionSpecifier &CS = getConversionSpecifier();
  // Conversions like %% consume no argument at all.
  if (!CS.consumesDataArgument())
    return ArgType::Invalid();
  // %c: takes an int (char arguments are promoted); %lc takes wint_t.
  if (CS.getKind() == ConversionSpecifier::cArg)
    switch (LM.getKind()) {
      case LengthModifier::None:
        return Ctx.IntTy;
      case LengthModifier::AsLong:
      case LengthModifier::AsWide:
        return ArgType(ArgType::WIntTy, "wint_t");
      case LengthModifier::AsShort:
        if (Ctx.getTargetInfo().getTriple().isOSMSVCRT())
          return Ctx.IntTy;
        // Intentional fallthrough to the default for non-MSVCRT targets.
      default:
        return ArgType::Invalid();
    }
  // Signed integer conversions (%d, %i, ...): pick the signed type matching
  // the length modifier.
  if (CS.isIntArg())
    switch (LM.getKind()) {
      case LengthModifier::AsLongDouble:
        // GNU extension.
        return Ctx.LongLongTy;
      case LengthModifier::None:
        return Ctx.IntTy;
      case LengthModifier::AsInt32:
        return ArgType(Ctx.IntTy, "__int32");
      case LengthModifier::AsChar: return ArgType::AnyCharTy;
      case LengthModifier::AsShort: return Ctx.ShortTy;
      case LengthModifier::AsLong: return Ctx.LongTy;
      case LengthModifier::AsLongLong:
      case LengthModifier::AsQuad:
        return Ctx.LongLongTy;
      case LengthModifier::AsInt64:
        return ArgType(Ctx.LongLongTy, "__int64");
      case LengthModifier::AsIntMax:
        return ArgType(Ctx.getIntMaxType(), "intmax_t");
      case LengthModifier::AsSizeT:
        // FIXME: How to get the corresponding signed version of size_t?
        return ArgType();
      case LengthModifier::AsInt3264:
        return Ctx.getTargetInfo().getTriple().isArch64Bit()
                   ? ArgType(Ctx.LongLongTy, "__int64")
                   : ArgType(Ctx.IntTy, "__int32");
      case LengthModifier::AsPtrDiff:
        return ArgType(Ctx.getPointerDiffType(), "ptrdiff_t");
      case LengthModifier::AsAllocate:
      case LengthModifier::AsMAllocate:
      case LengthModifier::AsWide:
        return ArgType::Invalid();
    }
  // Unsigned integer conversions (%u, %o, %x, ...): the unsigned analogue.
  if (CS.isUIntArg())
    switch (LM.getKind()) {
      case LengthModifier::AsLongDouble:
        // GNU extension.
        return Ctx.UnsignedLongLongTy;
      case LengthModifier::None:
        return Ctx.UnsignedIntTy;
      case LengthModifier::AsInt32:
        return ArgType(Ctx.UnsignedIntTy, "unsigned __int32");
      case LengthModifier::AsChar: return Ctx.UnsignedCharTy;
      case LengthModifier::AsShort: return Ctx.UnsignedShortTy;
      case LengthModifier::AsLong: return Ctx.UnsignedLongTy;
      case LengthModifier::AsLongLong:
      case LengthModifier::AsQuad:
        return Ctx.UnsignedLongLongTy;
      case LengthModifier::AsInt64:
        return ArgType(Ctx.UnsignedLongLongTy, "unsigned __int64");
      case LengthModifier::AsIntMax:
        return ArgType(Ctx.getUIntMaxType(), "uintmax_t");
      case LengthModifier::AsSizeT:
        return ArgType(Ctx.getSizeType(), "size_t");
      case LengthModifier::AsInt3264:
        return Ctx.getTargetInfo().getTriple().isArch64Bit()
                   ? ArgType(Ctx.UnsignedLongLongTy, "unsigned __int64")
                   : ArgType(Ctx.UnsignedIntTy, "unsigned __int32");
      case LengthModifier::AsPtrDiff:
        // FIXME: How to get the corresponding unsigned
        // version of ptrdiff_t?
        return ArgType();
      case LengthModifier::AsAllocate:
      case LengthModifier::AsMAllocate:
      case LengthModifier::AsWide:
        return ArgType::Invalid();
    }
  // Floating conversions: float arguments are promoted to double, so only
  // the 'L' modifier changes the expected type.
  if (CS.isDoubleArg()) {
    if (LM.getKind() == LengthModifier::AsLongDouble)
      return Ctx.LongDoubleTy;
    return Ctx.DoubleTy;
  }
  // %n: a pointer to the signed integer type selected by the modifier.
  if (CS.getKind() == ConversionSpecifier::nArg) {
    switch (LM.getKind()) {
      case LengthModifier::None:
        return ArgType::PtrTo(Ctx.IntTy);
      case LengthModifier::AsChar:
        return ArgType::PtrTo(Ctx.SignedCharTy);
      case LengthModifier::AsShort:
        return ArgType::PtrTo(Ctx.ShortTy);
      case LengthModifier::AsLong:
        return ArgType::PtrTo(Ctx.LongTy);
      case LengthModifier::AsLongLong:
      case LengthModifier::AsQuad:
        return ArgType::PtrTo(Ctx.LongLongTy);
      case LengthModifier::AsIntMax:
        return ArgType::PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t"));
      case LengthModifier::AsSizeT:
        return ArgType(); // FIXME: ssize_t
      case LengthModifier::AsPtrDiff:
        return ArgType::PtrTo(ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"));
      case LengthModifier::AsLongDouble:
        return ArgType(); // FIXME: Is this a known extension?
      case LengthModifier::AsAllocate:
      case LengthModifier::AsMAllocate:
      case LengthModifier::AsInt32:
      case LengthModifier::AsInt3264:
      case LengthModifier::AsInt64:
      case LengthModifier::AsWide:
        return ArgType::Invalid();
    }
  }
  // Remaining conversions: strings, wide strings, pointers, ObjC objects.
  switch (CS.getKind()) {
    case ConversionSpecifier::sArg:
      if (LM.getKind() == LengthModifier::AsWideChar) {
        if (IsObjCLiteral)
          return ArgType(Ctx.getPointerType(Ctx.UnsignedShortTy.withConst()),
                         "const unichar *");
        return ArgType(ArgType::WCStrTy, "wchar_t *");
      }
      if (LM.getKind() == LengthModifier::AsWide)
        return ArgType(ArgType::WCStrTy, "wchar_t *");
      return ArgType::CStrTy;
    case ConversionSpecifier::SArg:
      if (IsObjCLiteral)
        return ArgType(Ctx.getPointerType(Ctx.UnsignedShortTy.withConst()),
                       "const unichar *");
      if (Ctx.getTargetInfo().getTriple().isOSMSVCRT() &&
          LM.getKind() == LengthModifier::AsShort)
        return ArgType::CStrTy;
      return ArgType(ArgType::WCStrTy, "wchar_t *");
    case ConversionSpecifier::CArg:
      if (IsObjCLiteral)
        return ArgType(Ctx.UnsignedShortTy, "unichar");
      if (Ctx.getTargetInfo().getTriple().isOSMSVCRT() &&
          LM.getKind() == LengthModifier::AsShort)
        return Ctx.IntTy;
      return ArgType(Ctx.WideCharTy, "wchar_t");
    case ConversionSpecifier::pArg:
      return ArgType::CPointerTy;
    case ConversionSpecifier::ObjCObjArg:
      return ArgType::ObjCPointerTy;
    default:
      break;
  }
  // FIXME: Handle other cases.
  return ArgType();
}
// Rewrite this specifier in place so that it matches the actual argument
// type \p QT; used to build fix-it hints.  Returns true when a suggestion
// was produced, false when the specifier should be left alone (e.g. %n, or
// a type we cannot confidently map).  The order of the checks below matters:
// ObjC objects, then strings, then builtin scalar types.
bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
                              ASTContext &Ctx, bool IsObjCLiteral) {
  // %n is different from other conversion specifiers; don't try to fix it.
  if (CS.getKind() == ConversionSpecifier::nArg)
    return false;
  // Handle Objective-C objects first. Note that while the '%@' specifier will
  // not warn for structure pointer or void pointer arguments (because that's
  // how CoreFoundation objects are implemented), we only show a fixit for '%@'
  // if we know it's an object (block, id, class, or __attribute__((NSObject))).
  if (QT->isObjCRetainableType()) {
    if (!IsObjCLiteral)
      return false;
    CS.setKind(ConversionSpecifier::ObjCObjArg);
    // Disable irrelevant flags
    HasThousandsGrouping = false;
    HasPlusPrefix = false;
    HasSpacePrefix = false;
    HasAlternativeForm = false;
    HasLeadingZeroes = false;
    Precision.setHowSpecified(OptionalAmount::NotSpecified);
    LM.setKind(LengthModifier::None);
    return true;
  }
  // Handle strings next (char *, wchar_t *)
  if (QT->isPointerType() && (QT->getPointeeType()->isAnyCharacterType())) {
    CS.setKind(ConversionSpecifier::sArg);
    // Disable irrelevant flags
    HasAlternativeForm = 0;
    HasLeadingZeroes = 0;
    // Set the long length modifier for wide characters
    if (QT->getPointeeType()->isWideCharType())
      LM.setKind(LengthModifier::AsWideChar);
    else
      LM.setKind(LengthModifier::None);
    return true;
  }
  // If it's an enum, get its underlying type.
  if (const EnumType *ETy = QT->getAs<EnumType>())
    QT = ETy->getDecl()->getIntegerType();
  // We can only work with builtin types.
  const BuiltinType *BT = QT->getAs<BuiltinType>();
  if (!BT)
    return false;
  // Set length modifier
  switch (BT->getKind()) {
  case BuiltinType::Bool:
  case BuiltinType::WChar_U:
  case BuiltinType::WChar_S:
  case BuiltinType::Char16:
  case BuiltinType::Char32:
  case BuiltinType::UInt128:
  case BuiltinType::Int128:
  case BuiltinType::Half:
    // Various types which are non-trivial to correct.
    return false;
// The macro dance below expands to all the remaining non-arithmetic builtin
// type kinds (void, nullptr, ObjC ids, etc.), which are all rejected.
#define SIGNED_TYPE(Id, SingletonId)
#define UNSIGNED_TYPE(Id, SingletonId)
#define FLOATING_TYPE(Id, SingletonId)
#define BUILTIN_TYPE(Id, SingletonId) \
  case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
    // Misc other stuff which doesn't make sense here.
    return false;
  case BuiltinType::UInt:
  case BuiltinType::Int:
  case BuiltinType::Float:
  case BuiltinType::Double:
    LM.setKind(LengthModifier::None);
    break;
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    LM.setKind(LengthModifier::AsChar);
    break;
  case BuiltinType::Short:
  case BuiltinType::UShort:
    LM.setKind(LengthModifier::AsShort);
    break;
  case BuiltinType::Long:
  case BuiltinType::ULong:
    LM.setKind(LengthModifier::AsLong);
    break;
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
    LM.setKind(LengthModifier::AsLongLong);
    break;
  case BuiltinType::LongDouble:
    LM.setKind(LengthModifier::AsLongDouble);
    break;
  }
  // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99.
  if (isa<TypedefType>(QT) && (LangOpt.C99 || LangOpt.CPlusPlus11))
    namedTypeToLengthModifier(QT, LM);
  // If fixing the length modifier was enough, we might be done.
  if (hasValidLengthModifier(Ctx.getTargetInfo())) {
    // If we're going to offer a fix anyway, make sure the sign matches.
    switch (CS.getKind()) {
    case ConversionSpecifier::uArg:
    case ConversionSpecifier::UArg:
      if (QT->isSignedIntegerType())
        CS.setKind(clang::analyze_format_string::ConversionSpecifier::dArg);
      break;
    case ConversionSpecifier::dArg:
    case ConversionSpecifier::DArg:
    case ConversionSpecifier::iArg:
      if (QT->isUnsignedIntegerType() && !HasPlusPrefix)
        CS.setKind(clang::analyze_format_string::ConversionSpecifier::uArg);
      break;
    default:
      // Other specifiers do not have signed/unsigned variants.
      break;
    }
    const analyze_printf::ArgType &ATR = getArgType(Ctx, IsObjCLiteral);
    if (ATR.isValid() && ATR.matchesType(Ctx, QT))
      return true;
  }
  // Set conversion specifier and disable any flags which do not apply to it.
  // Let typedefs to char fall through to int, as %c is silly for uint8_t.
  if (!isa<TypedefType>(QT) && QT->isCharType()) {
    CS.setKind(ConversionSpecifier::cArg);
    LM.setKind(LengthModifier::None);
    Precision.setHowSpecified(OptionalAmount::NotSpecified);
    HasAlternativeForm = 0;
    HasLeadingZeroes = 0;
    HasPlusPrefix = 0;
  }
  // Test for Floating type first as LongDouble can pass isUnsignedIntegerType
  else if (QT->isRealFloatingType()) {
    CS.setKind(ConversionSpecifier::fArg);
  }
  else if (QT->isSignedIntegerType()) {
    CS.setKind(ConversionSpecifier::dArg);
    HasAlternativeForm = 0;
  }
  else if (QT->isUnsignedIntegerType()) {
    CS.setKind(ConversionSpecifier::uArg);
    HasAlternativeForm = 0;
    HasPlusPrefix = 0;
  } else {
    llvm_unreachable("Unexpected type");
  }
  return true;
}
// Render the specifier back to text in the component order given by C99
// (ISO/IEC 9899:1999 (E) 7.19.6.1):
//   %[arg$][flags][width][.precision][length]conversion
void PrintfSpecifier::toString(raw_ostream &os) const {
  os << "%";

  // Positional args
  if (usesPositionalArg())
    os << getPositionalArgIndex() << "$";

  // Conversion flags
  if (IsLeftJustified)
    os << "-";
  if (HasPlusPrefix)
    os << "+";
  if (HasSpacePrefix)
    os << " ";
  if (HasAlternativeForm)
    os << "#";
  if (HasLeadingZeroes)
    os << "0";

  // Minimum field width, precision, length modifier, conversion specifier.
  FieldWidth.toString(os);
  Precision.toString(os);
  os << LM.toString() << CS.toString();
}
// The '+' flag only makes sense for signed (numeric) conversions.
bool PrintfSpecifier::hasValidPlusPrefix() const {
  if (!HasPlusPrefix)
    return true;
  switch (CS.getKind()) {
  default:
    return false;
  case ConversionSpecifier::dArg: case ConversionSpecifier::DArg:
  case ConversionSpecifier::iArg:
  case ConversionSpecifier::fArg: case ConversionSpecifier::FArg:
  case ConversionSpecifier::eArg: case ConversionSpecifier::EArg:
  case ConversionSpecifier::gArg: case ConversionSpecifier::GArg:
  case ConversionSpecifier::aArg: case ConversionSpecifier::AArg:
  case ConversionSpecifier::FreeBSDrArg:
  case ConversionSpecifier::FreeBSDyArg:
    return true;
  }
}
// The '#' (alternate form) flag is only valid with the oxXaAeEfFgG
// conversions (and the FreeBSD kernel extensions).
bool PrintfSpecifier::hasValidAlternativeForm() const {
  if (!HasAlternativeForm)
    return true;
  switch (CS.getKind()) {
  default:
    return false;
  case ConversionSpecifier::oArg: case ConversionSpecifier::OArg:
  case ConversionSpecifier::xArg: case ConversionSpecifier::XArg:
  case ConversionSpecifier::aArg: case ConversionSpecifier::AArg:
  case ConversionSpecifier::eArg: case ConversionSpecifier::EArg:
  case ConversionSpecifier::fArg: case ConversionSpecifier::FArg:
  case ConversionSpecifier::gArg: case ConversionSpecifier::GArg:
  case ConversionSpecifier::FreeBSDrArg:
  case ConversionSpecifier::FreeBSDyArg:
    return true;
  }
}
// The '0' flag is only valid with the diouxXaAeEfFgG conversions (and the
// FreeBSD kernel extensions).
bool PrintfSpecifier::hasValidLeadingZeros() const {
  if (!HasLeadingZeroes)
    return true;
  switch (CS.getKind()) {
  default:
    return false;
  case ConversionSpecifier::dArg: case ConversionSpecifier::DArg:
  case ConversionSpecifier::iArg:
  case ConversionSpecifier::oArg: case ConversionSpecifier::OArg:
  case ConversionSpecifier::uArg: case ConversionSpecifier::UArg:
  case ConversionSpecifier::xArg: case ConversionSpecifier::XArg:
  case ConversionSpecifier::aArg: case ConversionSpecifier::AArg:
  case ConversionSpecifier::eArg: case ConversionSpecifier::EArg:
  case ConversionSpecifier::fArg: case ConversionSpecifier::FArg:
  case ConversionSpecifier::gArg: case ConversionSpecifier::GArg:
  case ConversionSpecifier::FreeBSDrArg:
  case ConversionSpecifier::FreeBSDyArg:
    return true;
  }
}
// The ' ' flag only makes sense for signed (numeric) conversions.
bool PrintfSpecifier::hasValidSpacePrefix() const {
  if (!HasSpacePrefix)
    return true;
  switch (CS.getKind()) {
  default:
    return false;
  case ConversionSpecifier::dArg: case ConversionSpecifier::DArg:
  case ConversionSpecifier::iArg:
  case ConversionSpecifier::fArg: case ConversionSpecifier::FArg:
  case ConversionSpecifier::eArg: case ConversionSpecifier::EArg:
  case ConversionSpecifier::gArg: case ConversionSpecifier::GArg:
  case ConversionSpecifier::aArg: case ConversionSpecifier::AArg:
  case ConversionSpecifier::FreeBSDrArg:
  case ConversionSpecifier::FreeBSDyArg:
    return true;
  }
}
// The '-' flag is valid for every conversion except %n.
bool PrintfSpecifier::hasValidLeftJustified() const {
  return !IsLeftJustified || CS.getKind() != ConversionSpecifier::nArg;
}
// The POSIX ''' (thousands grouping) flag is only valid with decimal
// integer and floating conversions.
bool PrintfSpecifier::hasValidThousandsGroupingPrefix() const {
  if (!HasThousandsGrouping)
    return true;
  switch (CS.getKind()) {
  default:
    return false;
  case ConversionSpecifier::dArg: case ConversionSpecifier::DArg:
  case ConversionSpecifier::iArg:
  case ConversionSpecifier::uArg: case ConversionSpecifier::UArg:
  case ConversionSpecifier::fArg: case ConversionSpecifier::FArg:
  case ConversionSpecifier::gArg: case ConversionSpecifier::GArg:
    return true;
  }
}
// A precision is only valid with the diouxXaAeEfFgGs conversions (and the
// FreeBSD kernel extensions).
bool PrintfSpecifier::hasValidPrecision() const {
  if (Precision.getHowSpecified() == OptionalAmount::NotSpecified)
    return true;
  switch (CS.getKind()) {
  default:
    return false;
  case ConversionSpecifier::dArg: case ConversionSpecifier::DArg:
  case ConversionSpecifier::iArg:
  case ConversionSpecifier::oArg: case ConversionSpecifier::OArg:
  case ConversionSpecifier::uArg: case ConversionSpecifier::UArg:
  case ConversionSpecifier::xArg: case ConversionSpecifier::XArg:
  case ConversionSpecifier::aArg: case ConversionSpecifier::AArg:
  case ConversionSpecifier::eArg: case ConversionSpecifier::EArg:
  case ConversionSpecifier::fArg: case ConversionSpecifier::FArg:
  case ConversionSpecifier::gArg: case ConversionSpecifier::GArg:
  case ConversionSpecifier::sArg:
  case ConversionSpecifier::FreeBSDrArg:
  case ConversionSpecifier::FreeBSDyArg:
    return true;
  }
}
// A field width is valid for every conversion except %n.
bool PrintfSpecifier::hasValidFieldWidth() const {
  return FieldWidth.getHowSpecified() == OptionalAmount::NotSpecified ||
         CS.getKind() != ConversionSpecifier::nArg;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/Dominators.cpp | //=- Dominators.cpp - Implementation of dominators tree for Clang CFG C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/Dominators.h"
using namespace clang;
// Out-of-line virtual method, serving as the class's "anchor" so its vtable
// is emitted in this translation unit only.
void DominatorTree::anchor() { }
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/FormatStringParsing.h |
#ifndef LLVM_CLANG_LIB_ANALYSIS_FORMATSTRINGPARSING_H
#define LLVM_CLANG_LIB_ANALYSIS_FORMATSTRINGPARSING_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/FormatString.h"
#include "llvm/Support/raw_ostream.h"
namespace clang {
class LangOptions;
/// Scope guard that writes a value back through a reference on destruction.
/// The format-string parsers use it to commit a local cursor (e.g. "I") to
/// the caller's in/out pointer (e.g. "Beg") on every return path.
template <typename T>
class UpdateOnReturn {
  T &Dest;
  const T &Src;
public:
  UpdateOnReturn(T &valueToUpdate, const T &valueToCopy)
      : Dest(valueToUpdate), Src(valueToCopy) {}
  // Copy whatever Src holds *now* into Dest.
  ~UpdateOnReturn() { Dest = Src; }
};
namespace analyze_format_string {
/// Parses a fixed (non-positional) amount, e.g. the "10" in "%10s".
OptionalAmount ParseAmount(const char *&Beg, const char *E);
/// Parses an amount that may consume an argument, e.g. "*" in "%*d";
/// advances argIndex when an argument is consumed.
OptionalAmount ParseNonPositionAmount(const char *&Beg, const char *E,
                                      unsigned &argIndex);
/// Parses a positional amount such as "*2$" (field width / precision given
/// by a positional argument); reports errors through H.
OptionalAmount ParsePositionAmount(FormatStringHandler &H,
                                   const char *Start, const char *&Beg,
                                   const char *E, PositionContext p);
/// Parses an optional field width into CS; returns true on a fail-stop error.
bool ParseFieldWidth(FormatStringHandler &H,
                     FormatSpecifier &CS,
                     const char *Start, const char *&Beg, const char *E,
                     unsigned *argIndex);
/// Parses an optional "N$" argument position into CS; returns true on a
/// fail-stop error.
bool ParseArgPosition(FormatStringHandler &H,
                      FormatSpecifier &CS, const char *Start,
                      const char *&Beg, const char *E);
/// Returns true if a LengthModifier was parsed and installed in the
/// FormatSpecifier& argument, and false otherwise.
bool ParseLengthModifier(FormatSpecifier &FS, const char *&Beg, const char *E,
                         const LangOptions &LO, bool IsScanf = false);
/// The result of attempting to parse a single format specifier.
///
/// Encodes one of three states:
///   - a parsed specifier: hasValue() is true, Start points at its '%';
///   - nothing found before the end of the string: hasValue() is false;
///   - a fail-stop parse error: shouldStop() is true.
template <typename T> class SpecifierResult {
  T FS;               // The parsed specifier (default-constructed if absent).
  const char *Start;  // Start of the specifier in the format string, or null.
  bool Stop;          // True if parsing should stop entirely.
public:
  SpecifierResult(bool stop = false)
    : Start(nullptr), Stop(stop) {}
  SpecifierResult(const char *start,
                  const T &fs)
    : FS(fs), Start(start), Stop(false) {}
  const char *getStart() const { return Start; }
  bool shouldStop() const { return Stop; }
  bool hasValue() const { return Start != nullptr; }
  const T &getValue() const {
    assert(hasValue());
    return FS;
  }
  // Non-const overload; unlike the const version it does not assert.
  const T &getValue() { return FS; }
};
} // end analyze_format_string namespace
} // end clang namespace
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/PostOrderCFGView.cpp | //===- PostOrderCFGView.cpp - Post order view of CFG blocks -------*- C++ --*-//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements post order view of the blocks in a CFG.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
using namespace clang;
void PostOrderCFGView::anchor() { }
PostOrderCFGView::PostOrderCFGView(const CFG *cfg) {
  Blocks.reserve(cfg->getNumBlockIDs());
  CFGBlockSet BSet(cfg);
  // Visit the CFG in post order, remembering each block's 1-based position.
  // Position 0 is reserved to mean "block not in the ordering".
  for (po_iterator I = po_iterator::begin(cfg, BSet),
                   E = po_iterator::end(cfg, BSet);
       I != E; ++I) {
    Blocks.push_back(*I);
    BlockOrder[*I] = Blocks.size();
  }
}
PostOrderCFGView *PostOrderCFGView::create(AnalysisDeclContext &ctx) {
  // A post-order view only makes sense when the declaration has a CFG.
  if (const CFG *cfg = ctx.getCFG())
    return new PostOrderCFGView(cfg);
  return nullptr;
}
const void *PostOrderCFGView::getTag() { static int x; return &x; }
bool PostOrderCFGView::BlockOrderCompare::operator()(const CFGBlock *b1,
                                                     const CFGBlock *b2) const {
  // Blocks missing from the ordering compare as 0, i.e. after every ordered
  // block; otherwise higher post-order position sorts first.
  const PostOrderCFGView::BlockOrderTy &Order = POV.BlockOrder;
  PostOrderCFGView::BlockOrderTy::const_iterator It1 = Order.find(b1);
  PostOrderCFGView::BlockOrderTy::const_iterator It2 = Order.find(b2);
  unsigned Pos1 = (It1 == Order.end()) ? 0 : It1->second;
  unsigned Pos2 = (It2 == Order.end()) ? 0 : It2->second;
  return Pos1 > Pos2;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/ThreadSafetyLogical.cpp | //===- ThreadSafetyLogical.cpp ---------------------------------*- C++ --*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file defines a representation for logical expressions with SExpr leaves
// that are used as part of fact-checking capability expressions.
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/ThreadSafetyLogical.h"
using namespace llvm;
using namespace clang::threadSafety::lexpr;
// Implication. We implement De Morgan's Laws by maintaining LNeg and RNeg
// to keep track of whether LHS and RHS are negated.
//
// Decides whether (LNeg ? !LHS : LHS) => (RNeg ? !RHS : RHS) by structural
// recursion: first decompose RHS down to a terminal, then decompose LHS,
// then compare terminals.
static bool implies(const LExpr *LHS, bool LNeg, const LExpr *RHS, bool RNeg) {
  // In comments below, we write => for implication.
  // Calculates the logical AND implication operator.
  const auto LeftAndOperator = [=](const BinOp *A) {
    return implies(A->left(), LNeg, RHS, RNeg) &&
           implies(A->right(), LNeg, RHS, RNeg);
  };
  const auto RightAndOperator = [=](const BinOp *A) {
    return implies(LHS, LNeg, A->left(), RNeg) &&
           implies(LHS, LNeg, A->right(), RNeg);
  };
  // Calculates the logical OR implication operator.
  const auto LeftOrOperator = [=](const BinOp *A) {
    return implies(A->left(), LNeg, RHS, RNeg) ||
           implies(A->right(), LNeg, RHS, RNeg);
  };
  const auto RightOrOperator = [=](const BinOp *A) {
    return implies(LHS, LNeg, A->left(), RNeg) ||
           implies(LHS, LNeg, A->right(), RNeg);
  };
  // Recurse on right.
  switch (RHS->kind()) {
  case LExpr::And:
    // When performing right recursion:
    //   C => A & B  [if]  C => A and C => B
    // When performing right recursion (negated):
    //   C => !(A & B)  [if]  C => !A | !B  [===]  C => !A or C => !B
    return RNeg ? RightOrOperator(cast<And>(RHS))
                : RightAndOperator(cast<And>(RHS));
  case LExpr::Or:
    // When performing right recursion:
    //   C => (A | B)  [if]  C => A or C => B
    // When performing right recursion (negated):
    //   C => !(A | B)  [if]  C => !A & !B  [===]  C => !A and C => !B
    return RNeg ? RightAndOperator(cast<Or>(RHS))
                : RightOrOperator(cast<Or>(RHS));
  case LExpr::Not:
    // Note that C => !A is very different from !(C => A). It would be incorrect
    // to return !implies(LHS, RHS).
    return implies(LHS, LNeg, cast<Not>(RHS)->exp(), !RNeg);
  case LExpr::Terminal:
    // After reaching the terminal, it's time to recurse on the left.
    break;
  }
  // RHS is now a terminal. Recurse on Left.
  switch (LHS->kind()) {
  case LExpr::And:
    // When performing left recursion:
    //   A & B => C  [if]  A => C or B => C
    // When performing left recursion (negated):
    //   !(A & B) => C  [if]  !A | !B => C  [===]  !A => C and !B => C
    return LNeg ? LeftAndOperator(cast<And>(LHS))
                : LeftOrOperator(cast<And>(LHS));
  case LExpr::Or:
    // When performing left recursion:
    //   A | B => C  [if]  A => C and B => C
    // When performing left recursion (negated):
    //   !(A | B) => C  [if]  !A & !B => C  [===]  !A => C or !B => C
    return LNeg ? LeftOrOperator(cast<Or>(LHS))
                : LeftAndOperator(cast<Or>(LHS));
  case LExpr::Not:
    // Note that A => !C is very different from !(A => C). It would be incorrect
    // to return !implies(LHS, RHS).
    return implies(cast<Not>(LHS)->exp(), !LNeg, RHS, RNeg);
  case LExpr::Terminal:
    // After reaching the terminal, it's time to perform identity comparisons.
    break;
  }
  // A => A
  // !A => !A
  if (LNeg != RNeg)
    return false;
  // FIXME -- this should compare SExprs for equality, not pointer equality.
  return cast<Terminal>(LHS)->expr() == cast<Terminal>(RHS)->expr();
}
namespace clang {
namespace threadSafety {
namespace lexpr {
/// Public entry point: returns true when LHS logically implies RHS
/// (LHS => RHS), treating both as non-negated at the top level.
bool implies(const LExpr *LHS, const LExpr *RHS) {
  // Start out by assuming that LHS and RHS are not negated.
  return ::implies(LHS, false, RHS, false);
}
}
}
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/BodyFarm.h | //== BodyFarm.h - Factory for conjuring up fake bodies -------------*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// BodyFarm is a factory for creating faux implementations for functions/methods
// for analysis purposes.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_ANALYSIS_BODYFARM_H
#define LLVM_CLANG_LIB_ANALYSIS_BODYFARM_H
#include "clang/AST/DeclBase.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
namespace clang {
class ASTContext;
class FunctionDecl;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class Stmt;
class CodeInjector;
/// BodyFarm is a factory that conjures up fake ("faux") bodies for functions
/// and methods for analysis purposes. Synthesized bodies are memoized per
/// declaration in the Bodies map.
class BodyFarm {
public:
  BodyFarm(ASTContext &C, CodeInjector *injector) : C(C), Injector(injector) {}
  /// Factory method for creating bodies for ordinary functions.
  Stmt *getBody(const FunctionDecl *D);
  /// Factory method for creating bodies for Objective-C properties.
  Stmt *getBody(const ObjCMethodDecl *D);
private:
  // Maps a declaration to its (possibly absent) synthesized body.
  typedef llvm::DenseMap<const Decl *, Optional<Stmt *> > BodyMap;
  ASTContext &C;          // Used to allocate synthesized AST nodes.
  BodyMap Bodies;         // Memoization cache.
  CodeInjector *Injector; // External body provider; nullability not enforced
                          // here -- confirm against callers.
};
}
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/ScanfFormatString.cpp | //= ScanfFormatString.cpp - Analysis of printf format strings --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Handling of format string in scanf and friends. The structure of format
// strings for fscanf() are described in C99 7.19.6.2.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/FormatString.h"
#include "FormatStringParsing.h"
#include "clang/Basic/TargetInfo.h"
using clang::analyze_format_string::ArgType;
using clang::analyze_format_string::FormatStringHandler;
using clang::analyze_format_string::LengthModifier;
using clang::analyze_format_string::OptionalAmount;
using clang::analyze_format_string::ConversionSpecifier;
using clang::analyze_scanf::ScanfConversionSpecifier;
using clang::analyze_scanf::ScanfSpecifier;
using clang::UpdateOnReturn;
using namespace clang;
typedef clang::analyze_format_string::SpecifierResult<ScanfSpecifier>
ScanfSpecifierResult;
// Parses a scanf scan list ("%[...]"). On entry Beg points just past the
// '['; 'I' is the local cursor, committed back to Beg on every return path
// via UpdateOnReturn. A ']' (or "^]") in the first position is a literal
// member of the set, not the terminator. Returns true on an incomplete scan
// list (reported to H), false on success (with CS recording the closing ']').
static bool ParseScanList(FormatStringHandler &H,
                          ScanfConversionSpecifier &CS,
                          const char *&Beg, const char *E) {
  const char *I = Beg;
  const char *start = I - 1;  // Points at the '[' itself.
  UpdateOnReturn <const char*> UpdateBeg(Beg, I);
  // No more characters?
  if (I == E) {
    H.HandleIncompleteScanList(start, I);
    return true;
  }
  // Special case: ']' is the first character.
  if (*I == ']') {
    if (++I == E) {
      H.HandleIncompleteScanList(start, I - 1);
      return true;
    }
  }
  // Special case: "^]" are the first characters.
  if (I + 1 != E && I[0] == '^' && I[1] == ']') {
    I += 2;
    if (I == E) {
      H.HandleIncompleteScanList(start, I - 1);
      return true;
    }
  }
  // Look for a ']' character which denotes the end of the scan list.
  while (*I != ']') {
    if (++I == E) {
      H.HandleIncompleteScanList(start, I - 1);
      return true;
    }
  }
  CS.setEndScanList(I);
  return false;
}
// FIXME: Much of this is copy-paste from ParsePrintfSpecifier.
// We can possibly refactor.
// Parses one scanf format specifier starting from Beg. Beg is advanced past
// everything consumed on every return path (via UpdateOnReturn). Returns:
//   - the "stop" result (true) on a fail-stop error, already reported to H;
//   - the "no value" result (false) when no '%' occurs before E;
//   - a populated ScanfSpecifierResult for a parsed specifier.
static ScanfSpecifierResult ParseScanfSpecifier(FormatStringHandler &H,
                                                const char *&Beg,
                                                const char *E,
                                                unsigned &argIndex,
                                                const LangOptions &LO,
                                                const TargetInfo &Target) {
  using namespace clang::analyze_scanf;
  const char *I = Beg;
  const char *Start = nullptr;
  UpdateOnReturn <const char*> UpdateBeg(Beg, I);
  // Look for a '%' character that indicates the start of a format specifier.
  for ( ; I != E ; ++I) {
    char c = *I;
    if (c == '\0') {
      // Detect spurious null characters, which are likely errors.
      H.HandleNullChar(I);
      return true;
    }
    if (c == '%') {
      Start = I++;  // Record the start of the format specifier.
      break;
    }
  }
  // No format specifier found?
  if (!Start)
    return false;
  if (I == E) {
    // No more characters left?
    H.HandleIncompleteSpecifier(Start, E - Start);
    return true;
  }
  ScanfSpecifier FS;
  // Optional "N$" positional-argument designator.
  if (ParseArgPosition(H, FS, Start, I, E))
    return true;
  if (I == E) {
    // No more characters left?
    H.HandleIncompleteSpecifier(Start, E - Start);
    return true;
  }
  // Look for '*' flag if it is present.
  if (*I == '*') {
    FS.setSuppressAssignment(I);
    if (++I == E) {
      H.HandleIncompleteSpecifier(Start, E - Start);
      return true;
    }
  }
  // Look for the field width (if any). Unlike printf, this is either
  // a fixed integer or isn't present.
  const OptionalAmount &Amt = clang::analyze_format_string::ParseAmount(I, E);
  if (Amt.getHowSpecified() != OptionalAmount::NotSpecified) {
    assert(Amt.getHowSpecified() == OptionalAmount::Constant);
    FS.setFieldWidth(Amt);
    if (I == E) {
      // No more characters left?
      H.HandleIncompleteSpecifier(Start, E - Start);
      return true;
    }
  }
  // Look for the length modifier.
  if (ParseLengthModifier(FS, I, E, LO, /*scanf=*/true) && I == E) {
    // No more characters left?
    H.HandleIncompleteSpecifier(Start, E - Start);
    return true;
  }
  // Detect spurious null characters, which are likely errors.
  if (*I == '\0') {
    H.HandleNullChar(I);
    return true;
  }
  // Finally, look for the conversion specifier.
  const char *conversionPosition = I++;
  ScanfConversionSpecifier::Kind k = ScanfConversionSpecifier::InvalidSpecifier;
  switch (*conversionPosition) {
    default:
      break;
    case '%': k = ConversionSpecifier::PercentArg;   break;
    case 'A': k = ConversionSpecifier::AArg; break;
    case 'E': k = ConversionSpecifier::EArg; break;
    case 'F': k = ConversionSpecifier::FArg; break;
    case 'G': k = ConversionSpecifier::GArg; break;
    case 'X': k = ConversionSpecifier::XArg; break;
    case 'a': k = ConversionSpecifier::aArg; break;
    case 'd': k = ConversionSpecifier::dArg; break;
    case 'e': k = ConversionSpecifier::eArg; break;
    case 'f': k = ConversionSpecifier::fArg; break;
    case 'g': k = ConversionSpecifier::gArg; break;
    case 'i': k = ConversionSpecifier::iArg; break;
    case 'n': k = ConversionSpecifier::nArg; break;
    case 'c': k = ConversionSpecifier::cArg; break;
    case 'C': k = ConversionSpecifier::CArg; break;
    case 'S': k = ConversionSpecifier::SArg; break;
    case '[': k = ConversionSpecifier::ScanListArg; break;
    case 'u': k = ConversionSpecifier::uArg; break;
    case 'x': k = ConversionSpecifier::xArg; break;
    case 'o': k = ConversionSpecifier::oArg; break;
    case 's': k = ConversionSpecifier::sArg; break;
    case 'p': k = ConversionSpecifier::pArg; break;
    // Apple extensions
    // Apple-specific
    case 'D':
      if (Target.getTriple().isOSDarwin())
        k = ConversionSpecifier::DArg;
      break;
    case 'O':
      if (Target.getTriple().isOSDarwin())
        k = ConversionSpecifier::OArg;
      break;
    case 'U':
      if (Target.getTriple().isOSDarwin())
        k = ConversionSpecifier::UArg;
      break;
  }
  ScanfConversionSpecifier CS(conversionPosition, k);
  if (k == ScanfConversionSpecifier::ScanListArg) {
    if (ParseScanList(H, CS, I, E))
      return true;
  }
  FS.setConversionSpecifier(CS);
  // Only conversions that actually store into an argument -- not suppressed
  // with '*' and not explicitly positional -- advance the implicit index.
  if (CS.consumesDataArgument() && !FS.getSuppressAssignment()
      && !FS.usesPositionalArg())
    FS.setArgIndex(argIndex++);
  // FIXME: '%' and '*' doesn't make sense. Issue a warning.
  // FIXME: 'ConsumedSoFar' and '*' doesn't make sense.
  if (k == ScanfConversionSpecifier::InvalidSpecifier) {
    // Assume the conversion takes one argument.
    return !H.HandleInvalidScanfConversionSpecifier(FS, Beg, I - Beg);
  }
  return ScanfSpecifierResult(Start, FS);
}
// Returns the expected argument type (always a pointer for scanf) implied by
// this specifier's conversion and length modifier. ArgType::Invalid() means
// the combination is disallowed; a default-constructed ArgType() means
// "unknown / unchecked" (see the FIXMEs below).
ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
  const ScanfConversionSpecifier &CS = getConversionSpecifier();
  if (!CS.consumesDataArgument())
    return ArgType::Invalid();
  switch(CS.getKind()) {
    // Signed int.
    case ConversionSpecifier::dArg:
    case ConversionSpecifier::DArg:
    case ConversionSpecifier::iArg:
      switch (LM.getKind()) {
        case LengthModifier::None:
          return ArgType::PtrTo(Ctx.IntTy);
        case LengthModifier::AsChar:
          return ArgType::PtrTo(ArgType::AnyCharTy);
        case LengthModifier::AsShort:
          return ArgType::PtrTo(Ctx.ShortTy);
        case LengthModifier::AsLong:
          return ArgType::PtrTo(Ctx.LongTy);
        case LengthModifier::AsLongLong:
        case LengthModifier::AsQuad:
          return ArgType::PtrTo(Ctx.LongLongTy);
        case LengthModifier::AsInt64:
          return ArgType::PtrTo(ArgType(Ctx.LongLongTy, "__int64"));
        case LengthModifier::AsIntMax:
          return ArgType::PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t"));
        case LengthModifier::AsSizeT:
          // FIXME: ssize_t.
          return ArgType();
        case LengthModifier::AsPtrDiff:
          return ArgType::PtrTo(ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"));
        case LengthModifier::AsLongDouble:
          // GNU extension.
          return ArgType::PtrTo(Ctx.LongLongTy);
        case LengthModifier::AsAllocate:
        case LengthModifier::AsMAllocate:
        case LengthModifier::AsInt32:
        case LengthModifier::AsInt3264:
        case LengthModifier::AsWide:
          return ArgType::Invalid();
      }
    // Unsigned int.
    case ConversionSpecifier::oArg:
    case ConversionSpecifier::OArg:
    case ConversionSpecifier::uArg:
    case ConversionSpecifier::UArg:
    case ConversionSpecifier::xArg:
    case ConversionSpecifier::XArg:
      switch (LM.getKind()) {
        case LengthModifier::None:
          return ArgType::PtrTo(Ctx.UnsignedIntTy);
        case LengthModifier::AsChar:
          return ArgType::PtrTo(Ctx.UnsignedCharTy);
        case LengthModifier::AsShort:
          return ArgType::PtrTo(Ctx.UnsignedShortTy);
        case LengthModifier::AsLong:
          return ArgType::PtrTo(Ctx.UnsignedLongTy);
        case LengthModifier::AsLongLong:
        case LengthModifier::AsQuad:
          return ArgType::PtrTo(Ctx.UnsignedLongLongTy);
        case LengthModifier::AsInt64:
          return ArgType::PtrTo(ArgType(Ctx.UnsignedLongLongTy, "unsigned __int64"));
        case LengthModifier::AsIntMax:
          return ArgType::PtrTo(ArgType(Ctx.getUIntMaxType(), "uintmax_t"));
        case LengthModifier::AsSizeT:
          return ArgType::PtrTo(ArgType(Ctx.getSizeType(), "size_t"));
        case LengthModifier::AsPtrDiff:
          // FIXME: Unsigned version of ptrdiff_t?
          return ArgType();
        case LengthModifier::AsLongDouble:
          // GNU extension.
          return ArgType::PtrTo(Ctx.UnsignedLongLongTy);
        case LengthModifier::AsAllocate:
        case LengthModifier::AsMAllocate:
        case LengthModifier::AsInt32:
        case LengthModifier::AsInt3264:
        case LengthModifier::AsWide:
          return ArgType::Invalid();
      }
    // Float.
    case ConversionSpecifier::aArg:
    case ConversionSpecifier::AArg:
    case ConversionSpecifier::eArg:
    case ConversionSpecifier::EArg:
    case ConversionSpecifier::fArg:
    case ConversionSpecifier::FArg:
    case ConversionSpecifier::gArg:
    case ConversionSpecifier::GArg:
      switch (LM.getKind()) {
        case LengthModifier::None:
          return ArgType::PtrTo(Ctx.FloatTy);
        case LengthModifier::AsLong:
          return ArgType::PtrTo(Ctx.DoubleTy);
        case LengthModifier::AsLongDouble:
          return ArgType::PtrTo(Ctx.LongDoubleTy);
        default:
          return ArgType::Invalid();
      }
    // Char, string and scanlist.
    case ConversionSpecifier::cArg:
    case ConversionSpecifier::sArg:
    case ConversionSpecifier::ScanListArg:
      switch (LM.getKind()) {
        case LengthModifier::None:
          return ArgType::PtrTo(ArgType::AnyCharTy);
        case LengthModifier::AsLong:
        case LengthModifier::AsWide:
          return ArgType::PtrTo(ArgType(Ctx.getWideCharType(), "wchar_t"));
        case LengthModifier::AsAllocate:
        case LengthModifier::AsMAllocate:
          return ArgType::PtrTo(ArgType::CStrTy);
        case LengthModifier::AsShort:
          if (Ctx.getTargetInfo().getTriple().isOSMSVCRT())
            return ArgType::PtrTo(ArgType::AnyCharTy);
          // Intentionally falls through to the default (invalid) case when
          // the target is not the MSVC runtime.
        default:
          return ArgType::Invalid();
      }
    case ConversionSpecifier::CArg:
    case ConversionSpecifier::SArg:
      // FIXME: Mac OS X specific?
      switch (LM.getKind()) {
        case LengthModifier::None:
        case LengthModifier::AsWide:
          return ArgType::PtrTo(ArgType(Ctx.getWideCharType(), "wchar_t"));
        case LengthModifier::AsAllocate:
        case LengthModifier::AsMAllocate:
          return ArgType::PtrTo(ArgType(ArgType::WCStrTy, "wchar_t *"));
        case LengthModifier::AsShort:
          if (Ctx.getTargetInfo().getTriple().isOSMSVCRT())
            return ArgType::PtrTo(ArgType::AnyCharTy);
          // Intentionally falls through to the default (invalid) case when
          // the target is not the MSVC runtime.
        default:
          return ArgType::Invalid();
      }
    // Pointer.
    case ConversionSpecifier::pArg:
      return ArgType::PtrTo(ArgType::CPointerTy);
    // Write-back.
    case ConversionSpecifier::nArg:
      switch (LM.getKind()) {
        case LengthModifier::None:
          return ArgType::PtrTo(Ctx.IntTy);
        case LengthModifier::AsChar:
          return ArgType::PtrTo(Ctx.SignedCharTy);
        case LengthModifier::AsShort:
          return ArgType::PtrTo(Ctx.ShortTy);
        case LengthModifier::AsLong:
          return ArgType::PtrTo(Ctx.LongTy);
        case LengthModifier::AsLongLong:
        case LengthModifier::AsQuad:
          return ArgType::PtrTo(Ctx.LongLongTy);
        case LengthModifier::AsInt64:
          return ArgType::PtrTo(ArgType(Ctx.LongLongTy, "__int64"));
        case LengthModifier::AsIntMax:
          return ArgType::PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t"));
        case LengthModifier::AsSizeT:
          return ArgType(); // FIXME: ssize_t
        case LengthModifier::AsPtrDiff:
          return ArgType::PtrTo(ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"));
        case LengthModifier::AsLongDouble:
          return ArgType(); // FIXME: Is this a known extension?
        case LengthModifier::AsAllocate:
        case LengthModifier::AsMAllocate:
        case LengthModifier::AsInt32:
        case LengthModifier::AsInt3264:
        case LengthModifier::AsWide:
          return ArgType::Invalid();
      }
    default:
      break;
  }
  return ArgType();
}
// Rewrites this specifier (length modifier, conversion, and possibly the
// field width) so that it matches the actual argument type QT (RawQT is the
// unadjusted type, used to recover array bounds). Returns true if a fix was
// produced; false when the specifier should be left alone (%n, non-pointer
// arguments, non-builtin pointees, or unrecognized builtin kinds).
bool ScanfSpecifier::fixType(QualType QT, QualType RawQT,
                             const LangOptions &LangOpt,
                             ASTContext &Ctx) {
  // %n is different from other conversion specifiers; don't try to fix it.
  if (CS.getKind() == ConversionSpecifier::nArg)
    return false;
  if (!QT->isPointerType())
    return false;
  QualType PT = QT->getPointeeType();
  // If it's an enum, get its underlying type.
  if (const EnumType *ETy = PT->getAs<EnumType>())
    PT = ETy->getDecl()->getIntegerType();
  const BuiltinType *BT = PT->getAs<BuiltinType>();
  if (!BT)
    return false;
  // Pointer to a character.
  if (PT->isAnyCharacterType()) {
    CS.setKind(ConversionSpecifier::sArg);
    if (PT->isWideCharType())
      LM.setKind(LengthModifier::AsWideChar);
    else
      LM.setKind(LengthModifier::None);
    // If we know the target array length, we can use it as a field width.
    // The width is size - 1 to leave room for the null terminator.
    if (const ConstantArrayType *CAT = Ctx.getAsConstantArrayType(RawQT)) {
      if (CAT->getSizeModifier() == ArrayType::Normal)
        FieldWidth = OptionalAmount(OptionalAmount::Constant,
                                    CAT->getSize().getZExtValue() - 1,
                                    "", 0, false);
    }
    return true;
  }
  // Figure out the length modifier.
  switch (BT->getKind()) {
    // no modifier
    case BuiltinType::UInt:
    case BuiltinType::Int:
    case BuiltinType::Float:
      LM.setKind(LengthModifier::None);
      break;
    // hh
    case BuiltinType::Char_U:
    case BuiltinType::UChar:
    case BuiltinType::Char_S:
    case BuiltinType::SChar:
      LM.setKind(LengthModifier::AsChar);
      break;
    // h
    case BuiltinType::Short:
    case BuiltinType::UShort:
      LM.setKind(LengthModifier::AsShort);
      break;
    // l
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::Double:
      LM.setKind(LengthModifier::AsLong);
      break;
    // ll
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
      LM.setKind(LengthModifier::AsLongLong);
      break;
    // L
    case BuiltinType::LongDouble:
      LM.setKind(LengthModifier::AsLongDouble);
      break;
    // Don't know.
    default:
      return false;
  }
  // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99.
  if (isa<TypedefType>(PT) && (LangOpt.C99 || LangOpt.CPlusPlus11))
    namedTypeToLengthModifier(PT, LM);
  // If fixing the length modifier was enough, we are done.
  if (hasValidLengthModifier(Ctx.getTargetInfo())) {
    const analyze_scanf::ArgType &AT = getArgType(Ctx);
    if (AT.isValid() && AT.matchesType(Ctx, QT))
      return true;
  }
  // Figure out the conversion specifier.
  if (PT->isRealFloatingType())
    CS.setKind(ConversionSpecifier::fArg);
  else if (PT->isSignedIntegerType())
    CS.setKind(ConversionSpecifier::dArg);
  else if (PT->isUnsignedIntegerType())
    CS.setKind(ConversionSpecifier::uArg);
  else
    llvm_unreachable("Unexpected type");
  return true;
}
void ScanfSpecifier::toString(raw_ostream &os) const {
  // Emit the specifier in canonical order:
  //   % <pos$> <*> <width> <length-modifier> <conversion>
  os << '%';
  if (usesPositionalArg())
    os << getPositionalArgIndex() << "$";
  if (SuppressAssignment)
    os << "*";
  FieldWidth.toString(os);
  os << LM.toString() << CS.toString();
}
bool clang::analyze_format_string::ParseScanfString(FormatStringHandler &H,
                                                    const char *I,
                                                    const char *E,
                                                    const LangOptions &LO,
                                                    const TargetInfo &Target) {
  unsigned argIndex = 0;
  // Walk the whole string, handing each complete specifier to the handler.
  while (I != E) {
    const ScanfSpecifierResult &FSR =
        ParseScanfSpecifier(H, I, E, argIndex, LO, Target);
    // A fail-stop error aborts all further processing.
    if (FSR.shouldStop())
      return true;
    // Either the string was exhausted or the error was recoverable;
    // keep scanning from the updated position.
    if (!FSR.hasValue())
      continue;
    // A specifier was parsed: pass it to the callback; a false return from
    // the handler requests that we stop.
    if (!H.HandleScanfSpecifier(FSR.getValue(), FSR.getStart(),
                                I - FSR.getStart()))
      return true;
  }
  assert(I == E && "Format string not exhausted");
  return false;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/ReachableCode.cpp | //=- ReachableCodePathInsensitive.cpp ---------------------------*- C++ --*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a flow-sensitive, path-insensitive analysis of
// determining reachable blocks within a CFG.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/ReachableCode.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
using namespace clang;
//===----------------------------------------------------------------------===//
// Core Reachability Analysis routines.
//===----------------------------------------------------------------------===//
static bool isEnumConstant(const Expr *Ex) {
const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex);
if (!DR)
return false;
return isa<EnumConstantDecl>(DR->getDecl());
}
static bool isTrivialExpression(const Expr *Ex) {
Ex = Ex->IgnoreParenCasts();
return isa<IntegerLiteral>(Ex) || isa<StringLiteral>(Ex) ||
isa<CXXBoolLiteralExpr>(Ex) || isa<ObjCBoolLiteralExpr>(Ex) ||
isa<CharacterLiteral>(Ex) ||
isEnumConstant(Ex);
}
static bool isTrivialDoWhile(const CFGBlock *B, const Stmt *S) {
// Check if the block ends with a do...while() and see if 'S' is the
// condition.
if (const Stmt *Term = B->getTerminator()) {
if (const DoStmt *DS = dyn_cast<DoStmt>(Term)) {
const Expr *Cond = DS->getCond()->IgnoreParenCasts();
return Cond == S && isTrivialExpression(Cond);
}
}
return false;
}
// Returns true if 'S' is (part of) a 'return' statement whose unreachability
// follows directly from the unreachability of 'B' -- i.e. the return is not
// reachable through some other control-flow path.
static bool isDeadReturn(const CFGBlock *B, const Stmt *S) {
  // Look to see if the current control flow ends with a 'return', and see if
  // 'S' is a substatement. The 'return' may not be the last element in the
  // block, or may be in a subsequent block because of destructors.
  const CFGBlock *Current = B;
  while (true) {
    for (CFGBlock::const_reverse_iterator I = Current->rbegin(),
                                          E = Current->rend();
         I != E; ++I) {
      if (Optional<CFGStmt> CS = I->getAs<CFGStmt>()) {
        if (const ReturnStmt *RS = dyn_cast<ReturnStmt>(CS->getStmt())) {
          if (RS == S)
            return true;
          if (const Expr *RE = RS->getRetValue()) {
            RE = RE->IgnoreParenCasts();
            if (RE == S)
              return true;
            ParentMap PM(const_cast<Expr *>(RE));
            // If 'S' is in the ParentMap, it is a subexpression of
            // the return statement.
            return PM.getParent(S);
          }
        }
        break;
      }
    }
    // Note also that we are restricting the search for the return statement
    // to stop at control-flow; only part of a return statement may be dead,
    // without the whole return statement being dead.
    if (Current->getTerminator().isTemporaryDtorsBranch()) {
      // Temporary destructors have a predictable control flow, thus we want to
      // look into the next block for the return statement.
      // We look into the false branch, as we know the true branch only contains
      // the call to the destructor.
      assert(Current->succ_size() == 2);
      Current = *(Current->succ_begin() + 1);
    } else if (!Current->getTerminator() && Current->succ_size() == 1) {
      // If there is only one successor, we're not dealing with outgoing control
      // flow. Thus, look into the next block.
      Current = *Current->succ_begin();
      if (Current->pred_size() > 1) {
        // If there is more than one predecessor, we're dealing with incoming
        // control flow - if the return statement is in that block, it might
        // well be reachable via a different control flow, thus it's not dead.
        return false;
      }
    } else {
      // We hit control flow or a dead end. Stop searching.
      return false;
    }
  }
  llvm_unreachable("Broke out of infinite loop.");
}
// Walks up the macro expansion chain from 'Loc' and returns the outermost
// macro location (the last macro ID seen before a non-macro location).
// Precondition: 'Loc' is a macro location, so at least one step is taken.
static SourceLocation getTopMostMacro(SourceLocation Loc, SourceManager &SM) {
  assert(Loc.isMacroID());
  SourceLocation Outermost;
  do {
    Outermost = Loc;
    Loc = SM.getImmediateMacroCallerLoc(Loc);
  } while (Loc.isMacroID());
  return Outermost;
}
/// Returns true if the statement is expanded from a configuration macro.
static bool isExpandedFromConfigurationMacro(const Stmt *S,
                                             Preprocessor &PP,
                                             bool IgnoreYES_NO = false) {
  // FIXME: This is not very precise. Here we just check to see if the
  // value comes from a macro, but we can do much better. This is likely
  // to be over conservative. This logic is factored into a separate function
  // so that we can refine it later.
  SourceLocation L = S->getLocStart();
  if (!L.isMacroID())
    return false;
  if (!IgnoreYES_NO)
    return true;
  // The Objective-C constants 'YES' and 'NO' are defined as macros; do not
  // treat them as configuration values.
  SourceManager &SM = PP.getSourceManager();
  SourceLocation TopL = getTopMostMacro(L, SM);
  StringRef MacroName = PP.getImmediateMacroName(TopL);
  return MacroName != "YES" && MacroName != "NO";
}
static bool isConfigurationValue(const ValueDecl *D, Preprocessor &PP);
/// Returns true if the statement represents a configuration value.
///
/// A configuration value is something usually determined at compile-time
/// to conditionally always execute some branch. Such guards are for
/// "sometimes unreachable" code. Such code is usually not interesting
/// to report as unreachable, and may mask truly unreachable code within
/// those blocks.
///
/// \param SilenceableCondVal if non-null, receives the source range of the
///        first integer/boolean literal (or '!'-expression) encountered,
///        which is the condition value a user could silence.
/// \param IncludeIntegers whether bare integer literals count as
///        configuration values.
/// \param WrappedInParens whether this expression sits directly inside a
///        non-macro '()' sigil, which marks it as deliberate.
static bool isConfigurationValue(const Stmt *S,
                                 Preprocessor &PP,
                                 SourceRange *SilenceableCondVal = nullptr,
                                 bool IncludeIntegers = true,
                                 bool WrappedInParens = false) {
  if (!S)
    return false;
  if (const Expr *Ex = dyn_cast<Expr>(S))
    S = Ex->IgnoreCasts();
  // Special case looking for the sigil '()' around an integer literal.
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(S))
    if (!PE->getLocStart().isMacroID())
      return isConfigurationValue(PE->getSubExpr(), PP, SilenceableCondVal,
                                  IncludeIntegers, true);
  if (const Expr *Ex = dyn_cast<Expr>(S))
    S = Ex->IgnoreCasts();
  bool IgnoreYES_NO = false;
  switch (S->getStmtClass()) {
    case Stmt::CallExprClass: {
      // Calls to constexpr functions can be compile-time constants.
      const FunctionDecl *Callee =
        dyn_cast_or_null<FunctionDecl>(cast<CallExpr>(S)->getCalleeDecl());
      return Callee ? Callee->isConstexpr() : false;
    }
    case Stmt::DeclRefExprClass:
      return isConfigurationValue(cast<DeclRefExpr>(S)->getDecl(), PP);
    case Stmt::ObjCBoolLiteralExprClass:
      IgnoreYES_NO = true;
      LLVM_FALLTHROUGH; // HLSL Change
    case Stmt::CXXBoolLiteralExprClass:
    case Stmt::IntegerLiteralClass: {
      const Expr *E = cast<Expr>(S);
      if (IncludeIntegers) {
        // Record only the first (outermost) silenceable condition value.
        if (SilenceableCondVal && !SilenceableCondVal->getBegin().isValid())
          *SilenceableCondVal = E->getSourceRange();
        return WrappedInParens || isExpandedFromConfigurationMacro(E, PP, IgnoreYES_NO);
      }
      return false;
    }
    case Stmt::MemberExprClass:
      return isConfigurationValue(cast<MemberExpr>(S)->getMemberDecl(), PP);
    case Stmt::UnaryExprOrTypeTraitExprClass:
      // sizeof/alignof expressions are always compile-time values.
      return true;
    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *B = cast<BinaryOperator>(S);
      // Only include raw integers (not enums) as configuration
      // values if they are used in a logical or comparison operator
      // (not arithmetic).
      IncludeIntegers &= (B->isLogicalOp() || B->isComparisonOp());
      return isConfigurationValue(B->getLHS(), PP, SilenceableCondVal,
                                  IncludeIntegers) ||
             isConfigurationValue(B->getRHS(), PP, SilenceableCondVal,
                                  IncludeIntegers);
    }
    case Stmt::UnaryOperatorClass: {
      const UnaryOperator *UO = cast<UnaryOperator>(S);
      if (SilenceableCondVal)
        *SilenceableCondVal = UO->getSourceRange();
      // Only logical-not ('!') of a configuration value qualifies.
      return UO->getOpcode() == UO_LNot &&
             isConfigurationValue(UO->getSubExpr(), PP, SilenceableCondVal,
                                  IncludeIntegers, WrappedInParens);
    }
    default:
      return false;
  }
}
static bool isConfigurationValue(const ValueDecl *D, Preprocessor &PP) {
if (const EnumConstantDecl *ED = dyn_cast<EnumConstantDecl>(D))
return isConfigurationValue(ED->getInitExpr(), PP);
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
// As a heuristic, treat globals as configuration values. Note
// that we only will get here if Sema evaluated this
// condition to a constant expression, which means the global
// had to be declared in a way to be a truly constant value.
// We could generalize this to local variables, but it isn't
// clear if those truly represent configuration values that
// gate unreachable code.
if (!VD->hasLocalStorage())
return true;
// As a heuristic, locals that have been marked 'const' explicitly
// can be treated as configuration values as well.
return VD->getType().isLocalConstQualified();
}
return false;
}
/// Returns true if we should always explore all successors of a block.
static bool shouldTreatSuccessorsAsReachable(const CFGBlock *B,
Preprocessor &PP) {
if (const Stmt *Term = B->getTerminator()) {
if (isa<SwitchStmt>(Term))
return true;
// Specially handle '||' and '&&'.
if (isa<BinaryOperator>(Term)) {
return isConfigurationValue(Term, PP);
}
}
const Stmt *Cond = B->getTerminatorCondition(/* stripParens */ false);
return isConfigurationValue(Cond, PP);
}
static unsigned scanFromBlock(const CFGBlock *Start,
llvm::BitVector &Reachable,
Preprocessor *PP,
bool IncludeSometimesUnreachableEdges) {
unsigned count = 0;
// Prep work queue
SmallVector<const CFGBlock*, 32> WL;
// The entry block may have already been marked reachable
// by the caller.
if (!Reachable[Start->getBlockID()]) {
++count;
Reachable[Start->getBlockID()] = true;
}
WL.push_back(Start);
// Find the reachable blocks from 'Start'.
while (!WL.empty()) {
const CFGBlock *item = WL.pop_back_val();
// There are cases where we want to treat all successors as reachable.
// The idea is that some "sometimes unreachable" code is not interesting,
// and that we should forge ahead and explore those branches anyway.
// This allows us to potentially uncover some "always unreachable" code
// within the "sometimes unreachable" code.
// Look at the successors and mark then reachable.
Optional<bool> TreatAllSuccessorsAsReachable;
if (!IncludeSometimesUnreachableEdges)
TreatAllSuccessorsAsReachable = false;
for (CFGBlock::const_succ_iterator I = item->succ_begin(),
E = item->succ_end(); I != E; ++I) {
const CFGBlock *B = *I;
if (!B) do {
const CFGBlock *UB = I->getPossiblyUnreachableBlock();
if (!UB)
break;
if (!TreatAllSuccessorsAsReachable.hasValue()) {
assert(PP);
TreatAllSuccessorsAsReachable =
shouldTreatSuccessorsAsReachable(item, *PP);
}
if (TreatAllSuccessorsAsReachable.getValue()) {
B = UB;
break;
}
}
while (false);
if (B) {
unsigned blockID = B->getBlockID();
if (!Reachable[blockID]) {
Reachable.set(blockID);
WL.push_back(B);
++count;
}
}
}
}
return count;
}
// Convenience wrapper: scan with a Preprocessor available, so that
// "sometimes unreachable" edges are also explored.
static unsigned scanMaybeReachableFromBlock(const CFGBlock *Start,
                                            Preprocessor &PP,
                                            llvm::BitVector &Reachable) {
  return scanFromBlock(Start, Reachable, &PP,
                       /*IncludeSometimesUnreachableEdges=*/true);
}
//===----------------------------------------------------------------------===//
// Dead Code Scanner.
//===----------------------------------------------------------------------===//
namespace {
// Scans backwards from known-dead blocks to find the "roots" of dead
// regions, so that a single warning can be emitted per region instead of
// one per unreachable block.
class DeadCodeScan {
  llvm::BitVector Visited;     // Blocks already enqueued by this scan.
  llvm::BitVector &Reachable;  // Shared reachability map; updated as we go.
  SmallVector<const CFGBlock *, 10> WorkList;
  Preprocessor &PP;

  // Deferred (block, stmt) candidates inside dead cycles; reported with
  // the earliest source location only if no dead root is found.
  typedef SmallVector<std::pair<const CFGBlock *, const Stmt *>, 12>
  DeferredLocsTy;

  DeferredLocsTy DeferredLocs;

public:
  DeadCodeScan(llvm::BitVector &reachable, Preprocessor &PP)
    : Visited(reachable.size()),
      Reachable(reachable),
      PP(PP) {}

  // Add 'block' to the worklist unless it is reachable or already visited.
  void enqueue(const CFGBlock *block);

  // Scan backwards from 'Start', reporting dead code via CB.  Returns the
  // number of blocks newly marked reachable.
  unsigned scanBackwards(const CFGBlock *Start,
                         clang::reachable_code::Callback &CB);

  // True when every predecessor of 'Block' is reachable, i.e. 'Block'
  // is a root of a dead region.
  bool isDeadCodeRoot(const CFGBlock *Block);

  // Returns the first statement in 'Block' suitable for reporting,
  // or null if none exists.
  const Stmt *findDeadCode(const CFGBlock *Block);

  // Classify and emit one unreachable-code report for statement S.
  void reportDeadCode(const CFGBlock *B,
                      const Stmt *S,
                      clang::reachable_code::Callback &CB);
};
}
// Queue 'block' for the backwards scan, skipping blocks that are live or
// that this scan has already seen.
void DeadCodeScan::enqueue(const CFGBlock *block) {
  const unsigned ID = block->getBlockID();
  if (Reachable[ID] || Visited[ID])
    return;
  Visited[ID] = true;
  WorkList.push_back(block);
}
// A block is a dead-code root if all of its (non-null) predecessors are
// reachable.  Dead predecessors are enqueued so the scan keeps walking
// backwards toward the real root.
bool DeadCodeScan::isDeadCodeRoot(const clang::CFGBlock *Block) {
  bool IsRoot = true;

  for (CFGBlock::const_pred_iterator I = Block->pred_begin(),
                                     E = Block->pred_end();
       I != E; ++I) {
    const CFGBlock *Pred = *I;
    if (!Pred)
      continue;

    unsigned ID = Pred->getBlockID();
    if (Visited[ID]) {
      IsRoot = false;
      continue;
    }
    if (!Reachable[ID]) {
      IsRoot = false;
      Visited[ID] = true;
      WorkList.push_back(Pred);
    }
  }

  return IsRoot;
}
static bool isValidDeadStmt(const Stmt *S) {
if (S->getLocStart().isInvalid())
return false;
if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(S))
return BO->getOpcode() != BO_Comma;
return true;
}
// Find the first reportable statement in 'Block': prefer a statement in
// the block body, then fall back to the terminator.
const Stmt *DeadCodeScan::findDeadCode(const clang::CFGBlock *Block) {
  for (CFGBlock::const_iterator I = Block->begin(), E = Block->end();
       I != E; ++I) {
    Optional<CFGStmt> CS = I->getAs<CFGStmt>();
    if (!CS)
      continue;
    const Stmt *S = CS->getStmt();
    if (isValidDeadStmt(S))
      return S;
  }

  // Terminators are usable too, except temporary-destructor branches.
  CFGTerminator T = Block->getTerminator();
  if (T && !T.isTemporaryDtorsBranch()) {
    const Stmt *S = T.getStmt();
    if (isValidDeadStmt(S))
      return S;
  }

  return nullptr;
}
// HLSL Change: changed calling convention to __cdecl
// Comparator for llvm::array_pod_sort: order deferred reports by the
// source location of their anchor statement.
static int __cdecl SrcCmp(const std::pair<const CFGBlock *, const Stmt *> *p1,
                          const std::pair<const CFGBlock *, const Stmt *> *p2) {
  SourceLocation L1 = p1->second->getLocStart();
  SourceLocation L2 = p2->second->getLocStart();
  if (L1 < L2)
    return -1;
  return L2 < L1 ? 1 : 0;
}
// Walk backwards from 'Start' through dead blocks, reporting one warning
// per dead region via CB.  Returns the number of blocks newly marked
// reachable (dead regions are marked once reported so they are not
// reported again).
unsigned DeadCodeScan::scanBackwards(const clang::CFGBlock *Start,
                                     clang::reachable_code::Callback &CB) {
  unsigned count = 0;
  enqueue(Start);
  while (!WorkList.empty()) {
    const CFGBlock *Block = WorkList.pop_back_val();
    // It is possible that this block has been marked reachable after
    // it was enqueued.
    if (Reachable[Block->getBlockID()])
      continue;
    // Look for any dead code within the block.
    const Stmt *S = findDeadCode(Block);
    if (!S) {
      // No dead code. Possibly an empty block. Look at dead predecessors.
      for (CFGBlock::const_pred_iterator I = Block->pred_begin(),
           E = Block->pred_end(); I != E; ++I) {
        if (const CFGBlock *predBlock = *I)
          enqueue(predBlock);
      }
      continue;
    }
    // Specially handle macro-expanded code: suppress the report, but mark
    // everything reachable from here so it is not re-reported.
    if (S->getLocStart().isMacroID()) {
      count += scanMaybeReachableFromBlock(Block, PP, Reachable);
      continue;
    }
    if (isDeadCodeRoot(Block)) {
      reportDeadCode(Block, S, CB);
      count += scanMaybeReachableFromBlock(Block, PP, Reachable);
    }
    else {
      // Record this statement as the possibly best location in a
      // strongly-connected component of dead code for emitting a
      // warning.
      DeferredLocs.push_back(std::make_pair(Block, S));
    }
  }
  // If we didn't find a dead root, then report the dead code with the
  // earliest location.
  if (!DeferredLocs.empty()) {
    llvm::array_pod_sort(DeferredLocs.begin(), DeferredLocs.end(), SrcCmp);
    for (DeferredLocsTy::iterator I = DeferredLocs.begin(),
         E = DeferredLocs.end(); I != E; ++I) {
      const CFGBlock *Block = I->first;
      // A deferred block may have been covered by an earlier report.
      if (Reachable[Block->getBlockID()])
        continue;
      reportDeadCode(Block, I->second, CB);
      count += scanMaybeReachableFromBlock(Block, PP, Reachable);
    }
  }
  return count;
}
// Compute the best source location at which to report statement S as
// unreachable.  R1 and R2 are set to sub-expression ranges used as
// highlight ranges for the diagnostic (empty when not applicable).
static SourceLocation GetUnreachableLoc(const Stmt *S,
                                        SourceRange &R1,
                                        SourceRange &R2) {
  R1 = R2 = SourceRange();
  if (const Expr *Ex = dyn_cast<Expr>(S))
    S = Ex->IgnoreParenImpCasts();
  // For operator-like expressions, point at the operator token and
  // highlight the operand range(s); otherwise fall through to the
  // default at the bottom.
  switch (S->getStmtClass()) {
    case Expr::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(S);
      return BO->getOperatorLoc();
    }
    case Expr::UnaryOperatorClass: {
      const UnaryOperator *UO = cast<UnaryOperator>(S);
      R1 = UO->getSubExpr()->getSourceRange();
      return UO->getOperatorLoc();
    }
    case Expr::CompoundAssignOperatorClass: {
      const CompoundAssignOperator *CAO = cast<CompoundAssignOperator>(S);
      R1 = CAO->getLHS()->getSourceRange();
      R2 = CAO->getRHS()->getSourceRange();
      return CAO->getOperatorLoc();
    }
    case Expr::BinaryConditionalOperatorClass:
    case Expr::ConditionalOperatorClass: {
      const AbstractConditionalOperator *CO =
        cast<AbstractConditionalOperator>(S);
      return CO->getQuestionLoc();
    }
    case Expr::MemberExprClass: {
      const MemberExpr *ME = cast<MemberExpr>(S);
      R1 = ME->getSourceRange();
      return ME->getMemberLoc();
    }
    case Expr::ArraySubscriptExprClass: {
      const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(S);
      R1 = ASE->getLHS()->getSourceRange();
      R2 = ASE->getRHS()->getSourceRange();
      return ASE->getRBracketLoc();
    }
    case Expr::CStyleCastExprClass: {
      const CStyleCastExpr *CSC = cast<CStyleCastExpr>(S);
      R1 = CSC->getSubExpr()->getSourceRange();
      return CSC->getLParenLoc();
    }
    case Expr::CXXFunctionalCastExprClass: {
      const CXXFunctionalCastExpr *CE = cast <CXXFunctionalCastExpr>(S);
      R1 = CE->getSubExpr()->getSourceRange();
      return CE->getLocStart();
    }
    case Stmt::CXXTryStmtClass: {
      // For a dead 'try', point at the first catch handler.
      return cast<CXXTryStmt>(S)->getHandler(0)->getCatchLoc();
    }
    case Expr::ObjCBridgedCastExprClass: {
      const ObjCBridgedCastExpr *CSC = cast<ObjCBridgedCastExpr>(S);
      R1 = CSC->getSubExpr()->getSourceRange();
      return CSC->getLParenLoc();
    }
    default: ;
  }
  // Default: highlight the whole statement, point at its start.
  R1 = S->getSourceRange();
  return S->getLocStart();
}
// Classify statement S and emit a single unreachable-code report through
// CB, or suppress it for cases that are not worth warning about
// (e.g. the trivial 'do { } while (0)' idiom).
void DeadCodeScan::reportDeadCode(const CFGBlock *B,
                                  const Stmt *S,
                                  clang::reachable_code::Callback &CB) {
  // Classify the unreachable code found, or suppress it in some cases.
  reachable_code::UnreachableKind UK = reachable_code::UK_Other;
  if (isa<BreakStmt>(S)) {
    UK = reachable_code::UK_Break;
  }
  else if (isTrivialDoWhile(B, S)) {
    return;
  }
  else if (isDeadReturn(B, S)) {
    UK = reachable_code::UK_Return;
  }
  // If set, the range of a condition the user could change to silence
  // the warning (passed through to the callback).
  SourceRange SilenceableCondVal;
  if (UK == reachable_code::UK_Other) {
    // Check if the dead code is part of the "loop target" of
    // a for/for-range loop.  This is the block that contains
    // the increment code.
    if (const Stmt *LoopTarget = B->getLoopTarget()) {
      SourceLocation Loc = LoopTarget->getLocStart();
      SourceRange R1(Loc, Loc), R2;
      if (const ForStmt *FS = dyn_cast<ForStmt>(LoopTarget)) {
        const Expr *Inc = FS->getInc();
        Loc = Inc->getLocStart();
        R2 = Inc->getSourceRange();
      }
      CB.HandleUnreachable(reachable_code::UK_Loop_Increment,
                           Loc, SourceRange(), SourceRange(Loc, Loc), R2);
      return;
    }
    // Check if the dead block has a predecessor whose branch has
    // a configuration value that *could* be modified to
    // silence the warning.
    CFGBlock::const_pred_iterator PI = B->pred_begin();
    if (PI != B->pred_end()) {
      if (const CFGBlock *PredBlock = PI->getPossiblyUnreachableBlock()) {
        const Stmt *TermCond =
            PredBlock->getTerminatorCondition(/* strip parens */ false);
        // Called for its side effect of populating SilenceableCondVal.
        isConfigurationValue(TermCond, PP, &SilenceableCondVal);
      }
    }
  }
  SourceRange R1, R2;
  SourceLocation Loc = GetUnreachableLoc(S, R1, R2);
  CB.HandleUnreachable(UK, Loc, SilenceableCondVal, R1, R2);
}
//===----------------------------------------------------------------------===//
// Reachability APIs.
//===----------------------------------------------------------------------===//
namespace clang { namespace reachable_code {

// Out-of-line virtual method anchor for the Callback vtable.
void Callback::anchor() { }

unsigned ScanReachableFromBlock(const CFGBlock *Start,
                                llvm::BitVector &Reachable) {
  // No Preprocessor is available here, so "sometimes unreachable" edges
  // are not explored.
  return scanFromBlock(Start, Reachable, /* Preprocessor* */ nullptr, false);
}

void FindUnreachableCode(AnalysisDeclContext &AC, Preprocessor &PP,
                         Callback &CB) {
  CFG *cfg = AC.getCFG();
  if (!cfg)
    return;

  // Scan for reachable blocks from the entrance of the CFG.
  // If there are no unreachable blocks, we're done.
  llvm::BitVector reachable(cfg->getNumBlockIDs());
  unsigned numReachable =
    scanMaybeReachableFromBlock(&cfg->getEntry(), PP, reachable);
  if (numReachable == cfg->getNumBlockIDs())
    return;

  // If there aren't explicit EH edges, we should include the 'try' dispatch
  // blocks as roots.
  if (!AC.getCFGBuildOptions().AddEHEdges) {
    for (CFG::try_block_iterator I = cfg->try_blocks_begin(),
         E = cfg->try_blocks_end() ; I != E; ++I) {
      numReachable += scanMaybeReachableFromBlock(*I, PP, reachable);
    }
    if (numReachable == cfg->getNumBlockIDs())
      return;
  }

  // There are some unreachable blocks.  We need to find the root blocks that
  // contain code that should be considered unreachable.
  for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
    const CFGBlock *block = *I;
    // A block may have been marked reachable during this loop.
    if (reachable[block->getBlockID()])
      continue;
    DeadCodeScan DS(reachable, PP);
    numReachable += DS.scanBackwards(block, CB);
    // Early exit once every block is accounted for.
    if (numReachable == cfg->getNumBlockIDs())
      return;
  }
}

}} // end namespace clang::reachable_code
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/ThreadSafetyCommon.cpp | //===- ThreadSafetyCommon.cpp ----------------------------------*- C++ --*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of the interfaces declared in ThreadSafetyCommon.h
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <algorithm>
#include <climits>
#include <vector>
using namespace clang;
using namespace threadSafety;
// From ThreadSafetyUtil.h
// Render a literal expression as a short string for diagnostics/printing.
std::string threadSafety::getSourceLiteralString(const clang::Expr *CE) {
  switch (CE->getStmtClass()) {
  case Stmt::IntegerLiteralClass:
    // Base-10, treating the APInt value as signed.
    return cast<IntegerLiteral>(CE)->getValue().toString(10, true);
  case Stmt::StringLiteralClass: {
    // Re-quote the string contents.
    std::string Result = "\"";
    Result += cast<StringLiteral>(CE)->getString();
    Result += "\"";
    return Result;
  }
  default:
    // All other literal kinds (chars, bools, floats, nullptr, ...) get a
    // generic placeholder.
    return "#lit";
  }
}
// Return true if E is a variable that points to an incomplete Phi node.
static bool isIncompletePhi(const til::SExpr *E) {
  const auto *Ph = dyn_cast<til::Phi>(E);
  return Ph && Ph->status() == til::Phi::PH_Incomplete;
}
typedef SExprBuilder::CallingContext CallingContext;
// Return the cached TIL translation for S, or null if none exists.
til::SExpr *SExprBuilder::lookupStmt(const Stmt *S) {
  auto It = SMap.find(S);
  return It == SMap.end() ? nullptr : It->second;
}
// Drive Walker over the clang CFG with this builder as the visitor, then
// return the resulting TIL CFG (Scfg is assumed to be populated by the
// builder's callbacks during the walk).
til::SCFG *SExprBuilder::buildCFG(CFGWalker &Walker) {
  Walker.walk(*this);
  return Scfg;
}
static bool isCalleeArrow(const Expr *E) {
const MemberExpr *ME = dyn_cast<MemberExpr>(E->IgnoreParenCasts());
return ME ? ME->isArrow() : false;
}
/// \brief Translate a clang expression in an attribute to a til::SExpr.
/// Constructs the context from D, DeclExp, and SelfDecl.
///
/// \param AttrExp The expression to translate.
/// \param D The declaration to which the attribute is attached.
/// \param DeclExp An expression involving the Decl to which the attribute
///        is attached.  E.g. the call to a function.
/// \param SelfDecl If non-null, a declaration used to synthesize a 'this'
///        argument when DeclExp does not provide one (constructors).
CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
                                               const NamedDecl *D,
                                               const Expr *DeclExp,
                                               VarDecl *SelfDecl) {
  // If we are processing a raw attribute expression, with no substitutions.
  if (!DeclExp)
    return translateAttrExpr(AttrExp, nullptr);

  CallingContext Ctx(nullptr, D);

  // Examine DeclExp to find SelfArg and FunArgs, which are used to substitute
  // for formal parameters when we call buildMutexID later.
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
    // Member access: the base object is 'self'.
    Ctx.SelfArg   = ME->getBase();
    Ctx.SelfArrow = ME->isArrow();
  } else if (const CXXMemberCallExpr *CE =
             dyn_cast<CXXMemberCallExpr>(DeclExp)) {
    // Method call: implicit object argument is 'self'; call args substitute
    // for the method's parameters.
    Ctx.SelfArg   = CE->getImplicitObjectArgument();
    Ctx.SelfArrow = isCalleeArrow(CE->getCallee());
    Ctx.NumArgs   = CE->getNumArgs();
    Ctx.FunArgs   = CE->getArgs();
  } else if (const CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
    // Free function call: no 'self', only argument substitution.
    Ctx.NumArgs = CE->getNumArgs();
    Ctx.FunArgs = CE->getArgs();
  } else if (const CXXConstructExpr *CE =
             dyn_cast<CXXConstructExpr>(DeclExp)) {
    Ctx.SelfArg = nullptr;  // Will be set below
    Ctx.NumArgs = CE->getNumArgs();
    Ctx.FunArgs = CE->getArgs();
  } else if (D && isa<CXXDestructorDecl>(D)) {
    // There's no such thing as a "destructor call" in the AST.
    Ctx.SelfArg = DeclExp;
  }

  // Hack to handle constructors, where self cannot be recovered from
  // the expression.
  if (SelfDecl && !Ctx.SelfArg) {
    DeclRefExpr SelfDRE(SelfDecl, false, SelfDecl->getType(), VK_LValue,
                        SelfDecl->getLocation());
    Ctx.SelfArg = &SelfDRE;

    // NOTE: these returns intentionally duplicate the tail below.  They
    // must stay inside this scope because Ctx.SelfArg points at SelfDRE,
    // a stack local that dies at the closing brace.
    // If the attribute has no arguments, then assume the argument is "this".
    if (!AttrExp)
      return translateAttrExpr(Ctx.SelfArg, nullptr);
    else  // For most attributes.
      return translateAttrExpr(AttrExp, &Ctx);
  }

  // If the attribute has no arguments, then assume the argument is "this".
  if (!AttrExp)
    return translateAttrExpr(Ctx.SelfArg, nullptr);
  else  // For most attributes.
    return translateAttrExpr(AttrExp, &Ctx);
}
/// \brief Translate a clang expression in an attribute to a til::SExpr.
// This assumes a CallingContext has already been created.
//
// A CapabilityExpr with a null sexpr means "no capability" and is
// returned for missing, unrecognized, or nonsensical attribute arguments.
CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
                                               CallingContext *Ctx) {
  if (!AttrExp)
    return CapabilityExpr(nullptr, false);

  if (auto* SLit = dyn_cast<StringLiteral>(AttrExp)) {
    if (SLit->getString() == StringRef("*"))
      // The "*" expr is a universal lock, which essentially turns off
      // checks until it is removed from the lockset.
      return CapabilityExpr(new (Arena) til::Wildcard(), false);
    else
      // Ignore other string literals for now.
      return CapabilityExpr(nullptr, false);
  }

  // A leading '!' negates the capability: "!mu" means mu must NOT be held.
  bool Neg = false;
  if (auto *OE = dyn_cast<CXXOperatorCallExpr>(AttrExp)) {
    if (OE->getOperator() == OO_Exclaim) {
      Neg = true;
      AttrExp = OE->getArg(0);
    }
  }
  else if (auto *UO = dyn_cast<UnaryOperator>(AttrExp)) {
    if (UO->getOpcode() == UO_LNot) {
      Neg = true;
      AttrExp = UO->getSubExpr();
    }
  }

  til::SExpr *E = translate(AttrExp, Ctx);

  // Trap mutex expressions like nullptr, or 0.
  // Any literal value is nonsense.
  if (!E || isa<til::Literal>(E))
    return CapabilityExpr(nullptr, false);

  // Hack to deal with smart pointers -- strip off top-level pointer casts.
  if (auto *CE = dyn_cast_or_null<til::Cast>(E)) {
    if (CE->castOpcode() == til::CAST_objToPtr)
      return CapabilityExpr(CE->expr(), Neg);
  }
  return CapabilityExpr(E, Neg);
}
// Translate a clang statement or expression to a TIL expression.
// Also performs substitution of variables; Ctx provides the context.
// Dispatches on the type of S.  Returns null for a null S; unsupported
// statement kinds become til::Undefined.
til::SExpr *SExprBuilder::translate(const Stmt *S, CallingContext *Ctx) {
  if (!S)
    return nullptr;

  // Check if S has already been translated and cached.
  // This handles the lookup of SSA names for DeclRefExprs here.
  if (til::SExpr *E = lookupStmt(S))
    return E;

  switch (S->getStmtClass()) {
  case Stmt::DeclRefExprClass:
    return translateDeclRefExpr(cast<DeclRefExpr>(S), Ctx);
  case Stmt::CXXThisExprClass:
    return translateCXXThisExpr(cast<CXXThisExpr>(S), Ctx);
  case Stmt::MemberExprClass:
    return translateMemberExpr(cast<MemberExpr>(S), Ctx);
  case Stmt::CallExprClass:
    return translateCallExpr(cast<CallExpr>(S), Ctx);
  case Stmt::CXXMemberCallExprClass:
    return translateCXXMemberCallExpr(cast<CXXMemberCallExpr>(S), Ctx);
  case Stmt::CXXOperatorCallExprClass:
    return translateCXXOperatorCallExpr(cast<CXXOperatorCallExpr>(S), Ctx);
  case Stmt::UnaryOperatorClass:
    return translateUnaryOperator(cast<UnaryOperator>(S), Ctx);
  case Stmt::BinaryOperatorClass:
  case Stmt::CompoundAssignOperatorClass:
    return translateBinaryOperator(cast<BinaryOperator>(S), Ctx);
  case Stmt::ArraySubscriptExprClass:
    return translateArraySubscriptExpr(cast<ArraySubscriptExpr>(S), Ctx);
  case Stmt::ConditionalOperatorClass:
    return translateAbstractConditionalOperator(
             cast<ConditionalOperator>(S), Ctx);
  case Stmt::BinaryConditionalOperatorClass:
    return translateAbstractConditionalOperator(
             cast<BinaryConditionalOperator>(S), Ctx);

  // We treat these as no-ops
  case Stmt::ParenExprClass:
    return translate(cast<ParenExpr>(S)->getSubExpr(), Ctx);
  case Stmt::ExprWithCleanupsClass:
    return translate(cast<ExprWithCleanups>(S)->getSubExpr(), Ctx);
  case Stmt::CXXBindTemporaryExprClass:
    return translate(cast<CXXBindTemporaryExpr>(S)->getSubExpr(), Ctx);

  // Collect all literals
  case Stmt::CharacterLiteralClass:
  case Stmt::CXXNullPtrLiteralExprClass:
  case Stmt::GNUNullExprClass:
  case Stmt::CXXBoolLiteralExprClass:
  case Stmt::FloatingLiteralClass:
  case Stmt::ImaginaryLiteralClass:
  case Stmt::IntegerLiteralClass:
  case Stmt::StringLiteralClass:
  case Stmt::ObjCStringLiteralClass:
    return new (Arena) til::Literal(cast<Expr>(S));

  case Stmt::DeclStmtClass:
    return translateDeclStmt(cast<DeclStmt>(S), Ctx);
  default:
    break;
  }
  // All cast kinds are handled uniformly, outside the switch.
  if (const CastExpr *CE = dyn_cast<CastExpr>(S))
    return translateCastExpr(CE, Ctx);

  return new (Arena) til::Undefined(S);
}
// Translate a reference to a declaration.  Function parameters may be
// substituted with the corresponding call argument from Ctx; everything
// else becomes a LiteralPtr naming the (canonical) declaration.
til::SExpr *SExprBuilder::translateDeclRefExpr(const DeclRefExpr *DRE,
                                               CallingContext *Ctx) {
  const ValueDecl *VD = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());

  // Function parameters require substitution and/or renaming.
  if (const ParmVarDecl *PV = dyn_cast_or_null<ParmVarDecl>(VD)) {
    const FunctionDecl *FD =
        cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
    unsigned I = PV->getFunctionScopeIndex();

    if (Ctx && Ctx->FunArgs && FD == Ctx->AttrDecl->getCanonicalDecl()) {
      // Substitute call arguments for references to function parameters.
      // The argument is translated in the *previous* context, since it
      // comes from the caller's side of the substitution.
      assert(I < Ctx->NumArgs);
      return translate(Ctx->FunArgs[I], Ctx->Prev);
    }
    // Map the param back to the param of the original function declaration
    // for consistent comparisons.
    VD = FD->getParamDecl(I);
  }

  // For non-local variables, treat it as a referenced to a named object.
  return new (Arena) til::LiteralPtr(VD);
}
// Translate 'this': substitute the context's self-argument when one is
// available; otherwise use the function's own 'this' variable.
til::SExpr *SExprBuilder::translateCXXThisExpr(const CXXThisExpr *TE,
                                               CallingContext *Ctx) {
  if (!Ctx || !Ctx->SelfArg) {
    assert(SelfVar && "We have no variable for 'this'!");
    return SelfVar;
  }
  return translate(Ctx->SelfArg, Ctx->Prev);
}
// Retrieve the clang declaration carried by E, if any.
// Returns nullptr for expression kinds that hold no declaration.
// (Fix: was 'return 0;' — use nullptr, consistent with the rest of the
// file.)
static const ValueDecl *getValueDeclFromSExpr(const til::SExpr *E) {
  if (auto *V = dyn_cast<til::Variable>(E))
    return V->clangDecl();
  if (auto *Ph = dyn_cast<til::Phi>(E))
    return Ph->clangDecl();
  if (auto *P = dyn_cast<til::Project>(E))
    return P->clangDecl();
  if (auto *L = dyn_cast<til::LiteralPtr>(E))
    return L->clangDecl();
  return nullptr;
}
// Does E denote a value of C++ pointer type?  Pointer-ness can come from
// the declared type of the underlying decl, or from a smart-pointer
// unwrapping cast (CAST_objToPtr).
static bool hasCppPointerType(const til::SExpr *E) {
  if (const ValueDecl *VD = getValueDeclFromSExpr(E))
    if (VD->getType()->isPointerType())
      return true;
  const auto *C = dyn_cast<til::Cast>(E);
  return C && C->castOpcode() == til::CAST_objToPtr;
}
// Grab the very first declaration of virtual method D, by repeatedly
// walking up the overridden-methods chain.  Used so that overrides of the
// same virtual method compare equal.
static const CXXMethodDecl *getFirstVirtualDecl(const CXXMethodDecl *D) {
  while (true) {
    D = D->getCanonicalDecl();
    CXXMethodDecl::method_iterator I = D->begin_overridden_methods(),
                                   E = D->end_overridden_methods();
    if (I == E)
      return D;  // Method does not override anything
    // Follow the first overridden method only.
    D = *I;      // FIXME: this does not work with multiple inheritance.
  }
  // Unreachable: the loop above only exits via 'return D'.
  return nullptr;
}
// Translate a member access: translate the base, then project the member
// declaration out of it.
til::SExpr *SExprBuilder::translateMemberExpr(const MemberExpr *ME,
                                              CallingContext *Ctx) {
  til::SExpr *BaseE = translate(ME->getBase(), Ctx);
  til::SExpr *SApp = new (Arena) til::SApply(BaseE);

  // Normalize virtual methods to their root declaration so that overrides
  // compare equal.
  const ValueDecl *D = ME->getMemberDecl();
  if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
    D = getFirstVirtualDecl(MD);

  auto *P = new (Arena) til::Project(SApp, D);
  // Mark pointer-based accesses so they print as '->'.
  if (hasCppPointerType(BaseE))
    P->setArrow(true);
  return P;
}
// Translate a call expression.  In capability-expression mode, a callee
// carrying a LOCK_RETURNED attribute is replaced by the attribute's
// expression (with argument substitution).  Otherwise the call becomes a
// curried chain of Apply nodes ending in a Call.
//
// Fix: getDirectCallee() can return null (calls through function
// pointers, dependent callees); the original dereferenced it
// unconditionally, crashing in CapabilityExprMode.
til::SExpr *SExprBuilder::translateCallExpr(const CallExpr *CE,
                                            CallingContext *Ctx,
                                            const Expr *SelfE) {
  if (CapabilityExprMode) {
    // Handle LOCK_RETURNED.  There is no direct callee (and thus no
    // attribute) for indirect calls.
    if (const FunctionDecl *FD = CE->getDirectCallee()) {
      FD = FD->getMostRecentDecl();
      if (LockReturnedAttr* At = FD->getAttr<LockReturnedAttr>()) {
        // Translate the attribute expression with the call's arguments
        // substituted for the callee's parameters.
        CallingContext LRCallCtx(Ctx);
        LRCallCtx.AttrDecl = CE->getDirectCallee();
        LRCallCtx.SelfArg  = SelfE;
        LRCallCtx.NumArgs  = CE->getNumArgs();
        LRCallCtx.FunArgs  = CE->getArgs();
        return const_cast<til::SExpr*>(
            translateAttrExpr(At->getArg(), &LRCallCtx).sexpr());
      }
    }
  }

  til::SExpr *E = translate(CE->getCallee(), Ctx);
  for (const auto *Arg : CE->arguments()) {
    til::SExpr *A = translate(Arg, Ctx);
    E = new (Arena) til::Apply(E, A);
  }
  return new (Arena) til::Call(E, CE);
}
// Translate a C++ method call; in capability mode, smart-pointer get()
// is folded into an object-to-pointer cast instead of a real call.
til::SExpr *SExprBuilder::translateCXXMemberCallExpr(
    const CXXMemberCallExpr *ME, CallingContext *Ctx) {
  if (CapabilityExprMode) {
    // Ignore calls to get() on smart pointers.
    if (ME->getNumArgs() == 0 &&
        ME->getMethodDecl()->getNameAsString() == "get") {
      auto *ObjE = translate(ME->getImplicitObjectArgument(), Ctx);
      return new (Arena) til::Cast(til::CAST_objToPtr, ObjE);
    }
  }
  return translateCallExpr(cast<CallExpr>(ME), Ctx,
                           ME->getImplicitObjectArgument());
}
// Translate an overloaded-operator call; in capability mode, operator*
// and operator-> on smart pointers are folded into an object-to-pointer
// cast instead of a real call.
til::SExpr *SExprBuilder::translateCXXOperatorCallExpr(
    const CXXOperatorCallExpr *OCE, CallingContext *Ctx) {
  if (CapabilityExprMode) {
    // Ignore operator * and operator -> on smart pointers.
    switch (OCE->getOperator()) {
    case OO_Star:
    case OO_Arrow: {
      auto *ObjE = translate(OCE->getArg(0), Ctx);
      return new (Arena) til::Cast(til::CAST_objToPtr, ObjE);
    }
    default:
      break;
    }
  }
  return translateCallExpr(cast<CallExpr>(OCE), Ctx);
}
// Translate a unary operator to the corresponding TIL unary op.
// Increments/decrements and unsupported operators become Undefined;
// '&', '*' and unary '+' are treated as no-ops.
til::SExpr *SExprBuilder::translateUnaryOperator(const UnaryOperator *UO,
                                                 CallingContext *Ctx) {
  switch (UO->getOpcode()) {
  case UO_PostInc:
  case UO_PostDec:
  case UO_PreInc:
  case UO_PreDec:
    return new (Arena) til::Undefined(UO);

  case UO_AddrOf: {
    if (CapabilityExprMode) {
      // interpret &Graph::mu_ as an existential.
      if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(UO->getSubExpr())) {
        if (DRE->getDecl()->isCXXInstanceMember()) {
          // This is a pointer-to-member expression, e.g. &MyClass::mu_.
          // We interpret this syntax specially, as a wildcard.
          auto *W = new (Arena) til::Wildcard();
          return new (Arena) til::Project(W, DRE->getDecl());
        }
      }
    }
    // otherwise, & is a no-op
    return translate(UO->getSubExpr(), Ctx);
  }

  // We treat these as no-ops
  case UO_Deref:
  case UO_Plus:
    return translate(UO->getSubExpr(), Ctx);

  case UO_Minus:
    return new (Arena)
      til::UnaryOp(til::UOP_Minus, translate(UO->getSubExpr(), Ctx));
  case UO_Not:
    return new (Arena)
      til::UnaryOp(til::UOP_BitNot, translate(UO->getSubExpr(), Ctx));
  case UO_LNot:
    return new (Arena)
      til::UnaryOp(til::UOP_LogicNot, translate(UO->getSubExpr(), Ctx));

  // Currently unsupported
  case UO_Real:
  case UO_Imag:
  case UO_Extension:
    return new (Arena) til::Undefined(UO);
  }
  // Defensive default for opcodes not covered above.
  return new (Arena) til::Undefined(UO);
}
// Build a TIL binary op from BO's operands.  Operands are always
// translated left-to-right; if Reverse is set they are emitted swapped
// (used to express '>' as '<' and '>=' as '<=').
til::SExpr *SExprBuilder::translateBinOp(til::TIL_BinaryOpcode Op,
                                         const BinaryOperator *BO,
                                         CallingContext *Ctx, bool Reverse) {
  til::SExpr *Lhs = translate(BO->getLHS(), Ctx);
  til::SExpr *Rhs = translate(BO->getRHS(), Ctx);
  if (Reverse)
    std::swap(Lhs, Rhs);
  return new (Arena) til::BinaryOp(Op, Lhs, Rhs);
}
// Translate an assignment.  If Assign is true this is a plain '=' and E1
// is stored directly; otherwise it is a compound assignment (e.g. '+='),
// so the old value is combined with E1 via Op first.  Locals tracked in
// the SSA variable map are updated in place; everything else becomes an
// explicit Store.
til::SExpr *SExprBuilder::translateBinAssign(til::TIL_BinaryOpcode Op,
                                             const BinaryOperator *BO,
                                             CallingContext *Ctx,
                                             bool Assign) {
  const Expr *LHS = BO->getLHS();
  const Expr *RHS = BO->getRHS();
  til::SExpr *E0 = translate(LHS, Ctx);
  til::SExpr *E1 = translate(RHS, Ctx);

  const ValueDecl *VD = nullptr;
  til::SExpr *CV = nullptr;
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHS)) {
    VD = DRE->getDecl();
    CV = lookupVarDecl(VD);  // Current SSA value of VD, if tracked.
  }

  if (!Assign) {
    // Compound assignment: old-value Op RHS.  Prefer the tracked SSA
    // value; otherwise load through the translated LHS.
    til::SExpr *Arg = CV ? CV : new (Arena) til::Load(E0);
    E1 = new (Arena) til::BinaryOp(Op, Arg, E1);
    E1 = addStatement(E1, nullptr, VD);
  }
  if (VD && CV)
    return updateVarDecl(VD, E1);
  return new (Arena) til::Store(E0, E1);
}
// Map each clang binary opcode to the corresponding TIL opcode.
// Note that '>' and '>=' are expressed via Lt/Leq with the operands
// reversed (the 'true' Reverse argument to translateBinOp).
til::SExpr *SExprBuilder::translateBinaryOperator(const BinaryOperator *BO,
                                                  CallingContext *Ctx) {
  switch (BO->getOpcode()) {
  case BO_PtrMemD:
  case BO_PtrMemI:
    return new (Arena) til::Undefined(BO);

  case BO_Mul:  return translateBinOp(til::BOP_Mul, BO, Ctx);
  case BO_Div:  return translateBinOp(til::BOP_Div, BO, Ctx);
  case BO_Rem:  return translateBinOp(til::BOP_Rem, BO, Ctx);
  case BO_Add:  return translateBinOp(til::BOP_Add, BO, Ctx);
  case BO_Sub:  return translateBinOp(til::BOP_Sub, BO, Ctx);
  case BO_Shl:  return translateBinOp(til::BOP_Shl, BO, Ctx);
  case BO_Shr:  return translateBinOp(til::BOP_Shr, BO, Ctx);
  case BO_LT:   return translateBinOp(til::BOP_Lt,  BO, Ctx);
  case BO_GT:   return translateBinOp(til::BOP_Lt,  BO, Ctx, true);
  case BO_LE:   return translateBinOp(til::BOP_Leq, BO, Ctx);
  case BO_GE:   return translateBinOp(til::BOP_Leq, BO, Ctx, true);
  case BO_EQ:   return translateBinOp(til::BOP_Eq,  BO, Ctx);
  case BO_NE:   return translateBinOp(til::BOP_Neq, BO, Ctx);
  case BO_And:  return translateBinOp(til::BOP_BitAnd,   BO, Ctx);
  case BO_Xor:  return translateBinOp(til::BOP_BitXor,   BO, Ctx);
  case BO_Or:   return translateBinOp(til::BOP_BitOr,    BO, Ctx);
  case BO_LAnd: return translateBinOp(til::BOP_LogicAnd, BO, Ctx);
  case BO_LOr:  return translateBinOp(til::BOP_LogicOr,  BO, Ctx);

  case BO_Assign:    return translateBinAssign(til::BOP_Eq,  BO, Ctx, true);
  case BO_MulAssign: return translateBinAssign(til::BOP_Mul, BO, Ctx);
  case BO_DivAssign: return translateBinAssign(til::BOP_Div, BO, Ctx);
  case BO_RemAssign: return translateBinAssign(til::BOP_Rem, BO, Ctx);
  case BO_AddAssign: return translateBinAssign(til::BOP_Add, BO, Ctx);
  case BO_SubAssign: return translateBinAssign(til::BOP_Sub, BO, Ctx);
  case BO_ShlAssign: return translateBinAssign(til::BOP_Shl, BO, Ctx);
  case BO_ShrAssign: return translateBinAssign(til::BOP_Shr, BO, Ctx);
  case BO_AndAssign: return translateBinAssign(til::BOP_BitAnd, BO, Ctx);
  case BO_XorAssign: return translateBinAssign(til::BOP_BitXor, BO, Ctx);
  case BO_OrAssign:  return translateBinAssign(til::BOP_BitOr,  BO, Ctx);
  case BO_Comma:
    // The clang CFG should have already processed both sides.
    return translate(BO->getRHS(), Ctx);
  }
  // Defensive default for opcodes not covered above.
  return new (Arena) til::Undefined(BO);
}
// Translate a cast.  Value-preserving casts are treated as no-ops;
// lvalue-to-rvalue of a tracked local yields its current SSA value.
til::SExpr *SExprBuilder::translateCastExpr(const CastExpr *CE,
                                            CallingContext *Ctx) {
  clang::CastKind K = CE->getCastKind();
  switch (K) {
  case CK_LValueToRValue: {
    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
      // Use the variable's current SSA value if we are tracking it.
      til::SExpr *E0 = lookupVarDecl(DRE->getDecl());
      if (E0)
        return E0;
    }
    til::SExpr *E0 = translate(CE->getSubExpr(), Ctx);
    return E0;
    // FIXME!! -- get Load working properly
    // return new (Arena) til::Load(E0);
  }
  // These casts do not change the value; pass the operand through.
  case CK_NoOp:
  case CK_DerivedToBase:
  case CK_UncheckedDerivedToBase:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay: {
    til::SExpr *E0 = translate(CE->getSubExpr(), Ctx);
    return E0;
  }
  default: {
    // FIXME: handle different kinds of casts.
    til::SExpr *E0 = translate(CE->getSubExpr(), Ctx);
    // In capability mode, casts are ignored entirely.
    if (CapabilityExprMode)
      return E0;
    return new (Arena) til::Cast(til::CAST_none, E0);
  }
  }
}
// a[i] maps directly onto a TIL ArrayIndex node.
til::SExpr *
SExprBuilder::translateArraySubscriptExpr(const ArraySubscriptExpr *E,
                                          CallingContext *Ctx) {
  til::SExpr *Base = translate(E->getBase(), Ctx);
  til::SExpr *Idx = translate(E->getIdx(), Ctx);
  return new (Arena) til::ArrayIndex(Base, Idx);
}
// cond ? t : f maps directly onto a TIL IfThenElse node.
til::SExpr *
SExprBuilder::translateAbstractConditionalOperator(
    const AbstractConditionalOperator *CO, CallingContext *Ctx) {
  til::SExpr *Cond = translate(CO->getCond(), Ctx);
  til::SExpr *TrueE = translate(CO->getTrueExpr(), Ctx);
  til::SExpr *FalseE = translate(CO->getFalseExpr(), Ctx);
  return new (Arena) til::IfThenElse(Cond, TrueE, FalseE);
}
// Translate a declaration statement.  Local variables of trivial type
// are registered in the SSA variable map (returning from the first such
// variable, as before); others are currently unhandled.
til::SExpr *
SExprBuilder::translateDeclStmt(const DeclStmt *S, CallingContext *Ctx) {
  DeclGroupRef DGrp = S->getDeclGroup();
  for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
    VarDecl *VD = dyn_cast_or_null<VarDecl>(*I);
    if (!VD)
      continue;

    til::SExpr *SE = translate(VD->getInit(), Ctx);

    // Add local variables with trivial type to the variable map.
    if (VD->getType().isTrivialType(VD->getASTContext()))
      return addVarDecl(VD, SE);
    // TODO: add alloca for non-trivial types.
  }
  return nullptr;
}
// If (E) is non-trivial, then add it to the current basic block, and
// update the statement map so that S refers to E.  Returns a new variable
// that refers to E.
// If E is trivial returns E.
til::SExpr *SExprBuilder::addStatement(til::SExpr* E, const Stmt *S,
                                       const ValueDecl *VD) {
  // Pass through null, out-of-block, already-placed, or trivial exprs.
  const bool PassThrough =
      !E || !CurrentBB || E->block() || til::ThreadSafetyTIL::isTrivial(E);
  if (PassThrough)
    return E;

  // Named values are wrapped in a Variable so the decl is retained.
  if (VD)
    E = new (Arena) til::Variable(E, VD);

  CurrentInstructions.push_back(E);
  if (S)
    insertStmt(S, E);
  return E;
}
// Returns the current value of VD, if known, and nullptr otherwise.
til::SExpr *SExprBuilder::lookupVarDecl(const ValueDecl *VD) {
  auto Entry = LVarIdxMap.find(VD);
  if (Entry == LVarIdxMap.end())
    return nullptr;
  unsigned Idx = Entry->second;
  assert(CurrentLVarMap[Idx].first == VD);
  return CurrentLVarMap[Idx].second;
}
// If E is a til::Variable with no clang declaration attached yet,
// record VD as its declaration.
static void maybeUpdateVD(til::SExpr *E, const ValueDecl *VD) {
  til::Variable *V = dyn_cast_or_null<til::Variable>(E);
  if (V && !V->clangDecl())
    V->setClangDecl(VD);
}
// Adds a new variable declaration.
til::SExpr *SExprBuilder::addVarDecl(const ValueDecl *VD, til::SExpr *E) {
  maybeUpdateVD(E, VD);
  // The new entry goes at the end of the map; remember its index first.
  unsigned Idx = CurrentLVarMap.size();
  LVarIdxMap.insert(std::make_pair(VD, Idx));
  CurrentLVarMap.makeWritable();
  CurrentLVarMap.push_back(std::make_pair(VD, E));
  return E;
}
// Updates a current variable declaration.  (E.g. by assignment)
til::SExpr *SExprBuilder::updateVarDecl(const ValueDecl *VD, til::SExpr *E) {
  maybeUpdateVD(E, VD);
  auto Entry = LVarIdxMap.find(VD);
  if (Entry != LVarIdxMap.end()) {
    // Known local variable: overwrite its current definition.
    CurrentLVarMap.makeWritable();
    CurrentLVarMap.elem(Entry->second).second = E;
    return E;
  }
  // Unknown variable: model the assignment as a store through a pointer.
  auto *Ptr = new (Arena) til::LiteralPtr(VD);
  return new (Arena) til::Store(Ptr, E);
}
// Make a Phi node in the current block for the i^th variable in CurrentVarMap.
// If E != null, sets Phi[CurrentBlockInfo->ArgIndex] = E.
// If E == null, this is a backedge and will be set later.
void SExprBuilder::makePhiNodeVar(unsigned i, unsigned NPreds, til::SExpr *E) {
  unsigned ArgIndex = CurrentBlockInfo->ProcessedPredecessors;
  // A merge only makes sense after at least one predecessor has been
  // processed, and before the last one.
  assert(ArgIndex > 0 && ArgIndex < NPreds);
  til::SExpr *CurrE = CurrentLVarMap[i].second;
  if (CurrE->block() == CurrentBB) {
    // We already have a Phi node in the current block,
    // so just add the new variable to the Phi node.
    til::Phi *Ph = dyn_cast<til::Phi>(CurrE);
    assert(Ph && "Expecting Phi node.");
    if (E)
      Ph->values()[ArgIndex] = E;
    return;
  }
  // Make a new phi node: phi(..., E)
  // All phi args up to the current index are set to the current value.
  til::Phi *Ph = new (Arena) til::Phi(Arena, NPreds);
  Ph->values().setValues(NPreds, nullptr);
  for (unsigned PIdx = 0; PIdx < ArgIndex; ++PIdx)
    Ph->values()[PIdx] = CurrE;
  if (E)
    Ph->values()[ArgIndex] = E;
  // Remember which clang variable this Phi merges, so back-edge patching
  // (mergePhiNodesBackEdge) can look up its final definition.
  Ph->setClangDecl(CurrentLVarMap[i].first);
  // If E is from a back-edge, or either E or CurrE are incomplete, then
  // mark this node as incomplete; we may need to remove it later.
  if (!E || isIncompletePhi(E) || isIncompletePhi(CurrE)) {
    Ph->setStatus(til::Phi::PH_Incomplete);
  }
  // Add Phi node to current block, and update CurrentLVarMap[i]
  CurrentArguments.push_back(Ph);
  if (Ph->status() == til::Phi::PH_Incomplete)
    IncompleteArgs.push_back(Ph);
  CurrentLVarMap.makeWritable();
  CurrentLVarMap.elem(i).second = Ph;
}
// Merge values from Map into the current variable map.
// This will construct Phi nodes in the current basic block as necessary.
void SExprBuilder::mergeEntryMap(LVarDefinitionMap Map) {
  assert(CurrentBlockInfo && "Not processing a block!");
  if (!CurrentLVarMap.valid()) {
    // Steal Map, using copy-on-write.
    CurrentLVarMap = std::move(Map);
    return;
  }
  if (CurrentLVarMap.sameAs(Map))
    return; // Easy merge: maps from different predecessors are unchanged.
  unsigned NPreds = CurrentBB->numPredecessors();
  unsigned ESz = CurrentLVarMap.size();
  unsigned MSz = Map.size();
  unsigned Sz = std::min(ESz, MSz);
  // Walk the common prefix of the two maps, creating a Phi node for any
  // variable whose definition differs between predecessors.
  for (unsigned i=0; i<Sz; ++i) {
    if (CurrentLVarMap[i].first != Map[i].first) {
      // We've reached the end of variables in common.
      CurrentLVarMap.makeWritable();
      CurrentLVarMap.downsize(i);
      break;
    }
    if (CurrentLVarMap[i].second != Map[i].second)
      makePhiNodeVar(i, NPreds, Map[i].second);
  }
  // Variables present only in the current map are not defined on every
  // incoming edge; drop them.
  if (ESz > MSz) {
    CurrentLVarMap.makeWritable();
    CurrentLVarMap.downsize(Map.size());
  }
}
// Merge a back edge into the current variable map.
// This will create phi nodes for all variables in the variable map.
void SExprBuilder::mergeEntryMapBackEdge() {
  // We don't have definitions for variables on the backedge, because we
  // haven't gotten that far in the CFG.  Thus, when encountering a back edge,
  // we conservatively create Phi nodes for all variables.  Unnecessary Phi
  // nodes will be marked as incomplete, and stripped out at the end.
  //
  // A Phi node is unnecessary if it only refers to itself and one other
  // variable, e.g. x = Phi(y, y, x) can be reduced to x = y.
  assert(CurrentBlockInfo && "Not processing a block!");
  // Multiple back edges into this block only need one set of Phi nodes.
  if (CurrentBlockInfo->HasBackEdges)
    return;
  CurrentBlockInfo->HasBackEdges = true;
  CurrentLVarMap.makeWritable();
  unsigned Sz = CurrentLVarMap.size();
  unsigned NPreds = CurrentBB->numPredecessors();
  // Passing a null expression marks each Phi argument as "filled in later"
  // (see makePhiNodeVar).
  for (unsigned i=0; i < Sz; ++i) {
    makePhiNodeVar(i, NPreds, nullptr);
  }
}
// Update the phi nodes that were initially created for a back edge
// once the variable definitions have been computed.
// I.e., merge the current variable map into the phi nodes for Blk.
void SExprBuilder::mergePhiNodesBackEdge(const CFGBlock *Blk) {
  til::BasicBlock *BB = lookupBlock(Blk);
  // The back-edge slot in every Phi node is the predecessor position that
  // was left null when the Phi was first created.
  unsigned ArgIndex = BBInfo[Blk->getBlockID()].ProcessedPredecessors;
  assert(ArgIndex > 0 && ArgIndex < BB->numPredecessors());
  for (til::SExpr *PE : BB->arguments()) {
    til::Phi *Ph = dyn_cast_or_null<til::Phi>(PE);
    assert(Ph && "Expecting Phi Node.");
    assert(Ph->values()[ArgIndex] == nullptr && "Wrong index for back edge.");
    // Fill the slot with the variable's definition at the end of the loop.
    til::SExpr *E = lookupVarDecl(Ph->clangDecl());
    assert(E && "Couldn't find local variable for Phi node.");
    Ph->values()[ArgIndex] = E;
  }
}
// Initializes per-CFG state and translates function parameters before the
// walker visits any block.
void SExprBuilder::enterCFG(CFG *Cfg, const NamedDecl *D,
                            const CFGBlock *First) {
  // Perform initial setup operations.
  unsigned NBlocks = Cfg->getNumBlockIDs();
  Scfg = new (Arena) til::SCFG(Arena, NBlocks);
  // Allocate all basic blocks immediately, to handle forward references.
  BBInfo.resize(NBlocks);
  BlockMap.resize(NBlocks, nullptr);
  // Create map from clang blockID to til::BasicBlocks.
  for (auto *B : *Cfg) {
    auto *BB = new (Arena) til::BasicBlock(Arena);
    BB->reserveInstructions(B->size());
    BlockMap[B->getBlockID()] = BB;
  }
  CurrentBB = lookupBlock(&Cfg->getEntry());
  // D is either an Objective-C method or a function; fetch its parameters.
  auto Parms = isa<ObjCMethodDecl>(D) ? cast<ObjCMethodDecl>(D)->parameters()
                                      : cast<FunctionDecl>(D)->parameters();
  for (auto *Pm : Parms) {
    QualType T = Pm->getType();
    if (!T.isTrivialType(Pm->getASTContext()))
      continue;
    // Add parameters to local variable map.
    // FIXME: right now we emulate params with loads; that should be fixed.
    til::SExpr *Lp = new (Arena) til::LiteralPtr(Pm);
    til::SExpr *Ld = new (Arena) til::Load(Lp);
    til::SExpr *V = addStatement(Ld, nullptr, Pm);
    addVarDecl(Pm, V);
  }
}
void SExprBuilder::enterCFGBlock(const CFGBlock *B) {
  // Initialize the TIL basic block and register it with the CFG.
  CurrentBB = lookupBlock(B);
  Scfg->add(CurrentBB);
  CurrentBB->reservePredecessors(B->pred_size());
  CurrentBlockInfo = &BBInfo[B->getBlockID()];
  // CurrentLVarMap is moved to ExitMap on block exit.
  // FIXME: the entry block will hold function parameters.
  // assert(!CurrentLVarMap.valid() && "CurrentLVarMap already initialized.");
}
void SExprBuilder::handlePredecessor(const CFGBlock *Pred) {
  // Compute CurrentLVarMap on entry from the ExitMaps of predecessors.
  CurrentBB->addPredecessor(BlockMap[Pred->getBlockID()]);
  BlockInfo *PredInfo = &BBInfo[Pred->getBlockID()];
  assert(PredInfo->UnprocessedSuccessors > 0);
  // The last successor to process a predecessor may steal its exit map;
  // earlier ones must merge from a copy.
  if (--PredInfo->UnprocessedSuccessors == 0)
    mergeEntryMap(std::move(PredInfo->ExitMap));
  else
    mergeEntryMap(PredInfo->ExitMap.clone());
  ++CurrentBlockInfo->ProcessedPredecessors;
}
void SExprBuilder::handlePredecessorBackEdge(const CFGBlock *Pred) {
  // Definitions on a back edge are not available yet, so conservatively
  // create Phi nodes for every variable in the current map.
  mergeEntryMapBackEdge();
}
void SExprBuilder::enterCFGBlockBody(const CFGBlock *B) {
  // The merge*() methods have created arguments; push them onto the block.
  unsigned NumArgs = static_cast<unsigned>(CurrentArguments.size());
  CurrentBB->arguments().reserve(NumArgs, Arena);
  for (auto *Arg : CurrentArguments)
    CurrentBB->addArgument(Arg);
}
void SExprBuilder::handleStatement(const Stmt *S) {
  // Translate the statement and record it in the current basic block.
  addStatement(translate(S, nullptr), S);
}
void SExprBuilder::handleDestructorCall(const VarDecl *VD,
                                        const CXXDestructorDecl *DD) {
  // Model the implicit destructor invocation as a call of DD applied to VD.
  auto *Self = new (Arena) til::LiteralPtr(VD);
  auto *Dtor = new (Arena) til::LiteralPtr(DD);
  auto *App = new (Arena) til::Apply(Dtor, Self);
  addStatement(new (Arena) til::Call(App), nullptr);
}
// Flushes the collected instructions into the TIL block and builds its
// terminator from the clang block's successors.
void SExprBuilder::exitCFGBlockBody(const CFGBlock *B) {
  CurrentBB->instructions().reserve(
      static_cast<unsigned>(CurrentInstructions.size()), Arena);
  for (auto *V : CurrentInstructions)
    CurrentBB->addInstruction(V);
  // Create an appropriate terminator.
  unsigned N = B->succ_size();
  auto It = B->succ_begin();
  if (N == 1) {
    // Single successor: unconditional goto.
    til::BasicBlock *BB = *It ? lookupBlock(*It) : nullptr;
    // TODO: set index
    unsigned Idx = BB ? BB->findPredecessorIndex(CurrentBB) : 0;
    auto *Tm = new (Arena) til::Goto(BB, Idx);
    CurrentBB->setTerminator(Tm);
  }
  else if (N == 2) {
    // Two successors: conditional branch on the terminator condition.
    til::SExpr *C = translate(B->getTerminatorCondition(true), nullptr);
    til::BasicBlock *BB1 = *It ? lookupBlock(*It) : nullptr;
    ++It;
    til::BasicBlock *BB2 = *It ? lookupBlock(*It) : nullptr;
    // FIXME: make sure these aren't critical edges.
    auto *Tm = new (Arena) til::Branch(C, BB1, BB2);
    CurrentBB->setTerminator(Tm);
  }
}
void SExprBuilder::handleSuccessor(const CFGBlock *Succ) {
  // Count forward edges; the successor itself is processed later.
  ++CurrentBlockInfo->UnprocessedSuccessors;
}
void SExprBuilder::handleSuccessorBackEdge(const CFGBlock *Succ) {
  // Now that this block's definitions are known, patch the Phi nodes that
  // the loop header created for the back edge.
  mergePhiNodesBackEdge(Succ);
  ++BBInfo[Succ->getBlockID()].ProcessedPredecessors;
}
void SExprBuilder::exitCFGBlock(const CFGBlock *B) {
  // Reset per-block scratch state; the variable map becomes this block's
  // exit map for successors to merge from.
  CurrentArguments.clear();
  CurrentInstructions.clear();
  CurrentBlockInfo->ExitMap = std::move(CurrentLVarMap);
  CurrentBlockInfo = nullptr;
  CurrentBB = nullptr;
}
void SExprBuilder::exitCFG(const CFGBlock *Last) {
for (auto *Ph : IncompleteArgs) {
if (Ph->status() == til::Phi::PH_Incomplete)
simplifyIncompleteArg(Ph);
}
CurrentArguments.clear();
CurrentInstructions.clear();
IncompleteArgs.clear();
}
/*
void printSCFG(CFGWalker &Walker) {
llvm::BumpPtrAllocator Bpa;
til::MemRegionRef Arena(&Bpa);
SExprBuilder SxBuilder(Arena);
til::SCFG *Scfg = SxBuilder.buildCFG(Walker);
TILPrinter::print(Scfg, llvm::errs());
}
*/
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Analysis/BodyFarm.cpp | //== BodyFarm.cpp - Factory for conjuring up fake bodies ----------*- C++ -*-//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// BodyFarm is a factory for creating faux implementations for functions/methods
// for analysis purposes.
//
//===----------------------------------------------------------------------===//
#include "BodyFarm.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Analysis/CodeInjector.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
//===----------------------------------------------------------------------===//
// Helper creation functions for constructing faux ASTs.
//===----------------------------------------------------------------------===//
// Returns true if Ty is a block pointer whose prototype takes no arguments
// and returns void, i.e. the shape of a dispatch block.
static bool isDispatchBlock(QualType Ty) {
  const BlockPointerType *BPT = Ty->getAs<BlockPointerType>();
  if (!BPT)
    return false;
  const FunctionProtoType *FT =
      BPT->getPointeeType()->getAs<FunctionProtoType>();
  return FT && FT->getReturnType()->isVoidType() && FT->getNumParams() == 0;
}
namespace {
// Small helper that constructs individual AST nodes (with invalid source
// locations) for the synthesized function bodies below.
class ASTMaker {
public:
  ASTMaker(ASTContext &C) : C(C) {}
  /// Create a new BinaryOperator representing a simple assignment.
  BinaryOperator *makeAssignment(const Expr *LHS, const Expr *RHS, QualType Ty);
  /// Create a new BinaryOperator representing a comparison.
  BinaryOperator *makeComparison(const Expr *LHS, const Expr *RHS,
                                 BinaryOperator::Opcode Op);
  /// Create a new compound stmt using the provided statements.
  CompoundStmt *makeCompound(ArrayRef<Stmt*>);
  /// Create a new DeclRefExpr for the referenced variable.
  DeclRefExpr *makeDeclRefExpr(const VarDecl *D);
  /// Create a new UnaryOperator representing a dereference.
  UnaryOperator *makeDereference(const Expr *Arg, QualType Ty);
  /// Create an implicit cast for an integer conversion.
  Expr *makeIntegralCast(const Expr *Arg, QualType Ty);
  /// Create an implicit cast to a builtin boolean type.
  ImplicitCastExpr *makeIntegralCastToBoolean(const Expr *Arg);
  /// Create an implicit cast for lvalue-to-rvalue conversions.
  ImplicitCastExpr *makeLvalueToRvalue(const Expr *Arg, QualType Ty);
  /// Create an Objective-C bool literal.
  ObjCBoolLiteralExpr *makeObjCBool(bool Val);
  /// Create an Objective-C ivar reference.
  ObjCIvarRefExpr *makeObjCIvarRef(const Expr *Base, const ObjCIvarDecl *IVar);
  /// Create a Return statement.
  ReturnStmt *makeReturn(const Expr *RetVal);
private:
  ASTContext &C;
};
}
BinaryOperator *ASTMaker::makeAssignment(const Expr *LHS, const Expr *RHS,
                                         QualType Ty) {
  // Build "LHS = RHS" with result type Ty and an invalid location.
  Expr *L = const_cast<Expr *>(LHS);
  Expr *R = const_cast<Expr *>(RHS);
  return new (C) BinaryOperator(L, R, BO_Assign, Ty, VK_RValue, OK_Ordinary,
                                SourceLocation(), false);
}
BinaryOperator *ASTMaker::makeComparison(const Expr *LHS, const Expr *RHS,
                                         BinaryOperator::Opcode Op) {
  // Only logical and comparison operators are supported here; their result
  // type is the context's logical-operation type.
  assert(BinaryOperator::isLogicalOp(Op) || BinaryOperator::isComparisonOp(Op));
  Expr *L = const_cast<Expr *>(LHS);
  Expr *R = const_cast<Expr *>(RHS);
  return new (C) BinaryOperator(L, R, Op, C.getLogicalOperationType(),
                                VK_RValue, OK_Ordinary, SourceLocation(),
                                false);
}
CompoundStmt *ASTMaker::makeCompound(ArrayRef<Stmt *> Stmts) {
  // Wrap the statements in a compound statement with invalid brace locations.
  SourceLocation Loc;
  return new (C) CompoundStmt(C, Stmts, Loc, Loc);
}
DeclRefExpr *ASTMaker::makeDeclRefExpr(const VarDecl *D) {
  // Build an unqualified lvalue reference to D with invalid locations.
  return DeclRefExpr::Create(C, NestedNameSpecifierLoc(), SourceLocation(),
                             const_cast<VarDecl *>(D),
                             /*RefersToEnclosingVariableOrCapture=*/false,
                             SourceLocation(), D->getType(), VK_LValue);
}
UnaryOperator *ASTMaker::makeDereference(const Expr *Arg, QualType Ty) {
  // "*Arg", yielding an lvalue of type Ty.
  return new (C) UnaryOperator(const_cast<Expr *>(Arg), UO_Deref, Ty,
                               VK_LValue, OK_Ordinary, SourceLocation());
}
ImplicitCastExpr *ASTMaker::makeLvalueToRvalue(const Expr *Arg, QualType Ty) {
  // Wrap Arg in an implicit lvalue-to-rvalue conversion of type Ty.
  Expr *E = const_cast<Expr *>(Arg);
  return ImplicitCastExpr::Create(C, Ty, CK_LValueToRValue, E,
                                  /*BasePath=*/nullptr, VK_RValue);
}
Expr *ASTMaker::makeIntegralCast(const Expr *Arg, QualType Ty) {
  // No cast is needed when the types already match.
  Expr *E = const_cast<Expr *>(Arg);
  if (Arg->getType() == Ty)
    return E;
  return ImplicitCastExpr::Create(C, Ty, CK_IntegralCast, E,
                                  /*BasePath=*/nullptr, VK_RValue);
}
ImplicitCastExpr *ASTMaker::makeIntegralCastToBoolean(const Expr *Arg) {
  // Convert an integral expression to the builtin bool type.
  Expr *E = const_cast<Expr *>(Arg);
  return ImplicitCastExpr::Create(C, C.BoolTy, CK_IntegralToBoolean, E,
                                  /*BasePath=*/nullptr, VK_RValue);
}
ObjCBoolLiteralExpr *ASTMaker::makeObjCBool(bool Val) {
  // Prefer the typedef'd BOOL type when it exists; fall back to the builtin.
  QualType BoolTy = C.getBOOLDecl() ? C.getBOOLType() : C.ObjCBuiltinBoolTy;
  return new (C) ObjCBoolLiteralExpr(Val, BoolTy, SourceLocation());
}
ObjCIvarRefExpr *ASTMaker::makeObjCIvarRef(const Expr *Base,
                                           const ObjCIvarDecl *IVar) {
  // Reference the ivar via arrow access on Base.
  Expr *B = const_cast<Expr *>(Base);
  ObjCIvarDecl *IV = const_cast<ObjCIvarDecl *>(IVar);
  return new (C) ObjCIvarRefExpr(IV, IVar->getType(), SourceLocation(),
                                 SourceLocation(), B,
                                 /*arrow=*/true, /*free=*/false);
}
ReturnStmt *ASTMaker::makeReturn(const Expr *RetVal) {
  // "return RetVal;" with an invalid location and no NRVO candidate.
  return new (C) ReturnStmt(SourceLocation(), const_cast<Expr *>(RetVal),
                            /*NRVOCandidate=*/nullptr);
}
//===----------------------------------------------------------------------===//
// Creation functions for faux ASTs.
//===----------------------------------------------------------------------===//
typedef Stmt *(*FunctionFarmer)(ASTContext &C, const FunctionDecl *D);
/// Create a fake body for dispatch_once.
static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
  // Check if we have at least two parameters.
  if (D->param_size() != 2)
    return nullptr;
  // Check if the first parameter is a pointer to integer type.
  const ParmVarDecl *Predicate = D->getParamDecl(0);
  QualType PredicateQPtrTy = Predicate->getType();
  const PointerType *PredicatePtrTy = PredicateQPtrTy->getAs<PointerType>();
  if (!PredicatePtrTy)
    return nullptr;
  QualType PredicateTy = PredicatePtrTy->getPointeeType();
  if (!PredicateTy->isIntegerType())
    return nullptr;
  // Check if the second parameter is the proper block type.
  const ParmVarDecl *Block = D->getParamDecl(1);
  QualType Ty = Block->getType();
  if (!isDispatchBlock(Ty))
    return nullptr;
  // Everything checks out.  Create a fake body that checks the predicate,
  // sets it, and calls the block.  Basically, an AST dump of:
  //
  // void dispatch_once(dispatch_once_t *predicate, dispatch_block_t block) {
  //  if (!*predicate) {
  //    *predicate = 1;
  //    block();
  //  }
  // }
  ASTMaker M(C);
  // (1) Create the call.
  DeclRefExpr *DR = M.makeDeclRefExpr(Block);
  ImplicitCastExpr *ICE = M.makeLvalueToRvalue(DR, Ty);
  CallExpr *CE = new (C) CallExpr(C, ICE, None, C.VoidTy, VK_RValue,
                                  SourceLocation());
  // (2) Create the assignment to the predicate.
  IntegerLiteral *IL =
    IntegerLiteral::Create(C, llvm::APInt(C.getTypeSize(C.IntTy), (uint64_t) 1),
                           C.IntTy, SourceLocation());
  BinaryOperator *B =
    M.makeAssignment(
       M.makeDereference(
          M.makeLvalueToRvalue(
            M.makeDeclRefExpr(Predicate), PredicateQPtrTy),
            PredicateTy),
       M.makeIntegralCast(IL, PredicateTy),
       PredicateTy);
  // (3) Create the compound statement.
  Stmt *Stmts[] = { B, CE };
  CompoundStmt *CS = M.makeCompound(Stmts);
  // (4) Create the 'if' condition: the dereferenced, loaded predicate,
  // negated with '!'.
  ImplicitCastExpr *LValToRval =
    M.makeLvalueToRvalue(
      M.makeDereference(
        M.makeLvalueToRvalue(
          M.makeDeclRefExpr(Predicate),
          PredicateQPtrTy),
        PredicateTy),
    PredicateTy);
  UnaryOperator *UO = new (C) UnaryOperator(LValToRval, UO_LNot, C.IntTy,
                                           VK_RValue, OK_Ordinary,
                                           SourceLocation());
  // (5) Create the 'if' statement.
  IfStmt *If = new (C) IfStmt(C, SourceLocation(), nullptr, UO, CS);
  return If;
}
/// Create a fake body for dispatch_sync.
static Stmt *create_dispatch_sync(ASTContext &C, const FunctionDecl *D) {
  // Signature check: exactly two parameters, the second being a void block.
  if (D->param_size() != 2)
    return nullptr;
  const ParmVarDecl *PV = D->getParamDecl(1);
  QualType Ty = PV->getType();
  if (!isDispatchBlock(Ty))
    return nullptr;
  // Fake body, equivalent to an AST dump of:
  //
  // void dispatch_sync(dispatch_queue_t queue, void (^block)(void)) {
  //   block();
  // }
  //
  ASTMaker M(C);
  ImplicitCastExpr *ICE = M.makeLvalueToRvalue(M.makeDeclRefExpr(PV), Ty);
  return new (C) CallExpr(C, ICE, None, C.VoidTy, VK_RValue, SourceLocation());
}
static Stmt *create_OSAtomicCompareAndSwap(ASTContext &C, const FunctionDecl *D)
{
  // There are exactly 3 arguments.
  if (D->param_size() != 3)
    return nullptr;
  // Signature:
  // _Bool OSAtomicCompareAndSwapPtr(void *__oldValue,
  //                                 void *__newValue,
  //                                 void * volatile *__theValue)
  // Generate body:
  //   if (oldValue == *theValue) {
  //    *theValue = newValue;
  //    return YES;
  //   }
  //   else return NO;
  QualType ResultTy = D->getReturnType();
  bool isBoolean = ResultTy->isBooleanType();
  if (!isBoolean && !ResultTy->isIntegralType(C))
    return nullptr;
  const ParmVarDecl *OldValue = D->getParamDecl(0);
  QualType OldValueTy = OldValue->getType();
  const ParmVarDecl *NewValue = D->getParamDecl(1);
  QualType NewValueTy = NewValue->getType();
  assert(OldValueTy == NewValueTy);
  // The third parameter must be a pointer so it can be dereferenced below.
  const ParmVarDecl *TheValue = D->getParamDecl(2);
  QualType TheValueTy = TheValue->getType();
  const PointerType *PT = TheValueTy->getAs<PointerType>();
  if (!PT)
    return nullptr;
  QualType PointeeTy = PT->getPointeeType();
  ASTMaker M(C);
  // Construct the comparison: oldValue == *theValue.
  Expr *Comparison =
    M.makeComparison(
      M.makeLvalueToRvalue(M.makeDeclRefExpr(OldValue), OldValueTy),
      M.makeLvalueToRvalue(
        M.makeDereference(
          M.makeLvalueToRvalue(M.makeDeclRefExpr(TheValue), TheValueTy),
          PointeeTy),
        PointeeTy),
      BO_EQ);
  // Construct the body of the IfStmt: the store and "return YES;".
  Stmt *Stmts[2];
  Stmts[0] =
    M.makeAssignment(
      M.makeDereference(
        M.makeLvalueToRvalue(M.makeDeclRefExpr(TheValue), TheValueTy),
        PointeeTy),
      M.makeLvalueToRvalue(M.makeDeclRefExpr(NewValue), NewValueTy),
      NewValueTy);
  Expr *BoolVal = M.makeObjCBool(true);
  Expr *RetVal = isBoolean ? M.makeIntegralCastToBoolean(BoolVal)
                           : M.makeIntegralCast(BoolVal, ResultTy);
  Stmts[1] = M.makeReturn(RetVal);
  CompoundStmt *Body = M.makeCompound(Stmts);
  // Construct the else clause: "return NO;".
  BoolVal = M.makeObjCBool(false);
  RetVal = isBoolean ? M.makeIntegralCastToBoolean(BoolVal)
                     : M.makeIntegralCast(BoolVal, ResultTy);
  Stmt *Else = M.makeReturn(RetVal);
  // Construct the If.
  Stmt *If =
    new (C) IfStmt(C, SourceLocation(), nullptr, Comparison, Body,
                   SourceLocation(), Else);
  return If;
}
Stmt *BodyFarm::getBody(const FunctionDecl *D) {
  D = D->getCanonicalDecl();
  // Memoize: each declaration is farmed at most once.
  Optional<Stmt *> &Val = Bodies[D];
  if (Val.hasValue())
    return Val.getValue();
  Val = nullptr;
  if (!D->getIdentifier())
    return nullptr;
  StringRef Name = D->getName();
  if (Name.empty())
    return nullptr;
  // Select the farmer for this function by name.
  FunctionFarmer FF = nullptr;
  if (Name.startswith("OSAtomicCompareAndSwap") ||
      Name.startswith("objc_atomicCompareAndSwap"))
    FF = create_OSAtomicCompareAndSwap;
  else
    FF = llvm::StringSwitch<FunctionFarmer>(Name)
             .Case("dispatch_sync", create_dispatch_sync)
             .Case("dispatch_once", create_dispatch_once)
             .Default(nullptr);
  if (FF)
    Val = FF(C, D);
  else if (Injector)
    Val = Injector->getBody(D);
  return Val.getValue();
}
// Synthesizes "return self->_ivar;" for a property getter backed by an ivar,
// or returns nullptr when no safe synthesis is possible.
static Stmt *createObjCPropertyGetter(ASTContext &Ctx,
                                      const ObjCPropertyDecl *Prop) {
  // First, find the backing ivar.
  const ObjCIvarDecl *IVar = Prop->getPropertyIvarDecl();
  if (!IVar)
    return nullptr;
  // Ignore weak variables, which have special behavior.
  if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)
    return nullptr;
  // Look to see if Sema has synthesized a body for us. This happens in
  // Objective-C++ because the return value may be a C++ class type with a
  // non-trivial copy constructor. We can only do this if we can find the
  // @synthesize for this property, though (or if we know it's been auto-
  // synthesized).
  const ObjCImplementationDecl *ImplDecl =
    IVar->getContainingInterface()->getImplementation();
  if (ImplDecl) {
    for (const auto *I : ImplDecl->property_impls()) {
      if (I->getPropertyDecl() != Prop)
        continue;
      if (I->getGetterCXXConstructor()) {
        ASTMaker M(Ctx);
        return M.makeReturn(I->getGetterCXXConstructor());
      }
    }
  }
  // Sanity check that the property is the same type as the ivar, or a
  // reference to it, and that it is either an object pointer or trivially
  // copyable.
  if (!Ctx.hasSameUnqualifiedType(IVar->getType(),
                                  Prop->getType().getNonReferenceType()))
    return nullptr;
  if (!IVar->getType()->isObjCLifetimeType() &&
      !IVar->getType().isTriviallyCopyableType(Ctx))
    return nullptr;
  // Generate our body:
  //   return self->_ivar;
  ASTMaker M(Ctx);
  const VarDecl *selfVar = Prop->getGetterMethodDecl()->getSelfDecl();
  Expr *loadedIVar =
    M.makeObjCIvarRef(
      M.makeLvalueToRvalue(
        M.makeDeclRefExpr(selfVar),
        selfVar->getType()),
      IVar);
  // Load the ivar's value unless the property returns a reference to it.
  if (!Prop->getType()->isReferenceType())
    loadedIVar = M.makeLvalueToRvalue(loadedIVar, IVar->getType());
  return M.makeReturn(loadedIVar);
}
Stmt *BodyFarm::getBody(const ObjCMethodDecl *D) {
  // We currently only know how to synthesize property accessors.
  if (!D->isPropertyAccessor())
    return nullptr;
  D = D->getCanonicalDecl();
  // Memoize: each declaration is farmed at most once.
  Optional<Stmt *> &Val = Bodies[D];
  if (Val.hasValue())
    return Val.getValue();
  Val = nullptr;
  const ObjCPropertyDecl *Prop = D->findPropertyDecl();
  if (!Prop)
    return nullptr;
  // For now, we only synthesize getters (accessors with no parameters).
  if (D->param_size() != 0)
    return nullptr;
  Val = createObjCPropertyGetter(C, Prop);
  return Val.getValue();
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Format/BreakableToken.h | //===--- BreakableToken.h - Format C++ code -------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Declares BreakableToken, BreakableStringLiteral, and
/// BreakableBlockComment classes, that contain token type-specific logic to
/// break long lines in tokens.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_FORMAT_BREAKABLETOKEN_H
#define LLVM_CLANG_LIB_FORMAT_BREAKABLETOKEN_H
#include "Encoding.h"
#include "TokenAnnotator.h"
#include "WhitespaceManager.h"
#include <utility>
namespace clang {
namespace format {
struct FormatStyle;
/// \brief Base class for strategies on how to break tokens.
///
/// FIXME: The interface seems set in stone, so we might want to just pull the
/// strategy into the class, instead of controlling it from the outside.
class BreakableToken {
public:
  /// \brief Contains starting character index and length of split.
  typedef std::pair<StringRef::size_type, unsigned> Split;
  virtual ~BreakableToken() {}
  /// \brief Returns the number of lines in this token in the original code.
  virtual unsigned getLineCount() const = 0;
  /// \brief Returns the number of columns required to format the piece of line
  /// at \p LineIndex, from byte offset \p Offset with length \p Length.
  ///
  /// Note that previous breaks are not taken into account. \p Offset is always
  /// specified from the start of the (original) line.
  /// \p Length can be set to StringRef::npos, which means "to the end of line".
  virtual unsigned
  getLineLengthAfterSplit(unsigned LineIndex, unsigned Offset,
                          StringRef::size_type Length) const = 0;
  /// \brief Returns a range (offset, length) at which to break the line at
  /// \p LineIndex, if previously broken at \p TailOffset. If possible, do not
  /// violate \p ColumnLimit.
  virtual Split getSplit(unsigned LineIndex, unsigned TailOffset,
                         unsigned ColumnLimit) const = 0;
  /// \brief Emits the previously retrieved \p Split via \p Whitespaces.
  virtual void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
                           WhitespaceManager &Whitespaces) = 0;
  /// \brief Replaces the whitespace range described by \p Split with a single
  /// space.
  virtual void replaceWhitespace(unsigned LineIndex, unsigned TailOffset,
                                 Split Split,
                                 WhitespaceManager &Whitespaces) = 0;
  /// \brief Replaces the whitespace between \p LineIndex-1 and \p LineIndex.
  /// Defaults to a no-op; single-line tokens have no inter-line whitespace.
  virtual void replaceWhitespaceBefore(unsigned LineIndex,
                                       WhitespaceManager &Whitespaces) {}
protected:
  BreakableToken(const FormatToken &Tok, unsigned IndentLevel,
                 bool InPPDirective, encoding::Encoding Encoding,
                 const FormatStyle &Style)
      : Tok(Tok), IndentLevel(IndentLevel), InPPDirective(InPPDirective),
        Encoding(Encoding), Style(Style) {}
  // The token this strategy operates on.
  const FormatToken &Tok;
  // Indentation level of the line containing the token.
  const unsigned IndentLevel;
  // Whether the token occurs inside a preprocessor directive.
  const bool InPPDirective;
  // Source text encoding.
  const encoding::Encoding Encoding;
  // Formatting options in effect.
  const FormatStyle &Style;
};
/// \brief Base class for single line tokens that can be broken.
///
/// \c getSplit() needs to be implemented by child classes.
class BreakableSingleLineToken : public BreakableToken {
public:
  // A single-line token always reports exactly one line.
  unsigned getLineCount() const override;
  unsigned getLineLengthAfterSplit(unsigned LineIndex, unsigned TailOffset,
                                   StringRef::size_type Length) const override;
protected:
  BreakableSingleLineToken(const FormatToken &Tok, unsigned IndentLevel,
                           unsigned StartColumn, StringRef Prefix,
                           StringRef Postfix, bool InPPDirective,
                           encoding::Encoding Encoding,
                           const FormatStyle &Style);
  // The column in which the token starts.
  unsigned StartColumn;
  // The prefix a line needs after a break in the token.
  StringRef Prefix;
  // The postfix a line needs before introducing a break.
  StringRef Postfix;
  // The token text excluding the prefix and postfix.
  StringRef Line;
};
/// \brief Breaks a single-line string literal into multiple adjacent literals.
class BreakableStringLiteral : public BreakableSingleLineToken {
public:
  /// \brief Creates a breakable token for a single line string literal.
  ///
  /// \p StartColumn specifies the column in which the token will start
  /// after formatting.
  BreakableStringLiteral(const FormatToken &Tok, unsigned IndentLevel,
                         unsigned StartColumn, StringRef Prefix,
                         StringRef Postfix, bool InPPDirective,
                         encoding::Encoding Encoding, const FormatStyle &Style);
  Split getSplit(unsigned LineIndex, unsigned TailOffset,
                 unsigned ColumnLimit) const override;
  void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
                   WhitespaceManager &Whitespaces) override;
  // Whitespace replacement is not supported for string literals; no-op.
  void replaceWhitespace(unsigned LineIndex, unsigned TailOffset, Split Split,
                         WhitespaceManager &Whitespaces) override {}
};
/// \brief Breaks a single line comment ("//"-style) into multiple lines.
class BreakableLineComment : public BreakableSingleLineToken {
public:
  /// \brief Creates a breakable token for a line comment.
  ///
  /// \p StartColumn specifies the column in which the comment will start
  /// after formatting.
  BreakableLineComment(const FormatToken &Token, unsigned IndentLevel,
                       unsigned StartColumn, bool InPPDirective,
                       encoding::Encoding Encoding, const FormatStyle &Style);
  Split getSplit(unsigned LineIndex, unsigned TailOffset,
                 unsigned ColumnLimit) const override;
  void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
                   WhitespaceManager &Whitespaces) override;
  void replaceWhitespace(unsigned LineIndex, unsigned TailOffset, Split Split,
                         WhitespaceManager &Whitespaces) override;
  void replaceWhitespaceBefore(unsigned LineIndex,
                               WhitespaceManager &Whitespaces) override;
private:
  // The prefix without an additional space if one was added.
  StringRef OriginalPrefix;
};
/// \brief Breaks a block comment ("/* ... */") across multiple lines.
class BreakableBlockComment : public BreakableToken {
public:
  /// \brief Creates a breakable token for a block comment.
  ///
  /// \p StartColumn specifies the column in which the comment will start
  /// after formatting, while \p OriginalStartColumn specifies in which
  /// column the comment started before formatting.
  /// If the comment starts a line after formatting, set \p FirstInLine to true.
  BreakableBlockComment(const FormatToken &Token, unsigned IndentLevel,
                        unsigned StartColumn, unsigned OriginalStartColumn,
                        bool FirstInLine, bool InPPDirective,
                        encoding::Encoding Encoding, const FormatStyle &Style);
  unsigned getLineCount() const override;
  unsigned getLineLengthAfterSplit(unsigned LineIndex, unsigned TailOffset,
                                   StringRef::size_type Length) const override;
  Split getSplit(unsigned LineIndex, unsigned TailOffset,
                 unsigned ColumnLimit) const override;
  void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
                   WhitespaceManager &Whitespaces) override;
  void replaceWhitespace(unsigned LineIndex, unsigned TailOffset, Split Split,
                         WhitespaceManager &Whitespaces) override;
  void replaceWhitespaceBefore(unsigned LineIndex,
                               WhitespaceManager &Whitespaces) override;
private:
  // Rearranges the whitespace between Lines[LineIndex-1] and Lines[LineIndex],
  // so that all whitespace between the lines is accounted to Lines[LineIndex]
  // as leading whitespace:
  // - Lines[LineIndex] points to the text after that whitespace
  // - Lines[LineIndex-1] shrinks by its trailing whitespace
  // - LeadingWhitespace[LineIndex] is updated with the complete whitespace
  //   between the end of the text of Lines[LineIndex-1] and Lines[LineIndex]
  //
  // Sets StartOfLineColumn to the intended column in which the text at
  // Lines[LineIndex] starts (note that the decoration, if present, is not
  // considered part of the text).
  void adjustWhitespace(unsigned LineIndex, int IndentDelta);
  // Returns the column at which the text in line LineIndex starts, when broken
  // at TailOffset. Note that the decoration (if present) is not considered part
  // of the text.
  unsigned getContentStartColumn(unsigned LineIndex, unsigned TailOffset) const;
  // Contains the text of the lines of the block comment, excluding the leading
  // /* in the first line and trailing */ in the last line, and excluding all
  // trailing whitespace between the lines. Note that the decoration (if
  // present) is also not considered part of the text.
  SmallVector<StringRef, 16> Lines;
  // LeadingWhitespace[i] is the number of characters regarded as whitespace in
  // front of Lines[i]. Note that this can include "* " sequences, which we
  // regard as whitespace when all lines have a "*" prefix.
  SmallVector<unsigned, 16> LeadingWhitespace;
  // StartOfLineColumn[i] is the target column at which Line[i] should be.
  // Note that this excludes a leading "* " or "*" in case all lines have
  // a "*" prefix.
  // The first line's target column is always positive. The remaining lines'
  // target columns are relative to the first line to allow correct indentation
  // of comments in \c WhitespaceManager. Thus they can be negative as well (in
  // case the first line needs to be unindented more than there's actual
  // whitespace in another line).
  SmallVector<int, 16> StartOfLineColumn;
  // The column at which the text of a broken line should start.
  // Note that an optional decoration would go before that column.
  // IndentAtLineBreak is a uniform position for all lines in a block comment,
  // regardless of their relative position.
  // FIXME: Revisit the decision to do this; the main reason was to support
  // patterns like
  // /**************//**
  // * Comment
  // We could also support such patterns by special casing the first line
  // instead.
  unsigned IndentAtLineBreak;
  // This is to distinguish between the case when the last line was empty and
  // the case when it started with a decoration ("*" or "* ").
  bool LastLineNeedsDecoration;
  // Either "* " if all lines begin with a "*", or empty.
  StringRef Decoration;
};
} // namespace format
} // namespace clang
#endif
//===--- ContinuationIndenter.cpp - Format C++ code -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file implements the continuation indenter.
///
//===----------------------------------------------------------------------===//
#include "BreakableToken.h"
#include "ContinuationIndenter.h"
#include "WhitespaceManager.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "llvm/Support/Debug.h"
#include <string>
#define DEBUG_TYPE "format-formatter"
namespace clang {
namespace format {
// Returns the length of everything up to the first possible line break after
// the ), ], } or > matching \c Tok.
static unsigned getLengthToMatchingParen(const FormatToken &Tok) {
if (!Tok.MatchingParen)
return 0;
FormatToken *End = Tok.MatchingParen;
while (End->Next && !End->Next->CanBreakBefore) {
End = End->Next;
}
return End->TotalLength - Tok.TotalLength + 1;
}
// Returns \c true if \c Tok is the "." or "->" of a call and starts the next
// segment of a builder type call.
static bool startsSegmentOfBuilderTypeCall(const FormatToken &Tok) {
return Tok.isMemberAccess() && Tok.Previous && Tok.Previous->closesScope();
}
// Returns \c true if \c Current starts a new parameter.
static bool startsNextParameter(const FormatToken &Current,
const FormatStyle &Style) {
const FormatToken &Previous = *Current.Previous;
if (Current.is(TT_CtorInitializerComma) &&
Style.BreakConstructorInitializersBeforeComma)
return true;
return Previous.is(tok::comma) && !Current.isTrailingComment() &&
(Previous.isNot(TT_CtorInitializerComma) ||
!Style.BreakConstructorInitializersBeforeComma);
}
// Constructs a ContinuationIndenter. All referenced objects (style, keywords,
// source manager, whitespace manager) are stored by reference and must
// outlive this object. CommentPragmasRegex is compiled once from the style's
// CommentPragmas pattern.
ContinuationIndenter::ContinuationIndenter(const FormatStyle &Style,
                                           const AdditionalKeywords &Keywords,
                                           SourceManager &SourceMgr,
                                           WhitespaceManager &Whitespaces,
                                           encoding::Encoding Encoding,
                                           bool BinPackInconclusiveFunctions)
    : Style(Style), Keywords(Keywords), SourceMgr(SourceMgr),
      Whitespaces(Whitespaces), Encoding(Encoding),
      BinPackInconclusiveFunctions(BinPackInconclusiveFunctions),
      CommentPragmasRegex(Style.CommentPragmas) {}
// Builds the initial LineState for formatting \p Line: the state starts at
// column \p FirstIndent with a single top-level ParenState on the stack, and
// is then advanced past the line's first token (which the caller has already
// placed).
LineState ContinuationIndenter::getInitialState(unsigned FirstIndent,
                                                const AnnotatedLine *Line,
                                                bool DryRun) {
  LineState State;
  State.FirstIndent = FirstIndent;
  State.Column = FirstIndent;
  State.Line = Line;
  State.NextToken = Line->First;
  State.Stack.push_back(ParenState(FirstIndent, Line->Level, FirstIndent,
                                   /*AvoidBinPacking=*/false,
                                   /*NoLineBreak=*/false));
  State.LineContainsContinuedForLoopSection = false;
  State.StartOfStringLiteral = 0;
  State.StartOfLineLevel = 0;
  State.LowestLevelOnLine = 0;
  State.IgnoreStackForComparison = false;
  // The first token has already been indented and thus consumed.
  moveStateToNextToken(State, DryRun, /*Newline=*/false);
  return State;
}
// Returns true if inserting a line break before *State.NextToken is permitted
// at all; mustBreak() separately decides whether a break is required.
//
// Note: the original code asserted `&Previous == Current.Previous`, which is
// tautological (Previous is bound from Current.Previous just above) and has
// been removed.
bool ContinuationIndenter::canBreak(const LineState &State) {
  const FormatToken &Current = *State.NextToken;
  const FormatToken &Previous = *Current.Previous;
  if (!Current.CanBreakBefore &&
      !(State.Stack.back().BreakBeforeClosingBrace &&
        Current.closesBlockTypeList(Style)))
    return false;
  // The opening "{" of a braced list has to be on the same line as the first
  // element if it is nested in another braced init list or function call.
  if (!Current.MustBreakBefore && Previous.is(tok::l_brace) &&
      Previous.isNot(TT_DictLiteral) && Previous.BlockKind == BK_BracedInit &&
      Previous.Previous &&
      Previous.Previous->isOneOf(tok::l_brace, tok::l_paren, tok::comma))
    return false;
  // This prevents breaks like:
  //   ...
  //   SomeParameter, OtherParameter).DoSomething(
  //   ...
  // As they hide "DoSomething" and are generally bad for readability.
  if (Previous.opensScope() && Previous.isNot(tok::l_brace) &&
      State.LowestLevelOnLine < State.StartOfLineLevel &&
      State.LowestLevelOnLine < Current.NestingLevel)
    return false;
  if (Current.isMemberAccess() && State.Stack.back().ContainsUnwrappedBuilder)
    return false;
  // Don't create a 'hanging' indent if there are multiple blocks in a single
  // statement.
  if (Previous.is(tok::l_brace) && State.Stack.size() > 1 &&
      State.Stack[State.Stack.size() - 2].NestedBlockInlined &&
      State.Stack[State.Stack.size() - 2].HasMultipleNestedBlocks)
    return false;
  // Don't break after very short return types (e.g. "void") as that is often
  // unexpected.
  if (Current.is(TT_FunctionDeclarationName) &&
      Style.AlwaysBreakAfterDefinitionReturnType == FormatStyle::DRTBS_None &&
      State.Column < 6)
    return false;
  return !State.Stack.back().NoLineBreak;
}
// Decides whether a line break before *State.NextToken is mandatory given the
// style options and the layout state accumulated so far. Checked in priority
// order; canBreak() separately decides whether a break is permitted at all.
bool ContinuationIndenter::mustBreak(const LineState &State) {
  const FormatToken &Current = *State.NextToken;
  const FormatToken &Previous = *Current.Previous;
  if (Current.MustBreakBefore || Current.is(TT_InlineASMColon))
    return true;
  if (State.Stack.back().BreakBeforeClosingBrace &&
      Current.closesBlockTypeList(Style))
    return true;
  if (Previous.is(tok::semi) && State.LineContainsContinuedForLoopSection)
    return true;
  // Break before the next parameter (or ternary branch) if an earlier break
  // in this scope committed us to one-parameter-per-line.
  if ((startsNextParameter(Current, Style) || Previous.is(tok::semi) ||
       (Style.BreakBeforeTernaryOperators && Current.is(TT_ConditionalExpr) &&
        Previous.isNot(tok::question)) ||
       (!Style.BreakBeforeTernaryOperators &&
        Previous.is(TT_ConditionalExpr))) &&
      State.Stack.back().BreakBeforeParameter && !Current.isTrailingComment() &&
      !Current.isOneOf(tok::r_paren, tok::r_brace))
    return true;
  // Break after the "{" / "[" of a dict or array initializer that cannot fit
  // within the column limit anyway.
  if (((Previous.is(TT_DictLiteral) && Previous.is(tok::l_brace)) ||
       Previous.is(TT_ArrayInitializerLSquare)) &&
      Style.ColumnLimit > 0 &&
      getLengthToMatchingParen(Previous) + State.Column - 1 >
          getColumnLimit(State))
    return true;
  if (Current.is(TT_CtorInitializerColon) &&
      ((Style.AllowShortFunctionsOnASingleLine != FormatStyle::SFS_All) ||
       Style.BreakConstructorInitializersBeforeComma || Style.ColumnLimit != 0))
    return true;
  if (Current.is(TT_SelectorName) && State.Stack.back().ObjCSelectorNameFound &&
      State.Stack.back().BreakBeforeParameter)
    return true;
  // If a break would not move the token right of where it already is, never
  // force one.
  unsigned NewLineColumn = getNewLineColumn(State);
  if (State.Column < NewLineColumn)
    return false;
  if (Style.AlwaysBreakBeforeMultilineStrings &&
      (NewLineColumn == State.FirstIndent + Style.ContinuationIndentWidth ||
       Previous.is(tok::comma) || Current.NestingLevel < 2) &&
      !Previous.isOneOf(tok::kw_return, tok::lessless, tok::at) &&
      !Previous.isOneOf(TT_InlineASMColon, TT_ConditionalExpr) &&
      nextIsMultilineString(State))
    return true;
  // Using CanBreakBefore here and below takes care of the decision whether the
  // current style uses wrapping before or after operators for the given
  // operator.
  if (Previous.is(TT_BinaryOperator) && Current.CanBreakBefore) {
    // If we need to break somewhere inside the LHS of a binary expression, we
    // should also break after the operator. Otherwise, the formatting would
    // hide the operator precedence, e.g. in:
    //   if (aaaaaaaaaaaaaa ==
    //           bbbbbbbbbbbbbb && c) {..
    // For comparisons, we only apply this rule, if the LHS is a binary
    // expression itself as otherwise, the line breaks seem superfluous.
    // We need special cases for ">>" which we have split into two ">" while
    // lexing in order to make template parsing easier.
    bool IsComparison = (Previous.getPrecedence() == prec::Relational ||
                         Previous.getPrecedence() == prec::Equality) &&
                        Previous.Previous &&
                        Previous.Previous->isNot(TT_BinaryOperator); // For >>.
    bool LHSIsBinaryExpr =
        Previous.Previous && Previous.Previous->EndsBinaryExpression;
    if ((!IsComparison || LHSIsBinaryExpr) && !Current.isTrailingComment() &&
        Previous.getPrecedence() != prec::Assignment &&
        State.Stack.back().BreakBeforeParameter)
      return true;
  } else if (Current.is(TT_BinaryOperator) && Current.CanBreakBefore &&
             State.Stack.back().BreakBeforeParameter) {
    return true;
  }
  // Same as above, but for the first "<<" operator.
  if (Current.is(tok::lessless) && Current.isNot(TT_OverloadedOperator) &&
      State.Stack.back().BreakBeforeParameter &&
      State.Stack.back().FirstLessLess == 0)
    return true;
  if (Current.NestingLevel == 0 && !Current.isTrailingComment()) {
    // Always break after "template <...>" and leading annotations. This is only
    // for cases where the entire line does not fit on a single line as a
    // different LineFormatter would be used otherwise.
    if (Previous.ClosesTemplateDeclaration)
      return true;
    if (Previous.is(TT_FunctionAnnotationRParen))
      return true;
    if (Previous.is(TT_LeadingJavaAnnotation) && Current.isNot(tok::l_paren) &&
        Current.isNot(TT_LeadingJavaAnnotation))
      return true;
  }
  // If the return type spans multiple lines, wrap before the function name.
  if (Current.isOneOf(TT_FunctionDeclarationName, tok::kw_operator) &&
      State.Stack.back().BreakBeforeParameter)
    return true;
  if (startsSegmentOfBuilderTypeCall(Current) &&
      (State.Stack.back().CallContinuation != 0 ||
       State.Stack.back().BreakBeforeParameter))
    return true;
  // The following could be precomputed as they do not depend on the state.
  // However, as they should take effect only if the UnwrappedLine does not fit
  // into the ColumnLimit, they are checked here in the ContinuationIndenter.
  if (Style.ColumnLimit != 0 && Previous.BlockKind == BK_Block &&
      Previous.is(tok::l_brace) && !Current.isOneOf(tok::r_brace, tok::comment))
    return true;
  if (Current.is(tok::lessless) && Previous.is(tok::identifier) &&
      Previous.TokenText == "endl")
    return true;
  return false;
}
// Appends *State.NextToken to the line, either continuing the current line or
// placing it after a line break, and returns the penalty incurred. A
// TT_ImplicitStringLiteral whose predecessor is not a preprocessor keyword is
// reproduced verbatim: its column is derived from the original source and no
// penalty is charged.
unsigned ContinuationIndenter::addTokenToState(LineState &State, bool Newline,
                                               bool DryRun,
                                               unsigned ExtraSpaces) {
  const FormatToken &Current = *State.NextToken;
  assert(!State.Stack.empty());
  if ((Current.is(TT_ImplicitStringLiteral) &&
       (Current.Previous->Tok.getIdentifierInfo() == nullptr ||
        Current.Previous->Tok.getIdentifierInfo()->getPPKeywordID() ==
            tok::pp_not_keyword))) {
    unsigned EndColumn =
        SourceMgr.getSpellingColumnNumber(Current.WhitespaceRange.getEnd());
    if (Current.LastNewlineOffset != 0) {
      // If there is a newline within this token, the final column will be
      // solely determined by the current end column.
      State.Column = EndColumn;
    } else {
      unsigned StartColumn =
          SourceMgr.getSpellingColumnNumber(Current.WhitespaceRange.getBegin());
      assert(EndColumn >= StartColumn);
      State.Column += EndColumn - StartColumn;
    }
    moveStateToNextToken(State, DryRun, /*Newline=*/false);
    return 0;
  }
  unsigned Penalty = 0;
  if (Newline)
    Penalty = addTokenOnNewLine(State, DryRun);
  else
    addTokenOnCurrentLine(State, DryRun, ExtraSpaces);
  return moveStateToNextToken(State, DryRun, Newline) + Penalty;
}
// Places *State.NextToken on the current line (no break before it): inserts
// the required whitespace (unless DryRun) and updates the alignment and
// indentation bookkeeping on the paren stack. The order of the updates below
// is significant, as several branches read stack fields set by earlier ones.
void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
                                                 unsigned ExtraSpaces) {
  FormatToken &Current = *State.NextToken;
  const FormatToken &Previous = *State.NextToken->Previous;
  // Remember the column of the variable name in a declaration/for-init so
  // that subsequent declarators can be aligned to it.
  if (Current.is(tok::equal) &&
      (State.Line->First->is(tok::kw_for) || Current.NestingLevel == 0) &&
      State.Stack.back().VariablePos == 0) {
    State.Stack.back().VariablePos = State.Column;
    // Move over * and & if they are bound to the variable name.
    const FormatToken *Tok = &Previous;
    while (Tok && State.Stack.back().VariablePos >= Tok->ColumnWidth) {
      State.Stack.back().VariablePos -= Tok->ColumnWidth;
      if (Tok->SpacesRequiredBefore != 0)
        break;
      Tok = Tok->Previous;
    }
    if (Previous.PartOfMultiVariableDeclStmt)
      State.Stack.back().LastSpace = State.Stack.back().VariablePos;
  }
  unsigned Spaces = Current.SpacesRequiredBefore + ExtraSpaces;
  if (!DryRun)
    Whitespaces.replaceWhitespace(Current, /*Newlines=*/0, /*IndentLevel=*/0,
                                  Spaces, State.Column + Spaces);
  // Track the colon position for aligning ObjC selector segments.
  if (Current.is(TT_SelectorName) &&
      !State.Stack.back().ObjCSelectorNameFound) {
    if (Current.LongestObjCSelectorName == 0)
      State.Stack.back().AlignColons = false;
    else if (State.Stack.back().Indent + Current.LongestObjCSelectorName >
             State.Column + Spaces + Current.ColumnWidth)
      State.Stack.back().ColonPos =
          std::max(State.FirstIndent + Style.ContinuationIndentWidth,
                   State.Stack.back().Indent) +
          Current.LongestObjCSelectorName;
    else
      State.Stack.back().ColonPos = State.Column + Spaces + Current.ColumnWidth;
  }
  if (Style.AlignAfterOpenBracket && Previous.opensScope() &&
      Previous.isNot(TT_ObjCMethodExpr) &&
      (Current.isNot(TT_LineComment) || Previous.BlockKind == BK_BracedInit))
    State.Stack.back().Indent = State.Column + Spaces;
  if (State.Stack.back().AvoidBinPacking && startsNextParameter(Current, Style))
    State.Stack.back().NoLineBreak = true;
  if (startsSegmentOfBuilderTypeCall(Current) &&
      State.Column > getNewLineColumn(State))
    State.Stack.back().ContainsUnwrappedBuilder = true;
  if (Current.is(TT_LambdaArrow) && Style.Language == FormatStyle::LK_Java)
    State.Stack.back().NoLineBreak = true;
  if (Current.isMemberAccess() && Previous.is(tok::r_paren) &&
      (Previous.MatchingParen &&
       (Previous.TotalLength - Previous.MatchingParen->TotalLength > 10))) {
    // If there is a function call with long parameters, break before trailing
    // calls. This prevents things like:
    //   EXPECT_CALL(SomeLongParameter).Times(
    //       2);
    // We don't want to do this for short parameters as they can just be
    // indexes.
    State.Stack.back().NoLineBreak = true;
  }
  State.Column += Spaces;
  if (Current.isNot(tok::comment) && Previous.is(tok::l_paren) &&
      Previous.Previous &&
      Previous.Previous->isOneOf(tok::kw_if, tok::kw_for)) {
    // Treat the condition inside an if as if it was a second function
    // parameter, i.e. let nested calls have a continuation indent.
    State.Stack.back().LastSpace = State.Column;
    State.Stack.back().NestedBlockIndent = State.Column;
  } else if (!Current.isOneOf(tok::comment, tok::caret) &&
             (Previous.is(tok::comma) ||
              (Previous.is(tok::colon) && Previous.is(TT_ObjCMethodExpr)))) {
    State.Stack.back().LastSpace = State.Column;
  } else if ((Previous.isOneOf(TT_BinaryOperator, TT_ConditionalExpr,
                               TT_CtorInitializerColon)) &&
             ((Previous.getPrecedence() != prec::Assignment &&
               (Previous.isNot(tok::lessless) || Previous.OperatorIndex != 0 ||
                !Previous.LastOperator)) ||
              Current.StartsBinaryExpression)) {
    // Always indent relative to the RHS of the expression unless this is a
    // simple assignment without binary expression on the RHS. Also indent
    // relative to unary operators and the colons of constructor initializers.
    State.Stack.back().LastSpace = State.Column;
  } else if (Previous.is(TT_InheritanceColon)) {
    State.Stack.back().Indent = State.Column;
    State.Stack.back().LastSpace = State.Column;
  } else if (Previous.opensScope()) {
    // If a function has a trailing call, indent all parameters from the
    // opening parenthesis. This avoids confusing indents like:
    //   OuterFunction(InnerFunctionCall( // break
    //       ParameterToInnerFunction)) // break
    //       .SecondInnerFunctionCall();
    bool HasTrailingCall = false;
    if (Previous.MatchingParen) {
      const FormatToken *Next = Previous.MatchingParen->getNextNonComment();
      HasTrailingCall = Next && Next->isMemberAccess();
    }
    if (HasTrailingCall && State.Stack.size() > 1 &&
        State.Stack[State.Stack.size() - 2].CallContinuation == 0)
      State.Stack.back().LastSpace = State.Column;
  }
}
// Places *State.NextToken at the start of a new line: computes the break
// penalty, moves State.Column to getNewLineColumn(), emits the break (unless
// DryRun), and updates break/alignment flags across the paren stack.
// Returns the accumulated penalty for taking this break.
unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
                                                 bool DryRun) {
  FormatToken &Current = *State.NextToken;
  const FormatToken &Previous = *State.NextToken->Previous;
  // Extra penalty that needs to be added because of the way certain line
  // breaks are chosen.
  unsigned Penalty = 0;
  const FormatToken *PreviousNonComment = Current.getPreviousNonComment();
  const FormatToken *NextNonComment = Previous.getNextNonComment();
  if (!NextNonComment)
    NextNonComment = &Current;
  // The first line break on any NestingLevel causes an extra penalty in order
  // to prefer similar line breaks.
  if (!State.Stack.back().ContainsLineBreak)
    Penalty += 15;
  State.Stack.back().ContainsLineBreak = true;
  Penalty += State.NextToken->SplitPenalty;
  // Breaking before the first "<<" is generally not desirable if the LHS is
  // short. Also always add the penalty if the LHS is split over multiple lines
  // to avoid unnecessary line breaks that just work around this penalty.
  if (NextNonComment->is(tok::lessless) &&
      State.Stack.back().FirstLessLess == 0 &&
      (State.Column <= Style.ColumnLimit / 3 ||
       State.Stack.back().BreakBeforeParameter))
    Penalty += Style.PenaltyBreakFirstLessLess;
  State.Column = getNewLineColumn(State);
  // Indent nested blocks relative to this column, unless in a very specific
  // JavaScript special case where:
  //
  //   var loooooong_name =
  //       function() {
  //     // code
  //   }
  //
  // is common and should be formatted like a free-standing function.
  if (Style.Language != FormatStyle::LK_JavaScript ||
      Current.NestingLevel != 0 || !PreviousNonComment->is(tok::equal) ||
      !Current.is(Keywords.kw_function))
    State.Stack.back().NestedBlockIndent = State.Column;
  if (NextNonComment->isMemberAccess()) {
    // Remember where the first broken "."/"->" of a call chain landed.
    if (State.Stack.back().CallContinuation == 0)
      State.Stack.back().CallContinuation = State.Column;
  } else if (NextNonComment->is(TT_SelectorName)) {
    if (!State.Stack.back().ObjCSelectorNameFound) {
      if (NextNonComment->LongestObjCSelectorName == 0) {
        State.Stack.back().AlignColons = false;
      } else {
        State.Stack.back().ColonPos =
            (Style.IndentWrappedFunctionNames
                 ? std::max(State.Stack.back().Indent,
                            State.FirstIndent + Style.ContinuationIndentWidth)
                 : State.Stack.back().Indent) +
            NextNonComment->LongestObjCSelectorName;
      }
    } else if (State.Stack.back().AlignColons &&
               State.Stack.back().ColonPos <= NextNonComment->ColumnWidth) {
      State.Stack.back().ColonPos = State.Column + NextNonComment->ColumnWidth;
    }
  } else if (PreviousNonComment && PreviousNonComment->is(tok::colon) &&
             PreviousNonComment->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral)) {
    // FIXME: This is hacky, find a better way. The problem is that in an ObjC
    // method expression, the block should be aligned to the line starting it,
    // e.g.:
    //   [aaaaaaaaaaaaaaa aaaaaaaaa: \\ break for some reason
    //                    ^(int *i) {
    //                      // ...
    //                    }];
    // Thus, we set LastSpace of the next higher NestingLevel, to which we move
    // when we consume all of the "}"'s FakeRParens at the "{".
    if (State.Stack.size() > 1)
      State.Stack[State.Stack.size() - 2].LastSpace =
          std::max(State.Stack.back().LastSpace, State.Stack.back().Indent) +
          Style.ContinuationIndentWidth;
  }
  if ((Previous.isOneOf(tok::comma, tok::semi) &&
       !State.Stack.back().AvoidBinPacking) ||
      Previous.is(TT_BinaryOperator))
    State.Stack.back().BreakBeforeParameter = false;
  if (Previous.isOneOf(TT_TemplateCloser, TT_JavaAnnotation) &&
      Current.NestingLevel == 0)
    State.Stack.back().BreakBeforeParameter = false;
  if (NextNonComment->is(tok::question) ||
      (PreviousNonComment && PreviousNonComment->is(tok::question)))
    State.Stack.back().BreakBeforeParameter = true;
  if (Current.is(TT_BinaryOperator) && Current.CanBreakBefore)
    State.Stack.back().BreakBeforeParameter = false;
  if (!DryRun) {
    // Preserve up to MaxEmptyLinesToKeep empty lines, but always at least one
    // newline since we are breaking here.
    unsigned Newlines = std::max(
        1u, std::min(Current.NewlinesBefore, Style.MaxEmptyLinesToKeep + 1));
    Whitespaces.replaceWhitespace(Current, Newlines,
                                  State.Stack.back().IndentLevel, State.Column,
                                  State.Column, State.Line->InPPDirective);
  }
  if (!Current.isTrailingComment())
    State.Stack.back().LastSpace = State.Column;
  State.StartOfLineLevel = Current.NestingLevel;
  State.LowestLevelOnLine = Current.NestingLevel;
  // Any break on this level means that the parent level has been broken
  // and we need to avoid bin packing there.
  bool NestedBlockSpecialCase =
      Current.is(tok::r_brace) && State.Stack.size() > 1 &&
      State.Stack[State.Stack.size() - 2].NestedBlockInlined;
  if (!NestedBlockSpecialCase)
    for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i)
      State.Stack[i].BreakBeforeParameter = true;
  if (PreviousNonComment &&
      !PreviousNonComment->isOneOf(tok::comma, tok::semi) &&
      (PreviousNonComment->isNot(TT_TemplateCloser) ||
       Current.NestingLevel != 0) &&
      !PreviousNonComment->isOneOf(
          TT_BinaryOperator, TT_FunctionAnnotationRParen, TT_JavaAnnotation,
          TT_LeadingJavaAnnotation) &&
      Current.isNot(TT_BinaryOperator) && !PreviousNonComment->opensScope())
    State.Stack.back().BreakBeforeParameter = true;
  // If we break after { or the [ of an array initializer, we should also break
  // before the corresponding } or ].
  if (PreviousNonComment &&
      (PreviousNonComment->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare)))
    State.Stack.back().BreakBeforeClosingBrace = true;
  if (State.Stack.back().AvoidBinPacking) {
    // If we are breaking after '(', '{', '<', this is not bin packing
    // unless AllowAllParametersOfDeclarationOnNextLine is false or this is a
    // dict/object literal.
    if (!Previous.isOneOf(tok::l_paren, tok::l_brace, TT_BinaryOperator) ||
        (!Style.AllowAllParametersOfDeclarationOnNextLine &&
         State.Line->MustBeDeclaration) ||
        Previous.is(TT_DictLiteral))
      State.Stack.back().BreakBeforeParameter = true;
  }
  return Penalty;
}
// Computes the column at which *State.NextToken would start if a line break
// were inserted before it. The checks form an ordered priority chain; the
// first matching rule wins. Returns 0 when there is no current or previous
// token.
unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
  if (!State.NextToken || !State.NextToken->Previous)
    return 0;
  FormatToken &Current = *State.NextToken;
  const FormatToken &Previous = *Current.Previous;
  // If we are continuing an expression, we want to use the continuation indent.
  unsigned ContinuationIndent =
      std::max(State.Stack.back().LastSpace, State.Stack.back().Indent) +
      Style.ContinuationIndentWidth;
  const FormatToken *PreviousNonComment = Current.getPreviousNonComment();
  const FormatToken *NextNonComment = Previous.getNextNonComment();
  if (!NextNonComment)
    NextNonComment = &Current;
  // Java specific bits.
  if (Style.Language == FormatStyle::LK_Java &&
      Current.isOneOf(Keywords.kw_implements, Keywords.kw_extends))
    return std::max(State.Stack.back().LastSpace,
                    State.Stack.back().Indent + Style.ContinuationIndentWidth);
  if (NextNonComment->is(tok::l_brace) && NextNonComment->BlockKind == BK_Block)
    return Current.NestingLevel == 0 ? State.FirstIndent
                                     : State.Stack.back().Indent;
  if (Current.isOneOf(tok::r_brace, tok::r_square) && State.Stack.size() > 1) {
    if (Current.closesBlockTypeList(Style))
      return State.Stack[State.Stack.size() - 2].NestedBlockIndent;
    if (Current.MatchingParen &&
        Current.MatchingParen->BlockKind == BK_BracedInit)
      return State.Stack[State.Stack.size() - 2].LastSpace;
    return State.FirstIndent;
  }
  if (Current.is(tok::identifier) && Current.Next &&
      Current.Next->is(TT_DictLiteral))
    return State.Stack.back().Indent;
  // Continued string literals align under the start of the first literal.
  if (NextNonComment->isStringLiteral() && State.StartOfStringLiteral != 0)
    return State.StartOfStringLiteral;
  if (NextNonComment->is(TT_ObjCStringLiteral) &&
      State.StartOfStringLiteral != 0)
    return State.StartOfStringLiteral - 1;
  if (NextNonComment->is(tok::lessless) &&
      State.Stack.back().FirstLessLess != 0)
    return State.Stack.back().FirstLessLess;
  if (NextNonComment->isMemberAccess()) {
    if (State.Stack.back().CallContinuation == 0)
      return ContinuationIndent;
    return State.Stack.back().CallContinuation;
  }
  if (State.Stack.back().QuestionColumn != 0 &&
      ((NextNonComment->is(tok::colon) &&
        NextNonComment->is(TT_ConditionalExpr)) ||
       Previous.is(TT_ConditionalExpr)))
    return State.Stack.back().QuestionColumn;
  if (Previous.is(tok::comma) && State.Stack.back().VariablePos != 0)
    return State.Stack.back().VariablePos;
  if ((PreviousNonComment &&
       (PreviousNonComment->ClosesTemplateDeclaration ||
        PreviousNonComment->isOneOf(
            TT_AttributeParen, TT_FunctionAnnotationRParen, TT_JavaAnnotation,
            TT_LeadingJavaAnnotation))) ||
      (!Style.IndentWrappedFunctionNames &&
       NextNonComment->isOneOf(tok::kw_operator, TT_FunctionDeclarationName)))
    return std::max(State.Stack.back().LastSpace, State.Stack.back().Indent);
  if (NextNonComment->is(TT_SelectorName)) {
    if (!State.Stack.back().ObjCSelectorNameFound) {
      if (NextNonComment->LongestObjCSelectorName == 0)
        return State.Stack.back().Indent;
      return (Style.IndentWrappedFunctionNames
                  ? std::max(State.Stack.back().Indent,
                             State.FirstIndent + Style.ContinuationIndentWidth)
                  : State.Stack.back().Indent) +
             NextNonComment->LongestObjCSelectorName -
             NextNonComment->ColumnWidth;
    }
    if (!State.Stack.back().AlignColons)
      return State.Stack.back().Indent;
    if (State.Stack.back().ColonPos > NextNonComment->ColumnWidth)
      return State.Stack.back().ColonPos - NextNonComment->ColumnWidth;
    return State.Stack.back().Indent;
  }
  if (NextNonComment->is(TT_ArraySubscriptLSquare)) {
    if (State.Stack.back().StartOfArraySubscripts != 0)
      return State.Stack.back().StartOfArraySubscripts;
    return ContinuationIndent;
  }
  // This ensures that we correctly format ObjC methods calls without inputs,
  // i.e. where the last element isn't selector like: [callee method];
  if (NextNonComment->is(tok::identifier) && NextNonComment->FakeRParens == 0 &&
      NextNonComment->Next && NextNonComment->Next->is(TT_ObjCMethodExpr))
    return State.Stack.back().Indent;
  if (NextNonComment->isOneOf(TT_StartOfName, TT_PointerOrReference) ||
      Previous.isOneOf(tok::coloncolon, tok::equal, TT_JsTypeColon))
    return ContinuationIndent;
  if (PreviousNonComment && PreviousNonComment->is(tok::colon) &&
      PreviousNonComment->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral))
    return ContinuationIndent;
  if (NextNonComment->is(TT_CtorInitializerColon))
    return State.FirstIndent + Style.ConstructorInitializerIndentWidth;
  if (NextNonComment->is(TT_CtorInitializerComma))
    return State.Stack.back().Indent;
  if (Previous.is(tok::r_paren) && !Current.isBinaryOperator() &&
      !Current.isOneOf(tok::colon, tok::comment))
    return ContinuationIndent;
  if (State.Stack.back().Indent == State.FirstIndent && PreviousNonComment &&
      PreviousNonComment->isNot(tok::r_brace))
    // Ensure that we fall back to the continuation indent width instead of
    // just flushing continuations left.
    return State.Stack.back().Indent + Style.ContinuationIndentWidth;
  return State.Stack.back().Indent;
}
// Records the effects of consuming *State.NextToken, which has already been
// placed (with or without a preceding break, per \p Newline): updates
// alignment columns and paren-stack flags, handles fake/real scope
// transitions, tracks string-literal starts, advances State.NextToken, and
// returns any penalty from breaking the token itself or from token roles.
unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
                                                    bool DryRun, bool Newline) {
  assert(State.Stack.size());
  const FormatToken &Current = *State.NextToken;
  if (Current.is(TT_InheritanceColon))
    State.Stack.back().AvoidBinPacking = true;
  if (Current.is(tok::lessless) && Current.isNot(TT_OverloadedOperator)) {
    // Remember the column of the first "<<"; later ones record whether the
    // operator was wrapped.
    if (State.Stack.back().FirstLessLess == 0)
      State.Stack.back().FirstLessLess = State.Column;
    else
      State.Stack.back().LastOperatorWrapped = Newline;
  }
  if ((Current.is(TT_BinaryOperator) && Current.isNot(tok::lessless)) ||
      Current.is(TT_ConditionalExpr))
    State.Stack.back().LastOperatorWrapped = Newline;
  if (Current.is(TT_ArraySubscriptLSquare) &&
      State.Stack.back().StartOfArraySubscripts == 0)
    State.Stack.back().StartOfArraySubscripts = State.Column;
  if ((Current.is(tok::question) && Style.BreakBeforeTernaryOperators) ||
      (Current.getPreviousNonComment() && Current.isNot(tok::colon) &&
       Current.getPreviousNonComment()->is(tok::question) &&
       !Style.BreakBeforeTernaryOperators))
    State.Stack.back().QuestionColumn = State.Column;
  if (!Current.opensScope() && !Current.closesScope())
    State.LowestLevelOnLine =
        std::min(State.LowestLevelOnLine, Current.NestingLevel);
  if (Current.isMemberAccess())
    State.Stack.back().StartOfFunctionCall =
        Current.LastOperator ? 0 : State.Column;
  if (Current.is(TT_SelectorName))
    State.Stack.back().ObjCSelectorNameFound = true;
  if (Current.is(TT_CtorInitializerColon)) {
    // Indent 2 from the column, so:
    //   SomeClass::SomeClass()
    //       : First(...), ...
    //         Next(...)
    //         ^ line up here.
    State.Stack.back().Indent =
        State.Column + (Style.BreakConstructorInitializersBeforeComma ? 0 : 2);
    State.Stack.back().NestedBlockIndent = State.Stack.back().Indent;
    if (Style.ConstructorInitializerAllOnOneLineOrOnePerLine)
      State.Stack.back().AvoidBinPacking = true;
    State.Stack.back().BreakBeforeParameter = false;
  }
  if (Current.isOneOf(TT_BinaryOperator, TT_ConditionalExpr) && Newline)
    State.Stack.back().NestedBlockIndent =
        State.Column + Current.ColumnWidth + 1;
  // Insert scopes created by fake parenthesis.
  const FormatToken *Previous = Current.getPreviousNonComment();
  // Add special behavior to support a format commonly used for JavaScript
  // closures:
  //   SomeFunction(function() {
  //     foo();
  //     bar();
  //   }, a, b, c);
  if (Current.isNot(tok::comment) && Previous &&
      Previous->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) &&
      State.Stack.size() > 1) {
    if (State.Stack[State.Stack.size() - 2].NestedBlockInlined && Newline)
      for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i)
        State.Stack[i].NoLineBreak = true;
    State.Stack[State.Stack.size() - 2].NestedBlockInlined = false;
  }
  if (Previous && (Previous->isOneOf(tok::l_paren, tok::comma, tok::colon) ||
                   Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr)) &&
      !Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)) {
    State.Stack.back().NestedBlockInlined =
        !Newline &&
        (Previous->isNot(tok::l_paren) || Previous->ParameterCount > 1);
  }
  moveStatePastFakeLParens(State, Newline);
  moveStatePastScopeOpener(State, Newline);
  moveStatePastScopeCloser(State);
  moveStatePastFakeRParens(State);
  if (Current.isStringLiteral() && State.StartOfStringLiteral == 0)
    State.StartOfStringLiteral = State.Column;
  if (Current.is(TT_ObjCStringLiteral) && State.StartOfStringLiteral == 0)
    State.StartOfStringLiteral = State.Column + 1;
  else if (!Current.isOneOf(tok::comment, tok::identifier, tok::hash) &&
           !Current.isStringLiteral())
    State.StartOfStringLiteral = 0;
  State.Column += Current.ColumnWidth;
  State.NextToken = State.NextToken->Next;
  unsigned Penalty = breakProtrudingToken(Current, State, DryRun);
  // Charge a per-character penalty for anything protruding past the limit.
  if (State.Column > getColumnLimit(State)) {
    unsigned ExcessCharacters = State.Column - getColumnLimit(State);
    Penalty += Style.PenaltyExcessCharacter * ExcessCharacters;
  }
  if (Current.Role)
    Current.Role->formatFromToken(State, this, DryRun);
  // If the previous has a special role, let it consume tokens as appropriate.
  // It is necessary to start at the previous token for the only implemented
  // role (comma separated list). That way, the decision whether or not to break
  // after the "{" is already done and both options are tried and evaluated.
  // FIXME: This is ugly, find a better way.
  if (Previous && Previous->Role)
    Penalty += Previous->Role->formatAfterToken(State, this, DryRun);
  return Penalty;
}
// Pushes one ParenState for every fake (operator-precedence) parenthesis
// opened at the current token, computing indentation and line-break
// restrictions for the expression level each one represents.
void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
                                                    bool Newline) {
  const FormatToken &Current = *State.NextToken;
  const FormatToken *Previous = Current.getPreviousNonComment();
  // Don't add extra indentation for the first fake parenthesis after
  // 'return', assignments or opening <({[. The indentation for these cases
  // is special cased.
  bool SkipFirstExtraIndent =
      (Previous && (Previous->opensScope() ||
                    Previous->isOneOf(tok::semi, tok::kw_return) ||
                    (Previous->getPrecedence() == prec::Assignment &&
                     Style.AlignOperands) ||
                    Previous->is(TT_ObjCMethodExpr)));
  // FakeLParens is iterated in reverse so that the outermost (lowest
  // precedence) level is pushed first.
  for (SmallVectorImpl<prec::Level>::const_reverse_iterator
           I = Current.FakeLParens.rbegin(),
           E = Current.FakeLParens.rend();
       I != E; ++I) {
    ParenState NewParenState = State.Stack.back();
    NewParenState.ContainsLineBreak = false;
    // Indent from 'LastSpace' unless these are fake parentheses encapsulating
    // a builder type call after 'return' or, if the alignment after opening
    // brackets is disabled.
    if (!Current.isTrailingComment() &&
        (Style.AlignOperands || *I < prec::Assignment) &&
        (!Previous || Previous->isNot(tok::kw_return) ||
         (Style.Language != FormatStyle::LK_Java && *I > 0)) &&
        (Style.AlignAfterOpenBracket || *I != prec::Comma ||
         Current.NestingLevel == 0))
      NewParenState.Indent =
          std::max(std::max(State.Column, NewParenState.Indent),
                   State.Stack.back().LastSpace);
    // Don't allow the RHS of an operator to be split over multiple lines unless
    // there is a line-break right after the operator.
    // Exclude relational operators, as there, it is always more desirable to
    // have the LHS 'left' of the RHS.
    if (Previous && Previous->getPrecedence() > prec::Assignment &&
        Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr) &&
        Previous->getPrecedence() != prec::Relational) {
      bool BreakBeforeOperator =
          Previous->is(tok::lessless) ||
          (Previous->is(TT_BinaryOperator) &&
           Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None) ||
          (Previous->is(TT_ConditionalExpr) &&
           Style.BreakBeforeTernaryOperators);
      if ((!Newline && !BreakBeforeOperator) ||
          (!State.Stack.back().LastOperatorWrapped && BreakBeforeOperator))
        NewParenState.NoLineBreak = true;
    }
    // Do not indent relative to the fake parentheses inserted for "." or "->".
    // This is a special case to make the following two statements consistent:
    //   OuterFunction(InnerFunctionCall( // break
    //       ParameterToInnerFunction));
    //   OuterFunction(SomeObject.InnerFunctionCall( // break
    //       ParameterToInnerFunction));
    if (*I > prec::Unknown)
      NewParenState.LastSpace = std::max(NewParenState.LastSpace, State.Column);
    if (*I != prec::Conditional && !Current.is(TT_UnaryOperator))
      NewParenState.StartOfFunctionCall = State.Column;
    // Always indent conditional expressions. Never indent expression where
    // the 'operator' is ',', ';' or an assignment (i.e. *I <=
    // prec::Assignment) as those have different indentation rules. Indent
    // other expression, unless the indentation needs to be skipped.
    if (*I == prec::Conditional ||
        (!SkipFirstExtraIndent && *I > prec::Assignment &&
         !Current.isTrailingComment()))
      NewParenState.Indent += Style.ContinuationIndentWidth;
    if ((Previous && !Previous->opensScope()) || *I > prec::Comma)
      NewParenState.BreakBeforeParameter = false;
    State.Stack.push_back(NewParenState);
    SkipFirstExtraIndent = false;
  }
}
// Pops one ParenState per fake closing parenthesis attached to the current
// token, propagating VariablePos into the surviving level. The outermost
// ParenState is never removed.
void ContinuationIndenter::moveStatePastFakeRParens(LineState &State) {
  unsigned Remaining = State.NextToken->FakeRParens;
  while (Remaining-- > 0) {
    // Do not pop the last element.
    if (State.Stack.size() <= 1)
      break;
    unsigned SavedVariablePos = State.Stack.back().VariablePos;
    State.Stack.pop_back();
    State.Stack.back().VariablePos = SavedVariablePos;
  }
}
// Pushes a new ParenState when the current token opens a real scope
// ('(', '[', '{' or '<'), computing the new indent, bin-packing behavior
// and break requirements. Blocks ({...} with BK_Block) are delegated to
// moveStateToNewBlock().
void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
                                                    bool Newline) {
  const FormatToken &Current = *State.NextToken;
  if (!Current.opensScope())
    return;
  if (Current.MatchingParen && Current.BlockKind == BK_Block) {
    moveStateToNewBlock(State);
    return;
  }
  unsigned NewIndent;
  unsigned NewIndentLevel = State.Stack.back().IndentLevel;
  unsigned LastSpace = State.Stack.back().LastSpace;
  bool AvoidBinPacking;
  bool BreakBeforeParameter = false;
  unsigned NestedBlockIndent = std::max(State.Stack.back().StartOfFunctionCall,
                                        State.Stack.back().NestedBlockIndent);
  if (Current.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare)) {
    // Braced lists and array initializers.
    if (Current.opensBlockTypeList(Style)) {
      NewIndent = State.Stack.back().NestedBlockIndent + Style.IndentWidth;
      NewIndent = std::min(State.Column + 2, NewIndent);
      ++NewIndentLevel;
    } else {
      NewIndent = State.Stack.back().LastSpace + Style.ContinuationIndentWidth;
    }
    const FormatToken *NextNoComment = Current.getNextNonComment();
    AvoidBinPacking =
        Current.isOneOf(TT_ArrayInitializerLSquare, TT_DictLiteral) ||
        Style.Language == FormatStyle::LK_Proto || !Style.BinPackArguments ||
        (NextNoComment && NextNoComment->is(TT_DesignatedInitializerPeriod));
    if (Current.ParameterCount > 1)
      NestedBlockIndent = std::max(NestedBlockIndent, State.Column + 1);
  } else {
    // Parentheses, subscripts and template argument lists.
    NewIndent = Style.ContinuationIndentWidth +
                std::max(State.Stack.back().LastSpace,
                         State.Stack.back().StartOfFunctionCall);
    // Ensure that different brackets force relative alignment, e.g.:
    //   void SomeFunction(vector<  // break
    //       int> v);
    // FIXME: We likely want to do this for more combinations of brackets.
    // Verify that it is wanted for ObjC, too.
    if (Current.Tok.getKind() == tok::less &&
        Current.ParentBracket == tok::l_paren) {
      NewIndent = std::max(NewIndent, State.Stack.back().Indent);
      LastSpace = std::max(LastSpace, State.Stack.back().Indent);
    }
    AvoidBinPacking =
        (State.Line->MustBeDeclaration && !Style.BinPackParameters) ||
        (!State.Line->MustBeDeclaration && !Style.BinPackArguments) ||
        (Style.ExperimentalAutoDetectBinPacking &&
         (Current.PackingKind == PPK_OnePerLine ||
          (!BinPackInconclusiveFunctions &&
           Current.PackingKind == PPK_Inconclusive)));
    if (Current.is(TT_ObjCMethodExpr) && Current.MatchingParen) {
      if (Style.ColumnLimit) {
        // If this '[' opens an ObjC call, determine whether all parameters fit
        // into one line and put one per line if they don't.
        if (getLengthToMatchingParen(Current) + State.Column >
            getColumnLimit(State))
          BreakBeforeParameter = true;
      } else {
        // For ColumnLimit = 0, we have to figure out whether there is or has to
        // be a line break within this call.
        for (const FormatToken *Tok = &Current;
             Tok && Tok != Current.MatchingParen; Tok = Tok->Next) {
          if (Tok->MustBreakBefore ||
              (Tok->CanBreakBefore && Tok->NewlinesBefore > 0)) {
            BreakBeforeParameter = true;
            break;
          }
        }
      }
    }
  }
  // Inside a template opener of an unwrapped builder expression, breaking
  // is forbidden; otherwise inherit the enclosing level's restriction.
  bool NoLineBreak = State.Stack.back().NoLineBreak ||
                     (Current.is(TT_TemplateOpener) &&
                      State.Stack.back().ContainsUnwrappedBuilder);
  State.Stack.push_back(ParenState(NewIndent, NewIndentLevel, LastSpace,
                                   AvoidBinPacking, NoLineBreak));
  State.Stack.back().NestedBlockIndent = NestedBlockIndent;
  State.Stack.back().BreakBeforeParameter = BreakBeforeParameter;
  State.Stack.back().HasMultipleNestedBlocks = Current.BlockParameterCount > 1;
}
// Leaves the innermost paren level when the current token is a closing
// ')', ']', '}' or template '>', and resets the array-subscript marker
// after the final ']' of a subscript chain.
void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
  const FormatToken &Current = *State.NextToken;
  if (!Current.closesScope())
    return;
  // A closing brace only ends a level here when it is not the first token
  // of the line.
  bool EndsLevel =
      Current.isOneOf(tok::r_paren, tok::r_square) ||
      (Current.is(tok::r_brace) && State.NextToken != State.Line->First) ||
      State.NextToken->is(TT_TemplateCloser);
  if (EndsLevel && State.Stack.size() > 1)
    State.Stack.pop_back();
  if (!Current.is(tok::r_square))
    return;
  // If this ']' is not immediately followed by another '[', the array
  // subscript expression has ended; reset the corresponding value.
  const FormatToken *Following = Current.getNextNonComment();
  if (Following && Following->isNot(tok::l_square))
    State.Stack.back().StartOfArraySubscripts = 0;
}
// Opens a new ParenState for a nested block, indented relative to the
// current nested-block indent.
void ContinuationIndenter::moveStateToNewBlock(LineState &State) {
  unsigned BlockIndent = State.Stack.back().NestedBlockIndent;
  // ObjC blocks sometimes follow special indentation rules.
  unsigned Width = Style.IndentWidth;
  if (State.NextToken->is(TT_ObjCBlockLBrace))
    Width = Style.ObjCBlockIndentWidth;
  ParenState NewState(BlockIndent + Width,
                      /*NewIndentLevel=*/State.Stack.back().IndentLevel + 1,
                      State.Stack.back().LastSpace, /*AvoidBinPacking=*/true,
                      State.Stack.back().NoLineBreak);
  NewState.NestedBlockIndent = BlockIndent;
  NewState.BreakBeforeParameter = true;
  State.Stack.push_back(NewState);
}
unsigned ContinuationIndenter::addMultilineToken(const FormatToken &Current,
LineState &State) {
// Break before further function parameters on all levels.
for (unsigned i = 0, e = State.Stack.size(); i != e; ++i)
State.Stack[i].BreakBeforeParameter = true;
unsigned ColumnsUsed = State.Column;
// We can only affect layout of the first and the last line, so the penalty
// for all other lines is constant, and we ignore it.
State.Column = Current.LastLineColumnWidth;
if (ColumnsUsed > getColumnLimit(State))
return Style.PenaltyExcessCharacter * (ColumnsUsed - getColumnLimit(State));
return 0;
}
// Breaks a token that protrudes beyond the column limit (string literals,
// block comments and line comments) into multiple lines and returns the
// accumulated penalty. With DryRun set, only the penalty is computed and
// no whitespace replacements are recorded.
unsigned ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
                                                    LineState &State,
                                                    bool DryRun) {
  // Don't break multi-line tokens other than block comments. Instead, just
  // update the state.
  if (Current.isNot(TT_BlockComment) && Current.IsMultiline)
    return addMultilineToken(Current, State);
  // Don't break implicit string literals or import statements.
  if (Current.is(TT_ImplicitStringLiteral) ||
      State.Line->Type == LT_ImportStatement)
    return 0;
  if (!Current.isStringLiteral() && !Current.is(tok::comment))
    return 0;
  std::unique_ptr<BreakableToken> Token;
  unsigned StartColumn = State.Column - Current.ColumnWidth;
  unsigned ColumnLimit = getColumnLimit(State);
  if (Current.isStringLiteral()) {
    // FIXME: String literal breaking is currently disabled for Java and JS, as
    // it requires strings to be merged using "+" which we don't support.
    if (Style.Language == FormatStyle::LK_Java ||
        Style.Language == FormatStyle::LK_JavaScript)
      return 0;
    // Don't break string literals inside preprocessor directives (except for
    // #define directives, as their contents are stored in separate lines and
    // are not affected by this check).
    // This way we avoid breaking code with line directives and unknown
    // preprocessor directives that contain long string literals.
    if (State.Line->Type == LT_PreprocessorDirective)
      return 0;
    // Exempts unterminated string literals from line breaking. The user will
    // likely want to terminate the string before any line breaking is done.
    if (Current.IsUnterminatedLiteral)
      return 0;
    StringRef Text = Current.TokenText;
    StringRef Prefix;
    StringRef Postfix;
    bool IsNSStringLiteral = false;
    // FIXME: Handle whitespace between '_T', '(', '"..."', and ')'.
    // FIXME: Store Prefix and Suffix (or PrefixLength and SuffixLength to
    // reduce the overhead) for each FormatToken, which is a string, so that we
    // don't run multiple checks here on the hot path.
    if (Text.startswith("\"") && Current.Previous &&
        Current.Previous->is(tok::at)) {
      IsNSStringLiteral = true;
      Prefix = "@\"";
    }
    // Note: the assignments to Prefix/Postfix inside this condition are
    // intentional side effects of the short-circuiting checks; the values
    // set by the last evaluated check are the ones used below.
    if ((Text.endswith(Postfix = "\"") &&
         (IsNSStringLiteral || Text.startswith(Prefix = "\"") ||
          Text.startswith(Prefix = "u\"") || Text.startswith(Prefix = "U\"") ||
          Text.startswith(Prefix = "u8\"") ||
          Text.startswith(Prefix = "L\""))) ||
        (Text.startswith(Prefix = "_T(\"") && Text.endswith(Postfix = "\")"))) {
      Token.reset(new BreakableStringLiteral(
          Current, State.Line->Level, StartColumn, Prefix, Postfix,
          State.Line->InPPDirective, Encoding, Style));
    } else {
      return 0;
    }
  } else if (Current.is(TT_BlockComment) && Current.isTrailingComment()) {
    // Comments matching CommentPragmasRegex are never broken.
    if (CommentPragmasRegex.match(Current.TokenText.substr(2)))
      return 0;
    Token.reset(new BreakableBlockComment(
        Current, State.Line->Level, StartColumn, Current.OriginalColumn,
        !Current.Previous, State.Line->InPPDirective, Encoding, Style));
  } else if (Current.is(TT_LineComment) &&
             (Current.Previous == nullptr ||
              Current.Previous->isNot(TT_ImplicitStringLiteral))) {
    if (CommentPragmasRegex.match(Current.TokenText.substr(2)))
      return 0;
    Token.reset(new BreakableLineComment(Current, State.Line->Level,
                                         StartColumn, /*InPPDirective=*/false,
                                         Encoding, Style));
    // We don't insert backslashes when breaking line comments.
    ColumnLimit = Style.ColumnLimit;
  } else {
    return 0;
  }
  if (Current.UnbreakableTailLength >= ColumnLimit)
    return 0;
  unsigned RemainingSpace = ColumnLimit - Current.UnbreakableTailLength;
  bool BreakInserted = false;
  unsigned Penalty = 0;
  unsigned RemainingTokenColumns = 0;
  // Repeatedly split each line of the breakable token until the remainder
  // fits into the available space, accumulating the split penalties.
  for (unsigned LineIndex = 0, EndIndex = Token->getLineCount();
       LineIndex != EndIndex; ++LineIndex) {
    if (!DryRun)
      Token->replaceWhitespaceBefore(LineIndex, Whitespaces);
    unsigned TailOffset = 0;
    RemainingTokenColumns =
        Token->getLineLengthAfterSplit(LineIndex, TailOffset, StringRef::npos);
    while (RemainingTokenColumns > RemainingSpace) {
      BreakableToken::Split Split =
          Token->getSplit(LineIndex, TailOffset, ColumnLimit);
      if (Split.first == StringRef::npos) {
        // The last line's penalty is handled in addNextStateToQueue().
        if (LineIndex < EndIndex - 1)
          Penalty += Style.PenaltyExcessCharacter *
                     (RemainingTokenColumns - RemainingSpace);
        break;
      }
      assert(Split.first != 0);
      unsigned NewRemainingTokenColumns = Token->getLineLengthAfterSplit(
          LineIndex, TailOffset + Split.first + Split.second, StringRef::npos);
      // We can remove extra whitespace instead of breaking the line.
      if (RemainingTokenColumns + 1 - Split.second <= RemainingSpace) {
        RemainingTokenColumns = 0;
        if (!DryRun)
          Token->replaceWhitespace(LineIndex, TailOffset, Split, Whitespaces);
        break;
      }
      // When breaking before a tab character, it may be moved by a few columns,
      // but will still be expanded to the next tab stop, so we don't save any
      // columns.
      if (NewRemainingTokenColumns == RemainingTokenColumns)
        break;
      assert(NewRemainingTokenColumns < RemainingTokenColumns);
      if (!DryRun)
        Token->insertBreak(LineIndex, TailOffset, Split, Whitespaces);
      Penalty += Current.SplitPenalty;
      unsigned ColumnsUsed =
          Token->getLineLengthAfterSplit(LineIndex, TailOffset, Split.first);
      if (ColumnsUsed > ColumnLimit) {
        Penalty += Style.PenaltyExcessCharacter * (ColumnsUsed - ColumnLimit);
      }
      TailOffset += Split.first + Split.second;
      RemainingTokenColumns = NewRemainingTokenColumns;
      BreakInserted = true;
    }
  }
  State.Column = RemainingTokenColumns;
  if (BreakInserted) {
    // If we break the token inside a parameter list, we need to break before
    // the next parameter on all levels, so that the next parameter is clearly
    // visible. Line comments already introduce a break.
    if (Current.isNot(TT_LineComment)) {
      for (unsigned i = 0, e = State.Stack.size(); i != e; ++i)
        State.Stack[i].BreakBeforeParameter = true;
    }
    Penalty += Current.isStringLiteral() ? Style.PenaltyBreakString
                                         : Style.PenaltyBreakComment;
    State.Stack.back().LastSpace = StartColumn;
  }
  return Penalty;
}
// Returns the effective column limit for the current line. Inside a
// preprocessor directive two columns are reserved for the trailing " \".
unsigned ContinuationIndenter::getColumnLimit(const LineState &State) const {
  unsigned Reserved = State.Line->InPPDirective ? 2 : 0;
  return Style.ColumnLimit - Reserved;
}
bool ContinuationIndenter::nextIsMultilineString(const LineState &State) {
const FormatToken &Current = *State.NextToken;
if (!Current.isStringLiteral() || Current.is(TT_ImplicitStringLiteral))
return false;
// We never consider raw string literals "multiline" for the purpose of
// AlwaysBreakBeforeMultilineStrings implementation as they are special-cased
// (see TokenAnnotator::mustBreakBefore().
if (Current.TokenText.startswith("R\""))
return false;
if (Current.IsMultiline)
return true;
if (Current.getNextNonComment() &&
Current.getNextNonComment()->isStringLiteral())
return true; // Implicit concatenation.
if (Style.ColumnLimit != 0 &&
State.Column + Current.ColumnWidth + Current.UnbreakableTailLength >
Style.ColumnLimit)
return true; // String will be split.
return false;
}
} // namespace format
} // namespace clang
//===--- TokenAnnotator.h - Format C++ code ---------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file implements a token annotator, i.e. creates
/// \c AnnotatedTokens out of \c FormatTokens with required extra information.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_FORMAT_TOKENANNOTATOR_H
#define LLVM_CLANG_LIB_FORMAT_TOKENANNOTATOR_H
#include "UnwrappedLineParser.h"
#include "clang/Format/Format.h"
#include <string>
namespace clang {
class SourceManager;
namespace format {
// The type of an annotated line, used to select type-specific formatting
// rules.
enum LineType {
  LT_Invalid,
  LT_ImportStatement,
  LT_ObjCDecl, // An @interface, @implementation, or @protocol line.
  LT_ObjCMethodDecl,
  LT_ObjCProperty, // An @property line.
  LT_Other,
  LT_PreprocessorDirective,
  LT_VirtualFunctionDecl
};
/// \brief An unwrapped line together with the doubly-linked token list and
/// the formatting-relevant properties computed for it.
class AnnotatedLine {
public:
  AnnotatedLine(const UnwrappedLine &Line)
      : First(Line.Tokens.front().Tok), Level(Line.Level),
        InPPDirective(Line.InPPDirective),
        MustBeDeclaration(Line.MustBeDeclaration), MightBeFunctionDecl(false),
        IsMultiVariableDeclStmt(false), Affected(false),
        LeadingEmptyLinesAffected(false), ChildrenAffected(false) {
    assert(!Line.Tokens.empty());
    // Calculate Next and Previous for all tokens. Note that we must overwrite
    // Next and Previous for every token, as previous formatting runs might have
    // left them in a different state.
    First->Previous = nullptr;
    FormatToken *Current = First;
    for (std::list<UnwrappedLineNode>::const_iterator I = ++Line.Tokens.begin(),
                                                      E = Line.Tokens.end();
         I != E; ++I) {
      const UnwrappedLineNode &Node = *I;
      Current->Next = I->Tok;
      I->Tok->Previous = Current;
      Current = Current->Next;
      Current->Children.clear();
      // Wrap each child line in an AnnotatedLine owned by this line and
      // attach it to the token it belongs to.
      for (const auto &Child : Node.Children) {
        Children.push_back(new AnnotatedLine(Child));
        Current->Children.push_back(Children.back());
      }
    }
    Last = Current;
    Last->Next = nullptr;
  }
  ~AnnotatedLine() {
    // Delete the owned child lines ...
    for (unsigned i = 0, e = Children.size(); i != e; ++i) {
      delete Children[i];
    }
    // ... and clear the child/role pointers stored in the tokens (the
    // FormatTokens themselves are not owned by this line).
    FormatToken *Current = First;
    while (Current) {
      Current->Children.clear();
      Current->Role.reset();
      Current = Current->Next;
    }
  }
  /// \c true if this line starts with the given tokens in order, ignoring
  /// comments.
  template <typename... Ts> bool startsWith(Ts... Tokens) const {
    return startsWith(First, Tokens...);
  }
  // First and last token of the line (inclusive).
  FormatToken *First;
  FormatToken *Last;
  // Lines nested inside this one; owned by this AnnotatedLine.
  SmallVector<AnnotatedLine *, 0> Children;
  LineType Type;
  unsigned Level;
  bool InPPDirective;
  bool MustBeDeclaration;
  bool MightBeFunctionDecl;
  bool IsMultiVariableDeclStmt;
  /// \c True if this line should be formatted, i.e. intersects directly or
  /// indirectly with one of the input ranges.
  bool Affected;
  /// \c True if the leading empty lines of this line intersect with one of the
  /// input ranges.
  bool LeadingEmptyLinesAffected;
  /// \c True if one of this line's children intersects with an input range.
  bool ChildrenAffected;
private:
  // Disallow copying.
  AnnotatedLine(const AnnotatedLine &) = delete;
  void operator=(const AnnotatedLine &) = delete;
  // Base case: true if the first non-comment token starting at \p Tok is K1.
  template <typename A, typename... Ts>
  bool startsWith(FormatToken *Tok, A K1) const {
    while (Tok && Tok->is(tok::comment))
      Tok = Tok->Next;
    return Tok && Tok->is(K1);
  }
  // Recursive case: K1 must match, followed by the remaining kinds in order.
  template <typename A, typename... Ts>
  bool startsWith(FormatToken *Tok, A K1, Ts... Tokens) const {
    return startsWith(Tok, K1) && startsWith(Tok->Next, Tokens...);
  }
};
/// \brief Determines extra information about the tokens comprising an
/// \c UnwrappedLine.
class TokenAnnotator {
public:
  TokenAnnotator(const FormatStyle &Style, const AdditionalKeywords &Keywords)
      : Style(Style), Keywords(Keywords) {}
  /// \brief Adapts the indent levels of comment lines to the indent of the
  /// subsequent line.
  // FIXME: Can/should this be done in the UnwrappedLineParser?
  void setCommentLineLevels(SmallVectorImpl<AnnotatedLine *> &Lines);
  void annotate(AnnotatedLine &Line);
  void calculateFormattingInformation(AnnotatedLine &Line);
private:
  /// \brief Calculate the penalty for splitting before \c Tok.
  unsigned splitPenalty(const AnnotatedLine &Line, const FormatToken &Tok,
                        bool InFunctionDecl);
  /// \brief Whether a space is required between \p Left and \p Right.
  bool spaceRequiredBetween(const AnnotatedLine &Line, const FormatToken &Left,
                            const FormatToken &Right);
  /// \brief Whether a space is required before \p Tok.
  bool spaceRequiredBefore(const AnnotatedLine &Line, const FormatToken &Tok);
  /// \brief Whether a line break before \p Right is mandatory.
  bool mustBreakBefore(const AnnotatedLine &Line, const FormatToken &Right);
  /// \brief Whether a line break before \p Right is permitted.
  bool canBreakBefore(const AnnotatedLine &Line, const FormatToken &Right);
  void printDebugInfo(const AnnotatedLine &Line);
  void calculateUnbreakableTailLengths(AnnotatedLine &Line);
  const FormatStyle &Style;
  const AdditionalKeywords &Keywords;
};
} // end namespace format
} // end namespace clang
#endif
//===--- WhitespaceManager.cpp - Format C++ code --------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file implements WhitespaceManager class.
///
//===----------------------------------------------------------------------===//
#include "WhitespaceManager.h"
#include "llvm/ADT/STLExtras.h"
namespace clang {
namespace format {
// Orders two whitespace changes by the file position of the whitespace
// range they replace.
bool WhitespaceManager::Change::IsBeforeInFile::
operator()(const Change &C1, const Change &C2) const {
  SourceLocation First = C1.OriginalWhitespaceRange.getBegin();
  SourceLocation Second = C2.OriginalWhitespaceRange.getBegin();
  return SourceMgr.isBeforeInTranslationUnit(First, Second);
}
// Constructs a whitespace change record. Fields computed by later passes
// (calculateLineBreakInformation() fills IsTrailingComment, TokenLength,
// PreviousEndOfTokenColumn, StartOfBlockComment and IndentationOffset;
// alignEscapedNewlines() fills EscapedNewlineColumn) are zero-initialized
// here.
WhitespaceManager::Change::Change(
    bool CreateReplacement, const SourceRange &OriginalWhitespaceRange,
    unsigned IndentLevel, int Spaces, unsigned StartOfTokenColumn,
    unsigned NewlinesBefore, StringRef PreviousLinePostfix,
    StringRef CurrentLinePrefix, tok::TokenKind Kind, bool ContinuesPPDirective)
    : CreateReplacement(CreateReplacement),
      OriginalWhitespaceRange(OriginalWhitespaceRange),
      StartOfTokenColumn(StartOfTokenColumn), NewlinesBefore(NewlinesBefore),
      PreviousLinePostfix(PreviousLinePostfix),
      CurrentLinePrefix(CurrentLinePrefix), Kind(Kind),
      ContinuesPPDirective(ContinuesPPDirective), IndentLevel(IndentLevel),
      Spaces(Spaces), IsTrailingComment(false), TokenLength(0),
      PreviousEndOfTokenColumn(0), EscapedNewlineColumn(0),
      StartOfBlockComment(nullptr), IndentationOffset(0) {}
// Discards all recorded changes and generated replacements so the manager
// can be reused for another formatting run.
void WhitespaceManager::reset() {
  Changes.clear();
  Replaces.clear();
}
// Records a replacement of the whitespace in front of \p Tok with the
// given number of newlines and spaces. Finalized tokens are left alone.
void WhitespaceManager::replaceWhitespace(FormatToken &Tok, unsigned Newlines,
                                          unsigned IndentLevel, unsigned Spaces,
                                          unsigned StartOfTokenColumn,
                                          bool InPPDirective) {
  if (Tok.Finalized)
    return;
  // Remember whether the formatter decided to break before this token.
  if (Newlines > 0)
    Tok.Decision = FD_Break;
  else
    Tok.Decision = FD_Continue;
  bool ContinuesPPDirective = InPPDirective && !Tok.IsFirst;
  Changes.push_back(Change(/*CreateReplacement=*/true, Tok.WhitespaceRange,
                           IndentLevel, Spaces, StartOfTokenColumn, Newlines,
                           "", "", Tok.Tok.getKind(), ContinuesPPDirective));
}
// Records the position of \p Tok without creating a replacement, so later
// alignment passes still account for it. Finalized tokens are skipped.
void WhitespaceManager::addUntouchableToken(const FormatToken &Tok,
                                            bool InPPDirective) {
  if (Tok.Finalized)
    return;
  bool ContinuesPPDirective = InPPDirective && !Tok.IsFirst;
  Changes.push_back(Change(/*CreateReplacement=*/false, Tok.WhitespaceRange,
                           /*IndentLevel=*/0, /*Spaces=*/0, Tok.OriginalColumn,
                           Tok.NewlinesBefore, "", "", Tok.Tok.getKind(),
                           ContinuesPPDirective));
}
// Replaces \p ReplaceChars characters *inside* \p Tok, starting \p Offset
// characters after the token's start, with new whitespace. \p
// PreviousPostfix is emitted at the end of the previous line and \p
// CurrentPrefix at the start of the new one (used when breaking tokens
// such as string literals and comments).
void WhitespaceManager::replaceWhitespaceInToken(
    const FormatToken &Tok, unsigned Offset, unsigned ReplaceChars,
    StringRef PreviousPostfix, StringRef CurrentPrefix, bool InPPDirective,
    unsigned Newlines, unsigned IndentLevel, int Spaces) {
  if (Tok.Finalized)
    return;
  SourceLocation Start = Tok.getStartOfNonWhitespace().getLocWithOffset(Offset);
  Changes.push_back(Change(
      true, SourceRange(Start, Start.getLocWithOffset(ReplaceChars)),
      IndentLevel, Spaces, std::max(0, Spaces), Newlines, PreviousPostfix,
      CurrentPrefix,
      // If we don't add a newline this change doesn't start a comment. Thus,
      // when we align line comments, we don't need to treat this change as one.
      // FIXME: We still need to take this change in account to properly
      // calculate the new length of the comment and to calculate the changes
      // for which to do the alignment when aligning comments.
      Tok.is(TT_LineComment) && Newlines > 0 ? tok::comment : tok::unknown,
      InPPDirective && !Tok.IsFirst));
}
// Sorts the recorded changes by file position, runs the alignment passes
// over them, and converts the result into tooling::Replacements. The
// passes rely on the sorted order and on the line-break information
// computed first.
const tooling::Replacements &WhitespaceManager::generateReplacements() {
  if (Changes.empty())
    return Replaces;
  std::sort(Changes.begin(), Changes.end(), Change::IsBeforeInFile(SourceMgr));
  calculateLineBreakInformation();
  alignConsecutiveAssignments();
  alignTrailingComments();
  alignEscapedNewlines();
  generateChanges();
  return Replaces;
}
// For each change, computes the length of the token it precedes, the
// column at which the previous token ends, and whether the previous token
// is a trailing comment; also links changes inside block comments to the
// change that starts the comment. generateReplacements() only calls this
// with a non-empty, position-sorted Changes list.
void WhitespaceManager::calculateLineBreakInformation() {
  Changes[0].PreviousEndOfTokenColumn = 0;
  for (unsigned i = 1, e = Changes.size(); i != e; ++i) {
    unsigned OriginalWhitespaceStart =
        SourceMgr.getFileOffset(Changes[i].OriginalWhitespaceRange.getBegin());
    unsigned PreviousOriginalWhitespaceEnd = SourceMgr.getFileOffset(
        Changes[i - 1].OriginalWhitespaceRange.getEnd());
    // Token length = distance between the end of the previous whitespace
    // and the start of this one, plus any prefix/postfix text added by
    // in-token changes.
    Changes[i - 1].TokenLength = OriginalWhitespaceStart -
                                 PreviousOriginalWhitespaceEnd +
                                 Changes[i].PreviousLinePostfix.size() +
                                 Changes[i - 1].CurrentLinePrefix.size();
    Changes[i].PreviousEndOfTokenColumn =
        Changes[i - 1].StartOfTokenColumn + Changes[i - 1].TokenLength;
    // A comment is "trailing" if the following change starts a new line
    // (or is the end of file).
    Changes[i - 1].IsTrailingComment =
        (Changes[i].NewlinesBefore > 0 || Changes[i].Kind == tok::eof) &&
        Changes[i - 1].Kind == tok::comment;
  }
  // FIXME: The last token is currently not always an eof token; in those
  // cases, setting TokenLength of the last token to 0 is wrong.
  Changes.back().TokenLength = 0;
  Changes.back().IsTrailingComment = Changes.back().Kind == tok::comment;
  // Link each in-token change (tok::unknown) following a comment to the
  // change that starts that comment, remembering its indentation offset so
  // continuation lines can later be moved together with the first line.
  const WhitespaceManager::Change *LastBlockComment = nullptr;
  for (auto &Change : Changes) {
    Change.StartOfBlockComment = nullptr;
    Change.IndentationOffset = 0;
    if (Change.Kind == tok::comment) {
      LastBlockComment = &Change;
    } else if (Change.Kind == tok::unknown) {
      if ((Change.StartOfBlockComment = LastBlockComment))
        Change.IndentationOffset =
            Change.StartOfTokenColumn -
            Change.StartOfBlockComment->StartOfTokenColumn;
    } else {
      LastBlockComment = nullptr;
    }
  }
}
// Walk through all of the changes and find sequences of "=" to align. To do
// so, keep track of the lines and whether or not an "=" was found on align. If
// a "=" is found on a line, extend the current sequence. If the current line
// cannot be part of a sequence, e.g. because there is an empty line before it
// or it contains non-assignments, finalize the previous sequence.
void WhitespaceManager::alignConsecutiveAssignments() {
  if (!Style.AlignConsecutiveAssignments)
    return;
  unsigned MinColumn = 0;
  unsigned StartOfSequence = 0;
  unsigned EndOfSequence = 0;
  bool FoundAssignmentOnLine = false;
  bool FoundLeftParenOnLine = false;
  unsigned CurrentLine = 0;
  // Aligns the pending sequence [StartOfSequence, EndOfSequence) to
  // MinColumn and resets the sequence-tracking state.
  auto AlignSequence = [&] {
    alignConsecutiveAssignments(StartOfSequence, EndOfSequence, MinColumn);
    MinColumn = 0;
    StartOfSequence = 0;
    EndOfSequence = 0;
  };
  for (unsigned i = 0, e = Changes.size(); i != e; ++i) {
    if (Changes[i].NewlinesBefore != 0) {
      CurrentLine += Changes[i].NewlinesBefore;
      // An empty line, or a line without an assignment, ends the sequence.
      if (StartOfSequence > 0 &&
          (Changes[i].NewlinesBefore > 1 || !FoundAssignmentOnLine)) {
        EndOfSequence = i;
        AlignSequence();
      }
      FoundAssignmentOnLine = false;
      FoundLeftParenOnLine = false;
    }
    // A second "=" on the same line, an "=" adjacent to a line break, or a
    // ')' without a matching '(' on this line, also ends the sequence.
    // NOTE(review): Changes[i + 1] is read here; if the very last change
    // were an "=" with NewlinesBefore == 0 and FoundAssignmentOnLine unset,
    // this would read past the end — confirm the last change is always an
    // eof/newline change.
    if ((Changes[i].Kind == tok::equal &&
         (FoundAssignmentOnLine || ((Changes[i].NewlinesBefore > 0 ||
                                     Changes[i + 1].NewlinesBefore > 0)))) ||
        (!FoundLeftParenOnLine && Changes[i].Kind == tok::r_paren)) {
      if (StartOfSequence > 0)
        AlignSequence();
    } else if (Changes[i].Kind == tok::l_paren) {
      FoundLeftParenOnLine = true;
      if (!FoundAssignmentOnLine && StartOfSequence > 0)
        AlignSequence();
    } else if (!FoundAssignmentOnLine && !FoundLeftParenOnLine &&
               Changes[i].Kind == tok::equal) {
      // First "=" on this line outside parentheses: extend the sequence.
      FoundAssignmentOnLine = true;
      EndOfSequence = i;
      if (StartOfSequence == 0)
        StartOfSequence = i;
      unsigned ChangeMinColumn = Changes[i].StartOfTokenColumn;
      MinColumn = std::max(MinColumn, ChangeMinColumn);
    }
  }
  if (StartOfSequence > 0) {
    EndOfSequence = Changes.size();
    AlignSequence();
  }
}
// Shifts the first "=" of each line in [Start, End) to \p Column, moving
// everything that follows it on the same line by the same amount.
void WhitespaceManager::alignConsecutiveAssignments(unsigned Start,
                                                    unsigned End,
                                                    unsigned Column) {
  bool AlignedAssignment = false;
  int PreviousShift = 0;
  for (unsigned i = Start; i != End; ++i) {
    int Shift = 0;
    // A new line resets the per-line "already aligned" flag.
    if (Changes[i].NewlinesBefore > 0)
      AlignedAssignment = false;
    if (!AlignedAssignment && Changes[i].Kind == tok::equal) {
      Shift = Column - Changes[i].StartOfTokenColumn;
      AlignedAssignment = true;
      PreviousShift = Shift;
    }
    assert(Shift >= 0);
    Changes[i].Spaces += Shift;
    if (i + 1 != Changes.size())
      Changes[i + 1].PreviousEndOfTokenColumn += Shift;
    Changes[i].StartOfTokenColumn += Shift;
    // NOTE(review): on the "=" token itself both Shift and PreviousShift
    // (equal values) are applied below, so the column bookkeeping moves by
    // twice the number of inserted spaces — confirm this double shift is
    // intended.
    if (AlignedAssignment) {
      Changes[i].StartOfTokenColumn += PreviousShift;
      if (i + 1 != Changes.size())
        Changes[i + 1].PreviousEndOfTokenColumn += PreviousShift;
    }
  }
}
// Finds sequences of trailing comments that share a feasible common column
// and aligns each sequence. A sequence ends at empty lines, when no common
// column exists, or for own-line comments that were aligned with the start
// of the next line.
void WhitespaceManager::alignTrailingComments() {
  unsigned MinColumn = 0;
  unsigned MaxColumn = UINT_MAX;
  unsigned StartOfSequence = 0;
  bool BreakBeforeNext = false;
  unsigned Newlines = 0;
  for (unsigned i = 0, e = Changes.size(); i != e; ++i) {
    // Continuation lines of block comments move with their first line.
    if (Changes[i].StartOfBlockComment)
      continue;
    Newlines += Changes[i].NewlinesBefore;
    if (!Changes[i].IsTrailingComment)
      continue;
    // Feasible column range for this comment: it cannot move left of its
    // current position nor right of the column limit.
    unsigned ChangeMinColumn = Changes[i].StartOfTokenColumn;
    unsigned ChangeMaxColumn = Style.ColumnLimit - Changes[i].TokenLength;
    // Reserve room for the trailing " \" of a continued PP directive.
    if (i + 1 != e && Changes[i + 1].ContinuesPPDirective)
      ChangeMaxColumn -= 2;
    // If this comment follows an } in column 0, it probably documents the
    // closing of a namespace and we don't want to align it.
    bool FollowsRBraceInColumn0 = i > 0 && Changes[i].NewlinesBefore == 0 &&
                                  Changes[i - 1].Kind == tok::r_brace &&
                                  Changes[i - 1].StartOfTokenColumn == 0;
    bool WasAlignedWithStartOfNextLine = false;
    if (Changes[i].NewlinesBefore == 1) { // A comment on its own line.
      unsigned CommentColumn = SourceMgr.getSpellingColumnNumber(
          Changes[i].OriginalWhitespaceRange.getEnd());
      for (unsigned j = i + 1; j != e; ++j) {
        if (Changes[j].Kind != tok::comment) { // Skip over comments.
          unsigned NextColumn = SourceMgr.getSpellingColumnNumber(
              Changes[j].OriginalWhitespaceRange.getEnd());
          // The start of the next token was previously aligned with the
          // start of this comment.
          WasAlignedWithStartOfNextLine =
              CommentColumn == NextColumn ||
              CommentColumn == NextColumn + Style.IndentWidth;
          break;
        }
      }
    }
    if (!Style.AlignTrailingComments || FollowsRBraceInColumn0) {
      alignTrailingComments(StartOfSequence, i, MinColumn);
      MinColumn = ChangeMinColumn;
      MaxColumn = ChangeMinColumn;
      StartOfSequence = i;
    } else if (BreakBeforeNext || Newlines > 1 ||
               (ChangeMinColumn > MaxColumn || ChangeMaxColumn < MinColumn) ||
               // Break the comment sequence if the previous line did not end
               // in a trailing comment.
               (Changes[i].NewlinesBefore == 1 && i > 0 &&
                !Changes[i - 1].IsTrailingComment) ||
               WasAlignedWithStartOfNextLine) {
      alignTrailingComments(StartOfSequence, i, MinColumn);
      MinColumn = ChangeMinColumn;
      MaxColumn = ChangeMaxColumn;
      StartOfSequence = i;
    } else {
      // Narrow the feasible column range and keep extending the sequence.
      MinColumn = std::max(MinColumn, ChangeMinColumn);
      MaxColumn = std::min(MaxColumn, ChangeMaxColumn);
    }
    BreakBeforeNext =
        (i == 0) || (Changes[i].NewlinesBefore > 1) ||
        // Never start a sequence with a comment at the beginning of
        // the line.
        (Changes[i].NewlinesBefore == 1 && StartOfSequence == i);
    Newlines = 0;
  }
  alignTrailingComments(StartOfSequence, Changes.size(), MinColumn);
}
void WhitespaceManager::alignTrailingComments(unsigned Start, unsigned End,
unsigned Column) {
for (unsigned i = Start; i != End; ++i) {
int Shift = 0;
if (Changes[i].IsTrailingComment) {
Shift = Column - Changes[i].StartOfTokenColumn;
}
if (Changes[i].StartOfBlockComment) {
Shift = Changes[i].IndentationOffset +
Changes[i].StartOfBlockComment->StartOfTokenColumn -
Changes[i].StartOfTokenColumn;
}
assert(Shift >= 0);
Changes[i].Spaces += Shift;
if (i + 1 != End)
Changes[i + 1].PreviousEndOfTokenColumn += Shift;
Changes[i].StartOfTokenColumn += Shift;
}
}
// Determines, per macro definition, the column at which escaped newlines
// ('\') should be placed, and applies it to each directive's lines.
void WhitespaceManager::alignEscapedNewlines() {
  unsigned MaxEndOfLine =
      Style.AlignEscapedNewlinesLeft ? 0 : Style.ColumnLimit;
  unsigned StartOfMacro = 0;
  for (unsigned i = 1, e = Changes.size(); i < e; ++i) {
    Change &C = Changes[i];
    if (C.NewlinesBefore == 0)
      continue;
    if (C.ContinuesPPDirective) {
      // Track the widest line of the current directive (+2 for " \").
      MaxEndOfLine = std::max(C.PreviousEndOfTokenColumn + 2, MaxEndOfLine);
      continue;
    }
    // The directive ended; align its escaped newlines and start over.
    alignEscapedNewlines(StartOfMacro + 1, i, MaxEndOfLine);
    MaxEndOfLine = Style.AlignEscapedNewlinesLeft ? 0 : Style.ColumnLimit;
    StartOfMacro = i;
  }
  alignEscapedNewlines(StartOfMacro + 1, Changes.size(), MaxEndOfLine);
}
// Sets the escaped-newline column for every line-continuation change in
// [Start, End). A value of 0 is used when the line is too long to reach
// \p Column.
void WhitespaceManager::alignEscapedNewlines(unsigned Start, unsigned End,
                                             unsigned Column) {
  for (unsigned i = Start; i < End; ++i) {
    Change &C = Changes[i];
    if (C.NewlinesBefore == 0)
      continue;
    assert(C.ContinuesPPDirective);
    C.EscapedNewlineColumn =
        (C.PreviousEndOfTokenColumn + 1 > Column) ? 0 : Column;
  }
}
// Materializes each recorded change into a text replacement: the previous
// line's postfix, the (possibly escaped) newlines, the indentation, and
// the current line's prefix.
void WhitespaceManager::generateChanges() {
  for (unsigned i = 0, e = Changes.size(); i != e; ++i) {
    const Change &C = Changes[i];
    if (i > 0) {
      assert(Changes[i - 1].OriginalWhitespaceRange.getBegin() !=
                 C.OriginalWhitespaceRange.getBegin() &&
             "Generating two replacements for the same location");
    }
    if (C.CreateReplacement) {
      std::string ReplacementText = C.PreviousLinePostfix;
      if (C.ContinuesPPDirective)
        appendNewlineText(ReplacementText, C.NewlinesBefore,
                          C.PreviousEndOfTokenColumn, C.EscapedNewlineColumn);
      else
        appendNewlineText(ReplacementText, C.NewlinesBefore);
      // Spaces is a signed value; clamp to zero before emitting indentation.
      appendIndentText(ReplacementText, C.IndentLevel, std::max(0, C.Spaces),
                       C.StartOfTokenColumn - std::max(0, C.Spaces));
      ReplacementText.append(C.CurrentLinePrefix);
      storeReplacement(C.OriginalWhitespaceRange, ReplacementText);
    }
  }
}
void WhitespaceManager::storeReplacement(const SourceRange &Range,
StringRef Text) {
unsigned WhitespaceLength = SourceMgr.getFileOffset(Range.getEnd()) -
SourceMgr.getFileOffset(Range.getBegin());
// Don't create a replacement, if it does not change anything.
if (StringRef(SourceMgr.getCharacterData(Range.getBegin()),
WhitespaceLength) == Text)
return;
Replaces.insert(tooling::Replacement(
SourceMgr, CharSourceRange::getCharRange(Range), Text));
}
void WhitespaceManager::appendNewlineText(std::string &Text,
                                          unsigned Newlines) {
  // Emit the requested number of line breaks in the configured style.
  const char *LineBreak = UseCRLF ? "\r\n" : "\n";
  for (unsigned Done = 0; Done < Newlines; ++Done)
    Text.append(LineBreak);
}
void WhitespaceManager::appendNewlineText(std::string &Text, unsigned Newlines,
                                          unsigned PreviousEndOfTokenColumn,
                                          unsigned EscapedNewlineColumn) {
  // Appends 'Newlines' escaped line breaks, padding each backslash out to
  // EscapedNewlineColumn.
  if (Newlines > 0) {
    // Columns already occupied before the backslash: the previous token on
    // the first line, nothing on the following (empty) lines, so Offset is
    // reset to 0 after the first iteration. The signed std::min<int> also
    // handles an escape column left of (or equal to) the token end — the
    // wrap-around arithmetic below then appends no padding at all.
    unsigned Offset =
        std::min<int>(EscapedNewlineColumn - 1, PreviousEndOfTokenColumn);
    for (unsigned i = 0; i < Newlines; ++i) {
      // Pad up to the escape column, then emit "\" plus the line break.
      Text.append(EscapedNewlineColumn - Offset - 1, ' ');
      Text.append(UseCRLF ? "\\\r\n" : "\\\n");
      Offset = 0;
    }
  }
}
void WhitespaceManager::appendIndentText(std::string &Text,
                                         unsigned IndentLevel, unsigned Spaces,
                                         unsigned WhitespaceStartColumn) {
  // Appends 'Spaces' columns of horizontal whitespace, using tabs according
  // to Style.UseTab. WhitespaceStartColumn is the column at which the
  // whitespace begins and determines where the next tab stop falls.
  switch (Style.UseTab) {
  case FormatStyle::UT_Never:
    Text.append(Spaces, ' ');
    break;
  case FormatStyle::UT_Always: {
    // The first tab only reaches the next tab stop from the current column,
    // so it may be narrower than a full tab width.
    unsigned FirstTabWidth =
        Style.TabWidth - WhitespaceStartColumn % Style.TabWidth;
    // Indent with tabs only when there's at least one full tab.
    if (FirstTabWidth + Style.TabWidth <= Spaces) {
      Spaces -= FirstTabWidth;
      Text.append("\t");
    }
    // Fill the remainder with full tabs plus trailing spaces.
    Text.append(Spaces / Style.TabWidth, '\t');
    Text.append(Spaces % Style.TabWidth, ' ');
    break;
  }
  case FormatStyle::UT_ForIndentation:
    // Tabs are used only for leading indentation (i.e. starting at column 0);
    // alignment beyond the indent level is done with spaces.
    if (WhitespaceStartColumn == 0) {
      unsigned Indentation = IndentLevel * Style.IndentWidth;
      // This happens, e.g. when a line in a block comment is indented less
      // than the first one; never emit more whitespace than requested.
      if (Indentation > Spaces)
        Indentation = Spaces;
      unsigned Tabs = Indentation / Style.TabWidth;
      Text.append(Tabs, '\t');
      Spaces -= Tabs * Style.TabWidth;
    }
    Text.append(Spaces, ' ');
    break;
  }
}
} // namespace format
} // namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Format/UnwrappedLineParser.h | //===--- UnwrappedLineParser.h - Format C++ code ----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file contains the declaration of the UnwrappedLineParser,
/// which turns a stream of tokens into UnwrappedLines.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEPARSER_H
#define LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEPARSER_H
#include "FormatToken.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Format/Format.h"
#include <list>
#include <stack>
namespace clang {
namespace format {
struct UnwrappedLineNode;
/// \brief An unwrapped line is a sequence of \c Token, that we would like to
/// put on a single line if there was no column limit.
///
/// This is used as a main interface between the \c UnwrappedLineParser and the
/// \c UnwrappedLineFormatter. The key property is that changing the formatting
/// within an unwrapped line does not affect any other unwrapped lines.
struct UnwrappedLine {
  UnwrappedLine();

  // FIXME: Don't use std::list here.
  /// \brief The \c Tokens comprising this \c UnwrappedLine.
  std::list<UnwrappedLineNode> Tokens;

  /// \brief The indent level of the \c UnwrappedLine.
  unsigned Level;

  /// \brief Whether this \c UnwrappedLine is part of a preprocessor directive.
  bool InPPDirective;

  /// \brief Whether this line may only contain a declaration, depending on
  /// whether it is inside a compound statement (cf.
  /// UnwrappedLineParser::DeclarationScopeStack).
  bool MustBeDeclaration;
};
/// \brief Callback interface receiving the \c UnwrappedLines produced by an
/// \c UnwrappedLineParser.
class UnwrappedLineConsumer {
public:
  virtual ~UnwrappedLineConsumer() {}
  /// \brief Invoked for each complete \c UnwrappedLine.
  virtual void consumeUnwrappedLine(const UnwrappedLine &Line) = 0;
  /// \brief Invoked when one parse pass is finished; the parser re-parses the
  /// input for each combination of preprocessor branches (see
  /// UnwrappedLineParser::PPLevelBranchIndex).
  virtual void finishRun() = 0;
};
class FormatTokenSource;
/// \brief Turns a stream of \c FormatTokens into \c UnwrappedLines and hands
/// them to an \c UnwrappedLineConsumer.
class UnwrappedLineParser {
public:
  UnwrappedLineParser(const FormatStyle &Style,
                      const AdditionalKeywords &Keywords,
                      ArrayRef<FormatToken *> Tokens,
                      UnwrappedLineConsumer &Callback);

  /// \brief Parses the token stream, reporting lines through the callback.
  void parse();

private:
  void reset();
  void parseFile();
  void parseLevel(bool HasOpeningBrace);
  void parseBlock(bool MustBeDeclaration, bool AddLevel = true,
                  bool MunchSemi = true);
  void parseChildBlock();
  void parsePPDirective();
  void parsePPDefine();
  void parsePPIf(bool IfDef);
  void parsePPElIf();
  void parsePPElse();
  void parsePPEndIf();
  void parsePPUnknown();
  void parseStructuralElement();
  bool tryToParseBracedList();
  bool parseBracedList(bool ContinueOnSemicolons = false);
  void parseParens();
  void parseSquare();
  void parseIfThenElse();
  void parseTryCatch();
  void parseForOrWhileLoop();
  void parseDoWhile();
  void parseLabel();
  void parseCaseLabel();
  void parseSwitch();
  void parseNamespace();
  void parseNew();
  void parseAccessSpecifier();
  void parseEnum();
  void parseJavaEnumBody();
  void parseRecord();
  void parseObjCProtocolList();
  void parseObjCUntilAtEnd();
  void parseObjCInterfaceOrImplementation();
  void parseObjCProtocol();
  void parseJavaScriptEs6ImportExport();
  bool tryToParseLambda();
  bool tryToParseLambdaIntroducer();
  void tryToParseJSFunction();
  void addUnwrappedLine();
  bool eof() const;
  void nextToken();
  void readToken();
  void flushComments(bool NewlineBeforeNext);
  void pushToken(FormatToken *Tok);
  void calculateBraceTypes(bool ExpectClassBody = false);

  // Marks a conditional compilation edge (for example, an '#if', '#ifdef',
  // '#else' or merge conflict marker). If 'Unreachable' is true, assumes
  // this branch either cannot be taken (for example '#if false'), or should
  // not be taken in this round.
  void conditionalCompilationCondition(bool Unreachable);
  void conditionalCompilationStart(bool Unreachable);
  void conditionalCompilationAlternative();
  void conditionalCompilationEnd();

  bool isOnNewLine(const FormatToken &FormatTok);

  // FIXME: We are constantly running into bugs where Line.Level is incorrectly
  // subtracted from beyond 0. Introduce a method to subtract from Line.Level
  // and use that everywhere in the Parser.
  std::unique_ptr<UnwrappedLine> Line;

  // Comments are sorted into unwrapped lines by whether they are in the same
  // line as the previous token, or not. If not, they belong to the next token.
  // Since the next token might already be in a new unwrapped line, we need to
  // store the comments belonging to that token.
  SmallVector<FormatToken *, 1> CommentsBeforeNextToken;

  // The token the parser is currently looking at.
  FormatToken *FormatTok;

  bool MustBreakBeforeNextToken;

  // The parsed lines. Only added to through \c CurrentLines.
  SmallVector<UnwrappedLine, 8> Lines;

  // Preprocessor directives are parsed out-of-order from other unwrapped lines.
  // Thus, we need to keep a list of preprocessor directives to be reported
  // after an unwrapped line that has been started was finished.
  SmallVector<UnwrappedLine, 4> PreprocessorDirectives;

  // New unwrapped lines are added via CurrentLines.
  // Usually points to \c &Lines. While parsing a preprocessor directive when
  // there is an unfinished previous unwrapped line, will point to
  // \c &PreprocessorDirectives.
  SmallVectorImpl<UnwrappedLine> *CurrentLines;

  // We store for each line whether it must be a declaration depending on
  // whether we are in a compound statement or not.
  std::vector<bool> DeclarationScopeStack;

  const FormatStyle &Style;
  const AdditionalKeywords &Keywords;

  FormatTokenSource *Tokens;
  UnwrappedLineConsumer &Callback;

  // FIXME: This is a temporary measure until we have reworked the ownership
  // of the format tokens. The goal is to have the actual tokens created and
  // owned outside of and handed into the UnwrappedLineParser.
  ArrayRef<FormatToken *> AllTokens;

  // Represents preprocessor branch type, so we can find matching
  // #if/#else/#endif directives.
  enum PPBranchKind {
    PP_Conditional, // Any #if, #ifdef, #ifndef, #elif, block outside #if 0
    PP_Unreachable  // #if 0 or a conditional preprocessor block inside #if 0
  };

  // Keeps a stack of currently active preprocessor branching directives.
  SmallVector<PPBranchKind, 16> PPStack;

  // The \c UnwrappedLineParser re-parses the code for each combination
  // of preprocessor branches that can be taken.
  // To that end, we take the same branch (#if, #else, or one of the #elif
  // branches) for each nesting level of preprocessor branches.
  // \c PPBranchLevel stores the current nesting level of preprocessor
  // branches during one pass over the code.
  int PPBranchLevel;

  // Contains the current branch (#if, #else or one of the #elif branches)
  // for each nesting level.
  SmallVector<int, 8> PPLevelBranchIndex;

  // Contains the maximum number of branches at each nesting level.
  SmallVector<int, 8> PPLevelBranchCount;

  // Contains the number of branches per nesting level we are currently
  // in while parsing a preprocessor branch sequence.
  // This is used to update PPLevelBranchCount at the end of a branch
  // sequence.
  std::stack<int> PPChainBranchIndex;

  friend class ScopedLineState;
  friend class CompoundStatementIndenter;
};
/// \brief One token of an \c UnwrappedLine together with the child lines
/// nested below it.
struct UnwrappedLineNode {
  UnwrappedLineNode() : Tok(nullptr) {}
  UnwrappedLineNode(FormatToken *Tok) : Tok(Tok) {}

  // The token itself; may be null for a default-constructed node.
  FormatToken *Tok;
  // Unwrapped lines nested under this token.
  SmallVector<UnwrappedLine, 0> Children;
};
// All flags default to false; new lines start at indent level 0.
inline UnwrappedLine::UnwrappedLine()
    : Level(0), InPPDirective(false), MustBeDeclaration(false) {}
} // end namespace format
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Format/TokenAnnotator.cpp | //===--- TokenAnnotator.cpp - Format C++ code -----------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file implements a token annotator, i.e. creates
/// \c AnnotatedTokens out of \c FormatTokens with required extra information.
///
//===----------------------------------------------------------------------===//
#include "TokenAnnotator.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Debug.h"
#define DEBUG_TYPE "format-token-annotator"
namespace clang {
namespace format {
namespace {
/// \brief A parser that gathers additional information about tokens.
///
/// The \c TokenAnnotator tries to match parenthesis and square brakets and
/// store a parenthesis levels. It also tries to resolve matching "<" and ">"
/// into template parameter lists.
class AnnotatingParser {
public:
  AnnotatingParser(const FormatStyle &Style, AnnotatedLine &Line,
                   const AdditionalKeywords &Keywords)
      : Style(Style), Line(Line), CurrentToken(Line.First), AutoFound(false),
        Keywords(Keywords) {
    // Seed the context stack with a dummy entry; real contexts are pushed as
    // brackets are entered (see ScopedContextCreator).
    Contexts.push_back(Context(tok::unknown, 1, /*IsExpression=*/false));
    resetTokenMetadata(CurrentToken);
  }
private:
  /// \brief Parses the contents of an angle-bracket pair; the opening '<' has
  /// already been consumed and is CurrentToken->Previous. Returns true iff a
  /// matching '>' was found (annotated as TT_TemplateCloser).
  bool parseAngle() {
    if (!CurrentToken)
      return false;
    FormatToken *Left = CurrentToken->Previous;
    Left->ParentBracket = Contexts.back().ContextKind;
    ScopedContextCreator ContextCreator(*this, tok::less, 10);

    // If this angle is in the context of an expression, we need to be more
    // hesitant to detect it as opening template parameters.
    bool InExprContext = Contexts.back().IsExpression;

    Contexts.back().IsExpression = false;
    // If there's a template keyword before the opening angle bracket, this is a
    // template parameter, not an argument.
    Contexts.back().InTemplateArgument =
        Left->Previous && Left->Previous->Tok.isNot(tok::kw_template);

    // In Java, a '?' (wildcard) may directly follow the opening '<'.
    if (Style.Language == FormatStyle::LK_Java &&
        CurrentToken->is(tok::question))
      next();

    while (CurrentToken) {
      if (CurrentToken->is(tok::greater)) {
        Left->MatchingParen = CurrentToken;
        CurrentToken->MatchingParen = Left;
        CurrentToken->Type = TT_TemplateCloser;
        next();
        return true;
      }
      if (CurrentToken->is(tok::question) &&
          Style.Language == FormatStyle::LK_Java) {
        next();
        continue;
      }
      if (CurrentToken->isOneOf(tok::r_paren, tok::r_square, tok::r_brace) ||
          (CurrentToken->isOneOf(tok::colon, tok::question) && InExprContext))
        return false;
      // If a && or || is found and interpreted as a binary operator, this set
      // of angles is likely part of something like "a < b && c > d". If the
      // angles are inside an expression, the ||/&& might also be a binary
      // operator that was misinterpreted because we are parsing template
      // parameters.
      // FIXME: This is getting out of hand, write a decent parser.
      if (CurrentToken->Previous->isOneOf(tok::pipepipe, tok::ampamp) &&
          CurrentToken->Previous->is(TT_BinaryOperator) &&
          Contexts[Contexts.size() - 2].IsExpression &&
          !Line.startsWith(tok::kw_template))
        return false;
      updateParameterCount(Left, CurrentToken);
      if (!consumeToken())
        return false;
    }
    return false;
  }
  /// \brief Parses a parenthesized region; the opening '(' has already been
  /// consumed and is CurrentToken->Previous. \p LookForDecls is set when the
  /// parens follow 'if'/'while' (see consumeToken) so declarations inside the
  /// condition are detected. Returns true iff the matching ')' was found.
  bool parseParens(bool LookForDecls = false) {
    if (!CurrentToken)
      return false;
    FormatToken *Left = CurrentToken->Previous;
    Left->ParentBracket = Contexts.back().ContextKind;
    ScopedContextCreator ContextCreator(*this, tok::l_paren, 1);

    // FIXME: This is a bit of a hack. Do better.
    Contexts.back().ColonIsForRangeExpr =
        Contexts.size() == 2 && Contexts[0].ColonIsForRangeExpr;

    bool StartsObjCMethodExpr = false;
    if (CurrentToken->is(tok::caret)) {
      // (^ can start a block type.
      Left->Type = TT_ObjCBlockLParen;
    } else if (FormatToken *MaybeSel = Left->Previous) {
      // @selector( starts a selector.
      if (MaybeSel->isObjCAtKeyword(tok::objc_selector) && MaybeSel->Previous &&
          MaybeSel->Previous->is(tok::at)) {
        StartsObjCMethodExpr = true;
      }
    }

    // Classify the contents as expression or declaration based on the token
    // preceding the '('.
    if (Left->Previous &&
        (Left->Previous->isOneOf(tok::kw_static_assert, tok::kw_if,
                                 tok::kw_while, tok::l_paren, tok::comma) ||
         Left->Previous->is(TT_BinaryOperator))) {
      // static_assert, if and while usually contain expressions.
      Contexts.back().IsExpression = true;
    } else if (Left->Previous && Left->Previous->is(tok::r_square) &&
               Left->Previous->MatchingParen &&
               Left->Previous->MatchingParen->is(TT_LambdaLSquare)) {
      // This is a parameter list of a lambda expression.
      Contexts.back().IsExpression = false;
    } else if (Line.InPPDirective &&
               (!Left->Previous ||
                !Left->Previous->isOneOf(tok::identifier,
                                         TT_OverloadedOperator))) {
      Contexts.back().IsExpression = true;
    } else if (Contexts[Contexts.size() - 2].CaretFound) {
      // This is the parameter list of an ObjC block.
      Contexts.back().IsExpression = false;
    } else if (Left->Previous && Left->Previous->is(tok::kw___attribute)) {
      Left->Type = TT_AttributeParen;
    } else if (Left->Previous && Left->Previous->is(TT_ForEachMacro)) {
      // The first argument to a foreach macro is a declaration.
      Contexts.back().IsForEachMacro = true;
      Contexts.back().IsExpression = false;
    } else if (Left->Previous && Left->Previous->MatchingParen &&
               Left->Previous->MatchingParen->is(TT_ObjCBlockLParen)) {
      Contexts.back().IsExpression = false;
    }

    if (StartsObjCMethodExpr) {
      Contexts.back().ColonIsObjCMethodExpr = true;
      Left->Type = TT_ObjCMethodExpr;
    }

    bool MightBeFunctionType = CurrentToken->is(tok::star);
    bool HasMultipleLines = false;
    bool HasMultipleParametersOnALine = false;
    bool MightBeObjCForRangeLoop =
        Left->Previous && Left->Previous->is(tok::kw_for);
    while (CurrentToken) {
      // LookForDecls is set when "if (" has been seen. Check for
      // 'identifier' '*' 'identifier' followed by not '=' -- this
      // '*' has to be a binary operator but determineStarAmpUsage() will
      // categorize it as an unary operator, so set the right type here.
      if (LookForDecls && CurrentToken->Next) {
        FormatToken *Prev = CurrentToken->getPreviousNonComment();
        if (Prev) {
          FormatToken *PrevPrev = Prev->getPreviousNonComment();
          FormatToken *Next = CurrentToken->Next;
          if (PrevPrev && PrevPrev->is(tok::identifier) &&
              Prev->isOneOf(tok::star, tok::amp, tok::ampamp) &&
              CurrentToken->is(tok::identifier) && Next->isNot(tok::equal)) {
            Prev->Type = TT_BinaryOperator;
            LookForDecls = false;
          }
        }
      }

      if (CurrentToken->Previous->is(TT_PointerOrReference) &&
          CurrentToken->Previous->Previous->isOneOf(tok::l_paren,
                                                    tok::coloncolon))
        MightBeFunctionType = true;
      if (CurrentToken->Previous->is(TT_BinaryOperator))
        Contexts.back().IsExpression = true;
      if (CurrentToken->is(tok::r_paren)) {
        if (MightBeFunctionType && CurrentToken->Next &&
            (CurrentToken->Next->is(tok::l_paren) ||
             (CurrentToken->Next->is(tok::l_square) &&
              !Contexts.back().IsExpression)))
          Left->Type = TT_FunctionTypeLParen;
        Left->MatchingParen = CurrentToken;
        CurrentToken->MatchingParen = Left;

        if (StartsObjCMethodExpr) {
          CurrentToken->Type = TT_ObjCMethodExpr;
          if (Contexts.back().FirstObjCSelectorName) {
            Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName =
                Contexts.back().LongestObjCSelectorName;
          }
        }

        if (Left->is(TT_AttributeParen))
          CurrentToken->Type = TT_AttributeParen;
        if (Left->Previous && Left->Previous->is(TT_JavaAnnotation))
          CurrentToken->Type = TT_JavaAnnotation;
        if (Left->Previous && Left->Previous->is(TT_LeadingJavaAnnotation))
          CurrentToken->Type = TT_LeadingJavaAnnotation;

        // Record how the parameters were laid out in the input; this is kept
        // on the opening paren as its packing kind.
        if (!HasMultipleLines)
          Left->PackingKind = PPK_Inconclusive;
        else if (HasMultipleParametersOnALine)
          Left->PackingKind = PPK_BinPacked;
        else
          Left->PackingKind = PPK_OnePerLine;

        next();
        return true;
      }
      if (CurrentToken->isOneOf(tok::r_square, tok::r_brace))
        return false;

      if (CurrentToken->is(tok::l_brace))
        Left->Type = TT_Unknown; // Not TT_ObjCBlockLParen
      if (CurrentToken->is(tok::comma) && CurrentToken->Next &&
          !CurrentToken->Next->HasUnescapedNewline &&
          !CurrentToken->Next->isTrailingComment())
        HasMultipleParametersOnALine = true;
      if (CurrentToken->isOneOf(tok::kw_const, tok::kw_auto) ||
          CurrentToken->isSimpleTypeSpecifier())
        Contexts.back().IsExpression = false;
      if (CurrentToken->isOneOf(tok::semi, tok::colon))
        MightBeObjCForRangeLoop = false;
      if (MightBeObjCForRangeLoop && CurrentToken->is(Keywords.kw_in))
        CurrentToken->Type = TT_ObjCForIn;
      // When we discover a 'new', we set CanBeExpression to 'false' in order to
      // parse the type correctly. Reset that after a comma.
      if (CurrentToken->is(tok::comma))
        Contexts.back().CanBeExpression = true;

      FormatToken *Tok = CurrentToken;
      if (!consumeToken())
        return false;
      updateParameterCount(Left, Tok);
      if (CurrentToken && CurrentToken->HasUnescapedNewline)
        HasMultipleLines = true;
    }
    return false;
  }
  /// \brief Parses a square-bracket pair; the opening '[' has already been
  /// consumed and is CurrentToken->Previous. Returns true iff the matching
  /// ']' was found.
  bool parseSquare() {
    if (!CurrentToken)
      return false;

    // A '[' could be an index subscript (after an identifier or after
    // ')' or ']'), it could be the start of an Objective-C method
    // expression, or it could the start of an Objective-C array literal.
    FormatToken *Left = CurrentToken->Previous;
    Left->ParentBracket = Contexts.back().ContextKind;
    FormatToken *Parent = Left->getPreviousNonComment();
    bool StartsObjCMethodExpr =
        Style.Language == FormatStyle::LK_Cpp &&
        Contexts.back().CanBeExpression && Left->isNot(TT_LambdaLSquare) &&
        CurrentToken->isNot(tok::l_brace) &&
        (!Parent ||
         Parent->isOneOf(tok::colon, tok::l_square, tok::l_paren,
                         tok::kw_return, tok::kw_throw) ||
         Parent->isUnaryOperator() ||
         Parent->isOneOf(TT_ObjCForIn, TT_CastRParen) ||
         getBinOpPrecedence(Parent->Tok.getKind(), true, true) > prec::Unknown);
    bool ColonFound = false;

    unsigned BindingIncrease = 1;
    if (Left->is(TT_Unknown)) {
      if (StartsObjCMethodExpr) {
        Left->Type = TT_ObjCMethodExpr;
      } else if (Style.Language == FormatStyle::LK_JavaScript && Parent &&
                 Contexts.back().ContextKind == tok::l_brace &&
                 Parent->isOneOf(tok::l_brace, tok::comma)) {
        Left->Type = TT_JsComputedPropertyName;
      } else if (Parent &&
                 Parent->isOneOf(tok::at, tok::equal, tok::comma, tok::l_paren,
                                 tok::l_square, tok::question, tok::colon,
                                 tok::kw_return)) {
        Left->Type = TT_ArrayInitializerLSquare;
      } else {
        // Subscripts bind their contents more strongly (higher binding
        // strength is handed to the new context below).
        BindingIncrease = 10;
        Left->Type = TT_ArraySubscriptLSquare;
      }
    }

    ScopedContextCreator ContextCreator(*this, tok::l_square, BindingIncrease);
    Contexts.back().IsExpression = true;
    Contexts.back().ColonIsObjCMethodExpr = StartsObjCMethodExpr;

    while (CurrentToken) {
      if (CurrentToken->is(tok::r_square)) {
        if (CurrentToken->Next && CurrentToken->Next->is(tok::l_paren) &&
            Left->is(TT_ObjCMethodExpr)) {
          // An ObjC method call is rarely followed by an open parenthesis.
          // FIXME: Do we incorrectly label ":" with this?
          StartsObjCMethodExpr = false;
          Left->Type = TT_Unknown;
        }
        if (StartsObjCMethodExpr && CurrentToken->Previous != Left) {
          CurrentToken->Type = TT_ObjCMethodExpr;
          // determineStarAmpUsage() thinks that '*' '[' is allocating an
          // array of pointers, but if '[' starts a selector then '*' is a
          // binary operator.
          if (Parent && Parent->is(TT_PointerOrReference))
            Parent->Type = TT_BinaryOperator;
        }
        Left->MatchingParen = CurrentToken;
        CurrentToken->MatchingParen = Left;
        if (Contexts.back().FirstObjCSelectorName) {
          Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName =
              Contexts.back().LongestObjCSelectorName;
          if (Left->BlockParameterCount > 1)
            Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName = 0;
        }
        next();
        return true;
      }
      if (CurrentToken->isOneOf(tok::r_paren, tok::r_brace))
        return false;
      if (CurrentToken->is(tok::colon)) {
        if (Left->is(TT_ArraySubscriptLSquare)) {
          // A colon inside what looked like a subscript means this is really
          // an ObjC method expression.
          Left->Type = TT_ObjCMethodExpr;
          StartsObjCMethodExpr = true;
          Contexts.back().ColonIsObjCMethodExpr = true;
          if (Parent && Parent->is(tok::r_paren))
            Parent->Type = TT_CastRParen;
        }
        ColonFound = true;
      }
      if (CurrentToken->is(tok::comma) && Left->is(TT_ObjCMethodExpr) &&
          !ColonFound)
        Left->Type = TT_ArrayInitializerLSquare;
      FormatToken *Tok = CurrentToken;
      if (!consumeToken())
        return false;
      updateParameterCount(Left, Tok);
    }
    return false;
  }
  /// \brief Parses a braced region; the opening '{' is CurrentToken->Previous.
  /// Returns true iff the matching '}' was found or there are no tokens left.
  bool parseBrace() {
    if (CurrentToken) {
      FormatToken *Left = CurrentToken->Previous;
      Left->ParentBracket = Contexts.back().ContextKind;

      // A '^' seen before the brace marks an ObjC block.
      if (Contexts.back().CaretFound)
        Left->Type = TT_ObjCBlockLBrace;
      Contexts.back().CaretFound = false;

      ScopedContextCreator ContextCreator(*this, tok::l_brace, 1);
      Contexts.back().ColonIsDictLiteral = true;
      if (Left->BlockKind == BK_BracedInit)
        Contexts.back().IsExpression = true;

      while (CurrentToken) {
        if (CurrentToken->is(tok::r_brace)) {
          Left->MatchingParen = CurrentToken;
          CurrentToken->MatchingParen = Left;
          next();
          return true;
        }
        if (CurrentToken->isOneOf(tok::r_paren, tok::r_square))
          return false;
        updateParameterCount(Left, CurrentToken);
        if (CurrentToken->isOneOf(tok::colon, tok::l_brace)) {
          // "identifier:" (or identifier before a nested message in proto)
          // is a selector-like key; the brace then forms a dict literal.
          FormatToken *Previous = CurrentToken->getPreviousNonComment();
          if ((CurrentToken->is(tok::colon) ||
               Style.Language == FormatStyle::LK_Proto) &&
              Previous->is(tok::identifier))
            Previous->Type = TT_SelectorName;
          if (CurrentToken->is(tok::colon) ||
              Style.Language == FormatStyle::LK_JavaScript)
            Left->Type = TT_DictLiteral;
        }
        if (!consumeToken())
          return false;
      }
    }
    return true;
  }
void updateParameterCount(FormatToken *Left, FormatToken *Current) {
if (Current->is(TT_LambdaLSquare) ||
(Current->is(tok::caret) && Current->is(TT_UnaryOperator)) ||
(Style.Language == FormatStyle::LK_JavaScript &&
Current->is(Keywords.kw_function))) {
++Left->BlockParameterCount;
}
if (Current->is(tok::comma)) {
++Left->ParameterCount;
if (!Left->Role)
Left->Role.reset(new CommaSeparatedList(Style));
Left->Role->CommaFound(Current);
} else if (Left->ParameterCount == 0 && Current->isNot(tok::comment)) {
Left->ParameterCount = 1;
}
}
bool parseConditional() {
while (CurrentToken) {
if (CurrentToken->is(tok::colon)) {
CurrentToken->Type = TT_ConditionalExpr;
next();
return true;
}
if (!consumeToken())
return false;
}
return false;
}
bool parseTemplateDeclaration() {
if (CurrentToken && CurrentToken->is(tok::less)) {
CurrentToken->Type = TT_TemplateOpener;
next();
if (!parseAngle())
return false;
if (CurrentToken)
CurrentToken->Previous->ClosesTemplateDeclaration = true;
return true;
}
return false;
}
  /// \brief Annotates the current token, advances past it, and recursively
  /// consumes any bracketed construct it opens. Returns false when the line
  /// cannot be parsed (e.g. an unmatched closing bracket).
  bool consumeToken() {
    FormatToken *Tok = CurrentToken;
    next();
    switch (Tok->Tok.getKind()) {
    case tok::plus:
    case tok::minus:
      // A leading '+'/'-' on a declaration line is an ObjC method specifier.
      if (!Tok->Previous && Line.MustBeDeclaration)
        Tok->Type = TT_ObjCMethodSpecifier;
      break;
    case tok::colon:
      if (!Tok->Previous)
        return false;
      // Colons from ?: are handled in parseConditional().
      if (Style.Language == FormatStyle::LK_JavaScript) {
        if (Contexts.back().ColonIsForRangeExpr || // colon in for loop
            (Contexts.size() == 1 &&               // switch/case labels
             !Line.First->isOneOf(tok::kw_enum, tok::kw_case)) ||
            Contexts.back().ContextKind == tok::l_paren ||  // function params
            Contexts.back().ContextKind == tok::l_square || // array type
            (Contexts.size() == 1 &&
             Line.MustBeDeclaration)) { // method/property declaration
          Tok->Type = TT_JsTypeColon;
          break;
        }
      }
      if (Contexts.back().ColonIsDictLiteral) {
        Tok->Type = TT_DictLiteral;
      } else if (Contexts.back().ColonIsObjCMethodExpr ||
                 Line.startsWith(TT_ObjCMethodSpecifier)) {
        // Selector colon: track the longest selector name seen so far.
        Tok->Type = TT_ObjCMethodExpr;
        Tok->Previous->Type = TT_SelectorName;
        if (Tok->Previous->ColumnWidth >
            Contexts.back().LongestObjCSelectorName) {
          Contexts.back().LongestObjCSelectorName = Tok->Previous->ColumnWidth;
        }
        if (!Contexts.back().FirstObjCSelectorName)
          Contexts.back().FirstObjCSelectorName = Tok->Previous;
      } else if (Contexts.back().ColonIsForRangeExpr) {
        Tok->Type = TT_RangeBasedForLoopColon;
      } else if (CurrentToken && CurrentToken->is(tok::numeric_constant)) {
        Tok->Type = TT_BitFieldColon;
      } else if (Contexts.size() == 1 &&
                 !Line.First->isOneOf(tok::kw_enum, tok::kw_case)) {
        if (Tok->Previous->is(tok::r_paren))
          Tok->Type = TT_CtorInitializerColon;
        else
          Tok->Type = TT_InheritanceColon;
      } else if (Tok->Previous->is(tok::identifier) && Tok->Next &&
                 Tok->Next->isOneOf(tok::r_paren, tok::comma)) {
        // This handles a special macro in ObjC code where selectors including
        // the colon are passed as macro arguments.
        Tok->Type = TT_ObjCMethodExpr;
      } else if (Contexts.back().ContextKind == tok::l_paren) {
        Tok->Type = TT_InlineASMColon;
      }
      break;
    case tok::kw_if:
    case tok::kw_while:
      if (CurrentToken && CurrentToken->is(tok::l_paren)) {
        next();
        if (!parseParens(/*LookForDecls=*/true))
          return false;
      }
      break;
    case tok::kw_for:
      Contexts.back().ColonIsForRangeExpr = true;
      next();
      if (!parseParens())
        return false;
      break;
    case tok::l_paren:
      if (!parseParens())
        return false;
      if (Line.MustBeDeclaration && Contexts.size() == 1 &&
          !Contexts.back().IsExpression && !Line.startsWith(TT_ObjCProperty) &&
          (!Tok->Previous ||
           !Tok->Previous->isOneOf(tok::kw_decltype, tok::kw___attribute,
                                   TT_LeadingJavaAnnotation)))
        Line.MightBeFunctionDecl = true;
      break;
    case tok::l_square:
      if (!parseSquare())
        return false;
      break;
    case tok::l_brace:
      if (!parseBrace())
        return false;
      break;
    case tok::less:
      // Try to parse as template parameters unless this '<' has been seen to
      // fail before (NonTemplateLess) or clearly follows an expression.
      if (!NonTemplateLess.count(Tok) &&
          (!Tok->Previous ||
           (!Tok->Previous->Tok.isLiteral() &&
            !(Tok->Previous->is(tok::r_paren) && Contexts.size() > 1))) &&
          parseAngle()) {
        Tok->Type = TT_TemplateOpener;
      } else {
        // Fall back to a binary operator and remember the decision.
        Tok->Type = TT_BinaryOperator;
        NonTemplateLess.insert(Tok);
        CurrentToken = Tok;
        next();
      }
      break;
    case tok::r_paren:
    case tok::r_square:
      return false;
    case tok::r_brace:
      // Lines can start with '}'.
      if (Tok->Previous)
        return false;
      break;
    case tok::greater:
      Tok->Type = TT_BinaryOperator;
      break;
    case tok::kw_operator:
      // Consume everything up to the parameter list (or statement end),
      // marking the operator tokens as overloaded-operator parts.
      while (CurrentToken &&
             !CurrentToken->isOneOf(tok::l_paren, tok::semi, tok::r_paren)) {
        if (CurrentToken->isOneOf(tok::star, tok::amp))
          CurrentToken->Type = TT_PointerOrReference;
        consumeToken();
        if (CurrentToken && CurrentToken->Previous->is(TT_BinaryOperator))
          CurrentToken->Previous->Type = TT_OverloadedOperator;
      }
      if (CurrentToken) {
        CurrentToken->Type = TT_OverloadedOperatorLParen;
        if (CurrentToken->Previous->is(TT_BinaryOperator))
          CurrentToken->Previous->Type = TT_OverloadedOperator;
      }
      break;
    case tok::question:
      if (Style.Language == FormatStyle::LK_JavaScript && Tok->Next &&
          Tok->Next->isOneOf(tok::semi, tok::comma, tok::colon, tok::r_paren,
                             tok::r_brace)) {
        // Question marks before semicolons, colons, etc. indicate optional
        // types (fields, parameters), e.g.
        //   function(x?: string, y?) {...}
        //   class X { y?; }
        Tok->Type = TT_JsTypeOptionalQuestion;
        break;
      }
      // Declarations cannot be conditional expressions, this can only be part
      // of a type declaration.
      if (Line.MustBeDeclaration &&
          Style.Language == FormatStyle::LK_JavaScript)
        break;
      parseConditional();
      break;
    case tok::kw_template:
      parseTemplateDeclaration();
      break;
    case tok::comma:
      if (Contexts.back().InCtorInitializer)
        Tok->Type = TT_CtorInitializerComma;
      else if (Contexts.back().FirstStartOfName &&
               (Contexts.size() == 1 || Line.startsWith(tok::kw_for))) {
        // Multiple names separated by commas at statement scope form a
        // multi-variable declaration statement.
        Contexts.back().FirstStartOfName->PartOfMultiVariableDeclStmt = true;
        Line.IsMultiVariableDeclStmt = true;
      }
      if (Contexts.back().IsForEachMacro)
        Contexts.back().IsExpression = true;
      break;
    default:
      break;
    }
    return true;
  }
void parseIncludeDirective() {
if (CurrentToken && CurrentToken->is(tok::less)) {
next();
while (CurrentToken) {
if (CurrentToken->isNot(tok::comment) || CurrentToken->Next)
CurrentToken->Type = TT_ImplicitStringLiteral;
next();
}
}
}
void parseWarningOrError() {
next();
// We still want to format the whitespace left of the first token of the
// warning or error.
next();
while (CurrentToken) {
CurrentToken->Type = TT_ImplicitStringLiteral;
next();
}
}
void parsePragma() {
next(); // Consume "pragma".
if (CurrentToken &&
CurrentToken->isOneOf(Keywords.kw_mark, Keywords.kw_option)) {
bool IsMark = CurrentToken->is(Keywords.kw_mark);
next(); // Consume "mark".
next(); // Consume first token (so we fix leading whitespace).
while (CurrentToken) {
if (IsMark || CurrentToken->Previous->is(TT_BinaryOperator))
CurrentToken->Type = TT_ImplicitStringLiteral;
next();
}
}
}
  /// \brief Annotates a line starting with '#' and returns its line type
  /// (e.g. LT_ImportStatement for include-like directives).
  LineType parsePreprocessorDirective() {
    LineType Type = LT_PreprocessorDirective;
    next();
    if (!CurrentToken)
      return Type;
    if (CurrentToken->Tok.is(tok::numeric_constant)) {
      // '#' directly followed by a number (e.g. a line marker) keeps exactly
      // one space after the hash.
      CurrentToken->SpacesRequiredBefore = 1;
      return Type;
    }
    // Hashes in the middle of a line can lead to any strange token
    // sequence.
    if (!CurrentToken->Tok.getIdentifierInfo())
      return Type;
    switch (CurrentToken->Tok.getIdentifierInfo()->getPPKeywordID()) {
    case tok::pp_include:
    case tok::pp_include_next:
    case tok::pp_import:
      next();
      parseIncludeDirective();
      Type = LT_ImportStatement;
      break;
    case tok::pp_error:
    case tok::pp_warning:
      parseWarningOrError();
      break;
    case tok::pp_pragma:
      parsePragma();
      break;
    case tok::pp_if:
    case tok::pp_elif:
      // The condition of an #if/#elif is parsed as an expression.
      Contexts.back().IsExpression = true;
      parseLine();
      break;
    default:
      break;
    }
    // Advance over any remaining tokens so their metadata is finalized.
    while (CurrentToken)
      next();
    return Type;
  }
public:
  /// \brief Annotates the whole current line and classifies it (preprocessor
  /// directive, import statement, virtual function declaration, ObjC method
  /// declaration, or other).
  LineType parseLine() {
    NonTemplateLess.clear();
    if (CurrentToken->is(tok::hash))
      return parsePreprocessorDirective();

    // Directly allow to 'import <string-literal>' to support protocol buffer
    // definitions (code.google.com/p/protobuf) or missing "#" (either way we
    // should not break the line).
    IdentifierInfo *Info = CurrentToken->Tok.getIdentifierInfo();
    if ((Style.Language == FormatStyle::LK_Java &&
         CurrentToken->is(Keywords.kw_package)) ||
        (Info && Info->getPPKeywordID() == tok::pp_import &&
         CurrentToken->Next &&
         CurrentToken->Next->isOneOf(tok::string_literal, tok::identifier,
                                     tok::kw_static))) {
      next();
      parseIncludeDirective();
      return LT_ImportStatement;
    }

    // If this line starts and ends in '<' and '>', respectively, it is likely
    // part of "#define <a/b.h>".
    if (CurrentToken->is(tok::less) && Line.Last->is(tok::greater)) {
      parseIncludeDirective();
      return LT_ImportStatement;
    }

    // In .proto files, top-level options are very similar to import statements
    // and should not be line-wrapped.
    if (Style.Language == FormatStyle::LK_Proto && Line.Level == 0 &&
        CurrentToken->is(Keywords.kw_option)) {
      next();
      if (CurrentToken && CurrentToken->is(tok::identifier))
        return LT_ImportStatement;
    }

    bool KeywordVirtualFound = false;
    bool ImportStatement = false;
    while (CurrentToken) {
      if (CurrentToken->is(tok::kw_virtual))
        KeywordVirtualFound = true;
      if (IsImportStatement(*CurrentToken))
        ImportStatement = true;
      if (!consumeToken())
        return LT_Invalid;
    }
    if (KeywordVirtualFound)
      return LT_VirtualFunctionDecl;
    if (ImportStatement)
      return LT_ImportStatement;

    if (Line.startsWith(TT_ObjCMethodSpecifier)) {
      // Propagate the longest selector name seen to the first selector token.
      if (Contexts.back().FirstObjCSelectorName)
        Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName =
            Contexts.back().LongestObjCSelectorName;
      return LT_ObjCMethodDecl;
    }

    return LT_Other;
  }
private:
  // Recognizes Closure-library dependency calls in JavaScript, i.e. the token
  // sequence "goog" "." ("module"|"require"|"provide") "(".
  // FIXME: Closure-library specific stuff should not be hard-coded but be
  // configurable.
  bool IsImportStatement(const FormatToken &Tok) {
    if (Style.Language != FormatStyle::LK_JavaScript || Tok.TokenText != "goog")
      return false;
    const FormatToken *Dot = Tok.Next;
    if (!Dot || !Dot->is(tok::period))
      return false;
    const FormatToken *Callee = Dot->Next;
    if (!Callee || (Callee->TokenText != "module" &&
                    Callee->TokenText != "require" &&
                    Callee->TokenText != "provide"))
      return false;
    const FormatToken *LParen = Callee->Next;
    return LParen && LParen->is(tok::l_paren);
  }
void resetTokenMetadata(FormatToken *Token) {
if (!Token)
return;
// Reset token type in case we have already looked at it and then
// recovered from an error (e.g. failure to find the matching >).
if (!CurrentToken->isOneOf(TT_LambdaLSquare, TT_ForEachMacro,
TT_FunctionLBrace, TT_ImplicitStringLiteral,
TT_InlineASMBrace, TT_JsFatArrow, TT_LambdaArrow,
TT_RegexLiteral))
CurrentToken->Type = TT_Unknown;
CurrentToken->Role.reset();
CurrentToken->MatchingParen = nullptr;
CurrentToken->FakeLParens.clear();
CurrentToken->FakeRParens = 0;
}
void next() {
if (CurrentToken) {
CurrentToken->NestingLevel = Contexts.size() - 1;
CurrentToken->BindingStrength = Contexts.back().BindingStrength;
modifyContext(*CurrentToken);
determineTokenType(*CurrentToken);
CurrentToken = CurrentToken->Next;
}
resetTokenMetadata(CurrentToken);
}
  /// \brief A struct to hold information valid in a specific context, e.g.
  /// a pair of parenthesis.
  struct Context {
    Context(tok::TokenKind ContextKind, unsigned BindingStrength,
            bool IsExpression)
        : ContextKind(ContextKind), BindingStrength(BindingStrength),
          IsExpression(IsExpression) {}
    // Kind of the token that opened this context (e.g. l_paren, l_square).
    tok::TokenKind ContextKind;
    // How tightly tokens in this context bind; used to derive split penalties.
    unsigned BindingStrength;
    // True if tokens in this context form an expression (star/amp are binary
    // operators) rather than a declaration (star/amp are pointers/references).
    bool IsExpression;
    // Length of the longest ObjC selector part seen, for colon alignment.
    unsigned LongestObjCSelectorName = 0;
    // True if a colon here is the ':' of a range-based for loop.
    bool ColonIsForRangeExpr = false;
    // True if a colon here separates key and value in a dict/brace literal.
    bool ColonIsDictLiteral = false;
    // True if a colon here belongs to an ObjC method expression.
    bool ColonIsObjCMethodExpr = false;
    // First selector-name token of an ObjC method expr/decl in this context.
    FormatToken *FirstObjCSelectorName = nullptr;
    // First token annotated TT_StartOfName in this context.
    FormatToken *FirstStartOfName = nullptr;
    // False after 'new', where the parenthesized part cannot be an expression.
    bool CanBeExpression = true;
    // True inside the '<...>' of a template argument list.
    bool InTemplateArgument = false;
    // True after the ':' that starts a constructor initializer list.
    bool InCtorInitializer = false;
    // True once a '^' annotated as a unary operator (ObjC block) was seen.
    bool CaretFound = false;
    // True inside the parentheses of a foreach macro.
    bool IsForEachMacro = false;
  };
/// \brief Puts a new \c Context onto the stack \c Contexts for the lifetime
/// of each instance.
struct ScopedContextCreator {
AnnotatingParser &P;
ScopedContextCreator(AnnotatingParser &P, tok::TokenKind ContextKind,
unsigned Increase)
: P(P) {
P.Contexts.push_back(Context(ContextKind,
P.Contexts.back().BindingStrength + Increase,
P.Contexts.back().IsExpression));
}
~ScopedContextCreator() { P.Contexts.pop_back(); }
};
  // Updates the innermost Context based on \p Current — chiefly deciding
  // whether the surrounding tokens form an expression or a declaration, which
  // later drives star/amp disambiguation. Branch order matters: only the
  // first matching case applies.
  void modifyContext(const FormatToken &Current) {
    if (Current.getPrecedence() == prec::Assignment &&
        !Line.First->isOneOf(tok::kw_template, tok::kw_using) &&
        (!Current.Previous || Current.Previous->isNot(tok::kw_operator))) {
      Contexts.back().IsExpression = true;
      if (!Line.startsWith(TT_UnaryOperator)) {
        // Retroactively mark star/amp tokens left of the assignment as
        // pointers/references (declaration side), e.g. "int *a = ...".
        for (FormatToken *Previous = Current.Previous;
             Previous && !Previous->isOneOf(tok::comma, tok::semi);
             Previous = Previous->Previous) {
          if (Previous->isOneOf(tok::r_square, tok::r_paren)) {
            Previous = Previous->MatchingParen;
            if (!Previous)
              break;
          }
          if (Previous->opensScope())
            break;
          if (Previous->isOneOf(TT_BinaryOperator, TT_UnaryOperator) &&
              Previous->isOneOf(tok::star, tok::amp, tok::ampamp) &&
              Previous->Previous && Previous->Previous->isNot(tok::equal))
            Previous->Type = TT_PointerOrReference;
        }
      }
    } else if (Current.is(tok::lessless) &&
               (!Current.Previous || !Current.Previous->is(tok::kw_operator))) {
      // Stream output "a << b" starts an expression (but not "operator<<").
      Contexts.back().IsExpression = true;
    } else if (Current.isOneOf(tok::kw_return, tok::kw_throw)) {
      Contexts.back().IsExpression = true;
    } else if (Current.is(TT_TrailingReturnArrow)) {
      // A trailing return type is a type position, not an expression.
      Contexts.back().IsExpression = false;
    } else if (Current.is(TT_LambdaArrow)) {
      // In Java, '->' introduces a lambda body (an expression).
      Contexts.back().IsExpression = Style.Language == FormatStyle::LK_Java;
    } else if (Current.is(tok::l_paren) && !Line.MustBeDeclaration &&
               !Line.InPPDirective &&
               (!Current.Previous ||
                Current.Previous->isNot(tok::kw_decltype))) {
      bool ParametersOfFunctionType =
          Current.Previous && Current.Previous->is(tok::r_paren) &&
          Current.Previous->MatchingParen &&
          Current.Previous->MatchingParen->is(TT_FunctionTypeLParen);
      bool IsForOrCatch = Current.Previous &&
                          Current.Previous->isOneOf(tok::kw_for, tok::kw_catch);
      Contexts.back().IsExpression = !ParametersOfFunctionType && !IsForOrCatch;
    } else if (Current.isOneOf(tok::r_paren, tok::greater, tok::comma)) {
      // Closing a scope: star/amp runs directly before it were pointer-ness.
      for (FormatToken *Previous = Current.Previous;
           Previous && Previous->isOneOf(tok::star, tok::amp);
           Previous = Previous->Previous)
        Previous->Type = TT_PointerOrReference;
      if (Line.MustBeDeclaration)
        Contexts.back().IsExpression = Contexts.front().InCtorInitializer;
    } else if (Current.Previous &&
               Current.Previous->is(TT_CtorInitializerColon)) {
      Contexts.back().IsExpression = true;
      Contexts.back().InCtorInitializer = true;
    } else if (Current.is(tok::kw_new)) {
      Contexts.back().CanBeExpression = false;
    } else if (Current.isOneOf(tok::semi, tok::exclaim)) {
      // This should be the condition or increment in a for-loop.
      Contexts.back().IsExpression = true;
    }
  }
  // Assigns a TokenType to \p Current based on its kind, its neighbors and
  // the current Context stack. Only runs for tokens still TT_Unknown; branch
  // order encodes priority among the heuristics.
  void determineTokenType(FormatToken &Current) {
    if (!Current.is(TT_Unknown))
      // The token type is already known.
      return;
    // Line.MightBeFunctionDecl can only be true after the parentheses of a
    // function declaration have been found. In this case, 'Current' is a
    // trailing token of this declaration and thus cannot be a name.
    if (Current.is(Keywords.kw_instanceof)) {
      Current.Type = TT_BinaryOperator;
    } else if (isStartOfName(Current) &&
               (!Line.MightBeFunctionDecl || Current.NestingLevel != 0)) {
      Contexts.back().FirstStartOfName = &Current;
      Current.Type = TT_StartOfName;
    } else if (Current.is(tok::kw_auto)) {
      // Remember 'auto' so a later '->' can be classified as a trailing
      // return arrow.
      AutoFound = true;
    } else if (Current.is(tok::arrow) &&
               Style.Language == FormatStyle::LK_Java) {
      Current.Type = TT_LambdaArrow;
    } else if (Current.is(tok::arrow) && AutoFound && Line.MustBeDeclaration &&
               Current.NestingLevel == 0) {
      Current.Type = TT_TrailingReturnArrow;
    } else if (Current.isOneOf(tok::star, tok::amp, tok::ampamp)) {
      Current.Type =
          determineStarAmpUsage(Current, Contexts.back().CanBeExpression &&
                                             Contexts.back().IsExpression,
                                Contexts.back().InTemplateArgument);
    } else if (Current.isOneOf(tok::minus, tok::plus, tok::caret)) {
      Current.Type = determinePlusMinusCaretUsage(Current);
      if (Current.is(TT_UnaryOperator) && Current.is(tok::caret))
        Contexts.back().CaretFound = true;
    } else if (Current.isOneOf(tok::minusminus, tok::plusplus)) {
      Current.Type = determineIncrementUsage(Current);
    } else if (Current.isOneOf(tok::exclaim, tok::tilde)) {
      Current.Type = TT_UnaryOperator;
    } else if (Current.is(tok::question)) {
      if (Style.Language == FormatStyle::LK_JavaScript &&
          Line.MustBeDeclaration) {
        // In JavaScript, `interface X { foo?(): bar; }` is an optional method
        // on the interface, not a ternary expression.
        Current.Type = TT_JsTypeOptionalQuestion;
      } else {
        Current.Type = TT_ConditionalExpr;
      }
    } else if (Current.isBinaryOperator() &&
               (!Current.Previous || Current.Previous->isNot(tok::l_square))) {
      Current.Type = TT_BinaryOperator;
    } else if (Current.is(tok::comment)) {
      if (Current.TokenText.startswith("/*")) {
        if (Current.TokenText.endswith("*/"))
          Current.Type = TT_BlockComment;
        else
          // The lexer has for some reason determined a comment here. But we
          // cannot really handle it, if it isn't properly terminated.
          Current.Tok.setKind(tok::unknown);
      } else {
        Current.Type = TT_LineComment;
      }
    } else if (Current.is(tok::r_paren)) {
      if (rParenEndsCast(Current))
        Current.Type = TT_CastRParen;
      // An all-uppercase identifier directly before the matching '(' that is
      // not followed by expression-continuation tokens looks like a macro
      // annotation, e.g. "int f() OVERRIDE_MACRO(x);".
      if (Current.MatchingParen && Current.Next &&
          !Current.Next->isBinaryOperator() &&
          !Current.Next->isOneOf(tok::semi, tok::colon, tok::l_brace))
        if (FormatToken *BeforeParen = Current.MatchingParen->Previous)
          if (BeforeParen->is(tok::identifier) &&
              BeforeParen->TokenText == BeforeParen->TokenText.upper() &&
              (!BeforeParen->Previous ||
               BeforeParen->Previous->ClosesTemplateDeclaration))
            Current.Type = TT_FunctionAnnotationRParen;
    } else if (Current.is(tok::at) && Current.Next) {
      if (Current.Next->isStringLiteral()) {
        Current.Type = TT_ObjCStringLiteral;
      } else {
        switch (Current.Next->Tok.getObjCKeywordID()) {
        case tok::objc_interface:
        case tok::objc_implementation:
        case tok::objc_protocol:
          Current.Type = TT_ObjCDecl;
          break;
        case tok::objc_property:
          Current.Type = TT_ObjCProperty;
          break;
        default:
          break;
        }
      }
    } else if (Current.is(tok::period)) {
      FormatToken *PreviousNoComment = Current.getPreviousNonComment();
      if (PreviousNoComment &&
          PreviousNoComment->isOneOf(tok::comma, tok::l_brace))
        // C99 designated initializer, e.g. "{.x = 1}".
        Current.Type = TT_DesignatedInitializerPeriod;
      else if (Style.Language == FormatStyle::LK_Java && Current.Previous &&
               Current.Previous->isOneOf(TT_JavaAnnotation,
                                         TT_LeadingJavaAnnotation)) {
        // Dotted Java annotation names keep the annotation type, e.g.
        // "@foo.Bar".
        Current.Type = Current.Previous->Type;
      }
    } else if (Current.isOneOf(tok::identifier, tok::kw_const) &&
               Current.Previous &&
               !Current.Previous->isOneOf(tok::equal, tok::at) &&
               Line.MightBeFunctionDecl && Contexts.size() == 1) {
      // Line.MightBeFunctionDecl can only be true after the parentheses of a
      // function declaration have been found.
      Current.Type = TT_TrailingAnnotation;
    } else if ((Style.Language == FormatStyle::LK_Java ||
                Style.Language == FormatStyle::LK_JavaScript) &&
               Current.Previous) {
      if (Current.Previous->is(tok::at) &&
          Current.isNot(Keywords.kw_interface)) {
        // '@Name': a leading annotation if only annotations precede it,
        // otherwise an inline annotation.
        const FormatToken &AtToken = *Current.Previous;
        const FormatToken *Previous = AtToken.getPreviousNonComment();
        if (!Previous || Previous->is(TT_LeadingJavaAnnotation))
          Current.Type = TT_LeadingJavaAnnotation;
        else
          Current.Type = TT_JavaAnnotation;
      } else if (Current.Previous->is(tok::period) &&
                 Current.Previous->isOneOf(TT_JavaAnnotation,
                                           TT_LeadingJavaAnnotation)) {
        Current.Type = Current.Previous->Type;
      }
    }
  }
/// \brief Take a guess at whether \p Tok starts a name of a function or
/// variable declaration.
///
/// This is a heuristic based on whether \p Tok is an identifier following
/// something that is likely a type.
bool isStartOfName(const FormatToken &Tok) {
if (Tok.isNot(tok::identifier) || !Tok.Previous)
return false;
if (Tok.Previous->isOneOf(TT_LeadingJavaAnnotation, Keywords.kw_instanceof))
return false;
// Skip "const" as it does not have an influence on whether this is a name.
FormatToken *PreviousNotConst = Tok.Previous;
while (PreviousNotConst && PreviousNotConst->is(tok::kw_const))
PreviousNotConst = PreviousNotConst->Previous;
if (!PreviousNotConst)
return false;
bool IsPPKeyword = PreviousNotConst->is(tok::identifier) &&
PreviousNotConst->Previous &&
PreviousNotConst->Previous->is(tok::hash);
if (PreviousNotConst->is(TT_TemplateCloser))
return PreviousNotConst && PreviousNotConst->MatchingParen &&
PreviousNotConst->MatchingParen->Previous &&
PreviousNotConst->MatchingParen->Previous->isNot(tok::period) &&
PreviousNotConst->MatchingParen->Previous->isNot(tok::kw_template);
if (PreviousNotConst->is(tok::r_paren) && PreviousNotConst->MatchingParen &&
PreviousNotConst->MatchingParen->Previous &&
PreviousNotConst->MatchingParen->Previous->is(tok::kw_decltype))
return true;
return (!IsPPKeyword && PreviousNotConst->is(tok::identifier)) ||
PreviousNotConst->is(TT_PointerOrReference) ||
PreviousNotConst->isSimpleTypeSpecifier();
}
  /// \brief Determine whether ')' is ending a cast.
  ///
  /// Heuristic: a ')' ends a C-style cast if the parenthesized tokens look
  /// like a type, the construct appears in an expression context, and the
  /// tokens around it rule out a call/declaration. The individual checks
  /// below each veto or support the cast interpretation.
  bool rParenEndsCast(const FormatToken &Tok) {
    FormatToken *LeftOfParens = nullptr;
    if (Tok.MatchingParen)
      LeftOfParens = Tok.MatchingParen->getPreviousNonComment();
    // Skip over a preceding parenthesized group so we inspect what is really
    // left of the potential cast, e.g. "(foo)(int)x".
    if (LeftOfParens && LeftOfParens->is(tok::r_paren) &&
        LeftOfParens->MatchingParen)
      LeftOfParens = LeftOfParens->MatchingParen->Previous;
    // A lambda introducer directly before '(' means these are the lambda's
    // parameters, not a cast.
    if (LeftOfParens && LeftOfParens->is(tok::r_square) &&
        LeftOfParens->MatchingParen &&
        LeftOfParens->MatchingParen->is(TT_LambdaLSquare))
      return false;
    if (Tok.Next) {
      // ") ?" is a ternary condition, not a cast.
      if (Tok.Next->is(tok::question))
        return false;
      // JavaScript "for (x in y)" style constructs.
      if (Style.Language == FormatStyle::LK_JavaScript &&
          Tok.Next->is(Keywords.kw_in))
        return false;
      // Java: "(Type)(expr)" is always a cast.
      if (Style.Language == FormatStyle::LK_Java && Tok.Next->is(tok::l_paren))
        return true;
    }
    bool IsCast = false;
    bool ParensAreEmpty = Tok.Previous == Tok.MatchingParen;
    bool ParensAreType =
        !Tok.Previous ||
        Tok.Previous->isOneOf(TT_PointerOrReference, TT_TemplateCloser) ||
        Tok.Previous->isSimpleTypeSpecifier();
    bool ParensCouldEndDecl =
        Tok.Next && Tok.Next->isOneOf(tok::equal, tok::semi, tok::l_brace);
    bool IsSizeOfOrAlignOf =
        LeftOfParens && LeftOfParens->isOneOf(tok::kw_sizeof, tok::kw_alignof);
    if (ParensAreType && !ParensCouldEndDecl && !IsSizeOfOrAlignOf &&
        (Contexts.size() > 1 && Contexts[Contexts.size() - 2].IsExpression))
      IsCast = true;
    else if (Tok.Next && Tok.Next->isNot(tok::string_literal) &&
             (Tok.Next->Tok.isLiteral() ||
              Tok.Next->isOneOf(tok::kw_sizeof, tok::kw_alignof)))
      IsCast = true;
    // If there is an identifier after the (), it is likely a cast, unless
    // there is also an identifier before the ().
    else if (LeftOfParens && Tok.Next &&
             (LeftOfParens->Tok.getIdentifierInfo() == nullptr ||
              LeftOfParens->isOneOf(tok::kw_return, tok::kw_case)) &&
             !LeftOfParens->isOneOf(TT_OverloadedOperator, tok::at,
                                    TT_TemplateCloser)) {
      if (Tok.Next->isOneOf(tok::identifier, tok::numeric_constant)) {
        IsCast = true;
      } else {
        // Use heuristics to recognize c style casting.
        FormatToken *Prev = Tok.Previous;
        if (Prev && Prev->isOneOf(tok::amp, tok::star))
          Prev = Prev->Previous;
        if (Prev && Tok.Next && Tok.Next->Next) {
          bool NextIsUnary = Tok.Next->isUnaryOperator() ||
                             Tok.Next->isOneOf(tok::amp, tok::star);
          IsCast =
              NextIsUnary && !Tok.Next->is(tok::plus) &&
              Tok.Next->Next->isOneOf(tok::identifier, tok::numeric_constant);
        }
        // The parenthesized tokens must all be plausible parts of a type name
        // (const, identifiers, '::'); anything else vetoes the cast.
        for (; Prev != Tok.MatchingParen; Prev = Prev->Previous) {
          if (!Prev ||
              !Prev->isOneOf(tok::kw_const, tok::identifier, tok::coloncolon)) {
            IsCast = false;
            break;
          }
        }
      }
    }
    // "()" is never a cast.
    return IsCast && !ParensAreEmpty;
  }
/// \brief Return the type of the given token assuming it is * or &.
TokenType determineStarAmpUsage(const FormatToken &Tok, bool IsExpression,
bool InTemplateArgument) {
if (Style.Language == FormatStyle::LK_JavaScript)
return TT_BinaryOperator;
const FormatToken *PrevToken = Tok.getPreviousNonComment();
if (!PrevToken)
return TT_UnaryOperator;
const FormatToken *NextToken = Tok.getNextNonComment();
if (!NextToken || NextToken->is(tok::arrow) ||
(NextToken->is(tok::l_brace) && !NextToken->getNextNonComment()))
return TT_Unknown;
if (PrevToken->is(tok::coloncolon))
return TT_PointerOrReference;
if (PrevToken->isOneOf(tok::l_paren, tok::l_square, tok::l_brace,
tok::comma, tok::semi, tok::kw_return, tok::colon,
tok::equal, tok::kw_delete, tok::kw_sizeof) ||
PrevToken->isOneOf(TT_BinaryOperator, TT_ConditionalExpr,
TT_UnaryOperator, TT_CastRParen))
return TT_UnaryOperator;
if (NextToken->is(tok::l_square) && NextToken->isNot(TT_LambdaLSquare))
return TT_PointerOrReference;
if (NextToken->isOneOf(tok::kw_operator, tok::comma, tok::semi))
return TT_PointerOrReference;
if (PrevToken->is(tok::r_paren) && PrevToken->MatchingParen &&
PrevToken->MatchingParen->Previous &&
PrevToken->MatchingParen->Previous->isOneOf(tok::kw_typeof,
tok::kw_decltype))
return TT_PointerOrReference;
if (PrevToken->Tok.isLiteral() ||
PrevToken->isOneOf(tok::r_paren, tok::r_square, tok::kw_true,
tok::kw_false, tok::r_brace) ||
NextToken->Tok.isLiteral() ||
NextToken->isOneOf(tok::kw_true, tok::kw_false) ||
NextToken->isUnaryOperator() ||
// If we know we're in a template argument, there are no named
// declarations. Thus, having an identifier on the right-hand side
// indicates a binary operator.
(InTemplateArgument && NextToken->Tok.isAnyIdentifier()))
return TT_BinaryOperator;
// "&&(" is quite unlikely to be two successive unary "&".
if (Tok.is(tok::ampamp) && NextToken && NextToken->is(tok::l_paren))
return TT_BinaryOperator;
// This catches some cases where evaluation order is used as control flow:
// aaa && aaa->f();
const FormatToken *NextNextToken = NextToken->getNextNonComment();
if (NextNextToken && NextNextToken->is(tok::arrow))
return TT_BinaryOperator;
// It is very unlikely that we are going to find a pointer or reference type
// definition on the RHS of an assignment.
if (IsExpression && !Contexts.back().CaretFound)
return TT_BinaryOperator;
return TT_PointerOrReference;
}
TokenType determinePlusMinusCaretUsage(const FormatToken &Tok) {
const FormatToken *PrevToken = Tok.getPreviousNonComment();
if (!PrevToken || PrevToken->is(TT_CastRParen))
return TT_UnaryOperator;
// Use heuristics to recognize unary operators.
if (PrevToken->isOneOf(tok::equal, tok::l_paren, tok::comma, tok::l_square,
tok::question, tok::colon, tok::kw_return,
tok::kw_case, tok::at, tok::l_brace))
return TT_UnaryOperator;
// There can't be two consecutive binary operators.
if (PrevToken->is(TT_BinaryOperator))
return TT_UnaryOperator;
// Fall back to marking the token as binary operator.
return TT_BinaryOperator;
}
/// \brief Determine whether ++/-- are pre- or post-increments/-decrements.
TokenType determineIncrementUsage(const FormatToken &Tok) {
const FormatToken *PrevToken = Tok.getPreviousNonComment();
if (!PrevToken || PrevToken->is(TT_CastRParen))
return TT_UnaryOperator;
if (PrevToken->isOneOf(tok::r_paren, tok::r_square, tok::identifier))
return TT_TrailingUnaryOperator;
return TT_UnaryOperator;
}
  // Stack of nesting contexts; back() is the innermost context currently open.
  SmallVector<Context, 8> Contexts;
  const FormatStyle &Style;
  AnnotatedLine &Line;
  // The token currently being analyzed; null once the line is fully consumed.
  FormatToken *CurrentToken;
  // Whether 'auto' was seen on this line (enables trailing-return-arrow
  // detection in determineTokenType).
  bool AutoFound;
  const AdditionalKeywords &Keywords;
  // Set of "<" tokens that do not open a template parameter list. If parseAngle
  // determines that a specific token can't be a template opener, it will make
  // same decision irrespective of the decisions for tokens leading up to it.
  // Store this information to prevent this from causing exponential runtime.
  llvm::SmallPtrSet<FormatToken *, 16> NonTemplateLess;
};
// Artificial precedence levels above every real binary-operator precedence:
// unary operators bind tighter than all binary operators, and member access
// ('.' / '->') binds tighter still.
static const int PrecedenceUnaryOperator = prec::PointerToMember + 1;
static const int PrecedenceArrowAndPeriod = prec::PointerToMember + 2;
/// \brief Parses binary expressions by inserting fake parenthesis based on
/// operator precedence.
///
/// Walks the token list of a single AnnotatedLine with a recursive-descent
/// scheme over precedence levels, recording the resulting grouping as fake
/// '(' / ')' counters on the tokens (FakeLParens / FakeRParens) for the
/// layout phase to use.
class ExpressionParser {
public:
ExpressionParser(const FormatStyle &Style, const AdditionalKeywords &Keywords,
AnnotatedLine &Line)
: Style(Style), Keywords(Keywords), Current(Line.First) {}
/// \brief Parse expressions with the given operator precedence.
void parse(int Precedence = 0) {
// Skip 'return' and ObjC selector colons as they are not part of a binary
// expression.
while (Current && (Current->is(tok::kw_return) ||
(Current->is(tok::colon) &&
Current->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral))))
next();
if (!Current || Precedence > PrecedenceArrowAndPeriod)
return;
// Conditional expressions need to be parsed separately for proper nesting.
if (Precedence == prec::Conditional) {
parseConditionalExpr();
return;
}
// Parse unary operators, which all have a higher precedence than binary
// operators.
if (Precedence == PrecedenceUnaryOperator) {
parseUnaryOperator();
return;
}
FormatToken *Start = Current;
FormatToken *LatestOperator = nullptr;
unsigned OperatorIndex = 0;
while (Current) {
// Consume operators with higher precedence.
parse(Precedence + 1);
int CurrentPrecedence = getCurrentPrecedence();
if (Current && Current->is(TT_SelectorName) &&
Precedence == CurrentPrecedence) {
// Each ObjC selector part starts a new fake group at this level.
if (LatestOperator)
addFakeParenthesis(Start, prec::Level(Precedence));
Start = Current;
}
// At the end of the line or when an operator with higher precedence is
// found, insert fake parenthesis and return.
if (!Current || (Current->closesScope() && Current->MatchingParen) ||
(CurrentPrecedence != -1 && CurrentPrecedence < Precedence) ||
(CurrentPrecedence == prec::Conditional &&
Precedence == prec::Assignment && Current->is(tok::colon))) {
break;
}
// Consume scopes: (), [], <> and {}
if (Current->opensScope()) {
while (Current && !Current->closesScope()) {
next();
parse();
}
next();
} else {
// Operator found.
if (CurrentPrecedence == Precedence) {
LatestOperator = Current;
Current->OperatorIndex = OperatorIndex;
++OperatorIndex;
}
next(/*SkipPastLeadingComments=*/Precedence > 0);
}
}
if (LatestOperator && (Current || Precedence > 0)) {
LatestOperator->LastOperator = true;
if (Precedence == PrecedenceArrowAndPeriod) {
// Call expressions don't have a binary operator precedence.
addFakeParenthesis(Start, prec::Unknown);
} else {
addFakeParenthesis(Start, prec::Level(Precedence));
}
}
}
private:
/// \brief Gets the precedence (+1) of the given token for binary operators
/// and other tokens that we treat like binary operators.
/// Returns -1 when the current token is not operator-like at all.
int getCurrentPrecedence() {
if (Current) {
const FormatToken *NextNonComment = Current->getNextNonComment();
if (Current->is(TT_ConditionalExpr))
return prec::Conditional;
if (NextNonComment && NextNonComment->is(tok::colon) &&
NextNonComment->is(TT_DictLiteral))
return prec::Comma;
if (Current->is(TT_LambdaArrow))
return prec::Comma;
if (Current->is(TT_JsFatArrow))
return prec::Assignment;
// Tokens that terminate an expression entirely act like the weakest
// possible "operator" (precedence 0).
if (Current->isOneOf(tok::semi, TT_InlineASMColon, TT_SelectorName,
TT_JsComputedPropertyName) ||
(Current->is(tok::comment) && NextNonComment &&
NextNonComment->is(TT_SelectorName)))
return 0;
if (Current->is(TT_RangeBasedForLoopColon))
return prec::Comma;
if ((Style.Language == FormatStyle::LK_Java ||
Style.Language == FormatStyle::LK_JavaScript) &&
Current->is(Keywords.kw_instanceof))
return prec::Relational;
if (Current->is(TT_BinaryOperator) || Current->is(tok::comma))
return Current->getPrecedence();
if (Current->isOneOf(tok::period, tok::arrow))
return PrecedenceArrowAndPeriod;
if (Style.Language == FormatStyle::LK_Java &&
Current->isOneOf(Keywords.kw_extends, Keywords.kw_implements,
Keywords.kw_throws))
return 0;
}
return -1;
}
// Records a fake '(' at \p Start and the matching fake ')' on the token
// before Current. NOTE(review): assumes Current->Previous is non-null when
// Current is set — holds in practice because tokens were consumed before
// this is called, but confirm if changing call sites.
void addFakeParenthesis(FormatToken *Start, prec::Level Precedence) {
Start->FakeLParens.push_back(Precedence);
if (Precedence > prec::Unknown)
Start->StartsBinaryExpression = true;
if (Current) {
FormatToken *Previous = Current->Previous;
while (Previous->is(tok::comment) && Previous->Previous)
Previous = Previous->Previous;
++Previous->FakeRParens;
if (Precedence > prec::Unknown)
Previous->EndsBinaryExpression = true;
}
}
/// \brief Parse unary operator expressions and surround them with fake
/// parentheses if appropriate.
void parseUnaryOperator() {
if (!Current || Current->isNot(TT_UnaryOperator)) {
parse(PrecedenceArrowAndPeriod);
return;
}
FormatToken *Start = Current;
next();
parseUnaryOperator();
// The actual precedence doesn't matter.
addFakeParenthesis(Start, prec::Unknown);
}
// Parses "cond ? a : b" with the condition, true- and false-branches each
// grouped at the appropriate precedence.
void parseConditionalExpr() {
while (Current && Current->isTrailingComment()) {
next();
}
FormatToken *Start = Current;
parse(prec::LogicalOr);
if (!Current || !Current->is(tok::question))
return;
next();
parse(prec::Assignment);
if (!Current || Current->isNot(TT_ConditionalExpr))
return;
next();
parse(prec::Assignment);
addFakeParenthesis(Start, prec::Conditional);
}
// Advances Current past the next token; trailing comments are skipped, and
// when \p SkipPastLeadingComments is set, comments at line starts too.
void next(bool SkipPastLeadingComments = true) {
if (Current)
Current = Current->Next;
while (Current &&
(Current->NewlinesBefore == 0 || SkipPastLeadingComments) &&
Current->isTrailingComment())
Current = Current->Next;
}
const FormatStyle &Style;
const AdditionalKeywords &Keywords;
// Cursor over the line's token list; null past the end.
FormatToken *Current;
};
} // end anonymous namespace
// Walks the lines bottom-up and gives every comment-only line the
// indentation level of the next non-comment line below it, so comments
// indent with the code they precede. Recurses into child lines.
void TokenAnnotator::setCommentLineLevels(
    SmallVectorImpl<AnnotatedLine *> &Lines) {
  const AnnotatedLine *NextNonCommentLine = nullptr;
  for (auto I = Lines.rbegin(), E = Lines.rend(); I != E; ++I) {
    AnnotatedLine *Line = *I;
    bool IsCommentOnly =
        Line->First->is(tok::comment) && Line->First->Next == nullptr;
    if (NextNonCommentLine && IsCommentOnly)
      Line->Level = NextNonCommentLine->Level;
    else
      NextNonCommentLine = Line->First->isNot(tok::r_brace) ? Line : nullptr;
    setCommentLineLevels(Line->Children);
  }
}
// Annotates one line (and, first, all of its children): classifies the line
// type, inserts fake parentheses for operator precedence, and seeds the
// first token's spacing/breaking properties.
void TokenAnnotator::annotate(AnnotatedLine &Line) {
  for (AnnotatedLine *Child : Line.Children)
    annotate(*Child);

  AnnotatingParser Parser(Style, Line, Keywords);
  Line.Type = Parser.parseLine();
  if (Line.Type == LT_Invalid)
    return;

  ExpressionParser ExprParser(Style, Keywords, Line);
  ExprParser.parse();

  if (Line.startsWith(TT_ObjCMethodSpecifier))
    Line.Type = LT_ObjCMethodDecl;
  else if (Line.startsWith(TT_ObjCDecl))
    Line.Type = LT_ObjCDecl;
  else if (Line.startsWith(TT_ObjCProperty))
    Line.Type = LT_ObjCProperty;

  Line.First->SpacesRequiredBefore = 1;
  Line.First->CanBreakBefore = Line.First->MustBreakBefore;
}
// This function heuristically determines whether 'Current' starts the name of a
// function declaration.
//
// It scans forward from 'Current' over a possibly qualified, possibly
// templated name until it reaches a '(' and then checks whether the
// parenthesized tokens look like a parameter list rather than arguments.
static bool isFunctionDeclarationName(const FormatToken &Current) {
  if (!Current.is(TT_StartOfName) || Current.NestingLevel != 0)
    return false;
  const FormatToken *Next = Current.Next;
  for (; Next; Next = Next->Next) {
    if (Next->is(TT_TemplateOpener)) {
      // Skip the whole template argument list.
      Next = Next->MatchingParen;
    } else if (Next->is(tok::coloncolon)) {
      // Qualified name: '::' must be followed by another identifier.
      Next = Next->Next;
      if (!Next || !Next->is(tok::identifier))
        return false;
    } else if (Next->is(tok::l_paren)) {
      break;
    } else {
      return false;
    }
  }
  if (!Next)
    return false;
  assert(Next->is(tok::l_paren));
  // "()" — an empty parameter list is a declaration.
  if (Next->Next == Next->MatchingParen)
    return true;
  // Parameter-like tokens (types, names) => declaration; expression-like
  // tokens (literals, braces) => call.
  for (const FormatToken *Tok = Next->Next; Tok && Tok != Next->MatchingParen;
       Tok = Tok->Next) {
    if (Tok->is(tok::kw_const) || Tok->isSimpleTypeSpecifier() ||
        Tok->isOneOf(TT_PointerOrReference, TT_StartOfName))
      return true;
    if (Tok->isOneOf(tok::l_brace, tok::string_literal, TT_ObjCMethodExpr) ||
        Tok->Tok.isLiteral())
      return false;
  }
  return false;
}
// Second annotation pass: for every token of \p Line (children first),
// computes spacing requirements, mandatory/possible break points, running
// line lengths (TotalLength) and split penalties used by the layouter.
void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
  for (SmallVectorImpl<AnnotatedLine *>::iterator I = Line.Children.begin(),
                                                  E = Line.Children.end();
       I != E; ++I) {
    calculateFormattingInformation(**I);
  }
  Line.First->TotalLength =
      Line.First->IsMultiline ? Style.ColumnLimit : Line.First->ColumnWidth;
  if (!Line.First->Next)
    return;
  // Current starts at the second token, so Current->Previous is non-null
  // throughout the loop below.
  FormatToken *Current = Line.First->Next;
  bool InFunctionDecl = Line.MightBeFunctionDecl;
  while (Current) {
    if (isFunctionDeclarationName(*Current))
      Current->Type = TT_FunctionDeclarationName;
    if (Current->is(TT_LineComment)) {
      if (Current->Previous->BlockKind == BK_BracedInit &&
          Current->Previous->opensScope())
        Current->SpacesRequiredBefore = Style.Cpp11BracedListStyle ? 0 : 1;
      else
        Current->SpacesRequiredBefore = Style.SpacesBeforeTrailingComments;
      // If we find a trailing comment, iterate backwards to determine whether
      // it seems to relate to a specific parameter. If so, break before that
      // parameter to avoid changing the comment's meaning. E.g. don't move 'b'
      // to the previous line in:
      //   SomeFunction(a,
      //                b, // comment
      //                c);
      if (!Current->HasUnescapedNewline) {
        for (FormatToken *Parameter = Current->Previous; Parameter;
             Parameter = Parameter->Previous) {
          if (Parameter->isOneOf(tok::comment, tok::r_brace))
            break;
          if (Parameter->Previous && Parameter->Previous->is(tok::comma)) {
            if (!Parameter->Previous->is(TT_CtorInitializerComma) &&
                Parameter->HasUnescapedNewline)
              Parameter->MustBreakBefore = true;
            break;
          }
        }
      }
    } else if (Current->SpacesRequiredBefore == 0 &&
               spaceRequiredBefore(Line, *Current)) {
      Current->SpacesRequiredBefore = 1;
    }
    Current->MustBreakBefore =
        Current->MustBreakBefore || mustBreakBefore(Line, *Current);
    // Force the return type of a function *definition* onto its own line
    // when the style asks for it.
    if ((Style.AlwaysBreakAfterDefinitionReturnType == FormatStyle::DRTBS_All ||
         (Style.AlwaysBreakAfterDefinitionReturnType ==
              FormatStyle::DRTBS_TopLevel &&
          Line.Level == 0)) &&
        InFunctionDecl && Current->is(TT_FunctionDeclarationName) &&
        !Line.Last->isOneOf(tok::semi, tok::comment)) // Only for definitions.
      // FIXME: Line.Last points to other characters than tok::semi
      // and tok::lbrace.
      Current->MustBreakBefore = true;
    Current->CanBreakBefore =
        Current->MustBreakBefore || canBreakBefore(Line, *Current);
    // If the previous token owns exactly one child line (e.g. a short lambda
    // body), account for its width when computing TotalLength.
    unsigned ChildSize = 0;
    if (Current->Previous->Children.size() == 1) {
      FormatToken &LastOfChild = *Current->Previous->Children[0]->Last;
      ChildSize = LastOfChild.isTrailingComment() ? Style.ColumnLimit
                                                  : LastOfChild.TotalLength + 1;
    }
    const FormatToken *Prev = Current->Previous;
    if (Current->MustBreakBefore || Prev->Children.size() > 1 ||
        (Prev->Children.size() == 1 &&
         Prev->Children[0]->First->MustBreakBefore) ||
        Current->IsMultiline)
      // An unavoidable break: charge a full column limit so the layouter
      // treats the token as "too long to merge".
      Current->TotalLength = Prev->TotalLength + Style.ColumnLimit;
    else
      Current->TotalLength = Prev->TotalLength + Current->ColumnWidth +
                             ChildSize + Current->SpacesRequiredBefore;
    if (Current->is(TT_CtorInitializerColon))
      InFunctionDecl = false;
    // FIXME: Only calculate this if CanBreakBefore is true once static
    // initializers etc. are sorted out.
    // FIXME: Move magic numbers to a better place.
    Current->SplitPenalty = 20 * Current->BindingStrength +
                            splitPenalty(Line, *Current, InFunctionDecl);
    Current = Current->Next;
  }
  calculateUnbreakableTailLengths(Line);
  for (Current = Line.First; Current != nullptr; Current = Current->Next) {
    if (Current->Role)
      Current->Role->precomputeFormattingInfos(Current);
  }
  DEBUG({ printDebugInfo(Line); });
}
// Computes, for every token, the width of the run of tokens after it that
// cannot be broken before — walking the line right-to-left and resetting the
// running width at each breakable position.
void TokenAnnotator::calculateUnbreakableTailLengths(AnnotatedLine &Line) {
  unsigned TailLength = 0;
  for (FormatToken *Tok = Line.Last; Tok; Tok = Tok->Previous) {
    Tok->UnbreakableTailLength = TailLength;
    bool Breakable = Tok->CanBreakBefore ||
                     Tok->isOneOf(tok::comment, tok::string_literal);
    TailLength = Breakable
                     ? 0
                     : TailLength + Tok->ColumnWidth + Tok->SpacesRequiredBefore;
  }
}
// Returns the penalty for breaking the line between Tok's predecessor (Left)
// and Tok itself (Right). Higher values make a break less likely. The checks
// are ordered by priority: the first matching rule wins.
unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
                                      const FormatToken &Tok,
                                      bool InFunctionDecl) {
  const FormatToken &Left = *Tok.Previous;
  const FormatToken &Right = Tok;

  if (Left.is(tok::semi))
    return 0;

  if (Style.Language == FormatStyle::LK_Java) {
    if (Right.isOneOf(Keywords.kw_extends, Keywords.kw_throws))
      return 1;
    if (Right.is(Keywords.kw_implements))
      return 2;
    if (Left.is(tok::comma) && Left.NestingLevel == 0)
      return 3;
  } else if (Style.Language == FormatStyle::LK_JavaScript) {
    if (Right.is(Keywords.kw_function) && Left.isNot(tok::comma))
      return 100;
    if (Left.is(TT_JsTypeColon))
      return 100;
  }

  if (Left.is(tok::comma) || (Right.is(tok::identifier) && Right.Next &&
                              Right.Next->is(TT_DictLiteral)))
    return 1;
  if (Right.is(tok::l_square)) {
    if (Style.Language == FormatStyle::LK_Proto)
      return 1;
    // Slightly prefer formatting local lambda definitions like functions.
    if (Right.is(TT_LambdaLSquare) && Left.is(tok::equal))
      return 50;
    if (!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
                       TT_ArrayInitializerLSquare))
      return 500;
  }

  if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) ||
      Right.is(tok::kw_operator)) {
    if (Line.startsWith(tok::kw_for) && Right.PartOfMultiVariableDeclStmt)
      return 3;
    if (Left.is(TT_StartOfName))
      return 110;
    if (InFunctionDecl && Right.NestingLevel == 0)
      return Style.PenaltyReturnTypeOnItsOwnLine;
    return 200;
  }
  if (Right.is(TT_PointerOrReference))
    return 190;
  if (Right.is(TT_LambdaArrow))
    return 110;
  if (Left.is(tok::equal) && Right.is(tok::l_brace))
    return 150;
  if (Left.is(TT_CastRParen))
    return 100;
  if (Left.is(tok::coloncolon) ||
      (Right.is(tok::period) && Style.Language == FormatStyle::LK_Proto))
    return 500;
  if (Left.isOneOf(tok::kw_class, tok::kw_struct))
    return 5000;

  if (Left.isOneOf(TT_RangeBasedForLoopColon, TT_InheritanceColon))
    return 2;

  if (Right.isMemberAccess()) {
    if (Left.is(tok::r_paren) && Left.MatchingParen &&
        Left.MatchingParen->ParameterCount > 0)
      return 20; // Should be smaller than breaking at a nested comma.
    return 150;
  }

  if (Right.is(TT_TrailingAnnotation) &&
      (!Right.Next || Right.Next->isNot(tok::l_paren))) {
    // Moving trailing annotations to the next line is fine for ObjC method
    // declarations.
    if (Line.startsWith(TT_ObjCMethodSpecifier))
      return 10;
    // Generally, breaking before a trailing annotation is bad unless it is
    // function-like. It seems to be especially preferable to keep standard
    // annotations (i.e. "const", "final" and "override") on the same line.
    // Use a slightly higher penalty after ")" so that annotations like
    // "const override" are kept together.
    bool is_short_annotation = Right.TokenText.size() < 10;
    return (Left.is(tok::r_paren) ? 100 : 120) + (is_short_annotation ? 50 : 0);
  }

  // In for-loops, prefer breaking at ',' and ';'.
  if (Line.startsWith(tok::kw_for) && Left.is(tok::equal))
    return 4;

  // In Objective-C method expressions, prefer breaking before "param:" over
  // breaking after it.
  if (Right.is(TT_SelectorName))
    return 0;
  if (Left.is(tok::colon) && Left.is(TT_ObjCMethodExpr))
    return Line.MightBeFunctionDecl ? 50 : 500;

  if (Left.is(tok::l_paren) && InFunctionDecl && Style.AlignAfterOpenBracket)
    return 100;
  if (Left.is(tok::l_paren) && Left.Previous &&
      Left.Previous->isOneOf(tok::kw_if, tok::kw_for))
    return 1000;
  if (Left.is(tok::equal) && InFunctionDecl)
    return 110;
  if (Right.is(tok::r_brace))
    return 1;
  if (Left.is(TT_TemplateOpener))
    return 100;
  if (Left.opensScope()) {
    if (!Style.AlignAfterOpenBracket)
      return 0;
    return Left.ParameterCount > 1 ? Style.PenaltyBreakBeforeFirstCallParameter
                                   : 19;
  }
  if (Left.is(TT_JavaAnnotation))
    return 50;

  if (Right.is(tok::lessless)) {
    if (Left.is(tok::string_literal) &&
        (!Right.LastOperator || Right.OperatorIndex != 1)) {
      // Streaming several pieces after a label-like literal ("name: " << x):
      // keep the label with its value.
      StringRef Content = Left.TokenText;
      if (Content.startswith("\""))
        Content = Content.drop_front(1);
      if (Content.endswith("\""))
        Content = Content.drop_back(1);
      Content = Content.trim();
      if (Content.size() > 1 &&
          (Content.back() == ':' || Content.back() == '='))
        return 25;
    }
    return 1; // Breaking at a << is really cheap.
  }
  if (Left.is(TT_ConditionalExpr))
    return prec::Conditional;
  // Otherwise the penalty is simply the precedence of the operator at the
  // break point (tighter-binding operators are more expensive to break at).
  prec::Level Level = Left.getPrecedence();
  if (Level != prec::Unknown)
    return Level;
  Level = Right.getPrecedence();
  if (Level != prec::Unknown)
    return Level;

  return 3;
}
// Decides whether a space must separate \p Left and \p Right. These are the
// language-independent core rules; per-language special cases live in
// spaceRequiredBefore(), which falls back to this function.
// NOTE: The rules are order-dependent - the first matching condition wins, so
// do not reorder them.
bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
                                          const FormatToken &Left,
                                          const FormatToken &Right) {
  // "return" is always followed by a space unless the statement is a bare
  // "return;".
  if (Left.is(tok::kw_return) && Right.isNot(tok::semi))
    return true;
  if (Style.ObjCSpaceAfterProperty && Line.Type == LT_ObjCProperty &&
      Left.Tok.getObjCKeywordID() == tok::objc_property)
    return true;
  // Preprocessor operators: "##" attaches directly to a preceding "#";
  // "#"/"##" only take a following space before another "#".
  if (Right.is(tok::hashhash))
    return Left.is(tok::hash);
  if (Left.isOneOf(tok::hashhash, tok::hash))
    return Right.is(tok::hash);
  if (Left.is(tok::l_paren) && Right.is(tok::r_paren))
    return Style.SpaceInEmptyParentheses;
  // Inside parentheses: cast parens and ordinary parens are configured by
  // separate style options.
  if (Left.is(tok::l_paren) || Right.is(tok::r_paren))
    return (Right.is(TT_CastRParen) ||
            (Left.MatchingParen && Left.MatchingParen->is(TT_CastRParen)))
               ? Style.SpacesInCStyleCastParentheses
               : Style.SpacesInParentheses;
  if (Right.isOneOf(tok::semi, tok::comma))
    return false;
  // "template <" and ObjC protocol lists ("Foo <Proto>") get a space before
  // '<' depending on style.
  if (Right.is(tok::less) &&
      (Left.is(tok::kw_template) ||
       (Line.Type == LT_ObjCDecl && Style.ObjCSpaceBeforeProtocolList)))
    return true;
  if (Left.isOneOf(tok::exclaim, tok::tilde))
    return false;
  // ObjC literals like @"...", @123, @{...}, @(...) keep '@' attached.
  if (Left.is(tok::at) &&
      Right.isOneOf(tok::identifier, tok::string_literal, tok::char_constant,
                    tok::numeric_constant, tok::l_paren, tok::l_brace,
                    tok::kw_true, tok::kw_false))
    return false;
  if (Left.is(tok::coloncolon))
    return false;
  if (Left.is(tok::less) || Right.isOneOf(tok::greater, tok::less))
    return false;
  if (Right.is(tok::ellipsis))
    return Left.Tok.isLiteral();
  // Lambda capture-by-reference: "[&".
  if (Left.is(tok::l_square) && Right.is(tok::amp))
    return false;
  // Pointer/reference declarators: spacing depends on PointerAlignment and on
  // whether the '*'/'&' follows a function declarator's closing paren.
  if (Right.is(TT_PointerOrReference))
    return !(Left.is(tok::r_paren) && Left.MatchingParen &&
             (Left.MatchingParen->is(TT_OverloadedOperatorLParen) ||
              (Left.MatchingParen->Previous &&
               Left.MatchingParen->Previous->is(
                   TT_FunctionDeclarationName)))) &&
           (Left.Tok.isLiteral() ||
            (!Left.isOneOf(TT_PointerOrReference, tok::l_paren) &&
             (Style.PointerAlignment != FormatStyle::PAS_Left ||
              Line.IsMultiVariableDeclStmt)));
  if (Right.is(TT_FunctionTypeLParen) && Left.isNot(tok::l_paren) &&
      (!Left.is(TT_PointerOrReference) ||
       (Style.PointerAlignment != FormatStyle::PAS_Right &&
        !Line.IsMultiVariableDeclStmt)))
    return true;
  if (Left.is(TT_PointerOrReference))
    return Right.Tok.isLiteral() || Right.is(TT_BlockComment) ||
           (Right.is(tok::l_brace) && Right.BlockKind == BK_Block) ||
           (!Right.isOneOf(TT_PointerOrReference, TT_ArraySubscriptLSquare,
                           tok::l_paren) &&
            (Style.PointerAlignment != FormatStyle::PAS_Right &&
             !Line.IsMultiVariableDeclStmt) &&
            Left.Previous &&
            !Left.Previous->isOneOf(tok::l_paren, tok::coloncolon));
  if (Right.is(tok::star) && Left.is(tok::l_paren))
    return false;
  // Square brackets: container literals and array subscripts have separate
  // style options controlling interior spaces.
  if (Left.is(tok::l_square))
    return (Left.is(TT_ArrayInitializerLSquare) &&
            Style.SpacesInContainerLiterals && Right.isNot(tok::r_square)) ||
           (Left.is(TT_ArraySubscriptLSquare) && Style.SpacesInSquareBrackets &&
            Right.isNot(tok::r_square));
  if (Right.is(tok::r_square))
    return Right.MatchingParen &&
           ((Style.SpacesInContainerLiterals &&
             Right.MatchingParen->is(TT_ArrayInitializerLSquare)) ||
            (Style.SpacesInSquareBrackets &&
             Right.MatchingParen->is(TT_ArraySubscriptLSquare)));
  if (Right.is(tok::l_square) &&
      !Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare) &&
      !Left.isOneOf(tok::numeric_constant, TT_DictLiteral))
    return false;
  if (Left.is(tok::colon))
    return !Left.is(TT_ObjCMethodExpr);
  if (Left.is(tok::l_brace) && Right.is(tok::r_brace))
    return !Left.Children.empty(); // No spaces in "{}".
  // Braced init lists (non-block braces) follow Cpp11BracedListStyle.
  if ((Left.is(tok::l_brace) && Left.BlockKind != BK_Block) ||
      (Right.is(tok::r_brace) && Right.MatchingParen &&
       Right.MatchingParen->BlockKind != BK_Block))
    return !Style.Cpp11BracedListStyle;
  // A block comment ending in "=*/" is a deliberate alignment marker; keep
  // the next token attached.
  if (Left.is(TT_BlockComment))
    return !Left.TokenText.endswith("=*/");
  // '(' after a keyword/identifier: governed by SpaceBeforeParens, with
  // special handling for control-flow keywords, ObjC declarations, and
  // try/catch/new/delete (unless preceded by '.', e.g. in JS member access).
  if (Right.is(tok::l_paren)) {
    if (Left.is(tok::r_paren) && Left.is(TT_AttributeParen))
      return true;
    return Line.Type == LT_ObjCDecl || Left.is(tok::semi) ||
           (Style.SpaceBeforeParens != FormatStyle::SBPO_Never &&
            (Left.isOneOf(tok::kw_if, tok::kw_for, tok::kw_while,
                          tok::kw_switch, tok::kw_case, TT_ForEachMacro) ||
             (Left.isOneOf(tok::kw_try, Keywords.kw___except, tok::kw_catch,
                           tok::kw_new, tok::kw_delete) &&
              (!Left.Previous || Left.Previous->isNot(tok::period))))) ||
           (Style.SpaceBeforeParens == FormatStyle::SBPO_Always &&
            (Left.is(tok::identifier) || Left.isFunctionLikeKeyword() ||
             Left.is(tok::r_paren)) &&
            Line.Type != LT_PreprocessorDirective);
  }
  if (Left.is(tok::at) && Right.Tok.getObjCKeywordID() != tok::objc_not_keyword)
    return false;
  if (Right.is(TT_UnaryOperator))
    return !Left.isOneOf(tok::l_paren, tok::l_square, tok::at) &&
           (Left.isNot(tok::colon) || Left.isNot(TT_ObjCMethodExpr));
  // No space before a braced init list attached directly to a type name or a
  // closing bracket, e.g. "int{0}" or "f(){...}"-style literals.
  if ((Left.isOneOf(tok::identifier, tok::greater, tok::r_square,
                    tok::r_paren) ||
       Left.isSimpleTypeSpecifier()) &&
      Right.is(tok::l_brace) && Right.getNextNonComment() &&
      Right.BlockKind != BK_Block)
    return false;
  if (Left.is(tok::period) || Right.is(tok::period))
    return false;
  // Wide-string macro stringification, e.g. L#x.
  if (Right.is(tok::hash) && Left.is(tok::identifier) && Left.TokenText == "L")
    return false;
  if (Left.is(TT_TemplateCloser) && Left.MatchingParen &&
      Left.MatchingParen->Previous &&
      Left.MatchingParen->Previous->is(tok::period))
    // A.<B>DoSomething();
    return false;
  if (Left.is(TT_TemplateCloser) && Right.is(tok::l_square))
    return false;
  return true;
}
// Decides whether a space is required before \p Right. Handles language- and
// line-type-specific overrides first, then falls back to the generic
// spaceRequiredBetween() rules. The order of checks is significant.
bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
                                         const FormatToken &Right) {
  const FormatToken &Left = *Right.Previous;
  if (Right.Tok.getIdentifierInfo() && Left.Tok.getIdentifierInfo())
    return true; // Never ever merge two identifiers.
  // Language-specific overrides.
  if (Style.Language == FormatStyle::LK_Cpp) {
    if (Left.is(tok::kw_operator))
      return Right.is(tok::coloncolon);
  } else if (Style.Language == FormatStyle::LK_Proto) {
    if (Right.is(tok::period) &&
        Left.isOneOf(Keywords.kw_optional, Keywords.kw_required,
                     Keywords.kw_repeated))
      return true;
    if (Right.is(tok::l_paren) &&
        Left.isOneOf(Keywords.kw_returns, Keywords.kw_option))
      return true;
  } else if (Style.Language == FormatStyle::LK_JavaScript) {
    if (Left.isOneOf(Keywords.kw_var, TT_JsFatArrow))
      return true;
    if (Right.isOneOf(TT_JsTypeColon, TT_JsTypeOptionalQuestion))
      return false;
    if ((Left.is(tok::l_brace) || Right.is(tok::r_brace)) &&
        Line.First->isOneOf(Keywords.kw_import, tok::kw_export))
      return false;
    if (Left.is(tok::ellipsis))
      return false;
    if (Left.is(TT_TemplateCloser) &&
        !Right.isOneOf(tok::equal, tok::l_brace, tok::comma, tok::l_square,
                       Keywords.kw_implements, Keywords.kw_extends))
      // Type assertions ('<type>expr') are not followed by whitespace. Other
      // locations that should have whitespace following are identified by the
      // above set of follower tokens.
      return false;
  } else if (Style.Language == FormatStyle::LK_Java) {
    if (Left.is(tok::r_square) && Right.is(tok::l_brace))
      return true;
    if (Left.is(Keywords.kw_synchronized) && Right.is(tok::l_paren))
      return Style.SpaceBeforeParens != FormatStyle::SBPO_Never;
    // Modifiers before a generic method's type parameter list, e.g.
    // "public <T> T foo()".
    if ((Left.isOneOf(tok::kw_static, tok::kw_public, tok::kw_private,
                      tok::kw_protected) ||
         Left.isOneOf(Keywords.kw_final, Keywords.kw_abstract,
                      Keywords.kw_native)) &&
        Right.is(TT_TemplateOpener))
      return true;
  }
  // Inside implicit string literals, preserve the author's whitespace exactly.
  if (Left.is(TT_ImplicitStringLiteral))
    return Right.WhitespaceRange.getBegin() != Right.WhitespaceRange.getEnd();
  if (Line.Type == LT_ObjCMethodDecl) {
    if (Left.is(TT_ObjCMethodSpecifier))
      return true;
    if (Left.is(tok::r_paren) && Right.is(tok::identifier))
      // Don't space between ')' and <id>
      return false;
  }
  if (Line.Type == LT_ObjCProperty &&
      (Right.is(tok::equal) || Left.is(tok::equal)))
    return false;
  if (Right.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow) ||
      Left.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow))
    return true;
  if (Left.is(tok::comma))
    return true;
  if (Right.is(tok::comma))
    return false;
  if (Right.isOneOf(TT_CtorInitializerColon, TT_ObjCBlockLParen))
    return true;
  if (Right.is(TT_OverloadedOperatorLParen))
    return false;
  // Colons: no space before case labels, empty labels, ObjC selectors parts,
  // the '?' branch of ternaries, or inline-asm '::'; dict literals follow
  // SpacesInContainerLiterals; everything else gets a space.
  if (Right.is(tok::colon)) {
    if (Line.First->isOneOf(tok::kw_case, tok::kw_default) ||
        !Right.getNextNonComment() || Right.getNextNonComment()->is(tok::semi))
      return false;
    if (Right.is(TT_ObjCMethodExpr))
      return false;
    if (Left.is(tok::question))
      return false;
    if (Right.is(TT_InlineASMColon) && Left.is(tok::coloncolon))
      return false;
    if (Right.is(TT_DictLiteral))
      return Style.SpacesInContainerLiterals;
    return true;
  }
  if (Left.is(TT_UnaryOperator))
    return Right.is(TT_BinaryOperator);
  // If the next token is a binary operator or a selector name, we have
  // incorrectly classified the parenthesis as a cast. FIXME: Detect correctly.
  if (Left.is(TT_CastRParen))
    return Style.SpaceAfterCStyleCast ||
           Right.isOneOf(TT_BinaryOperator, TT_SelectorName);
  // ">>" closing two templates needs a space in C++03 (or with SpacesInAngles).
  if (Left.is(tok::greater) && Right.is(tok::greater))
    return Right.is(TT_TemplateCloser) && Left.is(TT_TemplateCloser) &&
           (Style.Standard != FormatStyle::LS_Cpp11 || Style.SpacesInAngles);
  if (Right.isOneOf(tok::arrow, tok::period, tok::arrowstar, tok::periodstar) ||
      Left.isOneOf(tok::arrow, tok::period, tok::arrowstar, tok::periodstar))
    return false;
  if (!Style.SpaceBeforeAssignmentOperators &&
      Right.getPrecedence() == prec::Assignment)
    return false;
  if (Right.is(tok::coloncolon) && Left.isNot(tok::l_brace))
    return (Left.is(TT_TemplateOpener) &&
            Style.Standard == FormatStyle::LS_Cpp03) ||
           !(Left.isOneOf(tok::identifier, tok::l_paren, tok::r_paren) ||
             Left.isOneOf(TT_TemplateCloser, TT_TemplateOpener));
  if ((Left.is(TT_TemplateOpener)) != (Right.is(TT_TemplateCloser)))
    return Style.SpacesInAngles;
  if ((Right.is(TT_BinaryOperator) && !Left.is(tok::l_paren)) ||
      Left.isOneOf(TT_BinaryOperator, TT_ConditionalExpr))
    return true;
  if (Left.is(TT_TemplateCloser) && Right.is(tok::l_paren) &&
      Right.isNot(TT_FunctionTypeLParen))
    return Style.SpaceBeforeParens == FormatStyle::SBPO_Always;
  if (Right.is(TT_TemplateOpener) && Left.is(tok::r_paren) &&
      Left.MatchingParen && Left.MatchingParen->is(TT_OverloadedOperatorLParen))
    return false;
  // "#include <...>" keeps a space before '<'.
  if (Right.is(tok::less) && Left.isNot(tok::l_paren) &&
      Line.startsWith(tok::hash))
    return true;
  if (Right.is(TT_TrailingUnaryOperator))
    return false;
  if (Left.is(TT_RegexLiteral))
    return false;
  // Fall back to the generic, language-independent rules.
  return spaceRequiredBetween(Line, Left, Right);
}
// Returns true if 'Tok' is an opening brace in front of which Allman-style
// formatting would insert a line break.
static bool isAllmanBrace(const FormatToken &Tok) {
  if (!Tok.is(tok::l_brace) || Tok.BlockKind != BK_Block)
    return false;
  // ObjC block literals and dictionary literals never get Allman breaks.
  return !Tok.isOneOf(TT_ObjCBlockLBrace, TT_DictLiteral);
}
// Returns true if a line break MUST be inserted before \p Right. These breaks
// are unconditional; breaks merely *allowed* are handled by canBreakBefore()
// and chosen via penalties. The order of checks is significant.
bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
                                     const FormatToken &Right) {
  const FormatToken &Left = *Right.Previous;
  // Preserve (collapsed to one) existing blank lines.
  if (Right.NewlinesBefore > 1)
    return true;
  if (Style.Language == FormatStyle::LK_JavaScript) {
    // FIXME: This might apply to other languages and token kinds.
    if (Right.is(tok::char_constant) && Left.is(tok::plus) && Left.Previous &&
        Left.Previous->is(tok::char_constant))
      return true;
    if (Left.is(TT_DictLiteral) && Left.is(tok::l_brace) && Line.Level == 0 &&
        Left.Previous && Left.Previous->is(tok::equal) &&
        Line.First->isOneOf(tok::identifier, Keywords.kw_import, tok::kw_export,
                            tok::kw_const) &&
        // kw_var is a pseudo-token that's a tok::identifier, so matches above.
        !Line.startsWith(Keywords.kw_var))
      // Object literals on the top level of a file are treated as "enum-style".
      // Each key/value pair is put on a separate line, instead of bin-packing.
      return true;
    if (Left.is(tok::l_brace) && Line.Level == 0 &&
        (Line.startsWith(tok::kw_enum) ||
         Line.startsWith(tok::kw_export, tok::kw_enum)))
      // JavaScript top-level enum key/value pairs are put on separate lines
      // instead of bin-packing.
      return true;
    if (Right.is(tok::r_brace) && Left.is(tok::l_brace) &&
        !Left.Children.empty())
      // Support AllowShortFunctionsOnASingleLine for JavaScript.
      return Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_None ||
             (Left.NestingLevel == 0 && Line.Level == 0 &&
              Style.AllowShortFunctionsOnASingleLine ==
                  FormatStyle::SFS_Inline);
  } else if (Style.Language == FormatStyle::LK_Java) {
    // Keep string concatenation operands on separate lines once split.
    if (Right.is(tok::plus) && Left.is(tok::string_literal) && Right.Next &&
        Right.Next->is(tok::string_literal))
      return true;
  }
  // If the last token before a '}' is a comma or a trailing comment, the
  // intention is to insert a line break after it in order to make shuffling
  // around entries easier.
  const FormatToken *BeforeClosingBrace = nullptr;
  if (Left.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) &&
      Left.BlockKind != BK_Block && Left.MatchingParen)
    BeforeClosingBrace = Left.MatchingParen->Previous;
  else if (Right.MatchingParen &&
           Right.MatchingParen->isOneOf(tok::l_brace,
                                        TT_ArrayInitializerLSquare))
    BeforeClosingBrace = &Left;
  if (BeforeClosingBrace && (BeforeClosingBrace->is(tok::comma) ||
                             BeforeClosingBrace->isTrailingComment()))
    return true;
  // Comments on their own line stay on their own line.
  if (Right.is(tok::comment))
    return Left.BlockKind != BK_BracedInit &&
           Left.isNot(TT_CtorInitializerColon) &&
           (Right.NewlinesBefore > 0 && Right.HasUnescapedNewline);
  if (Left.isTrailingComment())
    return true;
  // Adjacent string literals (implicit concatenation) go on separate lines.
  if (Left.isStringLiteral() &&
      (Right.isStringLiteral() || Right.is(TT_ObjCStringLiteral)))
    return true;
  if (Right.Previous->IsUnterminatedLiteral)
    return true;
  if (Right.is(tok::lessless) && Right.Next &&
      Right.Previous->is(tok::string_literal) &&
      Right.Next->is(tok::string_literal))
    return true;
  if (Right.Previous->ClosesTemplateDeclaration &&
      Right.Previous->MatchingParen &&
      Right.Previous->MatchingParen->NestingLevel == 0 &&
      Style.AlwaysBreakTemplateDeclarations)
    return true;
  if ((Right.isOneOf(TT_CtorInitializerComma, TT_CtorInitializerColon)) &&
      Style.BreakConstructorInitializersBeforeComma &&
      !Style.ConstructorInitializerAllOnOneLineOrOnePerLine)
    return true;
  if (Right.is(tok::string_literal) && Right.TokenText.startswith("R\""))
    // Raw string literals are special wrt. line breaks. The author has made a
    // deliberate choice and might have aligned the contents of the string
    // literal accordingly. Thus, we try keep existing line breaks.
    return Right.NewlinesBefore > 0;
  if (Right.Previous->is(tok::l_brace) && Right.NestingLevel == 1 &&
      Style.Language == FormatStyle::LK_Proto)
    // Don't put enums onto single lines in protocol buffers.
    return true;
  if (Right.is(TT_InlineASMBrace))
    return Right.HasUnescapedNewline;
  // Brace-breaking styles that put '{' (and '}') on their own line.
  if (isAllmanBrace(Left) || isAllmanBrace(Right))
    return Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
           Style.BreakBeforeBraces == FormatStyle::BS_GNU ||
           (Style.BreakBeforeBraces == FormatStyle::BS_Mozilla &&
            Line.startsWith(tok::kw_enum));
  if (Style.Language == FormatStyle::LK_Proto && Left.isNot(tok::l_brace) &&
      Right.is(TT_SelectorName))
    return true;
  if (Left.is(TT_ObjCBlockLBrace) && !Style.AllowShortBlocksOnASingleLine)
    return true;
  // Leading Java/JS annotations on declarations go on their own line.
  if ((Style.Language == FormatStyle::LK_Java ||
       Style.Language == FormatStyle::LK_JavaScript) &&
      Left.is(TT_LeadingJavaAnnotation) &&
      Right.isNot(TT_LeadingJavaAnnotation) && Right.isNot(tok::l_paren) &&
      Line.Last->is(tok::l_brace))
    return true;
  return false;
}
// Returns true if a line break before \p Right is permitted at all. Whether a
// permitted break is actually taken is decided elsewhere via split penalties.
// The order of checks is significant.
bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
                                    const FormatToken &Right) {
  const FormatToken &Left = *Right.Previous;
  // Language-specific stuff.
  if (Style.Language == FormatStyle::LK_Java) {
    if (Left.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
                     Keywords.kw_implements))
      return false;
    if (Right.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
                      Keywords.kw_implements))
      return true;
  } else if (Style.Language == FormatStyle::LK_JavaScript) {
    if (Left.is(TT_JsFatArrow) && Right.is(tok::l_brace))
      return false;
    if (Left.is(TT_JsTypeColon))
      return true;
  }
  if (Left.is(tok::at))
    return false;
  if (Left.Tok.getObjCKeywordID() == tok::objc_interface)
    return false;
  if (Left.isOneOf(TT_JavaAnnotation, TT_LeadingJavaAnnotation))
    return !Right.is(tok::l_paren);
  // Breaking before '*'/'&' depends on PointerAlignment.
  if (Right.is(TT_PointerOrReference))
    return Line.IsMultiVariableDeclStmt ||
           (Style.PointerAlignment == FormatStyle::PAS_Right &&
            (!Right.Next || Right.Next->isNot(TT_FunctionDeclarationName)));
  if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) ||
      Right.is(tok::kw_operator))
    return true;
  if (Left.is(TT_PointerOrReference))
    return false;
  if (Right.isTrailingComment())
    // We rely on MustBreakBefore being set correctly here as we should not
    // change the "binding" behavior of a comment.
    // The first comment in a braced lists is always interpreted as belonging to
    // the first list element. Otherwise, it should be placed outside of the
    // list.
    return Left.BlockKind == BK_BracedInit;
  // Ternary operator: which side of '?' / ':' may break is style-controlled.
  if (Left.is(tok::question) && Right.is(tok::colon))
    return false;
  if (Right.is(TT_ConditionalExpr) || Right.is(tok::question))
    return Style.BreakBeforeTernaryOperators;
  if (Left.is(TT_ConditionalExpr) || Left.is(tok::question))
    return !Style.BreakBeforeTernaryOperators;
  if (Right.is(TT_InheritanceColon))
    return true;
  if (Right.is(tok::colon) &&
      !Right.isOneOf(TT_CtorInitializerColon, TT_InlineASMColon))
    return false;
  if (Left.is(tok::colon) && (Left.isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)))
    return true;
  if (Right.is(TT_SelectorName) || (Right.is(tok::identifier) && Right.Next &&
                                    Right.Next->is(TT_ObjCMethodExpr)))
    return Left.isNot(tok::period); // FIXME: Properly parse ObjC calls.
  if (Left.is(tok::r_paren) && Line.Type == LT_ObjCProperty)
    return true;
  if (Left.ClosesTemplateDeclaration)
    return true;
  if (Right.isOneOf(TT_RangeBasedForLoopColon, TT_OverloadedOperatorLParen,
                    TT_OverloadedOperator))
    return false;
  if (Left.is(TT_RangeBasedForLoopColon))
    return true;
  if (Right.is(TT_RangeBasedForLoopColon))
    return false;
  if (Left.isOneOf(TT_TemplateCloser, TT_UnaryOperator) ||
      Left.is(tok::kw_operator))
    return false;
  // Keep "= default" / "= delete" attached in virtual function declarations.
  if (Left.is(tok::equal) && !Right.isOneOf(tok::kw_default, tok::kw_delete) &&
      Line.Type == LT_VirtualFunctionDecl)
    return false;
  if (Left.is(tok::l_paren) && Left.is(TT_AttributeParen))
    return false;
  if (Left.is(tok::l_paren) && Left.Previous &&
      (Left.Previous->isOneOf(TT_BinaryOperator, TT_CastRParen)))
    return false;
  if (Right.is(TT_ImplicitStringLiteral))
    return false;
  // Never break directly before a closing paren or template closer.
  if (Right.is(tok::r_paren) || Right.is(TT_TemplateCloser))
    return false;
  if (Right.is(tok::r_square) && Right.MatchingParen &&
      Right.MatchingParen->is(TT_LambdaLSquare))
    return false;
  // We only break before r_brace if there was a corresponding break before
  // the l_brace, which is tracked by BreakBeforeClosingBrace.
  if (Right.is(tok::r_brace))
    return Right.MatchingParen && Right.MatchingParen->BlockKind == BK_Block;
  // Allow breaking after a trailing annotation, e.g. after a method
  // declaration.
  if (Left.is(TT_TrailingAnnotation))
    return !Right.isOneOf(tok::l_brace, tok::semi, tok::equal, tok::l_paren,
                          tok::less, tok::coloncolon);
  if (Right.is(tok::kw___attribute))
    return true;
  if (Left.is(tok::identifier) && Right.is(tok::string_literal))
    return true;
  if (Right.is(tok::identifier) && Right.Next && Right.Next->is(TT_DictLiteral))
    return true;
  // Constructor initializer commas: break side follows the style option.
  if (Left.is(TT_CtorInitializerComma) &&
      Style.BreakConstructorInitializersBeforeComma)
    return false;
  if (Right.is(TT_CtorInitializerComma) &&
      Style.BreakConstructorInitializersBeforeComma)
    return true;
  if ((Left.is(tok::greater) && Right.is(tok::greater)) ||
      (Left.is(tok::less) && Right.is(tok::less)))
    return false;
  // Binary operators: break before or after depending on
  // BreakBeforeBinaryOperators (assignments are treated separately).
  if (Right.is(TT_BinaryOperator) &&
      Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None &&
      (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_All ||
       Right.getPrecedence() != prec::Assignment))
    return true;
  if (Left.is(TT_ArrayInitializerLSquare))
    return true;
  if (Right.is(tok::kw_typename) && Left.isNot(tok::kw_const))
    return true;
  if ((Left.isBinaryOperator() || Left.is(TT_BinaryOperator)) &&
      !Left.isOneOf(tok::arrowstar, tok::lessless) &&
      Style.BreakBeforeBinaryOperators != FormatStyle::BOS_All &&
      (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None ||
       Left.getPrecedence() == prec::Assignment))
    return true;
  // Default set of tokens around which breaking is generally allowed.
  return Left.isOneOf(tok::comma, tok::coloncolon, tok::semi, tok::l_brace,
                      tok::kw_class, tok::kw_struct) ||
         Right.isMemberAccess() ||
         Right.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow, tok::lessless,
                       tok::colon, tok::l_square, tok::at) ||
         (Left.is(tok::r_paren) &&
          Right.isOneOf(tok::identifier, tok::kw_const)) ||
         (Left.is(tok::l_paren) && !Right.is(tok::r_paren));
}
void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) {
llvm::errs() << "AnnotatedTokens:\n";
const FormatToken *Tok = Line.First;
while (Tok) {
llvm::errs() << " M=" << Tok->MustBreakBefore
<< " C=" << Tok->CanBreakBefore
<< " T=" << getTokenTypeName(Tok->Type)
<< " S=" << Tok->SpacesRequiredBefore
<< " B=" << Tok->BlockParameterCount
<< " P=" << Tok->SplitPenalty << " Name=" << Tok->Tok.getName()
<< " L=" << Tok->TotalLength << " PPK=" << Tok->PackingKind
<< " FakeLParens=";
for (unsigned i = 0, e = Tok->FakeLParens.size(); i != e; ++i)
llvm::errs() << Tok->FakeLParens[i] << "/";
llvm::errs() << " FakeRParens=" << Tok->FakeRParens << "\n";
if (!Tok->Next)
assert(Tok == Line.Last);
Tok = Tok->Next;
}
llvm::errs() << "----\n";
}
} // namespace format
} // namespace clang
//===--- UnwrappedLineParser.cpp - Format C++ code ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file contains the implementation of the UnwrappedLineParser,
/// which turns a stream of tokens into UnwrappedLines.
///
//===----------------------------------------------------------------------===//
#include "UnwrappedLineParser.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#define DEBUG_TYPE "format-parser"
namespace clang {
namespace format {
// Abstract source of FormatTokens with position save/restore, allowing the
// parser to read ahead speculatively and rewind (e.g. for brace-type
// detection in calculateBraceTypes()).
class FormatTokenSource {
public:
  virtual ~FormatTokenSource() {}
  // Advances the stream and returns the next token.
  virtual FormatToken *getNextToken() = 0;
  // Returns the current position for a later setPosition() call.
  virtual unsigned getPosition() = 0;
  // Rewinds/advances the stream to \p Position; returns the token there.
  virtual FormatToken *setPosition(unsigned Position) = 0;
};
namespace {
class ScopedDeclarationState {
public:
ScopedDeclarationState(UnwrappedLine &Line, std::vector<bool> &Stack,
bool MustBeDeclaration)
: Line(Line), Stack(Stack) {
Line.MustBeDeclaration = MustBeDeclaration;
Stack.push_back(MustBeDeclaration);
}
~ScopedDeclarationState() {
Stack.pop_back();
if (!Stack.empty())
Line.MustBeDeclaration = Stack.back();
else
Line.MustBeDeclaration = true;
}
private:
UnwrappedLine &Line;
std::vector<bool> &Stack;
};
// Token source wrapper installed while parsing a preprocessor directive. It
// replaces the parser's token source for its lifetime and stops delivering
// tokens at the first token that starts on a new unescaped line (i.e. the end
// of the directive), handing out a synthetic eof token instead. On
// destruction it restores the previous source and reports the first token
// after the directive through \p ResetToken.
class ScopedMacroState : public FormatTokenSource {
public:
  ScopedMacroState(UnwrappedLine &Line, FormatTokenSource *&TokenSource,
                   FormatToken *&ResetToken)
      : Line(Line), TokenSource(TokenSource), ResetToken(ResetToken),
        PreviousLineLevel(Line.Level), PreviousTokenSource(TokenSource),
        Token(nullptr) {
    // Install ourselves as the active token source.
    TokenSource = this;
    // Directives are parsed at level 0 and flagged as PP lines.
    Line.Level = 0;
    Line.InPPDirective = true;
  }
  ~ScopedMacroState() override {
    TokenSource = PreviousTokenSource;
    ResetToken = Token;
    Line.InPPDirective = false;
    Line.Level = PreviousLineLevel;
  }
  FormatToken *getNextToken() override {
    // The \c UnwrappedLineParser guards against this by never calling
    // \c getNextToken() after it has encountered the first eof token.
    assert(!eof());
    Token = PreviousTokenSource->getNextToken();
    if (eof())
      return getFakeEOF();
    return Token;
  }
  unsigned getPosition() override { return PreviousTokenSource->getPosition(); }
  FormatToken *setPosition(unsigned Position) override {
    Token = PreviousTokenSource->setPosition(Position);
    return Token;
  }
private:
  // The directive ends at the first token carrying an unescaped newline.
  bool eof() { return Token && Token->HasUnescapedNewline; }
  FormatToken *getFakeEOF() {
    // Lazily initialized eof token shared by all instances.
    static bool EOFInitialized = false;
    static FormatToken FormatTok;
    if (!EOFInitialized) {
      FormatTok.Tok.startToken();
      FormatTok.Tok.setKind(tok::eof);
      EOFInitialized = true;
    }
    return &FormatTok;
  }
  UnwrappedLine &Line;
  FormatTokenSource *&TokenSource;
  FormatToken *&ResetToken;
  unsigned PreviousLineLevel;
  FormatTokenSource *PreviousTokenSource;
  FormatToken *Token;
};
} // end anonymous namespace
// Saves the parser's in-progress line and redirects collected lines either to
// the preprocessor-directive buffer or to the child lines of the last token
// (used for nested blocks, e.g. lambdas). The destructor flushes anything
// collected and restores the previous state.
class ScopedLineState {
public:
  ScopedLineState(UnwrappedLineParser &Parser,
                  bool SwitchToPreprocessorLines = false)
      : Parser(Parser), OriginalLines(Parser.CurrentLines) {
    if (SwitchToPreprocessorLines)
      Parser.CurrentLines = &Parser.PreprocessorDirectives;
    else if (!Parser.Line->Tokens.empty())
      Parser.CurrentLines = &Parser.Line->Tokens.back().Children;
    // Stash the current line and start a fresh one at the same level.
    PreBlockLine = std::move(Parser.Line);
    Parser.Line = llvm::make_unique<UnwrappedLine>();
    Parser.Line->Level = PreBlockLine->Level;
    Parser.Line->InPPDirective = PreBlockLine->InPPDirective;
  }
  ~ScopedLineState() {
    // Emit any line still in progress before restoring the saved one.
    if (!Parser.Line->Tokens.empty()) {
      Parser.addUnwrappedLine();
    }
    assert(Parser.Line->Tokens.empty());
    Parser.Line = std::move(PreBlockLine);
    if (Parser.CurrentLines == &Parser.PreprocessorDirectives)
      Parser.MustBreakBeforeNextToken = true;
    Parser.CurrentLines = OriginalLines;
  }
private:
  UnwrappedLineParser &Parser;
  std::unique_ptr<UnwrappedLine> PreBlockLine;
  SmallVectorImpl<UnwrappedLine> *OriginalLines;
};
// RAII helper that, for Allman and GNU brace styles, breaks before a compound
// statement's opening brace (GNU additionally indents the brace one level).
// The original line level is restored on destruction.
class CompoundStatementIndenter {
public:
  CompoundStatementIndenter(UnwrappedLineParser *Parser,
                            const FormatStyle &Style, unsigned &LineLevel)
      : LineLevel(LineLevel), OldLineLevel(LineLevel) {
    const bool BreaksBeforeBrace =
        Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
        Style.BreakBeforeBraces == FormatStyle::BS_GNU;
    if (BreaksBeforeBrace)
      Parser->addUnwrappedLine();
    // GNU style additionally indents the brace itself.
    if (Style.BreakBeforeBraces == FormatStyle::BS_GNU)
      ++LineLevel;
  }
  ~CompoundStatementIndenter() { LineLevel = OldLineLevel; }

private:
  unsigned &LineLevel;
  unsigned OldLineLevel;
};
namespace {
// FormatTokenSource over a pre-lexed token buffer. Supports rewinding via
// getPosition()/setPosition(), which the parser uses for speculative reads.
class IndexedTokenSource : public FormatTokenSource {
public:
  IndexedTokenSource(ArrayRef<FormatToken *> Tokens)
      : TokenList(Tokens), Index(-1) {}
  FormatToken *getNextToken() override { return TokenList[++Index]; }
  unsigned getPosition() override {
    // A position is only meaningful once at least one token has been read.
    assert(Index >= 0);
    return Index;
  }
  FormatToken *setPosition(unsigned P) override {
    Index = P;
    return TokenList[Index];
  }
  // Rewind to before the first token; the next getNextToken() yields
  // TokenList[0] again.
  void reset() { Index = -1; }

private:
  ArrayRef<FormatToken *> TokenList;
  int Index;
};
} // end anonymous namespace
// Note: the active token source (Tokens) is installed in parse(); AllTokens
// keeps the full pre-lexed stream so parse() can re-run it once per
// preprocessor branch combination.
UnwrappedLineParser::UnwrappedLineParser(const FormatStyle &Style,
                                         const AdditionalKeywords &Keywords,
                                         ArrayRef<FormatToken *> Tokens,
                                         UnwrappedLineConsumer &Callback)
    : Line(new UnwrappedLine), MustBreakBeforeNextToken(false),
      CurrentLines(&Lines), Style(Style), Keywords(Keywords), Tokens(nullptr),
      Callback(Callback), AllTokens(Tokens), PPBranchLevel(-1) {}
// Resets all per-run parser state so the token stream can be parsed again
// (parse() runs once per preprocessor-branch combination). Deliberately does
// NOT touch PPLevelBranchIndex/PPLevelBranchCount - those carry the branch
// selection across runs (see parse()).
void UnwrappedLineParser::reset() {
  PPBranchLevel = -1;
  Line.reset(new UnwrappedLine);
  CommentsBeforeNextToken.clear();
  FormatTok = nullptr;
  MustBreakBeforeNextToken = false;
  PreprocessorDirectives.clear();
  CurrentLines = &Lines;
  DeclarationScopeStack.clear();
  PPStack.clear();
}
void UnwrappedLineParser::parse() {
IndexedTokenSource TokenSource(AllTokens);
do {
DEBUG(llvm::dbgs() << "----\n");
reset();
Tokens = &TokenSource;
TokenSource.reset();
readToken();
parseFile();
// Create line with eof token.
pushToken(FormatTok);
addUnwrappedLine();
for (SmallVectorImpl<UnwrappedLine>::iterator I = Lines.begin(),
E = Lines.end();
I != E; ++I) {
Callback.consumeUnwrappedLine(*I);
}
Callback.finishRun();
Lines.clear();
while (!PPLevelBranchIndex.empty() &&
PPLevelBranchIndex.back() + 1 >= PPLevelBranchCount.back()) {
PPLevelBranchIndex.resize(PPLevelBranchIndex.size() - 1);
PPLevelBranchCount.resize(PPLevelBranchCount.size() - 1);
}
if (!PPLevelBranchIndex.empty()) {
++PPLevelBranchIndex.back();
assert(PPLevelBranchIndex.size() == PPLevelBranchCount.size());
assert(PPLevelBranchIndex.back() <= PPLevelBranchCount.back());
}
} while (!PPLevelBranchIndex.empty());
}
// Parses a whole file as the outermost level and flushes everything that was
// collected.
void UnwrappedLineParser::parseFile() {
  // The top-level context in a file always has declarations, except for pre-
  // processor directives and JavaScript files.
  const bool TopLevelIsDeclaration =
      Style.Language != FormatStyle::LK_JavaScript && !Line->InPPDirective;
  ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack,
                                          TopLevelIsDeclaration);
  parseLevel(/*HasOpeningBrace=*/false);
  // Make sure to format the remaining tokens.
  flushComments(true);
  addUnwrappedLine();
}
// Parses a sequence of structural elements at one brace level. If
// \p HasOpeningBrace is true, parsing stops at (without consuming) the
// matching closing brace; otherwise it runs until end of file.
void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
  bool SwitchLabelEncountered = false;
  do {
    tok::TokenKind kind = FormatTok->Tok.getKind();
    // Macro block begin/end markers behave like braces for nesting purposes.
    if (FormatTok->Type == TT_MacroBlockBegin) {
      kind = tok::l_brace;
    } else if (FormatTok->Type == TT_MacroBlockEnd) {
      kind = tok::r_brace;
    }
    switch (kind) {
    case tok::comment:
      nextToken();
      addUnwrappedLine();
      break;
    case tok::l_brace:
      // FIXME: Add parameter whether this can happen - if this happens, we must
      // be in a non-declaration context.
      parseBlock(/*MustBeDeclaration=*/false);
      addUnwrappedLine();
      break;
    case tok::r_brace:
      if (HasOpeningBrace)
        return;
      nextToken();
      addUnwrappedLine();
      break;
    case tok::kw_default:
    case tok::kw_case:
      // Indent once at the first case/default label if the style asks for it
      // (or inside a PP directive at level 1); later labels keep that level.
      if (!SwitchLabelEncountered &&
          (Style.IndentCaseLabels || (Line->InPPDirective && Line->Level == 1)))
        ++Line->Level;
      SwitchLabelEncountered = true;
      parseStructuralElement();
      break;
    default:
      parseStructuralElement();
      break;
    }
  } while (!eof());
}
// Scans ahead from the current "{" (without consuming input) and classifies
// every brace pair encountered as either a braced init list or a block,
// storing the result in each token's BlockKind. The token stream position is
// restored before returning.
void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
  // We'll parse forward through the tokens until we hit
  // a closing brace or eof - note that getNextToken() will
  // parse macros, so this will magically work inside macro
  // definitions, too.
  unsigned StoredPosition = Tokens->getPosition();
  FormatToken *Tok = FormatTok;
  // Keep a stack of positions of lbrace tokens. We will
  // update information about whether an lbrace starts a
  // braced init list or a different block during the loop.
  SmallVector<FormatToken *, 8> LBraceStack;
  assert(Tok->Tok.is(tok::l_brace));
  do {
    // Get next non-comment token.
    FormatToken *NextTok;
    unsigned ReadTokens = 0;
    do {
      NextTok = Tokens->getNextToken();
      ++ReadTokens;
    } while (NextTok->is(tok::comment));
    switch (Tok->Tok.getKind()) {
    case tok::l_brace:
      Tok->BlockKind = BK_Unknown;
      LBraceStack.push_back(Tok);
      break;
    case tok::r_brace:
      if (!LBraceStack.empty()) {
        if (LBraceStack.back()->BlockKind == BK_Unknown) {
          bool ProbablyBracedList = false;
          if (Style.Language == FormatStyle::LK_Proto) {
            ProbablyBracedList = NextTok->isOneOf(tok::comma, tok::r_square);
          } else {
            // Using OriginalColumn to distinguish between ObjC methods and
            // binary operators is a bit hacky.
            bool NextIsObjCMethod = NextTok->isOneOf(tok::plus, tok::minus) &&
                                    NextTok->OriginalColumn == 0;
            // If there is a comma, semicolon or right paren after the closing
            // brace, we assume this is a braced initializer list. Note that
            // regardless how we mark inner braces here, we will overwrite the
            // BlockKind later if we parse a braced list (where all blocks
            // inside are by default braced lists), or when we explicitly detect
            // blocks (for example while parsing lambdas).
            //
            // We exclude + and - as they can be ObjC visibility modifiers.
            ProbablyBracedList =
                NextTok->isOneOf(tok::comma, tok::period, tok::colon,
                                 tok::r_paren, tok::r_square, tok::l_brace,
                                 tok::l_paren, tok::ellipsis) ||
                (NextTok->is(tok::semi) &&
                 (!ExpectClassBody || LBraceStack.size() != 1)) ||
                (NextTok->isBinaryOperator() && !NextIsObjCMethod);
          }
          // Classify both the opening and the closing brace consistently.
          if (ProbablyBracedList) {
            Tok->BlockKind = BK_BracedInit;
            LBraceStack.back()->BlockKind = BK_BracedInit;
          } else {
            Tok->BlockKind = BK_Block;
            LBraceStack.back()->BlockKind = BK_Block;
          }
        }
        LBraceStack.pop_back();
      }
      break;
    // Any of these tokens directly inside a brace pair means the pair
    // encloses statements and is therefore a block, not an init list.
    case tok::at:
    case tok::semi:
    case tok::kw_if:
    case tok::kw_while:
    case tok::kw_for:
    case tok::kw_switch:
    case tok::kw_try:
    case tok::kw___try:
      if (!LBraceStack.empty())
        LBraceStack.back()->BlockKind = BK_Block;
      break;
    default:
      break;
    }
    Tok = NextTok;
  } while (Tok->Tok.isNot(tok::eof) && !LBraceStack.empty());
  // Assume other blocks for all unclosed opening braces.
  for (unsigned i = 0, e = LBraceStack.size(); i != e; ++i) {
    if (LBraceStack[i]->BlockKind == BK_Unknown)
      LBraceStack[i]->BlockKind = BK_Block;
  }
  FormatTok = Tokens->setPosition(StoredPosition);
}
// Parses a braced block (or a macro block delimited by TT_MacroBlockBegin/
// TT_MacroBlockEnd). Restores Line->Level before returning and, if MunchSemi
// is set, also consumes a trailing semicolon.
void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, bool AddLevel,
                                     bool MunchSemi) {
  assert(FormatTok->isOneOf(tok::l_brace, TT_MacroBlockBegin) &&
         "'{' or macro block token expected");
  const bool MacroBlock = FormatTok->is(TT_MacroBlockBegin);
  const unsigned SavedLevel = Line->Level;
  nextToken();
  if (MacroBlock && FormatTok->is(tok::l_paren))
    parseParens();
  addUnwrappedLine();
  ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack,
                                          MustBeDeclaration);
  if (AddLevel)
    ++Line->Level;
  parseLevel(/*HasOpeningBrace=*/true);
  // Check that the block is properly terminated before eating the closer.
  const bool Terminated = MacroBlock ? FormatTok->is(TT_MacroBlockEnd)
                                     : FormatTok->is(tok::r_brace);
  if (!Terminated) {
    Line->Level = SavedLevel;
    return;
  }
  nextToken(); // Munch the closing brace / macro end token.
  if (MacroBlock && FormatTok->is(tok::l_paren))
    parseParens();
  if (MunchSemi && FormatTok->Tok.is(tok::semi))
    nextToken();
  Line->Level = SavedLevel;
}
// Returns true if \p Line starts a Closure-library "goog.scope(" call.
static bool isGoogScope(const UnwrappedLine &Line) {
  // FIXME: Closure-library specific stuff should not be hard-coded but be
  // configurable.
  if (Line.Tokens.size() < 4)
    return false;
  auto It = Line.Tokens.begin();
  if (It->Tok->TokenText != "goog")
    return false;
  if ((++It)->Tok->isNot(tok::period))
    return false;
  if ((++It)->Tok->TokenText != "scope")
    return false;
  return (++It)->Tok->is(tok::l_paren);
}
// Decides, based on the configured brace-breaking style, whether a line
// break belongs before the "{" of the declaration started by \p InitialToken.
static bool ShouldBreakBeforeBrace(const FormatStyle &Style,
                                   const FormatToken &InitialToken) {
  if (Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
      Style.BreakBeforeBraces == FormatStyle::BS_GNU)
    return true;
  if (Style.BreakBeforeBraces == FormatStyle::BS_Linux)
    return InitialToken.isOneOf(tok::kw_namespace, tok::kw_class);
  if (Style.BreakBeforeBraces == FormatStyle::BS_Mozilla)
    return InitialToken.isOneOf(tok::kw_class, tok::kw_struct, tok::kw_union);
  return false;
}
void UnwrappedLineParser::parseChildBlock() {
FormatTok->BlockKind = BK_Block;
nextToken();
{
bool GoogScope =
Style.Language == FormatStyle::LK_JavaScript && isGoogScope(*Line);
ScopedLineState LineState(*this);
ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack,
/*MustBeDeclaration=*/false);
Line->Level += GoogScope ? 0 : 1;
parseLevel(/*HasOpeningBrace=*/true);
flushComments(isOnNewLine(*FormatTok));
Line->Level -= GoogScope ? 0 : 1;
}
nextToken();
}
// Parses one preprocessor directive, dispatching on the directive keyword.
// Conditional directives additionally update the #if/#else/#endif branch
// tracking state before their text is consumed.
void UnwrappedLineParser::parsePPDirective() {
  assert(FormatTok->Tok.is(tok::hash) && "'#' expected");
  ScopedMacroState MacroState(*Line, Tokens, FormatTok);
  nextToken();
  if (!FormatTok->Tok.getIdentifierInfo()) {
    // "#" not followed by an identifier: treat as an unknown directive.
    parsePPUnknown();
    return;
  }
  switch (FormatTok->Tok.getIdentifierInfo()->getPPKeywordID()) {
  case tok::pp_define:
    parsePPDefine();
    return;
  case tok::pp_if:
    parsePPIf(/*IfDef=*/false);
    break;
  case tok::pp_ifdef:
  case tok::pp_ifndef:
    parsePPIf(/*IfDef=*/true);
    break;
  case tok::pp_else:
    parsePPElse();
    break;
  case tok::pp_elif:
    parsePPElIf();
    break;
  case tok::pp_endif:
    parsePPEndIf();
    break;
  default:
    parsePPUnknown();
    break;
  }
}
// Pushes the state for a newly entered conditional-compilation branch. The
// branch is unreachable if flagged so by the caller or if it is nested
// inside an already-unreachable region.
void UnwrappedLineParser::conditionalCompilationCondition(bool Unreachable) {
  const bool InUnreachableRegion =
      !PPStack.empty() && PPStack.back() == PP_Unreachable;
  PPStack.push_back((Unreachable || InUnreachableRegion) ? PP_Unreachable
                                                         : PP_Conditional);
}
// Enters a new #if/#ifdef nesting level: grows the per-level bookkeeping the
// first time this depth is reached and pushes the state for the first branch
// of the new chain.
void UnwrappedLineParser::conditionalCompilationStart(bool Unreachable) {
  ++PPBranchLevel;
  assert(PPBranchLevel >= 0 && PPBranchLevel <= (int)PPLevelBranchIndex.size());
  if (PPBranchLevel == (int)PPLevelBranchIndex.size()) {
    // First visit of this nesting depth; start tracking it.
    PPLevelBranchIndex.push_back(0);
    PPLevelBranchCount.push_back(0);
  }
  PPChainBranchIndex.push(0);
  // If the branch selected for this level is not the first one, this first
  // branch is skipped (treated as unreachable).
  bool Skip = PPLevelBranchIndex[PPBranchLevel] > 0;
  conditionalCompilationCondition(Unreachable || Skip);
}
// Handles #else/#elif: pops the finished branch's state and pushes the state
// for the next branch in the chain.
void UnwrappedLineParser::conditionalCompilationAlternative() {
  if (!PPStack.empty())
    PPStack.pop_back();
  assert(PPBranchLevel < (int)PPLevelBranchIndex.size());
  if (!PPChainBranchIndex.empty())
    ++PPChainBranchIndex.top();
  // The new branch is unreachable unless it is the branch selected for
  // formatting at this level.
  conditionalCompilationCondition(
      PPBranchLevel >= 0 && !PPChainBranchIndex.empty() &&
      PPLevelBranchIndex[PPBranchLevel] != PPChainBranchIndex.top());
}
// Handles #endif: records how many branches the just-closed chain had and
// unwinds one level of branch-tracking state.
void UnwrappedLineParser::conditionalCompilationEnd() {
  assert(PPBranchLevel < (int)PPLevelBranchIndex.size());
  if (PPBranchLevel >= 0 && !PPChainBranchIndex.empty()) {
    // Remember the largest branch count seen for this nesting level.
    if (PPChainBranchIndex.top() + 1 > PPLevelBranchCount[PPBranchLevel]) {
      PPLevelBranchCount[PPBranchLevel] = PPChainBranchIndex.top() + 1;
    }
  }
  // Guard against #endif's without #if.
  if (PPBranchLevel > 0)
    --PPBranchLevel;
  if (!PPChainBranchIndex.empty())
    PPChainBranchIndex.pop();
  if (!PPStack.empty())
    PPStack.pop_back();
}
void UnwrappedLineParser::parsePPIf(bool IfDef) {
nextToken();
bool IsLiteralFalse = (FormatTok->Tok.isLiteral() &&
FormatTok->Tok.getLiteralData() != nullptr &&
StringRef(FormatTok->Tok.getLiteralData(),
FormatTok->Tok.getLength()) == "0") ||
FormatTok->Tok.is(tok::kw_false);
conditionalCompilationStart(!IfDef && IsLiteralFalse);
parsePPUnknown();
}
// Handles #else: advances the branch-tracking state, then consumes the rest
// of the directive.
void UnwrappedLineParser::parsePPElse() {
  conditionalCompilationAlternative();
  parsePPUnknown();
}
// Handles #elif; branch-tracking-wise it behaves exactly like #else.
void UnwrappedLineParser::parsePPElIf() { parsePPElse(); }
// Handles #endif: unwinds one conditional-compilation level, then consumes
// the rest of the directive.
void UnwrappedLineParser::parsePPEndIf() {
  conditionalCompilationEnd();
  parsePPUnknown();
}
// Parses a #define. The macro name (and parameter list, if any) becomes one
// unwrapped line; the replacement text is then parsed like regular code at
// indent level 1.
void UnwrappedLineParser::parsePPDefine() {
  nextToken();
  if (FormatTok->Tok.getKind() != tok::identifier) {
    // No valid macro name; fall back to unknown-directive handling.
    parsePPUnknown();
    return;
  }
  nextToken();
  // A "(" with no whitespace after the macro name opens the parameter list
  // of a function-like macro.
  if (FormatTok->Tok.getKind() == tok::l_paren &&
      FormatTok->WhitespaceRange.getBegin() ==
          FormatTok->WhitespaceRange.getEnd()) {
    parseParens();
  }
  addUnwrappedLine();
  Line->Level = 1;
  // Errors during a preprocessor directive can only affect the layout of the
  // preprocessor directive, and thus we ignore them. An alternative approach
  // would be to use the same approach we use on the file level (no
  // re-indentation if there was a structural error) within the macro
  // definition.
  parseFile();
}
// Consumes the remainder of the current preprocessor directive and emits it
// as a single unwrapped line.
void UnwrappedLineParser::parsePPUnknown() {
  nextToken();
  while (!eof())
    nextToken();
  addUnwrappedLine();
}
// Here we blacklist certain tokens that are not usually the first token in an
// unwrapped line. This is used in attempt to distinguish macro calls without
// trailing semicolons from other constructs split to several lines.
static bool tokenCanStartNewLine(const clang::Token &Tok) {
  switch (Tok.getKind()) {
  // Semicolon can be a null-statement, l_square can be a start of a macro or
  // a C++11 attribute, but this doesn't seem to be common.
  case tok::semi:
  case tok::l_brace:
  case tok::l_square:
  // Tokens that can only be used as binary operators and a part of
  // overloaded operator names.
  case tok::period:
  case tok::periodstar:
  case tok::arrow:
  case tok::arrowstar:
  case tok::less:
  case tok::greater:
  case tok::slash:
  case tok::percent:
  case tok::lessless:
  case tok::greatergreater:
  case tok::equal:
  case tok::plusequal:
  case tok::minusequal:
  case tok::starequal:
  case tok::slashequal:
  case tok::percentequal:
  case tok::ampequal:
  case tok::pipeequal:
  case tok::caretequal:
  case tok::greatergreaterequal:
  case tok::lesslessequal:
  // Colon is used in labels, base class lists, initializer lists,
  // range-based for loops, ternary operator, but should never be the
  // first token in an unwrapped line.
  case tok::colon:
  // 'noexcept' is a trailing annotation.
  case tok::kw_noexcept:
    return false;
  default:
    return true;
  }
}
// Parses one structural element (statement, declaration, definition, ...).
// The first switch dispatches on constructs identified by their leading
// token; anything else falls through to the loop below, which consumes
// tokens until the element ends (semicolon, closing brace, or block).
void UnwrappedLineParser::parseStructuralElement() {
  assert(!FormatTok->Tok.is(tok::l_brace));
  switch (FormatTok->Tok.getKind()) {
  case tok::at:
    nextToken();
    if (FormatTok->Tok.is(tok::l_brace)) {
      parseBracedList();
      break;
    }
    // Dispatch on Objective-C @-keywords.
    switch (FormatTok->Tok.getObjCKeywordID()) {
    case tok::objc_public:
    case tok::objc_protected:
    case tok::objc_package:
    case tok::objc_private:
      return parseAccessSpecifier();
    case tok::objc_interface:
    case tok::objc_implementation:
      return parseObjCInterfaceOrImplementation();
    case tok::objc_protocol:
      return parseObjCProtocol();
    case tok::objc_end:
      return; // Handled by the caller.
    case tok::objc_optional:
    case tok::objc_required:
      nextToken();
      addUnwrappedLine();
      return;
    case tok::objc_autoreleasepool:
      nextToken();
      if (FormatTok->Tok.is(tok::l_brace)) {
        if (Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
            Style.BreakBeforeBraces == FormatStyle::BS_GNU)
          addUnwrappedLine();
        parseBlock(/*MustBeDeclaration=*/false);
      }
      addUnwrappedLine();
      return;
    case tok::objc_try:
      // This branch isn't strictly necessary (the kw_try case below would
      // do this too after the tok::at is parsed above). But be explicit.
      parseTryCatch();
      return;
    default:
      break;
    }
    break;
  case tok::kw_asm:
    nextToken();
    if (FormatTok->is(tok::l_brace)) {
      // Inline assembly block: mark both braces and finalize the tokens in
      // between so later passes leave them untouched.
      FormatTok->Type = TT_InlineASMBrace;
      nextToken();
      while (FormatTok && FormatTok->isNot(tok::eof)) {
        if (FormatTok->is(tok::r_brace)) {
          FormatTok->Type = TT_InlineASMBrace;
          nextToken();
          addUnwrappedLine();
          break;
        }
        FormatTok->Finalized = true;
        nextToken();
      }
    }
    break;
  case tok::kw_namespace:
    parseNamespace();
    return;
  case tok::kw_inline:
    nextToken();
    // "inline namespace" is handled like a plain namespace.
    if (FormatTok->Tok.is(tok::kw_namespace)) {
      parseNamespace();
      return;
    }
    break;
  case tok::kw_public:
  case tok::kw_protected:
  case tok::kw_private:
    // In Java/JavaScript these are modifiers, not access-specifier labels.
    if (Style.Language == FormatStyle::LK_Java ||
        Style.Language == FormatStyle::LK_JavaScript)
      nextToken();
    else
      parseAccessSpecifier();
    return;
  case tok::kw_if:
    parseIfThenElse();
    return;
  case tok::kw_for:
  case tok::kw_while:
    parseForOrWhileLoop();
    return;
  case tok::kw_do:
    parseDoWhile();
    return;
  case tok::kw_switch:
    parseSwitch();
    return;
  case tok::kw_default:
    nextToken();
    parseLabel();
    return;
  case tok::kw_case:
    parseCaseLabel();
    return;
  case tok::kw_try:
  case tok::kw___try:
    parseTryCatch();
    return;
  case tok::kw_extern:
    nextToken();
    // extern "C" { ... } blocks do not add an indent level.
    if (FormatTok->Tok.is(tok::string_literal)) {
      nextToken();
      if (FormatTok->Tok.is(tok::l_brace)) {
        parseBlock(/*MustBeDeclaration=*/true, /*AddLevel=*/false);
        addUnwrappedLine();
        return;
      }
    }
    break;
  case tok::kw_export:
    if (Style.Language == FormatStyle::LK_JavaScript) {
      parseJavaScriptEs6ImportExport();
      return;
    }
    break;
  case tok::identifier:
    if (FormatTok->is(TT_ForEachMacro)) {
      parseForOrWhileLoop();
      return;
    }
    if (FormatTok->is(TT_MacroBlockBegin)) {
      parseBlock(/*MustBeDeclaration=*/false, /*AddLevel=*/true,
                 /*MunchSemi=*/false);
      return;
    }
    if (Style.Language == FormatStyle::LK_JavaScript &&
        FormatTok->is(Keywords.kw_import)) {
      parseJavaScriptEs6ImportExport();
      return;
    }
    // Qt's "signals:" section behaves like an access specifier.
    if (FormatTok->is(Keywords.kw_signals)) {
      nextToken();
      if (FormatTok->is(tok::colon)) {
        nextToken();
        addUnwrappedLine();
      }
      return;
    }
    // In all other cases, parse the declaration.
    break;
  default:
    break;
  }
  // Generic element: consume tokens until the element ends.
  do {
    switch (FormatTok->Tok.getKind()) {
    case tok::at:
      nextToken();
      if (FormatTok->Tok.is(tok::l_brace))
        parseBracedList();
      break;
    case tok::kw_enum:
      // parseEnum falls through and does not yet add an unwrapped line as an
      // enum definition can start a structural element.
      parseEnum();
      // This does not apply for Java and JavaScript.
      if (Style.Language == FormatStyle::LK_Java ||
          Style.Language == FormatStyle::LK_JavaScript) {
        addUnwrappedLine();
        return;
      }
      break;
    case tok::kw_typedef:
      nextToken();
      if (FormatTok->isOneOf(Keywords.kw_NS_ENUM, Keywords.kw_NS_OPTIONS,
                             Keywords.kw_CF_ENUM, Keywords.kw_CF_OPTIONS))
        parseEnum();
      break;
    case tok::kw_struct:
    case tok::kw_union:
    case tok::kw_class:
      // parseRecord falls through and does not yet add an unwrapped line as a
      // record declaration or definition can start a structural element.
      parseRecord();
      // This does not apply for Java and JavaScript.
      if (Style.Language == FormatStyle::LK_Java ||
          Style.Language == FormatStyle::LK_JavaScript) {
        addUnwrappedLine();
        return;
      }
      break;
    case tok::period:
      nextToken();
      // In Java, classes have an implicit static member "class".
      if (Style.Language == FormatStyle::LK_Java && FormatTok &&
          FormatTok->is(tok::kw_class))
        nextToken();
      break;
    case tok::semi:
      nextToken();
      addUnwrappedLine();
      return;
    case tok::r_brace:
      addUnwrappedLine();
      return;
    case tok::l_paren:
      parseParens();
      break;
    case tok::caret:
      // Objective-C block literal: ^ReturnType(args) { ... }.
      nextToken();
      if (FormatTok->Tok.isAnyIdentifier() ||
          FormatTok->isSimpleTypeSpecifier())
        nextToken();
      if (FormatTok->is(tok::l_paren))
        parseParens();
      if (FormatTok->is(tok::l_brace))
        parseChildBlock();
      break;
    case tok::l_brace:
      if (!tryToParseBracedList()) {
        // A block outside of parentheses must be the last part of a
        // structural element.
        // FIXME: Figure out cases where this is not true, and add projections
        // for them (the one we know is missing are lambdas).
        if (Style.BreakBeforeBraces != FormatStyle::BS_Attach)
          addUnwrappedLine();
        FormatTok->Type = TT_FunctionLBrace;
        parseBlock(/*MustBeDeclaration=*/false);
        addUnwrappedLine();
        return;
      }
      // Otherwise this was a braced init list, and the structural
      // element continues.
      break;
    case tok::kw_try:
      // We arrive here when parsing function-try blocks.
      parseTryCatch();
      return;
    case tok::identifier: {
      if (FormatTok->is(TT_MacroBlockEnd)) {
        addUnwrappedLine();
        return;
      }
      // Parse function literal unless 'function' is the first token in a line
      // in which case this should be treated as a free-standing function.
      if (Style.Language == FormatStyle::LK_JavaScript &&
          FormatTok->is(Keywords.kw_function) && Line->Tokens.size() > 0) {
        tryToParseJSFunction();
        break;
      }
      if ((Style.Language == FormatStyle::LK_JavaScript ||
           Style.Language == FormatStyle::LK_Java) &&
          FormatTok->is(Keywords.kw_interface)) {
        parseRecord();
        addUnwrappedLine();
        return;
      }
      StringRef Text = FormatTok->TokenText;
      nextToken();
      if (Line->Tokens.size() == 1 &&
          // JS doesn't have macros, and within classes colons indicate fields,
          // not labels.
          Style.Language != FormatStyle::LK_JavaScript) {
        if (FormatTok->Tok.is(tok::colon) && !Line->MustBeDeclaration) {
          parseLabel();
          return;
        }
        // Recognize function-like macro usages without trailing semicolon as
        // well as free-standing macros like Q_OBJECT.
        bool FunctionLike = FormatTok->is(tok::l_paren);
        if (FunctionLike)
          parseParens();
        bool FollowedByNewline =
            CommentsBeforeNextToken.empty()
                ? FormatTok->NewlinesBefore > 0
                : CommentsBeforeNextToken.front()->NewlinesBefore > 0;
        if (FollowedByNewline && (Text.size() >= 5 || FunctionLike) &&
            tokenCanStartNewLine(FormatTok->Tok) && Text == Text.upper()) {
          addUnwrappedLine();
          return;
        }
      }
      break;
    }
    case tok::equal:
      // Fat arrows (=>) have tok::TokenKind tok::equal but TokenType
      // TT_JsFatArrow. They always start an expression or a child block if
      // followed by a curly.
      if (FormatTok->is(TT_JsFatArrow)) {
        nextToken();
        if (FormatTok->is(tok::l_brace))
          parseChildBlock();
        break;
      }
      nextToken();
      if (FormatTok->Tok.is(tok::l_brace)) {
        parseBracedList();
      }
      break;
    case tok::l_square:
      parseSquare();
      break;
    case tok::kw_new:
      parseNew();
      break;
    default:
      nextToken();
      break;
    }
  } while (!eof());
}
// Attempts to parse a C++ lambda starting at the current "[". Returns true
// if a lambda (or something it gave up on mid-way) was handled; returns
// false after consuming one token if this "[" cannot start a lambda.
bool UnwrappedLineParser::tryToParseLambda() {
  // Lambdas only exist in C++.
  if (Style.Language != FormatStyle::LK_Cpp) {
    nextToken();
    return false;
  }
  // FIXME: This is a dirty way to access the previous token. Find a better
  // solution.
  // A "[" directly after an identifier, ")", "]" etc. is a subscript, not a
  // lambda introducer.
  if (!Line->Tokens.empty() &&
      (Line->Tokens.back().Tok->isOneOf(tok::identifier, tok::kw_operator,
                                        tok::kw_new, tok::kw_delete) ||
       Line->Tokens.back().Tok->closesScope() ||
       Line->Tokens.back().Tok->isSimpleTypeSpecifier())) {
    nextToken();
    return false;
  }
  assert(FormatTok->is(tok::l_square));
  FormatToken &LSquare = *FormatTok;
  if (!tryToParseLambdaIntroducer())
    return false;
  // Skip over the parameter list and trailing return type until the body.
  while (FormatTok->isNot(tok::l_brace)) {
    if (FormatTok->isSimpleTypeSpecifier()) {
      nextToken();
      continue;
    }
    switch (FormatTok->Tok.getKind()) {
    case tok::l_brace:
      break;
    case tok::l_paren:
      parseParens();
      break;
    case tok::amp:
    case tok::star:
    case tok::kw_const:
    case tok::comma:
    case tok::less:
    case tok::greater:
    case tok::identifier:
    case tok::coloncolon:
    case tok::kw_mutable:
      nextToken();
      break;
    case tok::arrow:
      FormatTok->Type = TT_LambdaArrow;
      nextToken();
      break;
    default:
      // Unexpected token: give up on the lambda but report it as handled.
      return true;
    }
  }
  LSquare.Type = TT_LambdaLSquare;
  parseChildBlock();
  return true;
}
// Parses a lambda introducer "[...]" (default captures, by-value/by-ref
// captures, "this", pack expansions). Returns true and consumes through the
// closing "]" on success; returns false if the contents cannot be a capture
// list.
bool UnwrappedLineParser::tryToParseLambdaIntroducer() {
  nextToken();
  if (FormatTok->is(tok::equal)) {
    // "[=]" or "[=, ...]".
    nextToken();
    if (FormatTok->is(tok::r_square)) {
      nextToken();
      return true;
    }
    if (FormatTok->isNot(tok::comma))
      return false;
    nextToken();
  } else if (FormatTok->is(tok::amp)) {
    // "[&]" or "[&, ...]" or "[&x...]".
    nextToken();
    if (FormatTok->is(tok::r_square)) {
      nextToken();
      return true;
    }
    if (!FormatTok->isOneOf(tok::comma, tok::identifier)) {
      return false;
    }
    if (FormatTok->is(tok::comma))
      nextToken();
  } else if (FormatTok->is(tok::r_square)) {
    // "[]" - empty capture list.
    nextToken();
    return true;
  }
  // Parse the remaining comma-separated captures.
  do {
    if (FormatTok->is(tok::amp))
      nextToken();
    if (!FormatTok->isOneOf(tok::identifier, tok::kw_this))
      return false;
    nextToken();
    if (FormatTok->is(tok::ellipsis))
      nextToken();
    if (FormatTok->is(tok::comma)) {
      nextToken();
    } else if (FormatTok->is(tok::r_square)) {
      nextToken();
      return true;
    } else {
      return false;
    }
  } while (!eof());
  return false;
}
// Parses a JavaScript function literal: optional name, parameter list,
// optional ":"-prefixed return type annotation, then the body as a child
// block. Bails out early if no parameter list follows.
void UnwrappedLineParser::tryToParseJSFunction() {
  nextToken();
  // Consume function name.
  if (FormatTok->is(tok::identifier))
    nextToken();
  if (FormatTok->isNot(tok::l_paren))
    return;
  // Parse formal parameter list.
  parseParens();
  if (FormatTok->is(tok::colon)) {
    // Parse a type definition.
    nextToken();
    // Eat the type declaration. For braced inline object types, balance braces,
    // otherwise just parse until finding an l_brace for the function body.
    if (FormatTok->is(tok::l_brace))
      tryToParseBracedList();
    else
      while (FormatTok->isNot(tok::l_brace) && !eof())
        nextToken();
  }
  parseChildBlock();
}
bool UnwrappedLineParser::tryToParseBracedList() {
if (FormatTok->BlockKind == BK_Unknown)
calculateBraceTypes();
assert(FormatTok->BlockKind != BK_Unknown);
if (FormatTok->BlockKind == BK_Block)
return false;
parseBracedList();
return true;
}
// Parses a braced init list. Returns true if the list was closed by "}"
// without encountering a stray semicolon; returns false on error (semicolon
// when ContinueOnSemicolons is false, or EOF before the closing brace).
bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons) {
  bool HasError = false;
  nextToken();
  // FIXME: Once we have an expression parser in the UnwrappedLineParser,
  // replace this by using parseAssigmentExpression() inside.
  do {
    if (Style.Language == FormatStyle::LK_JavaScript) {
      if (FormatTok->is(Keywords.kw_function)) {
        tryToParseJSFunction();
        continue;
      }
      if (FormatTok->is(TT_JsFatArrow)) {
        nextToken();
        // Fat arrows can be followed by simple expressions or by child blocks
        // in curly braces.
        if (FormatTok->is(tok::l_brace)) {
          parseChildBlock();
          continue;
        }
      }
    }
    switch (FormatTok->Tok.getKind()) {
    case tok::caret:
      // Objective-C block literal inside the list.
      nextToken();
      if (FormatTok->is(tok::l_brace)) {
        parseChildBlock();
      }
      break;
    case tok::l_square:
      tryToParseLambda();
      break;
    case tok::l_brace:
      // Assume there are no blocks inside a braced init list apart
      // from the ones we explicitly parse out (like lambdas).
      FormatTok->BlockKind = BK_BracedInit;
      parseBracedList();
      break;
    case tok::l_paren:
      parseParens();
      // JavaScript can just have free standing methods and getters/setters in
      // object literals. Detect them by a "{" following ")".
      if (Style.Language == FormatStyle::LK_JavaScript) {
        if (FormatTok->is(tok::l_brace))
          parseChildBlock();
        break;
      }
      break;
    case tok::r_brace:
      nextToken();
      return !HasError;
    case tok::semi:
      // A semicolon inside a braced list is an error (or the list was
      // misclassified).
      HasError = true;
      if (!ContinueOnSemicolons)
        return !HasError;
      nextToken();
      break;
    case tok::comma:
      nextToken();
      break;
    default:
      nextToken();
      break;
    }
  } while (!eof());
  return false;
}
// Consumes a balanced "(...)" group, handling nested parens, lambdas,
// braced lists/child blocks, and JS function literals inside it.
void UnwrappedLineParser::parseParens() {
  assert(FormatTok->Tok.is(tok::l_paren) && "'(' expected.");
  nextToken();
  do {
    switch (FormatTok->Tok.getKind()) {
    case tok::l_paren:
      parseParens();
      // Java anonymous class body directly after an argument list.
      if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::l_brace))
        parseChildBlock();
      break;
    case tok::r_paren:
      nextToken();
      return;
    case tok::r_brace:
      // A "}" inside parenthesis is an error if there wasn't a matching "{".
      return;
    case tok::l_square:
      tryToParseLambda();
      break;
    case tok::l_brace:
      if (!tryToParseBracedList())
        parseChildBlock();
      break;
    case tok::at:
      nextToken();
      if (FormatTok->Tok.is(tok::l_brace))
        parseBracedList();
      break;
    case tok::identifier:
      if (Style.Language == FormatStyle::LK_JavaScript &&
          FormatTok->is(Keywords.kw_function))
        tryToParseJSFunction();
      else
        nextToken();
      break;
    default:
      nextToken();
      break;
    }
  } while (!eof());
}
// Consumes a balanced "[...]" group; first tries to parse it as a lambda,
// otherwise handles nested groups until the matching "]".
void UnwrappedLineParser::parseSquare() {
  assert(FormatTok->Tok.is(tok::l_square) && "'[' expected.");
  if (tryToParseLambda())
    return;
  do {
    switch (FormatTok->Tok.getKind()) {
    case tok::l_paren:
      parseParens();
      break;
    case tok::r_square:
      nextToken();
      return;
    case tok::r_brace:
      // A "}" inside square brackets is an error if there wasn't a matching
      // "{".
      return;
    case tok::l_square:
      parseSquare();
      break;
    case tok::l_brace: {
      if (!tryToParseBracedList())
        parseChildBlock();
      break;
    }
    case tok::at:
      nextToken();
      if (FormatTok->Tok.is(tok::l_brace))
        parseBracedList();
      break;
    default:
      nextToken();
      break;
    }
  } while (!eof());
}
// Parses an if statement including any else/else-if chain. Single-statement
// bodies are placed on their own, more deeply indented unwrapped line.
void UnwrappedLineParser::parseIfThenElse() {
  assert(FormatTok->Tok.is(tok::kw_if) && "'if' expected");
  nextToken();
  if (FormatTok->Tok.is(tok::l_paren))
    parseParens();
  // Whether a line break must still be emitted after the then-block (it is
  // deferred in case an "else" follows on the same line).
  bool NeedsUnwrappedLine = false;
  if (FormatTok->Tok.is(tok::l_brace)) {
    CompoundStatementIndenter Indenter(this, Style, Line->Level);
    parseBlock(/*MustBeDeclaration=*/false);
    if (Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
        Style.BreakBeforeBraces == FormatStyle::BS_GNU) {
      addUnwrappedLine();
    } else {
      NeedsUnwrappedLine = true;
    }
  } else {
    addUnwrappedLine();
    ++Line->Level;
    parseStructuralElement();
    --Line->Level;
  }
  if (FormatTok->Tok.is(tok::kw_else)) {
    if (Style.BreakBeforeBraces == FormatStyle::BS_Stroustrup)
      addUnwrappedLine();
    nextToken();
    if (FormatTok->Tok.is(tok::l_brace)) {
      CompoundStatementIndenter Indenter(this, Style, Line->Level);
      parseBlock(/*MustBeDeclaration=*/false);
      addUnwrappedLine();
    } else if (FormatTok->Tok.is(tok::kw_if)) {
      // "else if": recurse without adding an indent level.
      parseIfThenElse();
    } else {
      addUnwrappedLine();
      ++Line->Level;
      parseStructuralElement();
      --Line->Level;
    }
  } else if (NeedsUnwrappedLine) {
    addUnwrappedLine();
  }
}
// Parses try/catch constructs, including C++ function-try blocks with
// initializer lists, MSVC __try/__except/__finally, Java try-with-resources
// and finally, and Objective-C @catch/@finally.
void UnwrappedLineParser::parseTryCatch() {
  assert(FormatTok->isOneOf(tok::kw_try, tok::kw___try) && "'try' expected");
  nextToken();
  bool NeedsUnwrappedLine = false;
  if (FormatTok->is(tok::colon)) {
    // We are in a function try block, what comes is an initializer list.
    nextToken();
    while (FormatTok->is(tok::identifier)) {
      nextToken();
      if (FormatTok->is(tok::l_paren))
        parseParens();
      if (FormatTok->is(tok::comma))
        nextToken();
    }
  }
  // Parse try with resource.
  if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::l_paren)) {
    parseParens();
  }
  if (FormatTok->is(tok::l_brace)) {
    CompoundStatementIndenter Indenter(this, Style, Line->Level);
    parseBlock(/*MustBeDeclaration=*/false);
    if (Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
        Style.BreakBeforeBraces == FormatStyle::BS_GNU ||
        Style.BreakBeforeBraces == FormatStyle::BS_Stroustrup) {
      addUnwrappedLine();
    } else {
      NeedsUnwrappedLine = true;
    }
  } else if (!FormatTok->is(tok::kw_catch)) {
    // The C++ standard requires a compound-statement after a try.
    // If there's none, we try to assume there's a structuralElement
    // and try to continue.
    addUnwrappedLine();
    ++Line->Level;
    parseStructuralElement();
    --Line->Level;
  }
  // Parse all catch/finally handlers that follow.
  while (1) {
    if (FormatTok->is(tok::at))
      nextToken();
    if (!(FormatTok->isOneOf(tok::kw_catch, Keywords.kw___except,
                             tok::kw___finally) ||
          ((Style.Language == FormatStyle::LK_Java ||
            Style.Language == FormatStyle::LK_JavaScript) &&
           FormatTok->is(Keywords.kw_finally)) ||
          (FormatTok->Tok.isObjCAtKeyword(tok::objc_catch) ||
           FormatTok->Tok.isObjCAtKeyword(tok::objc_finally))))
      break;
    nextToken();
    // Skip the exception declaration up to the handler body.
    while (FormatTok->isNot(tok::l_brace)) {
      if (FormatTok->is(tok::l_paren)) {
        parseParens();
        continue;
      }
      if (FormatTok->isOneOf(tok::semi, tok::r_brace, tok::eof))
        return;
      nextToken();
    }
    NeedsUnwrappedLine = false;
    CompoundStatementIndenter Indenter(this, Style, Line->Level);
    parseBlock(/*MustBeDeclaration=*/false);
    if (Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
        Style.BreakBeforeBraces == FormatStyle::BS_GNU ||
        Style.BreakBeforeBraces == FormatStyle::BS_Stroustrup) {
      addUnwrappedLine();
    } else {
      NeedsUnwrappedLine = true;
    }
  }
  if (NeedsUnwrappedLine) {
    addUnwrappedLine();
  }
}
// Parses a namespace definition. Whether the body gets an extra indent level
// depends on Style.NamespaceIndentation.
void UnwrappedLineParser::parseNamespace() {
  assert(FormatTok->Tok.is(tok::kw_namespace) && "'namespace' expected");
  const FormatToken &InitialToken = *FormatTok;
  nextToken();
  if (FormatTok->Tok.is(tok::identifier))
    nextToken();
  if (FormatTok->Tok.is(tok::l_brace)) {
    if (ShouldBreakBeforeBrace(Style, InitialToken))
      addUnwrappedLine();
    bool AddLevel = Style.NamespaceIndentation == FormatStyle::NI_All ||
                    (Style.NamespaceIndentation == FormatStyle::NI_Inner &&
                     DeclarationScopeStack.size() > 1);
    parseBlock(/*MustBeDeclaration=*/true, AddLevel);
    // Munch the semicolon after a namespace. This is more common than one
    // would think. Putting the semicolon into its own line is very ugly.
    if (FormatTok->Tok.is(tok::semi))
      nextToken();
    addUnwrappedLine();
  }
  // FIXME: Add error handling.
}
// Parses a 'new' expression. Only Java needs special handling: consume
// everything up to and including the (mandatory) argument list, plus an
// optional anonymous-class body.
void UnwrappedLineParser::parseNew() {
  assert(FormatTok->is(tok::kw_new) && "'new' expected");
  nextToken();
  if (Style.Language != FormatStyle::LK_Java)
    return;
  // In Java, we can parse everything up to the parens, which aren't optional.
  for (;;) {
    // There should not be a ;, { or } before the new's open paren.
    if (FormatTok->isOneOf(tok::semi, tok::l_brace, tok::r_brace))
      return;
    if (FormatTok->is(tok::l_paren)) {
      // Consume the constructor's argument list.
      parseParens();
      // If there is a class body of an anonymous class, consume that as child.
      if (FormatTok->is(tok::l_brace))
        parseChildBlock();
      return;
    }
    nextToken();
    if (eof())
      return;
  }
}
// Parses a for/while loop (or foreach macro): header parens followed by
// either a compound body or a single, more deeply indented statement.
void UnwrappedLineParser::parseForOrWhileLoop() {
  assert(FormatTok->isOneOf(tok::kw_for, tok::kw_while, TT_ForEachMacro) &&
         "'for', 'while' or foreach macro expected");
  nextToken();
  if (FormatTok->Tok.is(tok::l_paren))
    parseParens();
  if (!FormatTok->Tok.is(tok::l_brace)) {
    // Single-statement body on its own indented line.
    addUnwrappedLine();
    ++Line->Level;
    parseStructuralElement();
    --Line->Level;
    return;
  }
  CompoundStatementIndenter Indenter(this, Style, Line->Level);
  parseBlock(/*MustBeDeclaration=*/false);
  addUnwrappedLine();
}
// Parses a do-while loop: the body (block or single statement) followed by
// the trailing "while (...)" condition.
void UnwrappedLineParser::parseDoWhile() {
  assert(FormatTok->Tok.is(tok::kw_do) && "'do' expected");
  nextToken();
  if (FormatTok->Tok.is(tok::l_brace)) {
    CompoundStatementIndenter Indenter(this, Style, Line->Level);
    parseBlock(/*MustBeDeclaration=*/false);
    // In GNU style the "while" goes on its own line after the "}".
    if (Style.BreakBeforeBraces == FormatStyle::BS_GNU)
      addUnwrappedLine();
  } else {
    addUnwrappedLine();
    ++Line->Level;
    parseStructuralElement();
    --Line->Level;
  }
  // FIXME: Add error handling.
  if (!FormatTok->Tok.is(tok::kw_while)) {
    addUnwrappedLine();
    return;
  }
  nextToken();
  // Consume the "while (...);" part as a structural element.
  parseStructuralElement();
}
// Parses what follows a label (or case label) colon: the label line itself
// is emitted one level shallower; an attached "{" block and an optional
// trailing "break;" are handled here as well.
void UnwrappedLineParser::parseLabel() {
  nextToken();
  unsigned OldLineLevel = Line->Level;
  // Out-dent the label relative to the statements it precedes.
  if (Line->Level > 1 || (!Line->InPPDirective && Line->Level > 0))
    --Line->Level;
  if (CommentsBeforeNextToken.empty() && FormatTok->Tok.is(tok::l_brace)) {
    CompoundStatementIndenter Indenter(this, Style, Line->Level);
    parseBlock(/*MustBeDeclaration=*/false);
    if (FormatTok->Tok.is(tok::kw_break)) {
      // "break;" after "}" on its own line only for BS_Allman and BS_GNU
      if (Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
          Style.BreakBeforeBraces == FormatStyle::BS_GNU) {
        addUnwrappedLine();
      }
      parseStructuralElement();
    }
    addUnwrappedLine();
  } else {
    if (FormatTok->is(tok::semi))
      nextToken();
    addUnwrappedLine();
  }
  Line->Level = OldLineLevel;
}
// Parses "case <expr>:" by consuming tokens up to the colon, then delegates
// label handling to parseLabel().
void UnwrappedLineParser::parseCaseLabel() {
  assert(FormatTok->Tok.is(tok::kw_case) && "'case' expected");
  // FIXME: fix handling of complex expressions here.
  nextToken();
  while (!eof() && !FormatTok->Tok.is(tok::colon))
    nextToken();
  parseLabel();
}
// Parses a switch statement: condition parens followed by either a compound
// body or a single, more deeply indented statement.
void UnwrappedLineParser::parseSwitch() {
  assert(FormatTok->Tok.is(tok::kw_switch) && "'switch' expected");
  nextToken();
  if (FormatTok->Tok.is(tok::l_paren))
    parseParens();
  if (!FormatTok->Tok.is(tok::l_brace)) {
    // Single-statement body on its own indented line.
    addUnwrappedLine();
    ++Line->Level;
    parseStructuralElement();
    --Line->Level;
    return;
  }
  CompoundStatementIndenter Indenter(this, Style, Line->Level);
  parseBlock(/*MustBeDeclaration=*/false);
  addUnwrappedLine();
}
// Parses an access specifier ("public:", "private slots:", ...) onto its own
// unwrapped line.
void UnwrappedLineParser::parseAccessSpecifier() {
  nextToken();
  // Understand Qt's slots.
  if (FormatTok->isOneOf(Keywords.kw_slots, Keywords.kw_qslots))
    nextToken();
  // Otherwise, we don't know what it is, and we'd better keep the next token.
  if (FormatTok->Tok.is(tok::colon))
    nextToken();
  addUnwrappedLine();
}
// Parses an enum (including "enum class/struct", NS_ENUM-style macros, and
// Java enums). Deliberately does not end the unwrapped line afterwards so
// that "enum A {} n, m;" stays on one line.
void UnwrappedLineParser::parseEnum() {
  // Won't be 'enum' for NS_ENUMs.
  if (FormatTok->Tok.is(tok::kw_enum))
    nextToken();
  // Eat up enum class ...
  if (FormatTok->Tok.is(tok::kw_class) || FormatTok->Tok.is(tok::kw_struct))
    nextToken();
  // Skip name, underlying-type specifier, and any interleaved macros.
  while (FormatTok->Tok.getIdentifierInfo() ||
         FormatTok->isOneOf(tok::colon, tok::coloncolon, tok::less,
                            tok::greater, tok::comma, tok::question)) {
    nextToken();
    // We can have macros or attributes in between 'enum' and the enum name.
    if (FormatTok->is(tok::l_paren))
      parseParens();
    if (FormatTok->is(tok::identifier)) {
      nextToken();
      // If there are two identifiers in a row, this is likely an elaborate
      // return type. In Java, this can be "implements", etc.
      if (Style.Language == FormatStyle::LK_Cpp &&
          FormatTok->is(tok::identifier))
        return;
    }
  }
  // Just a declaration or something is wrong.
  if (FormatTok->isNot(tok::l_brace))
    return;
  FormatTok->BlockKind = BK_Block;
  if (Style.Language == FormatStyle::LK_Java) {
    // Java enums are different.
    parseJavaEnumBody();
    return;
  }
  // Parse enum body.
  bool HasError = !parseBracedList(/*ContinueOnSemicolons=*/true);
  if (HasError) {
    if (FormatTok->is(tok::semi))
      nextToken();
    addUnwrappedLine();
  }
  // There is no addUnwrappedLine() here so that we fall through to parsing a
  // structural element afterwards. Thus, in "enum A {} n, m;",
  // "} n, m;" will end up in one unwrapped line.
}
// Parses the body of a Java enum. "Simple" enums (no semicolons, no constant
// class bodies) are formatted like braced lists; complex ones get one line
// per constant plus a regular class body after the ";".
void UnwrappedLineParser::parseJavaEnumBody() {
  // Determine whether the enum is simple, i.e. does not have a semicolon or
  // constants with class bodies. Simple enums can be formatted like braced
  // lists, contracted to a single line, etc.
  unsigned StoredPosition = Tokens->getPosition();
  bool IsSimple = true;
  FormatToken *Tok = Tokens->getNextToken();
  while (Tok) {
    if (Tok->is(tok::r_brace))
      break;
    if (Tok->isOneOf(tok::l_brace, tok::semi)) {
      IsSimple = false;
      break;
    }
    // FIXME: This will also mark enums with braces in the arguments to enum
    // constants as "not simple". This is probably fine in practice, though.
    Tok = Tokens->getNextToken();
  }
  // Lookahead done; rewind to the "{".
  FormatTok = Tokens->setPosition(StoredPosition);
  if (IsSimple) {
    parseBracedList();
    addUnwrappedLine();
    return;
  }
  // Parse the body of a more complex enum.
  // First add a line for everything up to the "{".
  nextToken();
  addUnwrappedLine();
  ++Line->Level;
  // Parse the enum constants.
  while (FormatTok) {
    if (FormatTok->is(tok::l_brace)) {
      // Parse the constant's class body.
      parseBlock(/*MustBeDeclaration=*/true, /*AddLevel=*/true,
                 /*MunchSemi=*/false);
    } else if (FormatTok->is(tok::l_paren)) {
      parseParens();
    } else if (FormatTok->is(tok::comma)) {
      nextToken();
      addUnwrappedLine();
    } else if (FormatTok->is(tok::semi)) {
      nextToken();
      addUnwrappedLine();
      break;
    } else if (FormatTok->is(tok::r_brace)) {
      addUnwrappedLine();
      break;
    } else {
      nextToken();
    }
  }
  // Parse the class body after the enum's ";" if any.
  parseLevel(/*HasOpeningBrace=*/true);
  nextToken();
  --Line->Level;
  addUnwrappedLine();
}
// Parses a record (class/struct/union/interface) declaration or definition.
// Deliberately does not end the unwrapped line afterwards so that
// "class A {} n, m;" stays on one line.
void UnwrappedLineParser::parseRecord() {
  const FormatToken &InitialToken = *FormatTok;
  nextToken();
  // The actual identifier can be a nested name specifier, and in macros
  // it is often token-pasted.
  while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::hashhash,
                            tok::kw___attribute, tok::kw___declspec,
                            tok::kw_alignas) ||
         ((Style.Language == FormatStyle::LK_Java ||
           Style.Language == FormatStyle::LK_JavaScript) &&
          FormatTok->isOneOf(tok::period, tok::comma))) {
    // All-uppercase identifiers are assumed to be macros; a "(" after a
    // non-macro identifier would be a constructor-like declaration instead.
    bool IsNonMacroIdentifier =
        FormatTok->is(tok::identifier) &&
        FormatTok->TokenText != FormatTok->TokenText.upper();
    nextToken();
    // We can have macros or attributes in between 'class' and the class name.
    if (!IsNonMacroIdentifier && FormatTok->Tok.is(tok::l_paren))
      parseParens();
  }
  // Note that parsing away template declarations here leads to incorrectly
  // accepting function declarations as record declarations.
  // In general, we cannot solve this problem. Consider:
  // class A<int> B() {}
  // which can be a function definition or a class definition when B() is a
  // macro. If we find enough real-world cases where this is a problem, we
  // can parse for the 'template' keyword in the beginning of the statement,
  // and thus rule out the record production in case there is no template
  // (this would still leave us with an ambiguity between template function
  // and class declarations).
  if (FormatTok->isOneOf(tok::colon, tok::less)) {
    // Skip base-class list / template arguments up to the class body.
    while (!eof()) {
      if (FormatTok->is(tok::l_brace)) {
        calculateBraceTypes(/*ExpectClassBody=*/true);
        if (!tryToParseBracedList())
          break;
      }
      if (FormatTok->Tok.is(tok::semi))
        return;
      nextToken();
    }
  }
  if (FormatTok->Tok.is(tok::l_brace)) {
    if (ShouldBreakBeforeBrace(Style, InitialToken))
      addUnwrappedLine();
    parseBlock(/*MustBeDeclaration=*/true, /*AddLevel=*/true,
               /*MunchSemi=*/false);
  }
  // There is no addUnwrappedLine() here so that we fall through to parsing a
  // structural element afterwards. Thus, in "class A {} n, m;",
  // "} n, m;" will end up in one unwrapped line.
}
// Consumes an ObjC protocol conformance list "<A, B, ...>" including the
// closing '>'. Precondition: the current token is '<'.
void UnwrappedLineParser::parseObjCProtocolList() {
  assert(FormatTok->Tok.is(tok::less) && "'<' expected.");
  // Step past the '<', then keep consuming until the matching '>' (or until
  // we run out of tokens).
  nextToken();
  while (!eof() && FormatTok->Tok.isNot(tok::greater))
    nextToken();
  nextToken(); // Skip '>'.
}
// Consumes declarations inside an ObjC @interface/@implementation/@protocol
// body until the matching "@end" (or end of file) is reached.
void UnwrappedLineParser::parseObjCUntilAtEnd() {
  do {
    if (FormatTok->Tok.isObjCAtKeyword(tok::objc_end)) {
      nextToken();
      addUnwrappedLine();
      break;
    }
    if (FormatTok->is(tok::l_brace)) {
      parseBlock(/*MustBeDeclaration=*/false);
      // In ObjC interfaces, nothing should be following the "}".
      addUnwrappedLine();
    } else if (FormatTok->is(tok::r_brace)) {
      // Ignore stray "}". parseStructuralElement doesn't consume them.
      nextToken();
      addUnwrappedLine();
    } else {
      parseStructuralElement();
    }
  } while (!eof());
}
// Parses "@interface"/"@implementation": the name, an optional base class or
// category, an optional protocol list, an optional instance-variable block,
// and finally everything up to "@end".
void UnwrappedLineParser::parseObjCInterfaceOrImplementation() {
  nextToken();
  nextToken(); // interface name
  // @interface can be followed by either a base class, or a category.
  if (FormatTok->Tok.is(tok::colon)) {
    nextToken();
    nextToken(); // base class name
  } else if (FormatTok->Tok.is(tok::l_paren))
    // Skip category, if present.
    parseParens();
  if (FormatTok->Tok.is(tok::less))
    parseObjCProtocolList();
  // An optional instance-variable block follows.
  if (FormatTok->Tok.is(tok::l_brace)) {
    if (Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
        Style.BreakBeforeBraces == FormatStyle::BS_GNU)
      addUnwrappedLine();
    parseBlock(/*MustBeDeclaration=*/true);
  }
  // With instance variables, this puts '}' on its own line. Without instance
  // variables, this ends the @interface line.
  addUnwrappedLine();
  parseObjCUntilAtEnd();
}
// Parses "@protocol", covering both forward declarations ("@protocol X;")
// and full definitions terminated by "@end".
void UnwrappedLineParser::parseObjCProtocol() {
  nextToken(); // Consume "@protocol".
  nextToken(); // Consume the protocol name.
  // An optional conformance list may follow the name.
  if (FormatTok->Tok.is(tok::less))
    parseObjCProtocolList();
  // A ';' here means this is only a forward declaration; finish the line and
  // do not look for "@end".
  if (FormatTok->Tok.is(tok::semi)) {
    nextToken();
    addUnwrappedLine();
    return;
  }
  // Otherwise this is a full protocol definition.
  addUnwrappedLine();
  parseObjCUntilAtEnd();
}
// Parses an ES6 "import"/"export" statement. Constructs that continue as
// regular declarations (function, class, enum, const, var) are left for the
// normal structural-element parsing; everything else is consumed up to ";".
void UnwrappedLineParser::parseJavaScriptEs6ImportExport() {
  assert(FormatTok->isOneOf(Keywords.kw_import, tok::kw_export));
  nextToken();
  // Consume the "default" in "export default class/function".
  if (FormatTok->is(tok::kw_default))
    nextToken();
  // Consume "function" and "default function", so that these get parsed as
  // free-standing JS functions, i.e. do not require a trailing semicolon.
  if (FormatTok->is(Keywords.kw_function)) {
    nextToken();
    return;
  }
  if (FormatTok->isOneOf(tok::kw_const, tok::kw_class, tok::kw_enum,
                         Keywords.kw_var))
    return; // Fall through to parsing the corresponding structure.
  // A named import/export list: "{ foo, bar }".
  if (FormatTok->is(tok::l_brace)) {
    FormatTok->BlockKind = BK_Block;
    parseBracedList();
  }
  // Consume the remainder (e.g. the module specifier) up to ";" or "{".
  while (!eof() && FormatTok->isNot(tok::semi) &&
         FormatTok->isNot(tok::l_brace)) {
    nextToken();
  }
}
// Dumps \p Line -- its level, preprocessor flag, and tokens -- to
// llvm::dbgs(), then recursively dumps the child lines nested under each
// token. Debug-build helper only.
LLVM_ATTRIBUTE_UNUSED static void printDebugInfo(const UnwrappedLine &Line,
                                                 StringRef Prefix = "") {
  llvm::dbgs() << Prefix << "Line(" << Line.Level << ")"
               << (Line.InPPDirective ? " MACRO" : "") << ": ";
  // First pass: print every token with its annotated type.
  for (const UnwrappedLineNode &Node : Line.Tokens)
    llvm::dbgs() << Node.Tok->Tok.getName() << "[" << Node.Tok->Type << "] ";
  // Second pass: recurse into the children of each token.
  for (const UnwrappedLineNode &Node : Line.Tokens)
    for (const UnwrappedLine &Child : Node.Children)
      printDebugInfo(Child, "\nChild: ");
  llvm::dbgs() << "\n";
}
// Finishes the line currently under construction: moves its tokens into
// CurrentLines and then flushes any preprocessor-directive lines that were
// buffered while this line was incomplete.
void UnwrappedLineParser::addUnwrappedLine() {
  if (Line->Tokens.empty())
    return;
  DEBUG({
    if (CurrentLines == &Lines)
      printDebugInfo(*Line);
  });
  CurrentLines->push_back(std::move(*Line));
  Line->Tokens.clear();
  // PP directives encountered mid-line were stored away (see readToken());
  // emit them only once the surrounding line is complete, and only when we
  // are collecting top-level lines.
  if (CurrentLines == &Lines && !PreprocessorDirectives.empty()) {
    CurrentLines->append(
        std::make_move_iterator(PreprocessorDirectives.begin()),
        std::make_move_iterator(PreprocessorDirectives.end()));
    PreprocessorDirectives.clear();
  }
}
// True once the token stream has been exhausted.
bool UnwrappedLineParser::eof() const { return FormatTok->Tok.is(tok::eof); }
// Whether \p FormatTok starts on a new source line. Inside a preprocessor
// directive any preceding newline counts; outside one, only an unescaped
// newline does.
bool UnwrappedLineParser::isOnNewLine(const FormatToken &FormatTok) {
  return (Line->InPPDirective || FormatTok.HasUnescapedNewline) &&
         FormatTok.NewlinesBefore > 0;
}
void UnwrappedLineParser::flushComments(bool NewlineBeforeNext) {
bool JustComments = Line->Tokens.empty();
for (SmallVectorImpl<FormatToken *>::const_iterator
I = CommentsBeforeNextToken.begin(),
E = CommentsBeforeNextToken.end();
I != E; ++I) {
if (isOnNewLine(**I) && JustComments)
addUnwrappedLine();
pushToken(*I);
}
if (NewlineBeforeNext && JustComments)
addUnwrappedLine();
CommentsBeforeNextToken.clear();
}
// Appends the current token to the line under construction (flushing any
// pending comments first) and advances FormatTok to the next token.
void UnwrappedLineParser::nextToken() {
  if (eof())
    return;
  flushComments(isOnNewLine(*FormatTok));
  pushToken(FormatTok);
  readToken();
}
// Advances FormatTok to the next token belonging to the current line,
// handling preprocessor directives, merge-conflict markers, unreachable
// conditional regions, and comment buffering along the way.
void UnwrappedLineParser::readToken() {
  bool CommentsInCurrentLine = true;
  do {
    FormatTok = Tokens->getNextToken();
    assert(FormatTok);
    // A '#' at the start of a line begins a preprocessor directive; parse it
    // within its own scoped line state.
    while (!Line->InPPDirective && FormatTok->Tok.is(tok::hash) &&
           (FormatTok->HasUnescapedNewline || FormatTok->IsFirst)) {
      // If there is an unfinished unwrapped line, we flush the preprocessor
      // directives only after that unwrapped line was finished later.
      bool SwitchToPreprocessorLines = !Line->Tokens.empty();
      ScopedLineState BlockState(*this, SwitchToPreprocessorLines);
      // Comments stored before the preprocessor directive need to be output
      // before the preprocessor directive, at the same level as the
      // preprocessor directive, as we consider them to apply to the directive.
      flushComments(isOnNewLine(*FormatTok));
      parsePPDirective();
    }
    // Handle version-control merge-conflict markers, which are annotated as
    // TT_Conflict* tokens.
    while (FormatTok->Type == TT_ConflictStart ||
           FormatTok->Type == TT_ConflictEnd ||
           FormatTok->Type == TT_ConflictAlternative) {
      if (FormatTok->Type == TT_ConflictStart) {
        conditionalCompilationStart(/*Unreachable=*/false);
      } else if (FormatTok->Type == TT_ConflictAlternative) {
        conditionalCompilationAlternative();
      } else if (FormatTok->Type == TT_ConflictEnd) {
        conditionalCompilationEnd();
      }
      FormatTok = Tokens->getNextToken();
      FormatTok->MustBreakBefore = true;
    }
    // Skip tokens that sit in an unreachable conditional-compilation region.
    if (!PPStack.empty() && (PPStack.back() == PP_Unreachable) &&
        !Line->InPPDirective) {
      continue;
    }
    if (!FormatTok->Tok.is(tok::comment))
      return;
    if (isOnNewLine(*FormatTok) || FormatTok->IsFirst) {
      CommentsInCurrentLine = false;
    }
    // Trailing comments join the current line; comments starting on their own
    // line are buffered and attached once the next real token is read.
    if (CommentsInCurrentLine) {
      pushToken(FormatTok);
    } else {
      CommentsBeforeNextToken.push_back(FormatTok);
    }
  } while (!eof());
}
// Appends \p Tok to the line under construction, applying (and consuming) a
// pending forced line break if one was requested.
void UnwrappedLineParser::pushToken(FormatToken *Tok) {
  Line->Tokens.push_back(UnwrappedLineNode(Tok));
  if (!MustBreakBeforeNextToken)
    return;
  Line->Tokens.back().Tok->MustBreakBefore = true;
  MustBreakBeforeNextToken = false;
}
} // end namespace format
} // end namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Format/CMakeLists.txt | set(LLVM_LINK_COMPONENTS support)
# Build the clangFormat library from the sources of the formatting pipeline
# (token annotation, unwrapped-line parsing/formatting, whitespace output).
add_clang_library(clangFormat
  BreakableToken.cpp
  ContinuationIndenter.cpp
  Format.cpp
  FormatToken.cpp
  TokenAnnotator.cpp
  UnwrappedLineFormatter.cpp
  UnwrappedLineParser.cpp
  WhitespaceManager.cpp

  # Libraries clangFormat depends on at link time.
  LINK_LIBS
  clangBasic
  clangLex
  clangToolingCore
  )
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Format/ContinuationIndenter.h | //===--- ContinuationIndenter.h - Format C++ code ---------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file implements an indenter that manages the indentation of
/// continuations.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_FORMAT_CONTINUATIONINDENTER_H
#define LLVM_CLANG_LIB_FORMAT_CONTINUATIONINDENTER_H
#include "Encoding.h"
#include "FormatToken.h"
#include "clang/Format/Format.h"
#include "llvm/Support/Regex.h"
namespace clang {
class SourceManager;
namespace format {
class AnnotatedLine;
struct FormatToken;
struct LineState;
struct ParenState;
class WhitespaceManager;
/// \brief Computes the indentation of continuation lines and places tokens,
/// one at a time, while the layout algorithm explores different line-break
/// decisions via \c LineState.
class ContinuationIndenter {
public:
  /// \brief Constructs a \c ContinuationIndenter to format \p Line starting in
  /// column \p FirstIndent.
  ContinuationIndenter(const FormatStyle &Style,
                       const AdditionalKeywords &Keywords,
                       SourceManager &SourceMgr, WhitespaceManager &Whitespaces,
                       encoding::Encoding Encoding,
                       bool BinPackInconclusiveFunctions);

  /// \brief Get the initial state, i.e. the state after placing \p Line's
  /// first token at \p FirstIndent.
  LineState getInitialState(unsigned FirstIndent, const AnnotatedLine *Line,
                            bool DryRun);

  // FIXME: canBreak and mustBreak aren't strictly indentation-related. Find a
  // better home.
  /// \brief Returns \c true, if a line break after \p State is allowed.
  bool canBreak(const LineState &State);

  /// \brief Returns \c true, if a line break after \p State is mandatory.
  bool mustBreak(const LineState &State);

  /// \brief Appends the next token to \p State and updates information
  /// necessary for indentation.
  ///
  /// Puts the token on the current line if \p Newline is \c false and adds a
  /// line break and necessary indentation otherwise.
  ///
  /// If \p DryRun is \c false, also creates and stores the required
  /// \c Replacement.
  unsigned addTokenToState(LineState &State, bool Newline, bool DryRun,
                           unsigned ExtraSpaces = 0);

  /// \brief Get the column limit for this line. This is the style's column
  /// limit, potentially reduced for preprocessor definitions.
  unsigned getColumnLimit(const LineState &State) const;

private:
  /// \brief Mark the next token as consumed in \p State and modify its stacks
  /// accordingly.
  unsigned moveStateToNextToken(LineState &State, bool DryRun, bool Newline);

  /// \brief Update 'State' according to the next token's fake left parentheses.
  void moveStatePastFakeLParens(LineState &State, bool Newline);
  /// \brief Update 'State' according to the next token's fake r_parens.
  void moveStatePastFakeRParens(LineState &State);

  /// \brief Update 'State' according to the next token being one of "(<{[".
  void moveStatePastScopeOpener(LineState &State, bool Newline);
  /// \brief Update 'State' according to the next token being one of ")>}]".
  void moveStatePastScopeCloser(LineState &State);
  /// \brief Update 'State' with the next token opening a nested block.
  void moveStateToNewBlock(LineState &State);

  /// \brief If the current token sticks out over the end of the line, break
  /// it if possible.
  ///
  /// \returns An extra penalty if a token was broken, otherwise 0.
  ///
  /// The returned penalty will cover the cost of the additional line breaks and
  /// column limit violation in all lines except for the last one. The penalty
  /// for the column limit violation in the last line (and in single line
  /// tokens) is handled in \c addNextStateToQueue.
  unsigned breakProtrudingToken(const FormatToken &Current, LineState &State,
                                bool DryRun);

  /// \brief Appends the next token to \p State and updates information
  /// necessary for indentation.
  ///
  /// Puts the token on the current line.
  ///
  /// If \p DryRun is \c false, also creates and stores the required
  /// \c Replacement.
  void addTokenOnCurrentLine(LineState &State, bool DryRun,
                             unsigned ExtraSpaces);

  /// \brief Appends the next token to \p State and updates information
  /// necessary for indentation.
  ///
  /// Adds a line break and necessary indentation.
  ///
  /// If \p DryRun is \c false, also creates and stores the required
  /// \c Replacement.
  unsigned addTokenOnNewLine(LineState &State, bool DryRun);

  /// \brief Calculate the new column for a line wrap before the next token.
  unsigned getNewLineColumn(const LineState &State);

  /// \brief Adds a multiline token to the \p State.
  ///
  /// \returns Extra penalty for the first line of the literal: last line is
  /// handled in \c addNextStateToQueue, and the penalty for other lines doesn't
  /// matter, as we don't change them.
  unsigned addMultilineToken(const FormatToken &Current, LineState &State);

  /// \brief Returns \c true if the next token starts a multiline string
  /// literal.
  ///
  /// This includes implicitly concatenated strings, strings that will be broken
  /// by clang-format and string literals with escaped newlines.
  bool nextIsMultilineString(const LineState &State);

  // Configuration and collaborators, fixed for the lifetime of the indenter.
  FormatStyle Style;
  const AdditionalKeywords &Keywords;
  SourceManager &SourceMgr;
  WhitespaceManager &Whitespaces;
  encoding::Encoding Encoding;
  bool BinPackInconclusiveFunctions;
  // Matches comments that must not be touched (from Style.CommentPragmas).
  llvm::Regex CommentPragmasRegex;
};
/// \brief Formatting state tracked for one level of parentheses, braces or
/// brackets on the current unwrapped line.
struct ParenState {
  ParenState(unsigned Indent, unsigned IndentLevel, unsigned LastSpace,
             bool AvoidBinPacking, bool NoLineBreak)
      : Indent(Indent), IndentLevel(IndentLevel), LastSpace(LastSpace),
        NestedBlockIndent(Indent), BreakBeforeClosingBrace(false),
        AvoidBinPacking(AvoidBinPacking), BreakBeforeParameter(false),
        NoLineBreak(NoLineBreak), LastOperatorWrapped(true),
        ContainsLineBreak(false), ContainsUnwrappedBuilder(false),
        AlignColons(true), ObjCSelectorNameFound(false),
        HasMultipleNestedBlocks(false), NestedBlockInlined(false) {}

  /// \brief The position to which a specific parenthesis level needs to be
  /// indented.
  unsigned Indent;

  /// \brief The number of indentation levels of the block.
  unsigned IndentLevel;

  /// \brief The position of the last space on each level.
  ///
  /// Used e.g. to break like:
  /// functionCall(Parameter, otherCall(
  ///                             OtherParameter));
  unsigned LastSpace;

  /// \brief If a block relative to this parenthesis level gets wrapped, indent
  /// it this much.
  unsigned NestedBlockIndent;

  /// \brief The position the first "<<" operator encountered on each level.
  ///
  /// Used to align "<<" operators. 0 if no such operator has been encountered
  /// on a level.
  unsigned FirstLessLess = 0;

  /// \brief The column of a \c ? in a conditional expression;
  unsigned QuestionColumn = 0;

  /// \brief The position of the colon in an ObjC method declaration/call.
  unsigned ColonPos = 0;

  /// \brief The start of the most recent function in a builder-type call.
  unsigned StartOfFunctionCall = 0;

  /// \brief Contains the start of array subscript expressions, so that they
  /// can be aligned.
  unsigned StartOfArraySubscripts = 0;

  /// \brief If a nested name specifier was broken over multiple lines, this
  /// contains the start column of the second line. Otherwise 0.
  unsigned NestedNameSpecifierContinuation = 0;

  /// \brief If a call expression was broken over multiple lines, this
  /// contains the start column of the second line. Otherwise 0.
  unsigned CallContinuation = 0;

  /// \brief The column of the first variable name in a variable declaration.
  ///
  /// Used to align further variables if necessary.
  unsigned VariablePos = 0;

  /// \brief Whether a newline needs to be inserted before the block's closing
  /// brace.
  ///
  /// We only want to insert a newline before the closing brace if there also
  /// was a newline after the beginning left brace.
  bool BreakBeforeClosingBrace : 1;

  /// \brief Avoid bin packing, i.e. multiple parameters/elements on multiple
  /// lines, in this context.
  bool AvoidBinPacking : 1;

  /// \brief Break after the next comma (or all the commas in this context if
  /// \c AvoidBinPacking is \c true).
  bool BreakBeforeParameter : 1;

  /// \brief Line breaking in this context would break a formatting rule.
  bool NoLineBreak : 1;

  /// \brief True if the last binary operator on this level was wrapped to the
  /// next line.
  bool LastOperatorWrapped : 1;

  /// \brief \c true if this \c ParenState already contains a line-break.
  ///
  /// The first line break in a certain \c ParenState causes extra penalty so
  /// that clang-format prefers similar breaks, i.e. breaks in the same
  /// parenthesis.
  bool ContainsLineBreak : 1;

  /// \brief \c true if this \c ParenState contains multiple segments of a
  /// builder-type call on one line.
  bool ContainsUnwrappedBuilder : 1;

  /// \brief \c true if the colons of the current ObjC method expression should
  /// be aligned.
  ///
  /// Not considered for memoization as it will always have the same value at
  /// the same token.
  bool AlignColons : 1;

  /// \brief \c true if at least one selector name was found in the current
  /// ObjC method expression.
  ///
  /// Not considered for memoization as it will always have the same value at
  /// the same token.
  bool ObjCSelectorNameFound : 1;

  /// \brief \c true if there are multiple nested blocks inside these parens.
  ///
  /// Not considered for memoization as it will always have the same value at
  /// the same token.
  bool HasMultipleNestedBlocks : 1;

  // \brief The start of a nested block (e.g. lambda introducer in C++ or
  // "function" in JavaScript) is not wrapped to a new line.
  bool NestedBlockInlined : 1;

  /// Lexicographic field-by-field comparison so ParenStates (and the stacks
  /// containing them) can serve as keys in ordered containers for state
  /// memoization. Fields documented above as "not considered for memoization"
  /// are deliberately omitted.
  bool operator<(const ParenState &Other) const {
    if (Indent != Other.Indent)
      return Indent < Other.Indent;
    if (LastSpace != Other.LastSpace)
      return LastSpace < Other.LastSpace;
    if (NestedBlockIndent != Other.NestedBlockIndent)
      return NestedBlockIndent < Other.NestedBlockIndent;
    if (FirstLessLess != Other.FirstLessLess)
      return FirstLessLess < Other.FirstLessLess;
    if (BreakBeforeClosingBrace != Other.BreakBeforeClosingBrace)
      return BreakBeforeClosingBrace;
    if (QuestionColumn != Other.QuestionColumn)
      return QuestionColumn < Other.QuestionColumn;
    if (AvoidBinPacking != Other.AvoidBinPacking)
      return AvoidBinPacking;
    if (BreakBeforeParameter != Other.BreakBeforeParameter)
      return BreakBeforeParameter;
    if (NoLineBreak != Other.NoLineBreak)
      return NoLineBreak;
    if (LastOperatorWrapped != Other.LastOperatorWrapped)
      return LastOperatorWrapped;
    if (ColonPos != Other.ColonPos)
      return ColonPos < Other.ColonPos;
    if (StartOfFunctionCall != Other.StartOfFunctionCall)
      return StartOfFunctionCall < Other.StartOfFunctionCall;
    if (StartOfArraySubscripts != Other.StartOfArraySubscripts)
      return StartOfArraySubscripts < Other.StartOfArraySubscripts;
    if (CallContinuation != Other.CallContinuation)
      return CallContinuation < Other.CallContinuation;
    if (VariablePos != Other.VariablePos)
      return VariablePos < Other.VariablePos;
    if (ContainsLineBreak != Other.ContainsLineBreak)
      return ContainsLineBreak;
    if (ContainsUnwrappedBuilder != Other.ContainsUnwrappedBuilder)
      return ContainsUnwrappedBuilder;
    if (NestedBlockInlined != Other.NestedBlockInlined)
      return NestedBlockInlined;
    return false;
  }
};
/// \brief The current state when indenting a unwrapped line.
///
/// As the indenting tries different combinations this is copied by value.
struct LineState {
  /// \brief The number of used columns in the current line.
  unsigned Column;

  /// \brief The token that needs to be next formatted.
  FormatToken *NextToken;

  /// \brief \c true if this line contains a continued for-loop section.
  bool LineContainsContinuedForLoopSection;

  /// \brief The \c NestingLevel at the start of this line.
  unsigned StartOfLineLevel;

  /// \brief The lowest \c NestingLevel on the current line.
  unsigned LowestLevelOnLine;

  /// \brief The start column of the string literal, if we're in a string
  /// literal sequence, 0 otherwise.
  unsigned StartOfStringLiteral;

  /// \brief A stack keeping track of properties applying to parenthesis
  /// levels.
  std::vector<ParenState> Stack;

  /// \brief Ignore the stack of \c ParenStates for state comparison.
  ///
  /// In long and deeply nested unwrapped lines, the current algorithm can
  /// be insufficient for finding the best formatting with a reasonable amount
  /// of time and memory. Setting this flag will effectively lead to the
  /// algorithm not analyzing some combinations. However, these combinations
  /// rarely contain the optimal solution: In short, accepting a higher
  /// penalty early would need to lead to different values in the \c
  /// ParenState stack (in an otherwise identical state) and these different
  /// values would need to lead to a significant amount of avoided penalty
  /// later.
  ///
  /// FIXME: Come up with a better algorithm instead.
  bool IgnoreStackForComparison;

  /// \brief The indent of the first token.
  unsigned FirstIndent;

  /// \brief The line that is being formatted.
  ///
  /// Does not need to be considered for memoization because it doesn't change.
  const AnnotatedLine *Line;

  /// \brief Comparison operator to be able to used \c LineState in \c map.
  ///
  /// Compares fields lexicographically; the ParenState stack is compared last
  /// and skipped entirely when either side has IgnoreStackForComparison set.
  bool operator<(const LineState &Other) const {
    if (NextToken != Other.NextToken)
      return NextToken < Other.NextToken;
    if (Column != Other.Column)
      return Column < Other.Column;
    if (LineContainsContinuedForLoopSection !=
        Other.LineContainsContinuedForLoopSection)
      return LineContainsContinuedForLoopSection;
    if (StartOfLineLevel != Other.StartOfLineLevel)
      return StartOfLineLevel < Other.StartOfLineLevel;
    if (LowestLevelOnLine != Other.LowestLevelOnLine)
      return LowestLevelOnLine < Other.LowestLevelOnLine;
    if (StartOfStringLiteral != Other.StartOfStringLiteral)
      return StartOfStringLiteral < Other.StartOfStringLiteral;
    if (IgnoreStackForComparison || Other.IgnoreStackForComparison)
      return false;
    return Stack < Other.Stack;
  }
};
} // end namespace format
} // end namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Format/UnwrappedLineFormatter.h | //===--- UnwrappedLineFormatter.h - Format C++ code -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Implements a combinatorial exploration of all the different
/// linebreaks unwrapped lines can be formatted in.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEFORMATTER_H
#define LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEFORMATTER_H
#include "ContinuationIndenter.h"
#include "clang/Format/Format.h"
#include <map>
#include <queue>
#include <string>
namespace clang {
namespace format {
class ContinuationIndenter;
class WhitespaceManager;
/// \brief Formats a sequence of \c AnnotatedLines by choosing line breaks and
/// indentation, delegating token placement to a \c ContinuationIndenter and
/// emitting the results through a \c WhitespaceManager.
class UnwrappedLineFormatter {
public:
  UnwrappedLineFormatter(ContinuationIndenter *Indenter,
                         WhitespaceManager *Whitespaces,
                         const FormatStyle &Style,
                         const AdditionalKeywords &Keywords,
                         bool *IncompleteFormat)
      : Indenter(Indenter), Whitespaces(Whitespaces), Style(Style),
        Keywords(Keywords), IncompleteFormat(IncompleteFormat) {}

  /// \brief Format the current block and return the penalty.
  unsigned format(const SmallVectorImpl<AnnotatedLine *> &Lines,
                  bool DryRun = false, int AdditionalIndent = 0,
                  bool FixBadIndentation = false);

private:
  /// \brief Add a new line and the required indent before the first Token
  /// of the \c UnwrappedLine if there was no structural parsing error.
  void formatFirstToken(FormatToken &RootToken,
                        const AnnotatedLine *PreviousLine, unsigned IndentLevel,
                        unsigned Indent, bool InPPDirective);

  /// \brief Returns the column limit for a line, taking into account whether we
  /// need an escaped newline due to a continued preprocessor directive.
  unsigned getColumnLimit(bool InPPDirective,
                          const AnnotatedLine *NextLine) const;

  // Cache to store the penalty of formatting a vector of AnnotatedLines
  // starting from a specific additional offset. Improves performance if there
  // are many nested blocks.
  std::map<std::pair<const SmallVectorImpl<AnnotatedLine *> *, unsigned>,
           unsigned> PenaltyCache;

  // Collaborators and configuration; none are owned by this class.
  ContinuationIndenter *Indenter;
  WhitespaceManager *Whitespaces;
  const FormatStyle &Style;
  const AdditionalKeywords &Keywords;
  // Set to true when a block could not be fully formatted.
  bool *IncompleteFormat;
};
} // end namespace format
} // end namespace clang
#endif // LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEFORMATTER_H
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Format/FormatToken.cpp | //===--- FormatToken.cpp - Format C++ code --------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file implements specific functions of \c FormatTokens and their
/// roles.
///
//===----------------------------------------------------------------------===//
#include "FormatToken.h"
#include "ContinuationIndenter.h"
#include "clang/Format/Format.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Debug.h"
#include <climits>
namespace clang {
namespace format {
// Returns the spelling of a TokenType enumerator. The name table is generated
// from LIST_TOKEN_TYPES so it stays in sync with the enum definition.
const char *getTokenTypeName(TokenType Type) {
  static const char *const TokNames[] = {
#define TYPE(X) #X,
    LIST_TOKEN_TYPES
#undef TYPE
    nullptr
  };
  if (Type < NUM_TOKEN_TYPES)
    return TokNames[Type];
  llvm_unreachable("unknown TokenType");
  return nullptr; // Unreachable; silences compilers unaware of the above.
}
// FIXME: This is copy&pasted from Sema. Put it in a common place and remove
// duplication.
// Returns true if this token spells a builtin simple type specifier
// (void/char/int/float/..., including MS __intNN and C++11 char16_t/char32_t,
// plus typeof/decltype/__underlying_type and resolved typename annotations).
bool FormatToken::isSimpleTypeSpecifier() const {
  switch (Tok.getKind()) {
  case tok::kw_short:
  case tok::kw_long:
  case tok::kw___int64:
  case tok::kw___int128:
  case tok::kw_signed:
  case tok::kw_unsigned:
  case tok::kw_void:
  case tok::kw_char:
  case tok::kw_int:
  case tok::kw_half:
  case tok::kw_float:
  case tok::kw_double:
  case tok::kw_wchar_t:
  case tok::kw_bool:
  case tok::kw___underlying_type:
  case tok::annot_typename:
  case tok::kw_char16_t:
  case tok::kw_char32_t:
  case tok::kw_typeof:
  case tok::kw_decltype:
    return true;
  default:
    return false;
  }
}
// Out-of-line definition anchors TokenRole's vtable in this file.
TokenRole::~TokenRole() {}
// Default implementation: roles precompute nothing; subclasses override.
void TokenRole::precomputeFormattingInfos(const FormatToken *Token) {}
// Lays out a braced init list in columns using the best ColumnFormat computed
// in precomputeFormattingInfos(). Returns the accumulated penalty, or a heavy
// penalty (10000) when no column layout fits so bin-packing is discouraged.
unsigned CommaSeparatedList::formatAfterToken(LineState &State,
                                              ContinuationIndenter *Indenter,
                                              bool DryRun) {
  if (State.NextToken == nullptr || !State.NextToken->Previous)
    return 0;
  // Ensure that we start on the opening brace.
  const FormatToken *LBrace =
      State.NextToken->Previous->getPreviousNonComment();
  if (!LBrace || LBrace->isNot(tok::l_brace) || LBrace->BlockKind == BK_Block ||
      LBrace->Type == TT_DictLiteral ||
      LBrace->Next->Type == TT_DesignatedInitializerPeriod)
    return 0;
  // Calculate the number of code points we have to format this list. As the
  // first token is already placed, we have to subtract it.
  unsigned RemainingCodePoints =
      Style.ColumnLimit - State.Column + State.NextToken->Previous->ColumnWidth;
  // Find the best ColumnFormat, i.e. the best number of columns to use.
  const ColumnFormat *Format = getColumnFormat(RemainingCodePoints);
  // If no ColumnFormat can be used, the braced list would generally be
  // bin-packed. Add a severe penalty to this so that column layouts are
  // preferred if possible.
  if (!Format)
    return 10000;
  // Format the entire list.
  unsigned Penalty = 0;
  unsigned Column = 0;
  unsigned Item = 0;
  while (State.NextToken != LBrace->MatchingParen) {
    bool NewLine = false;
    unsigned ExtraSpaces = 0;
    // If the previous token was one of our commas, we are now on the next item.
    if (Item < Commas.size() && State.NextToken->Previous == Commas[Item]) {
      if (!State.NextToken->isTrailingComment()) {
        // Pad the finished column up to its full width.
        ExtraSpaces += Format->ColumnSizes[Column] - ItemLengths[Item];
        ++Column;
      }
      ++Item;
    }
    // Start a new row when the current one is full or a break is forced.
    if (Column == Format->Columns || State.NextToken->MustBreakBefore) {
      Column = 0;
      NewLine = true;
    }
    // Place token using the continuation indenter and store the penalty.
    Penalty += Indenter->addTokenToState(State, NewLine, DryRun, ExtraSpaces);
  }
  return Penalty;
}
// Called when formatting resumes at a token inside this list: only propagates
// the bin-packing restriction; no column layout is attempted mid-list.
unsigned CommaSeparatedList::formatFromToken(LineState &State,
                                             ContinuationIndenter *Indenter,
                                             bool DryRun) {
  if (HasNestedBracedList)
    State.Stack.back().AvoidBinPacking = true;
  return 0; // No extra penalty.
}
// Returns the length in code points between Begin and End (both included),
// assuming that the entire sequence is put on a single line. Computed from
// the precalculated TotalLength running sums rather than by re-measuring.
static unsigned CodePointsBetween(const FormatToken *Begin,
                                  const FormatToken *End) {
  assert(End->TotalLength >= Begin->TotalLength);
  return End->TotalLength - Begin->TotalLength + Begin->ColumnWidth;
}
// Precomputes, for a braced list headed by \p Token, the per-item lengths and
// the set of viable column layouts (Formats) later consulted by
// formatAfterToken()/getColumnFormat().
void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
  // FIXME: At some point we might want to do this for other lists, too.
  if (!Token->MatchingParen || Token->isNot(tok::l_brace))
    return;
  // In C++11 braced list style, we should not format in columns unless they
  // have many items (20 or more) or we allow bin-packing of function call
  // arguments.
  if (Style.Cpp11BracedListStyle && !Style.BinPackArguments &&
      Commas.size() < 19)
    return;
  // Column format doesn't really make sense if we don't align after brackets.
  if (!Style.AlignAfterOpenBracket)
    return;
  FormatToken *ItemBegin = Token->Next;
  while (ItemBegin->isTrailingComment())
    ItemBegin = ItemBegin->Next;
  SmallVector<bool, 8> MustBreakBeforeItem;
  // The lengths of an item if it is put at the end of the line. This includes
  // trailing comments which are otherwise ignored for column alignment.
  SmallVector<unsigned, 8> EndOfLineItemLength;
  bool HasSeparatingComment = false;
  // Walk the Commas.size() + 1 items, measuring each one.
  for (unsigned i = 0, e = Commas.size() + 1; i != e; ++i) {
    // Skip comments on their own line.
    while (ItemBegin->HasUnescapedNewline && ItemBegin->isTrailingComment()) {
      ItemBegin = ItemBegin->Next;
      HasSeparatingComment = i > 0;
    }
    MustBreakBeforeItem.push_back(ItemBegin->MustBreakBefore);
    if (ItemBegin->is(tok::l_brace))
      HasNestedBracedList = true;
    const FormatToken *ItemEnd = nullptr;
    if (i == Commas.size()) {
      // Last item: it ends at the closing brace.
      ItemEnd = Token->MatchingParen;
      const FormatToken *NonCommentEnd = ItemEnd->getPreviousNonComment();
      ItemLengths.push_back(CodePointsBetween(ItemBegin, NonCommentEnd));
      if (Style.Cpp11BracedListStyle) {
        // In Cpp11 braced list style, the } and possibly other subsequent
        // tokens will need to stay on a line with the last element.
        while (ItemEnd->Next && !ItemEnd->Next->CanBreakBefore)
          ItemEnd = ItemEnd->Next;
      } else {
        // In other braced lists styles, the "}" can be wrapped to the new line.
        ItemEnd = Token->MatchingParen->Previous;
      }
    } else {
      ItemEnd = Commas[i];
      // The comma is counted as part of the item when calculating the length.
      ItemLengths.push_back(CodePointsBetween(ItemBegin, ItemEnd));
      // Consume trailing comments so they are included in EndOfLineItemLength.
      if (ItemEnd->Next && !ItemEnd->Next->HasUnescapedNewline &&
          ItemEnd->Next->isTrailingComment())
        ItemEnd = ItemEnd->Next;
    }
    EndOfLineItemLength.push_back(CodePointsBetween(ItemBegin, ItemEnd));
    // If there is a trailing comma in the list, the next item will start at the
    // closing brace. Don't create an extra item for this.
    if (ItemEnd->getNextNonComment() == Token->MatchingParen)
      break;
    ItemBegin = ItemEnd->Next;
  }
  // Don't use column layout for nested lists, lists with few elements and in
  // presence of separating comments.
  if (Token->NestingLevel != 0 || Commas.size() < 5 || HasSeparatingComment)
    return;
  // We can never place more than ColumnLimit / 3 items in a row (because of the
  // spaces and the comma).
  unsigned MaxItems = Style.ColumnLimit / 3;
  std::vector<unsigned> MinSizeInColumn;
  MinSizeInColumn.reserve(MaxItems);
  // Try every column count and keep each layout that fits the column limit.
  for (unsigned Columns = 1; Columns <= MaxItems; ++Columns) {
    ColumnFormat Format;
    Format.Columns = Columns;
    Format.ColumnSizes.resize(Columns);
    MinSizeInColumn.assign(Columns, UINT_MAX);
    Format.LineCount = 1;
    bool HasRowWithSufficientColumns = false;
    unsigned Column = 0;
    for (unsigned i = 0, e = ItemLengths.size(); i != e; ++i) {
      assert(i < MustBreakBeforeItem.size());
      if (MustBreakBeforeItem[i] || Column == Columns) {
        ++Format.LineCount;
        Column = 0;
      }
      if (Column == Columns - 1)
        HasRowWithSufficientColumns = true;
      unsigned Length =
          (Column == Columns - 1) ? EndOfLineItemLength[i] : ItemLengths[i];
      Format.ColumnSizes[Column] = std::max(Format.ColumnSizes[Column], Length);
      MinSizeInColumn[Column] = std::min(MinSizeInColumn[Column], Length);
      ++Column;
    }
    // If all rows are terminated early (e.g. by trailing comments), we don't
    // need to look further.
    if (!HasRowWithSufficientColumns)
      break;
    Format.TotalWidth = Columns - 1; // Width of the N-1 spaces.
    for (unsigned i = 0; i < Columns; ++i)
      Format.TotalWidth += Format.ColumnSizes[i];
    // Don't use this Format, if the difference between the longest and shortest
    // element in a column exceeds a threshold to avoid excessive spaces.
    if ([&] {
          for (unsigned i = 0; i < Columns - 1; ++i)
            if (Format.ColumnSizes[i] - MinSizeInColumn[i] > 10)
              return true;
          return false;
        }())
      continue;
    // Ignore layouts that are bound to violate the column limit.
    if (Format.TotalWidth > Style.ColumnLimit)
      continue;
    Formats.push_back(Format);
  }
}
const CommaSeparatedList::ColumnFormat *
CommaSeparatedList::getColumnFormat(unsigned RemainingCharacters) const {
  // Walk the precomputed formats from the most columns to the fewest and
  // remember the last one that fits into the available space. Stop as soon as
  // a fitting candidate would need more lines than the best fit found so far.
  const ColumnFormat *BestFormat = nullptr;
  for (unsigned i = Formats.size(); i > 0; --i) {
    const ColumnFormat &Candidate = Formats[i - 1];
    if (Candidate.TotalWidth > RemainingCharacters)
      continue;
    if (BestFormat && Candidate.LineCount > BestFormat->LineCount)
      break;
    BestFormat = &Candidate;
  }
  return BestFormat;
}
} // namespace format
} // namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Format/Encoding.h | //===--- Encoding.h - Format C++ code -------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Contains functions for text encoding manipulation. Supports UTF-8,
/// 8-bit encodings and escape sequences in C++ string literals.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_FORMAT_ENCODING_H
#define LLVM_CLANG_LIB_FORMAT_ENCODING_H
#include "clang/Basic/LLVM.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Unicode.h"
namespace clang {
namespace format {
namespace encoding {
/// \brief The text encodings this module distinguishes when measuring text.
enum Encoding {
  Encoding_UTF8,   // Text decodes as legal UTF-8 (see detectEncoding below).
  Encoding_Unknown // We treat all other encodings as 8-bit encodings.
};
/// \brief Detects the encoding of \p Text: UTF-8 if the whole buffer decodes
/// as a legal UTF-8 sequence, otherwise some 8-bit encoding we cannot name.
inline Encoding detectEncoding(StringRef Text) {
  const UTF8 *Begin = reinterpret_cast<const UTF8 *>(Text.begin());
  const UTF8 *End = reinterpret_cast<const UTF8 *>(Text.end());
  return ::isLegalUTF8String(&Begin, End) ? Encoding_UTF8 : Encoding_Unknown;
}
/// \brief Counts the code points in UTF-8 encoded \p Text by hopping from one
/// leading byte to the next using each code point's byte length.
inline unsigned getCodePointCountUTF8(StringRef Text) {
  unsigned CodePoints = 0;
  size_t Pos = 0;
  while (Pos < Text.size()) {
    Pos += getNumBytesForUTF8(Text[Pos]);
    ++CodePoints;
  }
  return CodePoints;
}
/// \brief Gets the number of code points in the Text using the specified
/// Encoding.
inline unsigned getCodePointCount(StringRef Text, Encoding Encoding) {
  // In any non-UTF-8 (8-bit) encoding, each byte is exactly one code point.
  if (Encoding == Encoding_UTF8)
    return getCodePointCountUTF8(Text);
  return Text.size();
}
/// \brief Returns the number of columns required to display the \p Text on a
/// generic Unicode-capable terminal. Text is assumed to use the specified
/// \p Encoding.
inline unsigned columnWidth(StringRef Text, Encoding Encoding) {
  // For 8-bit encodings, one byte is assumed to occupy one column.
  if (Encoding != Encoding_UTF8)
    return Text.size();
  const int ContentWidth = llvm::sys::unicode::columnWidthUTF8(Text);
  // FIXME: Figure out the correct way to handle this in the presence of both
  // printable and unprintable multi-byte UTF-8 characters. Falling back to
  // returning the number of bytes may cause problems, as columnWidth suddenly
  // becomes non-additive.
  if (ContentWidth < 0)
    return Text.size();
  return ContentWidth;
}
/// \brief Returns the number of columns required to display the \p Text,
/// starting from the \p StartColumn on a terminal with the \p TabWidth. The
/// text is assumed to use the specified \p Encoding.
inline unsigned columnWidthWithTabs(StringRef Text, unsigned StartColumn,
                                    unsigned TabWidth, Encoding Encoding) {
  // Accumulate the width segment by segment, expanding every tab to the next
  // tab stop relative to StartColumn.
  unsigned TotalWidth = 0;
  StringRef Remaining = Text;
  StringRef::size_type TabPos;
  while ((TabPos = Remaining.find('\t')) != StringRef::npos) {
    TotalWidth += columnWidth(Remaining.substr(0, TabPos), Encoding);
    TotalWidth += TabWidth - (TotalWidth + StartColumn) % TabWidth;
    Remaining = Remaining.substr(TabPos + 1);
  }
  return TotalWidth + columnWidth(Remaining, Encoding);
}
/// \brief Gets the number of bytes in a sequence representing a single
/// codepoint and starting with FirstChar in the specified Encoding.
inline unsigned getCodePointNumBytes(char FirstChar, Encoding Encoding) {
  // Only UTF-8 has multi-byte code points; every other supported encoding is
  // a single-byte encoding.
  if (Encoding == Encoding_UTF8)
    return getNumBytesForUTF8(FirstChar);
  return 1;
}
// True exactly for the octal digit characters '0'..'7'.
inline bool isOctDigit(char c) { return c >= '0' && c <= '7'; }
// True exactly for the hexadecimal digit characters 0-9, a-f and A-F.
inline bool isHexDigit(char c) {
  if (c >= '0' && c <= '9')
    return true;
  return (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F');
}
/// \brief Gets the length of an escape sequence inside a C++ string literal.
/// Text should span from the beginning of the escape sequence (starting with a
/// backslash) to the end of the string literal.
inline unsigned getEscapeSequenceLength(StringRef Text) {
  assert(Text[0] == '\\');
  // A lone trailing backslash: treat it as a one-character sequence.
  if (Text.size() < 2)
    return 1;

  const char Marker = Text[1];
  // Universal character names have a fixed length.
  if (Marker == 'u')
    return 6;  // "\uXXXX"
  if (Marker == 'U')
    return 10; // "\UXXXXXXXX"
  if (Marker == 'x') {
    // Hex escapes consume every hex digit following "\x".
    unsigned Length = 2; // Point after '\x'.
    while (Length < Text.size() && isHexDigit(Text[Length]))
      ++Length;
    return Length;
  }
  if (isOctDigit(Marker)) {
    // Octal escapes take at most three octal digits after the backslash.
    unsigned Length = 1;
    while (Length < Text.size() && Length < 4 && isOctDigit(Text[Length]))
      ++Length;
    return Length;
  }
  // All remaining escapes ("\n", "\t", "\\", ...) are two characters long.
  return 2;
}
} // namespace encoding
} // namespace format
} // namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Format/WhitespaceManager.h | //===--- WhitespaceManager.h - Format C++ code ------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief WhitespaceManager class manages whitespace around tokens and their
/// replacements.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_FORMAT_WHITESPACEMANAGER_H
#define LLVM_CLANG_LIB_FORMAT_WHITESPACEMANAGER_H
#include "TokenAnnotator.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include <string>
namespace clang {
namespace format {
/// \brief Manages the whitespaces around tokens and their replacements.
///
/// This includes special handling for certain constructs, e.g. the alignment of
/// trailing line comments.
///
/// To guarantee correctness of alignment operations, the \c WhitespaceManager
/// must be informed about every token in the source file; for each token, there
/// must be exactly one call to either \c replaceWhitespace or
/// \c addUntouchableToken.
///
/// There may be multiple calls to \c breakToken for a given token.
class WhitespaceManager {
public:
  WhitespaceManager(SourceManager &SourceMgr, const FormatStyle &Style,
                    bool UseCRLF)
      : SourceMgr(SourceMgr), Style(Style), UseCRLF(UseCRLF) {}

  /// \brief Prepares the \c WhitespaceManager for another run.
  void reset();

  /// \brief Replaces the whitespace in front of \p Tok. Only call once for
  /// each \c AnnotatedToken.
  void replaceWhitespace(FormatToken &Tok, unsigned Newlines,
                         unsigned IndentLevel, unsigned Spaces,
                         unsigned StartOfTokenColumn,
                         bool InPPDirective = false);

  /// \brief Adds information about an unchangeable token's whitespace.
  ///
  /// Needs to be called for every token for which \c replaceWhitespace
  /// was not called.
  void addUntouchableToken(const FormatToken &Tok, bool InPPDirective);

  /// \brief Inserts or replaces whitespace in the middle of a token.
  ///
  /// Inserts \p PreviousPostfix, \p Newlines, \p Spaces and \p CurrentPrefix
  /// (in this order) at \p Offset inside \p Tok, replacing \p ReplaceChars
  /// characters.
  ///
  /// Note: \p Spaces can be negative to retain information about initial
  /// relative column offset between a line of a block comment and the start of
  /// the comment. This negative offset may be compensated by trailing comment
  /// alignment here. In all other cases negative \p Spaces will be truncated to
  /// 0.
  ///
  /// When \p InPPDirective is true, escaped newlines are inserted. \p Spaces is
  /// used to align backslashes correctly.
  void replaceWhitespaceInToken(const FormatToken &Tok, unsigned Offset,
                                unsigned ReplaceChars,
                                StringRef PreviousPostfix,
                                StringRef CurrentPrefix, bool InPPDirective,
                                unsigned Newlines, unsigned IndentLevel,
                                int Spaces);

  /// \brief Returns all the \c Replacements created during formatting.
  const tooling::Replacements &generateReplacements();

private:
  /// \brief Represents a change before a token, a break inside a token,
  /// or the layout of an unchanged token (or whitespace within).
  struct Change {
    /// \brief Functor to sort changes in original source order.
    class IsBeforeInFile {
    public:
      IsBeforeInFile(const SourceManager &SourceMgr) : SourceMgr(SourceMgr) {}
      bool operator()(const Change &C1, const Change &C2) const;

    private:
      const SourceManager &SourceMgr;
    };

    Change() {}

    /// \brief Creates a \c Change.
    ///
    /// The generated \c Change will replace the characters at
    /// \p OriginalWhitespaceRange with a concatenation of
    /// \p PreviousLinePostfix, \p NewlinesBefore line breaks, \p Spaces spaces
    /// and \p CurrentLinePrefix.
    ///
    /// \p StartOfTokenColumn and \p InPPDirective will be used to lay out
    /// trailing comments and escaped newlines.
    Change(bool CreateReplacement, const SourceRange &OriginalWhitespaceRange,
           unsigned IndentLevel, int Spaces, unsigned StartOfTokenColumn,
           unsigned NewlinesBefore, StringRef PreviousLinePostfix,
           StringRef CurrentLinePrefix, tok::TokenKind Kind,
           bool ContinuesPPDirective);

    // Whether this change should actually be materialized as a replacement.
    bool CreateReplacement;
    // Changes might be in the middle of a token, so we cannot just keep the
    // FormatToken around to query its information.
    SourceRange OriginalWhitespaceRange;
    unsigned StartOfTokenColumn;
    unsigned NewlinesBefore;
    std::string PreviousLinePostfix;
    std::string CurrentLinePrefix;

    // The kind of the token whose whitespace this change replaces, or in which
    // this change inserts whitespace.
    // FIXME: Currently this is not set correctly for breaks inside comments, as
    // the \c BreakableToken is still doing its own alignment.
    tok::TokenKind Kind;
    bool ContinuesPPDirective;

    // The number of nested blocks the token is in. This is used to add tabs
    // only for the indentation, and not for alignment, when
    // UseTab = US_ForIndentation.
    unsigned IndentLevel;

    // The number of spaces in front of the token or broken part of the token.
    // This will be adapted when aligning tokens.
    // Can be negative to retain information about the initial relative offset
    // of the lines in a block comment. This is used when aligning trailing
    // comments. Uncompensated negative offset is truncated to 0.
    int Spaces;

    // \c IsTrailingComment, \c TokenLength, \c PreviousEndOfTokenColumn and
    // \c EscapedNewlineColumn will be calculated in
    // \c calculateLineBreakInformation.
    bool IsTrailingComment;
    unsigned TokenLength;
    unsigned PreviousEndOfTokenColumn;
    unsigned EscapedNewlineColumn;

    // These fields are used to retain correct relative line indentation in a
    // block comment when aligning trailing comments.
    //
    // If this Change represents a continuation of a block comment,
    // \c StartOfBlockComment is pointer to the first Change in the block
    // comment. \c IndentationOffset is a relative column offset to this
    // change, so that the correct column can be reconstructed at the end of
    // the alignment process.
    const Change *StartOfBlockComment;
    int IndentationOffset;
  };

  /// \brief Calculate \c IsTrailingComment, \c TokenLength for the last tokens
  /// or token parts in a line and \c PreviousEndOfTokenColumn and
  /// \c EscapedNewlineColumn for the first tokens or token parts in a line.
  void calculateLineBreakInformation();

  /// \brief Align consecutive assignments over all \c Changes.
  void alignConsecutiveAssignments();

  /// \brief Align consecutive assignments from change \p Start to change \p End
  /// at the specified \p Column.
  void alignConsecutiveAssignments(unsigned Start, unsigned End,
                                   unsigned Column);

  /// \brief Align trailing comments over all \c Changes.
  void alignTrailingComments();

  /// \brief Align trailing comments from change \p Start to change \p End at
  /// the specified \p Column.
  void alignTrailingComments(unsigned Start, unsigned End, unsigned Column);

  /// \brief Align escaped newlines over all \c Changes.
  void alignEscapedNewlines();

  /// \brief Align escaped newlines from change \p Start to change \p End at
  /// the specified \p Column.
  void alignEscapedNewlines(unsigned Start, unsigned End, unsigned Column);

  /// \brief Fill \c Replaces with the replacements for all effective changes.
  void generateChanges();

  /// \brief Stores \p Text as the replacement for the whitespace in \p Range.
  void storeReplacement(const SourceRange &Range, StringRef Text);
  void appendNewlineText(std::string &Text, unsigned Newlines);
  void appendNewlineText(std::string &Text, unsigned Newlines,
                         unsigned PreviousEndOfTokenColumn,
                         unsigned EscapedNewlineColumn);
  void appendIndentText(std::string &Text, unsigned IndentLevel,
                        unsigned Spaces, unsigned WhitespaceStartColumn);

  /// \brief The changes collected during formatting, one per informed token
  /// (plus any breaks inside tokens).
  SmallVector<Change, 16> Changes;
  SourceManager &SourceMgr;
  /// \brief The replacements filled in by \c generateChanges().
  tooling::Replacements Replaces;
  const FormatStyle &Style;
  // NOTE(review): presumably selects "\r\n" line endings when true -- confirm
  // against the implementation file.
  bool UseCRLF;
};
} // namespace format
} // namespace clang
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Format/FormatToken.h | //===--- FormatToken.h - Format C++ code ------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file contains the declaration of the FormatToken, a wrapper
/// around Token with additional information related to formatting.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_FORMAT_FORMATTOKEN_H
#define LLVM_CLANG_LIB_FORMAT_FORMATTOKEN_H
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Format/Format.h"
#include "clang/Lex/Lexer.h"
#include <memory>
namespace clang {
namespace format {
// X-macro listing every token type clang-format distinguishes beyond the
// lexer's tok::TokenKind; it expands via TYPE() to build the TokenType enum
// below (and can be re-expanded elsewhere, e.g. to produce the names returned
// by getTokenTypeName).
#define LIST_TOKEN_TYPES \
  TYPE(ArrayInitializerLSquare) \
  TYPE(ArraySubscriptLSquare) \
  TYPE(AttributeParen) \
  TYPE(BinaryOperator) \
  TYPE(BitFieldColon) \
  TYPE(BlockComment) \
  TYPE(CastRParen) \
  TYPE(ConditionalExpr) \
  TYPE(ConflictAlternative) \
  TYPE(ConflictEnd) \
  TYPE(ConflictStart) \
  TYPE(CtorInitializerColon) \
  TYPE(CtorInitializerComma) \
  TYPE(DesignatedInitializerPeriod) \
  TYPE(DictLiteral) \
  TYPE(ForEachMacro) \
  TYPE(FunctionAnnotationRParen) \
  TYPE(FunctionDeclarationName) \
  TYPE(FunctionLBrace) \
  TYPE(FunctionTypeLParen) \
  TYPE(ImplicitStringLiteral) \
  TYPE(InheritanceColon) \
  TYPE(InlineASMBrace) \
  TYPE(InlineASMColon) \
  TYPE(JavaAnnotation) \
  TYPE(JsComputedPropertyName) \
  TYPE(JsFatArrow) \
  TYPE(JsTypeColon) \
  TYPE(JsTypeOptionalQuestion) \
  TYPE(LambdaArrow) \
  TYPE(LambdaLSquare) \
  TYPE(LeadingJavaAnnotation) \
  TYPE(LineComment) \
  TYPE(MacroBlockBegin) \
  TYPE(MacroBlockEnd) \
  TYPE(ObjCBlockLBrace) \
  TYPE(ObjCBlockLParen) \
  TYPE(ObjCDecl) \
  TYPE(ObjCForIn) \
  TYPE(ObjCMethodExpr) \
  TYPE(ObjCMethodSpecifier) \
  TYPE(ObjCProperty) \
  TYPE(ObjCStringLiteral) \
  TYPE(OverloadedOperator) \
  TYPE(OverloadedOperatorLParen) \
  TYPE(PointerOrReference) \
  TYPE(PureVirtualSpecifier) \
  TYPE(RangeBasedForLoopColon) \
  TYPE(RegexLiteral) \
  TYPE(SelectorName) \
  TYPE(StartOfName) \
  TYPE(TemplateCloser) \
  TYPE(TemplateOpener) \
  TYPE(TemplateString) \
  TYPE(TrailingAnnotation) \
  TYPE(TrailingReturnArrow) \
  TYPE(TrailingUnaryOperator) \
  TYPE(UnaryOperator) \
  TYPE(Unknown)

// Determines the semantic role of a token for formatting purposes, refining
// the raw lexer kind (e.g. telling a cast's ")" apart from other parens).
enum TokenType {
#define TYPE(X) TT_##X,
  LIST_TOKEN_TYPES
#undef TYPE
  NUM_TOKEN_TYPES // Number of token types; keep this entry last.
};

/// \brief Determines the name of a token type.
const char *getTokenTypeName(TokenType Type);

// Represents what type of block a set of braces open.
enum BraceBlockKind { BK_Unknown, BK_Block, BK_BracedInit };

// The packing kind of a function's parameters.
enum ParameterPackingKind { PPK_BinPacked, PPK_OnePerLine, PPK_Inconclusive };

// The formatting decision recorded for a token (see FormatToken::Decision).
enum FormatDecision { FD_Unformatted, FD_Continue, FD_Break };

class TokenRole;
class AnnotatedLine;
/// \brief A wrapper around a \c Token storing information about the
/// whitespace characters preceding it.
struct FormatToken {
  FormatToken() {}

  /// \brief The \c Token.
  Token Tok;

  /// \brief The number of newlines immediately before the \c Token.
  ///
  /// This can be used to determine what the user wrote in the original code
  /// and thereby e.g. leave an empty line between two function definitions.
  unsigned NewlinesBefore = 0;

  /// \brief Whether there is at least one unescaped newline before the \c
  /// Token.
  bool HasUnescapedNewline = false;

  /// \brief The range of the whitespace immediately preceding the \c Token.
  SourceRange WhitespaceRange;

  /// \brief The offset just past the last '\n' in this token's leading
  /// whitespace (relative to \c WhiteSpaceStart). 0 if there is no '\n'.
  unsigned LastNewlineOffset = 0;

  /// \brief The width of the non-whitespace parts of the token (or its first
  /// line for multi-line tokens) in columns.
  /// We need this to correctly measure number of columns a token spans.
  unsigned ColumnWidth = 0;

  /// \brief Contains the width in columns of the last line of a multi-line
  /// token.
  unsigned LastLineColumnWidth = 0;

  /// \brief Whether the token text contains newlines (escaped or not).
  bool IsMultiline = false;

  /// \brief Indicates that this is the first token.
  bool IsFirst = false;

  /// \brief Whether there must be a line break before this token.
  ///
  /// This happens for example when a preprocessor directive ended directly
  /// before the token.
  bool MustBreakBefore = false;

  /// \brief The raw text of the token.
  ///
  /// Contains the raw token text without leading whitespace and without leading
  /// escaped newlines.
  StringRef TokenText;

  /// \brief Set to \c true if this token is an unterminated literal.
  bool IsUnterminatedLiteral = false;

  /// \brief Contains the kind of block if this token is a brace.
  BraceBlockKind BlockKind = BK_Unknown;

  /// \brief The refined formatting role of this token (see \c TokenType).
  TokenType Type = TT_Unknown;

  /// \brief The number of spaces that should be inserted before this token.
  unsigned SpacesRequiredBefore = 0;

  /// \brief \c true if it is allowed to break before this token.
  bool CanBreakBefore = false;

  /// \brief \c true if this is the ">" of "template<..>".
  bool ClosesTemplateDeclaration = false;

  /// \brief Number of parameters, if this is "(", "[" or "<".
  ///
  /// We don't need to distinguish functions with 0 parameters from functions
  /// with 1 parameter, so this can simply count the number of commas.
  unsigned ParameterCount = 0;

  /// \brief Number of parameters that are nested blocks,
  /// if this is "(", "[" or "<".
  unsigned BlockParameterCount = 0;

  /// \brief If this is a bracket ("<", "(", "[" or "{"), contains the kind of
  /// the surrounding bracket.
  tok::TokenKind ParentBracket = tok::unknown;

  /// \brief A token can have a special role that can carry extra information
  /// about the token's formatting.
  std::unique_ptr<TokenRole> Role;

  /// \brief If this is an opening parenthesis, how are the parameters packed?
  ParameterPackingKind PackingKind = PPK_Inconclusive;

  /// \brief The total length of the unwrapped line up to and including this
  /// token.
  unsigned TotalLength = 0;

  /// \brief The original 0-based column of this token, including expanded tabs.
  /// The configured TabWidth is used as tab width.
  unsigned OriginalColumn = 0;

  /// \brief The length of following tokens until the next natural split point,
  /// or the next token that can be broken.
  unsigned UnbreakableTailLength = 0;

  // FIXME: Come up with a 'cleaner' concept.
  /// \brief The binding strength of a token. This is a combined value of
  /// operator precedence, parenthesis nesting, etc.
  unsigned BindingStrength = 0;

  /// \brief The nesting level of this token, i.e. the number of surrounding (),
  /// [], {} or <>.
  unsigned NestingLevel = 0;

  /// \brief Penalty for inserting a line break before this token.
  unsigned SplitPenalty = 0;

  /// \brief If this is the first ObjC selector name in an ObjC method
  /// definition or call, this contains the length of the longest name.
  ///
  /// This being set to 0 means that the selectors should not be colon-aligned,
  /// e.g. because several of them are block-type.
  unsigned LongestObjCSelectorName = 0;

  /// \brief Stores the number of required fake parentheses and the
  /// corresponding operator precedence.
  ///
  /// If multiple fake parentheses start at a token, this vector stores them in
  /// reverse order, i.e. inner fake parenthesis first.
  SmallVector<prec::Level, 4> FakeLParens;
  /// \brief Insert this many fake ) after this token for correct indentation.
  unsigned FakeRParens = 0;

  /// \brief \c true if this token starts a binary expression, i.e. has at least
  /// one fake l_paren with a precedence greater than prec::Unknown.
  bool StartsBinaryExpression = false;
  /// \brief \c true if this token ends a binary expression.
  bool EndsBinaryExpression = false;

  /// \brief If this is an operator (or "."/"->") in a sequence of operators
  /// with the same precedence, contains the 0-based operator index.
  unsigned OperatorIndex = 0;

  /// \brief Is this the last operator (or "."/"->") in a sequence of operators
  /// with the same precedence?
  bool LastOperator = false;

  /// \brief Is this token part of a \c DeclStmt defining multiple variables?
  ///
  /// Only set if \c Type == \c TT_StartOfName.
  bool PartOfMultiVariableDeclStmt = false;

  /// \brief If this is a bracket, this points to the matching one.
  FormatToken *MatchingParen = nullptr;

  /// \brief The previous token in the unwrapped line.
  FormatToken *Previous = nullptr;

  /// \brief The next token in the unwrapped line.
  FormatToken *Next = nullptr;

  /// \brief If this token starts a block, this contains all the unwrapped lines
  /// in it.
  SmallVector<AnnotatedLine *, 1> Children;

  /// \brief Stores the formatting decision for the token once it was made.
  FormatDecision Decision = FD_Unformatted;

  /// \brief If \c true, this token has been fully formatted (indented and
  /// potentially re-formatted inside), and we do not allow further formatting
  /// changes.
  bool Finalized = false;

  bool is(tok::TokenKind Kind) const { return Tok.is(Kind); }
  bool is(TokenType TT) const { return Type == TT; }
  bool is(const IdentifierInfo *II) const {
    return II && II == Tok.getIdentifierInfo();
  }
  template <typename A, typename B> bool isOneOf(A K1, B K2) const {
    return is(K1) || is(K2);
  }
  template <typename A, typename B, typename... Ts>
  bool isOneOf(A K1, B K2, Ts... Ks) const {
    return is(K1) || isOneOf(K2, Ks...);
  }
  template <typename T> bool isNot(T Kind) const { return !is(Kind); }

  bool isStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); }

  bool isObjCAtKeyword(tok::ObjCKeywordKind Kind) const {
    return Tok.isObjCAtKeyword(Kind);
  }

  bool isAccessSpecifier(bool ColonRequired = true) const {
    return isOneOf(tok::kw_public, tok::kw_protected, tok::kw_private) &&
           (!ColonRequired || (Next && Next->is(tok::colon)));
  }

  /// \brief Determine whether the token is a simple-type-specifier.
  bool isSimpleTypeSpecifier() const;

  bool isObjCAccessSpecifier() const {
    return is(tok::at) && Next && (Next->isObjCAtKeyword(tok::objc_public) ||
                                   Next->isObjCAtKeyword(tok::objc_protected) ||
                                   Next->isObjCAtKeyword(tok::objc_package) ||
                                   Next->isObjCAtKeyword(tok::objc_private));
  }

  /// \brief Returns whether \p Tok is ([{ or a template opening <.
  bool opensScope() const {
    return isOneOf(tok::l_paren, tok::l_brace, tok::l_square,
                   TT_TemplateOpener);
  }
  /// \brief Returns whether \p Tok is )]} or a template closing >.
  bool closesScope() const {
    return isOneOf(tok::r_paren, tok::r_brace, tok::r_square,
                   TT_TemplateCloser);
  }

  /// \brief Returns \c true if this is a "." or "->" accessing a member.
  bool isMemberAccess() const {
    return isOneOf(tok::arrow, tok::period, tok::arrowstar) &&
           !isOneOf(TT_DesignatedInitializerPeriod, TT_TrailingReturnArrow,
                    TT_LambdaArrow);
  }

  bool isUnaryOperator() const {
    switch (Tok.getKind()) {
    case tok::plus:
    case tok::plusplus:
    case tok::minus:
    case tok::minusminus:
    case tok::exclaim:
    case tok::tilde:
    case tok::kw_sizeof:
    case tok::kw_alignof:
      return true;
    default:
      return false;
    }
  }

  bool isBinaryOperator() const {
    // Comma is a binary operator, but does not behave as such wrt. formatting.
    return getPrecedence() > prec::Comma;
  }

  bool isTrailingComment() const {
    return is(tok::comment) &&
           (is(TT_LineComment) || !Next || Next->NewlinesBefore > 0);
  }

  /// \brief Returns \c true if this is a keyword that can be used
  /// like a function call (e.g. sizeof, typeid, ...).
  bool isFunctionLikeKeyword() const {
    switch (Tok.getKind()) {
    case tok::kw_throw:
    case tok::kw_typeid:
    case tok::kw_return:
    case tok::kw_sizeof:
    case tok::kw_alignof:
    case tok::kw_alignas:
    case tok::kw_decltype:
    case tok::kw_noexcept:
    case tok::kw_static_assert:
    case tok::kw___attribute:
      return true;
    default:
      return false;
    }
  }

  /// \brief Returns actual token start location without leading escaped
  /// newlines and whitespace.
  ///
  /// This can be different to Tok.getLocation(), which includes leading escaped
  /// newlines.
  SourceLocation getStartOfNonWhitespace() const {
    return WhitespaceRange.getEnd();
  }

  prec::Level getPrecedence() const {
    return getBinOpPrecedence(Tok.getKind(), true, true);
  }

  /// \brief Returns the previous token ignoring comments.
  FormatToken *getPreviousNonComment() const {
    FormatToken *Tok = Previous;
    while (Tok && Tok->is(tok::comment))
      Tok = Tok->Previous;
    return Tok;
  }

  /// \brief Returns the next token ignoring comments.
  const FormatToken *getNextNonComment() const {
    const FormatToken *Tok = Next;
    while (Tok && Tok->is(tok::comment))
      Tok = Tok->Next;
    return Tok;
  }

  /// \brief Returns \c true if this tokens starts a block-type list, i.e. a
  /// list that should be indented with a block indent.
  bool opensBlockTypeList(const FormatStyle &Style) const {
    return is(TT_ArrayInitializerLSquare) ||
           (is(tok::l_brace) &&
            (BlockKind == BK_Block || is(TT_DictLiteral) ||
             (!Style.Cpp11BracedListStyle && NestingLevel == 0)));
  }

  /// \brief Same as opensBlockTypeList, but for the closing token.
  bool closesBlockTypeList(const FormatStyle &Style) const {
    return MatchingParen && MatchingParen->opensBlockTypeList(Style);
  }

private:
  // Disallow copying.
  FormatToken(const FormatToken &) = delete;
  void operator=(const FormatToken &) = delete;
};
class ContinuationIndenter;
struct LineState;

/// \brief Base class for the special formatting roles a token can carry
/// (see \c FormatToken::Role).
class TokenRole {
public:
  TokenRole(const FormatStyle &Style) : Style(Style) {}
  virtual ~TokenRole();

  /// \brief After the \c TokenAnnotator has finished annotating all the tokens,
  /// this function precomputes required information for formatting.
  virtual void precomputeFormattingInfos(const FormatToken *Token);

  /// \brief Apply the special formatting that the given role demands.
  ///
  /// Assumes that the token having this role is already formatted.
  ///
  /// Continues formatting from \p State leaving indentation to \p Indenter and
  /// returns the total penalty that this formatting incurs.
  virtual unsigned formatFromToken(LineState &State,
                                   ContinuationIndenter *Indenter,
                                   bool DryRun) {
    return 0; // Default: no special formatting, no penalty.
  }

  /// \brief Same as \c formatFromToken, but assumes that the first token has
  /// already been set thereby deciding on the first line break.
  virtual unsigned formatAfterToken(LineState &State,
                                    ContinuationIndenter *Indenter,
                                    bool DryRun) {
    return 0; // Default: no special formatting, no penalty.
  }

  /// \brief Notifies the \c Role that a comma was found.
  virtual void CommaFound(const FormatToken *Token) {}

protected:
  /// \brief The style options the role formats against.
  const FormatStyle &Style;
};
/// \brief A \c TokenRole attached to a token opening a comma-separated list;
/// it records the list's commas and precomputes column layouts for it.
class CommaSeparatedList : public TokenRole {
public:
  CommaSeparatedList(const FormatStyle &Style)
      : TokenRole(Style), HasNestedBracedList(false) {}

  void precomputeFormattingInfos(const FormatToken *Token) override;

  unsigned formatAfterToken(LineState &State, ContinuationIndenter *Indenter,
                            bool DryRun) override;

  unsigned formatFromToken(LineState &State, ContinuationIndenter *Indenter,
                           bool DryRun) override;

  /// \brief Adds \p Token as the next comma to the \c CommaSeparated list.
  void CommaFound(const FormatToken *Token) override {
    Commas.push_back(Token);
  }

private:
  /// \brief A struct that holds information on how to format a given list with
  /// a specific number of columns.
  struct ColumnFormat {
    /// \brief The number of columns to use.
    unsigned Columns;

    /// \brief The total width in characters.
    unsigned TotalWidth;

    /// \brief The number of lines required for this format.
    unsigned LineCount;

    /// \brief The size of each column in characters.
    SmallVector<unsigned, 8> ColumnSizes;
  };

  /// \brief Calculate which \c ColumnFormat fits best into
  /// \p RemainingCharacters.
  const ColumnFormat *getColumnFormat(unsigned RemainingCharacters) const;

  /// \brief The ordered \c FormatTokens making up the commas of this list.
  SmallVector<const FormatToken *, 8> Commas;

  /// \brief The length of each of the list's items in characters including the
  /// trailing comma.
  SmallVector<unsigned, 8> ItemLengths;

  /// \brief Precomputed formats that can be used for this list.
  SmallVector<ColumnFormat, 4> Formats;

  /// \brief Whether one of the list's items is itself a braced list.
  bool HasNestedBracedList;
};
/// \brief Encapsulates keywords that are context sensitive or for languages not
/// properly supported by Clang's lexer.
///
/// The identifiers are interned once in the given \c IdentifierTable so that
/// tokens can later be compared against them by pointer (see
/// \c FormatToken::is(const IdentifierInfo *)).
struct AdditionalKeywords {
  AdditionalKeywords(IdentifierTable &IdentTable) {
    kw_in = &IdentTable.get("in");
    kw_CF_ENUM = &IdentTable.get("CF_ENUM");
    kw_CF_OPTIONS = &IdentTable.get("CF_OPTIONS");
    kw_NS_ENUM = &IdentTable.get("NS_ENUM");
    kw_NS_OPTIONS = &IdentTable.get("NS_OPTIONS");

    kw_finally = &IdentTable.get("finally");
    kw_function = &IdentTable.get("function");
    kw_import = &IdentTable.get("import");
    kw_var = &IdentTable.get("var");

    kw_abstract = &IdentTable.get("abstract");
    kw_extends = &IdentTable.get("extends");
    kw_final = &IdentTable.get("final");
    kw_implements = &IdentTable.get("implements");
    kw_instanceof = &IdentTable.get("instanceof");
    kw_interface = &IdentTable.get("interface");
    kw_native = &IdentTable.get("native");
    kw_package = &IdentTable.get("package");
    kw_synchronized = &IdentTable.get("synchronized");
    kw_throws = &IdentTable.get("throws");
    kw___except = &IdentTable.get("__except");

    kw_mark = &IdentTable.get("mark");

    kw_option = &IdentTable.get("option");
    kw_optional = &IdentTable.get("optional");
    kw_repeated = &IdentTable.get("repeated");
    kw_required = &IdentTable.get("required");
    kw_returns = &IdentTable.get("returns");

    kw_signals = &IdentTable.get("signals");
    kw_slots = &IdentTable.get("slots");
    kw_qslots = &IdentTable.get("Q_SLOTS"); // Qt's macro spelling, not "qslots".
  }

  // Context sensitive keywords.
  IdentifierInfo *kw_in;
  IdentifierInfo *kw_CF_ENUM;
  IdentifierInfo *kw_CF_OPTIONS;
  IdentifierInfo *kw_NS_ENUM;
  IdentifierInfo *kw_NS_OPTIONS;
  IdentifierInfo *kw___except;

  // JavaScript keywords.
  IdentifierInfo *kw_finally;
  IdentifierInfo *kw_function;
  IdentifierInfo *kw_import;
  IdentifierInfo *kw_var;

  // Java keywords.
  IdentifierInfo *kw_abstract;
  IdentifierInfo *kw_extends;
  IdentifierInfo *kw_final;
  IdentifierInfo *kw_implements;
  IdentifierInfo *kw_instanceof;
  IdentifierInfo *kw_interface;
  IdentifierInfo *kw_native;
  IdentifierInfo *kw_package;
  IdentifierInfo *kw_synchronized;
  IdentifierInfo *kw_throws;

  // Pragma keywords.
  IdentifierInfo *kw_mark;

  // Proto keywords.
  IdentifierInfo *kw_option;
  IdentifierInfo *kw_optional;
  IdentifierInfo *kw_repeated;
  IdentifierInfo *kw_required;
  IdentifierInfo *kw_returns;

  // QT keywords.
  IdentifierInfo *kw_signals;
  IdentifierInfo *kw_slots;
  IdentifierInfo *kw_qslots;
};
} // namespace format
} // namespace clang
#endif
//===--- Format.cpp - Format C++ code -------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file implements functions declared in Format.h. This will be
/// split into separate files as we go.
///
//===----------------------------------------------------------------------===//
#include "ContinuationIndenter.h"
#include "TokenAnnotator.h"
#include "UnwrappedLineFormatter.h"
#include "UnwrappedLineParser.h"
#include "WhitespaceManager.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/YAMLTraits.h"
#include <queue>
#include <string>
#define DEBUG_TYPE "format-formatter"
using clang::format::FormatStyle;
LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(std::string)
namespace llvm {
namespace yaml {
// YAML (de)serialization of the "Language" configuration key.
// NOTE: when writing, the first spelling listed for a value is the one emitted,
// so keep the canonical spelling first for each enumerator.
template <> struct ScalarEnumerationTraits<FormatStyle::LanguageKind> {
  static void enumeration(IO &IO, FormatStyle::LanguageKind &Value) {
    IO.enumCase(Value, "Cpp", FormatStyle::LK_Cpp);
    IO.enumCase(Value, "Java", FormatStyle::LK_Java);
    IO.enumCase(Value, "JavaScript", FormatStyle::LK_JavaScript);
    IO.enumCase(Value, "Proto", FormatStyle::LK_Proto);
  }
};
// YAML (de)serialization of the "Standard" key. Both the "CppNN" and the
// "C++NN" spellings are accepted on input; the first listed one is written.
template <> struct ScalarEnumerationTraits<FormatStyle::LanguageStandard> {
  static void enumeration(IO &IO, FormatStyle::LanguageStandard &Value) {
    IO.enumCase(Value, "Cpp03", FormatStyle::LS_Cpp03);
    IO.enumCase(Value, "C++03", FormatStyle::LS_Cpp03);
    IO.enumCase(Value, "Cpp11", FormatStyle::LS_Cpp11);
    IO.enumCase(Value, "C++11", FormatStyle::LS_Cpp11);
    IO.enumCase(Value, "Auto", FormatStyle::LS_Auto);
  }
};
// YAML (de)serialization of the "UseTab" key. "true"/"false" are accepted
// for backward compatibility with the option's earlier boolean form.
template <> struct ScalarEnumerationTraits<FormatStyle::UseTabStyle> {
  static void enumeration(IO &IO, FormatStyle::UseTabStyle &Value) {
    IO.enumCase(Value, "Never", FormatStyle::UT_Never);
    IO.enumCase(Value, "false", FormatStyle::UT_Never);
    IO.enumCase(Value, "Always", FormatStyle::UT_Always);
    IO.enumCase(Value, "true", FormatStyle::UT_Always);
    IO.enumCase(Value, "ForIndentation", FormatStyle::UT_ForIndentation);
  }
};
// YAML (de)serialization of "AllowShortFunctionsOnASingleLine".
// "true"/"false" remain readable for backward compatibility.
template <> struct ScalarEnumerationTraits<FormatStyle::ShortFunctionStyle> {
  static void enumeration(IO &IO, FormatStyle::ShortFunctionStyle &Value) {
    IO.enumCase(Value, "None", FormatStyle::SFS_None);
    IO.enumCase(Value, "false", FormatStyle::SFS_None);
    IO.enumCase(Value, "All", FormatStyle::SFS_All);
    IO.enumCase(Value, "true", FormatStyle::SFS_All);
    IO.enumCase(Value, "Inline", FormatStyle::SFS_Inline);
    IO.enumCase(Value, "Empty", FormatStyle::SFS_Empty);
  }
};
// YAML (de)serialization of "BreakBeforeBinaryOperators".
// "true"/"false" remain readable for backward compatibility.
template <> struct ScalarEnumerationTraits<FormatStyle::BinaryOperatorStyle> {
  static void enumeration(IO &IO, FormatStyle::BinaryOperatorStyle &Value) {
    IO.enumCase(Value, "All", FormatStyle::BOS_All);
    IO.enumCase(Value, "true", FormatStyle::BOS_All);
    IO.enumCase(Value, "None", FormatStyle::BOS_None);
    IO.enumCase(Value, "false", FormatStyle::BOS_None);
    IO.enumCase(Value, "NonAssignment", FormatStyle::BOS_NonAssignment);
  }
};
// YAML (de)serialization of the "BreakBeforeBraces" key.
template <> struct ScalarEnumerationTraits<FormatStyle::BraceBreakingStyle> {
  static void enumeration(IO &IO, FormatStyle::BraceBreakingStyle &Value) {
    IO.enumCase(Value, "Attach", FormatStyle::BS_Attach);
    IO.enumCase(Value, "Linux", FormatStyle::BS_Linux);
    IO.enumCase(Value, "Mozilla", FormatStyle::BS_Mozilla);
    IO.enumCase(Value, "Stroustrup", FormatStyle::BS_Stroustrup);
    IO.enumCase(Value, "Allman", FormatStyle::BS_Allman);
    IO.enumCase(Value, "GNU", FormatStyle::BS_GNU);
  }
};
// YAML (de)serialization of "AlwaysBreakAfterDefinitionReturnType"; the
// option used to be a bool, hence the legacy "true"/"false" spellings.
template <> struct ScalarEnumerationTraits<FormatStyle::DefinitionReturnTypeBreakingStyle> {
  static void enumeration(IO &IO, FormatStyle::DefinitionReturnTypeBreakingStyle &Value) {
    IO.enumCase(Value, "None", FormatStyle::DRTBS_None);
    IO.enumCase(Value, "All", FormatStyle::DRTBS_All);
    IO.enumCase(Value, "TopLevel", FormatStyle::DRTBS_TopLevel);
    // For backward compatibility.
    IO.enumCase(Value, "false", FormatStyle::DRTBS_None);
    IO.enumCase(Value, "true", FormatStyle::DRTBS_All);
  }
};
// YAML (de)serialization of the "NamespaceIndentation" key.
template <>
struct ScalarEnumerationTraits<FormatStyle::NamespaceIndentationKind> {
  static void enumeration(IO &IO,
                          FormatStyle::NamespaceIndentationKind &Value) {
    IO.enumCase(Value, "None", FormatStyle::NI_None);
    IO.enumCase(Value, "Inner", FormatStyle::NI_Inner);
    IO.enumCase(Value, "All", FormatStyle::NI_All);
  }
};
// YAML (de)serialization of "PointerAlignment"; "true"/"false" map to the
// old boolean PointerBindsToType semantics (true == binds left, to the type).
template <> struct ScalarEnumerationTraits<FormatStyle::PointerAlignmentStyle> {
  static void enumeration(IO &IO, FormatStyle::PointerAlignmentStyle &Value) {
    IO.enumCase(Value, "Middle", FormatStyle::PAS_Middle);
    IO.enumCase(Value, "Left", FormatStyle::PAS_Left);
    IO.enumCase(Value, "Right", FormatStyle::PAS_Right);
    // For backward compatibility.
    IO.enumCase(Value, "true", FormatStyle::PAS_Left);
    IO.enumCase(Value, "false", FormatStyle::PAS_Right);
  }
};
// YAML (de)serialization of "SpaceBeforeParens"; "true"/"false" cover the
// option's earlier boolean form (true meant "after control statements only").
template <>
struct ScalarEnumerationTraits<FormatStyle::SpaceBeforeParensOptions> {
  static void enumeration(IO &IO,
                          FormatStyle::SpaceBeforeParensOptions &Value) {
    IO.enumCase(Value, "Never", FormatStyle::SBPO_Never);
    IO.enumCase(Value, "ControlStatements",
                FormatStyle::SBPO_ControlStatements);
    IO.enumCase(Value, "Always", FormatStyle::SBPO_Always);
    // For backward compatibility.
    IO.enumCase(Value, "false", FormatStyle::SBPO_Never);
    IO.enumCase(Value, "true", FormatStyle::SBPO_ControlStatements);
  }
};
// YAML mapping for a whole FormatStyle. The same mapping is used for reading
// a .clang-format file and for writing one back out (configurationAsText).
// The order of the first few mapOptional calls is load-bearing: Language must
// be read before BasedOnStyle so the right predefined style can be selected.
template <> struct MappingTraits<FormatStyle> {
  static void mapping(IO &IO, FormatStyle &Style) {
    // When reading, read the language first, we need it for getPredefinedStyle.
    IO.mapOptional("Language", Style.Language);
    if (IO.outputting()) {
      // When the style exactly matches a predefined one, emit a helpful
      // "# BasedOnStyle" comment instead of forcing the user to diff.
      StringRef StylesArray[] = {"LLVM", "Google", "Chromium",
                                 "Mozilla", "WebKit", "GNU"};
      ArrayRef<StringRef> Styles(StylesArray);
      for (size_t i = 0, e = Styles.size(); i < e; ++i) {
        StringRef StyleName(Styles[i]);
        FormatStyle PredefinedStyle;
        if (getPredefinedStyle(StyleName, Style.Language, &PredefinedStyle) &&
            Style == PredefinedStyle) {
          IO.mapOptional("# BasedOnStyle", StyleName);
          break;
        }
      }
    } else {
      // When reading, start from the requested base style (if any) and let
      // the explicitly configured keys below override it.
      StringRef BasedOnStyle;
      IO.mapOptional("BasedOnStyle", BasedOnStyle);
      if (!BasedOnStyle.empty()) {
        FormatStyle::LanguageKind OldLanguage = Style.Language;
        FormatStyle::LanguageKind Language =
            ((FormatStyle *)IO.getContext())->Language;
        if (!getPredefinedStyle(BasedOnStyle, Language, &Style)) {
          IO.setError(Twine("Unknown value for BasedOnStyle: ", BasedOnStyle));
          return;
        }
        Style.Language = OldLanguage;
      }
    }
    // For backward compatibility.
    if (!IO.outputting()) {
      IO.mapOptional("DerivePointerBinding", Style.DerivePointerAlignment);
      IO.mapOptional("IndentFunctionDeclarationAfterType",
                     Style.IndentWrappedFunctionNames);
      IO.mapOptional("PointerBindsToType", Style.PointerAlignment);
      IO.mapOptional("SpaceAfterControlStatementKeyword",
                     Style.SpaceBeforeParens);
    }
    // Regular options, kept in alphabetical order.
    IO.mapOptional("AccessModifierOffset", Style.AccessModifierOffset);
    IO.mapOptional("AlignAfterOpenBracket", Style.AlignAfterOpenBracket);
    IO.mapOptional("AlignConsecutiveAssignments",
                   Style.AlignConsecutiveAssignments);
    IO.mapOptional("AlignEscapedNewlinesLeft", Style.AlignEscapedNewlinesLeft);
    IO.mapOptional("AlignOperands", Style.AlignOperands);
    IO.mapOptional("AlignTrailingComments", Style.AlignTrailingComments);
    IO.mapOptional("AllowAllParametersOfDeclarationOnNextLine",
                   Style.AllowAllParametersOfDeclarationOnNextLine);
    IO.mapOptional("AllowShortBlocksOnASingleLine",
                   Style.AllowShortBlocksOnASingleLine);
    IO.mapOptional("AllowShortCaseLabelsOnASingleLine",
                   Style.AllowShortCaseLabelsOnASingleLine);
    IO.mapOptional("AllowShortFunctionsOnASingleLine",
                   Style.AllowShortFunctionsOnASingleLine);
    IO.mapOptional("AllowShortIfStatementsOnASingleLine",
                   Style.AllowShortIfStatementsOnASingleLine);
    IO.mapOptional("AllowShortLoopsOnASingleLine",
                   Style.AllowShortLoopsOnASingleLine);
    IO.mapOptional("AlwaysBreakAfterDefinitionReturnType",
                   Style.AlwaysBreakAfterDefinitionReturnType);
    IO.mapOptional("AlwaysBreakBeforeMultilineStrings",
                   Style.AlwaysBreakBeforeMultilineStrings);
    IO.mapOptional("AlwaysBreakTemplateDeclarations",
                   Style.AlwaysBreakTemplateDeclarations);
    IO.mapOptional("BinPackArguments", Style.BinPackArguments);
    IO.mapOptional("BinPackParameters", Style.BinPackParameters);
    IO.mapOptional("BreakBeforeBinaryOperators",
                   Style.BreakBeforeBinaryOperators);
    IO.mapOptional("BreakBeforeBraces", Style.BreakBeforeBraces);
    IO.mapOptional("BreakBeforeTernaryOperators",
                   Style.BreakBeforeTernaryOperators);
    IO.mapOptional("BreakConstructorInitializersBeforeComma",
                   Style.BreakConstructorInitializersBeforeComma);
    IO.mapOptional("ColumnLimit", Style.ColumnLimit);
    IO.mapOptional("CommentPragmas", Style.CommentPragmas);
    IO.mapOptional("ConstructorInitializerAllOnOneLineOrOnePerLine",
                   Style.ConstructorInitializerAllOnOneLineOrOnePerLine);
    IO.mapOptional("ConstructorInitializerIndentWidth",
                   Style.ConstructorInitializerIndentWidth);
    IO.mapOptional("ContinuationIndentWidth", Style.ContinuationIndentWidth);
    IO.mapOptional("Cpp11BracedListStyle", Style.Cpp11BracedListStyle);
    IO.mapOptional("DerivePointerAlignment", Style.DerivePointerAlignment);
    IO.mapOptional("DisableFormat", Style.DisableFormat);
    IO.mapOptional("ExperimentalAutoDetectBinPacking",
                   Style.ExperimentalAutoDetectBinPacking);
    IO.mapOptional("ForEachMacros", Style.ForEachMacros);
    IO.mapOptional("IndentCaseLabels", Style.IndentCaseLabels);
    IO.mapOptional("IndentWidth", Style.IndentWidth);
    IO.mapOptional("IndentWrappedFunctionNames",
                   Style.IndentWrappedFunctionNames);
    IO.mapOptional("KeepEmptyLinesAtTheStartOfBlocks",
                   Style.KeepEmptyLinesAtTheStartOfBlocks);
    IO.mapOptional("MacroBlockBegin", Style.MacroBlockBegin);
    IO.mapOptional("MacroBlockEnd", Style.MacroBlockEnd);
    IO.mapOptional("MaxEmptyLinesToKeep", Style.MaxEmptyLinesToKeep);
    IO.mapOptional("NamespaceIndentation", Style.NamespaceIndentation);
    IO.mapOptional("ObjCBlockIndentWidth", Style.ObjCBlockIndentWidth);
    IO.mapOptional("ObjCSpaceAfterProperty", Style.ObjCSpaceAfterProperty);
    IO.mapOptional("ObjCSpaceBeforeProtocolList",
                   Style.ObjCSpaceBeforeProtocolList);
    IO.mapOptional("PenaltyBreakBeforeFirstCallParameter",
                   Style.PenaltyBreakBeforeFirstCallParameter);
    IO.mapOptional("PenaltyBreakComment", Style.PenaltyBreakComment);
    IO.mapOptional("PenaltyBreakFirstLessLess",
                   Style.PenaltyBreakFirstLessLess);
    IO.mapOptional("PenaltyBreakString", Style.PenaltyBreakString);
    IO.mapOptional("PenaltyExcessCharacter", Style.PenaltyExcessCharacter);
    IO.mapOptional("PenaltyReturnTypeOnItsOwnLine",
                   Style.PenaltyReturnTypeOnItsOwnLine);
    IO.mapOptional("PointerAlignment", Style.PointerAlignment);
    IO.mapOptional("SpaceAfterCStyleCast", Style.SpaceAfterCStyleCast);
    IO.mapOptional("SpaceBeforeAssignmentOperators",
                   Style.SpaceBeforeAssignmentOperators);
    IO.mapOptional("SpaceBeforeParens", Style.SpaceBeforeParens);
    IO.mapOptional("SpaceInEmptyParentheses", Style.SpaceInEmptyParentheses);
    IO.mapOptional("SpacesBeforeTrailingComments",
                   Style.SpacesBeforeTrailingComments);
    IO.mapOptional("SpacesInAngles", Style.SpacesInAngles);
    IO.mapOptional("SpacesInContainerLiterals",
                   Style.SpacesInContainerLiterals);
    IO.mapOptional("SpacesInCStyleCastParentheses",
                   Style.SpacesInCStyleCastParentheses);
    IO.mapOptional("SpacesInParentheses", Style.SpacesInParentheses);
    IO.mapOptional("SpacesInSquareBrackets", Style.SpacesInSquareBrackets);
    IO.mapOptional("Standard", Style.Standard);
    IO.mapOptional("TabWidth", Style.TabWidth);
    IO.mapOptional("UseTab", Style.UseTab);
  }
};
// Allows to read vector<FormatStyle> while keeping default values.
// IO.getContext() should contain a pointer to the FormatStyle structure, that
// will be used to get default values for missing keys.
// If the first element has no Language specified, it will be treated as the
// default one for the following elements.
template <> struct DocumentListTraits<std::vector<FormatStyle>> {
  static size_t size(IO &IO, std::vector<FormatStyle> &Seq) {
    return Seq.size();
  }
  static FormatStyle &element(IO &IO, std::vector<FormatStyle> &Seq,
                              size_t Index) {
    if (Index >= Seq.size()) {
      // YAML I/O only ever asks for the next element, so at most one new slot
      // is appended here; it is seeded with the applicable default style.
      assert(Index == Seq.size());
      FormatStyle Template;
      if (Seq.size() > 0 && Seq[0].Language == FormatStyle::LK_None) {
        // The language-less first document provides the defaults.
        Template = Seq[0];
      } else {
        // Otherwise fall back to the caller-provided style in the context.
        Template = *((const FormatStyle *)IO.getContext());
        Template.Language = FormatStyle::LK_None;
      }
      Seq.resize(Index + 1, Template);
    }
    return Seq[Index];
  }
};
}
}
namespace clang {
namespace format {
// Returns the singleton std::error_category used for configuration-parse
// error codes produced by make_error_code(ParseError).
const std::error_category &getParseCategory() {
  static ParseErrorCategory C;
  return C;
}
// Wraps a ParseError enumerator into a std::error_code bound to the
// clang-format parse-error category.
std::error_code make_error_code(ParseError e) {
  const int Value = static_cast<int>(e);
  return std::error_code(Value, getParseCategory());
}
// Category identifier surfaced via std::error_code::category().name().
const char *ParseErrorCategory::name() const LLVM_NOEXCEPT {
  return "clang-format.parse_error";
}
std::string ParseErrorCategory::message(int EV) const {
switch (static_cast<ParseError>(EV)) {
case ParseError::Success:
return "Success";
case ParseError::Error:
return "Invalid argument";
case ParseError::Unsuitable:
return "Unsuitable";
}
llvm_unreachable("unexpected parse error");
}
// Builds the LLVM coding-standard style. This is the baseline every other
// predefined style starts from, so every option is set explicitly here.
FormatStyle getLLVMStyle() {
  FormatStyle LLVMStyle;
  LLVMStyle.Language = FormatStyle::LK_Cpp;
  // Alignment and short-construct options.
  LLVMStyle.AccessModifierOffset = -2;
  LLVMStyle.AlignEscapedNewlinesLeft = false;
  LLVMStyle.AlignAfterOpenBracket = true;
  LLVMStyle.AlignOperands = true;
  LLVMStyle.AlignTrailingComments = true;
  LLVMStyle.AlignConsecutiveAssignments = false;
  LLVMStyle.AllowAllParametersOfDeclarationOnNextLine = true;
  LLVMStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_All;
  LLVMStyle.AllowShortBlocksOnASingleLine = false;
  LLVMStyle.AllowShortCaseLabelsOnASingleLine = false;
  LLVMStyle.AllowShortIfStatementsOnASingleLine = false;
  LLVMStyle.AllowShortLoopsOnASingleLine = false;
  // Breaking behavior.
  LLVMStyle.AlwaysBreakAfterDefinitionReturnType = FormatStyle::DRTBS_None;
  LLVMStyle.AlwaysBreakBeforeMultilineStrings = false;
  LLVMStyle.AlwaysBreakTemplateDeclarations = false;
  LLVMStyle.BinPackParameters = true;
  LLVMStyle.BinPackArguments = true;
  LLVMStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_None;
  LLVMStyle.BreakBeforeTernaryOperators = true;
  LLVMStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
  LLVMStyle.BreakConstructorInitializersBeforeComma = false;
  LLVMStyle.ColumnLimit = 80;
  LLVMStyle.CommentPragmas = "^ IWYU pragma:";
  LLVMStyle.ConstructorInitializerAllOnOneLineOrOnePerLine = false;
  LLVMStyle.ConstructorInitializerIndentWidth = 4;
  LLVMStyle.ContinuationIndentWidth = 4;
  LLVMStyle.Cpp11BracedListStyle = true;
  LLVMStyle.DerivePointerAlignment = false;
  LLVMStyle.ExperimentalAutoDetectBinPacking = false;
  // Macros treated as range-based-for-like loops.
  LLVMStyle.ForEachMacros.push_back("foreach");
  LLVMStyle.ForEachMacros.push_back("Q_FOREACH");
  LLVMStyle.ForEachMacros.push_back("BOOST_FOREACH");
  // Indentation and whitespace.
  LLVMStyle.IndentCaseLabels = false;
  LLVMStyle.IndentWrappedFunctionNames = false;
  LLVMStyle.IndentWidth = 2;
  LLVMStyle.TabWidth = 8;
  LLVMStyle.MaxEmptyLinesToKeep = 1;
  LLVMStyle.KeepEmptyLinesAtTheStartOfBlocks = true;
  LLVMStyle.NamespaceIndentation = FormatStyle::NI_None;
  LLVMStyle.ObjCBlockIndentWidth = 2;
  LLVMStyle.ObjCSpaceAfterProperty = false;
  LLVMStyle.ObjCSpaceBeforeProtocolList = true;
  LLVMStyle.PointerAlignment = FormatStyle::PAS_Right;
  LLVMStyle.SpacesBeforeTrailingComments = 1;
  LLVMStyle.Standard = FormatStyle::LS_Cpp11;
  LLVMStyle.UseTab = FormatStyle::UT_Never;
  LLVMStyle.SpacesInParentheses = false;
  LLVMStyle.SpacesInSquareBrackets = false;
  LLVMStyle.SpaceInEmptyParentheses = false;
  LLVMStyle.SpacesInContainerLiterals = true;
  LLVMStyle.SpacesInCStyleCastParentheses = false;
  LLVMStyle.SpaceAfterCStyleCast = false;
  LLVMStyle.SpaceBeforeParens = FormatStyle::SBPO_ControlStatements;
  LLVMStyle.SpaceBeforeAssignmentOperators = true;
  LLVMStyle.SpacesInAngles = false;
  // Penalties used by the line-breaking optimizer.
  LLVMStyle.PenaltyBreakComment = 300;
  LLVMStyle.PenaltyBreakFirstLessLess = 120;
  LLVMStyle.PenaltyBreakString = 1000;
  LLVMStyle.PenaltyExcessCharacter = 1000000;
  LLVMStyle.PenaltyReturnTypeOnItsOwnLine = 60;
  LLVMStyle.PenaltyBreakBeforeFirstCallParameter = 19;
  LLVMStyle.DisableFormat = false;
  return LLVMStyle;
}
// Builds the Google style for the given language: LLVM style plus the
// Google-specific deviations, then per-language (Java/JS/Proto) overrides.
FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
  FormatStyle GoogleStyle = getLLVMStyle();
  GoogleStyle.Language = Language;
  GoogleStyle.AccessModifierOffset = -1;
  GoogleStyle.AlignEscapedNewlinesLeft = true;
  GoogleStyle.AllowShortIfStatementsOnASingleLine = true;
  GoogleStyle.AllowShortLoopsOnASingleLine = true;
  GoogleStyle.AlwaysBreakBeforeMultilineStrings = true;
  GoogleStyle.AlwaysBreakTemplateDeclarations = true;
  GoogleStyle.ConstructorInitializerAllOnOneLineOrOnePerLine = true;
  GoogleStyle.DerivePointerAlignment = true;
  GoogleStyle.IndentCaseLabels = true;
  GoogleStyle.KeepEmptyLinesAtTheStartOfBlocks = false;
  GoogleStyle.ObjCSpaceAfterProperty = false;
  GoogleStyle.ObjCSpaceBeforeProtocolList = false;
  GoogleStyle.PointerAlignment = FormatStyle::PAS_Left;
  GoogleStyle.SpacesBeforeTrailingComments = 2;
  GoogleStyle.Standard = FormatStyle::LS_Auto;
  GoogleStyle.PenaltyReturnTypeOnItsOwnLine = 200;
  GoogleStyle.PenaltyBreakBeforeFirstCallParameter = 1;
  if (Language == FormatStyle::LK_Java) {
    GoogleStyle.AlignAfterOpenBracket = false;
    GoogleStyle.AlignOperands = false;
    GoogleStyle.AlignTrailingComments = false;
    GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
    GoogleStyle.AllowShortIfStatementsOnASingleLine = false;
    GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
    GoogleStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_NonAssignment;
    GoogleStyle.ColumnLimit = 100;
    GoogleStyle.SpaceAfterCStyleCast = true;
    GoogleStyle.SpacesBeforeTrailingComments = 1;
  } else if (Language == FormatStyle::LK_JavaScript) {
    GoogleStyle.BreakBeforeTernaryOperators = false;
    GoogleStyle.MaxEmptyLinesToKeep = 3;
    GoogleStyle.SpacesInContainerLiterals = false;
    GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
    GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
  } else if (Language == FormatStyle::LK_Proto) {
    GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_None;
    GoogleStyle.SpacesInContainerLiterals = false;
  }
  return GoogleStyle;
}
// Builds the Chromium style for the given language: Google style plus a few
// Chromium-specific deviations.
FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language) {
  FormatStyle ChromiumStyle = getGoogleStyle(Language);
  if (Language == FormatStyle::LK_Java) {
    ChromiumStyle.AllowShortIfStatementsOnASingleLine = true;
    ChromiumStyle.IndentWidth = 4;
    ChromiumStyle.ContinuationIndentWidth = 8;
  } else {
    ChromiumStyle.AllowAllParametersOfDeclarationOnNextLine = false;
    ChromiumStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
    ChromiumStyle.AllowShortIfStatementsOnASingleLine = false;
    ChromiumStyle.AllowShortLoopsOnASingleLine = false;
    ChromiumStyle.BinPackParameters = false;
    ChromiumStyle.DerivePointerAlignment = false;
  }
  // Treat Chromium's IPC message-map macros as block delimiters.
  ChromiumStyle.MacroBlockBegin = "^IPC_BEGIN_MESSAGE_MAP$";
  // Bug fix: this previously assigned to MacroBlockBegin a second time,
  // clobbering the begin regex and leaving MacroBlockEnd empty.
  ChromiumStyle.MacroBlockEnd = "^IPC_END_MESSAGE_MAP$";
  return ChromiumStyle;
}
// Builds the Mozilla style: LLVM style plus Mozilla-specific deviations.
FormatStyle getMozillaStyle() {
  FormatStyle MozillaStyle = getLLVMStyle();
  MozillaStyle.AllowAllParametersOfDeclarationOnNextLine = false;
  MozillaStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
  MozillaStyle.AlwaysBreakAfterDefinitionReturnType =
      FormatStyle::DRTBS_TopLevel;
  MozillaStyle.AlwaysBreakTemplateDeclarations = true;
  MozillaStyle.BreakBeforeBraces = FormatStyle::BS_Mozilla;
  MozillaStyle.BreakConstructorInitializersBeforeComma = true;
  MozillaStyle.ConstructorInitializerIndentWidth = 2;
  MozillaStyle.ContinuationIndentWidth = 2;
  MozillaStyle.Cpp11BracedListStyle = false;
  MozillaStyle.IndentCaseLabels = true;
  MozillaStyle.ObjCSpaceAfterProperty = true;
  MozillaStyle.ObjCSpaceBeforeProtocolList = false;
  MozillaStyle.PenaltyReturnTypeOnItsOwnLine = 200;
  MozillaStyle.PointerAlignment = FormatStyle::PAS_Left;
  return MozillaStyle;
}
// Builds the WebKit style: LLVM style plus WebKit-specific deviations
// (notably no column limit and 4-space indentation).
FormatStyle getWebKitStyle() {
  FormatStyle Style = getLLVMStyle();
  Style.AccessModifierOffset = -4;
  Style.AlignAfterOpenBracket = false;
  Style.AlignOperands = false;
  Style.AlignTrailingComments = false;
  Style.BreakBeforeBinaryOperators = FormatStyle::BOS_All;
  Style.BreakBeforeBraces = FormatStyle::BS_Stroustrup;
  Style.BreakConstructorInitializersBeforeComma = true;
  Style.Cpp11BracedListStyle = false;
  Style.ColumnLimit = 0; // No column limit.
  Style.IndentWidth = 4;
  Style.NamespaceIndentation = FormatStyle::NI_Inner;
  Style.ObjCBlockIndentWidth = 4;
  Style.ObjCSpaceAfterProperty = true;
  Style.PointerAlignment = FormatStyle::PAS_Left;
  Style.Standard = FormatStyle::LS_Cpp03;
  return Style;
}
// Builds the GNU style: LLVM style plus GNU coding-standard deviations.
FormatStyle getGNUStyle() {
  FormatStyle Style = getLLVMStyle();
  Style.AlwaysBreakAfterDefinitionReturnType = FormatStyle::DRTBS_All;
  Style.BreakBeforeBinaryOperators = FormatStyle::BOS_All;
  Style.BreakBeforeBraces = FormatStyle::BS_GNU;
  Style.BreakBeforeTernaryOperators = true;
  Style.Cpp11BracedListStyle = false;
  Style.ColumnLimit = 79;
  Style.SpaceBeforeParens = FormatStyle::SBPO_Always;
  Style.Standard = FormatStyle::LS_Cpp03;
  return Style;
}
// The "none" pseudo-style: a valid FormatStyle whose only effect is that
// formatting is entirely disabled.
FormatStyle getNoStyle() {
  FormatStyle Result = getLLVMStyle();
  Result.DisableFormat = true;
  return Result;
}
// Resolves a (case-insensitive) predefined style name for the given language.
// On success fills *Style and returns true; on an unknown name returns false
// and leaves *Style untouched.
bool getPredefinedStyle(StringRef Name, FormatStyle::LanguageKind Language,
                        FormatStyle *Style) {
  FormatStyle Predefined;
  if (Name.equals_lower("llvm"))
    Predefined = getLLVMStyle();
  else if (Name.equals_lower("google"))
    Predefined = getGoogleStyle(Language);
  else if (Name.equals_lower("chromium"))
    Predefined = getChromiumStyle(Language);
  else if (Name.equals_lower("mozilla"))
    Predefined = getMozillaStyle();
  else if (Name.equals_lower("webkit"))
    Predefined = getWebKitStyle();
  else if (Name.equals_lower("gnu"))
    Predefined = getGNUStyle();
  else if (Name.equals_lower("none"))
    Predefined = getNoStyle();
  else
    return false;
  Predefined.Language = Language;
  *Style = Predefined;
  return true;
}
// Parses a .clang-format YAML document (possibly containing one configuration
// per language) into *Style. Style->Language selects which of the parsed
// configurations wins; *Style also provides defaults for missing keys.
std::error_code parseConfiguration(StringRef Text, FormatStyle *Style) {
  assert(Style);
  FormatStyle::LanguageKind Language = Style->Language;
  assert(Language != FormatStyle::LK_None);
  if (Text.trim().empty())
    return make_error_code(ParseError::Error);
  std::vector<FormatStyle> Styles;
  llvm::yaml::Input Input(Text);
  // DocumentListTraits<vector<FormatStyle>> uses the context to get default
  // values for the fields, keys for which are missing from the configuration.
  // Mapping also uses the context to get the language to find the correct
  // base style.
  Input.setContext(Style);
  Input >> Styles;
  if (Input.error())
    return Input.error();
  for (unsigned i = 0; i < Styles.size(); ++i) {
    // Ensures that only the first configuration can skip the Language option.
    if (Styles[i].Language == FormatStyle::LK_None && i != 0)
      return make_error_code(ParseError::Error);
    // Ensure that each language is configured at most once.
    for (unsigned j = 0; j < i; ++j) {
      if (Styles[i].Language == Styles[j].Language) {
        DEBUG(llvm::dbgs()
              << "Duplicate languages in the config file on positions " << j
              << " and " << i << "\n");
        return make_error_code(ParseError::Error);
      }
    }
  }
  // Look for a suitable configuration starting from the end, so we can
  // find the configuration for the specific language first, and the default
  // configuration (which can only be at slot 0) after it.
  for (int i = Styles.size() - 1; i >= 0; --i) {
    if (Styles[i].Language == Language ||
        Styles[i].Language == FormatStyle::LK_None) {
      *Style = Styles[i];
      Style->Language = Language;
      return make_error_code(ParseError::Success);
    }
  }
  return make_error_code(ParseError::Unsuitable);
}
std::string configurationAsText(const FormatStyle &Style) {
std::string Text;
llvm::raw_string_ostream Stream(Text);
llvm::yaml::Output Output(Stream);
// We use the same mapping method for input and output, so we need a non-const
// reference here.
FormatStyle NonConstStyle = Style;
Output << NonConstStyle;
return Stream.str();
}
namespace {
class FormatTokenLexer {
public:
  // Sets up a raw lexer over file \p ID in whitespace-keeping mode and
  // interns the style's for-each macros for later token classification.
  FormatTokenLexer(SourceManager &SourceMgr, FileID ID, FormatStyle &Style,
                   encoding::Encoding Encoding)
      : FormatTok(nullptr), IsFirstToken(true), GreaterStashed(false),
        LessStashed(false), Column(0), TrailingWhitespace(0),
        SourceMgr(SourceMgr), ID(ID), Style(Style),
        IdentTable(getFormattingLangOpts(Style)), Keywords(IdentTable),
        Encoding(Encoding), FirstInLineIndex(0), FormattingDisabled(false),
        MacroBlockBeginRegex(Style.MacroBlockBegin),
        MacroBlockEndRegex(Style.MacroBlockEnd) {
    Lex.reset(new Lexer(ID, SourceMgr.getBuffer(ID), SourceMgr,
                        getFormattingLangOpts(Style)));
    // Whitespace is significant for formatting, so keep it as tokens.
    Lex->SetKeepWhitespaceMode(true);
    for (const std::string &ForEachMacro : Style.ForEachMacros)
      ForEachMacros.push_back(&IdentTable.get(ForEachMacro));
    // Sorted once; lookup code elsewhere presumably relies on sorted order
    // (e.g. for binary search) -- not visible in this file section.
    std::sort(ForEachMacros.begin(), ForEachMacros.end());
  }
ArrayRef<FormatToken *> lex() {
assert(Tokens.empty());
assert(FirstInLineIndex == 0);
do {
Tokens.push_back(getNextToken());
tryMergePreviousTokens();
if (Tokens.back()->NewlinesBefore > 0 || Tokens.back()->IsMultiline)
FirstInLineIndex = Tokens.size() - 1;
} while (Tokens.back()->Tok.isNot(tok::eof));
return Tokens;
}
const AdditionalKeywords &getKeywords() { return Keywords; }
private:
  // After each newly lexed token, tries to fold the most recent tokens into a
  // single logical token. The attempts are ordered: generic merges first,
  // then JavaScript-only multi-token operators. Each helper returns true if
  // it consumed tokens, in which case no further merge is attempted.
  void tryMergePreviousTokens() {
    if (tryMerge_TMacro())
      return;
    if (tryMergeConflictMarkers())
      return;
    if (tryMergeLessLess())
      return;
    if (Style.Language == FormatStyle::LK_JavaScript) {
      if (tryMergeJSRegexLiteral())
        return;
      if (tryMergeEscapeSequence())
        return;
      if (tryMergeTemplateString())
        return;
      // Token sequences that form single JavaScript operators:
      // "===", "!==", ">>>=", and the fat arrow "=>".
      static const tok::TokenKind JSIdentity[] = {tok::equalequal, tok::equal};
      static const tok::TokenKind JSNotIdentity[] = {tok::exclaimequal,
                                                     tok::equal};
      static const tok::TokenKind JSShiftEqual[] = {tok::greater, tok::greater,
                                                    tok::greaterequal};
      static const tok::TokenKind JSRightArrow[] = {tok::equal, tok::greater};
      // FIXME: Investigate what token type gives the correct operator priority.
      if (tryMergeTokens(JSIdentity, TT_BinaryOperator))
        return;
      if (tryMergeTokens(JSNotIdentity, TT_BinaryOperator))
        return;
      if (tryMergeTokens(JSShiftEqual, TT_BinaryOperator))
        return;
      if (tryMergeTokens(JSRightArrow, TT_JsFatArrow))
        return;
    }
  }
  // Re-joins "<<" that the lexer split into two "<" tokens (it does so while
  // template disambiguation is in effect).
  bool tryMergeLessLess() {
    // Merge X,less,less,Y into X,lessless,Y unless X or Y is less.
    if (Tokens.size() < 3)
      return false;
    bool FourthTokenIsLess = false;
    if (Tokens.size() > 3)
      FourthTokenIsLess = (Tokens.end() - 4)[0]->is(tok::less);
    auto First = Tokens.end() - 3;
    if (First[2]->is(tok::less) || First[1]->isNot(tok::less) ||
        First[0]->isNot(tok::less) || FourthTokenIsLess)
      return false;
    // Only merge if there currently is no whitespace between the two "<".
    if (First[1]->WhitespaceRange.getBegin() !=
        First[1]->WhitespaceRange.getEnd())
      return false;
    // Turn the first "<" into "<<" and drop the second one.
    First[0]->Tok.setKind(tok::lessless);
    First[0]->TokenText = "<<";
    First[0]->ColumnWidth += 1;
    Tokens.erase(Tokens.end() - 2);
    return true;
  }
  // If the trailing tokens match \p Kinds exactly, with no whitespace between
  // consecutive tokens, collapses them into the first token (whose TokenText
  // is extended over the merged range) and assigns it \p NewType.
  bool tryMergeTokens(ArrayRef<tok::TokenKind> Kinds, TokenType NewType) {
    if (Tokens.size() < Kinds.size())
      return false;
    SmallVectorImpl<FormatToken *>::const_iterator First =
        Tokens.end() - Kinds.size();
    if (!First[0]->is(Kinds[0]))
      return false;
    unsigned AddLength = 0;
    for (unsigned i = 1; i < Kinds.size(); ++i) {
      // Any intervening whitespace means these are separate tokens.
      if (!First[i]->is(Kinds[i]) ||
          First[i]->WhitespaceRange.getBegin() !=
              First[i]->WhitespaceRange.getEnd())
        return false;
      AddLength += First[i]->TokenText.size();
    }
    Tokens.resize(Tokens.size() - Kinds.size() + 1);
    // The merged tokens are adjacent in the source buffer, so the first
    // token's text can simply be widened to cover all of them.
    First[0]->TokenText = StringRef(First[0]->TokenText.data(),
                                    First[0]->TokenText.size() + AddLength);
    First[0]->ColumnWidth += AddLength;
    First[0]->Type = NewType;
    return true;
  }
  // Tries to merge an escape sequence, i.e. a "\\" and the following
  // character. Use e.g. inside JavaScript regex literals.
  bool tryMergeEscapeSequence() {
    if (Tokens.size() < 2)
      return false;
    FormatToken *Previous = Tokens[Tokens.size() - 2];
    if (Previous->isNot(tok::unknown) || Previous->TokenText != "\\")
      return false;
    // Extend the backslash token by one character and re-lex from right
    // after the escaped character, discarding the current last token.
    ++Previous->ColumnWidth;
    StringRef Text = Previous->TokenText;
    Previous->TokenText = StringRef(Text.data(), Text.size() + 1);
    resetLexer(SourceMgr.getFileOffset(Tokens.back()->Tok.getLocation()) + 1);
    Tokens.resize(Tokens.size() - 1);
    Column = Previous->OriginalColumn + Previous->ColumnWidth;
    return true;
  }
  // Try to determine whether the current token ends a JavaScript regex literal.
  // We heuristically assume that this is a regex literal if we find two
  // unescaped slashes on a line and the token before the first slash is one of
  // "(;,{}![:?", a binary operator or 'return', as those cannot be followed by
  // a division.
  bool tryMergeJSRegexLiteral() {
    if (Tokens.size() < 2)
      return false;
    // If this is a string literal with a slash inside, compute the slash's
    // offset and try to find the beginning of the regex literal.
    // Also look at tok::unknown, as it can be an unterminated char literal.
    size_t SlashInStringPos = StringRef::npos;
    if (Tokens.back()->isOneOf(tok::string_literal, tok::char_constant,
                               tok::unknown)) {
      // Start search from position 1 as otherwise, this is an unknown token
      // for an unterminated /*-comment which is handled elsewhere.
      SlashInStringPos = Tokens.back()->TokenText.find('/', 1);
      if (SlashInStringPos == StringRef::npos)
        return false;
    }
    // If a regex literal ends in "\//", this gets represented by an unknown
    // token "\" and a comment.
    bool MightEndWithEscapedSlash =
        Tokens.back()->is(tok::comment) &&
        Tokens.back()->TokenText.startswith("//") &&
        Tokens[Tokens.size() - 2]->TokenText == "\\";
    if (!MightEndWithEscapedSlash && SlashInStringPos == StringRef::npos &&
        (Tokens.back()->isNot(tok::slash) ||
         (Tokens[Tokens.size() - 2]->is(tok::unknown) &&
          Tokens[Tokens.size() - 2]->TokenText == "\\")))
      return false;
    // Walk backwards over the current line looking for the opening slash,
    // skipping comments when inspecting the token that precedes it.
    unsigned TokenCount = 0;
    for (auto I = Tokens.rbegin() + 1, E = Tokens.rend(); I != E; ++I) {
      ++TokenCount;
      auto Prev = I + 1;
      while (Prev != E && Prev[0]->is(tok::comment))
        ++Prev;
      if (I[0]->isOneOf(tok::slash, tok::slashequal) &&
          (Prev == E ||
           ((Prev[0]->isOneOf(tok::l_paren, tok::semi, tok::l_brace,
                              tok::r_brace, tok::exclaim, tok::l_square,
                              tok::colon, tok::comma, tok::question,
                              tok::kw_return) ||
             Prev[0]->isBinaryOperator())))) {
        unsigned LastColumn = Tokens.back()->OriginalColumn;
        SourceLocation Loc = Tokens.back()->Tok.getLocation();
        if (MightEndWithEscapedSlash) {
          // This regex literal ends in '\//'. Skip past the '//' of the last
          // token and re-start lexing from there.
          resetLexer(SourceMgr.getFileOffset(Loc) + 2);
        } else if (SlashInStringPos != StringRef::npos) {
          // This regex literal ends in a string_literal with a slash inside.
          // Calculate end column and reset lexer appropriately.
          resetLexer(SourceMgr.getFileOffset(Loc) + SlashInStringPos + 1);
          LastColumn += SlashInStringPos;
        }
        // Collapse everything from the opening slash onwards into one token.
        Tokens.resize(Tokens.size() - TokenCount);
        // NOTE(review): this first setKind is immediately overwritten by the
        // string_literal setKind below and looks redundant -- confirm.
        Tokens.back()->Tok.setKind(tok::unknown);
        Tokens.back()->Type = TT_RegexLiteral;
        // Treat regex literals like other string_literals.
        Tokens.back()->Tok.setKind(tok::string_literal);
        Tokens.back()->ColumnWidth += LastColumn - I[0]->OriginalColumn;
        return true;
      }
      // There can't be a newline inside a regex literal.
      if (I[0]->NewlinesBefore > 0)
        return false;
    }
    return false;
  }
// Try to merge the token sequence between a pair of backticks into a single
// JavaScript template-string token (TT_TemplateString). Returns true and
// replaces the merged tokens if a complete template string was found.
bool tryMergeTemplateString() {
  if (Tokens.size() < 2)
    return false;
  FormatToken *EndBacktick = Tokens.back();
  // Backticks get lexed as tok::unknown tokens. If a template string contains
  // a comment start, it gets lexed as a tok::comment, or tok::unknown if
  // unterminated.
  if (!EndBacktick->isOneOf(tok::comment, tok::string_literal,
                            tok::char_constant, tok::unknown))
    return false;
  size_t CommentBacktickPos = EndBacktick->TokenText.find('`');
  // Unknown token that's not actually a backtick, or a comment that doesn't
  // contain a backtick.
  if (CommentBacktickPos == StringRef::npos)
    return false;
  unsigned TokenCount = 0;
  bool IsMultiline = false;
  unsigned EndColumnInFirstLine =
      EndBacktick->OriginalColumn + EndBacktick->ColumnWidth;
  // Scan backwards over already-lexed tokens looking for the opening backtick.
  for (auto I = Tokens.rbegin() + 1, E = Tokens.rend(); I != E; I++) {
    ++TokenCount;
    if (I[0]->IsMultiline)
      IsMultiline = true;

    // If there was a preceding template string, this must be the start of a
    // template string, not the end.
    if (I[0]->is(TT_TemplateString))
      return false;

    if (I[0]->isNot(tok::unknown) || I[0]->TokenText != "`") {
      // Keep track of the rhs offset of the last token to wrap across lines -
      // its the rhs offset of the first line of the template string, used to
      // determine its width.
      if (I[0]->IsMultiline)
        EndColumnInFirstLine = I[0]->OriginalColumn + I[0]->ColumnWidth;
      // If the token has newlines, the token before it (if it exists) is the
      // rhs end of the previous line.
      if (I[0]->NewlinesBefore > 0 && (I + 1 != E)) {
        EndColumnInFirstLine = I[1]->OriginalColumn + I[1]->ColumnWidth;
        IsMultiline = true;
      }
      continue;
    }

    // Found the opening backtick: collapse everything from here through the
    // closing backtick into the single token at Tokens.back().
    Tokens.resize(Tokens.size() - TokenCount);
    Tokens.back()->Type = TT_TemplateString;
    const char *EndOffset =
        EndBacktick->TokenText.data() + 1 + CommentBacktickPos;
    if (CommentBacktickPos != 0) {
      // If the backtick was not the first character (e.g. in a comment),
      // re-lex after the backtick position.
      SourceLocation Loc = EndBacktick->Tok.getLocation();
      resetLexer(SourceMgr.getFileOffset(Loc) + CommentBacktickPos + 1);
    }
    Tokens.back()->TokenText =
        StringRef(Tokens.back()->TokenText.data(),
                  EndOffset - Tokens.back()->TokenText.data());

    unsigned EndOriginalColumn = EndBacktick->OriginalColumn;
    if (EndOriginalColumn == 0) {
      // Column was not recorded on the token; recover it from the location.
      SourceLocation Loc = EndBacktick->Tok.getLocation();
      EndOriginalColumn = SourceMgr.getSpellingColumnNumber(Loc);
    }
    // If the ` is further down within the token (e.g. in a comment).
    EndOriginalColumn += CommentBacktickPos;

    if (IsMultiline) {
      // ColumnWidth is from backtick to last token in line.
      // LastLineColumnWidth is 0 to backtick.
      // x = `some content
      //     until here`;
      Tokens.back()->ColumnWidth =
          EndColumnInFirstLine - Tokens.back()->OriginalColumn;
      // +1 for the ` itself.
      Tokens.back()->LastLineColumnWidth = EndOriginalColumn + 1;
      Tokens.back()->IsMultiline = true;
    } else {
      // Token simply spans from start to end, +1 for the ` itself.
      Tokens.back()->ColumnWidth =
          EndOriginalColumn - Tokens.back()->OriginalColumn + 1;
    }
    return true;
  }
  return false;
}
bool tryMerge_TMacro() {
if (Tokens.size() < 4)
return false;
FormatToken *Last = Tokens.back();
if (!Last->is(tok::r_paren))
return false;
FormatToken *String = Tokens[Tokens.size() - 2];
if (!String->is(tok::string_literal) || String->IsMultiline)
return false;
if (!Tokens[Tokens.size() - 3]->is(tok::l_paren))
return false;
FormatToken *Macro = Tokens[Tokens.size() - 4];
if (Macro->TokenText != "_T")
return false;
const char *Start = Macro->TokenText.data();
const char *End = Last->TokenText.data() + Last->TokenText.size();
String->TokenText = StringRef(Start, End - Start);
String->IsFirst = Macro->IsFirst;
String->LastNewlineOffset = Macro->LastNewlineOffset;
String->WhitespaceRange = Macro->WhitespaceRange;
String->OriginalColumn = Macro->OriginalColumn;
String->ColumnWidth = encoding::columnWidthWithTabs(
String->TokenText, String->OriginalColumn, Style.TabWidth, Encoding);
String->NewlinesBefore = Macro->NewlinesBefore;
String->HasUnescapedNewline = Macro->HasUnescapedNewline;
Tokens.pop_back();
Tokens.pop_back();
Tokens.pop_back();
Tokens.back() = String;
return true;
}
// If the current line begins with a version-control conflict marker, merge
// all of its tokens into one placeholder token so the parser can pass the
// line through untouched. Returns true if such a line was recognized.
bool tryMergeConflictMarkers() {
  if (Tokens.back()->NewlinesBefore == 0 && Tokens.back()->isNot(tok::eof))
    return false;

  // Conflict lines look like:
  // <marker> <text from the vcs>
  // For example:
  // >>>>>>> /file/in/file/system at revision 1234
  //
  // We merge all tokens in a line that starts with a conflict marker
  // into a single token with a special token type that the unwrapped line
  // parser will use to correctly rebuild the underlying code.

  FileID ID;
  // Get the position of the first token in the line.
  unsigned FirstInLineOffset;
  std::tie(ID, FirstInLineOffset) = SourceMgr.getDecomposedLoc(
      Tokens[FirstInLineIndex]->getStartOfNonWhitespace());
  StringRef Buffer = SourceMgr.getBuffer(ID)->getBuffer();
  // Calculate the offset of the start of the current line.
  auto LineOffset = Buffer.rfind('\n', FirstInLineOffset);
  if (LineOffset == StringRef::npos) {
    LineOffset = 0;
  } else {
    ++LineOffset;
  }

  // The marker is the run of characters up to the first space or newline.
  auto FirstSpace = Buffer.find_first_of(" \n", LineOffset);
  StringRef LineStart;
  if (FirstSpace == StringRef::npos) {
    LineStart = Buffer.substr(LineOffset);
  } else {
    LineStart = Buffer.substr(LineOffset, FirstSpace - LineOffset);
  }

  // NOTE(review): the 4-character '>>>>'/'===='/'<<<<' forms look like an
  // alternative (Perforce-style) marker syntax — confirm against the VCS docs.
  TokenType Type = TT_Unknown;
  if (LineStart == "<<<<<<<" || LineStart == ">>>>") {
    Type = TT_ConflictStart;
  } else if (LineStart == "|||||||" || LineStart == "=======" ||
             LineStart == "====") {
    Type = TT_ConflictAlternative;
  } else if (LineStart == ">>>>>>>" || LineStart == "<<<<") {
    Type = TT_ConflictEnd;
  }

  if (Type != TT_Unknown) {
    FormatToken *Next = Tokens.back();

    // Collapse the whole line into its first token, keeping the token that
    // started the next line.
    Tokens.resize(FirstInLineIndex + 1);
    // We do not need to build a complete token here, as we will skip it
    // during parsing anyway (as we must not touch whitespace around conflict
    // markers).
    Tokens.back()->Type = Type;
    Tokens.back()->Tok.setKind(tok::kw___unknown_anytype);

    Tokens.push_back(Next);
    return true;
  }

  return false;
}
// Synthesize the second '>' or '<' of a previously split '>>' / '<<' token.
// The new token reuses the original token's text and is placed one column
// (and one source character) after it.
FormatToken *getStashedToken() {
  const Token SavedTok = FormatTok->Tok;
  const StringRef SavedText = FormatTok->TokenText;
  const unsigned SavedColumn = FormatTok->OriginalColumn;

  FormatTok = new (Allocator.Allocate()) FormatToken;
  FormatTok->Tok = SavedTok;
  // Shift the location to the last character of the original token.
  SourceLocation Loc =
      FormatTok->Tok.getLocation().getLocWithOffset(SavedTok.getLength() - 1);
  FormatTok->Tok.setLocation(Loc);
  // No whitespace precedes the synthesized token.
  FormatTok->WhitespaceRange = SourceRange(Loc, Loc);
  FormatTok->TokenText = SavedText;
  FormatTok->OriginalColumn = SavedColumn + 1;
  FormatTok->ColumnWidth = 1;
  return FormatTok;
}
// Lex and return the next significant token. Folds leading whitespace into
// the token's WhitespaceRange, maintains the running Column, splits '>>' and
// '<<' into stashed halves, and classifies for-each / macro-block
// identifiers for C++.
FormatToken *getNextToken() {
  // First hand out any pending second half of a split '>>' / '<<'.
  if (GreaterStashed) {
    GreaterStashed = false;
    return getStashedToken();
  }
  if (LessStashed) {
    LessStashed = false;
    return getStashedToken();
  }

  FormatTok = new (Allocator.Allocate()) FormatToken;
  readRawToken(*FormatTok);
  SourceLocation WhitespaceStart =
      FormatTok->Tok.getLocation().getLocWithOffset(-TrailingWhitespace);
  FormatTok->IsFirst = IsFirstToken;
  IsFirstToken = false;

  // Consume and record whitespace until we find a significant token.
  unsigned WhitespaceLength = TrailingWhitespace;
  while (FormatTok->Tok.is(tok::unknown)) {
    StringRef Text = FormatTok->TokenText;
    // Returns true if the backslash run ending before Text[pos] escapes the
    // following newline (odd number of '\\', ignoring a '\r' of '\r\n').
    auto EscapesNewline = [&](int pos) {
      // A '\r' here is just part of '\r\n'. Skip it.
      if (pos >= 0 && Text[pos] == '\r')
        --pos;
      // See whether there is an odd number of '\' before this.
      unsigned count = 0;
      for (; pos >= 0; --pos, ++count)
        if (Text[pos] != '\\')
          break;
      return count & 1;
    };
    // FIXME: This miscounts tok:unknown tokens that are not just
    // whitespace, e.g. a '`' character.
    for (int i = 0, e = Text.size(); i != e; ++i) {
      switch (Text[i]) {
      case '\n':
        ++FormatTok->NewlinesBefore;
        FormatTok->HasUnescapedNewline = !EscapesNewline(i - 1);
        FormatTok->LastNewlineOffset = WhitespaceLength + i + 1;
        Column = 0;
        break;
      case '\r':
        FormatTok->LastNewlineOffset = WhitespaceLength + i + 1;
        Column = 0;
        break;
      case '\f':
      case '\v':
        Column = 0;
        break;
      case ' ':
        ++Column;
        break;
      case '\t':
        // Advance to the next tab stop.
        Column += Style.TabWidth - Column % Style.TabWidth;
        break;
      case '\\':
        // A backslash not escaping a newline is treated as content.
        if (i + 1 == e || (Text[i + 1] != '\r' && Text[i + 1] != '\n'))
          FormatTok->Type = TT_ImplicitStringLiteral;
        break;
      default:
        FormatTok->Type = TT_ImplicitStringLiteral;
        break;
      }
    }

    if (FormatTok->is(TT_ImplicitStringLiteral))
      break;
    WhitespaceLength += FormatTok->Tok.getLength();

    readRawToken(*FormatTok);
  }

  // In case the token starts with escaped newlines, we want to
  // take them into account as whitespace - this pattern is quite frequent
  // in macro definitions.
  // FIXME: Add a more explicit test.
  while (FormatTok->TokenText.size() > 1 && FormatTok->TokenText[0] == '\\' &&
         FormatTok->TokenText[1] == '\n') {
    ++FormatTok->NewlinesBefore;
    WhitespaceLength += 2;
    FormatTok->LastNewlineOffset = 2;
    Column = 0;
    FormatTok->TokenText = FormatTok->TokenText.substr(2);
  }

  FormatTok->WhitespaceRange = SourceRange(
      WhitespaceStart, WhitespaceStart.getLocWithOffset(WhitespaceLength));

  FormatTok->OriginalColumn = Column;

  TrailingWhitespace = 0;
  if (FormatTok->Tok.is(tok::comment)) {
    // FIXME: Add the trimmed whitespace to Column.
    StringRef UntrimmedText = FormatTok->TokenText;
    FormatTok->TokenText = FormatTok->TokenText.rtrim(" \t\v\f");
    TrailingWhitespace = UntrimmedText.size() - FormatTok->TokenText.size();
  } else if (FormatTok->Tok.is(tok::raw_identifier)) {
    // Resolve keywords from the raw identifier text.
    IdentifierInfo &Info = IdentTable.get(FormatTok->TokenText);
    FormatTok->Tok.setIdentifierInfo(&Info);
    FormatTok->Tok.setKind(Info.getTokenID());
    if (Style.Language == FormatStyle::LK_Java &&
        FormatTok->isOneOf(tok::kw_struct, tok::kw_union, tok::kw_delete)) {
      // These C++ keywords are ordinary identifiers in Java.
      FormatTok->Tok.setKind(tok::identifier);
      FormatTok->Tok.setIdentifierInfo(nullptr);
    }
  } else if (FormatTok->Tok.is(tok::greatergreater)) {
    // Split '>>' into two '>' tokens; the second is stashed and returned by
    // the next getNextToken() call.
    FormatTok->Tok.setKind(tok::greater);
    FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
    GreaterStashed = true;
  } else if (FormatTok->Tok.is(tok::lessless)) {
    // Likewise split '<<' into two '<' tokens.
    FormatTok->Tok.setKind(tok::less);
    FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
    LessStashed = true;
  }

  // Now FormatTok is the next non-whitespace token.

  StringRef Text = FormatTok->TokenText;
  size_t FirstNewlinePos = Text.find('\n');
  if (FirstNewlinePos == StringRef::npos) {
    // FIXME: ColumnWidth actually depends on the start column, we need to
    // take this into account when the token is moved.
    FormatTok->ColumnWidth =
        encoding::columnWidthWithTabs(Text, Column, Style.TabWidth, Encoding);
    Column += FormatTok->ColumnWidth;
  } else {
    FormatTok->IsMultiline = true;
    // FIXME: ColumnWidth actually depends on the start column, we need to
    // take this into account when the token is moved.
    FormatTok->ColumnWidth = encoding::columnWidthWithTabs(
        Text.substr(0, FirstNewlinePos), Column, Style.TabWidth, Encoding);
    // The last line of the token always starts in column 0.
    // Thus, the length can be precomputed even in the presence of tabs.
    FormatTok->LastLineColumnWidth = encoding::columnWidthWithTabs(
        Text.substr(Text.find_last_of('\n') + 1), 0, Style.TabWidth,
        Encoding);
    Column = FormatTok->LastLineColumnWidth;
  }

  if (Style.Language == FormatStyle::LK_Cpp) {
    // Classify configured for-each macros and macro-block begin/end
    // identifiers, but not while inside a #define line.
    if (!(Tokens.size() > 0 && Tokens.back()->Tok.getIdentifierInfo() &&
          Tokens.back()->Tok.getIdentifierInfo()->getPPKeywordID() ==
              tok::pp_define) &&
        std::find(ForEachMacros.begin(), ForEachMacros.end(),
                  FormatTok->Tok.getIdentifierInfo()) != ForEachMacros.end()) {
      FormatTok->Type = TT_ForEachMacro;
    } else if (FormatTok->is(tok::identifier)) {
      if (MacroBlockBeginRegex.match(Text)) {
        FormatTok->Type = TT_MacroBlockBegin;
      } else if (MacroBlockEndRegex.match(Text)) {
        FormatTok->Type = TT_MacroBlockEnd;
      }
    }
  }

  return FormatTok;
}
// The token currently being produced by getNextToken().
FormatToken *FormatTok;
// True until the first significant token has been handed out.
bool IsFirstToken;
// Whether the second half of a split '>>' / '<<' is still pending.
bool GreaterStashed, LessStashed;
// Current column position, accounting for tabs and multi-byte characters.
unsigned Column;
// Amount of whitespace trimmed from the end of the previous (comment) token;
// folded into the next token's whitespace range.
unsigned TrailingWhitespace;
std::unique_ptr<Lexer> Lex;
SourceManager &SourceMgr;
FileID ID;
FormatStyle &Style;
IdentifierTable IdentTable;
AdditionalKeywords Keywords;
encoding::Encoding Encoding;
llvm::SpecificBumpPtrAllocator<FormatToken> Allocator;
// Index (in 'Tokens') of the last token that starts a new line.
unsigned FirstInLineIndex;
SmallVector<FormatToken *, 16> Tokens;
// Identifiers to be classified as TT_ForEachMacro (see getNextToken()).
SmallVector<IdentifierInfo *, 8> ForEachMacros;
// True while between "clang-format off" and "clang-format on" comments;
// tokens read in that state are marked Finalized (see readRawToken()).
bool FormattingDisabled;
llvm::Regex MacroBlockBeginRegex;
llvm::Regex MacroBlockEndRegex;
// Lex a single raw token into \p Tok, attach its source text, and update the
// "clang-format off/on" state.
void readRawToken(FormatToken &Tok) {
  Lex->LexFromRawLexer(Tok.Tok);
  Tok.TokenText = StringRef(SourceMgr.getCharacterData(Tok.Tok.getLocation()),
                            Tok.Tok.getLength());
  // For formatting, treat unterminated string literals like normal string
  // literals.
  if (Tok.is(tok::unknown)) {
    if (!Tok.TokenText.empty() && Tok.TokenText[0] == '"') {
      Tok.Tok.setKind(tok::string_literal);
      Tok.IsUnterminatedLiteral = true;
    } else if (Style.Language == FormatStyle::LK_JavaScript &&
               Tok.TokenText == "''") {
      // An empty single-quoted JavaScript string lexes as tok::unknown.
      Tok.Tok.setKind(tok::char_constant);
    }
  }

  // The ordering of the next three steps is deliberate: a "clang-format on"
  // comment clears the flag *before* Finalized is assigned, and a
  // "clang-format off" comment sets it only *after* — so neither marker
  // comment itself is finalized, only the tokens strictly between them.
  if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format on" ||
                               Tok.TokenText == "/* clang-format on */")) {
    FormattingDisabled = false;
  }

  Tok.Finalized = FormattingDisabled;

  if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format off" ||
                               Tok.TokenText == "/* clang-format off */")) {
    FormattingDisabled = true;
  }
}
void resetLexer(unsigned Offset) {
StringRef Buffer = SourceMgr.getBufferData(ID);
Lex.reset(new Lexer(SourceMgr.getLocForStartOfFile(ID),
getFormattingLangOpts(Style), Buffer.begin(),
Buffer.begin() + Offset, Buffer.end()));
Lex->SetKeepWhitespaceMode(true);
TrailingWhitespace = 0;
}
};
// Human-readable language name used in diagnostics.
static StringRef getLanguageName(FormatStyle::LanguageKind Language) {
  if (Language == FormatStyle::LK_Cpp)
    return "C++";
  if (Language == FormatStyle::LK_Java)
    return "Java";
  if (Language == FormatStyle::LK_JavaScript)
    return "JavaScript";
  if (Language == FormatStyle::LK_Proto)
    return "Proto";
  return "Unknown";
}
/// \brief Drives formatting of one file: parses it into unwrapped lines,
/// annotates them, and turns the computed layout into whitespace
/// replacements, restricted to the requested source ranges.
class Formatter : public UnwrappedLineConsumer {
public:
  // \p Ranges limits which parts of the file may be changed.
  Formatter(const FormatStyle &Style, SourceManager &SourceMgr, FileID ID,
            ArrayRef<CharSourceRange> Ranges)
      : Style(Style), ID(ID), SourceMgr(SourceMgr),
        Whitespaces(SourceMgr, Style,
                    inputUsesCRLF(SourceMgr.getBufferData(ID))),
        Ranges(Ranges.begin(), Ranges.end()), UnwrappedLines(1),
        Encoding(encoding::detectEncoding(SourceMgr.getBufferData(ID))) {
    DEBUG(llvm::dbgs() << "File encoding: "
                       << (Encoding == encoding::Encoding_UTF8 ? "UTF8"
                                                               : "unknown")
                       << "\n");
    DEBUG(llvm::dbgs() << "Language: " << getLanguageName(Style.Language)
                       << "\n");
  }

  // Formats the file and returns the replacements. *IncompleteFormat is set
  // (by the line formatter) when the input could not be fully handled.
  tooling::Replacements format(bool *IncompleteFormat) {
    tooling::Replacements Result;
    FormatTokenLexer Tokens(SourceMgr, ID, Style, Encoding);

    UnwrappedLineParser Parser(Style, Tokens.getKeywords(), Tokens.lex(),
                               *this);
    Parser.parse();
    assert(UnwrappedLines.rbegin()->empty());
    // Lines arrive batched into "runs" via finishRun(); the final entry is
    // always an empty trailing run, hence "Run + 1 != RunE".
    for (unsigned Run = 0, RunE = UnwrappedLines.size(); Run + 1 != RunE;
         ++Run) {
      DEBUG(llvm::dbgs() << "Run " << Run << "...\n");
      SmallVector<AnnotatedLine *, 16> AnnotatedLines;
      for (unsigned i = 0, e = UnwrappedLines[Run].size(); i != e; ++i) {
        AnnotatedLines.push_back(new AnnotatedLine(UnwrappedLines[Run][i]));
      }
      tooling::Replacements RunResult =
          format(AnnotatedLines, Tokens, IncompleteFormat);
      DEBUG({
        llvm::dbgs() << "Replacements for run " << Run << ":\n";
        for (tooling::Replacements::iterator I = RunResult.begin(),
                                             E = RunResult.end();
             I != E; ++I) {
          llvm::dbgs() << I->toString() << "\n";
        }
      });
      for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
        delete AnnotatedLines[i];
      }
      Result.insert(RunResult.begin(), RunResult.end());
      // Whitespace bookkeeping is per-run.
      Whitespaces.reset();
    }
    return Result;
  }

  // Formats a single run of already-parsed lines: annotate, derive local
  // style, compute affected lines, then lay the lines out.
  tooling::Replacements format(SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
                               FormatTokenLexer &Tokens,
                               bool *IncompleteFormat) {
    TokenAnnotator Annotator(Style, Tokens.getKeywords());
    for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
      Annotator.annotate(*AnnotatedLines[i]);
    }
    deriveLocalStyle(AnnotatedLines);
    for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
      Annotator.calculateFormattingInformation(*AnnotatedLines[i]);
    }
    computeAffectedLines(AnnotatedLines.begin(), AnnotatedLines.end());

    Annotator.setCommentLineLevels(AnnotatedLines);
    ContinuationIndenter Indenter(Style, Tokens.getKeywords(), SourceMgr,
                                  Whitespaces, Encoding,
                                  BinPackInconclusiveFunctions);
    UnwrappedLineFormatter(&Indenter, &Whitespaces, Style, Tokens.getKeywords(),
                           IncompleteFormat)
        .format(AnnotatedLines);
    return Whitespaces.generateReplacements();
  }

private:
  // Determines which lines are affected by the SourceRanges given as input.
  // Returns \c true if at least one line between I and E or one of their
  // children is affected.
  bool computeAffectedLines(SmallVectorImpl<AnnotatedLine *>::iterator I,
                            SmallVectorImpl<AnnotatedLine *>::iterator E) {
    bool SomeLineAffected = false;
    const AnnotatedLine *PreviousLine = nullptr;
    while (I != E) {
      AnnotatedLine *Line = *I;
      Line->LeadingEmptyLinesAffected = affectsLeadingEmptyLines(*Line->First);

      // If a line is part of a preprocessor directive, it needs to be formatted
      // if any token within the directive is affected.
      if (Line->InPPDirective) {
        FormatToken *Last = Line->Last;
        SmallVectorImpl<AnnotatedLine *>::iterator PPEnd = I + 1;
        while (PPEnd != E && !(*PPEnd)->First->HasUnescapedNewline) {
          Last = (*PPEnd)->Last;
          ++PPEnd;
        }

        if (affectsTokenRange(*Line->First, *Last,
                              /*IncludeLeadingNewlines=*/false)) {
          SomeLineAffected = true;
          markAllAsAffected(I, PPEnd);
        }
        I = PPEnd;
        continue;
      }

      if (nonPPLineAffected(Line, PreviousLine))
        SomeLineAffected = true;

      PreviousLine = Line;
      ++I;
    }
    return SomeLineAffected;
  }

  // Determines whether 'Line' is affected by the SourceRanges given as input.
  // Returns \c true if line or one if its children is affected.
  bool nonPPLineAffected(AnnotatedLine *Line,
                         const AnnotatedLine *PreviousLine) {
    bool SomeLineAffected = false;
    Line->ChildrenAffected =
        computeAffectedLines(Line->Children.begin(), Line->Children.end());
    if (Line->ChildrenAffected)
      SomeLineAffected = true;

    // Stores whether one of the line's tokens is directly affected.
    bool SomeTokenAffected = false;
    // Stores whether we need to look at the leading newlines of the next token
    // in order to determine whether it was affected.
    bool IncludeLeadingNewlines = false;

    // Stores whether the first child line of any of this line's tokens is
    // affected.
    bool SomeFirstChildAffected = false;

    for (FormatToken *Tok = Line->First; Tok; Tok = Tok->Next) {
      // Determine whether 'Tok' was affected.
      if (affectsTokenRange(*Tok, *Tok, IncludeLeadingNewlines))
        SomeTokenAffected = true;

      // Determine whether the first child of 'Tok' was affected.
      if (!Tok->Children.empty() && Tok->Children.front()->Affected)
        SomeFirstChildAffected = true;

      IncludeLeadingNewlines = Tok->Children.empty();
    }

    // Was this line moved, i.e. has it previously been on the same line as an
    // affected line?
    bool LineMoved = PreviousLine && PreviousLine->Affected &&
                     Line->First->NewlinesBefore == 0;

    bool IsContinuedComment =
        Line->First->is(tok::comment) && Line->First->Next == nullptr &&
        Line->First->NewlinesBefore < 2 && PreviousLine &&
        PreviousLine->Affected && PreviousLine->Last->is(tok::comment);

    if (SomeTokenAffected || SomeFirstChildAffected || LineMoved ||
        IsContinuedComment) {
      Line->Affected = true;
      SomeLineAffected = true;
    }
    return SomeLineAffected;
  }

  // Marks all lines between I and E as well as all their children as affected.
  void markAllAsAffected(SmallVectorImpl<AnnotatedLine *>::iterator I,
                         SmallVectorImpl<AnnotatedLine *>::iterator E) {
    while (I != E) {
      (*I)->Affected = true;
      markAllAsAffected((*I)->Children.begin(), (*I)->Children.end());
      ++I;
    }
  }

  // Returns true if the range from 'First' to 'Last' intersects with one of the
  // input ranges.
  bool affectsTokenRange(const FormatToken &First, const FormatToken &Last,
                         bool IncludeLeadingNewlines) {
    SourceLocation Start = First.WhitespaceRange.getBegin();
    if (!IncludeLeadingNewlines)
      Start = Start.getLocWithOffset(First.LastNewlineOffset);
    SourceLocation End = Last.getStartOfNonWhitespace();
    End = End.getLocWithOffset(Last.TokenText.size());
    CharSourceRange Range = CharSourceRange::getCharRange(Start, End);
    return affectsCharSourceRange(Range);
  }

  // Returns true if one of the input ranges intersect the leading empty lines
  // before 'Tok'.
  bool affectsLeadingEmptyLines(const FormatToken &Tok) {
    CharSourceRange EmptyLineRange = CharSourceRange::getCharRange(
        Tok.WhitespaceRange.getBegin(),
        Tok.WhitespaceRange.getBegin().getLocWithOffset(Tok.LastNewlineOffset));
    return affectsCharSourceRange(EmptyLineRange);
  }

  // Returns true if 'Range' intersects with one of the input ranges.
  bool affectsCharSourceRange(const CharSourceRange &Range) {
    for (SmallVectorImpl<CharSourceRange>::const_iterator I = Ranges.begin(),
                                                          E = Ranges.end();
         I != E; ++I) {
      if (!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(), I->getBegin()) &&
          !SourceMgr.isBeforeInTranslationUnit(I->getEnd(), Range.getBegin()))
        return true;
    }
    return false;
  }

  // Heuristic: the input uses CRLF line endings if more than half of its
  // '\n' characters are part of a "\r\n" pair.
  static bool inputUsesCRLF(StringRef Text) {
    return Text.count('\r') * 2 > Text.count('\n');
  }

  // Derives settings the style leaves to "auto" (pointer alignment, language
  // standard, bin-packing conclusiveness) from how the existing code is laid
  // out.
  void
  deriveLocalStyle(const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines) {
    unsigned CountBoundToVariable = 0;
    unsigned CountBoundToType = 0;
    bool HasCpp03IncompatibleFormat = false;
    bool HasBinPackedFunction = false;
    bool HasOnePerLineFunction = false;
    for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
      if (!AnnotatedLines[i]->First->Next)
        continue;
      FormatToken *Tok = AnnotatedLines[i]->First->Next;
      while (Tok->Next) {
        if (Tok->is(TT_PointerOrReference)) {
          // Count whether '*'/'&' binds to the type or to the variable name.
          bool SpacesBefore =
              Tok->WhitespaceRange.getBegin() != Tok->WhitespaceRange.getEnd();
          bool SpacesAfter = Tok->Next->WhitespaceRange.getBegin() !=
                             Tok->Next->WhitespaceRange.getEnd();
          if (SpacesBefore && !SpacesAfter)
            ++CountBoundToVariable;
          else if (!SpacesBefore && SpacesAfter)
            ++CountBoundToType;
        }

        if (Tok->WhitespaceRange.getBegin() == Tok->WhitespaceRange.getEnd()) {
          // 'A<B>::x' and 'A<B<C>>' without spaces only parse in C++11.
          if (Tok->is(tok::coloncolon) && Tok->Previous->is(TT_TemplateOpener))
            HasCpp03IncompatibleFormat = true;
          if (Tok->is(TT_TemplateCloser) &&
              Tok->Previous->is(TT_TemplateCloser))
            HasCpp03IncompatibleFormat = true;
        }

        if (Tok->PackingKind == PPK_BinPacked)
          HasBinPackedFunction = true;
        if (Tok->PackingKind == PPK_OnePerLine)
          HasOnePerLineFunction = true;

        Tok = Tok->Next;
      }
    }
    if (Style.DerivePointerAlignment) {
      if (CountBoundToType > CountBoundToVariable)
        Style.PointerAlignment = FormatStyle::PAS_Left;
      else if (CountBoundToType < CountBoundToVariable)
        Style.PointerAlignment = FormatStyle::PAS_Right;
    }
    if (Style.Standard == FormatStyle::LS_Auto) {
      Style.Standard = HasCpp03IncompatibleFormat ? FormatStyle::LS_Cpp11
                                                  : FormatStyle::LS_Cpp03;
    }
    BinPackInconclusiveFunctions =
        HasBinPackedFunction || !HasOnePerLineFunction;
  }

  // UnwrappedLineConsumer: collect each parsed line into the current run.
  void consumeUnwrappedLine(const UnwrappedLine &TheLine) override {
    assert(!UnwrappedLines.empty());
    UnwrappedLines.back().push_back(TheLine);
  }

  // UnwrappedLineConsumer: start a new (initially empty) run of lines.
  void finishRun() override {
    UnwrappedLines.push_back(SmallVector<UnwrappedLine, 16>());
  }

  FormatStyle Style;
  FileID ID;
  SourceManager &SourceMgr;
  WhitespaceManager Whitespaces;
  // The character ranges formatting is restricted to.
  SmallVector<CharSourceRange, 8> Ranges;
  // One vector of lines per run; the last entry is always an empty
  // trailing run (see format() and finishRun()).
  SmallVector<SmallVector<UnwrappedLine, 16>, 2> UnwrappedLines;
  encoding::Encoding Encoding;
  bool BinPackInconclusiveFunctions;
};
} // end anonymous namespace
// Reformats the given ranges of the file identified by \p ID and returns the
// replacements to apply. Honors FormatStyle::DisableFormat.
tooling::Replacements reformat(const FormatStyle &Style,
                               SourceManager &SourceMgr, FileID ID,
                               ArrayRef<CharSourceRange> Ranges,
                               bool *IncompleteFormat) {
  if (Style.DisableFormat)
    return tooling::Replacements();
  return Formatter(Style, SourceMgr, ID, Ranges).format(IncompleteFormat);
}
// Convenience overload: wraps \p Code in an in-memory virtual file and
// forwards to the SourceManager-based reformat().
tooling::Replacements reformat(const FormatStyle &Style, StringRef Code,
                               ArrayRef<tooling::Range> Ranges,
                               StringRef FileName, bool *IncompleteFormat) {
  if (Style.DisableFormat)
    return tooling::Replacements();

  // Set up a SourceManager whose only file is the given code buffer.
  FileManager FileMgr((FileSystemOptions()));
  DiagnosticsEngine Diags(IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
                          new DiagnosticOptions);
  SourceManager SM(Diags, FileMgr);
  std::unique_ptr<llvm::MemoryBuffer> CodeBuffer =
      llvm::MemoryBuffer::getMemBuffer(Code, FileName);
  const clang::FileEntry *Entry =
      FileMgr.getVirtualFile(FileName, CodeBuffer->getBufferSize(), 0);
  SM.overrideFileContents(Entry, std::move(CodeBuffer));
  FileID ID = SM.createFileID(Entry, SourceLocation(), clang::SrcMgr::C_User);

  // Translate the offset/length ranges into character source ranges.
  SourceLocation StartOfFile = SM.getLocForStartOfFile(ID);
  std::vector<CharSourceRange> CharRanges;
  CharRanges.reserve(Ranges.size());
  for (const tooling::Range &R : Ranges) {
    SourceLocation Begin = StartOfFile.getLocWithOffset(R.getOffset());
    CharRanges.push_back(CharSourceRange::getCharRange(
        Begin, Begin.getLocWithOffset(R.getLength())));
  }
  return reformat(Style, SM, ID, CharRanges, IncompleteFormat);
}
// Returns the LangOptions used by the raw lexer while formatting.
// NOTE(review): the MS_SUPPORT_VARIABLE_LANGOPTS guard is presumably a
// DirectXShaderCompiler build configuration — when it is not defined, the
// defaults of LangOptions are used unchanged; confirm against the build.
LangOptions getFormattingLangOpts(const FormatStyle &Style) {
  LangOptions LangOpts;
#ifdef MS_SUPPORT_VARIABLE_LANGOPTS
  LangOpts.CPlusPlus = 1;
  // C++11/14 features are enabled unless the style pins C++03.
  LangOpts.CPlusPlus11 = Style.Standard == FormatStyle::LS_Cpp03 ? 0 : 1;
  LangOpts.CPlusPlus14 = Style.Standard == FormatStyle::LS_Cpp03 ? 0 : 1;
  LangOpts.LineComment = 1;
  // Alternative operator names ('and', 'or', ...) are only keywords in C++.
  bool AlternativeOperators = Style.Language == FormatStyle::LK_Cpp;
  LangOpts.CXXOperatorNames = AlternativeOperators ? 1 : 0;
  LangOpts.Bool = 1;
  LangOpts.ObjC1 = 1;
  LangOpts.ObjC2 = 1;
  LangOpts.MicrosoftExt = 1; // To get kw___try, kw___finally.
#endif
  return LangOpts;
}
// Help text for the -style command-line option of clang-format tools
// (declared in Format.h).
const char *StyleOptionHelpDescription =
    "Coding style, currently supports:\n"
    "  LLVM, Google, Chromium, Mozilla, WebKit.\n"
    "Use -style=file to load style configuration from\n"
    ".clang-format file located in one of the parent\n"
    "directories of the source file (or current\n"
    "directory for stdin).\n"
    "Use -style=\"{key: value, ...}\" to set specific\n"
    "parameters, e.g.:\n"
    "  -style=\"{BasedOnStyle: llvm, IndentWidth: 8}\"";
// Guesses the language to format from the file extension; anything not
// recognized is treated as C++.
// Fix: the ".java" check previously used case-sensitive endswith() while all
// other extensions used endswith_lower(); make it case-insensitive for
// consistency (backward compatible — strictly more file names match).
static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) {
  if (FileName.endswith_lower(".java")) {
    return FormatStyle::LK_Java;
  } else if (FileName.endswith_lower(".js") || FileName.endswith_lower(".ts")) {
    // JavaScript or TypeScript.
    return FormatStyle::LK_JavaScript;
  } else if (FileName.endswith_lower(".proto") ||
             FileName.endswith_lower(".protodevel")) {
    return FormatStyle::LK_Proto;
  }
  // Default: C/C++/Objective-C and everything else.
  return FormatStyle::LK_Cpp;
}
// Resolves the style to use for \p FileName from \p StyleName, which may be:
//  - an inline "{key: value, ...}" YAML/JSON configuration,
//  - a predefined style name (LLVM, Google, ...), or
//  - "file", meaning: search the file's parent directories for a
//    .clang-format / _clang-format configuration file.
// Falls back to \p FallbackStyle (and then LLVM style) on any error.
FormatStyle getStyle(StringRef StyleName, StringRef FileName,
                     StringRef FallbackStyle) {
  FormatStyle Style = getLLVMStyle();
  Style.Language = getLanguageByFileName(FileName);
  if (!getPredefinedStyle(FallbackStyle, Style.Language, &Style)) {
    llvm::errs() << "Invalid fallback style \"" << FallbackStyle
                 << "\" using LLVM style\n";
    return Style;
  }

  if (StyleName.startswith("{")) {
    // Parse YAML/JSON style from the command line.
    if (std::error_code ec = parseConfiguration(StyleName, &Style)) {
      llvm::errs() << "Error parsing -style: " << ec.message() << ", using "
                   << FallbackStyle << " style\n";
    }
    return Style;
  }

  if (!StyleName.equals_lower("file")) {
    // A predefined style name; on error the fallback already in Style stays.
    if (!getPredefinedStyle(StyleName, Style.Language, &Style))
      llvm::errs() << "Invalid value for -style, using " << FallbackStyle
                   << " style\n";
    return Style;
  }

  // Look for .clang-format/_clang-format file in the file's parent directories.
  SmallString<128> UnsuitableConfigFiles;
  SmallString<128> Path(FileName);
  llvm::sys::fs::make_absolute(Path);
  for (StringRef Directory = Path; !Directory.empty();
       Directory = llvm::sys::path::parent_path(Directory)) {
    if (!llvm::sys::fs::is_directory(Directory))
      continue;
    SmallString<128> ConfigFile(Directory);

    llvm::sys::path::append(ConfigFile, ".clang-format");
    DEBUG(llvm::dbgs() << "Trying " << ConfigFile << "...\n");
    bool IsFile = false;
    // Ignore errors from is_regular_file: we only need to know if we can read
    // the file or not.
    llvm::sys::fs::is_regular_file(Twine(ConfigFile), IsFile);

    if (!IsFile) {
      // Try _clang-format too, since dotfiles are not commonly used on Windows.
      ConfigFile = Directory;
      llvm::sys::path::append(ConfigFile, "_clang-format");
      DEBUG(llvm::dbgs() << "Trying " << ConfigFile << "...\n");
      llvm::sys::fs::is_regular_file(Twine(ConfigFile), IsFile);
    }

    if (IsFile) {
      llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text =
          llvm::MemoryBuffer::getFile(ConfigFile.c_str());
      if (std::error_code EC = Text.getError()) {
        // Reading failed; stop searching and keep the fallback style.
        llvm::errs() << EC.message() << "\n";
        break;
      }
      if (std::error_code ec =
              parseConfiguration(Text.get()->getBuffer(), &Style)) {
        if (ec == ParseError::Unsuitable) {
          // The config exists but does not apply to this language; remember
          // it for the diagnostic and keep searching upwards.
          if (!UnsuitableConfigFiles.empty())
            UnsuitableConfigFiles.append(", ");
          UnsuitableConfigFiles.append(ConfigFile);
          continue;
        }
        llvm::errs() << "Error reading " << ConfigFile << ": " << ec.message()
                     << "\n";
        break;
      }
      DEBUG(llvm::dbgs() << "Using configuration file " << ConfigFile << "\n");
      return Style;
    }
  }
  if (!UnsuitableConfigFiles.empty()) {
    llvm::errs() << "Configuration file(s) do(es) not support "
                 << getLanguageName(Style.Language) << ": "
                 << UnsuitableConfigFiles << "\n";
  }
  return Style;
}
} // namespace format
} // namespace clang
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Format/UnwrappedLineFormatter.cpp | //===--- UnwrappedLineFormatter.cpp - Format C++ code ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "UnwrappedLineFormatter.h"
#include "WhitespaceManager.h"
#include "llvm/Support/Debug.h"
#define DEBUG_TYPE "format-formatter"
namespace clang {
namespace format {
namespace {
bool startsExternCBlock(const AnnotatedLine &Line) {
const FormatToken *Next = Line.First->getNextNonComment();
const FormatToken *NextNext = Next ? Next->getNextNonComment() : nullptr;
return Line.startsWith(tok::kw_extern) && Next && Next->isStringLiteral() &&
NextNext && NextNext->is(tok::l_brace);
}
/// \brief Tracks the indent level of \c AnnotatedLines across levels.
///
/// \c nextLine must be called for each \c AnnotatedLine, after which \c
/// getIndent() will return the indent for the last line \c nextLine was called
/// with.
/// If the line is not formatted (and thus the indent does not change), calling
/// \c adjustToUnmodifiedLine after the call to \c nextLine will cause
/// subsequent lines on the same level to be indented at the same level as the
/// given line.
class LevelIndentTracker {
public:
  LevelIndentTracker(const FormatStyle &Style,
                     const AdditionalKeywords &Keywords, unsigned StartLevel,
                     int AdditionalIndent)
      : Style(Style), Keywords(Keywords), AdditionalIndent(AdditionalIndent) {
    // Seed the per-level cache for all levels enclosing the start level.
    for (unsigned i = 0; i != StartLevel; ++i)
      IndentForLevel.push_back(Style.IndentWidth * i + AdditionalIndent);
  }

  /// \brief Returns the indent for the current line.
  unsigned getIndent() const { return Indent; }

  /// \brief Update the indent state given that \p Line is going to be formatted
  /// next.
  void nextLine(const AnnotatedLine &Line) {
    Offset = getIndentOffset(*Line.First);
    // Update the indent level cache size so that we can rely on it
    // having the right size in adjustToUnmodifiedline.
    while (IndentForLevel.size() <= Line.Level)
      IndentForLevel.push_back(-1);
    if (Line.InPPDirective) {
      // Preprocessor lines are indented purely by level, not by cache.
      Indent = Line.Level * Style.IndentWidth + AdditionalIndent;
    } else {
      // Drop cached indents of deeper (now closed) levels.
      IndentForLevel.resize(Line.Level + 1);
      Indent = getIndent(IndentForLevel, Line.Level);
    }
    // Apply the offset (e.g. for access specifiers), but never below column 0.
    if (static_cast<int>(Indent) + Offset >= 0)
      Indent += Offset;
  }

  /// \brief Update the level indent to adapt to the given \p Line.
  ///
  /// When a line is not formatted, we move the subsequent lines on the same
  /// level to the same indent.
  /// Note that \c nextLine must have been called before this method.
  void adjustToUnmodifiedLine(const AnnotatedLine &Line) {
    unsigned LevelIndent = Line.First->OriginalColumn;
    // Undo the offset so the cached value is the level's base indent.
    if (static_cast<int>(LevelIndent) - Offset >= 0)
      LevelIndent -= Offset;
    if ((!Line.First->is(tok::comment) || IndentForLevel[Line.Level] == -1) &&
        !Line.InPPDirective)
      IndentForLevel[Line.Level] = LevelIndent;
  }

private:
  /// \brief Get the offset of the line relatively to the level.
  ///
  /// For example, 'public:' labels in classes are offset by 1 or 2
  /// characters to the left from their level.
  int getIndentOffset(const FormatToken &RootToken) {
    if (Style.Language == FormatStyle::LK_Java ||
        Style.Language == FormatStyle::LK_JavaScript)
      return 0;
    if (RootToken.isAccessSpecifier(false) ||
        RootToken.isObjCAccessSpecifier() ||
        (RootToken.is(Keywords.kw_signals) && RootToken.Next &&
         RootToken.Next->is(tok::colon)))
      return Style.AccessModifierOffset;
    return 0;
  }

  /// \brief Get the indent of \p Level from \p IndentForLevel.
  ///
  /// \p IndentForLevel must contain the indent for the level \c l
  /// at \p IndentForLevel[l], or a value < 0 if the indent for
  /// that level is unknown.
  unsigned getIndent(ArrayRef<int> IndentForLevel, unsigned Level) {
    if (IndentForLevel[Level] != -1)
      return IndentForLevel[Level];
    if (Level == 0)
      return 0;
    // Unknown level: derive it from the nearest known enclosing level.
    return getIndent(IndentForLevel, Level - 1) + Style.IndentWidth;
  }

  const FormatStyle &Style;
  const AdditionalKeywords &Keywords;
  const unsigned AdditionalIndent;

  /// \brief The indent in characters for each level.
  std::vector<int> IndentForLevel;

  /// \brief Offset of the current line relative to the indent level.
  ///
  /// For example, the 'public' keywords is often indented with a negative
  /// offset.
  int Offset = 0;

  /// \brief The current line's indent.
  unsigned Indent = 0;
};
/// \brief Iterates over a sequence of annotated lines, merging several short
/// lines into one where the active \c FormatStyle allows it (short functions,
/// if statements, loops, case labels, blocks and split PP directives).
class LineJoiner {
public:
  LineJoiner(const FormatStyle &Style, const AdditionalKeywords &Keywords,
             const SmallVectorImpl<AnnotatedLine *> &Lines)
      : Style(Style), Keywords(Keywords), End(Lines.end()),
        Next(Lines.begin()) {}

  /// \brief Returns the next line, merging multiple lines into one if possible.
  const AnnotatedLine *getNextMergedLine(bool DryRun,
                                         LevelIndentTracker &IndentTracker) {
    if (Next == End)
      return nullptr;
    const AnnotatedLine *Current = *Next;
    IndentTracker.nextLine(*Current);
    unsigned MergedLines =
        tryFitMultipleLinesInOne(IndentTracker.getIndent(), Next, End);
    if (MergedLines > 0 && Style.ColumnLimit == 0)
      // Disallow line merging if there is a break at the start of one of the
      // input lines.
      for (unsigned i = 0; i < MergedLines; ++i)
        if (Next[i + 1]->First->NewlinesBefore > 0)
          MergedLines = 0;
    if (!DryRun)
      // Physically join the merged lines into Current's token chain.
      for (unsigned i = 0; i < MergedLines; ++i)
        join(*Next[i], *Next[i + 1]);
    Next = Next + MergedLines + 1;
    return Current;
  }

private:
  /// \brief Calculates how many lines can be merged into 1 starting at \p I.
  unsigned
  tryFitMultipleLinesInOne(unsigned Indent,
                           SmallVectorImpl<AnnotatedLine *>::const_iterator I,
                           SmallVectorImpl<AnnotatedLine *>::const_iterator E) {
    // Can't join the last line with anything.
    if (I + 1 == E)
      return 0;
    // We can never merge stuff if there are trailing line comments.
    const AnnotatedLine *TheLine = *I;
    if (TheLine->Last->is(TT_LineComment))
      return 0;
    if (I[1]->Type == LT_Invalid || I[1]->First->MustBreakBefore)
      return 0;
    if (TheLine->InPPDirective &&
        (!I[1]->InPPDirective || I[1]->First->HasUnescapedNewline))
      return 0;

    if (Style.ColumnLimit > 0 && Indent > Style.ColumnLimit)
      return 0;

    unsigned Limit =
        Style.ColumnLimit == 0 ? UINT_MAX : Style.ColumnLimit - Indent;
    // If we already exceed the column limit, we set 'Limit' to 0. The different
    // tryMerge..() functions can then decide whether to still do merging.
    Limit = TheLine->Last->TotalLength > Limit
                ? 0
                : Limit - TheLine->Last->TotalLength;

    // FIXME: TheLine->Level != 0 might or might not be the right check to do.
    // If necessary, change to something smarter.
    bool MergeShortFunctions =
        Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_All ||
        (Style.AllowShortFunctionsOnASingleLine >= FormatStyle::SFS_Empty &&
         I[1]->First->is(tok::r_brace)) ||
        (Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_Inline &&
         TheLine->Level != 0);

    // Line is a function header whose '{' hangs at the end.
    if (TheLine->Last->is(TT_FunctionLBrace) &&
        TheLine->First != TheLine->Last) {
      return MergeShortFunctions ? tryMergeSimpleBlock(I, E, Limit) : 0;
    }
    if (TheLine->Last->is(tok::l_brace)) {
      return Style.BreakBeforeBraces == FormatStyle::BS_Attach
                 ? tryMergeSimpleBlock(I, E, Limit)
                 : 0;
    }
    // The function's '{' is on the following line (non-attached brace styles).
    if (I[1]->First->is(TT_FunctionLBrace) &&
        Style.BreakBeforeBraces != FormatStyle::BS_Attach) {
      if (I[1]->Last->is(TT_LineComment))
        return 0;

      // Check for Limit <= 2 to account for the " {".
      if (Limit <= 2 || (Style.ColumnLimit == 0 && containsMustBreak(TheLine)))
        return 0;
      Limit -= 2;

      unsigned MergedLines = 0;
      if (MergeShortFunctions) {
        MergedLines = tryMergeSimpleBlock(I + 1, E, Limit);
        // If we managed to merge the block, count the function header, which is
        // on a separate line.
        if (MergedLines > 0)
          ++MergedLines;
      }
      return MergedLines;
    }
    if (TheLine->First->is(tok::kw_if)) {
      return Style.AllowShortIfStatementsOnASingleLine
                 ? tryMergeSimpleControlStatement(I, E, Limit)
                 : 0;
    }
    if (TheLine->First->isOneOf(tok::kw_for, tok::kw_while)) {
      return Style.AllowShortLoopsOnASingleLine
                 ? tryMergeSimpleControlStatement(I, E, Limit)
                 : 0;
    }
    if (TheLine->First->isOneOf(tok::kw_case, tok::kw_default)) {
      return Style.AllowShortCaseLabelsOnASingleLine
                 ? tryMergeShortCaseLabels(I, E, Limit)
                 : 0;
    }
    if (TheLine->InPPDirective &&
        (TheLine->First->HasUnescapedNewline || TheLine->First->IsFirst)) {
      return tryMergeSimplePPDirective(I, E, Limit);
    }
    return 0;
  }

  /// \brief Tries to pull the continuation of a two-line preprocessor
  /// directive up; returns 1 if the following line can be merged.
  unsigned
  tryMergeSimplePPDirective(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
                            SmallVectorImpl<AnnotatedLine *>::const_iterator E,
                            unsigned Limit) {
    if (Limit == 0)
      return 0;
    if (I + 2 != E && I[2]->InPPDirective && !I[2]->First->HasUnescapedNewline)
      return 0;
    if (1 + I[1]->Last->TotalLength > Limit)
      return 0;
    return 1;
  }

  /// \brief Tries to merge the single-statement body of an if/for/while onto
  /// the header line; returns 1 on success.
  unsigned tryMergeSimpleControlStatement(
      SmallVectorImpl<AnnotatedLine *>::const_iterator I,
      SmallVectorImpl<AnnotatedLine *>::const_iterator E, unsigned Limit) {
    if (Limit == 0)
      return 0;
    if ((Style.BreakBeforeBraces == FormatStyle::BS_Allman ||
         Style.BreakBeforeBraces == FormatStyle::BS_GNU) &&
        (I[1]->First->is(tok::l_brace) && !Style.AllowShortBlocksOnASingleLine))
      return 0;
    if (I[1]->InPPDirective != (*I)->InPPDirective ||
        (I[1]->InPPDirective && I[1]->First->HasUnescapedNewline))
      return 0;
    Limit = limitConsideringMacros(I + 1, E, Limit);
    AnnotatedLine &Line = **I;
    if (Line.Last->isNot(tok::r_paren))
      return 0;
    if (1 + I[1]->Last->TotalLength > Limit)
      return 0;
    if (I[1]->First->isOneOf(tok::semi, tok::kw_if, tok::kw_for, tok::kw_while,
                             TT_LineComment))
      return 0;
    // Only inline simple if's (no nested if or else).
    if (I + 2 != E && Line.startsWith(tok::kw_if) &&
        I[2]->First->is(tok::kw_else))
      return 0;
    return 1;
  }

  /// \brief Tries to merge up to two statements following a case label onto
  /// the label's line; returns the number of merged statement lines.
  unsigned
  tryMergeShortCaseLabels(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
                          SmallVectorImpl<AnnotatedLine *>::const_iterator E,
                          unsigned Limit) {
    if (Limit == 0 || I + 1 == E ||
        I[1]->First->isOneOf(tok::kw_case, tok::kw_default))
      return 0;
    unsigned NumStmts = 0;
    unsigned Length = 0;
    bool InPPDirective = I[0]->InPPDirective;
    for (; NumStmts < 3; ++NumStmts) {
      if (I + 1 + NumStmts == E)
        break;
      const AnnotatedLine *Line = I[1 + NumStmts];
      if (Line->InPPDirective != InPPDirective)
        break;
      if (Line->First->isOneOf(tok::kw_case, tok::kw_default, tok::r_brace))
        break;
      if (Line->First->isOneOf(tok::kw_if, tok::kw_for, tok::kw_switch,
                               tok::kw_while, tok::comment))
        return 0;
      Length += I[1 + NumStmts]->Last->TotalLength + 1; // 1 for the space.
    }
    // Reaching NumStmts == 3 means the case body has three or more statements.
    if (NumStmts == 0 || NumStmts == 3 || Length > Limit)
      return 0;
    return NumStmts;
  }

  /// \brief Tries to merge a block so that "{", its contents and "}" end up on
  /// one line; returns the number of following lines merged (0, 1 or 2).
  unsigned
  tryMergeSimpleBlock(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
                      SmallVectorImpl<AnnotatedLine *>::const_iterator E,
                      unsigned Limit) {
    AnnotatedLine &Line = **I;

    // Don't merge ObjC @ keywords and methods.
    // FIXME: If an option to allow short exception handling clauses on a single
    // line is added, change this to not return for @try and friends.
    if (Style.Language != FormatStyle::LK_Java &&
        Line.First->isOneOf(tok::at, tok::minus, tok::plus))
      return 0;

    // Check that the current line allows merging. This depends on whether we
    // are in a control flow statements as well as several style flags.
    if (Line.First->isOneOf(tok::kw_else, tok::kw_case) ||
        (Line.First->Next && Line.First->Next->is(tok::kw_else)))
      return 0;
    if (Line.First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_do, tok::kw_try,
                            tok::kw___try, tok::kw_catch, tok::kw___finally,
                            tok::kw_for, tok::r_brace, Keywords.kw___except)) {
      if (!Style.AllowShortBlocksOnASingleLine)
        return 0;
      if (!Style.AllowShortIfStatementsOnASingleLine &&
          Line.startsWith(tok::kw_if))
        return 0;
      if (!Style.AllowShortLoopsOnASingleLine &&
          Line.First->isOneOf(tok::kw_while, tok::kw_do, tok::kw_for))
        return 0;
      // FIXME: Consider an option to allow short exception handling clauses on
      // a single line.
      // FIXME: This isn't covered by tests.
      // FIXME: For catch, __except, __finally the first token on the line
      // is '}', so this isn't correct here.
      if (Line.First->isOneOf(tok::kw_try, tok::kw___try, tok::kw_catch,
                              Keywords.kw___except, tok::kw___finally))
        return 0;
    }

    FormatToken *Tok = I[1]->First;
    if (Tok->is(tok::r_brace) && !Tok->MustBreakBefore &&
        (Tok->getNextNonComment() == nullptr ||
         Tok->getNextNonComment()->is(tok::semi))) {
      // We merge empty blocks even if the line exceeds the column limit.
      Tok->SpacesRequiredBefore = 0;
      Tok->CanBreakBefore = true;
      return 1;
    } else if (Limit != 0 && !Line.startsWith(tok::kw_namespace) &&
               !startsExternCBlock(Line)) {
      // We don't merge short records.
      if (Line.First->isOneOf(tok::kw_class, tok::kw_union, tok::kw_struct,
                              Keywords.kw_interface))
        return 0;

      // Check that we still have three lines and they fit into the limit.
      if (I + 2 == E || I[2]->Type == LT_Invalid)
        return 0;
      Limit = limitConsideringMacros(I + 2, E, Limit);

      if (!nextTwoLinesFitInto(I, Limit))
        return 0;

      // Second, check that the next line does not contain any braces - if it
      // does, readability declines when putting it into a single line.
      if (I[1]->Last->is(TT_LineComment))
        return 0;
      do {
        if (Tok->is(tok::l_brace) && Tok->BlockKind != BK_BracedInit)
          return 0;
        Tok = Tok->Next;
      } while (Tok);

      // Last, check that the third line starts with a closing brace.
      Tok = I[2]->First;
      if (Tok->isNot(tok::r_brace))
        return 0;

      // Don't merge "if (a) { .. } else {".
      if (Tok->Next && Tok->Next->is(tok::kw_else))
        return 0;

      return 2;
    }
    return 0;
  }

  /// Returns the modified column limit for \p I if it is inside a macro and
  /// needs a trailing '\'.
  unsigned
  limitConsideringMacros(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
                         SmallVectorImpl<AnnotatedLine *>::const_iterator E,
                         unsigned Limit) {
    if (I[0]->InPPDirective && I + 1 != E &&
        !I[1]->First->HasUnescapedNewline && !I[1]->First->is(tok::eof)) {
      return Limit < 2 ? 0 : Limit - 2;
    }
    return Limit;
  }

  /// \brief Returns true if the two lines after \p I fit into \p Limit columns
  /// (including one separating space before each).
  bool nextTwoLinesFitInto(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
                           unsigned Limit) {
    if (I[1]->First->MustBreakBefore || I[2]->First->MustBreakBefore)
      return false;
    return 1 + I[1]->Last->TotalLength + 1 + I[2]->Last->TotalLength <= Limit;
  }

  /// \brief Returns true if any token of \p Line carries a mandatory break.
  bool containsMustBreak(const AnnotatedLine *Line) {
    for (const FormatToken *Tok = Line->First; Tok; Tok = Tok->Next) {
      if (Tok->MustBreakBefore)
        return true;
    }
    return false;
  }

  /// \brief Appends \p B's tokens to \p A, fixing up links and total lengths.
  void join(AnnotatedLine &A, const AnnotatedLine &B) {
    assert(!A.Last->Next);
    assert(!B.First->Previous);
    if (B.Affected)
      A.Affected = true;
    A.Last->Next = B.First;
    B.First->Previous = A.Last;
    B.First->CanBreakBefore = true;
    unsigned LengthA = A.Last->TotalLength + B.First->SpacesRequiredBefore;
    for (FormatToken *Tok = B.First; Tok; Tok = Tok->Next) {
      // Shift B's cumulative lengths by the length already consumed by A.
      Tok->TotalLength += LengthA;
      A.Last = Tok;
    }
  }

  const FormatStyle &Style;
  const AdditionalKeywords &Keywords;
  const SmallVectorImpl<AnnotatedLine *>::const_iterator End;

  // Iterator pointing at the next line to hand out.
  SmallVectorImpl<AnnotatedLine *>::const_iterator Next;
};
/// \brief Marks \p Tok and all tokens following it on this line as finalized,
/// recursing into the child lines hanging off each token.
static void markFinalized(FormatToken *Tok) {
  while (Tok) {
    Tok->Finalized = true;
    for (AnnotatedLine *Child : Tok->Children)
      markFinalized(Child->First);
    Tok = Tok->Next;
  }
}
#ifndef NDEBUG
/// \brief Debug helper: dumps each paren state's indent, last space and
/// nested-block indent, followed by the next token's text.
static void printLineState(const LineState &State) {
  llvm::dbgs() << "State: ";
  for (const ParenState &Paren : State.Stack)
    llvm::dbgs() << Paren.Indent << "|" << Paren.LastSpace << "|"
                 << Paren.NestedBlockIndent << " ";
  llvm::dbgs() << State.NextToken->TokenText << "\n";
}
#endif
/// \brief Base class for classes that format one \c AnnotatedLine.
class LineFormatter {
public:
  LineFormatter(ContinuationIndenter *Indenter, WhitespaceManager *Whitespaces,
                const FormatStyle &Style,
                UnwrappedLineFormatter *BlockFormatter)
      : Indenter(Indenter), Whitespaces(Whitespaces), Style(Style),
        BlockFormatter(BlockFormatter) {}
  virtual ~LineFormatter() {}

  /// \brief Formats an \c AnnotatedLine and returns the penalty.
  ///
  /// If \p DryRun is \c false, directly applies the changes.
  virtual unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent,
                              bool DryRun) = 0;

protected:
  /// \brief If the \p State's next token is an r_brace closing a nested block,
  /// format the nested block before it.
  ///
  /// Returns \c true if all children could be placed successfully and adapts
  /// \p Penalty as well as \p State. If \p DryRun is false, also directly
  /// creates changes using \c Whitespaces.
  ///
  /// The crucial idea here is that children always get formatted upon
  /// encountering the closing brace right after the nested block. Now, if we
  /// are currently trying to keep the "}" on the same line (i.e. \p NewLine is
  /// \c false), the entire block has to be kept on the same line (which is only
  /// possible if it fits on the line, only contains a single statement, etc.
  ///
  /// If \p NewLine is true, we format the nested block on separate lines, i.e.
  /// break after the "{", format all lines with correct indentation and the put
  /// the closing "}" on yet another new line.
  ///
  /// This enables us to keep the simple structure of the
  /// \c UnwrappedLineFormatter, where we only have two options for each token:
  /// break or don't break.
  bool formatChildren(LineState &State, bool NewLine, bool DryRun,
                      unsigned &Penalty) {
    const FormatToken *LBrace = State.NextToken->getPreviousNonComment();
    FormatToken &Previous = *State.NextToken->Previous;
    if (!LBrace || LBrace->isNot(tok::l_brace) ||
        LBrace->BlockKind != BK_Block || Previous.Children.size() == 0)
      // The previous token does not open a block. Nothing to do. We don't
      // assert so that we can simply call this function for all tokens.
      return true;

    if (NewLine) {
      // Indent the children relative to the current block indent instead of
      // their own recorded level.
      int AdditionalIndent = State.Stack.back().Indent -
                             Previous.Children[0]->Level * Style.IndentWidth;

      Penalty +=
          BlockFormatter->format(Previous.Children, DryRun, AdditionalIndent,
                                 /*FixBadIndentation=*/true);
      return true;
    }

    if (Previous.Children[0]->First->MustBreakBefore)
      return false;

    // Cannot merge multiple statements into a single line.
    if (Previous.Children.size() > 1)
      return false;

    // Cannot merge into one line if this line ends on a comment.
    if (Previous.is(tok::comment))
      return false;

    // We can't put the closing "}" on a line with a trailing comment.
    if (Previous.Children[0]->Last->isTrailingComment())
      return false;

    // If the child line exceeds the column limit, we wouldn't want to merge it.
    // We add +2 for the trailing " }".
    if (Style.ColumnLimit > 0 &&
        Previous.Children[0]->Last->TotalLength + State.Column + 2 >
            Style.ColumnLimit)
      return false;

    if (!DryRun) {
      // A single space between the "{" and the child's first token.
      Whitespaces->replaceWhitespace(
          *Previous.Children[0]->First,
          /*Newlines=*/0, /*IndentLevel=*/0, /*Spaces=*/1,
          /*StartOfTokenColumn=*/State.Column, State.Line->InPPDirective);
    }
    Penalty += formatLine(*Previous.Children[0], State.Column + 1, DryRun);

    // Account for the child's tokens plus the separating space.
    State.Column += 1 + Previous.Children[0]->Last->TotalLength;
    return true;
  }

  ContinuationIndenter *Indenter;

private:
  WhitespaceManager *Whitespaces;
  const FormatStyle &Style;
  UnwrappedLineFormatter *BlockFormatter;
};
/// \brief Formatter that keeps the existing line breaks.
class NoColumnLimitLineFormatter : public LineFormatter {
public:
  NoColumnLimitLineFormatter(ContinuationIndenter *Indenter,
                             WhitespaceManager *Whitespaces,
                             const FormatStyle &Style,
                             UnwrappedLineFormatter *BlockFormatter)
      : LineFormatter(Indenter, Whitespaces, Style, BlockFormatter) {}

  /// \brief Formats the line, simply keeping all of the input's line breaking
  /// decisions.
  unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent,
                      bool DryRun) override {
    assert(!DryRun);
    LineState State =
        Indenter->getInitialState(FirstIndent, &Line, /*DryRun=*/false);
    while (State.NextToken) {
      // Break where the indenter demands it, or where the input already
      // contained a newline and a break is permitted here.
      bool Newline = Indenter->mustBreak(State);
      if (!Newline)
        Newline =
            Indenter->canBreak(State) && State.NextToken->NewlinesBefore > 0;
      unsigned ChildPenalty = 0;
      formatChildren(State, Newline, /*DryRun=*/false, ChildPenalty);
      Indenter->addTokenToState(State, Newline, /*DryRun=*/false);
    }
    return 0;
  }
};
/// \brief Formatter that puts all tokens into a single line without breaks.
class NoLineBreakFormatter : public LineFormatter {
public:
NoLineBreakFormatter(ContinuationIndenter *Indenter,
WhitespaceManager *Whitespaces, const FormatStyle &Style,
UnwrappedLineFormatter *BlockFormatter)
: LineFormatter(Indenter, Whitespaces, Style, BlockFormatter) {}
/// \brief Puts all tokens into a single line.
unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent,
bool DryRun) {
unsigned Penalty = 0;
LineState State = Indenter->getInitialState(FirstIndent, &Line, DryRun);
while (State.NextToken) {
formatChildren(State, /*Newline=*/false, DryRun, Penalty);
Indenter->addTokenToState(State, /*Newline=*/false, DryRun);
}
return Penalty;
}
};
/// \brief Finds the best way to break lines.
class OptimizingLineFormatter : public LineFormatter {
public:
OptimizingLineFormatter(ContinuationIndenter *Indenter,
WhitespaceManager *Whitespaces,
const FormatStyle &Style,
UnwrappedLineFormatter *BlockFormatter)
: LineFormatter(Indenter, Whitespaces, Style, BlockFormatter) {}
/// \brief Formats the line by finding the best line breaks with line lengths
/// below the column limit.
unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent,
bool DryRun) {
LineState State = Indenter->getInitialState(FirstIndent, &Line, DryRun);
// If the ObjC method declaration does not fit on a line, we should format
// it with one arg per line.
if (State.Line->Type == LT_ObjCMethodDecl)
State.Stack.back().BreakBeforeParameter = true;
// Find best solution in solution space.
return analyzeSolutionSpace(State, DryRun);
}
private:
struct CompareLineStatePointers {
bool operator()(LineState *obj1, LineState *obj2) const {
return *obj1 < *obj2;
}
};
/// \brief A pair of <penalty, count> that is used to prioritize the BFS on.
///
/// In case of equal penalties, we want to prefer states that were inserted
/// first. During state generation we make sure that we insert states first
/// that break the line as late as possible.
typedef std::pair<unsigned, unsigned> OrderedPenalty;
/// \brief An edge in the solution space from \c Previous->State to \c State,
/// inserting a newline dependent on the \c NewLine.
struct StateNode {
StateNode(const LineState &State, bool NewLine, StateNode *Previous)
: State(State), NewLine(NewLine), Previous(Previous) {}
LineState State;
bool NewLine;
StateNode *Previous;
};
/// \brief An item in the prioritized BFS search queue. The \c StateNode's
/// \c State has the given \c OrderedPenalty.
typedef std::pair<OrderedPenalty, StateNode *> QueueItem;
/// \brief The BFS queue type.
typedef std::priority_queue<QueueItem, std::vector<QueueItem>,
std::greater<QueueItem>> QueueType;
/// \brief Analyze the entire solution space starting from \p InitialState.
///
/// This implements a variant of Dijkstra's algorithm on the graph that spans
/// the solution space (\c LineStates are the nodes). The algorithm tries to
/// find the shortest path (the one with lowest penalty) from \p InitialState
/// to a state where all tokens are placed. Returns the penalty.
///
/// If \p DryRun is \c false, directly applies the changes.
unsigned analyzeSolutionSpace(LineState &InitialState, bool DryRun) {
std::set<LineState *, CompareLineStatePointers> Seen;
// Increasing count of \c StateNode items we have created. This is used to
// create a deterministic order independent of the container.
unsigned Count = 0;
QueueType Queue;
// Insert start element into queue.
StateNode *Node =
new (Allocator.Allocate()) StateNode(InitialState, false, nullptr);
Queue.push(QueueItem(OrderedPenalty(0, Count), Node));
++Count;
unsigned Penalty = 0;
// While not empty, take first element and follow edges.
while (!Queue.empty()) {
Penalty = Queue.top().first.first;
StateNode *Node = Queue.top().second;
if (!Node->State.NextToken) {
DEBUG(llvm::dbgs() << "\n---\nPenalty for line: " << Penalty << "\n");
break;
}
Queue.pop();
// Cut off the analysis of certain solutions if the analysis gets too
// complex. See description of IgnoreStackForComparison.
if (Count > 10000)
Node->State.IgnoreStackForComparison = true;
if (!Seen.insert(&Node->State).second)
// State already examined with lower penalty.
continue;
FormatDecision LastFormat = Node->State.NextToken->Decision;
if (LastFormat == FD_Unformatted || LastFormat == FD_Continue)
addNextStateToQueue(Penalty, Node, /*NewLine=*/false, &Count, &Queue);
if (LastFormat == FD_Unformatted || LastFormat == FD_Break)
addNextStateToQueue(Penalty, Node, /*NewLine=*/true, &Count, &Queue);
}
if (Queue.empty()) {
// We were unable to find a solution, do nothing.
// FIXME: Add diagnostic?
DEBUG(llvm::dbgs() << "Could not find a solution.\n");
return 0;
}
// Reconstruct the solution.
if (!DryRun)
reconstructPath(InitialState, Queue.top().second);
DEBUG(llvm::dbgs() << "Total number of analyzed states: " << Count << "\n");
DEBUG(llvm::dbgs() << "---\n");
return Penalty;
}
/// \brief Add the following state to the analysis queue \c Queue.
///
/// Assume the current state is \p PreviousNode and has been reached with a
/// penalty of \p Penalty. Insert a line break if \p NewLine is \c true.
void addNextStateToQueue(unsigned Penalty, StateNode *PreviousNode,
bool NewLine, unsigned *Count, QueueType *Queue) {
if (NewLine && !Indenter->canBreak(PreviousNode->State))
return;
if (!NewLine && Indenter->mustBreak(PreviousNode->State))
return;
StateNode *Node = new (Allocator.Allocate())
StateNode(PreviousNode->State, NewLine, PreviousNode);
if (!formatChildren(Node->State, NewLine, /*DryRun=*/true, Penalty))
return;
Penalty += Indenter->addTokenToState(Node->State, NewLine, true);
Queue->push(QueueItem(OrderedPenalty(Penalty, *Count), Node));
++(*Count);
}
/// \brief Applies the best formatting by reconstructing the path in the
/// solution space that leads to \c Best.
void reconstructPath(LineState &State, StateNode *Best) {
std::deque<StateNode *> Path;
// We do not need a break before the initial token.
while (Best->Previous) {
Path.push_front(Best);
Best = Best->Previous;
}
for (std::deque<StateNode *>::iterator I = Path.begin(), E = Path.end();
I != E; ++I) {
unsigned Penalty = 0;
formatChildren(State, (*I)->NewLine, /*DryRun=*/false, Penalty);
Penalty += Indenter->addTokenToState(State, (*I)->NewLine, false);
DEBUG({
printLineState((*I)->Previous->State);
if ((*I)->NewLine) {
llvm::dbgs() << "Penalty for placing "
<< (*I)->Previous->State.NextToken->Tok.getName() << ": "
<< Penalty << "\n";
}
});
}
}
llvm::SpecificBumpPtrAllocator<StateNode> Allocator;
};
} // namespace
/// \brief Formats \p Lines: merges short lines where the style allows it and
/// dispatches each resulting line to the cheapest applicable \c LineFormatter,
/// returning the accumulated penalty (cached per (Lines, AdditionalIndent)
/// for DryRun invocations).
unsigned
UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines,
                               bool DryRun, int AdditionalIndent,
                               bool FixBadIndentation) {
  LineJoiner Joiner(Style, Keywords, Lines);

  // Try to look up already computed penalty in DryRun-mode.
  std::pair<const SmallVectorImpl<AnnotatedLine *> *, unsigned> CacheKey(
      &Lines, AdditionalIndent);
  auto CacheIt = PenaltyCache.find(CacheKey);
  if (DryRun && CacheIt != PenaltyCache.end())
    return CacheIt->second;

  assert(!Lines.empty());
  unsigned Penalty = 0;
  LevelIndentTracker IndentTracker(Style, Keywords, Lines[0]->Level,
                                   AdditionalIndent);
  const AnnotatedLine *PreviousLine = nullptr;
  const AnnotatedLine *NextLine = nullptr;
  for (const AnnotatedLine *Line =
           Joiner.getNextMergedLine(DryRun, IndentTracker);
       Line; Line = NextLine) {
    const AnnotatedLine &TheLine = *Line;
    unsigned Indent = IndentTracker.getIndent();
    bool FixIndentation =
        FixBadIndentation && (Indent != TheLine.First->OriginalColumn);
    bool ShouldFormat = TheLine.Affected || FixIndentation;
    // We cannot format this line; if the reason is that the line had a
    // parsing error, remember that.
    if (ShouldFormat && TheLine.Type == LT_Invalid && IncompleteFormat)
      *IncompleteFormat = true;

    if (ShouldFormat && TheLine.Type != LT_Invalid) {
      if (!DryRun)
        formatFirstToken(*TheLine.First, PreviousLine, TheLine.Level, Indent,
                         TheLine.InPPDirective);

      NextLine = Joiner.getNextMergedLine(DryRun, IndentTracker);
      unsigned ColumnLimit = getColumnLimit(TheLine.InPPDirective, NextLine);
      bool FitsIntoOneLine =
          TheLine.Last->TotalLength + Indent <= ColumnLimit ||
          TheLine.Type == LT_ImportStatement;

      // Pick the cheapest formatter able to handle this line.
      if (Style.ColumnLimit == 0)
        NoColumnLimitLineFormatter(Indenter, Whitespaces, Style, this)
            .formatLine(TheLine, Indent, DryRun);
      else if (FitsIntoOneLine)
        Penalty += NoLineBreakFormatter(Indenter, Whitespaces, Style, this)
                       .formatLine(TheLine, Indent, DryRun);
      else
        Penalty += OptimizingLineFormatter(Indenter, Whitespaces, Style, this)
                       .formatLine(TheLine, Indent, DryRun);
    } else {
      // If no token in the current line is affected, we still need to format
      // affected children.
      if (TheLine.ChildrenAffected)
        format(TheLine.Children, DryRun);

      // Adapt following lines on the current indent level to the same level
      // unless the current \c AnnotatedLine is not at the beginning of a line.
      bool StartsNewLine =
          TheLine.First->NewlinesBefore > 0 || TheLine.First->IsFirst;
      if (StartsNewLine)
        IndentTracker.adjustToUnmodifiedLine(TheLine);
      if (!DryRun) {
        bool ReformatLeadingWhitespace =
            StartsNewLine && ((PreviousLine && PreviousLine->Affected) ||
                              TheLine.LeadingEmptyLinesAffected);
        // Format the first token.
        if (ReformatLeadingWhitespace)
          formatFirstToken(*TheLine.First, PreviousLine, TheLine.Level,
                           TheLine.First->OriginalColumn,
                           TheLine.InPPDirective);
        else
          Whitespaces->addUntouchableToken(*TheLine.First,
                                           TheLine.InPPDirective);

        // Notify the WhitespaceManager about the unchanged whitespace.
        for (FormatToken *Tok = TheLine.First->Next; Tok; Tok = Tok->Next)
          Whitespaces->addUntouchableToken(*Tok, TheLine.InPPDirective);
      }
      NextLine = Joiner.getNextMergedLine(DryRun, IndentTracker);
    }
    if (!DryRun)
      markFinalized(TheLine.First);
    PreviousLine = &TheLine;
  }
  PenaltyCache[CacheKey] = Penalty;
  return Penalty;
}
/// \brief Places \p RootToken, the first token of a line, at column \p Indent,
/// deciding how many newlines to keep before it based on the style's
/// empty-line rules and on \p PreviousLine.
void UnwrappedLineFormatter::formatFirstToken(FormatToken &RootToken,
                                              const AnnotatedLine *PreviousLine,
                                              unsigned IndentLevel,
                                              unsigned Indent,
                                              bool InPPDirective) {
  // At end of file keep at most one trailing newline and no indentation.
  if (RootToken.is(tok::eof)) {
    unsigned Newlines = std::min(RootToken.NewlinesBefore, 1u);
    Whitespaces->replaceWhitespace(RootToken, Newlines, /*IndentLevel=*/0,
                                   /*Spaces=*/0, /*TargetColumn=*/0);
    return;
  }
  unsigned Newlines =
      std::min(RootToken.NewlinesBefore, Style.MaxEmptyLinesToKeep + 1);
  // Remove empty lines before "}" where applicable.
  if (RootToken.is(tok::r_brace) &&
      (!RootToken.Next ||
       (RootToken.Next->is(tok::semi) && !RootToken.Next->Next)))
    Newlines = std::min(Newlines, 1u);
  // A token that did not start the file always needs at least one newline.
  if (Newlines == 0 && !RootToken.IsFirst)
    Newlines = 1;
  if (RootToken.IsFirst && !RootToken.HasUnescapedNewline)
    Newlines = 0;

  // Remove empty lines after "{".
  if (!Style.KeepEmptyLinesAtTheStartOfBlocks && PreviousLine &&
      PreviousLine->Last->is(tok::l_brace) &&
      PreviousLine->First->isNot(tok::kw_namespace) &&
      !startsExternCBlock(*PreviousLine))
    Newlines = 1;

  // Insert extra new line before access specifiers.
  if (PreviousLine && PreviousLine->Last->isOneOf(tok::semi, tok::r_brace) &&
      RootToken.isAccessSpecifier() && RootToken.NewlinesBefore == 1)
    ++Newlines;

  // Remove empty lines after access specifiers.
  if (PreviousLine && PreviousLine->First->isAccessSpecifier() &&
      (!PreviousLine->InPPDirective || !RootToken.HasUnescapedNewline))
    Newlines = std::min(1u, Newlines);

  Whitespaces->replaceWhitespace(RootToken, Newlines, IndentLevel, Indent,
                                 Indent, InPPDirective &&
                                             !RootToken.HasUnescapedNewline);
}
/// \brief Returns the effective column limit for a line, reserving room for a
/// trailing " \" when the line sits inside a continued preprocessor directive.
unsigned
UnwrappedLineFormatter::getColumnLimit(bool InPPDirective,
                                       const AnnotatedLine *NextLine) const {
  if (!InPPDirective)
    return Style.ColumnLimit;
  // With no next line this is likely a child line whose parent continues the
  // directive. Otherwise the directive continues only if the next line is
  // still part of it and is not separated by an unescaped newline (which
  // would start a new directive).
  bool ContinuesPPDirective =
      !NextLine ||
      (NextLine->InPPDirective && !NextLine->First->HasUnescapedNewline);
  return ContinuesPPDirective ? Style.ColumnLimit - 2 : Style.ColumnLimit;
}
} // namespace format
} // namespace clang
//===--- BreakableToken.cpp - Format C++ code -----------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Contains implementation of BreakableToken class and classes derived
/// from it.
///
//===----------------------------------------------------------------------===//
#include "BreakableToken.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Format/Format.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#define DEBUG_TYPE "format-token-breaker"
namespace clang {
namespace format {
static const char *const Blanks = " \t\v\f\r";
// Returns true for exactly the characters listed in 'Blanks' above: space,
// horizontal tab, vertical tab, form feed and carriage return. Note that
// '\n' is deliberately not treated as a blank.
static bool IsBlank(char C) {
  return C == ' ' || C == '\t' || C == '\v' || C == '\f' || C == '\r';
}
/// \brief Finds a breaking point in comment \p Text so that the part before it
/// fits into the remaining columns; returns a Split of (byte offset, number of
/// whitespace bytes to remove), or (StringRef::npos, 0) if no split is found.
static BreakableToken::Split getCommentSplit(StringRef Text,
                                             unsigned ContentStartColumn,
                                             unsigned ColumnLimit,
                                             unsigned TabWidth,
                                             encoding::Encoding Encoding) {
  if (ColumnLimit <= ContentStartColumn + 1)
    return BreakableToken::Split(StringRef::npos, 0);

  unsigned MaxSplit = ColumnLimit - ContentStartColumn + 1;
  // Convert the column budget MaxSplit into a byte offset, walking code
  // points so multi-byte characters and tabs are measured correctly.
  unsigned MaxSplitBytes = 0;

  for (unsigned NumChars = 0;
       NumChars < MaxSplit && MaxSplitBytes < Text.size();) {
    unsigned BytesInChar =
        encoding::getCodePointNumBytes(Text[MaxSplitBytes], Encoding);
    NumChars +=
        encoding::columnWidthWithTabs(Text.substr(MaxSplitBytes, BytesInChar),
                                      ContentStartColumn, TabWidth, Encoding);
    MaxSplitBytes += BytesInChar;
  }

  StringRef::size_type SpaceOffset = Text.find_last_of(Blanks, MaxSplitBytes);
  if (SpaceOffset == StringRef::npos ||
      // Don't break at leading whitespace.
      Text.find_last_not_of(Blanks, SpaceOffset) == StringRef::npos) {
    // Make sure that we don't break at leading whitespace that
    // reaches past MaxSplit.
    StringRef::size_type FirstNonWhitespace = Text.find_first_not_of(Blanks);
    if (FirstNonWhitespace == StringRef::npos)
      // If the comment is only whitespace, we cannot split.
      return BreakableToken::Split(StringRef::npos, 0);
    // No usable space within the budget; fall back to the first blank after
    // the content (an over-long split is better than none).
    SpaceOffset = Text.find_first_of(
        Blanks, std::max<unsigned>(MaxSplitBytes, FirstNonWhitespace));
  }
  if (SpaceOffset != StringRef::npos && SpaceOffset != 0) {
    StringRef BeforeCut = Text.substr(0, SpaceOffset).rtrim(Blanks);
    StringRef AfterCut = Text.substr(SpaceOffset).ltrim(Blanks);
    // The second Split member is the size of the whitespace run removed
    // between the two halves.
    return BreakableToken::Split(BeforeCut.size(),
                                 AfterCut.begin() - BeforeCut.end());
  }
  return BreakableToken::Split(StringRef::npos, 0);
}
/// \brief Finds a breaking point in string literal \p Text that fits within
/// ColumnLimit - UsedColumns columns, preferring (in order) a blank, a '/',
/// a non-alphanumeric boundary, and finally any code-point boundary; returns
/// (StringRef::npos, 0) if no split is possible.
static BreakableToken::Split
getStringSplit(StringRef Text, unsigned UsedColumns, unsigned ColumnLimit,
               unsigned TabWidth, encoding::Encoding Encoding) {
  // FIXME: Reduce unit test case.
  if (Text.empty())
    return BreakableToken::Split(StringRef::npos, 0);
  if (ColumnLimit <= UsedColumns)
    return BreakableToken::Split(StringRef::npos, 0);
  unsigned MaxSplit = ColumnLimit - UsedColumns;
  // Candidate byte offsets for each kind of breaking point seen so far.
  StringRef::size_type SpaceOffset = 0;
  StringRef::size_type SlashOffset = 0;
  StringRef::size_type WordStartOffset = 0;
  StringRef::size_type SplitPoint = 0;
  // Scan forward one escape sequence / code point at a time, tracking the
  // column width consumed, until the budget or the text runs out.
  for (unsigned Chars = 0;;) {
    unsigned Advance;
    if (Text[0] == '\\') {
      // Never split in the middle of an escape sequence.
      Advance = encoding::getEscapeSequenceLength(Text);
      Chars += Advance;
    } else {
      Advance = encoding::getCodePointNumBytes(Text[0], Encoding);
      Chars += encoding::columnWidthWithTabs(
          Text.substr(0, Advance), UsedColumns + Chars, TabWidth, Encoding);
    }

    if (Chars > MaxSplit || Text.size() <= Advance)
      break;

    if (IsBlank(Text[0]))
      SpaceOffset = SplitPoint;
    if (Text[0] == '/')
      SlashOffset = SplitPoint;
    if (Advance == 1 && !isAlphanumeric(Text[0]))
      WordStartOffset = SplitPoint;

    SplitPoint += Advance;
    Text = Text.substr(Advance);
  }

  // Prefer breaking after the best boundary found, in priority order.
  if (SpaceOffset != 0)
    return BreakableToken::Split(SpaceOffset + 1, 0);
  if (SlashOffset != 0)
    return BreakableToken::Split(SlashOffset + 1, 0);
  if (WordStartOffset != 0)
    return BreakableToken::Split(WordStartOffset + 1, 0);
  if (SplitPoint != 0)
    return BreakableToken::Split(SplitPoint, 0);
  return BreakableToken::Split(StringRef::npos, 0);
}
unsigned BreakableSingleLineToken::getLineCount() const {
  // By definition, a single-line token always spans exactly one line.
  return 1;
}
unsigned BreakableSingleLineToken::getLineLengthAfterSplit(
    unsigned LineIndex, unsigned Offset, StringRef::size_type Length) const {
  // The content fragment starts right behind the prefix; prefix and postfix
  // always stay on the line and contribute their full size in columns.
  unsigned ContentStartColumn = StartColumn + Prefix.size();
  unsigned ContentColumns =
      encoding::columnWidthWithTabs(Line.substr(Offset, Length),
                                    ContentStartColumn, Style.TabWidth,
                                    Encoding);
  return ContentStartColumn + ContentColumns + Postfix.size();
}
// Constructs a breakable single-line token, stripping the given Prefix and
// Postfix off the token's text and storing the remaining content in Line.
// Asserts that the token text actually ends with the declared postfix.
BreakableSingleLineToken::BreakableSingleLineToken(
    const FormatToken &Tok, unsigned IndentLevel, unsigned StartColumn,
    StringRef Prefix, StringRef Postfix, bool InPPDirective,
    encoding::Encoding Encoding, const FormatStyle &Style)
    : BreakableToken(Tok, IndentLevel, InPPDirective, Encoding, Style),
      StartColumn(StartColumn), Prefix(Prefix), Postfix(Postfix) {
  assert(Tok.TokenText.endswith(Postfix));
  Line = Tok.TokenText.substr(
      Prefix.size(), Tok.TokenText.size() - Prefix.size() - Postfix.size());
}
// A string literal is a single-line token whose prefix/postfix are the
// quote delimiters (possibly including encoding prefixes such as L or @");
// all setup is delegated to the base class.
BreakableStringLiteral::BreakableStringLiteral(
    const FormatToken &Tok, unsigned IndentLevel, unsigned StartColumn,
    StringRef Prefix, StringRef Postfix, bool InPPDirective,
    encoding::Encoding Encoding, const FormatStyle &Style)
    : BreakableSingleLineToken(Tok, IndentLevel, StartColumn, Prefix, Postfix,
                               InPPDirective, Encoding, Style) {}
// Delegates to getStringSplit on the remaining content; the used-column
// budget reserves room for both the opening and closing delimiters, since a
// break introduces a new quote pair.
BreakableToken::Split
BreakableStringLiteral::getSplit(unsigned LineIndex, unsigned TailOffset,
                                 unsigned ColumnLimit) const {
  return getStringSplit(Line.substr(TailOffset),
                        StartColumn + Prefix.size() + Postfix.size(),
                        ColumnLimit, Style.TabWidth, Encoding);
}
// Inserts a break into the string literal at the given split position: the
// whitespace manager rewrites the gap into `Postfix` + newline + indent +
// `Prefix`, closing and re-opening the literal.
void BreakableStringLiteral::insertBreak(unsigned LineIndex,
                                         unsigned TailOffset, Split Split,
                                         WhitespaceManager &Whitespaces) {
  unsigned LeadingSpaces = StartColumn;
  // The '@' of an ObjC string literal (@"Test") does not become part of the
  // string token.
  // FIXME: It might be a cleaner solution to merge the tokens as a
  // precomputation step.
  // NOTE(review): LeadingSpaces is unsigned; this assumes StartColumn > 0
  // whenever the prefix starts with '@' (the '@' occupies a column before
  // the string) — confirm against callers.
  if (Prefix.startswith("@"))
    --LeadingSpaces;
  Whitespaces.replaceWhitespaceInToken(
      Tok, Prefix.size() + TailOffset + Split.first, Split.second, Postfix,
      Prefix, InPPDirective, 1, IndentLevel, LeadingSpaces);
}
// Returns the longest known line-comment prefix ("///", "//", "//!") that
// Comment starts with, extended over any spaces that immediately follow it.
// Returns the empty string when Comment starts with none of them.
static StringRef getLineCommentIndentPrefix(StringRef Comment) {
  static const char *const KnownPrefixes[] = {"///", "//", "//!"};
  StringRef Best;
  for (StringRef Candidate : KnownPrefixes) {
    if (!Comment.startswith(Candidate))
      continue;
    // Swallow the run of spaces directly after the comment marker so the
    // prefix captures the comment's indentation as well.
    size_t Len = Candidate.size();
    while (Len < Comment.size() && Comment[Len] == ' ')
      ++Len;
    if (Len > Best.size())
      Best = Comment.substr(0, Len);
  }
  return Best;
}
// Constructs a breakable line comment. The prefix is the detected comment
// marker plus its trailing spaces; if the comment text follows the marker
// without a space, the canonical single-space form is used for re-wrapped
// continuation lines (the original prefix is kept for offset calculations).
BreakableLineComment::BreakableLineComment(
    const FormatToken &Token, unsigned IndentLevel, unsigned StartColumn,
    bool InPPDirective, encoding::Encoding Encoding, const FormatStyle &Style)
    : BreakableSingleLineToken(Token, IndentLevel, StartColumn,
                               getLineCommentIndentPrefix(Token.TokenText), "",
                               InPPDirective, Encoding, Style) {
  OriginalPrefix = Prefix;
  // Only normalize when actual content starts right after the bare marker.
  if (Token.TokenText.size() > Prefix.size() &&
      isAlphanumeric(Token.TokenText[Prefix.size()])) {
    if (Prefix == "//")
      Prefix = "// ";
    else if (Prefix == "///")
      Prefix = "/// ";
    else if (Prefix == "//!")
      Prefix = "//! ";
  }
}
// Delegates to getCommentSplit on the remaining content, starting after the
// (possibly normalized) comment prefix.
BreakableToken::Split
BreakableLineComment::getSplit(unsigned LineIndex, unsigned TailOffset,
                               unsigned ColumnLimit) const {
  return getCommentSplit(Line.substr(TailOffset), StartColumn + Prefix.size(),
                         ColumnLimit, Style.TabWidth, Encoding);
}
// Breaks the line comment at the split position: the removed whitespace is
// replaced by a newline plus a fresh comment prefix at the original start
// column. Offsets are computed against OriginalPrefix, which reflects the
// token's actual text.
void BreakableLineComment::insertBreak(unsigned LineIndex, unsigned TailOffset,
                                       Split Split,
                                       WhitespaceManager &Whitespaces) {
  Whitespaces.replaceWhitespaceInToken(
      Tok, OriginalPrefix.size() + TailOffset + Split.first, Split.second,
      Postfix, Prefix, InPPDirective, /*Newlines=*/1, IndentLevel, StartColumn);
}
// Collapses the whitespace at the split position to a single space without
// inserting a line break (used when reflowing within one line).
void BreakableLineComment::replaceWhitespace(unsigned LineIndex,
                                             unsigned TailOffset, Split Split,
                                             WhitespaceManager &Whitespaces) {
  Whitespaces.replaceWhitespaceInToken(
      Tok, OriginalPrefix.size() + TailOffset + Split.first, Split.second, "",
      "", /*InPPDirective=*/false, /*Newlines=*/0, /*IndentLevel=*/0,
      /*Spaces=*/1);
}
// If the prefix was normalized in the constructor (e.g. "//" -> "// "), make
// the change real by inserting the single space after the original marker;
// otherwise nothing needs to be rewritten.
void BreakableLineComment::replaceWhitespaceBefore(
    unsigned LineIndex, WhitespaceManager &Whitespaces) {
  if (OriginalPrefix != Prefix) {
    Whitespaces.replaceWhitespaceInToken(Tok, OriginalPrefix.size(), 0, "", "",
                                         /*InPPDirective=*/false,
                                         /*Newlines=*/0, /*IndentLevel=*/0,
                                         /*Spaces=*/1);
  }
}
// Constructs a breakable block comment: splits the text between "/*" and "*/"
// into lines, normalizes per-line leading whitespace relative to the new
// start column, and infers the per-line decoration (typically "* ") shared by
// the comment's continuation lines.
BreakableBlockComment::BreakableBlockComment(
    const FormatToken &Token, unsigned IndentLevel, unsigned StartColumn,
    unsigned OriginalStartColumn, bool FirstInLine, bool InPPDirective,
    encoding::Encoding Encoding, const FormatStyle &Style)
    : BreakableToken(Token, IndentLevel, InPPDirective, Encoding, Style) {
  StringRef TokenText(Token.TokenText);
  assert(TokenText.startswith("/*") && TokenText.endswith("*/"));
  // Drop the "/*" and "*/" delimiters before splitting into lines.
  TokenText.substr(2, TokenText.size() - 4).split(Lines, "\n");
  // How far the whole comment moves; applied uniformly to every line.
  int IndentDelta = StartColumn - OriginalStartColumn;
  LeadingWhitespace.resize(Lines.size());
  StartOfLineColumn.resize(Lines.size());
  // The first line's content starts right after "/*".
  StartOfLineColumn[0] = StartColumn + 2;
  for (size_t i = 1; i < Lines.size(); ++i)
    adjustWhitespace(i, IndentDelta);
  Decoration = "* ";
  if (Lines.size() == 1 && !FirstInLine) {
    // Comments for which FirstInLine is false can start on arbitrary column,
    // and available horizontal space can be too small to align consecutive
    // lines with the first one.
    // FIXME: We could, probably, align them to current indentation level, but
    // now we just wrap them without stars.
    Decoration = "";
  }
  // Shrink the candidate decoration until every continuation line (with some
  // exceptions below) starts with it.
  for (size_t i = 1, e = Lines.size(); i < e && !Decoration.empty(); ++i) {
    // If the last line is empty, the closing "*/" will have a star.
    if (i + 1 == e && Lines[i].empty())
      break;
    // A line that is itself a prefix of the decoration (e.g. a bare "*")
    // does not constrain the decoration, unless it is the last line.
    if (!Lines[i].empty() && i + 1 != e && Decoration.startswith(Lines[i]))
      continue;
    while (!Lines[i].startswith(Decoration))
      Decoration = Decoration.substr(0, Decoration.size() - 1);
  }
  LastLineNeedsDecoration = true;
  // Content after a break starts one column past the "/*".
  IndentAtLineBreak = StartOfLineColumn[0] + 1;
  for (size_t i = 1; i < Lines.size(); ++i) {
    if (Lines[i].empty()) {
      if (i + 1 == Lines.size()) {
        // Empty last line means that we already have a star as a part of the
        // trailing */. We also need to preserve whitespace, so that */ is
        // correctly indented.
        LastLineNeedsDecoration = false;
      } else if (Decoration.empty()) {
        // For all other lines, set the start column to 0 if they're empty, so
        // we do not insert trailing whitespace anywhere.
        StartOfLineColumn[i] = 0;
      }
      continue;
    }
    // The first line already excludes the star.
    // For all other lines, adjust the line to exclude the star and
    // (optionally) the first whitespace.
    unsigned DecorationSize =
        Decoration.startswith(Lines[i]) ? Lines[i].size() : Decoration.size();
    StartOfLineColumn[i] += DecorationSize;
    Lines[i] = Lines[i].substr(DecorationSize);
    LeadingWhitespace[i] += DecorationSize;
    // Lines whose content does not line up with the decoration pull the
    // break indent left to the leftmost content column.
    if (!Decoration.startswith(Lines[i]))
      IndentAtLineBreak =
          std::min<int>(IndentAtLineBreak, std::max(0, StartOfLineColumn[i]));
  }
  // Never break to the left of the decoration itself.
  IndentAtLineBreak = std::max<unsigned>(IndentAtLineBreak, Decoration.size());
  DEBUG({
    llvm::dbgs() << "IndentAtLineBreak " << IndentAtLineBreak << "\n";
    for (size_t i = 0; i < Lines.size(); ++i) {
      llvm::dbgs() << i << " |" << Lines[i] << "| " << LeadingWhitespace[i]
                   << "\n";
    }
  });
}
// Trims the whitespace around the boundary between Lines[LineIndex - 1] and
// Lines[LineIndex], records how much leading whitespace the current line had
// (as the pointer distance across the trimmed gap), and computes the line's
// display start column shifted by IndentDelta.
void BreakableBlockComment::adjustWhitespace(unsigned LineIndex,
                                             int IndentDelta) {
  // When in a preprocessor directive, the trailing backslash in a block comment
  // is not needed, but can serve a purpose of uniformity with necessary escaped
  // newlines outside the comment. In this case we remove it here before
  // trimming the trailing whitespace. The backslash will be re-added later when
  // inserting a line break.
  size_t EndOfPreviousLine = Lines[LineIndex - 1].size();
  if (InPPDirective && Lines[LineIndex - 1].endswith("\\"))
    --EndOfPreviousLine;
  // Calculate the end of the non-whitespace text in the previous line.
  EndOfPreviousLine =
      Lines[LineIndex - 1].find_last_not_of(Blanks, EndOfPreviousLine);
  if (EndOfPreviousLine == StringRef::npos)
    EndOfPreviousLine = 0;
  else
    ++EndOfPreviousLine;
  // Calculate the start of the non-whitespace text in the current line.
  size_t StartOfLine = Lines[LineIndex].find_first_not_of(Blanks);
  if (StartOfLine == StringRef::npos)
    StartOfLine = Lines[LineIndex].rtrim("\r\n").size();
  StringRef Whitespace = Lines[LineIndex].substr(0, StartOfLine);
  // Adjust Lines to only contain relevant text.
  Lines[LineIndex - 1] = Lines[LineIndex - 1].substr(0, EndOfPreviousLine);
  Lines[LineIndex] = Lines[LineIndex].substr(StartOfLine);
  // Adjust LeadingWhitespace to account all whitespace between the lines
  // to the current line.
  LeadingWhitespace[LineIndex] =
      Lines[LineIndex].begin() - Lines[LineIndex - 1].end();
  // Adjust the start column uniformly across all lines.
  StartOfLineColumn[LineIndex] =
      encoding::columnWidthWithTabs(Whitespace, 0, Style.TabWidth, Encoding) +
      IndentDelta;
}
// One entry in Lines per physical line of the comment.
unsigned BreakableBlockComment::getLineCount() const {
  return Lines.size();
}
// Computes the column width of a fragment of line LineIndex starting at
// Offset: content start column plus display width of the substring, plus two
// columns for the trailing "*/" on the comment's last line.
unsigned BreakableBlockComment::getLineLengthAfterSplit(
    unsigned LineIndex, unsigned Offset, StringRef::size_type Length) const {
  unsigned ContentStartColumn = getContentStartColumn(LineIndex, Offset);
  return ContentStartColumn +
         encoding::columnWidthWithTabs(Lines[LineIndex].substr(Offset, Length),
                                       ContentStartColumn, Style.TabWidth,
                                       Encoding) +
         // The last line gets a "*/" postfix.
         (LineIndex + 1 == Lines.size() ? 2 : 0);
}
// Delegates to getCommentSplit on the remainder of line LineIndex, using that
// line's content start column as the used-column budget.
BreakableToken::Split
BreakableBlockComment::getSplit(unsigned LineIndex, unsigned TailOffset,
                                unsigned ColumnLimit) const {
  return getCommentSplit(Lines[LineIndex].substr(TailOffset),
                         getContentStartColumn(LineIndex, TailOffset),
                         ColumnLimit, Style.TabWidth, Encoding);
}
// Inserts a break inside line LineIndex at the split position: the removed
// whitespace becomes a newline plus the decoration ("* "), indented so the
// decoration lines up at IndentAtLineBreak.
void BreakableBlockComment::insertBreak(unsigned LineIndex, unsigned TailOffset,
                                        Split Split,
                                        WhitespaceManager &Whitespaces) {
  StringRef Text = Lines[LineIndex].substr(TailOffset);
  StringRef Prefix = Decoration;
  if (LineIndex + 1 == Lines.size() &&
      Text.size() == Split.first + Split.second) {
    // For the last line we need to break before "*/", but not to add "* ".
    Prefix = "";
  }
  // Offset of the break within the whole token text.
  unsigned BreakOffsetInToken =
      Text.data() - Tok.TokenText.data() + Split.first;
  unsigned CharsToRemove = Split.second;
  assert(IndentAtLineBreak >= Decoration.size());
  Whitespaces.replaceWhitespaceInToken(
      Tok, BreakOffsetInToken, CharsToRemove, "", Prefix, InPPDirective, 1,
      IndentLevel, IndentAtLineBreak - Decoration.size());
}
// Collapses the whitespace at the split position within line LineIndex to a
// single space, without inserting a line break.
void BreakableBlockComment::replaceWhitespace(unsigned LineIndex,
                                              unsigned TailOffset, Split Split,
                                              WhitespaceManager &Whitespaces) {
  StringRef Text = Lines[LineIndex].substr(TailOffset);
  // Offset of the whitespace run within the whole token text.
  unsigned BreakOffsetInToken =
      Text.data() - Tok.TokenText.data() + Split.first;
  unsigned CharsToRemove = Split.second;
  Whitespaces.replaceWhitespaceInToken(
      Tok, BreakOffsetInToken, CharsToRemove, "", "", /*InPPDirective=*/false,
      /*Newlines=*/0, /*IndentLevel=*/0, /*Spaces=*/1);
}
// Rewrites the whitespace (and old decoration) in front of line LineIndex so
// the line starts with the inferred decoration at its adjusted start column.
// The first line needs no adjustment; empty lines and lines hugging the
// decoration get a trimmed ("*"-only) or empty prefix.
void BreakableBlockComment::replaceWhitespaceBefore(
    unsigned LineIndex, WhitespaceManager &Whitespaces) {
  if (LineIndex == 0)
    return;
  StringRef Prefix = Decoration;
  if (Lines[LineIndex].empty()) {
    if (LineIndex + 1 == Lines.size()) {
      if (!LastLineNeedsDecoration) {
        // If the last line was empty, we don't need a prefix, as the */ will
        // line up with the decoration (if it exists).
        Prefix = "";
      }
    } else if (!Decoration.empty()) {
      // For other empty lines, if we do have a decoration, adapt it to not
      // contain a trailing whitespace.
      Prefix = Prefix.substr(0, 1);
    }
  } else {
    if (StartOfLineColumn[LineIndex] == 1) {
      // This line starts immediately after the decorating *.
      Prefix = Prefix.substr(0, 1);
    }
  }
  // The whitespace to replace starts LeadingWhitespace bytes before the
  // line's content within the token text.
  unsigned WhitespaceOffsetInToken = Lines[LineIndex].data() -
                                     Tok.TokenText.data() -
                                     LeadingWhitespace[LineIndex];
  Whitespaces.replaceWhitespaceInToken(
      Tok, WhitespaceOffsetInToken, LeadingWhitespace[LineIndex], "", Prefix,
      InPPDirective, 1, IndentLevel,
      StartOfLineColumn[LineIndex] - Prefix.size());
}
// Returns the column at which content of line LineIndex begins. Once a line
// has been broken (TailOffset != 0), continuation content always starts at
// the precomputed break indent; otherwise the line's own start column is
// used, clamped to be non-negative.
unsigned
BreakableBlockComment::getContentStartColumn(unsigned LineIndex,
                                             unsigned TailOffset) const {
  if (TailOffset != 0)
    return IndentAtLineBreak;
  int Column = StartOfLineColumn[LineIndex];
  return Column < 0 ? 0 : Column;
}
} // namespace format
} // namespace clang
//===---------------- SemaCodeComplete.cpp - Code Completion ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the code-completion semantic actions.
//
//===----------------------------------------------------------------------===//
#include "clang/Sema/SemaInternal.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/CodeCompleteConsumer.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include <list>
#include <map>
#include <vector>
// //
///////////////////////////////////////////////////////////////////////////////
using namespace clang;
using namespace sema;
namespace {
 /// \brief A container of code-completion results.
 class ResultBuilder {
 public:
 /// \brief The type of a name-lookup filter, which can be provided to the
 /// name-lookup routines to specify which declarations should be included in
 /// the result set (when it returns true) and which declarations should be
 /// filtered out (returns false).
 typedef bool (ResultBuilder::*LookupFilter)(const NamedDecl *) const;
 typedef CodeCompletionResult Result;
 private:
 /// \brief The actual results we have found.
 std::vector<Result> Results;
 /// \brief A record of all of the declarations we have found and placed
 /// into the result set, used to ensure that no declaration ever gets into
 /// the result set twice.
 llvm::SmallPtrSet<const Decl*, 16> AllDeclsFound;
 typedef std::pair<const NamedDecl *, unsigned> DeclIndexPair;
 /// \brief An entry in the shadow map, which is optimized to store
 /// a single (declaration, index) mapping (the common case) but
 /// can also store a list of (declaration, index) mappings.
 class ShadowMapEntry {
 typedef SmallVector<DeclIndexPair, 4> DeclIndexPairVector;
 /// \brief Contains either the solitary NamedDecl * or a vector
 /// of (declaration, index) pairs.
 llvm::PointerUnion<const NamedDecl *, DeclIndexPairVector*> DeclOrVector;
 /// \brief When the entry contains a single declaration, this is
 /// the index associated with that entry.
 unsigned SingleDeclIndex;
 public:
 ShadowMapEntry() : DeclOrVector(), SingleDeclIndex(0) { }
 // Adds a (declaration, result-index) pair, transparently promoting the
 // storage from empty -> single decl -> vector as needed.
 void Add(const NamedDecl *ND, unsigned Index) {
 if (DeclOrVector.isNull()) {
 // 0 - > 1 elements: just set the single element information.
 DeclOrVector = ND;
 SingleDeclIndex = Index;
 return;
 }
 if (const NamedDecl *PrevND =
 DeclOrVector.dyn_cast<const NamedDecl *>()) {
 // 1 -> 2 elements: create the vector of results and push in the
 // existing declaration.
 DeclIndexPairVector *Vec = new DeclIndexPairVector;
 Vec->push_back(DeclIndexPair(PrevND, SingleDeclIndex));
 DeclOrVector = Vec;
 }
 // Add the new element to the end of the vector.
 DeclOrVector.get<DeclIndexPairVector*>()->push_back(
 DeclIndexPair(ND, Index));
 }
 // Frees the vector storage, if any, resetting the entry to empty.
 void Destroy() {
 if (DeclIndexPairVector *Vec
 = DeclOrVector.dyn_cast<DeclIndexPairVector *>()) {
 delete Vec;
 DeclOrVector = ((NamedDecl *)nullptr);
 }
 }
 // Iteration.
 class iterator;
 iterator begin() const;
 iterator end() const;
 };
 /// \brief A mapping from declaration names to the declarations that have
 /// this name within a particular scope and their index within the list of
 /// results.
 typedef llvm::DenseMap<DeclarationName, ShadowMapEntry> ShadowMap;
 /// \brief The semantic analysis object for which results are being
 /// produced.
 Sema &SemaRef;
 /// \brief The allocator used to allocate new code-completion strings.
 CodeCompletionAllocator &Allocator;
 /// \brief Translation-unit-wide info shared across completion strings.
 CodeCompletionTUInfo &CCTUInfo;
 /// \brief If non-NULL, a filter function used to remove any code-completion
 /// results that are not desirable.
 LookupFilter Filter;
 /// \brief Whether we should allow declarations as
 /// nested-name-specifiers that would otherwise be filtered out.
 bool AllowNestedNameSpecifiers;
 /// \brief If set, the type that we would prefer our resulting value
 /// declarations to have.
 ///
 /// Closely matching the preferred type gives a boost to a result's
 /// priority.
 CanQualType PreferredType;
 /// \brief A list of shadow maps, which is used to model name hiding at
 /// different levels of, e.g., the inheritance hierarchy.
 std::list<ShadowMap> ShadowMaps;
 /// \brief If we're potentially referring to a C++ member function, the set
 /// of qualifiers applied to the object type.
 Qualifiers ObjectTypeQualifiers;
 /// \brief Whether the \p ObjectTypeQualifiers field is active.
 bool HasObjectTypeQualifiers;
 /// \brief The selector that we prefer.
 Selector PreferredSelector;
 /// \brief The completion context in which we are gathering results.
 CodeCompletionContext CompletionContext;
 /// \brief If we are in an instance method definition, the \@implementation
 /// object.
 ObjCImplementationDecl *ObjCImplementation;
 void AdjustResultPriorityForDecl(Result &R);
 void MaybeAddConstructorResults(Result R);
 public:
 /// \brief Constructs a result builder; for expression-like contexts, also
 /// digs out the \@implementation of the current ObjC instance method.
 explicit ResultBuilder(Sema &SemaRef, CodeCompletionAllocator &Allocator,
 CodeCompletionTUInfo &CCTUInfo,
 const CodeCompletionContext &CompletionContext,
 LookupFilter Filter = nullptr)
 : SemaRef(SemaRef), Allocator(Allocator), CCTUInfo(CCTUInfo),
 Filter(Filter),
 AllowNestedNameSpecifiers(false), HasObjectTypeQualifiers(false),
 CompletionContext(CompletionContext),
 ObjCImplementation(nullptr)
 {
 // If this is an Objective-C instance method definition, dig out the
 // corresponding implementation.
 switch (CompletionContext.getKind()) {
 case CodeCompletionContext::CCC_Expression:
 case CodeCompletionContext::CCC_ObjCMessageReceiver:
 case CodeCompletionContext::CCC_ParenthesizedExpression:
 case CodeCompletionContext::CCC_Statement:
 case CodeCompletionContext::CCC_Recovery:
 if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl())
 if (Method->isInstanceMethod())
 if (ObjCInterfaceDecl *Interface = Method->getClassInterface())
 ObjCImplementation = Interface->getImplementation();
 break;
 default:
 break;
 }
 }
 /// \brief Determine the priority for a reference to the given declaration.
 unsigned getBasePriority(const NamedDecl *D);
 /// \brief Whether we should include code patterns in the completion
 /// results.
 bool includeCodePatterns() const {
 return SemaRef.CodeCompleter &&
 SemaRef.CodeCompleter->includeCodePatterns();
 }
 /// \brief Set the filter used for code-completion results.
 void setFilter(LookupFilter Filter) {
 this->Filter = Filter;
 }
 Result *data() { return Results.empty()? nullptr : &Results.front(); }
 unsigned size() const { return Results.size(); }
 bool empty() const { return Results.empty(); }
 /// \brief Specify the preferred type.
 void setPreferredType(QualType T) {
 PreferredType = SemaRef.Context.getCanonicalType(T);
 }
 /// \brief Set the cv-qualifiers on the object type, for us in filtering
 /// calls to member functions.
 ///
 /// When there are qualifiers in this set, they will be used to filter
 /// out member functions that aren't available (because there will be a
 /// cv-qualifier mismatch) or prefer functions with an exact qualifier
 /// match.
 void setObjectTypeQualifiers(Qualifiers Quals) {
 ObjectTypeQualifiers = Quals;
 HasObjectTypeQualifiers = true;
 }
 /// \brief Set the preferred selector.
 ///
 /// When an Objective-C method declaration result is added, and that
 /// method's selector matches this preferred selector, we give that method
 /// a slight priority boost.
 void setPreferredSelector(Selector Sel) {
 PreferredSelector = Sel;
 }
 /// \brief Retrieve the code-completion context for which results are
 /// being collected.
 const CodeCompletionContext &getCompletionContext() const {
 return CompletionContext;
 }
 /// \brief Specify whether nested-name-specifiers are allowed.
 void allowNestedNameSpecifiers(bool Allow = true) {
 AllowNestedNameSpecifiers = Allow;
 }
 /// \brief Return the semantic analysis object for which we are collecting
 /// code completion results.
 Sema &getSema() const { return SemaRef; }
 /// \brief Retrieve the allocator used to allocate code completion strings.
 CodeCompletionAllocator &getAllocator() const { return Allocator; }
 CodeCompletionTUInfo &getCodeCompletionTUInfo() const { return CCTUInfo; }
 /// \brief Determine whether the given declaration is at all interesting
 /// as a code-completion result.
 ///
 /// \param ND the declaration that we are inspecting.
 ///
 /// \param AsNestedNameSpecifier will be set true if this declaration is
 /// only interesting when it is a nested-name-specifier.
 bool isInterestingDecl(const NamedDecl *ND,
 bool &AsNestedNameSpecifier) const;
 /// \brief Check whether the result is hidden by the Hiding declaration.
 ///
 /// \returns true if the result is hidden and cannot be found, false if
 /// the hidden result could still be found. When false, \p R may be
 /// modified to describe how the result can be found (e.g., via extra
 /// qualification).
 bool CheckHiddenResult(Result &R, DeclContext *CurContext,
 const NamedDecl *Hiding);
 /// \brief Add a new result to this result set (if it isn't already in one
 /// of the shadow maps), or replace an existing result (for, e.g., a
 /// redeclaration).
 ///
 /// \param R the result to add (if it is unique).
 ///
 /// \param CurContext the context in which this result will be named.
 void MaybeAddResult(Result R, DeclContext *CurContext = nullptr);
 /// \brief Add a new result to this result set, where we already know
 /// the hiding declaration (if any).
 ///
 /// \param R the result to add (if it is unique).
 ///
 /// \param CurContext the context in which this result will be named.
 ///
 /// \param Hiding the declaration that hides the result.
 ///
 /// \param InBaseClass whether the result was found in a base
 /// class of the searched context.
 void AddResult(Result R, DeclContext *CurContext, NamedDecl *Hiding,
 bool InBaseClass);
 /// \brief Add a new non-declaration result to this result set.
 void AddResult(Result R);
 /// \brief Enter into a new scope.
 void EnterNewScope();
 /// \brief Exit from the current scope.
 void ExitScope();
 /// \brief Ignore this declaration, if it is seen again.
 void Ignore(const Decl *D) { AllDeclsFound.insert(D->getCanonicalDecl()); }
 /// \name Name lookup predicates
 ///
 /// These predicates can be passed to the name lookup functions to filter the
 /// results of name lookup. All of the predicates have the same type, so that
 /// any of them can be passed interchangeably as the LookupFilter argument.
 //@{
 bool IsOrdinaryName(const NamedDecl *ND) const;
 bool IsOrdinaryNonTypeName(const NamedDecl *ND) const;
 bool IsIntegralConstantValue(const NamedDecl *ND) const;
 bool IsOrdinaryNonValueName(const NamedDecl *ND) const;
 bool IsNestedNameSpecifier(const NamedDecl *ND) const;
 bool IsEnum(const NamedDecl *ND) const;
 bool IsClassOrStruct(const NamedDecl *ND) const;
 bool IsUnion(const NamedDecl *ND) const;
 bool IsNamespace(const NamedDecl *ND) const;
 bool IsNamespaceOrAlias(const NamedDecl *ND) const;
 bool IsType(const NamedDecl *ND) const;
 bool IsMember(const NamedDecl *ND) const;
 bool IsObjCIvar(const NamedDecl *ND) const;
 bool IsObjCMessageReceiver(const NamedDecl *ND) const;
 bool IsObjCMessageReceiverOrLambdaCapture(const NamedDecl *ND) const;
 bool IsObjCCollection(const NamedDecl *ND) const;
 bool IsImpossibleToSatisfy(const NamedDecl *ND) const;
 //@}
 };
}
// An input iterator over a ShadowMapEntry: it either wraps the single
// (declaration, index) pair stored inline or a pointer into the entry's
// vector of pairs. The end state is a null declaration with index 0.
class ResultBuilder::ShadowMapEntry::iterator {
  llvm::PointerUnion<const NamedDecl *, const DeclIndexPair *> DeclOrIterator;
  unsigned SingleDeclIndex;
public:
  typedef DeclIndexPair value_type;
  typedef value_type reference;
  typedef std::ptrdiff_t difference_type;
  typedef std::input_iterator_tag iterator_category;
  // Proxy pointer type: operator* returns a value, so operator-> must hold a
  // copy to hand out an address.
  class pointer {
    DeclIndexPair Value;
  public:
    pointer(const DeclIndexPair &Value) : Value(Value) { }
    const DeclIndexPair *operator->() const {
      return &Value;
    }
  };
  iterator() : DeclOrIterator((NamedDecl *)nullptr), SingleDeclIndex(0) {}
  iterator(const NamedDecl *SingleDecl, unsigned Index)
    : DeclOrIterator(SingleDecl), SingleDeclIndex(Index) { }
  iterator(const DeclIndexPair *Iterator)
    : DeclOrIterator(Iterator), SingleDeclIndex(0) { }
  iterator &operator++() {
    // Advancing past the single inline element yields the end iterator.
    if (DeclOrIterator.is<const NamedDecl *>()) {
      DeclOrIterator = (NamedDecl *)nullptr;
      SingleDeclIndex = 0;
      return *this;
    }
    const DeclIndexPair *I = DeclOrIterator.get<const DeclIndexPair*>();
    ++I;
    DeclOrIterator = I;
    return *this;
  }
  /*iterator operator++(int) {
    iterator tmp(*this);
    ++(*this);
    return tmp;
  }*/
  reference operator*() const {
    if (const NamedDecl *ND = DeclOrIterator.dyn_cast<const NamedDecl *>())
      return reference(ND, SingleDeclIndex);
    return *DeclOrIterator.get<const DeclIndexPair*>();
  }
  pointer operator->() const {
    return pointer(**this);
  }
  friend bool operator==(const iterator &X, const iterator &Y) {
    return X.DeclOrIterator.getOpaqueValue()
                                  == Y.DeclOrIterator.getOpaqueValue() &&
      X.SingleDeclIndex == Y.SingleDeclIndex;
  }
  friend bool operator!=(const iterator &X, const iterator &Y) {
    return !(X == Y);
  }
};
// Returns an iterator to the entry's first element: the end iterator for an
// empty entry, a single-decl iterator for the inline case, or the vector's
// begin otherwise.
ResultBuilder::ShadowMapEntry::iterator
ResultBuilder::ShadowMapEntry::begin() const {
  if (DeclOrVector.isNull())
    return iterator();
  if (const NamedDecl *ND = DeclOrVector.dyn_cast<const NamedDecl *>())
    return iterator(ND, SingleDeclIndex);
  return iterator(DeclOrVector.get<DeclIndexPairVector *>()->begin());
}
// Returns the entry's end iterator: the default iterator for the empty and
// single-decl cases (a single decl ends after one increment), or the
// vector's end otherwise.
ResultBuilder::ShadowMapEntry::iterator
ResultBuilder::ShadowMapEntry::end() const {
  if (DeclOrVector.is<const NamedDecl *>() || DeclOrVector.isNull())
    return iterator();
  return iterator(DeclOrVector.get<DeclIndexPairVector *>()->end());
}
/// \brief Compute the qualification required to get from the current context
/// (\p CurContext) to the target context (\p TargetContext).
///
/// \param Context the AST context in which the qualification will be used.
///
/// \param CurContext the context where an entity is being named, which is
/// typically based on the current scope.
///
/// \param TargetContext the context in which the named entity actually
/// resides.
///
/// \returns a nested name specifier that refers into the target context, or
/// NULL if no qualification is needed.
static NestedNameSpecifier *
getRequiredQualification(ASTContext &Context,
                         const DeclContext *CurContext,
                         const DeclContext *TargetContext) {
  // Collect the chain of parents of TargetContext that do not already
  // enclose CurContext, skipping contexts that contribute no qualifier.
  SmallVector<const DeclContext *, 4> TargetParents;
  for (const DeclContext *CommonAncestor = TargetContext;
       CommonAncestor && !CommonAncestor->Encloses(CurContext);
       CommonAncestor = CommonAncestor->getLookupParent()) {
    if (CommonAncestor->isTransparentContext() ||
        CommonAncestor->isFunctionOrMethod())
      continue;
    TargetParents.push_back(CommonAncestor);
  }
  // Build the nested-name-specifier outermost-first (the vector was filled
  // innermost-first, so pop from the back).
  NestedNameSpecifier *Result = nullptr;
  while (!TargetParents.empty()) {
    const DeclContext *Parent = TargetParents.pop_back_val();
    if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Parent)) {
      // Anonymous namespaces cannot be named in a qualifier.
      if (!Namespace->getIdentifier())
        continue;
      Result = NestedNameSpecifier::Create(Context, Result, Namespace);
    }
    else if (const TagDecl *TD = dyn_cast<TagDecl>(Parent))
      Result = NestedNameSpecifier::Create(Context, Result,
                                           false,
                                     Context.getTypeDeclType(TD).getTypePtr());
  }
  return Result;
}
/// Determine whether \p Id is a name reserved for the implementation (C99
/// 7.1.3, C++ [lib.global.names]).
static bool isReservedName(const IdentifierInfo *Id) {
  // Reserved names start with an underscore followed by either another
  // underscore or an uppercase letter; anything shorter cannot qualify.
  if (Id->getLength() < 2)
    return false;
  const char *Start = Id->getNameStart();
  if (Start[0] != '_')
    return false;
  const char Second = Start[1];
  return Second == '_' || (Second >= 'A' && Second <= 'Z');
}
// Decides whether a declaration should appear in the completion results at
// all, and whether it is interesting only as a nested-name-specifier (the
// out-parameter AsNestedNameSpecifier).
bool ResultBuilder::isInterestingDecl(const NamedDecl *ND,
                                      bool &AsNestedNameSpecifier) const {
  AsNestedNameSpecifier = false;
  ND = ND->getUnderlyingDecl();
  // Skip unnamed entities.
  if (!ND->getDeclName())
    return false;
  // Friend declarations and declarations introduced due to friends are never
  // added as results.
  if (ND->getFriendObjectKind() == Decl::FOK_Undeclared)
    return false;
  // Class template (partial) specializations are never added as results.
  if (isa<ClassTemplateSpecializationDecl>(ND) ||
      isa<ClassTemplatePartialSpecializationDecl>(ND))
    return false;
  // Using declarations themselves are never added as results.
  if (isa<UsingDecl>(ND))
    return false;
  // Some declarations have reserved names that we don't want to ever show.
  // Filter out names reserved for the implementation if they come from a
  // system header.
  // TODO: Add a predicate for this.
  if (const IdentifierInfo *Id = ND->getIdentifier())
    if (isReservedName(Id) &&
        (ND->getLocation().isInvalid() ||
         SemaRef.SourceMgr.isInSystemHeader(
             SemaRef.SourceMgr.getSpellingLoc(ND->getLocation()))))
      return false;
  // Namespaces (and their aliases) are nested-name-specifier material
  // whenever a non-namespace filter is in effect.
  if (Filter == &ResultBuilder::IsNestedNameSpecifier ||
      ((isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND)) &&
       Filter != &ResultBuilder::IsNamespace &&
       Filter != &ResultBuilder::IsNamespaceOrAlias &&
       Filter != nullptr))
    AsNestedNameSpecifier = true;
  // Filter out any unwanted results.
  if (Filter && !(this->*Filter)(ND)) {
    // Check whether it is interesting as a nested-name-specifier.
    if (AllowNestedNameSpecifiers && SemaRef.getLangOpts().CPlusPlus &&
        IsNestedNameSpecifier(ND) &&
        (Filter != &ResultBuilder::IsMember ||
         (isa<CXXRecordDecl>(ND) &&
          cast<CXXRecordDecl>(ND)->isInjectedClassName()))) {
      AsNestedNameSpecifier = true;
      return true;
    }
    return false;
  }
  // ... then it must be interesting!
  return true;
}
// Determines whether a result hidden by another declaration is reachable via
// extra qualification; when it is, annotates the result with the required
// qualifier and returns false ("not irretrievably hidden").
bool ResultBuilder::CheckHiddenResult(Result &R, DeclContext *CurContext,
                                      const NamedDecl *Hiding) {
  // In C, there is no way to refer to a hidden name.
  // FIXME: This isn't true; we can find a tag name hidden by an ordinary
  // name if we introduce the tag type.
  if (!SemaRef.getLangOpts().CPlusPlus)
    return true;
  const DeclContext *HiddenCtx =
      R.Declaration->getDeclContext()->getRedeclContext();
  // There is no way to qualify a name declared in a function or method.
  if (HiddenCtx->isFunctionOrMethod())
    return true;
  // A name hidden within its own context cannot be disambiguated by
  // qualification either.
  if (HiddenCtx == Hiding->getDeclContext()->getRedeclContext())
    return true;
  // We can refer to the result with the appropriate qualification. Do it.
  R.Hidden = true;
  R.QualifierIsInformative = false;
  if (!R.Qualifier)
    R.Qualifier = getRequiredQualification(SemaRef.Context,
                                           CurContext,
                                           R.Declaration->getDeclContext());
  return false;
}
/// \brief A simplified classification of types used to determine whether two
/// types are "similar enough" when adjusting priorities.
SimplifiedTypeClass clang::getSimplifiedTypeClass(CanQualType T) {
  switch (T->getTypeClass()) {
  case Type::Builtin:
    switch (cast<BuiltinType>(T)->getKind()) {
      case BuiltinType::Void:
        return STC_Void;
      case BuiltinType::NullPtr:
        return STC_Pointer;
      case BuiltinType::Overload:
      case BuiltinType::Dependent:
        return STC_Other;
      case BuiltinType::ObjCId:
      case BuiltinType::ObjCClass:
      case BuiltinType::ObjCSel:
        return STC_ObjectiveC;
      default:
        return STC_Arithmetic;
    }
  case Type::Complex:
    return STC_Arithmetic;
  case Type::Pointer:
    return STC_Pointer;
  case Type::BlockPointer:
    return STC_Block;
  case Type::LValueReference:
  case Type::RValueReference:
    // References classify as their pointee type.
    return getSimplifiedTypeClass(T->getAs<ReferenceType>()->getPointeeType());
  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
  case Type::DependentSizedArray:
    return STC_Array;
  case Type::DependentSizedExtVector:
  case Type::Vector:
  case Type::ExtVector:
    return STC_Arithmetic;
  case Type::FunctionProto:
  case Type::FunctionNoProto:
    return STC_Function;
  case Type::Record:
    return STC_Record;
  case Type::Enum:
    return STC_Arithmetic;
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
    return STC_ObjectiveC;
  default:
    return STC_Other;
  }
}
/// \brief Get the type that a given expression will have if this declaration
/// is used as an expression in its "typical" code-completion form.
QualType clang::getDeclUsageType(ASTContext &C, const NamedDecl *ND) {
  ND = cast<NamedDecl>(ND->getUnderlyingDecl());
  // Type-like declarations yield the named type itself.
  if (const TypeDecl *Type = dyn_cast<TypeDecl>(ND))
    return C.getTypeDeclType(Type);
  if (const ObjCInterfaceDecl *Iface = dyn_cast<ObjCInterfaceDecl>(ND))
    return C.getObjCInterfaceType(Iface);
  // Value-like declarations yield the type of a use: call result for
  // functions/methods, enum type for enumerators, property/value type
  // otherwise.
  QualType T;
  if (const FunctionDecl *Function = ND->getAsFunction())
    T = Function->getCallResultType();
  else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND))
    T = Method->getSendResultType();
  else if (const EnumConstantDecl *Enumerator = dyn_cast<EnumConstantDecl>(ND))
    T = C.getTypeDeclType(cast<EnumDecl>(Enumerator->getDeclContext()));
  else if (const ObjCPropertyDecl *Property = dyn_cast<ObjCPropertyDecl>(ND))
    T = Property->getType();
  else if (const ValueDecl *Value = dyn_cast<ValueDecl>(ND))
    T = Value->getType();
  else
    return QualType();
  // Dig through references, function pointers, and block pointers to
  // get down to the likely type of an expression when the entity is
  // used.
  do {
    if (const ReferenceType *Ref = T->getAs<ReferenceType>()) {
      T = Ref->getPointeeType();
      continue;
    }
    if (const PointerType *Pointer = T->getAs<PointerType>()) {
      // Only unwrap pointers-to-function; a use of a data pointer is
      // typically the pointer itself.
      if (Pointer->getPointeeType()->isFunctionType()) {
        T = Pointer->getPointeeType();
        continue;
      }
      break;
    }
    if (const BlockPointerType *Block = T->getAs<BlockPointerType>()) {
      T = Block->getPointeeType();
      continue;
    }
    if (const FunctionType *Function = T->getAs<FunctionType>()) {
      T = Function->getReturnType();
      continue;
    }
    break;
  } while (true);
  return T;
}
unsigned ResultBuilder::getBasePriority(const NamedDecl *ND) {
if (!ND)
return CCP_Unlikely;
// Context-based decisions.
const DeclContext *LexicalDC = ND->getLexicalDeclContext();
if (LexicalDC->isFunctionOrMethod()) {
// _cmd is relatively rare
if (const ImplicitParamDecl *ImplicitParam =
dyn_cast<ImplicitParamDecl>(ND))
if (ImplicitParam->getIdentifier() &&
ImplicitParam->getIdentifier()->isStr("_cmd"))
return CCP_ObjC_cmd;
return CCP_LocalDeclaration;
}
const DeclContext *DC = ND->getDeclContext()->getRedeclContext();
if (DC->isRecord() || isa<ObjCContainerDecl>(DC))
return CCP_MemberDeclaration;
// Content-based decisions.
if (isa<EnumConstantDecl>(ND))
return CCP_Constant;
// Use CCP_Type for type declarations unless we're in a statement, Objective-C
// message receiver, or parenthesized expression context. There, it's as
// likely that the user will want to write a type as other declarations.
if ((isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND)) &&
!(CompletionContext.getKind() == CodeCompletionContext::CCC_Statement ||
CompletionContext.getKind()
== CodeCompletionContext::CCC_ObjCMessageReceiver ||
CompletionContext.getKind()
== CodeCompletionContext::CCC_ParenthesizedExpression))
return CCP_Type;
return CCP_Declaration;
}
void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
  // If this is an Objective-C method declaration whose selector matches our
  // preferred selector, give it a priority boost.
  if (!PreferredSelector.isNull())
    if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(R.Declaration))
      if (Method->getSelector() == PreferredSelector)
        R.Priority += CCD_SelectorMatch;

  // If we have a preferred type, adjust the priority for results with exactly-
  // matching or nearly-matching types.
  if (PreferredType.isNull())
    return;

  QualType DeclTy = getDeclUsageType(SemaRef.Context, R.Declaration);
  if (DeclTy.isNull())
    return;

  CanQualType Canon = SemaRef.Context.getCanonicalType(DeclTy);
  if (SemaRef.Context.hasSameUnqualifiedType(PreferredType, Canon)) {
    // Exactly-matching type (modulo qualifiers).
    R.Priority /= CCF_ExactTypeMatch;
  } else if (getSimplifiedTypeClass(PreferredType) ==
                 getSimplifiedTypeClass(Canon) &&
             !(PreferredType->isEnumeralType() && Canon->isEnumeralType())) {
    // Nearly-matching type, based on the simplified classification of each
    // (two distinct enumeration types deliberately do not count as similar).
    R.Priority /= CCF_SimilarTypeMatch;
  }
}
void ResultBuilder::MaybeAddConstructorResults(Result R) {
  if (!SemaRef.getLangOpts().CPlusPlus || !R.Declaration ||
      !CompletionContext.wantConstructorResults())
    return;

  ASTContext &Context = SemaRef.Context;
  const NamedDecl *D = R.Declaration;

  // Determine which class, if any, this declaration names.
  const CXXRecordDecl *Record = nullptr;
  if (const ClassTemplateDecl *Template = dyn_cast<ClassTemplateDecl>(D))
    Record = Template->getTemplatedDecl();
  else
    Record = dyn_cast<CXXRecordDecl>(D);

  // No class here means there are no constructors to add.
  if (!Record)
    return;
  // Skip specializations and partial specializations.
  if (isa<ClassTemplateSpecializationDecl>(Record))
    return;

  Record = Record->getDefinition();
  if (!Record)
    return;

  // Add one result per constructor found by name lookup.
  QualType RecordTy = Context.getTypeDeclType(Record);
  DeclarationName ConstructorName =
      Context.DeclarationNames.getCXXConstructorName(
          Context.getCanonicalType(RecordTy));
  for (NamedDecl *Ctor : Record->lookup(ConstructorName)) {
    R.Declaration = Ctor;
    R.CursorKind = getCursorKindForDecl(R.Declaration);
    Results.push_back(R);
  }
}
/// \brief Add \p R to the result set if it is interesting, handling
/// redeclarations and name hiding against the current stack of shadow maps.
///
/// \param R the candidate result; its Declaration/Qualifier fields may be
/// rewritten before insertion.
/// \param CurContext the context from which the completion occurs, used to
/// compute any required qualification for hidden results.
void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
  assert(!ShadowMaps.empty() && "Must enter into a results scope");
  if (R.Kind != Result::RK_Declaration) {
    // For non-declaration results, just add the result.
    Results.push_back(R);
    return;
  }
  // Look through using declarations.
  if (const UsingShadowDecl *Using =
          dyn_cast<UsingShadowDecl>(R.Declaration)) {
    MaybeAddResult(Result(Using->getTargetDecl(),
                          getBasePriority(Using->getTargetDecl()),
                          R.Qualifier),
                   CurContext);
    return;
  }
  const Decl *CanonDecl = R.Declaration->getCanonicalDecl();
  unsigned IDNS = CanonDecl->getIdentifierNamespace();
  bool AsNestedNameSpecifier = false;
  if (!isInterestingDecl(R.Declaration, AsNestedNameSpecifier))
    return;
  // C++ constructors are never found by name lookup.
  if (isa<CXXConstructorDecl>(R.Declaration))
    return;
  ShadowMap &SMap = ShadowMaps.back();
  // NOTE: I/IEnd are only assigned when the name is present; the loop below
  // relies on default-constructed ShadowMapEntry iterators comparing equal
  // so that it is a no-op otherwise.
  ShadowMapEntry::iterator I, IEnd;
  ShadowMap::iterator NamePos = SMap.find(R.Declaration->getDeclName());
  if (NamePos != SMap.end()) {
    I = NamePos->second.begin();
    IEnd = NamePos->second.end();
  }
  // See whether this declaration was already added to the innermost scope.
  for (; I != IEnd; ++I) {
    const NamedDecl *ND = I->first;
    unsigned Index = I->second;
    if (ND->getCanonicalDecl() == CanonDecl) {
      // This is a redeclaration. Always pick the newer declaration.
      Results[Index].Declaration = R.Declaration;
      // We're done.
      return;
    }
  }
  // This is a new declaration in this scope. However, check whether this
  // declaration name is hidden by a similarly-named declaration in an outer
  // scope.
  // SMEnd is decremented so the innermost scope (the one we just searched)
  // is excluded from the hiding check.
  std::list<ShadowMap>::iterator SM, SMEnd = ShadowMaps.end();
  --SMEnd;
  for (SM = ShadowMaps.begin(); SM != SMEnd; ++SM) {
    // Same default-constructed-iterator idiom as above: the inner loop only
    // runs when the name exists in this outer scope.
    ShadowMapEntry::iterator I, IEnd;
    ShadowMap::iterator NamePos = SM->find(R.Declaration->getDeclName());
    if (NamePos != SM->end()) {
      I = NamePos->second.begin();
      IEnd = NamePos->second.end();
    }
    for (; I != IEnd; ++I) {
      // A tag declaration does not hide a non-tag declaration.
      if (I->first->hasTagIdentifierNamespace() &&
          (IDNS & (Decl::IDNS_Member | Decl::IDNS_Ordinary |
                   Decl::IDNS_LocalExtern | Decl::IDNS_ObjCProtocol)))
        continue;
      // Protocols are in distinct namespaces from everything else.
      if (((I->first->getIdentifierNamespace() & Decl::IDNS_ObjCProtocol)
           || (IDNS & Decl::IDNS_ObjCProtocol)) &&
          I->first->getIdentifierNamespace() != IDNS)
        continue;
      // The newly-added result is hidden by an entry in the shadow map.
      if (CheckHiddenResult(R, CurContext, I->first))
        return;
      break;
    }
  }
  // Make sure that any given declaration only shows up in the result set once.
  if (!AllDeclsFound.insert(CanonDecl).second)
    return;
  // If the filter is for nested-name-specifiers, then this result starts a
  // nested-name-specifier.
  if (AsNestedNameSpecifier) {
    R.StartsNestedNameSpecifier = true;
    R.Priority = CCP_NestedNameSpecifier;
  } else
      AdjustResultPriorityForDecl(R);
  // If this result is supposed to have an informative qualifier, add one.
  if (R.QualifierIsInformative && !R.Qualifier &&
      !R.StartsNestedNameSpecifier) {
    const DeclContext *Ctx = R.Declaration->getDeclContext();
    if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Ctx))
      R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr,
                                                Namespace);
    else if (const TagDecl *Tag = dyn_cast<TagDecl>(Ctx))
      R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr,
                      false, SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
    else
      R.QualifierIsInformative = false;
  }
  // Insert this result into the set of results and into the current shadow
  // map.
  SMap[R.Declaration->getDeclName()].Add(R.Declaration, Results.size());
  Results.push_back(R);
  if (!AsNestedNameSpecifier)
    MaybeAddConstructorResults(R);
}
/// \brief Add \p R to the result set, checking it against a single known
/// hiding declaration (from visible-decl lookup) rather than the shadow maps.
///
/// \param Hiding a declaration that hides this result, if any; a hidden
/// result is either dropped or given an explicit qualifier.
/// \param InBaseClass true when the declaration was found in a base class of
/// the class being completed; such results get a priority penalty.
void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
                              NamedDecl *Hiding, bool InBaseClass = false) {
  if (R.Kind != Result::RK_Declaration) {
    // For non-declaration results, just add the result.
    Results.push_back(R);
    return;
  }
  // Look through using declarations.
  if (const UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(R.Declaration)) {
    AddResult(Result(Using->getTargetDecl(),
                     getBasePriority(Using->getTargetDecl()),
                     R.Qualifier),
              CurContext, Hiding);
    return;
  }
  bool AsNestedNameSpecifier = false;
  if (!isInterestingDecl(R.Declaration, AsNestedNameSpecifier))
    return;
  // C++ constructors are never found by name lookup.
  if (isa<CXXConstructorDecl>(R.Declaration))
    return;
  if (Hiding && CheckHiddenResult(R, CurContext, Hiding))
    return;
  // Make sure that any given declaration only shows up in the result set once.
  if (!AllDeclsFound.insert(R.Declaration->getCanonicalDecl()).second)
    return;
  // If the filter is for nested-name-specifiers, then this result starts a
  // nested-name-specifier.
  if (AsNestedNameSpecifier) {
    R.StartsNestedNameSpecifier = true;
    R.Priority = CCP_NestedNameSpecifier;
  }
  // Members found in a base class get an informative qualifier so the user
  // can see where they came from.
  else if (Filter == &ResultBuilder::IsMember && !R.Qualifier && InBaseClass &&
           isa<CXXRecordDecl>(R.Declaration->getDeclContext()
                                                  ->getRedeclContext()))
    R.QualifierIsInformative = true;
  // If this result is supposed to have an informative qualifier, add one.
  if (R.QualifierIsInformative && !R.Qualifier &&
      !R.StartsNestedNameSpecifier) {
    const DeclContext *Ctx = R.Declaration->getDeclContext();
    if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Ctx))
      R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr,
                                                Namespace);
    else if (const TagDecl *Tag = dyn_cast<TagDecl>(Ctx))
      R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, nullptr, false,
                            SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
    else
      R.QualifierIsInformative = false;
  }
  // Adjust the priority if this result comes from a base class.
  if (InBaseClass)
    R.Priority += CCD_InBaseClass;
  AdjustResultPriorityForDecl(R);
  if (HasObjectTypeQualifiers)
    if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(R.Declaration))
      if (Method->isInstance()) {
        Qualifiers MethodQuals
                        = Qualifiers::fromCVRMask(Method->getTypeQualifiers());
        if (ObjectTypeQualifiers == MethodQuals)
          R.Priority += CCD_ObjectQualifierMatch;
        else if (ObjectTypeQualifiers - MethodQuals) {
          // The method cannot be invoked, because doing so would drop
          // qualifiers.
          return;
        }
      }
  // Insert this result into the set of results.
  Results.push_back(R);
  if (!AsNestedNameSpecifier)
    MaybeAddConstructorResults(R);
}
/// \brief Add a non-declaration result (e.g., a keyword, pattern, or macro)
/// directly to the result set; no shadow-map or hiding checks are needed.
void ResultBuilder::AddResult(Result R) {
  assert(R.Kind != Result::RK_Declaration &&
         "Declaration results need more context");
  Results.push_back(R);
}
/// \brief Enter into a new scope.
///
/// Pushes a fresh, empty shadow map onto the stack; results added from now
/// on are recorded there for redeclaration/hiding checks.
void ResultBuilder::EnterNewScope() { ShadowMaps.emplace_back(); }
/// \brief Exit from the current scope.
void ResultBuilder::ExitScope() {
  // Release the per-name entries of the innermost shadow map before
  // popping it off the stack.
  for (auto &Entry : ShadowMaps.back())
    Entry.second.Destroy();
  ShadowMaps.pop_back();
}
/// \brief Determines whether this given declaration will be found by
/// ordinary name lookup.
bool ResultBuilder::IsOrdinaryName(const NamedDecl *ND) const {
  ND = cast<NamedDecl>(ND->getUnderlyingDecl());
  // If name lookup finds a local extern declaration, then we are in a
  // context where it behaves like an ordinary name.
  unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_LocalExtern;
  if (SemaRef.getLangOpts().CPlusPlus)
    IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
  else if (SemaRef.getLangOpts().ObjC1) {
    // Objective-C instance variables are always considered ordinary names.
    if (isa<ObjCIvarDecl>(ND))
      return true;
  }
  return ND->getIdentifierNamespace() & IDNS;
}
/// \brief Determines whether this given declaration will be found by
/// ordinary name lookup but is not a type name.
bool ResultBuilder::IsOrdinaryNonTypeName(const NamedDecl *ND) const {
  ND = cast<NamedDecl>(ND->getUnderlyingDecl());

  // Type names never qualify.
  if (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND))
    return false;

  // Outside C++, Objective-C instance variables always count.
  if (!SemaRef.getLangOpts().CPlusPlus && SemaRef.getLangOpts().ObjC1 &&
      isa<ObjCIvarDecl>(ND))
    return true;

  // A local extern declaration behaves like an ordinary name here.
  unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_LocalExtern;
  if (SemaRef.getLangOpts().CPlusPlus)
    IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
  return (ND->getIdentifierNamespace() & IDNS) != 0;
}
/// \brief Determines whether the given declaration names an integral
/// constant value: an ordinary non-type name whose type is an integral or
/// enumeration type.
bool ResultBuilder::IsIntegralConstantValue(const NamedDecl *ND) const {
  if (!IsOrdinaryNonTypeName(ND))
    return false; // was 'return 0': use a bool literal in a bool function
  if (const ValueDecl *VD = dyn_cast<ValueDecl>(ND->getUnderlyingDecl()))
    if (VD->getType()->isIntegralOrEnumerationType())
      return true;
  return false;
}
/// \brief Determines whether this given declaration will be found by
/// ordinary name lookup.
bool ResultBuilder::IsOrdinaryNonValueName(const NamedDecl *ND) const {
  ND = cast<NamedDecl>(ND->getUnderlyingDecl());

  unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_LocalExtern;
  if (SemaRef.getLangOpts().CPlusPlus)
    IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace;

  if (!(ND->getIdentifierNamespace() & IDNS))
    return false;

  // Filter out anything value-like: values, function templates, properties.
  return !isa<ValueDecl>(ND) && !isa<FunctionTemplateDecl>(ND) &&
         !isa<ObjCPropertyDecl>(ND);
}
/// \brief Determines whether the given declaration is suitable as the
/// start of a C++ nested-name-specifier, e.g., a class or namespace.
bool ResultBuilder::IsNestedNameSpecifier(const NamedDecl *ND) const {
// Allow us to find class templates, too.
if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
ND = ClassTemplate->getTemplatedDecl();
return SemaRef.isAcceptableNestedNameSpecifier(ND);
}
/// \brief Determines whether the given declaration is an enumeration.
bool ResultBuilder::IsEnum(const NamedDecl *ND) const {
  return isa<EnumDecl>(ND);
}
/// \brief Determines whether the given declaration is a class or struct.
bool ResultBuilder::IsClassOrStruct(const NamedDecl *ND) const {
// Allow us to find class templates, too.
if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
ND = ClassTemplate->getTemplatedDecl();
// For purposes of this check, interfaces match too.
if (const RecordDecl *RD = dyn_cast<RecordDecl>(ND))
return RD->getTagKind() == TTK_Class ||
RD->getTagKind() == TTK_Struct ||
RD->getTagKind() == TTK_Interface;
return false;
}
/// \brief Determines whether the given declaration is a union.
bool ResultBuilder::IsUnion(const NamedDecl *ND) const {
// Allow us to find class templates, too.
if (const ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
ND = ClassTemplate->getTemplatedDecl();
if (const RecordDecl *RD = dyn_cast<RecordDecl>(ND))
return RD->getTagKind() == TTK_Union;
return false;
}
/// \brief Determines whether the given declaration is a namespace.
bool ResultBuilder::IsNamespace(const NamedDecl *ND) const {
  return isa<NamespaceDecl>(ND);
}
/// \brief Determines whether the given declaration is a namespace or
/// namespace alias.
bool ResultBuilder::IsNamespaceOrAlias(const NamedDecl *ND) const {
  return isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND);
}
/// \brief Determines whether the given declaration is a type.
bool ResultBuilder::IsType(const NamedDecl *ND) const {
if (const UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(ND))
ND = Using->getTargetDecl();
return isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND);
}
/// \brief Determines which members of a class should be visible via
/// "." or "->". Only value declarations, nested name specifiers, and
/// using declarations thereof should show up.
bool ResultBuilder::IsMember(const NamedDecl *ND) const {
if (const UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(ND))
ND = Using->getTargetDecl();
return isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND) ||
isa<ObjCPropertyDecl>(ND);
}
static bool isObjCReceiverType(ASTContext &C, QualType T) {
  T = C.getCanonicalType(T);
  switch (T->getTypeClass()) {
  case Type::ObjCObject:
  case Type::ObjCInterface:
  case Type::ObjCObjectPointer:
    return true;

  case Type::Builtin:
    // Only the Objective-C builtin types can receive messages.
    switch (cast<BuiltinType>(T)->getKind()) {
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      return true;
    default:
      return false;
    }

  default:
    break;
  }

  if (!C.getLangOpts().CPlusPlus)
    return false;

  // FIXME: We could perform more analysis here to determine whether a
  // particular class type has any conversions to Objective-C types. For now,
  // just accept all class types.
  return T->isDependentType() || T->isRecordType();
}
bool ResultBuilder::IsObjCMessageReceiver(const NamedDecl *ND) const {
  QualType T = getDeclUsageType(SemaRef.Context, ND);
  if (T.isNull())
    return false;
  // For array types, classify based on the element type.
  return isObjCReceiverType(SemaRef.Context,
                            SemaRef.Context.getBaseElementType(T));
}
bool ResultBuilder::IsObjCMessageReceiverOrLambdaCapture(const NamedDecl *ND) const {
if (IsObjCMessageReceiver(ND))
return true;
const VarDecl *Var = dyn_cast<VarDecl>(ND);
if (!Var)
return false;
return Var->hasLocalStorage() && !Var->hasAttr<BlocksAttr>();
}
bool ResultBuilder::IsObjCCollection(const NamedDecl *ND) const {
  // In C++ any ordinary name may qualify; in C/ObjC only non-type names do.
  const bool IsCPlusPlus = SemaRef.getLangOpts().CPlusPlus;
  if (IsCPlusPlus ? !IsOrdinaryName(ND) : !IsOrdinaryNonTypeName(ND))
    return false;

  QualType T = getDeclUsageType(SemaRef.Context, ND);
  if (T.isNull())
    return false;

  T = SemaRef.Context.getBaseElementType(T);
  return T->isObjCObjectType() || T->isObjCObjectPointerType() ||
         T->isObjCIdType() || (IsCPlusPlus && T->isRecordType());
}
/// \brief A filter that matches no declaration whatsoever.
bool ResultBuilder::IsImpossibleToSatisfy(const NamedDecl *ND) const {
  return false;
}
/// \brief Determines whether the given declaration is an Objective-C
/// instance variable.
bool ResultBuilder::IsObjCIvar(const NamedDecl *ND) const {
  return isa<ObjCIvarDecl>(ND);
}
namespace {
  /// \brief Visible declaration consumer that adds a code-completion result
  /// for each visible declaration.
  class CodeCompletionDeclConsumer : public VisibleDeclConsumer {
    ResultBuilder &Results;
    DeclContext *CurContext;

  public:
    CodeCompletionDeclConsumer(ResultBuilder &Results, DeclContext *CurContext)
        : Results(Results), CurContext(CurContext) {}

    void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
                   bool InBaseClass) override {
      // Only perform the accessibility check when we know the context in
      // which the declaration was found.
      const bool Accessible =
          !Ctx || Results.getSema().IsSimplyAccessible(ND, Ctx);
      Results.AddResult(
          ResultBuilder::Result(ND, Results.getBasePriority(ND), nullptr,
                                false, Accessible),
          CurContext, Hiding, InBaseClass);
    }
  };
}
/// \brief Add type specifiers for the current language as keyword results.
static void AddTypeSpecifierResults(const LangOptions &LangOpts,
ResultBuilder &Results) {
typedef CodeCompletionResult Result;
Results.AddResult(Result("short", CCP_Type));
Results.AddResult(Result("long", CCP_Type));
Results.AddResult(Result("signed", CCP_Type));
Results.AddResult(Result("unsigned", CCP_Type));
Results.AddResult(Result("void", CCP_Type));
Results.AddResult(Result("char", CCP_Type));
Results.AddResult(Result("int", CCP_Type));
Results.AddResult(Result("float", CCP_Type));
Results.AddResult(Result("double", CCP_Type));
Results.AddResult(Result("enum", CCP_Type));
Results.AddResult(Result("struct", CCP_Type));
Results.AddResult(Result("union", CCP_Type));
Results.AddResult(Result("const", CCP_Type));
Results.AddResult(Result("volatile", CCP_Type));
if (LangOpts.C99) {
// C99-specific
Results.AddResult(Result("_Complex", CCP_Type));
Results.AddResult(Result("_Imaginary", CCP_Type));
Results.AddResult(Result("_Bool", CCP_Type));
Results.AddResult(Result("restrict", CCP_Type));
}
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
if (LangOpts.CPlusPlus) {
// C++-specific
Results.AddResult(Result("bool", CCP_Type +
(LangOpts.ObjC1? CCD_bool_in_ObjC : 0)));
Results.AddResult(Result("class", CCP_Type));
Results.AddResult(Result("wchar_t", CCP_Type));
// typename qualified-id
Builder.AddTypedTextChunk("typename");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("qualifier");
Builder.AddTextChunk("::");
Builder.AddPlaceholderChunk("name");
Results.AddResult(Result(Builder.TakeString()));
if (LangOpts.CPlusPlus11) {
Results.AddResult(Result("auto", CCP_Type));
Results.AddResult(Result("char16_t", CCP_Type));
Results.AddResult(Result("char32_t", CCP_Type));
Builder.AddTypedTextChunk("decltype");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
}
}
// GNU extensions
if (LangOpts.GNUMode) {
// FIXME: Enable when we actually support decimal floating point.
// Results.AddResult(Result("_Decimal32"));
// Results.AddResult(Result("_Decimal64"));
// Results.AddResult(Result("_Decimal128"));
Builder.AddTypedTextChunk("typeof");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("expression");
Results.AddResult(Result(Builder.TakeString()));
Builder.AddTypedTextChunk("typeof");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("type");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
}
// Nullability
Results.AddResult(Result("_Nonnull", CCP_Type));
Results.AddResult(Result("_Null_unspecified", CCP_Type));
Results.AddResult(Result("_Nullable", CCP_Type));
}
static void AddStorageSpecifiers(Sema::ParserCompletionContext CCC,
                                 const LangOptions &LangOpts,
                                 ResultBuilder &Results) {
  typedef CodeCompletionResult Result;
  // Note: we don't suggest either "auto" or "register", because both
  // are pointless as storage specifiers. Elsewhere, we suggest "auto"
  // in C++0x as a type specifier.
  static const char *const Specifiers[] = {"extern", "static"};
  for (const char *Specifier : Specifiers)
    Results.AddResult(Result(Specifier));
}
/// \brief Add function specifiers appropriate for the given parser context
/// as keyword results.
static void AddFunctionSpecifiers(Sema::ParserCompletionContext CCC,
                                  const LangOptions &LangOpts,
                                  ResultBuilder &Results) {
  typedef CodeCompletionResult Result;
  switch (CCC) {
  case Sema::PCC_Class:
  case Sema::PCC_MemberTemplate:
    // Member-only specifiers, then fall through to also add "inline".
    if (LangOpts.CPlusPlus) {
      Results.AddResult(Result("explicit"));
      Results.AddResult(Result("friend"));
      Results.AddResult(Result("mutable"));
      Results.AddResult(Result("virtual"));
    }
    LLVM_FALLTHROUGH; // HLSL Change
  case Sema::PCC_ObjCInterface:
  case Sema::PCC_ObjCImplementation:
  case Sema::PCC_Namespace:
  case Sema::PCC_Template:
    // "inline" exists in C++ and C99 (not C89).
    if (LangOpts.CPlusPlus || LangOpts.C99)
      Results.AddResult(Result("inline"));
    break;
  // No function specifiers are suggested in these contexts.
  case Sema::PCC_ObjCInstanceVariableList:
  case Sema::PCC_Expression:
  case Sema::PCC_Statement:
  case Sema::PCC_ForInit:
  case Sema::PCC_Condition:
  case Sema::PCC_RecoveryInFunction:
  case Sema::PCC_Type:
  case Sema::PCC_ParenthesizedExpression:
  case Sema::PCC_LocalDeclarationSpecifiers:
    break;
  }
}
static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt);
static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt);
static void AddObjCVisibilityResults(const LangOptions &LangOpts,
ResultBuilder &Results,
bool NeedAt);
static void AddObjCImplementationResults(const LangOptions &LangOpts,
ResultBuilder &Results,
bool NeedAt);
static void AddObjCInterfaceResults(const LangOptions &LangOpts,
ResultBuilder &Results,
bool NeedAt);
static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt);
/// \brief Add the "typedef <type> <name>" code pattern as a completion result.
static void AddTypedefResult(ResultBuilder &Results) {
  CodeCompletionBuilder Builder(Results.getAllocator(),
                                Results.getCodeCompletionTUInfo());
  Builder.AddTypedTextChunk("typedef");
  Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
  Builder.AddPlaceholderChunk("type");
  Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
  Builder.AddPlaceholderChunk("name");
  Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
/// \brief Whether type names should be offered as completions in the given
/// parser context.
static bool WantTypesInContext(Sema::ParserCompletionContext CCC,
                               const LangOptions &LangOpts) {
  switch (CCC) {
  // Contexts where a type name is always a sensible completion.
  case Sema::PCC_Namespace:
  case Sema::PCC_Class:
  case Sema::PCC_ObjCInstanceVariableList:
  case Sema::PCC_Template:
  case Sema::PCC_MemberTemplate:
  case Sema::PCC_Statement:
  case Sema::PCC_RecoveryInFunction:
  case Sema::PCC_Type:
  case Sema::PCC_ParenthesizedExpression:
  case Sema::PCC_LocalDeclarationSpecifiers:
    return true;

  // Only C++ allows type names in these expression contexts.
  case Sema::PCC_Expression:
  case Sema::PCC_Condition:
    return LangOpts.CPlusPlus;

  // These Objective-C contexts never begin with a type.
  case Sema::PCC_ObjCInterface:
  case Sema::PCC_ObjCImplementation:
    return false;

  // A for-init-statement may declare a variable in C++, Objective-C, or C99.
  case Sema::PCC_ForInit:
    return LangOpts.CPlusPlus || LangOpts.ObjC1 || LangOpts.C99;
  }

  llvm_unreachable("Invalid ParserCompletionContext!");
}
/// \brief Build a printing policy tuned for displaying types in
/// code-completion results.
static PrintingPolicy getCompletionPrintingPolicy(const ASTContext &Context,
                                                  const Preprocessor &PP) {
  PrintingPolicy Policy = Sema::getPrintingPolicy(Context, PP);
  // Keep completion strings compact: no anonymous-tag source locations, no
  // ARC lifetime qualifiers, no unwritten scope qualifiers.
  Policy.AnonymousTagLocations = false;
  Policy.SuppressStrongLifetime = true;
  Policy.SuppressUnwrittenScope = true;
  return Policy;
}
/// \brief Retrieve a printing policy suitable for code completion.
///
/// Convenience overload that pulls the ASTContext and Preprocessor from the
/// given Sema instance.
static PrintingPolicy getCompletionPrintingPolicy(Sema &S) {
  return getCompletionPrintingPolicy(S.Context, S.PP);
}
/// \brief Retrieve the string representation of the given type as a string
/// that has the appropriate lifetime for code completion.
///
/// This routine provides a fast path where we provide constant strings for
/// common type names.
///
/// \param T the type to print.
/// \param Policy printing policy controlling how the type is rendered.
/// \param Allocator used to copy the string on the slow path so the result
/// outlives this call.
static const char *GetCompletionTypeString(QualType T,
                                           ASTContext &Context,
                                           const PrintingPolicy &Policy,
                                           CodeCompletionAllocator &Allocator) {
  // The fast paths only apply to unqualified types.
  if (!T.getLocalQualifiers()) {
    // Built-in type names are constant strings.
    if (const BuiltinType *BT = dyn_cast<BuiltinType>(T))
      return BT->getNameAsCString(Policy);
    // Anonymous tag types are constant strings.
    if (const TagType *TagT = dyn_cast<TagType>(T))
      if (TagDecl *Tag = TagT->getDecl())
        if (!Tag->hasNameForLinkage()) {
          switch (Tag->getTagKind()) {
          case TTK_Struct: return "struct <anonymous>";
          case TTK_Interface: return "__interface <anonymous>";
          case TTK_Class: return "class <anonymous>";
          case TTK_Union: return "union <anonymous>";
          case TTK_Enum: return "enum <anonymous>";
          }
        }
  }
  // Slow path: format the type as a string and copy it into the
  // completion allocator.
  std::string Result;
  T.getAsStringInternal(Result, Policy);
  return Allocator.CopyString(Result);
}
/// \brief Add a completion for "this", if we're in a member function.
static void addThisCompletion(Sema &S, ResultBuilder &Results) {
  QualType ThisTy = S.getCurrentThisType();
  // A null 'this' type means we are not inside a suitable member function.
  if (ThisTy.isNull())
    return;

  CodeCompletionAllocator &Allocator = Results.getAllocator();
  CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
  Builder.AddResultTypeChunk(
      GetCompletionTypeString(ThisTy, S.Context,
                              getCompletionPrintingPolicy(S), Allocator));
  Builder.AddTypedTextChunk("this");
  Results.AddResult(CodeCompletionResult(Builder.TakeString()));
}
/// \brief Add language constructs that show up for "ordinary" names.
static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
Scope *S,
Sema &SemaRef,
ResultBuilder &Results) {
CodeCompletionAllocator &Allocator = Results.getAllocator();
CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
PrintingPolicy Policy = getCompletionPrintingPolicy(SemaRef);
typedef CodeCompletionResult Result;
switch (CCC) {
case Sema::PCC_Namespace:
if (SemaRef.getLangOpts().CPlusPlus) {
if (Results.includeCodePatterns()) {
// namespace <identifier> { declarations }
Builder.AddTypedTextChunk("namespace");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("identifier");
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
Builder.AddPlaceholderChunk("declarations");
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
}
// namespace identifier = identifier ;
Builder.AddTypedTextChunk("namespace");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("name");
Builder.AddChunk(CodeCompletionString::CK_Equal);
Builder.AddPlaceholderChunk("namespace");
Results.AddResult(Result(Builder.TakeString()));
// Using directives
Builder.AddTypedTextChunk("using");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTextChunk("namespace");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("identifier");
Results.AddResult(Result(Builder.TakeString()));
// asm(string-literal)
Builder.AddTypedTextChunk("asm");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("string-literal");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
if (Results.includeCodePatterns()) {
// Explicit template instantiation
Builder.AddTypedTextChunk("template");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("declaration");
Results.AddResult(Result(Builder.TakeString()));
}
}
if (SemaRef.getLangOpts().ObjC1)
AddObjCTopLevelResults(Results, true);
AddTypedefResult(Results);
LLVM_FALLTHROUGH; // HLSL Change
case Sema::PCC_Class:
if (SemaRef.getLangOpts().CPlusPlus) {
// Using declaration
Builder.AddTypedTextChunk("using");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("qualifier");
Builder.AddTextChunk("::");
Builder.AddPlaceholderChunk("name");
Results.AddResult(Result(Builder.TakeString()));
// using typename qualifier::name (only in a dependent context)
if (SemaRef.CurContext->isDependentContext()) {
Builder.AddTypedTextChunk("using");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTextChunk("typename");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("qualifier");
Builder.AddTextChunk("::");
Builder.AddPlaceholderChunk("name");
Results.AddResult(Result(Builder.TakeString()));
}
if (CCC == Sema::PCC_Class) {
AddTypedefResult(Results);
// public:
Builder.AddTypedTextChunk("public");
if (Results.includeCodePatterns())
Builder.AddChunk(CodeCompletionString::CK_Colon);
Results.AddResult(Result(Builder.TakeString()));
// protected:
Builder.AddTypedTextChunk("protected");
if (Results.includeCodePatterns())
Builder.AddChunk(CodeCompletionString::CK_Colon);
Results.AddResult(Result(Builder.TakeString()));
// private:
Builder.AddTypedTextChunk("private");
if (Results.includeCodePatterns())
Builder.AddChunk(CodeCompletionString::CK_Colon);
Results.AddResult(Result(Builder.TakeString()));
}
}
LLVM_FALLTHROUGH; // HLSL Change
case Sema::PCC_Template:
case Sema::PCC_MemberTemplate:
if (SemaRef.getLangOpts().CPlusPlus && Results.includeCodePatterns()) {
// template < parameters >
Builder.AddTypedTextChunk("template");
Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
Builder.AddPlaceholderChunk("parameters");
Builder.AddChunk(CodeCompletionString::CK_RightAngle);
Results.AddResult(Result(Builder.TakeString()));
}
AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
break;
case Sema::PCC_ObjCInterface:
AddObjCInterfaceResults(SemaRef.getLangOpts(), Results, true);
AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
break;
case Sema::PCC_ObjCImplementation:
AddObjCImplementationResults(SemaRef.getLangOpts(), Results, true);
AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
break;
case Sema::PCC_ObjCInstanceVariableList:
AddObjCVisibilityResults(SemaRef.getLangOpts(), Results, true);
break;
case Sema::PCC_RecoveryInFunction:
case Sema::PCC_Statement: {
AddTypedefResult(Results);
if (SemaRef.getLangOpts().CPlusPlus && Results.includeCodePatterns() &&
SemaRef.getLangOpts().CXXExceptions) {
Builder.AddTypedTextChunk("try");
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
Builder.AddPlaceholderChunk("statements");
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Builder.AddTextChunk("catch");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("declaration");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
Builder.AddPlaceholderChunk("statements");
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
}
if (SemaRef.getLangOpts().ObjC1)
AddObjCStatementResults(Results, true);
if (Results.includeCodePatterns()) {
// if (condition) { statements }
Builder.AddTypedTextChunk("if");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (SemaRef.getLangOpts().CPlusPlus)
Builder.AddPlaceholderChunk("condition");
else
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
Builder.AddPlaceholderChunk("statements");
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
// switch (condition) { }
Builder.AddTypedTextChunk("switch");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (SemaRef.getLangOpts().CPlusPlus)
Builder.AddPlaceholderChunk("condition");
else
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
}
// Switch-specific statements.
if (!SemaRef.getCurFunction()->SwitchStack.empty()) {
// case expression:
Builder.AddTypedTextChunk("case");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_Colon);
Results.AddResult(Result(Builder.TakeString()));
// default:
Builder.AddTypedTextChunk("default");
Builder.AddChunk(CodeCompletionString::CK_Colon);
Results.AddResult(Result(Builder.TakeString()));
}
if (Results.includeCodePatterns()) {
/// while (condition) { statements }
Builder.AddTypedTextChunk("while");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (SemaRef.getLangOpts().CPlusPlus)
Builder.AddPlaceholderChunk("condition");
else
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
Builder.AddPlaceholderChunk("statements");
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
// do { statements } while ( expression );
Builder.AddTypedTextChunk("do");
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
Builder.AddPlaceholderChunk("statements");
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Builder.AddTextChunk("while");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
// for ( for-init-statement ; condition ; expression ) { statements }
Builder.AddTypedTextChunk("for");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (SemaRef.getLangOpts().CPlusPlus || SemaRef.getLangOpts().C99)
Builder.AddPlaceholderChunk("init-statement");
else
Builder.AddPlaceholderChunk("init-expression");
Builder.AddChunk(CodeCompletionString::CK_SemiColon);
Builder.AddPlaceholderChunk("condition");
Builder.AddChunk(CodeCompletionString::CK_SemiColon);
Builder.AddPlaceholderChunk("inc-expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddPlaceholderChunk("statements");
Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
Builder.AddChunk(CodeCompletionString::CK_RightBrace);
Results.AddResult(Result(Builder.TakeString()));
}
if (S->getContinueParent()) {
// continue ;
Builder.AddTypedTextChunk("continue");
Results.AddResult(Result(Builder.TakeString()));
}
if (S->getBreakParent()) {
// break ;
Builder.AddTypedTextChunk("break");
Results.AddResult(Result(Builder.TakeString()));
}
// "return expression ;" or "return ;", depending on whether we
// know the function is void or not.
bool isVoid = false;
if (FunctionDecl *Function = dyn_cast<FunctionDecl>(SemaRef.CurContext))
isVoid = Function->getReturnType()->isVoidType();
else if (ObjCMethodDecl *Method
= dyn_cast<ObjCMethodDecl>(SemaRef.CurContext))
isVoid = Method->getReturnType()->isVoidType();
else if (SemaRef.getCurBlock() &&
!SemaRef.getCurBlock()->ReturnType.isNull())
isVoid = SemaRef.getCurBlock()->ReturnType->isVoidType();
Builder.AddTypedTextChunk("return");
if (!isVoid) {
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("expression");
}
Results.AddResult(Result(Builder.TakeString()));
// goto identifier ;
Builder.AddTypedTextChunk("goto");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("label");
Results.AddResult(Result(Builder.TakeString()));
// Using directives
Builder.AddTypedTextChunk("using");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTextChunk("namespace");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("identifier");
Results.AddResult(Result(Builder.TakeString()));
}
// Fall through (for statement expressions).
LLVM_FALLTHROUGH; // HLSL Change
case Sema::PCC_ForInit:
case Sema::PCC_Condition:
AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
// Fall through: conditions and statements can have expressions.
LLVM_FALLTHROUGH; // HLSL Change
case Sema::PCC_ParenthesizedExpression:
if (SemaRef.getLangOpts().ObjCAutoRefCount &&
CCC == Sema::PCC_ParenthesizedExpression) {
// (__bridge <type>)<expression>
Builder.AddTypedTextChunk("__bridge");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("type");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddPlaceholderChunk("expression");
Results.AddResult(Result(Builder.TakeString()));
// (__bridge_transfer <Objective-C type>)<expression>
Builder.AddTypedTextChunk("__bridge_transfer");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("Objective-C type");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddPlaceholderChunk("expression");
Results.AddResult(Result(Builder.TakeString()));
// (__bridge_retained <CF type>)<expression>
Builder.AddTypedTextChunk("__bridge_retained");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("CF type");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddPlaceholderChunk("expression");
Results.AddResult(Result(Builder.TakeString()));
}
LLVM_FALLTHROUGH; // HLSL Change
case Sema::PCC_Expression: {
if (SemaRef.getLangOpts().CPlusPlus) {
// 'this', if we're in a non-static member function.
addThisCompletion(SemaRef, Results);
// true
Builder.AddResultTypeChunk("bool");
Builder.AddTypedTextChunk("true");
Results.AddResult(Result(Builder.TakeString()));
// false
Builder.AddResultTypeChunk("bool");
Builder.AddTypedTextChunk("false");
Results.AddResult(Result(Builder.TakeString()));
if (SemaRef.getLangOpts().RTTI) {
// dynamic_cast < type-id > ( expression )
Builder.AddTypedTextChunk("dynamic_cast");
Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
Builder.AddPlaceholderChunk("type");
Builder.AddChunk(CodeCompletionString::CK_RightAngle);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
}
// static_cast < type-id > ( expression )
Builder.AddTypedTextChunk("static_cast");
Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
Builder.AddPlaceholderChunk("type");
Builder.AddChunk(CodeCompletionString::CK_RightAngle);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
// reinterpret_cast < type-id > ( expression )
Builder.AddTypedTextChunk("reinterpret_cast");
Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
Builder.AddPlaceholderChunk("type");
Builder.AddChunk(CodeCompletionString::CK_RightAngle);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
// const_cast < type-id > ( expression )
Builder.AddTypedTextChunk("const_cast");
Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
Builder.AddPlaceholderChunk("type");
Builder.AddChunk(CodeCompletionString::CK_RightAngle);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
if (SemaRef.getLangOpts().RTTI) {
// typeid ( expression-or-type )
Builder.AddResultTypeChunk("std::type_info");
Builder.AddTypedTextChunk("typeid");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression-or-type");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
}
// new T ( ... )
Builder.AddTypedTextChunk("new");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("type");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expressions");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
// new T [ ] ( ... )
Builder.AddTypedTextChunk("new");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("type");
Builder.AddChunk(CodeCompletionString::CK_LeftBracket);
Builder.AddPlaceholderChunk("size");
Builder.AddChunk(CodeCompletionString::CK_RightBracket);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expressions");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
// delete expression
Builder.AddResultTypeChunk("void");
Builder.AddTypedTextChunk("delete");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("expression");
Results.AddResult(Result(Builder.TakeString()));
// delete [] expression
Builder.AddResultTypeChunk("void");
Builder.AddTypedTextChunk("delete");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftBracket);
Builder.AddChunk(CodeCompletionString::CK_RightBracket);
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("expression");
Results.AddResult(Result(Builder.TakeString()));
if (SemaRef.getLangOpts().CXXExceptions) {
// throw expression
Builder.AddResultTypeChunk("void");
Builder.AddTypedTextChunk("throw");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("expression");
Results.AddResult(Result(Builder.TakeString()));
}
// FIXME: Rethrow?
if (SemaRef.getLangOpts().CPlusPlus11) {
// nullptr
Builder.AddResultTypeChunk("std::nullptr_t");
Builder.AddTypedTextChunk("nullptr");
Results.AddResult(Result(Builder.TakeString()));
// alignof
Builder.AddResultTypeChunk("size_t");
Builder.AddTypedTextChunk("alignof");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("type");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
// noexcept
Builder.AddResultTypeChunk("bool");
Builder.AddTypedTextChunk("noexcept");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
// sizeof... expression
Builder.AddResultTypeChunk("size_t");
Builder.AddTypedTextChunk("sizeof...");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("parameter-pack");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
}
}
if (SemaRef.getLangOpts().ObjC1) {
// Add "super", if we're in an Objective-C class with a superclass.
if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl()) {
// The interface can be NULL.
if (ObjCInterfaceDecl *ID = Method->getClassInterface())
if (ID->getSuperClass()) {
std::string SuperType;
SuperType = ID->getSuperClass()->getNameAsString();
if (Method->isInstanceMethod())
SuperType += " *";
Builder.AddResultTypeChunk(Allocator.CopyString(SuperType));
Builder.AddTypedTextChunk("super");
Results.AddResult(Result(Builder.TakeString()));
}
}
AddObjCExpressionResults(Results, true);
}
if (SemaRef.getLangOpts().C11) {
// _Alignof
Builder.AddResultTypeChunk("size_t");
if (SemaRef.PP.isMacroDefined("alignof"))
Builder.AddTypedTextChunk("alignof");
else
Builder.AddTypedTextChunk("_Alignof");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("type");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
}
// sizeof expression
Builder.AddResultTypeChunk("size_t");
Builder.AddTypedTextChunk("sizeof");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("expression-or-type");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Result(Builder.TakeString()));
break;
}
case Sema::PCC_Type:
case Sema::PCC_LocalDeclarationSpecifiers:
break;
}
if (WantTypesInContext(CCC, SemaRef.getLangOpts()))
AddTypeSpecifierResults(SemaRef.getLangOpts(), Results);
if (SemaRef.getLangOpts().CPlusPlus && CCC != Sema::PCC_Type)
Results.AddResult(Result("operator"));
}
/// \brief If the given declaration has an associated type, add it as a result
/// type chunk.
static void AddResultTypeChunk(ASTContext &Context,
                               const PrintingPolicy &Policy,
                               const NamedDecl *ND,
                               QualType BaseType,
                               CodeCompletionBuilder &Result) {
  // Nothing to do without a declaration.
  if (!ND)
    return;

  // Constructors and conversion functions carry their "result" type in
  // their names, so they never get a separate result-type chunk.
  if (isa<CXXConstructorDecl>(ND) || isa<CXXConversionDecl>(ND))
    return;

  // Work out the type to display based on the kind of declaration. The
  // order of these checks matters: e.g. functions are also ValueDecls and
  // must be matched first.
  QualType DisplayType;
  if (const FunctionDecl *FD = ND->getAsFunction())
    DisplayType = FD->getReturnType();
  else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(ND)) {
    // Prefer the result type as seen through a message send to BaseType.
    DisplayType = !BaseType.isNull() ? OMD->getSendResultType(BaseType)
                                     : OMD->getReturnType();
  } else if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(ND))
    DisplayType =
        Context.getTypeDeclType(cast<TypeDecl>(ECD->getDeclContext()));
  else if (isa<UnresolvedUsingValueDecl>(ND)) {
    // Unresolved using declarations have no known type; emit nothing.
  } else if (const ObjCIvarDecl *IVD = dyn_cast<ObjCIvarDecl>(ND)) {
    DisplayType = !BaseType.isNull() ? IVD->getUsageType(BaseType)
                                     : IVD->getType();
  } else if (const ValueDecl *VD = dyn_cast<ValueDecl>(ND)) {
    DisplayType = VD->getType();
  } else if (const ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(ND)) {
    DisplayType = !BaseType.isNull() ? PD->getUsageType(BaseType)
                                     : PD->getType();
  }

  // Skip declarations whose type is unknown or still dependent.
  if (DisplayType.isNull() ||
      Context.hasSameType(DisplayType, Context.DependentTy))
    return;

  Result.AddResultTypeChunk(GetCompletionTypeString(DisplayType, Context,
                                                    Policy,
                                                    Result.getAllocator()));
}
static void MaybeAddSentinel(Preprocessor &PP,
                             const NamedDecl *FunctionOrMethod,
                             CodeCompletionBuilder &Result) {
  // Only declarations carrying sentinel(0) get a trailing null sentinel
  // appended to their completion string.
  SentinelAttr *Sentinel = FunctionOrMethod->getAttr<SentinelAttr>();
  if (!Sentinel || Sentinel->getSentinel() != 0)
    return;

  // Pick the most idiomatic spelling of the sentinel that is actually
  // available in this translation unit.
  if (PP.getLangOpts().ObjC1 && PP.isMacroDefined("nil"))
    Result.AddTextChunk(", nil");
  else if (PP.isMacroDefined("NULL"))
    Result.AddTextChunk(", NULL");
  else
    Result.AddTextChunk(", (void*)0");
}
static std::string formatObjCParamQualifiers(unsigned ObjCQuals,
                                             QualType &Type) {
  std::string Quals;

  // Direction qualifiers are mutually exclusive; "in" takes precedence,
  // then "inout", then "out".
  if (ObjCQuals & Decl::OBJC_TQ_In)
    Quals += "in ";
  else if (ObjCQuals & Decl::OBJC_TQ_Inout)
    Quals += "inout ";
  else if (ObjCQuals & Decl::OBJC_TQ_Out)
    Quals += "out ";

  // Copy semantics: bycopy wins over byref when both bits are set.
  if (ObjCQuals & Decl::OBJC_TQ_Bycopy)
    Quals += "bycopy ";
  else if (ObjCQuals & Decl::OBJC_TQ_Byref)
    Quals += "byref ";

  if (ObjCQuals & Decl::OBJC_TQ_Oneway)
    Quals += "oneway ";

  // Nullability is carried as a type attribute; strip it from Type (an
  // in-out parameter) and spell it as a context-sensitive keyword instead.
  if (ObjCQuals & Decl::OBJC_TQ_CSNullability) {
    if (auto Nullability = AttributedType::stripOuterNullability(Type)) {
      switch (*Nullability) {
      case NullabilityKind::NonNull:
        Quals += "nonnull ";
        break;
      case NullabilityKind::Nullable:
        Quals += "nullable ";
        break;
      case NullabilityKind::Unspecified:
        Quals += "null_unspecified ";
        break;
      }
    }
  }
  return Quals;
}
/// \brief Render a function parameter as the placeholder text shown in a
/// code-completion result.
///
/// Ordinary (non-block) parameters are printed as "<type> <name>" (or, for
/// Objective-C method parameters, "(<qualifiers><type>)<name>"). Block
/// pointer parameters are expanded into a full block-literal skeleton so
/// the user can fill in the block body directly.
///
/// \param Policy Pretty-printing policy used for all type names.
/// \param Param The parameter to render.
/// \param SuppressName If true, omit the parameter name from the output.
/// \param SuppressBlock If true, render a block parameter as a plain
/// parameter rather than a block literal.
/// \param ObjCSubsts If present, Objective-C type arguments to substitute
/// into the parameter type (parameterized classes).
static std::string FormatFunctionParameter(const PrintingPolicy &Policy,
                                           const ParmVarDecl *Param,
                                           bool SuppressName = false,
                                           bool SuppressBlock = false,
                       Optional<ArrayRef<QualType>> ObjCSubsts = None) {
  bool ObjCMethodParam = isa<ObjCMethodDecl>(Param->getDeclContext());
  if (Param->getType()->isDependentType() ||
      !Param->getType()->isBlockPointerType()) {
    // The argument for a dependent or non-block parameter is a placeholder
    // containing that parameter's type.
    std::string Result;
    if (Param->getIdentifier() && !ObjCMethodParam && !SuppressName)
      Result = Param->getIdentifier()->getName();
    QualType Type = Param->getType();
    if (ObjCSubsts)
      Type = Type.substObjCTypeArgs(Param->getASTContext(), *ObjCSubsts,
                                    ObjCSubstitutionContext::Parameter);
    if (ObjCMethodParam) {
      Result = "(" + formatObjCParamQualifiers(Param->getObjCDeclQualifier(),
                                               Type);
      Result += Type.getAsString(Policy) + ")";
      if (Param->getIdentifier() && !SuppressName)
        Result += Param->getIdentifier()->getName();
    } else {
      Type.getAsStringInternal(Result, Policy);
    }
    return Result;
  }

  // The argument for a block pointer parameter is a block literal with
  // the appropriate type. Walk the type-source info to find the function
  // prototype written behind the block pointer.
  FunctionTypeLoc Block;
  FunctionProtoTypeLoc BlockProto;
  TypeLoc TL;
  if (TypeSourceInfo *TSInfo = Param->getTypeSourceInfo()) {
    TL = TSInfo->getTypeLoc().getUnqualifiedLoc();
    while (true) {
      // Look through typedefs.
      if (!SuppressBlock) {
        if (TypedefTypeLoc TypedefTL = TL.getAs<TypedefTypeLoc>()) {
          if (TypeSourceInfo *InnerTSInfo =
                  TypedefTL.getTypedefNameDecl()->getTypeSourceInfo()) {
            TL = InnerTSInfo->getTypeLoc().getUnqualifiedLoc();
            continue;
          }
        }
        // Look through qualified types.
        if (QualifiedTypeLoc QualifiedTL = TL.getAs<QualifiedTypeLoc>()) {
          TL = QualifiedTL.getUnqualifiedLoc();
          continue;
        }
        // Look through type attributes.
        if (AttributedTypeLoc AttrTL = TL.getAs<AttributedTypeLoc>()) {
          TL = AttrTL.getModifiedLoc();
          continue;
        }
      }
      // Try to get the function prototype behind the block pointer type,
      // then we're done.
      if (BlockPointerTypeLoc BlockPtr = TL.getAs<BlockPointerTypeLoc>()) {
        TL = BlockPtr.getPointeeLoc().IgnoreParens();
        Block = TL.getAs<FunctionTypeLoc>();
        BlockProto = TL.getAs<FunctionProtoTypeLoc>();
      }
      break;
    }
  }

  if (!Block) {
    // We were unable to find a FunctionProtoTypeLoc with parameter names
    // for the block; just use the parameter type as a placeholder.
    std::string Result;
    if (!ObjCMethodParam && Param->getIdentifier())
      Result = Param->getIdentifier()->getName();
    QualType Type = Param->getType().getUnqualifiedType();
    if (ObjCMethodParam) {
      Result = "(" + formatObjCParamQualifiers(Param->getObjCDeclQualifier(),
                                               Type);
      // BUGFIX: this previously read
      //   Result += Type.getAsString(Policy) + Result + ")";
      // which appended the partially-built Result ("(" + qualifiers) into
      // itself, e.g. producing "(in int(in )" instead of "(in int)". It now
      // mirrors the non-block branch above.
      Result += Type.getAsString(Policy) + ")";
      if (Param->getIdentifier())
        Result += Param->getIdentifier()->getName();
    } else {
      Type.getAsStringInternal(Result, Policy);
    }
    return Result;
  }

  // We have the function prototype behind the block pointer type, as it was
  // written in the source.
  std::string Result;
  QualType ResultType = Block.getTypePtr()->getReturnType();
  if (ObjCSubsts)
    ResultType = ResultType.substObjCTypeArgs(Param->getASTContext(),
                                              *ObjCSubsts,
                                              ObjCSubstitutionContext::Result);
  // Only print the result type when it is informative (non-void), or when
  // formatting as a declaration-style parameter.
  if (!ResultType->isVoidType() || SuppressBlock)
    ResultType.getAsStringInternal(Result, Policy);

  // Format the parameter list.
  std::string Params;
  if (!BlockProto || Block.getNumParams() == 0) {
    if (BlockProto && BlockProto.getTypePtr()->isVariadic())
      Params = "(...)";
    else
      Params = "(void)";
  } else {
    Params += "(";
    for (unsigned I = 0, N = Block.getNumParams(); I != N; ++I) {
      if (I)
        Params += ", ";
      // Recursively format each block parameter, suppressing further block
      // expansion so nested blocks print as plain parameters.
      Params += FormatFunctionParameter(Policy, Block.getParam(I),
                                        /*SuppressName=*/false,
                                        /*SuppressBlock=*/true,
                                        ObjCSubsts);
      if (I == N - 1 && BlockProto.getTypePtr()->isVariadic())
        Params += ", ...";
    }
    Params += ")";
  }

  if (SuppressBlock) {
    // Format as a parameter: "<result> (^<name>)(<params>)".
    Result = Result + " (^";
    if (Param->getIdentifier())
      Result += Param->getIdentifier()->getName();
    Result += ")";
    Result += Params;
  } else {
    // Format as a block literal argument: "^<result>(<params>)<name>".
    Result = '^' + Result;
    Result += Params;
    if (Param->getIdentifier())
      Result += Param->getIdentifier()->getName();
  }

  return Result;
}
/// \brief Add function parameter chunks to the given code completion string.
static void AddFunctionParameterChunks(Preprocessor &PP,
                                       const PrintingPolicy &Policy,
                                       const FunctionDecl *Function,
                                       CodeCompletionBuilder &Result,
                                       unsigned Start = 0,
                                       bool InOptional = false) {
  bool IsFirst = true;
  unsigned NumParams = Function->getNumParams();
  for (unsigned Idx = Start; Idx != NumParams; ++Idx) {
    const ParmVarDecl *Param = Function->getParamDecl(Idx);

    if (Param->hasDefaultArg() && !InOptional) {
      // This parameter has a default argument: fold it and everything
      // after it into a single optional chunk built by a recursive call.
      CodeCompletionBuilder Optional(Result.getAllocator(),
                                     Result.getCodeCompletionTUInfo());
      if (!IsFirst)
        Optional.AddChunk(CodeCompletionString::CK_Comma);
      AddFunctionParameterChunks(PP, Policy, Function, Optional, Idx, true);
      Result.AddOptionalChunk(Optional.TakeString());
      break;
    }

    if (IsFirst)
      IsFirst = false;
    else
      Result.AddChunk(CodeCompletionString::CK_Comma);

    // Only the first parameter of a recursive (optional) call skips the
    // defaulted-argument check above.
    InOptional = false;

    // Render this parameter as a tab-through placeholder.
    std::string Placeholder = FormatFunctionParameter(Policy, Param);
    if (Function->isVariadic() && Idx == NumParams - 1)
      Placeholder += ", ...";
    Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Placeholder));
  }

  // A variadic function with no named parameters still shows "..."; the
  // sentinel (nil/NULL) is appended when the sentinel attribute asks for it.
  if (const FunctionProtoType *Proto =
          Function->getType()->getAs<FunctionProtoType>()) {
    if (Proto->isVariadic()) {
      if (Proto->getNumParams() == 0)
        Result.AddPlaceholderChunk("...");
      MaybeAddSentinel(PP, Function, Result);
    }
  }
}
/// \brief Add template parameter chunks to the given code completion string.
static void AddTemplateParameterChunks(ASTContext &Context,
const PrintingPolicy &Policy,
const TemplateDecl *Template,
CodeCompletionBuilder &Result,
unsigned MaxParameters = 0,
unsigned Start = 0,
bool InDefaultArg = false) {
// Tracks whether a comma must be emitted before the next parameter chunk.
bool FirstParameter = true;
// Prefer to take the template parameter names from the first declaration of
// the template.
Template = cast<TemplateDecl>(Template->getCanonicalDecl());
TemplateParameterList *Params = Template->getTemplateParameters();
TemplateParameterList::iterator PEnd = Params->end();
// A nonzero MaxParameters caps how many parameters are shown.
if (MaxParameters)
PEnd = Params->begin() + MaxParameters;
for (TemplateParameterList::iterator P = Params->begin() + Start;
P != PEnd; ++P) {
bool HasDefaultArg = false;
std::string PlaceholderStr;
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
// Type parameter: spell it the way it was declared ("typename"/"class").
if (TTP->wasDeclaredWithTypename())
PlaceholderStr = "typename";
else
PlaceholderStr = "class";
if (TTP->getIdentifier()) {
PlaceholderStr += ' ';
PlaceholderStr += TTP->getIdentifier()->getName();
}
HasDefaultArg = TTP->hasDefaultArgument();
} else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(*P)) {
// Non-type parameter: printed as "<type> <name>" by getAsStringInternal.
if (NTTP->getIdentifier())
PlaceholderStr = NTTP->getIdentifier()->getName();
NTTP->getType().getAsStringInternal(PlaceholderStr, Policy);
HasDefaultArg = NTTP->hasDefaultArgument();
} else {
assert(isa<TemplateTemplateParmDecl>(*P));
TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
// Since putting the template argument list into the placeholder would
// be very, very long, we just use an abbreviation.
PlaceholderStr = "template<...> class";
if (TTP->getIdentifier()) {
PlaceholderStr += ' ';
PlaceholderStr += TTP->getIdentifier()->getName();
}
HasDefaultArg = TTP->hasDefaultArgument();
}
if (HasDefaultArg && !InDefaultArg) {
// When we see an optional default argument, put that argument and
// the remaining default arguments into a new, optional string.
CodeCompletionBuilder Opt(Result.getAllocator(),
Result.getCodeCompletionTUInfo());
if (!FirstParameter)
Opt.AddChunk(CodeCompletionString::CK_Comma);
// Recurse with InDefaultArg=true so the nested call renders the
// defaulted parameters instead of nesting further optional strings.
AddTemplateParameterChunks(Context, Policy, Template, Opt, MaxParameters,
P - Params->begin(), true);
Result.AddOptionalChunk(Opt.TakeString());
break;
}
// Only the first parameter of a recursive call skips the check above.
InDefaultArg = false;
if (FirstParameter)
FirstParameter = false;
else
Result.AddChunk(CodeCompletionString::CK_Comma);
// Add the placeholder string.
Result.AddPlaceholderChunk(
Result.getAllocator().CopyString(PlaceholderStr));
}
}
/// \brief Add a qualifier to the given code-completion string, if the
/// provided nested-name-specifier is non-NULL.
static void
AddQualifierToCompletionString(CodeCompletionBuilder &Result,
                               NestedNameSpecifier *Qualifier,
                               bool QualifierIsInformative,
                               ASTContext &Context,
                               const PrintingPolicy &Policy) {
  // Nothing to add when there is no qualifier.
  if (!Qualifier)
    return;

  // Print the nested-name-specifier (e.g. "std::" or "Outer::Inner::").
  std::string NNSText;
  llvm::raw_string_ostream OS(NNSText);
  Qualifier->print(OS, Policy);
  OS.flush();

  // Informative qualifiers are shown but not inserted; otherwise the
  // qualifier is part of the completion text itself.
  const char *Copied = Result.getAllocator().CopyString(NNSText);
  if (QualifierIsInformative)
    Result.AddInformativeChunk(Copied);
  else
    Result.AddTextChunk(Copied);
}
static void
AddFunctionTypeQualsToCompletionString(CodeCompletionBuilder &Result,
                                       const FunctionDecl *Function) {
  const FunctionProtoType *Proto =
      Function->getType()->getAs<FunctionProtoType>();
  if (!Proto || !Proto->getTypeQuals())
    return;

  // FIXME: Add ref-qualifier!

  // Fast path: a single qualifier maps to a string literal, avoiding the
  // allocation/copy below.
  switch (Proto->getTypeQuals()) {
  case Qualifiers::Const:
    Result.AddInformativeChunk(" const");
    return;
  case Qualifiers::Volatile:
    Result.AddInformativeChunk(" volatile");
    return;
  case Qualifiers::Restrict:
    Result.AddInformativeChunk(" restrict");
    return;
  default:
    break;
  }

  // Slow path: several qualifiers — build the combined string.
  std::string QualsStr;
  if (Proto->isConst())
    QualsStr += " const";
  if (Proto->isVolatile())
    QualsStr += " volatile";
  if (Proto->isRestrict())
    QualsStr += " restrict";
  Result.AddInformativeChunk(Result.getAllocator().CopyString(QualsStr));
}
/// \brief Add the name of the given declaration
static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
const NamedDecl *ND,
CodeCompletionBuilder &Result) {
DeclarationName Name = ND->getDeclName();
// Unnamed declarations contribute no typed-text chunk.
if (!Name)
return;
switch (Name.getNameKind()) {
case DeclarationName::CXXOperatorName: {
// Spell out the operator, e.g. "operator+" or "operator new[]".
const char *OperatorName = nullptr;
switch (Name.getCXXOverloadedOperator()) {
case OO_None:
case OO_Conditional:
case NUM_OVERLOADED_OPERATORS:
OperatorName = "operator";
break;
// Expand one case per operator with a fixed single-token spelling; the
// multi-token operators are handled explicitly below.
#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
case OO_##Name: OperatorName = "operator" Spelling; break;
#define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly)
#include "clang/Basic/OperatorKinds.def"
case OO_New: OperatorName = "operator new"; break;
case OO_Delete: OperatorName = "operator delete"; break;
case OO_Array_New: OperatorName = "operator new[]"; break;
case OO_Array_Delete: OperatorName = "operator delete[]"; break;
case OO_Call: OperatorName = "operator()"; break;
case OO_Subscript: OperatorName = "operator[]"; break;
}
Result.AddTypedTextChunk(OperatorName);
break;
}
case DeclarationName::Identifier:
case DeclarationName::CXXConversionFunctionName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXLiteralOperatorName:
// Ordinary names are printed directly.
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(ND->getNameAsString()));
break;
case DeclarationName::CXXUsingDirective:
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
// These name kinds never contribute a typed-text chunk here.
break;
case DeclarationName::CXXConstructorName: {
// Constructors print the class name; class templates also get their
// template parameter list appended in angle brackets.
CXXRecordDecl *Record = nullptr;
QualType Ty = Name.getCXXNameType();
if (const RecordType *RecordTy = Ty->getAs<RecordType>())
Record = cast<CXXRecordDecl>(RecordTy->getDecl());
else if (const InjectedClassNameType *InjectedTy
= Ty->getAs<InjectedClassNameType>())
Record = InjectedTy->getDecl();
else {
// Fall back to the raw name when the constructed type is not a record.
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(ND->getNameAsString()));
break;
}
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(Record->getNameAsString()));
if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate()) {
Result.AddChunk(CodeCompletionString::CK_LeftAngle);
AddTemplateParameterChunks(Context, Policy, Template, Result);
Result.AddChunk(CodeCompletionString::CK_RightAngle);
}
break;
}
}
}
// Convenience overload: extracts the ASTContext and Preprocessor from the
// given Sema instance and forwards to the main implementation.
CodeCompletionString *CodeCompletionResult::CreateCodeCompletionString(Sema &S,
const CodeCompletionContext &CCContext,
CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
bool IncludeBriefComments) {
return CreateCodeCompletionString(S.Context, S.PP, CCContext, Allocator,
CCTUInfo, IncludeBriefComments);
}
/// \brief If possible, create a new code completion string for the given
/// result.
///
/// \returns Either a new, heap-allocated code completion string describing
/// how to use this result, or NULL to indicate that the string or name of the
/// result is all that is needed.
CodeCompletionString *
CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
Preprocessor &PP,
const CodeCompletionContext &CCContext,
CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
bool IncludeBriefComments) {
CodeCompletionBuilder Result(Allocator, CCTUInfo, Priority, Availability);
PrintingPolicy Policy = getCompletionPrintingPolicy(Ctx, PP);
if (Kind == RK_Pattern) {
Pattern->Priority = Priority;
Pattern->Availability = Availability;
if (Declaration) {
Result.addParentContext(Declaration->getDeclContext());
Pattern->ParentName = Result.getParentName();
// Provide code completion comment for self.GetterName where
// GetterName is the getter method for a property with name
// different from the property name (declared via a property
// getter attribute.
const NamedDecl *ND = Declaration;
if (const ObjCMethodDecl *M = dyn_cast<ObjCMethodDecl>(ND))
if (M->isPropertyAccessor())
if (const ObjCPropertyDecl *PDecl = M->findPropertyDecl())
if (PDecl->getGetterName() == M->getSelector() &&
PDecl->getIdentifier() != M->getIdentifier()) {
if (const RawComment *RC =
Ctx.getRawCommentForAnyRedecl(M)) {
Result.addBriefComment(RC->getBriefText(Ctx));
Pattern->BriefComment = Result.getBriefComment();
}
else if (const RawComment *RC =
Ctx.getRawCommentForAnyRedecl(PDecl)) {
Result.addBriefComment(RC->getBriefText(Ctx));
Pattern->BriefComment = Result.getBriefComment();
}
}
}
return Pattern;
}
if (Kind == RK_Keyword) {
Result.AddTypedTextChunk(Keyword);
return Result.TakeString();
}
if (Kind == RK_Macro) {
const MacroInfo *MI = PP.getMacroInfo(Macro);
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(Macro->getName()));
if (!MI || !MI->isFunctionLike())
return Result.TakeString();
// Format a function-like macro with placeholders for the arguments.
Result.AddChunk(CodeCompletionString::CK_LeftParen);
MacroInfo::arg_iterator A = MI->arg_begin(), AEnd = MI->arg_end();
// C99 variadic macros add __VA_ARGS__ at the end. Skip it.
if (MI->isC99Varargs()) {
--AEnd;
if (A == AEnd) {
Result.AddPlaceholderChunk("...");
}
}
for (MacroInfo::arg_iterator A = MI->arg_begin(); A != AEnd; ++A) {
if (A != MI->arg_begin())
Result.AddChunk(CodeCompletionString::CK_Comma);
if (MI->isVariadic() && (A+1) == AEnd) {
SmallString<32> Arg = (*A)->getName();
if (MI->isC99Varargs())
Arg += ", ...";
else
Arg += "...";
Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Arg));
break;
}
// Non-variadic macros are simple.
Result.AddPlaceholderChunk(
Result.getAllocator().CopyString((*A)->getName()));
}
Result.AddChunk(CodeCompletionString::CK_RightParen);
return Result.TakeString();
}
assert(Kind == RK_Declaration && "Missed a result kind?");
const NamedDecl *ND = Declaration;
Result.addParentContext(ND->getDeclContext());
if (IncludeBriefComments) {
// Add documentation comment, if it exists.
if (const RawComment *RC = Ctx.getRawCommentForAnyRedecl(ND)) {
Result.addBriefComment(RC->getBriefText(Ctx));
}
else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(ND))
if (OMD->isPropertyAccessor())
if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
if (const RawComment *RC = Ctx.getRawCommentForAnyRedecl(PDecl))
Result.addBriefComment(RC->getBriefText(Ctx));
}
if (StartsNestedNameSpecifier) {
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(ND->getNameAsString()));
Result.AddTextChunk("::");
return Result.TakeString();
}
for (const auto *I : ND->specific_attrs<AnnotateAttr>())
Result.AddAnnotation(Result.getAllocator().CopyString(I->getAnnotation()));
AddResultTypeChunk(Ctx, Policy, ND, CCContext.getBaseType(), Result);
if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(ND)) {
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
Ctx, Policy);
AddTypedNameChunk(Ctx, Policy, ND, Result);
Result.AddChunk(CodeCompletionString::CK_LeftParen);
AddFunctionParameterChunks(PP, Policy, Function, Result);
Result.AddChunk(CodeCompletionString::CK_RightParen);
AddFunctionTypeQualsToCompletionString(Result, Function);
return Result.TakeString();
}
if (const FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(ND)) {
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
Ctx, Policy);
FunctionDecl *Function = FunTmpl->getTemplatedDecl();
AddTypedNameChunk(Ctx, Policy, Function, Result);
// Figure out which template parameters are deduced (or have default
// arguments).
llvm::SmallBitVector Deduced;
Sema::MarkDeducedTemplateParameters(Ctx, FunTmpl, Deduced);
unsigned LastDeducibleArgument;
for (LastDeducibleArgument = Deduced.size(); LastDeducibleArgument > 0;
--LastDeducibleArgument) {
if (!Deduced[LastDeducibleArgument - 1]) {
// C++0x: Figure out if the template argument has a default. If so,
// the user doesn't need to type this argument.
// FIXME: We need to abstract template parameters better!
bool HasDefaultArg = false;
NamedDecl *Param = FunTmpl->getTemplateParameters()->getParam(
LastDeducibleArgument - 1);
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param))
HasDefaultArg = TTP->hasDefaultArgument();
else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(Param))
HasDefaultArg = NTTP->hasDefaultArgument();
else {
assert(isa<TemplateTemplateParmDecl>(Param));
HasDefaultArg
= cast<TemplateTemplateParmDecl>(Param)->hasDefaultArgument();
}
if (!HasDefaultArg)
break;
}
}
if (LastDeducibleArgument) {
// Some of the function template arguments cannot be deduced from a
// function call, so we introduce an explicit template argument list
// containing all of the arguments up to the first deducible argument.
Result.AddChunk(CodeCompletionString::CK_LeftAngle);
AddTemplateParameterChunks(Ctx, Policy, FunTmpl, Result,
LastDeducibleArgument);
Result.AddChunk(CodeCompletionString::CK_RightAngle);
}
// Add the function parameters
Result.AddChunk(CodeCompletionString::CK_LeftParen);
AddFunctionParameterChunks(PP, Policy, Function, Result);
Result.AddChunk(CodeCompletionString::CK_RightParen);
AddFunctionTypeQualsToCompletionString(Result, Function);
return Result.TakeString();
}
if (const TemplateDecl *Template = dyn_cast<TemplateDecl>(ND)) {
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
Ctx, Policy);
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(Template->getNameAsString()));
Result.AddChunk(CodeCompletionString::CK_LeftAngle);
AddTemplateParameterChunks(Ctx, Policy, Template, Result);
Result.AddChunk(CodeCompletionString::CK_RightAngle);
return Result.TakeString();
}
if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND)) {
Selector Sel = Method->getSelector();
if (Sel.isUnarySelector()) {
Result.AddTypedTextChunk(Result.getAllocator().CopyString(
Sel.getNameForSlot(0)));
return Result.TakeString();
}
std::string SelName = Sel.getNameForSlot(0).str();
SelName += ':';
if (StartParameter == 0)
Result.AddTypedTextChunk(Result.getAllocator().CopyString(SelName));
else {
Result.AddInformativeChunk(Result.getAllocator().CopyString(SelName));
// If there is only one parameter, and we're past it, add an empty
// typed-text chunk since there is nothing to type.
if (Method->param_size() == 1)
Result.AddTypedTextChunk("");
}
unsigned Idx = 0;
for (ObjCMethodDecl::param_const_iterator P = Method->param_begin(),
PEnd = Method->param_end();
P != PEnd; (void)++P, ++Idx) {
if (Idx > 0) {
std::string Keyword;
if (Idx > StartParameter)
Result.AddChunk(CodeCompletionString::CK_HorizontalSpace);
if (IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Idx))
Keyword += II->getName();
Keyword += ":";
if (Idx < StartParameter || AllParametersAreInformative)
Result.AddInformativeChunk(Result.getAllocator().CopyString(Keyword));
else
Result.AddTypedTextChunk(Result.getAllocator().CopyString(Keyword));
}
// If we're before the starting parameter, skip the placeholder.
if (Idx < StartParameter)
continue;
std::string Arg;
QualType ParamType = (*P)->getType();
Optional<ArrayRef<QualType>> ObjCSubsts;
if (!CCContext.getBaseType().isNull())
ObjCSubsts = CCContext.getBaseType()->getObjCSubstitutions(Method);
if (ParamType->isBlockPointerType() && !DeclaringEntity)
Arg = FormatFunctionParameter(Policy, *P, true,
/*SuppressBlock=*/false,
ObjCSubsts);
else {
if (ObjCSubsts)
ParamType = ParamType.substObjCTypeArgs(Ctx, *ObjCSubsts,
ObjCSubstitutionContext::Parameter);
Arg = "(" + formatObjCParamQualifiers((*P)->getObjCDeclQualifier(),
ParamType);
Arg += ParamType.getAsString(Policy) + ")";
if (IdentifierInfo *II = (*P)->getIdentifier())
if (DeclaringEntity || AllParametersAreInformative)
Arg += II->getName();
}
if (Method->isVariadic() && (P + 1) == PEnd)
Arg += ", ...";
if (DeclaringEntity)
Result.AddTextChunk(Result.getAllocator().CopyString(Arg));
else if (AllParametersAreInformative)
Result.AddInformativeChunk(Result.getAllocator().CopyString(Arg));
else
Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Arg));
}
if (Method->isVariadic()) {
if (Method->param_size() == 0) {
if (DeclaringEntity)
Result.AddTextChunk(", ...");
else if (AllParametersAreInformative)
Result.AddInformativeChunk(", ...");
else
Result.AddPlaceholderChunk(", ...");
}
MaybeAddSentinel(PP, Method, Result);
}
return Result.TakeString();
}
if (Qualifier)
AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
Ctx, Policy);
Result.AddTypedTextChunk(
Result.getAllocator().CopyString(ND->getNameAsString()));
return Result.TakeString();
}
/// \brief Add function overload parameter chunks to the given code completion
/// string.
///
/// One chunk is produced per parameter, with the parameter at index
/// \p CurrentArg emitted as the "current parameter" chunk. When a parameter
/// has a default argument, it and all parameters after it are emitted inside
/// a nested optional chunk, as is the trailing "..." of a variadic prototype.
///
/// \param Function The function declaration, if one is available; preferred
/// over \p Prototype because it carries parameter names and default
/// arguments.
/// \param Prototype The function's prototype, used when no declaration is
/// available and to detect variadic functions.
/// \param CurrentArg The zero-based index of the argument being completed.
/// \param Start The index of the first parameter to emit; non-zero only in
/// the recursive calls that build nested optional sections.
/// \param InOptional Whether we are already emitting into an optional chunk,
/// in which case the next defaulted parameter must not open another one.
static void AddOverloadParameterChunks(ASTContext &Context,
                                       const PrintingPolicy &Policy,
                                       const FunctionDecl *Function,
                                       const FunctionProtoType *Prototype,
                                       CodeCompletionBuilder &Result,
                                       unsigned CurrentArg,
                                       unsigned Start = 0,
                                       bool InOptional = false) {
  bool FirstParameter = true;
  unsigned NumParams = Function ? Function->getNumParams()
                                : Prototype->getNumParams();
  for (unsigned P = Start; P != NumParams; ++P) {
    if (Function && Function->getParamDecl(P)->hasDefaultArg() && !InOptional) {
      // When we see an optional default argument, put that argument and
      // the remaining default arguments into a new, optional string.
      CodeCompletionBuilder Opt(Result.getAllocator(),
                                Result.getCodeCompletionTUInfo());
      if (!FirstParameter)
        Opt.AddChunk(CodeCompletionString::CK_Comma);
      // Optional sections are nested.
      AddOverloadParameterChunks(Context, Policy, Function, Prototype, Opt,
                                 CurrentArg, P, /*InOptional=*/true);
      Result.AddOptionalChunk(Opt.TakeString());
      return;
    }
    if (FirstParameter)
      FirstParameter = false;
    else
      Result.AddChunk(CodeCompletionString::CK_Comma);
    // Once a parameter has been emitted at this level, a later defaulted
    // parameter starts a fresh optional section.
    InOptional = false;
    // Format the placeholder string.
    std::string Placeholder;
    if (Function)
      Placeholder = FormatFunctionParameter(Policy, Function->getParamDecl(P));
    else
      Placeholder = Prototype->getParamType(P).getAsString(Policy);
    if (P == CurrentArg)
      Result.AddCurrentParameterChunk(
        Result.getAllocator().CopyString(Placeholder));
    else
      Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Placeholder));
  }
  if (Prototype && Prototype->isVariadic()) {
    // Emit the ellipsis; it is the "current" chunk when completion has moved
    // past all declared parameters.
    CodeCompletionBuilder Opt(Result.getAllocator(),
                              Result.getCodeCompletionTUInfo());
    if (!FirstParameter)
      Opt.AddChunk(CodeCompletionString::CK_Comma);
    if (CurrentArg < NumParams)
      Opt.AddPlaceholderChunk("...");
    else
      Opt.AddCurrentParameterChunk("...");
    Result.AddOptionalChunk(Opt.TakeString());
  }
}
/// \brief Build a code-completion string describing this overload
/// candidate's signature, highlighting the parameter at \p CurrentArg.
CodeCompletionString *
CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
                                             unsigned CurrentArg, Sema &S,
                                             CodeCompletionAllocator &Allocator,
                                             CodeCompletionTUInfo &CCTUInfo,
                                             bool IncludeBriefComments) const {
  PrintingPolicy Policy = getCompletionPrintingPolicy(S);
  // FIXME: Set priority, availability appropriately.
  CodeCompletionBuilder Result(Allocator, CCTUInfo, 1,
                               CXAvailability_Available);
  FunctionDecl *FDecl = getFunction();
  const FunctionProtoType *Proto =
      dyn_cast<FunctionProtoType>(getFunctionType());

  if (!FDecl && !Proto) {
    // A function without a prototype: show the return type followed by a
    // highlighted ellipsis in place of the unknown parameter list.
    const FunctionType *FT = getFunctionType();
    Result.AddResultTypeChunk(Result.getAllocator().CopyString(
        FT->getReturnType().getAsString(Policy)));
    Result.AddChunk(CodeCompletionString::CK_LeftParen);
    Result.AddChunk(CodeCompletionString::CK_CurrentParameter, "...");
    Result.AddChunk(CodeCompletionString::CK_RightParen);
    return Result.TakeString();
  }

  if (!FDecl) {
    // Only a prototype is available; print its return type.
    Result.AddResultTypeChunk(Result.getAllocator().CopyString(
        Proto->getReturnType().getAsString(Policy)));
  } else {
    // We have a declaration: attach the brief comment for the current
    // parameter (if requested), then the result type and function name.
    if (IncludeBriefComments && CurrentArg < FDecl->getNumParams())
      if (auto RC = S.getASTContext().getRawCommentForAnyRedecl(
              FDecl->getParamDecl(CurrentArg)))
        Result.addBriefComment(RC->getBriefText(S.getASTContext()));
    AddResultTypeChunk(S.Context, Policy, FDecl, QualType(), Result);
    Result.AddTextChunk(
        Result.getAllocator().CopyString(FDecl->getNameAsString()));
  }

  Result.AddChunk(CodeCompletionString::CK_LeftParen);
  AddOverloadParameterChunks(S.getASTContext(), Policy, FDecl, Proto, Result,
                             CurrentArg);
  Result.AddChunk(CodeCompletionString::CK_RightParen);
  return Result.TakeString();
}
/// \brief Compute the completion priority for a reference to the macro
/// \p MacroName.
///
/// Null-pointer-like macros ("nil", "Nil", "NULL") are treated as constants
/// and boosted further when a pointer type is preferred; boolean-like macros
/// are constants; "bool" behaves like a type name.
unsigned clang::getMacroUsagePriority(StringRef MacroName,
                                      const LangOptions &LangOpts,
                                      bool PreferredTypeIsPointer) {
  // Null pointer constants.
  if (MacroName == "nil" || MacroName == "NULL" || MacroName == "Nil") {
    unsigned Priority = CCP_Constant;
    if (PreferredTypeIsPointer)
      Priority /= CCF_SimilarTypeMatch;
    return Priority;
  }

  // Boolean constants.
  if (MacroName == "YES" || MacroName == "NO" || MacroName == "true" ||
      MacroName == "false")
    return CCP_Constant;

  // "bool" acts like a type.
  if (MacroName == "bool")
    return CCP_Type + (LangOpts.ObjC1 ? CCD_bool_in_ObjC : 0);

  // Anything else is an ordinary macro.
  return CCP_Macro;
}
/// \brief Determine the libclang cursor kind associated with the given
/// declaration.
///
/// Declarations without a dedicated cursor kind fall back to a tag-kind
/// specific cursor (for TagDecls) or CXCursor_UnexposedDecl.
CXCursorKind clang::getCursorKindForDecl(const Decl *D) {
  if (!D)
    return CXCursor_UnexposedDecl;

  switch (D->getKind()) {
    case Decl::Enum:               return CXCursor_EnumDecl;
    case Decl::EnumConstant:       return CXCursor_EnumConstantDecl;
    case Decl::Field:              return CXCursor_FieldDecl;
    case Decl::Function:
      return CXCursor_FunctionDecl;
    case Decl::ObjCCategory:       return CXCursor_ObjCCategoryDecl;
    case Decl::ObjCCategoryImpl:   return CXCursor_ObjCCategoryImplDecl;
    case Decl::ObjCImplementation: return CXCursor_ObjCImplementationDecl;
    case Decl::ObjCInterface:      return CXCursor_ObjCInterfaceDecl;
    case Decl::ObjCIvar:           return CXCursor_ObjCIvarDecl;
    case Decl::ObjCMethod:
      return cast<ObjCMethodDecl>(D)->isInstanceMethod()
               ? CXCursor_ObjCInstanceMethodDecl
               : CXCursor_ObjCClassMethodDecl;
    case Decl::CXXMethod:          return CXCursor_CXXMethod;
    case Decl::CXXConstructor:     return CXCursor_Constructor;
    case Decl::CXXDestructor:      return CXCursor_Destructor;
    case Decl::CXXConversion:      return CXCursor_ConversionFunction;
    case Decl::ObjCProperty:       return CXCursor_ObjCPropertyDecl;
    case Decl::ObjCProtocol:       return CXCursor_ObjCProtocolDecl;
    case Decl::ParmVar:            return CXCursor_ParmDecl;
    case Decl::Typedef:            return CXCursor_TypedefDecl;
    case Decl::TypeAlias:          return CXCursor_TypeAliasDecl;
    case Decl::Var:                return CXCursor_VarDecl;
    case Decl::Namespace:          return CXCursor_Namespace;
    case Decl::NamespaceAlias:     return CXCursor_NamespaceAlias;
    case Decl::TemplateTypeParm:   return CXCursor_TemplateTypeParameter;
    case Decl::NonTypeTemplateParm:return CXCursor_NonTypeTemplateParameter;
    case Decl::TemplateTemplateParm:return CXCursor_TemplateTemplateParameter;
    case Decl::FunctionTemplate:   return CXCursor_FunctionTemplate;
    case Decl::ClassTemplate:      return CXCursor_ClassTemplate;
    case Decl::AccessSpec:         return CXCursor_CXXAccessSpecifier;
    case Decl::ClassTemplatePartialSpecialization:
      return CXCursor_ClassTemplatePartialSpecialization;
    case Decl::UsingDirective:     return CXCursor_UsingDirective;
    case Decl::TranslationUnit:    return CXCursor_TranslationUnit;

    case Decl::Using:
    case Decl::UnresolvedUsingValue:
    case Decl::UnresolvedUsingTypename:
      return CXCursor_UsingDeclaration;

    case Decl::ObjCPropertyImpl:
      switch (cast<ObjCPropertyImplDecl>(D)->getPropertyImplementation()) {
      case ObjCPropertyImplDecl::Dynamic:
        return CXCursor_ObjCDynamicDecl;

      case ObjCPropertyImplDecl::Synthesize:
        return CXCursor_ObjCSynthesizeDecl;
      }
      // Both implementation kinds return above; without this guard, control
      // would implicitly fall through into the Decl::Import case.
      llvm_unreachable("Unexpected Kind!");

    case Decl::Import:
      return CXCursor_ModuleImportDecl;

    case Decl::ObjCTypeParam:      return CXCursor_TemplateTypeParameter;

    default:
      if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
        switch (TD->getTagKind()) {
          case TTK_Interface: LLVM_FALLTHROUGH; // HLSL Change
          case TTK_Struct: return CXCursor_StructDecl;
          case TTK_Class:  return CXCursor_ClassDecl;
          case TTK_Union:  return CXCursor_UnionDecl;
          case TTK_Enum:   return CXCursor_EnumDecl;
        }
      }
  }

  return CXCursor_UnexposedDecl;
}
/// \brief Add code-completion results for the macros known to the
/// preprocessor, skipping header guards.
static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results,
                            bool IncludeUndefined,
                            bool TargetTypeIsPointer = false) {
  typedef CodeCompletionResult Result;

  Results.EnterNewScope();

  for (Preprocessor::macro_iterator M = PP.macro_begin(),
                                 MEnd = PP.macro_end();
       M != MEnd; ++M) {
    auto MD = PP.getMacroDefinition(M->first);
    // Skip macros that are no longer defined, unless the caller asked for
    // them explicitly.
    if (!IncludeUndefined && !MD)
      continue;
    // Header guards are not useful completion candidates.
    if (MacroInfo *MI = MD.getMacroInfo())
      if (MI->isUsedForHeaderGuard())
        continue;

    Results.AddResult(
        Result(M->first, getMacroUsagePriority(M->first->getName(),
                                               PP.getLangOpts(),
                                               TargetTypeIsPointer)));
  }

  Results.ExitScope();
}
/// \brief Add completions for the implicit function-name variables
/// (__PRETTY_FUNCTION__, __FUNCTION__, and, where standard, __func__).
static void AddPrettyFunctionResults(const LangOptions &LangOpts,
                                     ResultBuilder &Results) {
  typedef CodeCompletionResult Result;

  Results.EnterNewScope();

  const char *Names[] = { "__PRETTY_FUNCTION__", "__FUNCTION__" };
  for (const char *Name : Names)
    Results.AddResult(Result(Name, CCP_Constant));
  // __func__ is only standard in C99 and C++11.
  if (LangOpts.C99 || LangOpts.CPlusPlus11)
    Results.AddResult(Result("__func__", CCP_Constant));

  Results.ExitScope();
}
/// \brief Hand a finished set of code-completion results to the attached
/// consumer, if any.
static void HandleCodeCompleteResults(Sema *S,
                                      CodeCompleteConsumer *CodeCompleter,
                                      CodeCompletionContext Context,
                                      CodeCompletionResult *Results,
                                      unsigned NumResults) {
  if (!CodeCompleter)
    return;
  CodeCompleter->ProcessCodeCompleteResults(*S, Context, Results, NumResults);
}
/// \brief Map a parser-level completion context onto the corresponding
/// CodeCompletionContext kind that is reported to consumers.
static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
                                         Sema::ParserCompletionContext PCC) {
  switch (PCC) {
  case Sema::PCC_Namespace:
    return CodeCompletionContext::CCC_TopLevel;

  case Sema::PCC_Class:
    return CodeCompletionContext::CCC_ClassStructUnion;

  case Sema::PCC_ObjCInterface:
    return CodeCompletionContext::CCC_ObjCInterface;

  case Sema::PCC_ObjCImplementation:
    return CodeCompletionContext::CCC_ObjCImplementation;

  case Sema::PCC_ObjCInstanceVariableList:
    return CodeCompletionContext::CCC_ObjCIvarList;

  case Sema::PCC_Template:
  case Sema::PCC_MemberTemplate:
    // A template completes like whatever declaration context encloses it.
    if (S.CurContext->isFileContext())
      return CodeCompletionContext::CCC_TopLevel;
    if (S.CurContext->isRecord())
      return CodeCompletionContext::CCC_ClassStructUnion;
    return CodeCompletionContext::CCC_Other;

  case Sema::PCC_RecoveryInFunction:
    return CodeCompletionContext::CCC_Recovery;

  case Sema::PCC_ForInit:
    // C++, C99, and Objective-C permit declarations in a for-init clause,
    // so a parenthesized expression is possible there.
    if (S.getLangOpts().CPlusPlus || S.getLangOpts().C99 ||
        S.getLangOpts().ObjC1)
      return CodeCompletionContext::CCC_ParenthesizedExpression;
    else
      return CodeCompletionContext::CCC_Expression;

  case Sema::PCC_Expression:
  case Sema::PCC_Condition:
    return CodeCompletionContext::CCC_Expression;

  case Sema::PCC_Statement:
    return CodeCompletionContext::CCC_Statement;

  case Sema::PCC_Type:
    return CodeCompletionContext::CCC_Type;

  case Sema::PCC_ParenthesizedExpression:
    return CodeCompletionContext::CCC_ParenthesizedExpression;

  case Sema::PCC_LocalDeclarationSpecifiers:
    return CodeCompletionContext::CCC_Type;
  }

  llvm_unreachable("Invalid ParserCompletionContext!");
}
/// \brief If we're in a C++ virtual member function, add completion results
/// that invoke the functions we override, since it's common to invoke the
/// overridden function as well as adding new functionality.
///
/// \param S The semantic analysis object for which we are generating results.
///
/// \param InContext The context named by the nested-name-specifier preceding
/// the code-completion point, or null if there was no qualifier; when null,
/// any qualifier needed to name an overridden method is synthesized.
///
/// \param Results The result builder that receives the completions.
static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
                                  ResultBuilder &Results) {
  // Look through blocks.
  DeclContext *CurContext = S.CurContext;
  while (isa<BlockDecl>(CurContext))
    CurContext = CurContext->getParent();

  CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(CurContext);
  if (!Method || !Method->isVirtual())
    return;

  // We need to have names for all of the parameters, if we're going to
  // generate a forwarding call.
  for (auto P : Method->params())
    if (!P->getDeclName())
      return;

  PrintingPolicy Policy = getCompletionPrintingPolicy(S);
  for (CXXMethodDecl::method_iterator M = Method->begin_overridden_methods(),
                                   MEnd = Method->end_overridden_methods();
       M != MEnd; ++M) {
    CodeCompletionBuilder Builder(Results.getAllocator(),
                                  Results.getCodeCompletionTUInfo());
    const CXXMethodDecl *Overridden = *M;
    // Skip overrides that are really redeclarations of the same method.
    if (Overridden->getCanonicalDecl() == Method->getCanonicalDecl())
      continue;

    // If we need a nested-name-specifier, add one now.
    if (!InContext) {
      NestedNameSpecifier *NNS
        = getRequiredQualification(S.Context, CurContext,
                                   Overridden->getDeclContext());
      if (NNS) {
        std::string Str;
        llvm::raw_string_ostream OS(Str);
        NNS->print(OS, Policy);
        Builder.AddTextChunk(Results.getAllocator().CopyString(OS.str()));
      }
    } else if (!InContext->Equals(Overridden->getDeclContext()))
      continue;

    Builder.AddTypedTextChunk(Results.getAllocator().CopyString(
                                         Overridden->getNameAsString()));
    Builder.AddChunk(CodeCompletionString::CK_LeftParen);
    // Forward each of the method's (named) parameters as a placeholder.
    bool FirstParam = true;
    for (auto P : Method->params()) {
      if (FirstParam)
        FirstParam = false;
      else
        Builder.AddChunk(CodeCompletionString::CK_Comma);

      Builder.AddPlaceholderChunk(
        Results.getAllocator().CopyString(P->getIdentifier()->getName()));
    }
    Builder.AddChunk(CodeCompletionString::CK_RightParen);
    Results.AddResult(CodeCompletionResult(Builder.TakeString(),
                                           CCP_SuperCompletion,
                                           CXCursor_CXXMethod,
                                           CXAvailability_Available,
                                           Overridden));
    // Don't list the overridden method again as an ordinary result.
    Results.Ignore(Overridden);
  }
}
/// \brief Produce code-completion results for a module name appearing in a
/// module-import declaration.
///
/// When \p Path is empty, every known top-level module is suggested;
/// otherwise (with modules enabled) the named module is loaded and its
/// submodules are suggested.
void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
                                    ModuleIdPath Path) {
  typedef CodeCompletionResult Result;
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_Other);
  Results.EnterNewScope();

  CodeCompletionAllocator &Allocator = Results.getAllocator();
  CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
  // (Removed a duplicate "typedef CodeCompletionResult Result;" that
  // shadowed the one above.)
  if (Path.empty()) {
    // Enumerate all top-level modules.
    SmallVector<Module *, 8> Modules;
    PP.getHeaderSearchInfo().collectAllModules(Modules);
    for (unsigned I = 0, N = Modules.size(); I != N; ++I) {
      Builder.AddTypedTextChunk(
        Builder.getAllocator().CopyString(Modules[I]->Name));
      Results.AddResult(Result(Builder.TakeString(),
                               CCP_Declaration,
                               CXCursor_ModuleImportDecl,
                               Modules[I]->isAvailable()
                                 ? CXAvailability_Available
                                 : CXAvailability_NotAvailable));
    }
  } else if (getLangOpts().Modules) {
    // Load the named module.
    Module *Mod = PP.getModuleLoader().loadModule(ImportLoc, Path,
                                                  Module::AllVisible,
                                                /*IsInclusionDirective=*/false);
    // Enumerate submodules.
    if (Mod) {
      for (Module::submodule_iterator Sub = Mod->submodule_begin(),
                                   SubEnd = Mod->submodule_end();
           Sub != SubEnd; ++Sub) {
        Builder.AddTypedTextChunk(
          Builder.getAllocator().CopyString((*Sub)->Name));
        Results.AddResult(Result(Builder.TakeString(),
                                 CCP_Declaration,
                                 CXCursor_ModuleImportDecl,
                                 (*Sub)->isAvailable()
                                   ? CXAvailability_Available
                                   : CXAvailability_NotAvailable));
      }
    }
  }
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
                            Results.data(), Results.size());
}
/// \brief Produce code-completion results for an ordinary name occurring in
/// the parser context described by \p CompletionContext.
void Sema::CodeCompleteOrdinaryName(Scope *S,
                                    ParserCompletionContext CompletionContext) {
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        mapCodeCompletionContext(*this, CompletionContext));
  Results.EnterNewScope();

  // Determine how to filter results, e.g., so that the names of
  // values (functions, enumerators, function templates, etc.) are
  // only allowed where we can have an expression.
  switch (CompletionContext) {
  case PCC_Namespace:
  case PCC_Class:
  case PCC_ObjCInterface:
  case PCC_ObjCImplementation:
  case PCC_ObjCInstanceVariableList:
  case PCC_Template:
  case PCC_MemberTemplate:
  case PCC_Type:
  case PCC_LocalDeclarationSpecifiers:
    // Declaration contexts: only names that can start a declaration.
    Results.setFilter(&ResultBuilder::IsOrdinaryNonValueName);
    break;

  case PCC_Statement:
  case PCC_ParenthesizedExpression:
  case PCC_Expression:
  case PCC_ForInit:
  case PCC_Condition:
    if (WantTypesInContext(CompletionContext, getLangOpts()))
      Results.setFilter(&ResultBuilder::IsOrdinaryName);
    else
      Results.setFilter(&ResultBuilder::IsOrdinaryNonTypeName);

    // In a virtual member function, offer calls to overridden methods.
    if (getLangOpts().CPlusPlus)
      MaybeAddOverrideCalls(*this, /*InContext=*/nullptr, Results);
    break;

  case PCC_RecoveryInFunction:
    // Unfiltered
    break;
  }

  // If we are in a C++ non-static member function, check the qualifiers on
  // the member function to filter/prioritize the results list.
  if (CXXMethodDecl *CurMethod = dyn_cast<CXXMethodDecl>(CurContext))
    if (CurMethod->isInstance())
      Results.setObjectTypeQualifiers(
                      Qualifiers::fromCVRMask(CurMethod->getTypeQualifiers()));

  CodeCompletionDeclConsumer Consumer(Results, CurContext);
  LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
                     CodeCompleter->includeGlobals());

  AddOrdinaryNameResults(CompletionContext, S, *this, Results);
  Results.ExitScope();

  switch (CompletionContext) {
  case PCC_ParenthesizedExpression:
  case PCC_Expression:
  case PCC_Statement:
  case PCC_RecoveryInFunction:
    // Inside a function body, the implicit function-name variables apply.
    if (S->getFnParent())
      AddPrettyFunctionResults(PP.getLangOpts(), Results);
    break;

  case PCC_Namespace:
  case PCC_Class:
  case PCC_ObjCInterface:
  case PCC_ObjCImplementation:
  case PCC_ObjCInstanceVariableList:
  case PCC_Template:
  case PCC_MemberTemplate:
  case PCC_ForInit:
  case PCC_Condition:
  case PCC_Type:
  case PCC_LocalDeclarationSpecifiers:
    break;
  }

  if (CodeCompleter->includeMacros())
    AddMacroResults(PP, Results, false);

  HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
                            Results.data(),Results.size());
}
static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper,
ResultBuilder &Results);
/// \brief Produce code-completion results within a declaration-specifier
/// sequence.
///
/// \param DS The declaration specifiers seen so far.
/// \param AllowNonIdentifiers Whether completions that are not plain
/// identifiers (such as "operator") may be offered.
/// \param AllowNestedNameSpecifiers Whether names that begin a
/// nested-name-specifier may be offered.
void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
                                bool AllowNonIdentifiers,
                                bool AllowNestedNameSpecifiers) {
  typedef CodeCompletionResult Result;
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        AllowNestedNameSpecifiers
                          ? CodeCompletionContext::CCC_PotentiallyQualifiedName
                          : CodeCompletionContext::CCC_Name);
  Results.EnterNewScope();

  // Type qualifiers can come after names.
  Results.AddResult(Result("const"));
  Results.AddResult(Result("volatile"));
  if (getLangOpts().C99)
    Results.AddResult(Result("restrict"));

  if (getLangOpts().CPlusPlus) {
    if (AllowNonIdentifiers) {
      Results.AddResult(Result("operator"));
    }

    // Add nested-name-specifiers.
    if (AllowNestedNameSpecifiers) {
      Results.allowNestedNameSpecifiers();
      Results.setFilter(&ResultBuilder::IsImpossibleToSatisfy);
      CodeCompletionDeclConsumer Consumer(Results, CurContext);
      LookupVisibleDecls(S, LookupNestedNameSpecifierName, Consumer,
                         CodeCompleter->includeGlobals());
      Results.setFilter(nullptr);
    }
  }
  Results.ExitScope();

  // If we're in a context where we might have an expression (rather than a
  // declaration), and what we've seen so far is an Objective-C type that could
  // be a receiver of a class message, this may be a class message send with
  // the initial opening bracket '[' missing. Add appropriate completions.
  if (AllowNonIdentifiers && !AllowNestedNameSpecifiers &&
      DS.getParsedSpecifiers() == DeclSpec::PQ_TypeSpecifier &&
      DS.getTypeSpecType() == DeclSpec::TST_typename &&
      DS.getTypeSpecComplex() == DeclSpec::TSC_unspecified &&
      DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
      !DS.isTypeAltiVecVector() &&
      S &&
      (S->getFlags() & Scope::DeclScope) != 0 &&
      (S->getFlags() & (Scope::ClassScope | Scope::TemplateParamScope |
                        Scope::FunctionPrototypeScope |
                        Scope::AtCatchScope)) == 0) {
    ParsedType T = DS.getRepAsType();
    if (!T.get().isNull() && T.get()->isObjCObjectOrInterfaceType())
      AddClassMessageCompletions(*this, S, T, None, false, false, Results);
  }

  // Note that we intentionally suppress macro results here, since we do not
  // encourage using macros to produce the names of entities.

  HandleCodeCompleteResults(this, CodeCompleter,
                            Results.getCompletionContext(),
                            Results.data(), Results.size());
}
/// \brief Extra information guiding expression code completion, such as the
/// type the surrounding context expects.
struct Sema::CodeCompleteExpressionData {
  CodeCompleteExpressionData(QualType PreferredType = QualType())
      : PreferredType(PreferredType) {}

  // The type expected in this context, if known.
  QualType PreferredType;
  // Whether only integral constant expressions are acceptable here.
  bool IntegralConstantExpression = false;
  // Whether an Objective-C collection is expected here.
  bool ObjCCollection = false;
  // Declarations to omit from the results.
  SmallVector<Decl *, 4> IgnoreDecls;
};
/// \brief Perform code-completion in an expression context when we know what
/// type we're looking for.
///
/// The preferred type in \p Data, when present, is used both to prioritize
/// results of a similar type and to boost pointer-like macros.
void Sema::CodeCompleteExpression(Scope *S,
                                  const CodeCompleteExpressionData &Data) {
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_Expression);
  // Pick a filter matching the kind of expression this context accepts.
  if (Data.ObjCCollection)
    Results.setFilter(&ResultBuilder::IsObjCCollection);
  else if (Data.IntegralConstantExpression)
    Results.setFilter(&ResultBuilder::IsIntegralConstantValue);
  else if (WantTypesInContext(PCC_Expression, getLangOpts()))
    Results.setFilter(&ResultBuilder::IsOrdinaryName);
  else
    Results.setFilter(&ResultBuilder::IsOrdinaryNonTypeName);

  if (!Data.PreferredType.isNull())
    Results.setPreferredType(Data.PreferredType.getNonReferenceType());

  // Ignore any declarations that we were told that we don't care about.
  for (unsigned I = 0, N = Data.IgnoreDecls.size(); I != N; ++I)
    Results.Ignore(Data.IgnoreDecls[I]);

  CodeCompletionDeclConsumer Consumer(Results, CurContext);
  LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
                     CodeCompleter->includeGlobals());

  Results.EnterNewScope();
  AddOrdinaryNameResults(PCC_Expression, S, *this, Results);
  Results.ExitScope();

  // When a pointer-like type is preferred, pointer-valued macros (such as
  // NULL) get a priority boost.
  bool PreferredTypeIsPointer = false;
  if (!Data.PreferredType.isNull())
    PreferredTypeIsPointer = Data.PreferredType->isAnyPointerType()
      || Data.PreferredType->isMemberPointerType()
      || Data.PreferredType->isBlockPointerType();

  if (S->getFnParent() &&
      !Data.ObjCCollection &&
      !Data.IntegralConstantExpression)
    AddPrettyFunctionResults(PP.getLangOpts(), Results);

  if (CodeCompleter->includeMacros())
    AddMacroResults(PP, Results, false, PreferredTypeIsPointer);
  HandleCodeCompleteResults(this, CodeCompleter,
                CodeCompletionContext(CodeCompletionContext::CCC_Expression,
                                      Data.PreferredType),
                            Results.data(),Results.size());
}
/// \brief Perform code-completion after a postfix expression: recover with
/// ordinary-name completion on an invalid expression, or offer Objective-C
/// instance-message completions when ObjC is enabled.
void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E) {
  if (E.isInvalid()) {
    CodeCompleteOrdinaryName(S, PCC_RecoveryInFunction);
    return;
  }
  if (getLangOpts().ObjC1)
    CodeCompleteObjCInstanceMessage(S, E.get(), None, false);
}
/// \brief The set of properties that have already been added, referenced by
/// property name.
typedef llvm::SmallPtrSet<IdentifierInfo*, 16> AddedPropertiesSet;
/// \brief Return the definition of the given Objective-C container, falling
/// back to the declaration itself when no definition is available.
static ObjCContainerDecl *getContainerDef(ObjCContainerDecl *Container) {
  if (auto *Interface = dyn_cast<ObjCInterfaceDecl>(Container))
    return Interface->hasDefinition() ? Interface->getDefinition()
                                      : Interface;

  if (auto *Protocol = dyn_cast<ObjCProtocolDecl>(Container))
    return Protocol->hasDefinition() ? Protocol->getDefinition()
                                     : Protocol;

  return Container;
}
/// \brief Collect code-completion results for the properties of an
/// Objective-C container and everything it inherits from.
///
/// \param Container The interface, protocol, or category to walk; its
/// definition is used when one is available.
/// \param AllowCategories Whether to also walk the categories of an
/// interface.
/// \param AllowNullaryMethods Whether zero-argument methods should also be
/// offered in property-access syntax.
/// \param AddedProperties Names already emitted, used to suppress duplicates
/// across the inheritance graph.
static void AddObjCProperties(const CodeCompletionContext &CCContext,
                              ObjCContainerDecl *Container,
                              bool AllowCategories,
                              bool AllowNullaryMethods,
                              DeclContext *CurContext,
                              AddedPropertiesSet &AddedProperties,
                              ResultBuilder &Results) {
  typedef CodeCompletionResult Result;

  // Retrieve the definition.
  Container = getContainerDef(Container);

  // Add properties in this container.
  for (const auto *P : Container->properties())
    if (AddedProperties.insert(P->getIdentifier()).second)
      Results.MaybeAddResult(Result(P, Results.getBasePriority(P), nullptr),
                             CurContext);

  // Add nullary methods, presented as if they were properties.
  if (AllowNullaryMethods) {
    ASTContext &Context = Container->getASTContext();
    PrintingPolicy Policy = getCompletionPrintingPolicy(Results.getSema());
    for (auto *M : Container->methods()) {
      if (M->getSelector().isUnarySelector())
        if (IdentifierInfo *Name = M->getSelector().getIdentifierInfoForSlot(0))
          if (AddedProperties.insert(Name).second) {
            CodeCompletionBuilder Builder(Results.getAllocator(),
                                          Results.getCodeCompletionTUInfo());
            AddResultTypeChunk(Context, Policy, M, CCContext.getBaseType(),
                               Builder);
            Builder.AddTypedTextChunk(
                            Results.getAllocator().CopyString(Name->getName()));

            Results.MaybeAddResult(Result(Builder.TakeString(), M,
                                  CCP_MemberDeclaration + CCD_MethodAsProperty),
                                          CurContext);
          }
    }
  }

  // Add properties in referenced protocols.
  if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
    for (auto *P : Protocol->protocols())
      AddObjCProperties(CCContext, P, AllowCategories, AllowNullaryMethods,
                        CurContext, AddedProperties, Results);
  } else if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container)){
    if (AllowCategories) {
      // Look through categories.
      for (auto *Cat : IFace->known_categories())
        AddObjCProperties(CCContext, Cat, AllowCategories, AllowNullaryMethods,
                          CurContext, AddedProperties, Results);
    }

    // Look through protocols.
    for (auto *I : IFace->all_referenced_protocols())
      AddObjCProperties(CCContext, I, AllowCategories, AllowNullaryMethods,
                        CurContext, AddedProperties, Results);

    // Look in the superclass.
    if (IFace->getSuperClass())
      AddObjCProperties(CCContext, IFace->getSuperClass(), AllowCategories,
                        AllowNullaryMethods, CurContext,
                        AddedProperties, Results);
  } else if (const ObjCCategoryDecl *Category
                                    = dyn_cast<ObjCCategoryDecl>(Container)) {
    // Look through protocols.
    for (auto *P : Category->protocols())
      AddObjCProperties(CCContext, P, AllowCategories, AllowNullaryMethods,
                        CurContext, AddedProperties, Results);
  }
}
/// \brief Produce code-completion results for a member access expression,
/// i.e. after "base." or "base->".
///
/// Dispatches on the (already member-converted) base type: members of a
/// C/C++ record, Objective-C properties for an interface-pointer base, or
/// Objective-C instance variables. Does nothing if there is no base
/// expression or no code-completion consumer is installed.
void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
                                           SourceLocation OpLoc,
                                           bool IsArrow) {
  if (!Base || !CodeCompleter)
    return;
  // Apply the usual member-expression base conversions (e.g. lvalue
  // conversions) before inspecting the base type.
  ExprResult ConvertedBase = PerformMemberExprBaseConversion(Base, IsArrow);
  if (ConvertedBase.isInvalid())
    return;
  Base = ConvertedBase.get();
  typedef CodeCompletionResult Result;
  QualType BaseType = Base->getType();
  if (IsArrow) {
    // For "->", look through the pointer; an ObjC object pointer is handled
    // as-is below, and anything else cannot be completed.
    if (const PointerType *Ptr = BaseType->getAs<PointerType>())
      BaseType = Ptr->getPointeeType();
    else if (BaseType->isObjCObjectPointerType())
      /*Do nothing*/ ;
    else
      return;
  }
  // Pick the completion context kind that matches the access syntax and the
  // base type.
  enum CodeCompletionContext::Kind contextKind;
  if (IsArrow) {
    contextKind = CodeCompletionContext::CCC_ArrowMemberAccess;
  }
  else {
    if (BaseType->isObjCObjectPointerType() ||
        BaseType->isObjCObjectOrInterfaceType()) {
      contextKind = CodeCompletionContext::CCC_ObjCPropertyAccess;
    }
    else {
      contextKind = CodeCompletionContext::CCC_DotMemberAccess;
    }
  }
  CodeCompletionContext CCContext(contextKind, BaseType);
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CCContext,
                        &ResultBuilder::IsMember);
  Results.EnterNewScope();
  if (const RecordType *Record = BaseType->getAs<RecordType>()) {
    // Indicate that we are performing a member access, and the cv-qualifiers
    // for the base object type.
    Results.setObjectTypeQualifiers(BaseType.getQualifiers());
    // Access to a C/C++ class, struct, or union.
    Results.allowNestedNameSpecifiers();
    CodeCompletionDeclConsumer Consumer(Results, CurContext);
    LookupVisibleDecls(Record->getDecl(), LookupMemberName, Consumer,
                       CodeCompleter->includeGlobals());
    if (getLangOpts().CPlusPlus) {
      if (!Results.empty()) {
        // The "template" keyword can follow "->" or "." in the grammar.
        // However, we only want to suggest the template keyword if something
        // is dependent.
        bool IsDependent = BaseType->isDependentType();
        if (!IsDependent) {
          // Walk outward through enclosing scopes looking for a dependent
          // context; the first scope with an entity decides.
          for (Scope *DepScope = S; DepScope; DepScope = DepScope->getParent())
            if (DeclContext *Ctx = DepScope->getEntity()) {
              IsDependent = Ctx->isDependentContext();
              break;
            }
        }
        if (IsDependent)
          Results.AddResult(Result("template"));
      }
    }
  } else if (!IsArrow && BaseType->getAsObjCInterfacePointerType()) {
    // Objective-C property reference.
    AddedPropertiesSet AddedProperties;
    // Add property results based on our interface.
    const ObjCObjectPointerType *ObjCPtr
      = BaseType->getAsObjCInterfacePointerType();
    assert(ObjCPtr && "Non-NULL pointer guaranteed above!");
    AddObjCProperties(CCContext, ObjCPtr->getInterfaceDecl(), true,
                      /*AllowNullaryMethods=*/true, CurContext,
                      AddedProperties, Results);
    // Add properties from the protocols in a qualified interface.
    for (auto *I : ObjCPtr->quals())
      AddObjCProperties(CCContext, I, true, /*AllowNullaryMethods=*/true,
                        CurContext, AddedProperties, Results);
  } else if ((IsArrow && BaseType->isObjCObjectPointerType()) ||
             (!IsArrow && BaseType->isObjCObjectType())) {
    // Objective-C instance variable access.
    ObjCInterfaceDecl *Class = nullptr;
    if (const ObjCObjectPointerType *ObjCPtr
                                    = BaseType->getAs<ObjCObjectPointerType>())
      Class = ObjCPtr->getInterfaceDecl();
    else
      Class = BaseType->getAs<ObjCObjectType>()->getInterface();
    // Add all ivars from this class and its superclasses.
    if (Class) {
      CodeCompletionDeclConsumer Consumer(Results, CurContext);
      Results.setFilter(&ResultBuilder::IsObjCIvar);
      LookupVisibleDecls(Class, LookupMemberName, Consumer,
                         CodeCompleter->includeGlobals());
    }
  }
  // FIXME: How do we cope with isa?
  Results.ExitScope();
  // Hand off the results found for code completion.
  HandleCodeCompleteResults(this, CodeCompleter,
                            Results.getCompletionContext(),
                            Results.data(),Results.size());
}
/// \brief Produce code-completion results for a tag name following 'enum',
/// 'union', 'struct', 'class', or '__interface'.
void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
  if (!CodeCompleter)
    return;
  // Choose the lookup filter and completion-context kind that correspond to
  // the tag keyword that introduced this completion point.
  ResultBuilder::LookupFilter TagFilter = nullptr;
  enum CodeCompletionContext::Kind Kind = CodeCompletionContext::CCC_Other;
  switch ((DeclSpec::TST)TagSpec) {
  case DeclSpec::TST_enum:
    TagFilter = &ResultBuilder::IsEnum;
    Kind = CodeCompletionContext::CCC_EnumTag;
    break;
  case DeclSpec::TST_union:
    TagFilter = &ResultBuilder::IsUnion;
    Kind = CodeCompletionContext::CCC_UnionTag;
    break;
  case DeclSpec::TST_struct:
  case DeclSpec::TST_class:
  case DeclSpec::TST_interface:
    // struct/class/__interface all accept the same set of tags.
    TagFilter = &ResultBuilder::IsClassOrStruct;
    Kind = CodeCompletionContext::CCC_ClassOrStructTag;
    break;
  default:
    llvm_unreachable("Unknown type specifier kind in CodeCompleteTag");
  }
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(), Kind);
  CodeCompletionDeclConsumer Consumer(Results, CurContext);
  // First pass: the tags themselves.
  Results.setFilter(TagFilter);
  LookupVisibleDecls(S, LookupTagName, Consumer,
                     CodeCompleter->includeGlobals());
  if (CodeCompleter->includeGlobals()) {
    // Second pass: nested-name-specifiers that might qualify a tag name.
    Results.setFilter(&ResultBuilder::IsNestedNameSpecifier);
    LookupVisibleDecls(S, LookupNestedNameSpecifierName, Consumer);
  }
  HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
                            Results.data(), Results.size());
}
/// \brief Suggest the type qualifiers (const, volatile, restrict, _Atomic)
/// that could still be added to the given declaration specifiers.
void Sema::CodeCompleteTypeQualifiers(DeclSpec &DS) {
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_TypeQualifiers);
  Results.EnterNewScope();
  // Offer only qualifiers not already written, and only those that exist in
  // the current language mode (restrict: C99; _Atomic: C11).
  const unsigned Qualifiers = DS.getTypeQualifiers();
  if (!(Qualifiers & DeclSpec::TQ_const))
    Results.AddResult("const");
  if (!(Qualifiers & DeclSpec::TQ_volatile))
    Results.AddResult("volatile");
  if (getLangOpts().C99 && !(Qualifiers & DeclSpec::TQ_restrict))
    Results.AddResult("restrict");
  if (getLangOpts().C11 && !(Qualifiers & DeclSpec::TQ_atomic))
    Results.AddResult("_Atomic");
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            Results.getCompletionContext(),
                            Results.data(), Results.size());
}
/// \brief Produce code-completion results for a 'case' expression inside a
/// switch statement.
///
/// For a switch over an enumeration type, suggests the enumerators that have
/// not already appeared in an earlier case label; otherwise falls back to
/// integral-constant-expression completion over the condition's type.
void Sema::CodeCompleteCase(Scope *S) {
  if (getCurFunction()->SwitchStack.empty() || !CodeCompleter)
    return;
  SwitchStmt *Switch = getCurFunction()->SwitchStack.back();
  QualType type = Switch->getCond()->IgnoreImplicit()->getType();
  if (!type->isEnumeralType()) {
    // Non-enum condition: complete with integral constant expressions of the
    // condition's type.
    CodeCompleteExpressionData Data(type);
    Data.IntegralConstantExpression = true;
    CodeCompleteExpression(S, Data);
    return;
  }
  // Code-complete the cases of a switch statement over an enumeration type
  // by providing the list of enumerators that have not yet been mentioned.
  EnumDecl *Enum = type->castAs<EnumType>()->getDecl();
  // Prefer the enum's definition, which is where the enumerators live.
  if (EnumDecl *Def = Enum->getDefinition())
    Enum = Def;
  // Determine which enumerators we have already seen in the switch statement.
  // FIXME: Ideally, we would also be able to look *past* the code-completion
  // token, in case we are code-completing in the middle of the switch and not
  // at the end. However, we aren't able to do so at the moment.
  llvm::SmallPtrSet<EnumConstantDecl *, 8> EnumeratorsSeen;
  NestedNameSpecifier *Qualifier = nullptr;
  for (SwitchCase *SC = Switch->getSwitchCaseList(); SC;
       SC = SC->getNextSwitchCase()) {
    CaseStmt *Case = dyn_cast<CaseStmt>(SC);
    if (!Case)
      continue;
    Expr *CaseVal = Case->getLHS()->IgnoreParenCasts();
    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CaseVal))
      if (EnumConstantDecl *Enumerator
            = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
        // We look into the AST of the case statement to determine which
        // enumerator was named. Alternatively, we could compute the value of
        // the integral constant expression, then compare it against the
        // values of each enumerator. However, value-based approach would not
        // work as well with C++ templates where enumerators declared within a
        // template are type- and value-dependent.
        EnumeratorsSeen.insert(Enumerator);
        // If this is a qualified-id, keep track of the nested-name-specifier
        // so that we can reproduce it as part of code completion, e.g.,
        //
        //   switch (TagD.getKind()) {
        //     case TagDecl::TK_enum:
        //       break;
        //     case XXX
        //
        // At the XXX, our completions are TagDecl::TK_union,
        // TagDecl::TK_struct, and TagDecl::TK_class, rather than TK_union,
        // TK_struct, and TK_class.
        Qualifier = DRE->getQualifier();
      }
  }
  if (getLangOpts().CPlusPlus && !Qualifier && EnumeratorsSeen.empty()) {
    // If there are no prior enumerators in C++, check whether we have to
    // qualify the names of the enumerators that we suggest, because they
    // may not be visible in this scope.
    Qualifier = getRequiredQualification(Context, CurContext, Enum);
  }
  // Add any enumerators that have not yet been mentioned.
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_Expression);
  Results.EnterNewScope();
  for (auto *E : Enum->enumerators()) {
    if (EnumeratorsSeen.count(E))
      continue;
    CodeCompletionResult R(E, CCP_EnumInCase, Qualifier);
    Results.AddResult(R, CurContext, nullptr, false);
  }
  Results.ExitScope();
  //We need to make sure we're setting the right context,
  //so only say we include macros if the code completer says we do
  enum CodeCompletionContext::Kind kind = CodeCompletionContext::CCC_Other;
  if (CodeCompleter->includeMacros()) {
    AddMacroResults(PP, Results, false);
    kind = CodeCompletionContext::CCC_OtherWithMacros;
  }
  HandleCodeCompleteResults(this, CodeCompleter,
                            kind,
                            Results.data(),Results.size());
}
/// Returns true if the argument list is backed by a null pointer or if any
/// individual argument expression is null.
static bool anyNullArguments(ArrayRef<Expr *> Args) {
  // A non-empty array whose data pointer is null counts as "null arguments".
  if (!Args.empty() && !Args.data())
    return true;
  for (Expr *Arg : Args)
    if (!Arg)
      return true;
  return false;
}
typedef CodeCompleteConsumer::OverloadCandidate ResultCandidate;
/// Rank the overload candidate set and append the viable candidates to
/// \p Results as code-completion overload candidates.
static void mergeCandidatesWithResults(Sema &SemaRef,
                                      SmallVectorImpl<ResultCandidate> &Results,
                                      OverloadCandidateSet &CandidateSet,
                                      SourceLocation Loc) {
  if (CandidateSet.empty())
    return;
  // Place the best overloads first (stable, so ties keep their order).
  std::stable_sort(CandidateSet.begin(), CandidateSet.end(),
                   [&](const OverloadCandidate &X, const OverloadCandidate &Y) {
                     return isBetterOverloadCandidate(SemaRef, X, Y, Loc);
                   });
  // Only the viable candidates become completion results.
  for (auto &Candidate : CandidateSet)
    if (Candidate.Viable)
      Results.push_back(ResultCandidate(Candidate.Function));
}
/// \brief Get the type of the Nth parameter from a given set of overload
/// candidates.
///
/// Returns a null QualType when the candidates disagree (up to
/// reference/qualifier differences) or none of them has an Nth parameter.
static QualType getParamType(Sema &SemaRef,
                             ArrayRef<ResultCandidate> Candidates,
                             unsigned N) {
  QualType ParamType;
  for (auto &Candidate : Candidates) {
    auto FType = Candidate.getFunctionType();
    if (!FType)
      continue;
    auto Proto = dyn_cast<FunctionProtoType>(FType);
    if (!Proto || N >= Proto->getNumParams())
      continue;
    QualType CandParamType = Proto->getParamType(N);
    if (ParamType.isNull()) {
      // First candidate with an Nth parameter: remember its type.
      ParamType = CandParamType;
    } else if (!SemaRef.Context.hasSameUnqualifiedType(
                   ParamType.getNonReferenceType(),
                   CandParamType.getNonReferenceType())) {
      // Candidates disagree: there is no single common parameter type.
      return QualType();
    }
  }
  return ParamType;
}
/// Produce expression completions for the current call argument, narrowed to
/// the common parameter type of \p Candidates when one exists, then hand the
/// overload candidates to the consumer for signature help.
static void CodeCompleteOverloadResults(Sema &SemaRef, Scope *S,
                              MutableArrayRef<ResultCandidate> Candidates,
                              unsigned CurrentArg,
                              bool CompleteExpressionWithCurrentArg = true) {
  QualType PreferredType;
  if (CompleteExpressionWithCurrentArg)
    PreferredType = getParamType(SemaRef, Candidates, CurrentArg);
  if (PreferredType.isNull())
    SemaRef.CodeCompleteOrdinaryName(S, Sema::PCC_Expression);
  else
    SemaRef.CodeCompleteExpression(S, PreferredType);
  if (!Candidates.empty())
    SemaRef.CodeCompleter->ProcessOverloadCandidates(SemaRef, CurrentArg,
                                                     Candidates.data(),
                                                     Candidates.size());
}
/// \brief Produce code-completion results (and overload candidates for
/// signature help) inside the argument list of a call expression.
///
/// Builds an overload candidate set from the callee expression — unresolved
/// lookups, unresolved member calls, resolved function declarations, objects
/// with operator(), and function (pointer) types — then completes the current
/// argument against the common parameter type of the viable candidates.
void Sema::CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args) {
  if (!CodeCompleter)
    return;
  // When we're code-completing for a call, we fall back to ordinary
  // name code-completion whenever we can't produce specific
  // results. We may want to revisit this strategy in the future,
  // e.g., by merging the two kinds of results.
  // FIXME: Provide support for variadic template functions.
  // Ignore type-dependent call expressions entirely.
  if (!Fn || Fn->isTypeDependent() || anyNullArguments(Args) ||
      Expr::hasAnyTypeDependentArguments(Args)) {
    CodeCompleteOrdinaryName(S, PCC_Expression);
    return;
  }
  // Build an overload candidate set based on the functions we find.
  SourceLocation Loc = Fn->getExprLoc();
  OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
  SmallVector<ResultCandidate, 8> Results;
  Expr *NakedFn = Fn->IgnoreParenCasts();
  if (auto ULE = dyn_cast<UnresolvedLookupExpr>(NakedFn))
    // Unresolved (possibly overloaded) function name.
    AddOverloadedCallCandidates(ULE, Args, CandidateSet,
                                /*PartialOverloading=*/true);
  else if (auto UME = dyn_cast<UnresolvedMemberExpr>(NakedFn)) {
    // Unresolved member call: prepend the base object as the implicit
    // argument and add every looked-up member as a candidate.
    TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = nullptr;
    if (UME->hasExplicitTemplateArgs()) {
      UME->copyTemplateArgumentsInto(TemplateArgsBuffer);
      TemplateArgs = &TemplateArgsBuffer;
    }
    SmallVector<Expr *, 12> ArgExprs(1, UME->getBase());
    ArgExprs.append(Args.begin(), Args.end());
    UnresolvedSet<8> Decls;
    Decls.append(UME->decls_begin(), UME->decls_end());
    AddFunctionCandidates(Decls, ArgExprs, CandidateSet, TemplateArgs,
                          /*SuppressUsedConversions=*/false,
                          /*PartialOverloading=*/true);
  } else {
    FunctionDecl *FD = nullptr;
    if (auto MCE = dyn_cast<MemberExpr>(NakedFn))
      FD = dyn_cast<FunctionDecl>(MCE->getMemberDecl());
    else if (auto DRE = dyn_cast<DeclRefExpr>(NakedFn))
      FD = dyn_cast<FunctionDecl>(DRE->getDecl());
    if (FD) { // We check whether it's a resolved function declaration.
      if (!getLangOpts().CPlusPlus ||
          !FD->getType()->getAs<FunctionProtoType>())
        Results.push_back(ResultCandidate(FD));
      else
        AddOverloadCandidate(FD, DeclAccessPair::make(FD, FD->getAccess()),
                             Args, CandidateSet,
                             /*SuppressUsedConversions=*/false,
                             /*PartialOverloading=*/true);
    } else if (auto DC = NakedFn->getType()->getAsCXXRecordDecl()) {
      // If expression's type is CXXRecordDecl, it may overload the function
      // call operator, so we check if it does and add them as candidates.
      // A complete type is needed to lookup for member function call operators.
      if (!RequireCompleteType(Loc, NakedFn->getType(), 0)) {
        DeclarationName OpName = Context.DeclarationNames
                                 .getCXXOperatorName(OO_Call);
        LookupResult R(*this, OpName, Loc, LookupOrdinaryName);
        LookupQualifiedName(R, DC);
        R.suppressDiagnostics();
        SmallVector<Expr *, 12> ArgExprs(1, NakedFn);
        ArgExprs.append(Args.begin(), Args.end());
        AddFunctionCandidates(R.asUnresolvedSet(), ArgExprs, CandidateSet,
                              /*ExplicitArgs=*/nullptr,
                              /*SuppressUsedConversions=*/false,
                              /*PartialOverloading=*/true);
      }
    } else {
      // Lastly we check whether expression's type is function pointer or
      // function.
      QualType T = NakedFn->getType();
      if (!T->getPointeeType().isNull())
        T = T->getPointeeType();
      if (auto FP = T->getAs<FunctionProtoType>()) {
        if (!TooManyArguments(FP->getNumParams(), Args.size(),
                              /*PartialOverloading=*/true) ||
            FP->isVariadic())
          Results.push_back(ResultCandidate(FP));
      } else if (auto FT = T->getAs<FunctionType>())
        // No prototype and declaration, it may be a K & R style function.
        Results.push_back(ResultCandidate(FT));
    }
  }
  mergeCandidatesWithResults(*this, Results, CandidateSet, Loc);
  CodeCompleteOverloadResults(*this, S, Results, Args.size(),
                              !CandidateSet.empty());
}
/// \brief Produce overload candidates for signature help inside the argument
/// list of a constructor call for \p Type.
///
/// Falls back to plain expression completion when \p Type is not a class
/// type, and does nothing when the type is incomplete or no consumer is
/// installed.
void Sema::CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
                                   ArrayRef<Expr *> Args) {
  if (!CodeCompleter)
    return;
  // A complete type is needed to lookup for constructors.
  if (RequireCompleteType(Loc, Type, 0))
    return;
  CXXRecordDecl *RD = Type->getAsCXXRecordDecl();
  if (!RD) {
    CodeCompleteExpression(S, Type);
    return;
  }
  // FIXME: Provide support for member initializers.
  // FIXME: Provide support for variadic template constructors.
  OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
  // Add every constructor (and constructor template) of the class as an
  // overload candidate.
  for (auto C : LookupConstructors(RD)) {
    if (auto FD = dyn_cast<FunctionDecl>(C)) {
      AddOverloadCandidate(FD, DeclAccessPair::make(FD, C->getAccess()),
                           Args, CandidateSet,
                           /*SuppressUsedConversions=*/false,
                           /*PartialOverloading=*/true);
    } else if (auto FTD = dyn_cast<FunctionTemplateDecl>(C)) {
      AddTemplateOverloadCandidate(FTD,
                                   DeclAccessPair::make(FTD, C->getAccess()),
                                   /*ExplicitTemplateArgs=*/nullptr,
                                   Args, CandidateSet,
                                   /*SuppressUsedConversions=*/false,
                                   /*PartialOverloading=*/true);
    }
  }
  SmallVector<ResultCandidate, 8> Results;
  mergeCandidatesWithResults(*this, Results, CandidateSet, Loc);
  CodeCompleteOverloadResults(*this, S, Results, Args.size());
}
/// \brief Produce code-completion results for the initializer of a
/// declaration.
void Sema::CodeCompleteInitializer(Scope *S, Decl *D) {
  // If the declarator names a value, prefer expressions of its declared type;
  // otherwise fall back to ordinary name completion.
  if (ValueDecl *Value = dyn_cast_or_null<ValueDecl>(D))
    CodeCompleteExpression(S, Value->getType());
  else
    CodeCompleteOrdinaryName(S, PCC_Expression);
}
/// \brief Produce code-completion results for the expression of a 'return'
/// statement, preferring expressions of the enclosing entity's return type.
void Sema::CodeCompleteReturn(Scope *S) {
  // Determine the declared return type of the enclosing block, function, or
  // Objective-C method, if any.
  QualType ExpectedType;
  if (isa<BlockDecl>(CurContext)) {
    if (BlockScopeInfo *BSI = getCurBlock())
      ExpectedType = BSI->ReturnType;
  } else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(CurContext)) {
    ExpectedType = Function->getReturnType();
  } else if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(CurContext)) {
    ExpectedType = Method->getReturnType();
  }
  if (ExpectedType.isNull())
    CodeCompleteOrdinaryName(S, PCC_Expression);
  else
    CodeCompleteExpression(S, ExpectedType);
}
/// \brief Produce code-completion results immediately after the body of an
/// 'if' statement: ordinary statement completions plus "else" and
/// "else if" patterns.
void Sema::CodeCompleteAfterIf(Scope *S) {
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        mapCodeCompletionContext(*this, PCC_Statement));
  Results.setFilter(&ResultBuilder::IsOrdinaryName);
  Results.EnterNewScope();
  CodeCompletionDeclConsumer Consumer(Results, CurContext);
  LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
                     CodeCompleter->includeGlobals());
  AddOrdinaryNameResults(PCC_Statement, S, *this, Results);
  // "else" block
  CodeCompletionBuilder Builder(Results.getAllocator(),
                                Results.getCodeCompletionTUInfo());
  Builder.AddTypedTextChunk("else");
  if (Results.includeCodePatterns()) {
    // Offer the full "else { statements }" pattern.
    Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
    Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
    Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
    Builder.AddPlaceholderChunk("statements");
    Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
    Builder.AddChunk(CodeCompletionString::CK_RightBrace);
  }
  Results.AddResult(Builder.TakeString());
  // "else if" block — the builder is reused after TakeString() above.
  Builder.AddTypedTextChunk("else");
  Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
  Builder.AddTextChunk("if");
  Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
  Builder.AddChunk(CodeCompletionString::CK_LeftParen);
  // C++ grammar calls this a "condition" (it may declare a variable); C calls
  // it an "expression".
  if (getLangOpts().CPlusPlus)
    Builder.AddPlaceholderChunk("condition");
  else
    Builder.AddPlaceholderChunk("expression");
  Builder.AddChunk(CodeCompletionString::CK_RightParen);
  if (Results.includeCodePatterns()) {
    Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
    Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
    Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
    Builder.AddPlaceholderChunk("statements");
    Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
    Builder.AddChunk(CodeCompletionString::CK_RightBrace);
  }
  Results.AddResult(Builder.TakeString());
  Results.ExitScope();
  if (S->getFnParent())
    AddPrettyFunctionResults(PP.getLangOpts(), Results);
  if (CodeCompleter->includeMacros())
    AddMacroResults(PP, Results, false);
  HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
                            Results.data(),Results.size());
}
/// \brief Produce code-completion results for the right-hand side of an
/// assignment, preferring expressions of the left-hand side's type.
///
/// Falls back to ordinary name completion when no LHS expression is
/// available.
void Sema::CodeCompleteAssignmentRHS(Scope *S, Expr *LHS) {
  if (LHS)
    // LHS is already an Expr*; the previous static_cast was redundant.
    CodeCompleteExpression(S, LHS->getType());
  else
    CodeCompleteOrdinaryName(S, PCC_Expression);
}
/// \brief Produce code-completion results for a name following a
/// nested-name-specifier ("Foo::").
///
/// \param EnteringContext whether the parser is entering the scope named by
/// the specifier (e.g. an out-of-line definition) rather than referencing
/// into it.
void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
                                   bool EnteringContext) {
  if (!SS.getScopeRep() || !CodeCompleter)
    return;
  DeclContext *Ctx = computeDeclContext(SS, EnteringContext);
  if (!Ctx)
    return;
  // Try to instantiate any non-dependent declaration contexts before
  // we look in them.
  if (!isDependentScopeSpecifier(SS) && RequireCompleteDeclContext(SS, Ctx))
    return;
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_Name);
  Results.EnterNewScope();
  // The "template" keyword can follow "::" in the grammar, but only
  // put it into the grammar if the nested-name-specifier is dependent.
  NestedNameSpecifier *NNS = SS.getScopeRep();
  if (!Results.empty() && NNS->isDependent())
    Results.AddResult("template");
  // Add calls to overridden virtual functions, if there are any.
  //
  // FIXME: This isn't wonderful, because we don't know whether we're actually
  // in a context that permits expressions. This is a general issue with
  // qualified-id completions.
  if (!EnteringContext)
    MaybeAddOverrideCalls(*this, Ctx, Results);
  Results.ExitScope();
  // Finally, add everything visible in the named context itself.
  CodeCompletionDeclConsumer Consumer(Results, CurContext);
  LookupVisibleDecls(Ctx, LookupOrdinaryName, Consumer);
  HandleCodeCompleteResults(this, CodeCompleter,
                            Results.getCompletionContext(),
                            Results.data(),Results.size());
}
/// \brief Produce code-completion results for the token(s) following 'using'.
void Sema::CodeCompleteUsing(Scope *S) {
  if (!CodeCompleter)
    return;
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_PotentiallyQualifiedName,
                        &ResultBuilder::IsNestedNameSpecifier);
  Results.EnterNewScope();
  // Outside of a class, 'using namespace ...' is also possible.
  if (!S->isClassScope())
    Results.AddResult(CodeCompletionResult("namespace"));
  // Anything that could begin a nested-name-specifier is valid after 'using'.
  CodeCompletionDeclConsumer DeclConsumer(Results, CurContext);
  LookupVisibleDecls(S, LookupOrdinaryName, DeclConsumer,
                     CodeCompleter->includeGlobals());
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                       CodeCompletionContext::CCC_PotentiallyQualifiedName,
                            Results.data(), Results.size());
}
/// \brief Produce code-completion results after 'using namespace', where only
/// a namespace name or namespace alias is expected.
void Sema::CodeCompleteUsingDirective(Scope *S) {
  if (!CodeCompleter)
    return;
  // Filter visible declarations down to namespaces and namespace aliases.
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_Namespace,
                        &ResultBuilder::IsNamespaceOrAlias);
  Results.EnterNewScope();
  CodeCompletionDeclConsumer DeclConsumer(Results, CurContext);
  LookupVisibleDecls(S, LookupOrdinaryName, DeclConsumer,
                     CodeCompleter->includeGlobals());
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_Namespace,
                            Results.data(), Results.size());
}
/// \brief Produce code-completion results for the name in a namespace
/// declaration.
///
/// At file scope, suggests the namespaces already defined in this context —
/// the user is most likely writing an extension of an existing namespace.
void Sema::CodeCompleteNamespaceDecl(Scope *S) {
  if (!CodeCompleter)
    return;
  DeclContext *Ctx = S->getEntity();
  if (!S->getParent())
    Ctx = Context.getTranslationUnitDecl();
  bool SuppressedGlobalResults
    = Ctx && !CodeCompleter->includeGlobals() && isa<TranslationUnitDecl>(Ctx);
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        SuppressedGlobalResults
                          ? CodeCompletionContext::CCC_Namespace
                          : CodeCompletionContext::CCC_Other,
                        &ResultBuilder::IsNamespace);
  if (Ctx && Ctx->isFileContext() && !SuppressedGlobalResults) {
    // We only want to see those namespaces that have already been defined
    // within this scope, because it's likely that the user is creating an
    // extended namespace declaration. Keep track of the most recent
    // definition of each namespace.
    std::map<NamespaceDecl *, NamespaceDecl *> OrigToLatest;
    for (DeclContext::specific_decl_iterator<NamespaceDecl>
           NS(Ctx->decls_begin()), NSEnd(Ctx->decls_end());
         NS != NSEnd; ++NS)
      OrigToLatest[NS->getOriginalNamespace()] = *NS;
    // Add the most recent definition (or extended definition) of each
    // namespace to the list of results.
    Results.EnterNewScope();
    for (auto &NS : OrigToLatest)
      Results.AddResult(CodeCompletionResult(
                            NS.second, Results.getBasePriority(NS.second),
                            nullptr),
                        CurContext, nullptr, false);
    Results.ExitScope();
  }
  HandleCodeCompleteResults(this, CodeCompleter,
                            Results.getCompletionContext(),
                            Results.data(), Results.size());
}
/// \brief Produce code-completion results for the target of a namespace alias
/// declaration ('namespace X = <here>'), where a namespace or an existing
/// alias is expected.
void Sema::CodeCompleteNamespaceAliasDecl(Scope *S) {
  if (!CodeCompleter)
    return;
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_Namespace,
                        &ResultBuilder::IsNamespaceOrAlias);
  CodeCompletionDeclConsumer DeclConsumer(Results, CurContext);
  LookupVisibleDecls(S, LookupOrdinaryName, DeclConsumer,
                     CodeCompleter->includeGlobals());
  HandleCodeCompleteResults(this, CodeCompleter,
                            Results.getCompletionContext(),
                            Results.data(), Results.size());
}
/// \brief Produce code-completion results after the 'operator' keyword: the
/// overloadable operator spellings plus any type names visible in scope (for
/// conversion operators).
void Sema::CodeCompleteOperatorName(Scope *S) {
  if (!CodeCompleter)
    return;
  typedef CodeCompletionResult Result;
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_Type,
                        &ResultBuilder::IsType);
  Results.EnterNewScope();
  // Add the names of overloadable operators. The "?" spelling (conditional
  // operator) is not overloadable, so it is filtered out.
#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
  if (std::strcmp(Spelling, "?"))                                        \
    Results.AddResult(Result(Spelling));
#include "clang/Basic/OperatorKinds.def"
  // Add any type names visible from the current scope
  Results.allowNestedNameSpecifiers();
  CodeCompletionDeclConsumer Consumer(Results, CurContext);
  LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
                     CodeCompleter->includeGlobals());
  // Add any type specifiers
  AddTypeSpecifierResults(getLangOpts(), Results);
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_Type,
                            Results.data(),Results.size());
}
/// \brief Produce code-completion results inside a constructor's
/// member-initializer list.
///
/// Suggests the bases, virtual bases, and fields that have not yet been
/// initialized. The initializer expected to come right after the last one
/// already written gets a higher priority (CCP_NextInitializer).
void Sema::CodeCompleteConstructorInitializer(
                              Decl *ConstructorD,
                              ArrayRef <CXXCtorInitializer *> Initializers) {
  if (!ConstructorD)
    return;
  AdjustDeclIfTemplate(ConstructorD);
  CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(ConstructorD);
  if (!Constructor)
    return;
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_PotentiallyQualifiedName);
  Results.EnterNewScope();
  // Fill in any already-initialized fields or base classes.
  llvm::SmallPtrSet<FieldDecl *, 4> InitializedFields;
  llvm::SmallPtrSet<CanQualType, 4> InitializedBases;
  for (unsigned I = 0, E = Initializers.size(); I != E; ++I) {
    if (Initializers[I]->isBaseInitializer())
      InitializedBases.insert(
        Context.getCanonicalType(QualType(Initializers[I]->getBaseClass(), 0)));
    else
      InitializedFields.insert(cast<FieldDecl>(
                               Initializers[I]->getAnyMember()));
  }
  // Add completions for base classes.
  CodeCompletionBuilder Builder(Results.getAllocator(),
                                Results.getCodeCompletionTUInfo());
  PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
  // SawLastInitializer tracks whether the previous list entry was the last
  // initializer the user wrote; the next uninitialized entity after it gets
  // the higher CCP_NextInitializer priority.
  bool SawLastInitializer = Initializers.empty();
  CXXRecordDecl *ClassDecl = Constructor->getParent();
  for (const auto &Base : ClassDecl->bases()) {
    if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))
             .second) {
      // Already initialized; remember whether it was the last one written.
      SawLastInitializer
        = !Initializers.empty() &&
          Initializers.back()->isBaseInitializer() &&
          Context.hasSameUnqualifiedType(Base.getType(),
               QualType(Initializers.back()->getBaseClass(), 0));
      continue;
    }
    Builder.AddTypedTextChunk(
               Results.getAllocator().CopyString(
                          Base.getType().getAsString(Policy)));
    Builder.AddChunk(CodeCompletionString::CK_LeftParen);
    Builder.AddPlaceholderChunk("args");
    Builder.AddChunk(CodeCompletionString::CK_RightParen);
    Results.AddResult(CodeCompletionResult(Builder.TakeString(),
                                   SawLastInitializer? CCP_NextInitializer
                                                     : CCP_MemberDeclaration));
    SawLastInitializer = false;
  }
  // Add completions for virtual base classes.
  for (const auto &Base : ClassDecl->vbases()) {
    if (!InitializedBases.insert(Context.getCanonicalType(Base.getType()))
             .second) {
      SawLastInitializer
        = !Initializers.empty() &&
          Initializers.back()->isBaseInitializer() &&
          Context.hasSameUnqualifiedType(Base.getType(),
               QualType(Initializers.back()->getBaseClass(), 0));
      continue;
    }
    Builder.AddTypedTextChunk(
               Builder.getAllocator().CopyString(
                          Base.getType().getAsString(Policy)));
    Builder.AddChunk(CodeCompletionString::CK_LeftParen);
    Builder.AddPlaceholderChunk("args");
    Builder.AddChunk(CodeCompletionString::CK_RightParen);
    Results.AddResult(CodeCompletionResult(Builder.TakeString(),
                                   SawLastInitializer? CCP_NextInitializer
                                                     : CCP_MemberDeclaration));
    SawLastInitializer = false;
  }
  // Add completions for members.
  for (auto *Field : ClassDecl->fields()) {
    if (!InitializedFields.insert(cast<FieldDecl>(Field->getCanonicalDecl()))
             .second) {
      SawLastInitializer
        = !Initializers.empty() &&
          Initializers.back()->isAnyMemberInitializer() &&
          Initializers.back()->getAnyMember() == Field;
      continue;
    }
    // Anonymous members cannot be named in an initializer list.
    if (!Field->getDeclName())
      continue;
    Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
                                         Field->getIdentifier()->getName()));
    Builder.AddChunk(CodeCompletionString::CK_LeftParen);
    Builder.AddPlaceholderChunk("args");
    Builder.AddChunk(CodeCompletionString::CK_RightParen);
    Results.AddResult(CodeCompletionResult(Builder.TakeString(),
                                   SawLastInitializer? CCP_NextInitializer
                                                     : CCP_MemberDeclaration,
                                           CXCursor_MemberRef,
                                           CXAvailability_Available,
                                           Field));
    SawLastInitializer = false;
  }
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
                            Results.data(), Results.size());
}
/// \brief Determine whether this scope denotes a namespace (i.e. its entity
/// is a file context, which includes the translation unit itself).
static bool isNamespaceScope(Scope *S) {
  DeclContext *DC = S->getEntity();
  return DC && DC->isFileContext();
}
/// \brief Produce code-completion results inside a lambda introducer's
/// capture list.
///
/// Suggests local variables that have not already been captured, plus 'this'
/// when capturing it would be valid.
void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
                                        bool AfterAmpersand) {
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_Other);
  Results.EnterNewScope();
  // Note what has already been captured.
  llvm::SmallPtrSet<IdentifierInfo *, 4> Known;
  bool IncludedThis = false;
  for (const auto &C : Intro.Captures) {
    if (C.Kind == LCK_This) {
      IncludedThis = true;
      continue;
    }
    Known.insert(C.Id);
  }
  // Look for other capturable variables: walk outward through enclosing
  // scopes until we leave function scope (hit a namespace scope).
  for (; S && !isNamespaceScope(S); S = S->getParent()) {
    for (const auto *D : S->decls()) {
      const auto *Var = dyn_cast<VarDecl>(D);
      // Only variables with local storage can be captured; __block
      // variables are excluded.
      if (!Var ||
          !Var->hasLocalStorage() ||
          Var->hasAttr<BlocksAttr>())
        continue;
      if (Known.insert(Var->getIdentifier()).second)
        Results.AddResult(CodeCompletionResult(Var, CCP_LocalDeclaration),
                          CurContext, nullptr, false);
    }
  }
  // Add 'this', if it would be valid: not already captured, not right after
  // '&' (no '&this'), and not under a by-copy capture-default.
  if (!IncludedThis && !AfterAmpersand && Intro.Default != LCD_ByCopy)
    addThisCompletion(*this, Results);
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
                            Results.data(), Results.size());
}
/// Macro that optionally prepends an "@" to the string literal passed in via
/// Keyword, depending on whether NeedAt is true or false.
#define OBJC_AT_KEYWORD_NAME(NeedAt,Keyword) ((NeedAt)? "@" Keyword : Keyword)
/// Add the directives that are valid inside an Objective-C @implementation:
/// @end, and (under ObjC2) @dynamic and @synthesize.
static void AddObjCImplementationResults(const LangOptions &LangOpts,
                                         ResultBuilder &Results,
                                         bool NeedAt) {
  typedef CodeCompletionResult Result;
  // Since we have an implementation, we can end it.
  Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"end")));
  CodeCompletionBuilder Builder(Results.getAllocator(),
                                Results.getCodeCompletionTUInfo());
  if (LangOpts.ObjC2) {
    // @dynamic
    Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"dynamic"));
    Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
    Builder.AddPlaceholderChunk("property");
    Results.AddResult(Result(Builder.TakeString()));
    // @synthesize
    Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"synthesize"));
    Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
    Builder.AddPlaceholderChunk("property");
    Results.AddResult(Result(Builder.TakeString()));
  }
}
static void AddObjCInterfaceResults(const LangOptions &LangOpts,
ResultBuilder &Results,
bool NeedAt) {
typedef CodeCompletionResult Result;
// Since we have an interface or protocol, we can end it.
Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"end")));
if (LangOpts.ObjC2) {
// @property
Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"property")));
// @required
Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"required")));
// @optional
Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"optional")));
}
}
/// Add the Objective-C @-directives that are valid at the top level of a
/// translation unit: @class, @compatibility_alias, @import (when modules are
/// enabled), and — when code patterns are requested — @interface, @protocol,
/// and @implementation skeletons.
static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
  typedef CodeCompletionResult Result;
  CodeCompletionBuilder Builder(Results.getAllocator(),
                                Results.getCodeCompletionTUInfo());
  // @class name ;
  Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"class"));
  Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
  Builder.AddPlaceholderChunk("name");
  Results.AddResult(Result(Builder.TakeString()));
  if (Results.includeCodePatterns()) {
    // @interface name
    // FIXME: Could introduce the whole pattern, including superclasses and
    // such.
    Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"interface"));
    Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
    Builder.AddPlaceholderChunk("class");
    Results.AddResult(Result(Builder.TakeString()));
    // @protocol name
    Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"protocol"));
    Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
    Builder.AddPlaceholderChunk("protocol");
    Results.AddResult(Result(Builder.TakeString()));
    // @implementation name
    Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"implementation"));
    Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
    Builder.AddPlaceholderChunk("class");
    Results.AddResult(Result(Builder.TakeString()));
  }
  // @compatibility_alias name
  Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"compatibility_alias"));
  Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
  Builder.AddPlaceholderChunk("alias");
  Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
  Builder.AddPlaceholderChunk("class");
  Results.AddResult(Result(Builder.TakeString()));
  if (Results.getSema().getLangOpts().Modules) {
    // @import name — only offered when modules are enabled.
    Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "import"));
    Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
    Builder.AddPlaceholderChunk("module");
    Results.AddResult(Result(Builder.TakeString()));
  }
}
void Sema::CodeCompleteObjCAtDirective(Scope *S) {
  // Completing directly after an '@': offer the @-directives that are legal
  // in the current declaration context.
  ResultBuilder AtResults(*this, CodeCompleter->getAllocator(),
                          CodeCompleter->getCodeCompletionTUInfo(),
                          CodeCompletionContext::CCC_Other);
  AtResults.EnterNewScope();
  if (isa<ObjCImplDecl>(CurContext)) {
    // Inside an @implementation (or category implementation).
    AddObjCImplementationResults(getLangOpts(), AtResults, false);
  } else if (CurContext->isObjCContainer()) {
    // Inside an @interface, protocol, or category declaration.
    AddObjCInterfaceResults(getLangOpts(), AtResults, false);
  } else {
    // At the top level.
    AddObjCTopLevelResults(AtResults, false);
  }
  AtResults.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_Other,
                            AtResults.data(), AtResults.size());
}
/// Add the Objective-C @-expression completions (\@encode, \@protocol,
/// \@selector, and the literal forms \@"...", \@[...], \@{...}, \@(...))
/// to \p Results.
static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
  typedef CodeCompletionResult Result;
  CodeCompletionBuilder Pattern(Results.getAllocator(),
                                Results.getCodeCompletionTUInfo());

  // @encode ( type-name ): produces the type's encoding as a C string;
  // the array is const-qualified in C++ and with -fconst-strings.
  const LangOptions &LangOpts = Results.getSema().getLangOpts();
  const char *EncodeType =
      (LangOpts.CPlusPlus || LangOpts.ConstStrings) ? "const char[]"
                                                    : "char[]";
  Pattern.AddResultTypeChunk(EncodeType);
  Pattern.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"encode"));
  Pattern.AddChunk(CodeCompletionString::CK_LeftParen);
  Pattern.AddPlaceholderChunk("type-name");
  Pattern.AddChunk(CodeCompletionString::CK_RightParen);
  Results.AddResult(Result(Pattern.TakeString()));

  // @protocol ( protocol-name )
  Pattern.AddResultTypeChunk("Protocol *");
  Pattern.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"protocol"));
  Pattern.AddChunk(CodeCompletionString::CK_LeftParen);
  Pattern.AddPlaceholderChunk("protocol-name");
  Pattern.AddChunk(CodeCompletionString::CK_RightParen);
  Results.AddResult(Result(Pattern.TakeString()));

  // @selector ( selector )
  Pattern.AddResultTypeChunk("SEL");
  Pattern.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"selector"));
  Pattern.AddChunk(CodeCompletionString::CK_LeftParen);
  Pattern.AddPlaceholderChunk("selector");
  Pattern.AddChunk(CodeCompletionString::CK_RightParen);
  Results.AddResult(Result(Pattern.TakeString()));

  // @"string" literal
  Pattern.AddResultTypeChunk("NSString *");
  Pattern.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"\""));
  Pattern.AddPlaceholderChunk("string");
  Pattern.AddTextChunk("\"");
  Results.AddResult(Result(Pattern.TakeString()));

  // @[objects, ...] array literal
  Pattern.AddResultTypeChunk("NSArray *");
  Pattern.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"["));
  Pattern.AddPlaceholderChunk("objects, ...");
  Pattern.AddChunk(CodeCompletionString::CK_RightBracket);
  Results.AddResult(Result(Pattern.TakeString()));

  // @{key : object, ...} dictionary literal
  Pattern.AddResultTypeChunk("NSDictionary *");
  Pattern.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"{"));
  Pattern.AddPlaceholderChunk("key");
  Pattern.AddChunk(CodeCompletionString::CK_Colon);
  Pattern.AddChunk(CodeCompletionString::CK_HorizontalSpace);
  Pattern.AddPlaceholderChunk("object, ...");
  Pattern.AddChunk(CodeCompletionString::CK_RightBrace);
  Results.AddResult(Result(Pattern.TakeString()));

  // @(expression) boxed expression
  Pattern.AddResultTypeChunk("id");
  Pattern.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt, "("));
  Pattern.AddPlaceholderChunk("expression");
  Pattern.AddChunk(CodeCompletionString::CK_RightParen);
  Results.AddResult(Result(Pattern.TakeString()));
}
/// Add the Objective-C statement-level @-completions (\@try/\@catch/\@finally,
/// \@throw, \@synchronized) to \p Results. The multi-statement patterns are
/// only offered when code patterns are enabled; \@throw is always offered.
static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
  typedef CodeCompletionResult Result;
  CodeCompletionBuilder Pattern(Results.getAllocator(),
                                Results.getCodeCompletionTUInfo());
  const bool CodePatterns = Results.includeCodePatterns();

  if (CodePatterns) {
    // @try { statements } @catch ( declaration ) { statements }
    // @finally { statements }
    Pattern.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"try"));
    Pattern.AddChunk(CodeCompletionString::CK_LeftBrace);
    Pattern.AddPlaceholderChunk("statements");
    Pattern.AddChunk(CodeCompletionString::CK_RightBrace);
    Pattern.AddTextChunk("@catch");
    Pattern.AddChunk(CodeCompletionString::CK_LeftParen);
    Pattern.AddPlaceholderChunk("parameter");
    Pattern.AddChunk(CodeCompletionString::CK_RightParen);
    Pattern.AddChunk(CodeCompletionString::CK_LeftBrace);
    Pattern.AddPlaceholderChunk("statements");
    Pattern.AddChunk(CodeCompletionString::CK_RightBrace);
    Pattern.AddTextChunk("@finally");
    Pattern.AddChunk(CodeCompletionString::CK_LeftBrace);
    Pattern.AddPlaceholderChunk("statements");
    Pattern.AddChunk(CodeCompletionString::CK_RightBrace);
    Results.AddResult(Result(Pattern.TakeString()));
  }

  // @throw expression
  Pattern.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"throw"));
  Pattern.AddChunk(CodeCompletionString::CK_HorizontalSpace);
  Pattern.AddPlaceholderChunk("expression");
  Results.AddResult(Result(Pattern.TakeString()));

  if (CodePatterns) {
    // @synchronized ( expression ) { statements }
    Pattern.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"synchronized"));
    Pattern.AddChunk(CodeCompletionString::CK_HorizontalSpace);
    Pattern.AddChunk(CodeCompletionString::CK_LeftParen);
    Pattern.AddPlaceholderChunk("expression");
    Pattern.AddChunk(CodeCompletionString::CK_RightParen);
    Pattern.AddChunk(CodeCompletionString::CK_LeftBrace);
    Pattern.AddPlaceholderChunk("statements");
    Pattern.AddChunk(CodeCompletionString::CK_RightBrace);
    Results.AddResult(Result(Pattern.TakeString()));
  }
}
/// Add the Objective-C instance-variable visibility directives (\@private,
/// \@protected, \@public, and -- in Objective-C 2.0 -- \@package) to
/// \p Results.
static void AddObjCVisibilityResults(const LangOptions &LangOpts,
                                     ResultBuilder &Results,
                                     bool NeedAt) {
  typedef CodeCompletionResult Result;
  Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"private")));
  Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"protected")));
  Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"public")));
  // @package is an Objective-C 2.0 addition.
  if (LangOpts.ObjC2)
    Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,"package")));
}
void Sema::CodeCompleteObjCAtVisibility(Scope *S) {
  // Completing after '@' inside an instance-variable block: only the
  // visibility directives apply here.
  ResultBuilder VisResults(*this, CodeCompleter->getAllocator(),
                           CodeCompleter->getCodeCompletionTUInfo(),
                           CodeCompletionContext::CCC_Other);
  VisResults.EnterNewScope();
  AddObjCVisibilityResults(getLangOpts(), VisResults, false);
  VisResults.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_Other,
                            VisResults.data(), VisResults.size());
}
void Sema::CodeCompleteObjCAtStatement(Scope *S) {
  // After '@' in statement position both the @-statements (@try, @throw,
  // @synchronized) and the @-expressions (@encode, @selector, literals, ...)
  // are valid, so offer both sets.
  ResultBuilder StmtResults(*this, CodeCompleter->getAllocator(),
                            CodeCompleter->getCodeCompletionTUInfo(),
                            CodeCompletionContext::CCC_Other);
  StmtResults.EnterNewScope();
  AddObjCStatementResults(StmtResults, false);
  AddObjCExpressionResults(StmtResults, false);
  StmtResults.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_Other,
                            StmtResults.data(), StmtResults.size());
}
void Sema::CodeCompleteObjCAtExpression(Scope *S) {
  // After '@' in expression position only the @-expressions are valid.
  ResultBuilder ExprResults(*this, CodeCompleter->getAllocator(),
                            CodeCompleter->getCodeCompletionTUInfo(),
                            CodeCompletionContext::CCC_Other);
  ExprResults.EnterNewScope();
  AddObjCExpressionResults(ExprResults, false);
  ExprResults.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_Other,
                            ExprResults.data(), ExprResults.size());
}
/// \brief Determine whether the addition of the given flag to an Objective-C
/// property's attributes will cause a conflict.
static bool ObjCPropertyFlagConflicts(unsigned Attributes, unsigned NewFlag) {
  // Re-adding a flag that is already present is itself a conflict.
  if (Attributes & NewFlag)
    return true;
  Attributes |= NewFlag;

  // "readonly" and "readwrite" are mutually exclusive.
  if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
      (Attributes & ObjCDeclSpec::DQ_PR_readwrite))
    return true;

  // At most one of { assign, unsafe_unretained, copy, retain, strong, weak }
  // may be specified. Each DQ_PR_* value is a distinct bit, so a conflict is
  // exactly "more than one bit set" in the masked value.
  unsigned OwnershipMask = Attributes & (ObjCDeclSpec::DQ_PR_assign |
                                         ObjCDeclSpec::DQ_PR_unsafe_unretained |
                                         ObjCDeclSpec::DQ_PR_copy |
                                         ObjCDeclSpec::DQ_PR_retain |
                                         ObjCDeclSpec::DQ_PR_strong |
                                         ObjCDeclSpec::DQ_PR_weak);
  return (OwnershipMask & (OwnershipMask - 1)) != 0;
}
/// Code completion inside the parenthesized attribute list of an \@property
/// declaration. Offers only the attributes that do not conflict (per
/// ObjCPropertyFlagConflicts) with those already written in \p ODS.
void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
  if (!CodeCompleter)
    return;
  unsigned Attributes = ODS.getPropertyAttributes();
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_Other);
  Results.EnterNewScope();
  if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_readonly))
    Results.AddResult(CodeCompletionResult("readonly"));
  if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_assign))
    Results.AddResult(CodeCompletionResult("assign"));
  if (!ObjCPropertyFlagConflicts(Attributes,
                                 ObjCDeclSpec::DQ_PR_unsafe_unretained))
    Results.AddResult(CodeCompletionResult("unsafe_unretained"));
  if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_readwrite))
    Results.AddResult(CodeCompletionResult("readwrite"));
  if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_retain))
    Results.AddResult(CodeCompletionResult("retain"));
  if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_strong))
    Results.AddResult(CodeCompletionResult("strong"));
  if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_copy))
    Results.AddResult(CodeCompletionResult("copy"));
  if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_nonatomic))
    Results.AddResult(CodeCompletionResult("nonatomic"));
  if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_atomic))
    Results.AddResult(CodeCompletionResult("atomic"));
  // Only suggest "weak" if we're compiling for ARC-with-weak-references or GC.
  if (getLangOpts().ObjCARCWeak || getLangOpts().getGC() != LangOptions::NonGC)
    if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_weak))
      Results.AddResult(CodeCompletionResult("weak"));
  // "setter = <method>" pattern.
  if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_setter)) {
    CodeCompletionBuilder Setter(Results.getAllocator(),
                                 Results.getCodeCompletionTUInfo());
    Setter.AddTypedTextChunk("setter");
    Setter.AddTextChunk("=");
    Setter.AddPlaceholderChunk("method");
    Results.AddResult(CodeCompletionResult(Setter.TakeString()));
  }
  // "getter = <method>" pattern.
  if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_getter)) {
    CodeCompletionBuilder Getter(Results.getAllocator(),
                                 Results.getCodeCompletionTUInfo());
    Getter.AddTypedTextChunk("getter");
    Getter.AddTextChunk("=");
    Getter.AddPlaceholderChunk("method");
    Results.AddResult(CodeCompletionResult(Getter.TakeString()));
  }
  // The nullability attributes share a single conflict bit: at most one of
  // them may be specified.
  if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_nullability)) {
    Results.AddResult(CodeCompletionResult("nonnull"));
    Results.AddResult(CodeCompletionResult("nullable"));
    Results.AddResult(CodeCompletionResult("null_unspecified"));
    Results.AddResult(CodeCompletionResult("null_resettable"));
  }
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_Other,
                            Results.data(),Results.size());
}
/// \brief Describes the kind of Objective-C method that we want to find
/// via code completion.
enum ObjCMethodKind {
  MK_Any, ///< Any kind of method, provided it meets the other specified criteria.
  MK_ZeroArgSelector, ///< Zero-argument (unary) selector.
  MK_OneArgSelector ///< One-argument selector.
};
/// Determine whether \p Sel is acceptable for the requested completion:
/// it must have the right arity for \p WantKind, and the identifiers already
/// typed (\p SelIdents) must be a prefix of its slots. When \p AllowSameLength
/// is false, a selector whose arity exactly matches the number of identifiers
/// already typed is rejected (it is already complete).
static bool isAcceptableObjCSelector(Selector Sel,
                                     ObjCMethodKind WantKind,
                                     ArrayRef<IdentifierInfo *> SelIdents,
                                     bool AllowSameLength = true) {
  const unsigned NumIdents = SelIdents.size();
  if (NumIdents > Sel.getNumArgs())
    return false;

  switch (WantKind) {
  case MK_Any:
    break;
  case MK_ZeroArgSelector:
    return Sel.isUnarySelector();
  case MK_OneArgSelector:
    return Sel.getNumArgs() == 1;
  }

  if (!AllowSameLength && NumIdents && NumIdents == Sel.getNumArgs())
    return false;

  // Every identifier already provided must match the corresponding slot.
  for (unsigned Slot = 0; Slot < NumIdents; ++Slot)
    if (Sel.getIdentifierInfoForSlot(Slot) != SelIdents[Slot])
      return false;

  return true;
}
/// Determine whether \p Method's selector is acceptable for the requested
/// completion; defers entirely to isAcceptableObjCSelector.
static bool isAcceptableObjCMethod(ObjCMethodDecl *Method,
                                   ObjCMethodKind WantKind,
                                   ArrayRef<IdentifierInfo *> SelIdents,
                                   bool AllowSameLength = true) {
  return isAcceptableObjCSelector(Method->getSelector(), WantKind, SelIdents,
                                  AllowSameLength);
}
namespace {
  /// \brief A set of selectors, which is used to avoid introducing multiple
  /// completions with the same selector into the result set.
  typedef llvm::SmallPtrSet<Selector, 16> VisitedSelectorSet;
}
/// \brief Add all of the Objective-C methods in the given Objective-C
/// container to the set of results.
///
/// The container will be a class, protocol, category, or implementation of
/// any of the above. This method will recurse to include methods from
/// the superclasses of classes along with their categories, protocols, and
/// implementations.
///
/// \param Container the container in which we'll look to find methods.
///
/// \param WantInstanceMethods Whether to add instance methods (only); if
/// false, this routine will add factory methods (only).
///
/// \param CurContext the context in which we're performing the lookup that
/// finds methods.
///
/// \param Selectors the set of selectors already added, used to suppress
/// duplicate completions across the recursive walk.
///
/// \param AllowSameLength Whether we allow a method to be added to the list
/// when it has the same number of parameters as we have selector identifiers.
///
/// \param Results the structure into which we'll add results.
static void AddObjCMethods(ObjCContainerDecl *Container,
                           bool WantInstanceMethods,
                           ObjCMethodKind WantKind,
                           ArrayRef<IdentifierInfo *> SelIdents,
                           DeclContext *CurContext,
                           VisitedSelectorSet &Selectors,
                           bool AllowSameLength,
                           ResultBuilder &Results,
                           bool InOriginalClass = true) {
  typedef CodeCompletionResult Result;
  Container = getContainerDef(Container);
  ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container);
  bool isRootClass = IFace && !IFace->getSuperClass();
  for (auto *M : Container->methods()) {
    // The instance methods on the root class can be messaged via the
    // metaclass.
    if (M->isInstanceMethod() == WantInstanceMethods ||
        (isRootClass && !WantInstanceMethods)) {
      // Check whether the selector identifiers we've been given are a
      // subset of the identifiers for this particular method.
      if (!isAcceptableObjCMethod(M, WantKind, SelIdents, AllowSameLength))
        continue;
      // Skip selectors for which we already produced a completion.
      if (!Selectors.insert(M->getSelector()).second)
        continue;
      Result R = Result(M, Results.getBasePriority(M), nullptr);
      R.StartParameter = SelIdents.size();
      R.AllParametersAreInformative = (WantKind != MK_Any);
      // Methods inherited from bases rank slightly lower.
      if (!InOriginalClass)
        R.Priority += CCD_InBaseClass;
      Results.MaybeAddResult(R, CurContext);
    }
  }
  // Visit the protocols of protocols.
  if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
    if (Protocol->hasDefinition()) {
      const ObjCList<ObjCProtocolDecl> &Protocols
        = Protocol->getReferencedProtocols();
      for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
                                                E = Protocols.end();
           I != E; ++I)
        AddObjCMethods(*I, WantInstanceMethods, WantKind, SelIdents,
                       CurContext, Selectors, AllowSameLength, Results, false);
    }
  }
  // Everything below requires a fully-defined interface.
  if (!IFace || !IFace->hasDefinition())
    return;
  // Add methods in protocols.
  for (auto *I : IFace->protocols())
    AddObjCMethods(I, WantInstanceMethods, WantKind, SelIdents,
                   CurContext, Selectors, AllowSameLength, Results, false);
  // Add methods in categories.
  for (auto *CatDecl : IFace->known_categories()) {
    AddObjCMethods(CatDecl, WantInstanceMethods, WantKind, SelIdents,
                   CurContext, Selectors, AllowSameLength,
                   Results, InOriginalClass);
    // Add the category's protocol methods.
    const ObjCList<ObjCProtocolDecl> &Protocols
      = CatDecl->getReferencedProtocols();
    for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
                                              E = Protocols.end();
         I != E; ++I)
      AddObjCMethods(*I, WantInstanceMethods, WantKind, SelIdents,
                     CurContext, Selectors, AllowSameLength,
                     Results, false);
    // Add methods in category implementations.
    if (ObjCCategoryImplDecl *Impl = CatDecl->getImplementation())
      AddObjCMethods(Impl, WantInstanceMethods, WantKind, SelIdents,
                     CurContext, Selectors, AllowSameLength,
                     Results, InOriginalClass);
  }
  // Add methods in superclass.
  if (IFace->getSuperClass())
    AddObjCMethods(IFace->getSuperClass(), WantInstanceMethods, WantKind,
                   SelIdents, CurContext, Selectors,
                   AllowSameLength, Results, false);
  // Add methods in our implementation, if any.
  if (ObjCImplementationDecl *Impl = IFace->getImplementation())
    AddObjCMethods(Impl, WantInstanceMethods, WantKind, SelIdents,
                   CurContext, Selectors, AllowSameLength,
                   Results, InOriginalClass);
}
void Sema::CodeCompleteObjCPropertyGetter(Scope *S) {
  // Completing the method name in "@property (getter = <here>)". Locate the
  // interface whose methods can act as getters; for a category, fall back to
  // the class it extends.
  ObjCInterfaceDecl *IFace = dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
  if (!IFace) {
    if (ObjCCategoryDecl *Cat = dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
      IFace = Cat->getClassInterface();
    if (!IFace)
      return;
  }

  // Collect every zero-argument (unary) instance method as a potential getter.
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_Other);
  Results.EnterNewScope();
  VisitedSelectorSet Seen;
  AddObjCMethods(IFace, true, MK_ZeroArgSelector, None, CurContext, Seen,
                 /*AllowSameLength=*/true, Results);
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_Other,
                            Results.data(), Results.size());
}
/// Completing the method name in "@property (setter = <here>)".
void Sema::CodeCompleteObjCPropertySetter(Scope *S) {
  // Try to find the interface where setters might live.
  ObjCInterfaceDecl *Class
    = dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
  if (!Class) {
    // In a category, look at the class being extended.
    if (ObjCCategoryDecl *Category
          = dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
      Class = Category->getClassInterface();
    if (!Class)
      return;
  }
  // Find all of the potential setters: one-argument selectors.
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_Other);
  Results.EnterNewScope();
  VisitedSelectorSet Selectors;
  AddObjCMethods(Class, true, MK_OneArgSelector, None, CurContext,
                 Selectors, /*AllowSameLength=*/true, Results);
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_Other,
                            Results.data(),Results.size());
}
/// Code completion for a type used in an Objective-C method declaration,
/// i.e. a parameter type (\p IsParameter true) or a return type. Offers the
/// ObjC parameter-passing qualifiers that haven't been used yet, plus
/// ordinary type names.
void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
                                       bool IsParameter) {
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_Type);
  Results.EnterNewScope();
  // Add context-sensitive, Objective-C parameter-passing keywords.
  // "inout" overlaps both groups below; AddedInOut ensures it is only
  // suggested once.
  bool AddedInOut = false;
  if ((DS.getObjCDeclQualifier() &
       (ObjCDeclSpec::DQ_In | ObjCDeclSpec::DQ_Inout)) == 0) {
    Results.AddResult("in");
    Results.AddResult("inout");
    AddedInOut = true;
  }
  if ((DS.getObjCDeclQualifier() &
       (ObjCDeclSpec::DQ_Out | ObjCDeclSpec::DQ_Inout)) == 0) {
    Results.AddResult("out");
    if (!AddedInOut)
      Results.AddResult("inout");
  }
  if ((DS.getObjCDeclQualifier() &
       (ObjCDeclSpec::DQ_Bycopy | ObjCDeclSpec::DQ_Byref |
        ObjCDeclSpec::DQ_Oneway)) == 0) {
    Results.AddResult("bycopy");
    Results.AddResult("byref");
    Results.AddResult("oneway");
  }
  // The nullability specifiers share one qualifier bit; offer them only if
  // none has been written yet.
  if ((DS.getObjCDeclQualifier() & ObjCDeclSpec::DQ_CSNullability) == 0) {
    Results.AddResult("nonnull");
    Results.AddResult("nullable");
    Results.AddResult("null_unspecified");
  }
  // If we're completing the return type of an Objective-C method and the
  // identifier IBAction refers to a macro, provide a completion item for
  // an action, e.g.,
  //   IBAction)<#selector#>:(id)sender
  if (DS.getObjCDeclQualifier() == 0 && !IsParameter &&
      PP.isMacroDefined("IBAction")) {
    CodeCompletionBuilder Builder(Results.getAllocator(),
                                  Results.getCodeCompletionTUInfo(),
                                  CCP_CodePattern, CXAvailability_Available);
    Builder.AddTypedTextChunk("IBAction");
    Builder.AddChunk(CodeCompletionString::CK_RightParen);
    Builder.AddPlaceholderChunk("selector");
    Builder.AddChunk(CodeCompletionString::CK_Colon);
    Builder.AddChunk(CodeCompletionString::CK_LeftParen);
    Builder.AddTextChunk("id");
    Builder.AddChunk(CodeCompletionString::CK_RightParen);
    Builder.AddTextChunk("sender");
    Results.AddResult(CodeCompletionResult(Builder.TakeString()));
  }
  // If we're completing the return type, provide 'instancetype'.
  if (!IsParameter) {
    Results.AddResult(CodeCompletionResult("instancetype"));
  }
  // Add various builtin type names and specifiers.
  AddOrdinaryNameResults(PCC_Type, S, *this, Results);
  Results.ExitScope();
  // Add the various type names
  Results.setFilter(&ResultBuilder::IsOrdinaryNonValueName);
  CodeCompletionDeclConsumer Consumer(Results, CurContext);
  LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
                     CodeCompleter->includeGlobals());
  if (CodeCompleter->includeMacros())
    AddMacroResults(PP, Results, false);
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_Type,
                            Results.data(), Results.size());
}
/// \brief When we have an expression with type "id", we may assume
/// that it has some more-specific class type based on knowledge of
/// common uses of Objective-C. This routine returns that class type,
/// or NULL if no better result could be determined.
static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
  ObjCMessageExpr *Msg = dyn_cast_or_null<ObjCMessageExpr>(E);
  if (!Msg)
    return nullptr;
  Selector Sel = Msg->getSelector();
  if (Sel.isNull())
    return nullptr;
  // Only the first selector slot is consulted below.
  IdentifierInfo *Id = Sel.getIdentifierInfoForSlot(0);
  if (!Id)
    return nullptr;
  ObjCMethodDecl *Method = Msg->getMethodDecl();
  if (!Method)
    return nullptr;
  // Determine the class that we're sending the message to.
  ObjCInterfaceDecl *IFace = nullptr;
  switch (Msg->getReceiverKind()) {
  case ObjCMessageExpr::Class:
    if (const ObjCObjectType *ObjType
                           = Msg->getClassReceiver()->getAs<ObjCObjectType>())
      IFace = ObjType->getInterface();
    break;
  case ObjCMessageExpr::Instance: {
    QualType T = Msg->getInstanceReceiver()->getType();
    if (const ObjCObjectPointerType *Ptr = T->getAs<ObjCObjectPointerType>())
      IFace = Ptr->getInterfaceDecl();
    break;
  }
  case ObjCMessageExpr::SuperInstance:
  case ObjCMessageExpr::SuperClass:
    // No assumption for sends to super.
    break;
  }
  if (!IFace)
    return nullptr;
  ObjCInterfaceDecl *Super = IFace->getSuperClass();
  if (Method->isInstanceMethod())
    // Instance-method selectors commonly known to return the receiver's
    // type (or its superclass, for "superclass").
    return llvm::StringSwitch<ObjCInterfaceDecl *>(Id->getName())
      .Case("retain", IFace)
      .Case("strong", IFace)
      .Case("autorelease", IFace)
      .Case("copy", IFace)
      .Case("copyWithZone", IFace)
      .Case("mutableCopy", IFace)
      .Case("mutableCopyWithZone", IFace)
      .Case("awakeFromCoder", IFace)
      .Case("replacementObjectFromCoder", IFace)
      .Case("class", IFace)
      .Case("classForCoder", IFace)
      .Case("superclass", Super)
      .Default(nullptr);
  // Class (factory) methods.
  return llvm::StringSwitch<ObjCInterfaceDecl *>(Id->getName())
    .Case("new", IFace)
    .Case("alloc", IFace)
    .Case("allocWithZone", IFace)
    .Case("class", IFace)
    .Case("superclass", Super)
    .Default(nullptr);
}
/// Add a special completion for a message send to "super", which fills in the
/// most likely case of forwarding all of our arguments to the superclass
/// function.
///
/// \param S The semantic analysis object.
///
/// \param NeedSuperKeyword Whether we need to prefix this completion with
/// the "super" keyword. Otherwise, we just need to provide the arguments.
///
/// \param SelIdents The identifiers in the selector that have already been
/// provided as arguments for a send to "super".
///
/// \param Results The set of results to augment.
///
/// \returns the Objective-C method declaration that would be invoked by
/// this "super" completion. If NULL, no completion was added.
static ObjCMethodDecl *AddSuperSendCompletion(
                                          Sema &S, bool NeedSuperKeyword,
                                          ArrayRef<IdentifierInfo *> SelIdents,
                                          ResultBuilder &Results) {
  ObjCMethodDecl *CurMethod = S.getCurMethodDecl();
  if (!CurMethod)
    return nullptr;
  ObjCInterfaceDecl *Class = CurMethod->getClassInterface();
  if (!Class)
    return nullptr;
  // Try to find a superclass method with the same selector.
  ObjCMethodDecl *SuperMethod = nullptr;
  while ((Class = Class->getSuperClass()) && !SuperMethod) {
    // Check in the class
    SuperMethod = Class->getMethod(CurMethod->getSelector(),
                                   CurMethod->isInstanceMethod());
    // Check in categories or class extensions.
    if (!SuperMethod) {
      for (const auto *Cat : Class->known_categories()) {
        if ((SuperMethod = Cat->getMethod(CurMethod->getSelector(),
                                          CurMethod->isInstanceMethod())))
          break;
      }
    }
  }
  if (!SuperMethod)
    return nullptr;
  // Check whether the superclass method has the same signature.
  if (CurMethod->param_size() != SuperMethod->param_size() ||
      CurMethod->isVariadic() != SuperMethod->isVariadic())
    return nullptr;
  for (ObjCMethodDecl::param_iterator CurP = CurMethod->param_begin(),
                                   CurPEnd = CurMethod->param_end(),
                                    SuperP = SuperMethod->param_begin();
       CurP != CurPEnd; ++CurP, ++SuperP) {
    // Make sure the parameter types are compatible.
    if (!S.Context.hasSameUnqualifiedType((*CurP)->getType(),
                                          (*SuperP)->getType()))
      return nullptr;
    // Make sure we have a parameter name to forward!
    if (!(*CurP)->getIdentifier())
      return nullptr;
  }
  // We have a superclass method. Now, form the send-to-super completion.
  CodeCompletionBuilder Builder(Results.getAllocator(),
                                Results.getCodeCompletionTUInfo());
  // Give this completion a return type.
  AddResultTypeChunk(S.Context, getCompletionPrintingPolicy(S), SuperMethod,
                     Results.getCompletionContext().getBaseType(),
                     Builder);
  // If we need the "super" keyword, add it (plus some spacing).
  if (NeedSuperKeyword) {
    Builder.AddTypedTextChunk("super");
    Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
  }
  Selector Sel = CurMethod->getSelector();
  if (Sel.isUnarySelector()) {
    // A unary selector is a single chunk; it is "typed text" only when the
    // user still has to type it (i.e., no "super" keyword was emitted).
    if (NeedSuperKeyword)
      Builder.AddTextChunk(Builder.getAllocator().CopyString(
                                                        Sel.getNameForSlot(0)));
    else
      Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
                                                        Sel.getNameForSlot(0)));
  } else {
    ObjCMethodDecl::param_iterator CurP = CurMethod->param_begin();
    for (unsigned I = 0, N = Sel.getNumArgs(); I != N; ++I, ++CurP) {
      if (I > SelIdents.size())
        Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
      if (I < SelIdents.size())
        // Selector fragments the user already typed are informative only.
        Builder.AddInformativeChunk(
                   Builder.getAllocator().CopyString(
                                                 Sel.getNameForSlot(I) + ":"));
      else if (NeedSuperKeyword || I > SelIdents.size()) {
        Builder.AddTextChunk(
                 Builder.getAllocator().CopyString(
                                                  Sel.getNameForSlot(I) + ":"));
        // Forward the current method's parameter by name.
        Builder.AddPlaceholderChunk(Builder.getAllocator().CopyString(
                                         (*CurP)->getIdentifier()->getName()));
      } else {
        Builder.AddTypedTextChunk(
                  Builder.getAllocator().CopyString(
                                                  Sel.getNameForSlot(I) + ":"));
        Builder.AddPlaceholderChunk(Builder.getAllocator().CopyString(
                                         (*CurP)->getIdentifier()->getName()));
      }
    }
  }
  Results.AddResult(CodeCompletionResult(Builder.TakeString(), SuperMethod,
                                         CCP_SuperCompletion));
  return SuperMethod;
}
/// Code completion for the receiver position of an Objective-C message send,
/// i.e. right after '['.
void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
  typedef CodeCompletionResult Result;
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_ObjCMessageReceiver,
                        getLangOpts().CPlusPlus11
                          ? &ResultBuilder::IsObjCMessageReceiverOrLambdaCapture
                          : &ResultBuilder::IsObjCMessageReceiver);
  CodeCompletionDeclConsumer Consumer(Results, CurContext);
  Results.EnterNewScope();
  LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
                     CodeCompleter->includeGlobals());
  // If we are in an Objective-C method inside a class that has a superclass,
  // add "super" as an option.
  if (ObjCMethodDecl *Method = getCurMethodDecl())
    if (ObjCInterfaceDecl *Iface = Method->getClassInterface())
      if (Iface->getSuperClass()) {
        Results.AddResult(Result("super"));
        // Also offer the forward-everything-to-super pattern.
        AddSuperSendCompletion(*this, /*NeedSuperKeyword=*/true, None, Results);
      }
  if (getLangOpts().CPlusPlus11)
    addThisCompletion(*this, Results);
  Results.ExitScope();
  if (CodeCompleter->includeMacros())
    AddMacroResults(PP, Results, false);
  HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
                            Results.data(), Results.size());
}
/// Code completion for a message send whose receiver is "super", dispatching
/// to instance- or class-message completion depending on what "super" means
/// in the current context.
void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
                                        ArrayRef<IdentifierInfo *> SelIdents,
                                        bool AtArgumentExpression) {
  ObjCInterfaceDecl *CDecl = nullptr;
  if (ObjCMethodDecl *CurMethod = getCurMethodDecl()) {
    // Figure out which interface we're in.
    CDecl = CurMethod->getClassInterface();
    if (!CDecl)
      return;
    // Find the superclass of this class.
    CDecl = CDecl->getSuperClass();
    if (!CDecl)
      return;
    if (CurMethod->isInstanceMethod()) {
      // We are inside an instance method, which means that the message
      // send [super ...] is actually calling an instance method on the
      // current object.
      return CodeCompleteObjCInstanceMessage(S, nullptr, SelIdents,
                                             AtArgumentExpression,
                                             CDecl);
    }
    // Fall through to send to the superclass in CDecl.
  } else {
    // "super" may be the name of a type or variable. Figure out which
    // it is.
    IdentifierInfo *Super = getSuperIdentifier();
    NamedDecl *ND = LookupSingleName(S, Super, SuperLoc,
                                     LookupOrdinaryName);
    if ((CDecl = dyn_cast_or_null<ObjCInterfaceDecl>(ND))) {
      // "super" names an interface. Use it.
    } else if (TypeDecl *TD = dyn_cast_or_null<TypeDecl>(ND)) {
      if (const ObjCObjectType *Iface
            = Context.getTypeDeclType(TD)->getAs<ObjCObjectType>())
        CDecl = Iface->getInterface();
    } else if (ND && isa<UnresolvedUsingTypenameDecl>(ND)) {
      // "super" names an unresolved type; we can't be more specific.
    } else {
      // Assume that "super" names some kind of value and parse that way.
      CXXScopeSpec SS;
      SourceLocation TemplateKWLoc;
      UnqualifiedId id;
      id.setIdentifier(Super, SuperLoc);
      ExprResult SuperExpr = ActOnIdExpression(S, SS, TemplateKWLoc, id,
                                               false, false);
      return CodeCompleteObjCInstanceMessage(S, (Expr *)SuperExpr.get(),
                                             SelIdents,
                                             AtArgumentExpression);
    }
    // Fall through
  }
  // Treat the send as a class message to the interface we determined above
  // (or to no particular class if we couldn't find one).
  ParsedType Receiver;
  if (CDecl)
    Receiver = ParsedType::make(Context.getObjCInterfaceType(CDecl));
  return CodeCompleteObjCClassMessage(S, Receiver, SelIdents,
                                      AtArgumentExpression,
                                      /*IsSuper=*/true);
}
/// \brief Given a set of code-completion results for the argument of a message
/// send, determine the preferred type (if any) for that argument expression.
///
/// \param Results the completion results, expected to contain ObjC method
/// declarations whose parameter at index \p NumSelIdents - 1 is the argument
/// being completed.
///
/// \param NumSelIdents the number of selector fragments already written;
/// must be >= 1 for a meaningful answer (0 yields a null type).
///
/// \returns the parameter type shared by the best-priority candidate methods,
/// or a null QualType if the candidates disagree or none qualify.
static QualType getPreferredArgumentTypeForMessageSend(ResultBuilder &Results,
                                                       unsigned NumSelIdents) {
  typedef CodeCompletionResult Result;
  ASTContext &Context = Results.getSema().Context;

  QualType PreferredType;
  unsigned BestPriority = CCP_Unlikely * 2;
  Result *ResultsData = Results.data();
  for (unsigned I = 0, N = Results.size(); I != N; ++I) {
    Result &R = ResultsData[I];
    if (R.Kind == Result::RK_Declaration &&
        isa<ObjCMethodDecl>(R.Declaration)) {
      if (R.Priority <= BestPriority) {
        const ObjCMethodDecl *Method = cast<ObjCMethodDecl>(R.Declaration);
        // Guard against NumSelIdents == 0: without it, the unsigned
        // subtraction below would wrap to (unsigned)-1 and index
        // parameters() out of bounds.
        if (NumSelIdents && NumSelIdents <= Method->param_size()) {
          QualType MyPreferredType = Method->parameters()[NumSelIdents - 1]
                                         ->getType();
          if (R.Priority < BestPriority || PreferredType.isNull()) {
            // Strictly better candidate (or first match): take its type.
            BestPriority = R.Priority;
            PreferredType = MyPreferredType;
          } else if (!Context.hasSameUnqualifiedType(PreferredType,
                                                     MyPreferredType)) {
            // Equal-priority candidates disagree: no single preferred type.
            PreferredType = QualType();
          }
        }
      }
    }
  }

  return PreferredType;
}
/// Add completions for a class message send: the factory methods of the
/// receiver class (if \p Receiver names one), or -- when messaging "id" as a
/// type -- every class/factory method in the global method pool.
static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
                                       ParsedType Receiver,
                                       ArrayRef<IdentifierInfo *> SelIdents,
                                       bool AtArgumentExpression,
                                       bool IsSuper,
                                       ResultBuilder &Results) {
  typedef CodeCompletionResult Result;
  ObjCInterfaceDecl *CDecl = nullptr;
  // If the given name refers to an interface type, retrieve the
  // corresponding declaration.
  if (Receiver) {
    QualType T = SemaRef.GetTypeFromParser(Receiver, nullptr);
    if (!T.isNull())
      if (const ObjCObjectType *Interface = T->getAs<ObjCObjectType>())
        CDecl = Interface->getInterface();
  }
  // Add all of the factory methods in this Objective-C class, its protocols,
  // superclasses, categories, implementation, etc.
  Results.EnterNewScope();
  // If this is a send-to-super, try to add the special "super" send
  // completion.
  if (IsSuper) {
    if (ObjCMethodDecl *SuperMethod
          = AddSuperSendCompletion(SemaRef, false, SelIdents, Results))
      Results.Ignore(SuperMethod);
  }
  // If we're inside an Objective-C method definition, prefer its selector to
  // others.
  if (ObjCMethodDecl *CurMethod = SemaRef.getCurMethodDecl())
    Results.setPreferredSelector(CurMethod->getSelector());
  VisitedSelectorSet Selectors;
  if (CDecl)
    // Known receiver class: walk its hierarchy for factory methods.
    AddObjCMethods(CDecl, false, MK_Any, SelIdents,
                   SemaRef.CurContext, Selectors, AtArgumentExpression,
                   Results);
  else {
    // We're messaging "id" as a type; provide all class/factory methods.
    // If we have an external source, load the entire class method
    // pool from the AST file.
    if (SemaRef.getExternalSource()) {
      for (uint32_t I = 0,
                    N = SemaRef.getExternalSource()->GetNumExternalSelectors();
           I != N; ++I) {
        Selector Sel = SemaRef.getExternalSource()->GetExternalSelector(I);
        if (Sel.isNull() || SemaRef.MethodPool.count(Sel))
          continue;
        SemaRef.ReadMethodPool(Sel);
      }
    }
    for (Sema::GlobalMethodPool::iterator M = SemaRef.MethodPool.begin(),
                                       MEnd = SemaRef.MethodPool.end();
         M != MEnd; ++M) {
      // The pool maps a selector to (instance list, factory list); walk the
      // factory-method list (second).
      for (ObjCMethodList *MethList = &M->second.second;
           MethList && MethList->getMethod();
           MethList = MethList->getNext()) {
        if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
          continue;
        Result R(MethList->getMethod(),
                 Results.getBasePriority(MethList->getMethod()), nullptr);
        R.StartParameter = SelIdents.size();
        R.AllParametersAreInformative = false;
        Results.MaybeAddResult(R, SemaRef.CurContext);
      }
    }
  }
  Results.ExitScope();
}
/// \brief Code completion for a class message send, e.g. [NSObject <...>].
void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
                                        ArrayRef<IdentifierInfo *> SelIdents,
                                        bool AtArgumentExpression,
                                        bool IsSuper) {
  // Gather every class-message candidate for this receiver type.
  QualType ReceiverType = GetTypeFromParser(Receiver);
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext(
                            CodeCompletionContext::CCC_ObjCClassMessage,
                            ReceiverType, SelIdents));
  AddClassMessageCompletions(*this, S, Receiver, SelIdents,
                             AtArgumentExpression, IsSuper, Results);
  if (!AtArgumentExpression) {
    HandleCodeCompleteResults(this, CodeCompleter,
                              Results.getCompletionContext(),
                              Results.data(), Results.size());
    return;
  }
  // The cursor sits where a message argument belongs, so we are really
  // completing an expression. If the candidate methods agree on a single
  // best parameter type, use it as the preferred type for the expression
  // completion; otherwise fall back to ordinary expression completion.
  QualType PreferredType =
      getPreferredArgumentTypeForMessageSend(Results, SelIdents.size());
  if (PreferredType.isNull())
    CodeCompleteOrdinaryName(S, PCC_Expression);
  else
    CodeCompleteExpression(S, PreferredType);
}
/// \brief Code completion for an Objective-C instance message send,
/// e.g. [myObject <complete>], or [super <complete>] when \p Super is set.
///
/// \param Receiver the receiver expression; null for a send to super.
/// \param SelIdents the selector fragments typed before the completion point.
/// \param AtArgumentExpression true when the cursor sits where a message
/// argument belongs rather than at a selector fragment.
/// \param Super the current superclass when this is a send to "super".
void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
                                           ArrayRef<IdentifierInfo *> SelIdents,
                                           bool AtArgumentExpression,
                                           ObjCInterfaceDecl *Super) {
  typedef CodeCompletionResult Result;
  Expr *RecExpr = static_cast<Expr *>(Receiver);
  // If necessary, apply function/array conversion to the receiver.
  // C99 6.7.5.3p[7,8].
  if (RecExpr) {
    ExprResult Conv = DefaultFunctionArrayLvalueConversion(RecExpr);
    if (Conv.isInvalid()) // conversion failed. bail.
      return;
    RecExpr = Conv.get();
  }
  // No receiver expression: a super send uses the superclass type;
  // otherwise assume "id".
  QualType ReceiverType = RecExpr? RecExpr->getType()
                          : Super? Context.getObjCObjectPointerType(
                                            Context.getObjCInterfaceType(Super))
                                 : Context.getObjCIdType();
  // If we're messaging an expression with type "id" or "Class", check
  // whether we know something special about the receiver that allows
  // us to assume a more-specific receiver type.
  if (ReceiverType->isObjCIdType() || ReceiverType->isObjCClassType()) {
    if (ObjCInterfaceDecl *IFace = GetAssumedMessageSendExprType(RecExpr)) {
      if (ReceiverType->isObjCClassType())
        return CodeCompleteObjCClassMessage(S,
                       ParsedType::make(Context.getObjCInterfaceType(IFace)),
                                            SelIdents,
                                            AtArgumentExpression, Super);
      ReceiverType = Context.getObjCObjectPointerType(
                                          Context.getObjCInterfaceType(IFace));
    }
  } else if (RecExpr && getLangOpts().CPlusPlus) {
    // In Objective-C++, a C++ object may convert to an Objective-C pointer
    // (e.g. via a conversion operator); complete against the converted type.
    ExprResult Conv = PerformContextuallyConvertToObjCPointer(RecExpr);
    if (Conv.isUsable()) {
      RecExpr = Conv.get();
      ReceiverType = RecExpr->getType();
    }
  }
  // Build the set of methods we can see.
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
           CodeCompletionContext(CodeCompletionContext::CCC_ObjCInstanceMessage,
                                 ReceiverType, SelIdents));
  Results.EnterNewScope();
  // If this is a send-to-super, try to add the special "super" send
  // completion.
  if (Super) {
    if (ObjCMethodDecl *SuperMethod
          = AddSuperSendCompletion(*this, false, SelIdents, Results))
      Results.Ignore(SuperMethod);
  }
  // If we're inside an Objective-C method definition, prefer its selector to
  // others.
  if (ObjCMethodDecl *CurMethod = getCurMethodDecl())
    Results.setPreferredSelector(CurMethod->getSelector());
  // Keep track of the selectors we've already added.
  VisitedSelectorSet Selectors;
  // Handle messages to Class. This really isn't a message to an instance
  // method, so we treat it the same way we would treat a message send to a
  // class method.
  if (ReceiverType->isObjCClassType() ||
      ReceiverType->isObjCQualifiedClassType()) {
    if (ObjCMethodDecl *CurMethod = getCurMethodDecl()) {
      if (ObjCInterfaceDecl *ClassDecl = CurMethod->getClassInterface())
        AddObjCMethods(ClassDecl, false, MK_Any, SelIdents,
                       CurContext, Selectors, AtArgumentExpression, Results);
    }
  }
  // Handle messages to a qualified ID ("id<foo>").
  else if (const ObjCObjectPointerType *QualID
             = ReceiverType->getAsObjCQualifiedIdType()) {
    // Search protocols for instance methods.
    for (auto *I : QualID->quals())
      AddObjCMethods(I, true, MK_Any, SelIdents, CurContext,
                     Selectors, AtArgumentExpression, Results);
  }
  // Handle messages to a pointer to interface type.
  else if (const ObjCObjectPointerType *IFacePtr
             = ReceiverType->getAsObjCInterfacePointerType()) {
    // Search the class, its superclasses, etc., for instance methods.
    AddObjCMethods(IFacePtr->getInterfaceDecl(), true, MK_Any, SelIdents,
                   CurContext, Selectors, AtArgumentExpression,
                   Results);
    // Search protocols for instance methods.
    for (auto *I : IFacePtr->quals())
      AddObjCMethods(I, true, MK_Any, SelIdents, CurContext,
                     Selectors, AtArgumentExpression, Results);
  }
  // Handle messages to "id".
  else if (ReceiverType->isObjCIdType()) {
    // We're messaging "id", so provide all instance methods we know
    // about as code-completion results.
    // If we have an external source, load the entire class method
    // pool from the AST file.
    if (ExternalSource) {
      for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
           I != N; ++I) {
        Selector Sel = ExternalSource->GetExternalSelector(I);
        if (Sel.isNull() || MethodPool.count(Sel))
          continue;
        ReadMethodPool(Sel);
      }
    }
    // Walk the instance-method chains (".first" of each pool entry).
    for (GlobalMethodPool::iterator M = MethodPool.begin(),
                                    MEnd = MethodPool.end();
         M != MEnd; ++M) {
      for (ObjCMethodList *MethList = &M->second.first;
           MethList && MethList->getMethod();
           MethList = MethList->getNext()) {
        if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
          continue;
        if (!Selectors.insert(MethList->getMethod()->getSelector()).second)
          continue;
        Result R(MethList->getMethod(),
                 Results.getBasePriority(MethList->getMethod()), nullptr);
        R.StartParameter = SelIdents.size();
        R.AllParametersAreInformative = false;
        Results.MaybeAddResult(R, CurContext);
      }
    }
  }
  Results.ExitScope();
  // If we're actually at the argument expression (rather than prior to the
  // selector), we're actually performing code completion for an expression.
  // Determine whether we have a single, best method. If so, we can
  // code-complete the expression using the corresponding parameter type as
  // our preferred type, improving completion results.
  if (AtArgumentExpression) {
    QualType PreferredType = getPreferredArgumentTypeForMessageSend(Results,
                                                              SelIdents.size());
    if (PreferredType.isNull())
      CodeCompleteOrdinaryName(S, PCC_Expression);
    else
      CodeCompleteExpression(S, PreferredType);
    return;
  }
  HandleCodeCompleteResults(this, CodeCompleter,
                            Results.getCompletionContext(),
                            Results.data(),Results.size());
}
/// \brief Code completion for the collection in an Objective-C fast
/// enumeration statement ("for (x in <complete>)").
void Sema::CodeCompleteObjCForCollection(Scope *S,
                                         DeclGroupPtrTy IterationVar) {
  CodeCompleteExpressionData Data;
  Data.ObjCCollection = true;
  // The just-declared iteration variable(s) cannot be the collection;
  // suppress them from the results.
  if (IterationVar.getAsOpaquePtr()) {
    for (Decl *D : IterationVar.get())
      if (D)
        Data.IgnoreDecls.push_back(D);
  }
  CodeCompleteExpression(S, Data);
}
/// \brief Code completion inside a @selector(...) expression.
///
/// \param SelIdents the selector fragments typed before the completion point;
/// completed fragments are shown as informative (non-typed) text.
void Sema::CodeCompleteObjCSelector(Scope *S,
                                    ArrayRef<IdentifierInfo *> SelIdents) {
  // If we have an external source, load the entire class method
  // pool from the AST file.
  if (ExternalSource) {
    for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
         I != N; ++I) {
      Selector Sel = ExternalSource->GetExternalSelector(I);
      if (Sel.isNull() || MethodPool.count(Sel))
        continue;
      ReadMethodPool(Sel);
    }
  }
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_SelectorName);
  Results.EnterNewScope();
  for (GlobalMethodPool::iterator M = MethodPool.begin(),
                               MEnd = MethodPool.end();
       M != MEnd; ++M) {
    Selector Sel = M->first;
    if (!isAcceptableObjCSelector(Sel, MK_Any, SelIdents))
      continue;
    CodeCompletionBuilder Builder(Results.getAllocator(),
                                  Results.getCodeCompletionTUInfo());
    if (Sel.isUnarySelector()) {
      Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
                                                       Sel.getNameForSlot(0)));
      Results.AddResult(Builder.TakeString());
      continue;
    }
    // Accumulate selector pieces. Everything the user has already typed
    // (the first SelIdents.size() pieces) is flushed as a single
    // informative chunk; the remaining pieces become the typed-text chunk.
    std::string Accumulator;
    for (unsigned I = 0, N = Sel.getNumArgs(); I != N; ++I) {
      if (I == SelIdents.size()) {
        if (!Accumulator.empty()) {
          Builder.AddInformativeChunk(Builder.getAllocator().CopyString(
                                                 Accumulator));
          Accumulator.clear();
        }
      }
      Accumulator += Sel.getNameForSlot(I);
      Accumulator += ':';
    }
    Builder.AddTypedTextChunk(Builder.getAllocator().CopyString( Accumulator));
    Results.AddResult(Builder.TakeString());
  }
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_SelectorName,
                            Results.data(), Results.size());
}
/// \brief Add all of the Objective-C protocol declarations found directly in
/// the given (translation unit) context to \p Results.
///
/// \param OnlyForwardDeclarations when true, only protocols that do not yet
/// have a definition are offered.
static void AddProtocolResults(DeclContext *Ctx, DeclContext *CurContext,
                               bool OnlyForwardDeclarations,
                               ResultBuilder &Results) {
  typedef CodeCompletionResult Result;
  for (const auto *D : Ctx->decls()) {
    const auto *Proto = dyn_cast<ObjCProtocolDecl>(D);
    if (!Proto)
      continue;
    // Skip already-defined protocols when only forward declarations were
    // requested.
    if (OnlyForwardDeclarations && Proto->hasDefinition())
      continue;
    Results.AddResult(Result(Proto, Results.getBasePriority(Proto), nullptr),
                      CurContext, nullptr, false);
  }
}
/// \brief Code completion within a protocol reference list, e.g.
/// "id<Proto1, <complete>>".
void Sema::CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
                                              unsigned NumProtocols) {
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_ObjCProtocolName);
  if (CodeCompleter && CodeCompleter->includeGlobals()) {
    Results.EnterNewScope();
    // Protocols already named in this reference list should not be offered
    // a second time.
    // FIXME: This doesn't work when caching code-completion results.
    for (unsigned I = 0; I != NumProtocols; ++I) {
      ObjCProtocolDecl *Known =
          LookupProtocol(Protocols[I].first, Protocols[I].second);
      if (Known)
        Results.Ignore(Known);
    }
    // Offer every protocol known in the translation unit.
    AddProtocolResults(Context.getTranslationUnitDecl(), CurContext,
                       /*OnlyForwardDeclarations=*/false, Results);
    Results.ExitScope();
  }
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_ObjCProtocolName,
                            Results.data(), Results.size());
}
/// \brief Code completion for the name in a @protocol declaration; only
/// forward-declared (not-yet-defined) protocols are sensible here.
void Sema::CodeCompleteObjCProtocolDecl(Scope *) {
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_ObjCProtocolName);
  if (CodeCompleter && CodeCompleter->includeGlobals()) {
    Results.EnterNewScope();
    AddProtocolResults(Context.getTranslationUnitDecl(), CurContext,
                       /*OnlyForwardDeclarations=*/true, Results);
    Results.ExitScope();
  }
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_ObjCProtocolName,
                            Results.data(), Results.size());
}
/// \brief Add all of the Objective-C interface declarations found directly in
/// the given (translation unit) context to \p Results.
///
/// \param OnlyForwardDeclarations offer only classes that lack a definition.
/// \param OnlyUnimplemented offer only classes without an @implementation.
static void AddInterfaceResults(DeclContext *Ctx, DeclContext *CurContext,
                                bool OnlyForwardDeclarations,
                                bool OnlyUnimplemented,
                                ResultBuilder &Results) {
  typedef CodeCompletionResult Result;
  for (const auto *D : Ctx->decls()) {
    const auto *Class = dyn_cast<ObjCInterfaceDecl>(D);
    if (!Class)
      continue;
    if (OnlyForwardDeclarations && Class->hasDefinition())
      continue;
    if (OnlyUnimplemented && Class->getImplementation())
      continue;
    Results.AddResult(Result(Class, Results.getBasePriority(Class), nullptr),
                      CurContext, nullptr, false);
  }
}
/// \brief Code completion for the class name in an @interface declaration:
/// offer all known Objective-C classes.
void Sema::CodeCompleteObjCInterfaceDecl(Scope *S) {
  // Build the ResultBuilder with the same CCC_ObjCInterfaceName context that
  // is reported to the consumer below. Previously it was built with
  // CCC_Other, which disagreed with the reported context and could mislead
  // clients that key cached results on the completion-context kind.
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_ObjCInterfaceName);
  Results.EnterNewScope();
  if (CodeCompleter->includeGlobals()) {
    // Add all classes.
    AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
                        false, Results);
  }
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            Results.getCompletionContext(),
                            Results.data(), Results.size());
}
/// \brief Code completion for the superclass name in
/// "@interface ClassName : <complete>".
void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
                                      SourceLocation ClassNameLoc) {
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_ObjCInterfaceName);
  Results.EnterNewScope();
  // A class cannot be its own superclass, so suppress the class currently
  // being defined.
  NamedDecl *CurClass
    = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
  if (ObjCInterfaceDecl *DefinedClass =
          dyn_cast_or_null<ObjCInterfaceDecl>(CurClass))
    Results.Ignore(DefinedClass);
  if (CodeCompleter->includeGlobals()) {
    // Offer every known class.
    AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext,
                        /*OnlyForwardDeclarations=*/false,
                        /*OnlyUnimplemented=*/false, Results);
  }
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_ObjCInterfaceName,
                            Results.data(), Results.size());
}
/// \brief Code completion for the class name in an @implementation: offer
/// classes that have been declared but not yet implemented.
void Sema::CodeCompleteObjCImplementationDecl(Scope *S) {
  // Build the ResultBuilder with the same CCC_ObjCInterfaceName context that
  // is reported to the consumer below. Previously it was built with
  // CCC_Other, which disagreed with the reported context and could mislead
  // clients that key cached results on the completion-context kind.
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_ObjCInterfaceName);
  Results.EnterNewScope();
  if (CodeCompleter->includeGlobals()) {
    // Add all unimplemented classes.
    AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
                        true, Results);
  }
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            Results.getCompletionContext(),
                            Results.data(), Results.size());
}
/// \brief Code completion for the category name in
/// "@interface ClassName (<complete>)".
void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
                                             IdentifierInfo *ClassName,
                                             SourceLocation ClassNameLoc) {
  typedef CodeCompletionResult Result;
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_ObjCCategoryName);
  // Seed the set with the category names this interface already has, so they
  // are not suggested again; the same set also deduplicates the results we
  // emit below.
  llvm::SmallPtrSet<IdentifierInfo *, 16> CategoryNames;
  NamedDecl *CurClass
    = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
  if (ObjCInterfaceDecl *Class =
          dyn_cast_or_null<ObjCInterfaceDecl>(CurClass))
    for (const auto *Cat : Class->visible_categories())
      CategoryNames.insert(Cat->getIdentifier());
  // Offer every category name known anywhere in the translation unit that is
  // not already attached to this class.
  Results.EnterNewScope();
  TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
  for (const auto *D : TU->decls()) {
    const auto *Category = dyn_cast<ObjCCategoryDecl>(D);
    if (!Category || !CategoryNames.insert(Category->getIdentifier()).second)
      continue;
    Results.AddResult(Result(Category, Results.getBasePriority(Category),
                             nullptr),
                      CurContext, nullptr, false);
  }
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_ObjCCategoryName,
                            Results.data(), Results.size());
}
/// \brief Code completion for the category name in
/// "@implementation ClassName (<complete>)".
void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
                                                  IdentifierInfo *ClassName,
                                                  SourceLocation ClassNameLoc) {
  typedef CodeCompletionResult Result;
  // Find the interface this implementation corresponds to. Without one the
  // program is ill-formed, but we can still be helpful by offering the full
  // list of known categories.
  NamedDecl *CurClass
    = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
  ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurClass);
  if (!Class)
    return CodeCompleteObjCInterfaceCategory(S, ClassName, ClassNameLoc);
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_ObjCCategoryName);
  // Walk this class and its superclasses, offering each declared category
  // once. Categories that already have an implementation are skipped, but
  // only for the class itself (the first iteration): a superclass's
  // implemented category may still be implemented for this class.
  llvm::SmallPtrSet<IdentifierInfo *, 16> CategoryNames;
  Results.EnterNewScope();
  for (bool SkipImplemented = true; Class;
       Class = Class->getSuperClass(), SkipImplemented = false) {
    for (const auto *Cat : Class->visible_categories()) {
      if (SkipImplemented && Cat->getImplementation())
        continue;
      if (!CategoryNames.insert(Cat->getIdentifier()).second)
        continue;
      Results.AddResult(Result(Cat, Results.getBasePriority(Cat), nullptr),
                        CurContext, nullptr, false);
    }
  }
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_ObjCCategoryName,
                            Results.data(), Results.size());
}
/// \brief Code completion after "@synthesize"/"@dynamic": offer the
/// properties of the enclosing implementation that are not yet implemented.
void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
  CodeCompletionContext CCContext(CodeCompletionContext::CCC_Other);
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CCContext);
  // @synthesize is only meaningful inside a class or category
  // @implementation.
  ObjCContainerDecl *Container
    = dyn_cast_or_null<ObjCContainerDecl>(CurContext);
  if (!Container)
    return;
  if (!isa<ObjCImplementationDecl>(Container) &&
      !isa<ObjCCategoryImplDecl>(Container))
    return;
  // Suppress properties that already have an implementation here.
  Container = getContainerDef(Container);
  for (const auto *D : Container->decls())
    if (const auto *PropertyImpl = dyn_cast<ObjCPropertyImplDecl>(D))
      Results.Ignore(PropertyImpl->getPropertyDecl());
  // Offer the remaining properties of the corresponding interface or
  // category.
  AddedPropertiesSet AddedProperties;
  Results.EnterNewScope();
  if (ObjCImplementationDecl *ClassImpl =
          dyn_cast<ObjCImplementationDecl>(Container))
    AddObjCProperties(CCContext, ClassImpl->getClassInterface(), false,
                      /*AllowNullaryMethods=*/false, CurContext,
                      AddedProperties, Results);
  else
    AddObjCProperties(CCContext,
                      cast<ObjCCategoryImplDecl>(Container)->getCategoryDecl(),
                      false, /*AllowNullaryMethods=*/false, CurContext,
                      AddedProperties, Results);
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_Other,
                            Results.data(), Results.size());
}
/// \brief Code completion for the ivar name in "@synthesize prop = <ivar>".
///
/// Offers every instance variable of the class (and superclasses), boosting
/// ivars whose names resemble the property name (prop, _prop, prop_), and
/// synthesizes a "_prop" suggestion when no such ivar exists yet.
void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
                                                  IdentifierInfo *PropertyName) {
  typedef CodeCompletionResult Result;
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_Other);
  // Figure out where this @synthesize lives.
  ObjCContainerDecl *Container
    = dyn_cast_or_null<ObjCContainerDecl>(CurContext);
  if (!Container ||
      (!isa<ObjCImplementationDecl>(Container) &&
       !isa<ObjCCategoryImplDecl>(Container)))
    return;
  // Figure out which interface we're looking into.
  ObjCInterfaceDecl *Class = nullptr;
  if (ObjCImplementationDecl *ClassImpl
                                 = dyn_cast<ObjCImplementationDecl>(Container))
    Class = ClassImpl->getClassInterface();
  else
    Class = cast<ObjCCategoryImplDecl>(Container)->getCategoryDecl()
                                                          ->getClassInterface();
  // Determine the type of the property we're synthesizing.
  QualType PropertyType = Context.getObjCIdType();
  if (Class) {
    if (ObjCPropertyDecl *Property
                              = Class->FindPropertyDeclaration(PropertyName)) {
      PropertyType
        = Property->getType().getNonReferenceType().getUnqualifiedType();
      // Give preference to ivars
      Results.setPreferredType(PropertyType);
    }
  }
  // Add all of the instance variables in this class and its superclasses.
  Results.EnterNewScope();
  bool SawSimilarlyNamedIvar = false;
  // Candidate "similar" spellings: "_<prop>" and "<prop>_".
  std::string NameWithPrefix;
  NameWithPrefix += '_';
  NameWithPrefix += PropertyName->getName();
  std::string NameWithSuffix = PropertyName->getName().str();
  NameWithSuffix += '_';
  for(; Class; Class = Class->getSuperClass()) {
    for (ObjCIvarDecl *Ivar = Class->all_declared_ivar_begin(); Ivar;
         Ivar = Ivar->getNextIvar()) {
      Results.AddResult(Result(Ivar, Results.getBasePriority(Ivar), nullptr),
                        CurContext, nullptr, false);
      // Determine whether we've seen an ivar with a name similar to the
      // property.
      if ((PropertyName == Ivar->getIdentifier() ||
           NameWithPrefix == Ivar->getName() ||
           NameWithSuffix == Ivar->getName())) {
        SawSimilarlyNamedIvar = true;
        // Reduce the priority of this result by one, to give it a slight
        // advantage over other results whose names don't match so closely.
        // (Lower priority values rank higher; only tweak the result if it is
        // in fact the one we just added for this ivar.)
        if (Results.size() &&
            Results.data()[Results.size() - 1].Kind
                                      == CodeCompletionResult::RK_Declaration &&
            Results.data()[Results.size() - 1].Declaration == Ivar)
          Results.data()[Results.size() - 1].Priority--;
      }
    }
  }
  if (!SawSimilarlyNamedIvar) {
    // Create ivar result _propName, that the user can use to synthesize
    // an ivar of the appropriate type.
    unsigned Priority = CCP_MemberDeclaration + 1;
    typedef CodeCompletionResult Result;
    CodeCompletionAllocator &Allocator = Results.getAllocator();
    CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo(),
                                  Priority,CXAvailability_Available);
    PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
    Builder.AddResultTypeChunk(GetCompletionTypeString(PropertyType, Context,
                                                       Policy, Allocator));
    Builder.AddTypedTextChunk(Allocator.CopyString(NameWithPrefix));
    Results.AddResult(Result(Builder.TakeString(), Priority,
                             CXCursor_ObjCIvarDecl));
  }
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_Other,
                            Results.data(),Results.size());
}
// Mapping from selectors to the methods that implement that selector, along
// with the "in original class" flag: true when the method was found in the
// container originally searched (or its protocols), false when it came from
// a superclass or category (see FindImplementableMethods).
typedef llvm::DenseMap<
    Selector, llvm::PointerIntPair<ObjCMethodDecl *, 1, bool> > KnownMethodsMap;
/// \brief Find all of the methods that reside in the given
/// (and its superclasses, protocols, etc.) that meet the given
/// criteria. Insert those methods into the map of known methods,
/// indexed by selector so they can be easily found.
///
/// \param WantInstanceMethods collect instance methods when true, class
/// methods otherwise.
/// \param ReturnType when non-null, only methods with this (unqualified)
/// return type are collected.
/// \param InOriginalClass propagated into the map entries; false once we
/// recurse into superclasses or categories.
static void FindImplementableMethods(ASTContext &Context,
                                     ObjCContainerDecl *Container,
                                     bool WantInstanceMethods,
                                     QualType ReturnType,
                                     KnownMethodsMap &KnownMethods,
                                     bool InOriginalClass = true) {
  if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container)) {
    // Make sure we have a definition; that's what we'll walk.
    if (!IFace->hasDefinition())
      return;
    IFace = IFace->getDefinition();
    Container = IFace;
    const ObjCList<ObjCProtocolDecl> &Protocols
      = IFace->getReferencedProtocols();
    for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
                                              E = Protocols.end();
         I != E; ++I)
      FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
                               KnownMethods, InOriginalClass);
    // Add methods from any class extensions and categories.
    for (auto *Cat : IFace->visible_categories()) {
      FindImplementableMethods(Context, Cat, WantInstanceMethods, ReturnType,
                               KnownMethods, false);
    }
    // Visit the superclass.
    if (IFace->getSuperClass())
      FindImplementableMethods(Context, IFace->getSuperClass(),
                               WantInstanceMethods, ReturnType,
                               KnownMethods, false);
  }
  if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Container)) {
    // Recurse into protocols.
    const ObjCList<ObjCProtocolDecl> &Protocols
      = Category->getReferencedProtocols();
    for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
                                              E = Protocols.end();
         I != E; ++I)
      FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
                               KnownMethods, InOriginalClass);
    // If this category is the original class, jump to the interface.
    if (InOriginalClass && Category->getClassInterface())
      FindImplementableMethods(Context, Category->getClassInterface(),
                               WantInstanceMethods, ReturnType, KnownMethods,
                               false);
  }
  if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
    // Make sure we have a definition; that's what we'll walk.
    if (!Protocol->hasDefinition())
      return;
    Protocol = Protocol->getDefinition();
    Container = Protocol;
    // Recurse into protocols.
    const ObjCList<ObjCProtocolDecl> &Protocols
      = Protocol->getReferencedProtocols();
    for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
           E = Protocols.end();
         I != E; ++I)
      FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
                               KnownMethods, false);
  }
  // Add methods in this container. This operation occurs last because
  // we want the methods from this container to override any methods
  // we've previously seen with the same selector.
  for (auto *M : Container->methods()) {
    if (M->isInstanceMethod() == WantInstanceMethods) {
      if (!ReturnType.isNull() &&
          !Context.hasSameUnqualifiedType(ReturnType, M->getReturnType()))
        continue;
      KnownMethods[M->getSelector()] =
          KnownMethodsMap::mapped_type(M, InOriginalClass);
    }
  }
}
/// \brief Append a parenthesized "(qualifiers type)" chunk — as it would
/// appear as a method return or parameter type — to a code completion string.
static void AddObjCPassingTypeChunk(QualType Type,
                                    unsigned ObjCDeclQuals,
                                    ASTContext &Context,
                                    const PrintingPolicy &Policy,
                                    CodeCompletionBuilder &Builder) {
  Builder.AddChunk(CodeCompletionString::CK_LeftParen);
  // Emit any Objective-C declaration qualifiers (in/out/byref/...) first.
  std::string QualText = formatObjCParamQualifiers(ObjCDeclQuals, Type);
  if (!QualText.empty())
    Builder.AddTextChunk(Builder.getAllocator().CopyString(QualText));
  Builder.AddTextChunk(GetCompletionTypeString(Type, Context, Policy,
                                               Builder.getAllocator()));
  Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
/// \brief Determine whether the given class is, or inherits from, a class
/// with the given name, by walking the superclass chain.
static bool InheritsFromClassNamed(ObjCInterfaceDecl *Class,
                                   StringRef Name) {
  for (; Class; Class = Class->getSuperClass())
    if (Class->getIdentifier() && Class->getIdentifier()->getName() == Name)
      return true;
  return false;
}
/// \brief Add code completions for Objective-C Key-Value Coding (KVC) and
/// Key-Value Observing (KVO).
static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
bool IsInstanceMethod,
QualType ReturnType,
ASTContext &Context,
VisitedSelectorSet &KnownSelectors,
ResultBuilder &Results) {
IdentifierInfo *PropName = Property->getIdentifier();
if (!PropName || PropName->getLength() == 0)
return;
PrintingPolicy Policy = getCompletionPrintingPolicy(Results.getSema());
// Builder that will create each code completion.
typedef CodeCompletionResult Result;
CodeCompletionAllocator &Allocator = Results.getAllocator();
CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
// The selector table.
SelectorTable &Selectors = Context.Selectors;
// The property name, copied into the code completion allocation region
// on demand.
struct KeyHolder {
CodeCompletionAllocator &Allocator;
StringRef Key;
const char *CopiedKey;
KeyHolder(CodeCompletionAllocator &Allocator, StringRef Key)
: Allocator(Allocator), Key(Key), CopiedKey(nullptr) {}
operator const char *() {
if (CopiedKey)
return CopiedKey;
return CopiedKey = Allocator.CopyString(Key);
}
} Key(Allocator, PropName->getName());
// The uppercased name of the property name.
std::string UpperKey = PropName->getName();
if (!UpperKey.empty())
UpperKey[0] = toUppercase(UpperKey[0]);
bool ReturnTypeMatchesProperty = ReturnType.isNull() ||
Context.hasSameUnqualifiedType(ReturnType.getNonReferenceType(),
Property->getType());
bool ReturnTypeMatchesVoid
= ReturnType.isNull() || ReturnType->isVoidType();
// Add the normal accessor -(type)key.
if (IsInstanceMethod &&
KnownSelectors.insert(Selectors.getNullarySelector(PropName)).second &&
ReturnTypeMatchesProperty && !Property->getGetterMethodDecl()) {
if (ReturnType.isNull())
AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0,
Context, Policy, Builder);
Builder.AddTypedTextChunk(Key);
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
CXCursor_ObjCInstanceMethodDecl));
}
// If we have an integral or boolean property (or the user has provided
// an integral or boolean return type), add the accessor -(type)isKey.
if (IsInstanceMethod &&
((!ReturnType.isNull() &&
(ReturnType->isIntegerType() || ReturnType->isBooleanType())) ||
(ReturnType.isNull() &&
(Property->getType()->isIntegerType() ||
Property->getType()->isBooleanType())))) {
std::string SelectorName = (Twine("is") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
.second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("BOOL");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(
Allocator.CopyString(SelectorId->getName()));
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
CXCursor_ObjCInstanceMethodDecl));
}
}
// Add the normal mutator.
if (IsInstanceMethod && ReturnTypeMatchesVoid &&
!Property->getSetterMethodDecl()) {
std::string SelectorName = (Twine("set") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(
Allocator.CopyString(SelectorId->getName()));
Builder.AddTypedTextChunk(":");
AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0,
Context, Policy, Builder);
Builder.AddTextChunk(Key);
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
CXCursor_ObjCInstanceMethodDecl));
}
}
// Indexed and unordered accessors
unsigned IndexedGetterPriority = CCP_CodePattern;
unsigned IndexedSetterPriority = CCP_CodePattern;
unsigned UnorderedGetterPriority = CCP_CodePattern;
unsigned UnorderedSetterPriority = CCP_CodePattern;
if (const ObjCObjectPointerType *ObjCPointer
= Property->getType()->getAs<ObjCObjectPointerType>()) {
if (ObjCInterfaceDecl *IFace = ObjCPointer->getInterfaceDecl()) {
// If this interface type is not provably derived from a known
// collection, penalize the corresponding completions.
if (!InheritsFromClassNamed(IFace, "NSMutableArray")) {
IndexedSetterPriority += CCD_ProbablyNotObjCCollection;
if (!InheritsFromClassNamed(IFace, "NSArray"))
IndexedGetterPriority += CCD_ProbablyNotObjCCollection;
}
if (!InheritsFromClassNamed(IFace, "NSMutableSet")) {
UnorderedSetterPriority += CCD_ProbablyNotObjCCollection;
if (!InheritsFromClassNamed(IFace, "NSSet"))
UnorderedGetterPriority += CCD_ProbablyNotObjCCollection;
}
}
} else {
IndexedGetterPriority += CCD_ProbablyNotObjCCollection;
IndexedSetterPriority += CCD_ProbablyNotObjCCollection;
UnorderedGetterPriority += CCD_ProbablyNotObjCCollection;
UnorderedSetterPriority += CCD_ProbablyNotObjCCollection;
}
// Add -(NSUInteger)countOf<key>
if (IsInstanceMethod &&
(ReturnType.isNull() || ReturnType->isIntegerType())) {
std::string SelectorName = (Twine("countOf") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
.second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSUInteger");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(
Allocator.CopyString(SelectorId->getName()));
Results.AddResult(Result(Builder.TakeString(),
std::min(IndexedGetterPriority,
UnorderedGetterPriority),
CXCursor_ObjCInstanceMethodDecl));
}
}
// Indexed getters
// Add -(id)objectInKeyAtIndex:(NSUInteger)index
if (IsInstanceMethod &&
(ReturnType.isNull() || ReturnType->isObjCObjectPointerType())) {
std::string SelectorName
= (Twine("objectIn") + UpperKey + "AtIndex").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("id");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSUInteger");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("index");
Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// Add -(NSArray *)keyAtIndexes:(NSIndexSet *)indexes
if (IsInstanceMethod &&
(ReturnType.isNull() ||
(ReturnType->isObjCObjectPointerType() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
->getName() == "NSArray"))) {
std::string SelectorName
= (Twine(Property->getName()) + "AtIndexes").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSArray *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSIndexSet *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("indexes");
Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// Add -(void)getKey:(type **)buffer range:(NSRange)inRange
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("get") + UpperKey).str();
IdentifierInfo *SelectorIds[2] = {
&Context.Idents.get(SelectorName),
&Context.Idents.get("range")
};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("object-type");
Builder.AddTextChunk(" **");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("buffer");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTypedTextChunk("range:");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSRange");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("inRange");
Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// Mutable indexed accessors
// - (void)insertObject:(type *)object inKeyAtIndex:(NSUInteger)index
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("in") + UpperKey + "AtIndex").str();
IdentifierInfo *SelectorIds[2] = {
&Context.Idents.get("insertObject"),
&Context.Idents.get(SelectorName)
};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk("insertObject:");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("object-type");
Builder.AddTextChunk(" *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("object");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("NSUInteger");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("index");
Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// - (void)insertKey:(NSArray *)array atIndexes:(NSIndexSet *)indexes
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("insert") + UpperKey).str();
IdentifierInfo *SelectorIds[2] = {
&Context.Idents.get(SelectorName),
&Context.Idents.get("atIndexes")
};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSArray *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("array");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTypedTextChunk("atIndexes:");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("NSIndexSet *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("indexes");
Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// -(void)removeObjectFromKeyAtIndex:(NSUInteger)index
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName
= (Twine("removeObjectFrom") + UpperKey + "AtIndex").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSUInteger");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("index");
Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// -(void)removeKeyAtIndexes:(NSIndexSet *)indexes
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName
= (Twine("remove") + UpperKey + "AtIndexes").str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSIndexSet *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("indexes");
Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// - (void)replaceObjectInKeyAtIndex:(NSUInteger)index withObject:(id)object
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName
= (Twine("replaceObjectIn") + UpperKey + "AtIndex").str();
IdentifierInfo *SelectorIds[2] = {
&Context.Idents.get(SelectorName),
&Context.Idents.get("withObject")
};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("NSUInteger");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("index");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTypedTextChunk("withObject:");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("id");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("object");
Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// - (void)replaceKeyAtIndexes:(NSIndexSet *)indexes withKey:(NSArray *)array
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName1
= (Twine("replace") + UpperKey + "AtIndexes").str();
std::string SelectorName2 = (Twine("with") + UpperKey).str();
IdentifierInfo *SelectorIds[2] = {
&Context.Idents.get(SelectorName1),
&Context.Idents.get(SelectorName2)
};
if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName1 + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("NSIndexSet *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("indexes");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName2 + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSArray *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("array");
Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// Unordered getters
// - (NSEnumerator *)enumeratorOfKey
if (IsInstanceMethod &&
(ReturnType.isNull() ||
(ReturnType->isObjCObjectPointerType() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
->getName() == "NSEnumerator"))) {
std::string SelectorName = (Twine("enumeratorOf") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
.second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSEnumerator *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
Results.AddResult(Result(Builder.TakeString(), UnorderedGetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// - (type *)memberOfKey:(type *)object
if (IsInstanceMethod &&
(ReturnType.isNull() || ReturnType->isObjCObjectPointerType())) {
std::string SelectorName = (Twine("memberOf") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("object-type");
Builder.AddTextChunk(" *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
if (ReturnType.isNull()) {
Builder.AddPlaceholderChunk("object-type");
Builder.AddTextChunk(" *");
} else {
Builder.AddTextChunk(GetCompletionTypeString(ReturnType, Context,
Policy,
Builder.getAllocator()));
}
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("object");
Results.AddResult(Result(Builder.TakeString(), UnorderedGetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// Mutable unordered accessors
// - (void)addKeyObject:(type *)object
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName
= (Twine("add") + UpperKey + Twine("Object")).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("object-type");
Builder.AddTextChunk(" *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("object");
Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// - (void)addKey:(NSSet *)objects
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("add") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSSet *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("objects");
Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// - (void)removeKeyObject:(type *)object
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName
= (Twine("remove") + UpperKey + Twine("Object")).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("object-type");
Builder.AddTextChunk(" *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("object");
Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// - (void)removeKey:(NSSet *)objects
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("remove") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSSet *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("objects");
Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// - (void)intersectKey:(NSSet *)objects
if (IsInstanceMethod && ReturnTypeMatchesVoid) {
std::string SelectorName = (Twine("intersect") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId)).second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("void");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSSet *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Builder.AddTextChunk("objects");
Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
CXCursor_ObjCInstanceMethodDecl));
}
}
// Key-Value Observing
// + (NSSet *)keyPathsForValuesAffectingKey
if (!IsInstanceMethod &&
(ReturnType.isNull() ||
(ReturnType->isObjCObjectPointerType() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
->getName() == "NSSet"))) {
std::string SelectorName
= (Twine("keyPathsForValuesAffecting") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
.second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("NSSet *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
CXCursor_ObjCClassMethodDecl));
}
}
// + (BOOL)automaticallyNotifiesObserversOf<Key>
if (!IsInstanceMethod &&
(ReturnType.isNull() ||
ReturnType->isIntegerType() ||
ReturnType->isBooleanType())) {
std::string SelectorName
= (Twine("automaticallyNotifiesObserversOf") + UpperKey).str();
IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))
.second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddTextChunk("BOOL");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
CXCursor_ObjCClassMethodDecl));
}
}
}
/// \brief Provide code completions for an Objective-C method declaration,
/// e.g. after '-' or '+' (and an optional result type) inside an
/// \@interface, \@implementation, category, or protocol.
///
/// Suggests every method that could be declared or implemented in the
/// current container (gathered by FindImplementableMethods), plus
/// Key-Value Coding/Observing accessor patterns derived from the
/// container's properties.
///
/// \param S the scope in which completion occurs.
/// \param IsInstanceMethod true after '-', false after '+'.
/// \param ReturnTy the already-parsed result type, if the user wrote one;
/// when present, candidates are filtered to a matching result type and the
/// "(type)" chunk is omitted from each pattern.
void Sema::CodeCompleteObjCMethodDecl(Scope *S,
                                      bool IsInstanceMethod,
                                      ParsedType ReturnTy) {
  // Determine the return type of the method we're declaring, if
  // provided.
  QualType ReturnType = GetTypeFromParser(ReturnTy);
  Decl *IDecl = nullptr;
  if (CurContext->isObjCContainer()) {
    ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
    IDecl = cast<Decl>(OCD);
  }
  // Determine where we should start searching for methods.
  ObjCContainerDecl *SearchDecl = nullptr;
  bool IsInImplementation = false;
  if (Decl *D = IDecl) {
    if (ObjCImplementationDecl *Impl = dyn_cast<ObjCImplementationDecl>(D)) {
      // In an @implementation: search the corresponding interface, and note
      // that we may offer full method *definitions* (with bodies) here.
      SearchDecl = Impl->getClassInterface();
      IsInImplementation = true;
    } else if (ObjCCategoryImplDecl *CatImpl
                 = dyn_cast<ObjCCategoryImplDecl>(D)) {
      SearchDecl = CatImpl->getCategoryDecl();
      IsInImplementation = true;
    } else
      SearchDecl = dyn_cast<ObjCContainerDecl>(D);
  }
  if (!SearchDecl && S) {
    // Fall back to the Objective-C container associated with the scope.
    if (DeclContext *DC = S->getEntity())
      SearchDecl = dyn_cast<ObjCContainerDecl>(DC);
  }
  if (!SearchDecl) {
    // Not inside any Objective-C container: nothing to suggest.
    HandleCodeCompleteResults(this, CodeCompleter,
                              CodeCompletionContext::CCC_Other,
                              nullptr, 0);
    return;
  }
  // Find all of the methods that we could declare/implement here.
  KnownMethodsMap KnownMethods;
  FindImplementableMethods(Context, SearchDecl, IsInstanceMethod,
                           ReturnType, KnownMethods);
  // Add declarations or definitions for each of the known methods.
  typedef CodeCompletionResult Result;
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_Other);
  Results.EnterNewScope();
  PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
  for (KnownMethodsMap::iterator M = KnownMethods.begin(),
                                 MEnd = KnownMethods.end();
       M != MEnd; ++M) {
    ObjCMethodDecl *Method = M->second.getPointer();
    CodeCompletionBuilder Builder(Results.getAllocator(),
                                  Results.getCodeCompletionTUInfo());
    // If the result type was not already provided, add it to the
    // pattern as (type).
    if (ReturnType.isNull())
      AddObjCPassingTypeChunk(Method->getSendResultType()
                                  .stripObjCKindOfType(Context),
                              Method->getObjCDeclQualifier(), Context, Policy,
                              Builder);
    Selector Sel = Method->getSelector();
    // Add the first part of the selector to the pattern.
    Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
                                  Sel.getNameForSlot(0)));
    // Add parameters to the pattern.
    unsigned I = 0;
    for (ObjCMethodDecl::param_iterator P = Method->param_begin(),
                                        PEnd = Method->param_end();
         P != PEnd; (void)++P, ++I) {
      // Add the part of the selector name.
      if (I == 0)
        Builder.AddTypedTextChunk(":");
      else if (I < Sel.getNumArgs()) {
        Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
        Builder.AddTypedTextChunk(
            Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
      } else
        break;
      // Add the parameter type.
      // Prefer the as-written (original) type, except when nullability
      // qualifiers are in play, where the adjusted type carries them.
      QualType ParamType;
      if ((*P)->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
        ParamType = (*P)->getType();
      else
        ParamType = (*P)->getOriginalType();
      // Substitute away references to the enclosing class's type parameters.
      ParamType = ParamType.substObjCTypeArgs(Context, {},
                                         ObjCSubstitutionContext::Parameter);
      AddObjCPassingTypeChunk(ParamType,
                              (*P)->getObjCDeclQualifier(),
                              Context, Policy,
                              Builder);
      if (IdentifierInfo *Id = (*P)->getIdentifier())
        Builder.AddTextChunk(Builder.getAllocator().CopyString( Id->getName()));
    }
    if (Method->isVariadic()) {
      if (Method->param_size() > 0)
        Builder.AddChunk(CodeCompletionString::CK_Comma);
      Builder.AddTextChunk("...");
    }
    if (IsInImplementation && Results.includeCodePatterns()) {
      // We will be defining the method here, so add a compound statement.
      Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
      Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
      Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
      if (!Method->getReturnType()->isVoidType()) {
        // If the result type is not void, add a return clause.
        Builder.AddTextChunk("return");
        Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
        Builder.AddPlaceholderChunk("expression");
        Builder.AddChunk(CodeCompletionString::CK_SemiColon);
      } else
        Builder.AddPlaceholderChunk("statements");
      Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
      Builder.AddChunk(CodeCompletionString::CK_RightBrace);
    }
    // Rank methods declared in the searched container itself above those
    // that were only inherited (getInt() distinguishes the two).
    unsigned Priority = CCP_CodePattern;
    if (!M->second.getInt())
      Priority += CCD_InBaseClass;
    Results.AddResult(Result(Builder.TakeString(), Method, Priority));
  }
  // Add Key-Value-Coding and Key-Value-Observing accessor methods for all of
  // the properties in this class and its categories.
  if (Context.getLangOpts().ObjC2) {
    SmallVector<ObjCContainerDecl *, 4> Containers;
    Containers.push_back(SearchDecl);
    // Don't suggest KVC/KVO accessors whose selectors were already offered
    // above.
    VisitedSelectorSet KnownSelectors;
    for (KnownMethodsMap::iterator M = KnownMethods.begin(),
                                   MEnd = KnownMethods.end();
         M != MEnd; ++M)
      KnownSelectors.insert(M->first);
    ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(SearchDecl);
    if (!IFace)
      if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(SearchDecl))
        IFace = Category->getClassInterface();
    if (IFace)
      for (auto *Cat : IFace->visible_categories())
        Containers.push_back(Cat);
    for (unsigned I = 0, N = Containers.size(); I != N; ++I)
      for (auto *P : Containers[I]->properties())
        AddObjCKeyValueCompletions(P, IsInstanceMethod, ReturnType, Context,
                                   KnownSelectors, Results);
  }
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_Other,
                            Results.data(),Results.size());
}
/// \brief Provide code completions for a selector slot inside an
/// Objective-C method declaration, drawing on the global method pool.
///
/// \param S the scope in which completion occurs.
/// \param IsInstanceMethod true if completing after '-', false after '+'.
/// \param AtParameterName true when the cursor sits at a parameter-name
/// position, in which case previously-seen parameter names are suggested
/// instead of further selector fragments.
/// \param ReturnTy the declared result type, if any; used to set the
/// preferred type so matching methods are prioritized.
/// \param SelIdents the selector fragments the user has already written.
void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
                                              bool IsInstanceMethod,
                                              bool AtParameterName,
                                              ParsedType ReturnTy,
                                       ArrayRef<IdentifierInfo *> SelIdents) {
  // If we have an external source, load the entire class method
  // pool from the AST file.
  if (ExternalSource) {
    for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
         I != N; ++I) {
      Selector Sel = ExternalSource->GetExternalSelector(I);
      if (Sel.isNull() || MethodPool.count(Sel))
        continue;
      ReadMethodPool(Sel);
    }
  }
  // Build the set of methods we can see.
  typedef CodeCompletionResult Result;
  ResultBuilder Results(*this, CodeCompleter->getAllocator(),
                        CodeCompleter->getCodeCompletionTUInfo(),
                        CodeCompletionContext::CCC_Other);
  if (ReturnTy)
    Results.setPreferredType(GetTypeFromParser(ReturnTy).getNonReferenceType());
  Results.EnterNewScope();
  for (GlobalMethodPool::iterator M = MethodPool.begin(),
                                  MEnd = MethodPool.end();
       M != MEnd; ++M) {
    // Walk either the instance-method or the class-method list of this
    // selector, as appropriate.
    for (ObjCMethodList *MethList = IsInstanceMethod ? &M->second.first :
                                                       &M->second.second;
         MethList && MethList->getMethod();
         MethList = MethList->getNext()) {
      if (!isAcceptableObjCMethod(MethList->getMethod(), MK_Any, SelIdents))
        continue;
      if (AtParameterName) {
        // Suggest parameter names we've seen before.
        unsigned NumSelIdents = SelIdents.size();
        if (NumSelIdents &&
            NumSelIdents <= MethList->getMethod()->param_size()) {
          // The parameter being completed corresponds to the last selector
          // fragment written so far.
          ParmVarDecl *Param =
              MethList->getMethod()->parameters()[NumSelIdents - 1];
          if (Param->getIdentifier()) {
            CodeCompletionBuilder Builder(Results.getAllocator(),
                                          Results.getCodeCompletionTUInfo());
            Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
                                          Param->getIdentifier()->getName()));
            Results.AddResult(Builder.TakeString());
          }
        }
        continue;
      }
      Result R(MethList->getMethod(),
               Results.getBasePriority(MethList->getMethod()), nullptr);
      R.StartParameter = SelIdents.size();
      R.AllParametersAreInformative = false;
      // We're declaring this method, not sending a message to it.
      R.DeclaringEntity = true;
      Results.MaybeAddResult(R, CurContext);
    }
  }
  Results.ExitScope();
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_Other,
                            Results.data(),Results.size());
}
void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_PreprocessorDirective);
Results.EnterNewScope();
// #if <condition>
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
Builder.AddTypedTextChunk("if");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("condition");
Results.AddResult(Builder.TakeString());
// #ifdef <macro>
Builder.AddTypedTextChunk("ifdef");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("macro");
Results.AddResult(Builder.TakeString());
// #ifndef <macro>
Builder.AddTypedTextChunk("ifndef");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("macro");
Results.AddResult(Builder.TakeString());
if (InConditional) {
// #elif <condition>
Builder.AddTypedTextChunk("elif");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("condition");
Results.AddResult(Builder.TakeString());
// #else
Builder.AddTypedTextChunk("else");
Results.AddResult(Builder.TakeString());
// #endif
Builder.AddTypedTextChunk("endif");
Results.AddResult(Builder.TakeString());
}
// #include "header"
Builder.AddTypedTextChunk("include");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTextChunk("\"");
Builder.AddPlaceholderChunk("header");
Builder.AddTextChunk("\"");
Results.AddResult(Builder.TakeString());
// #include <header>
Builder.AddTypedTextChunk("include");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTextChunk("<");
Builder.AddPlaceholderChunk("header");
Builder.AddTextChunk(">");
Results.AddResult(Builder.TakeString());
// #define <macro>
Builder.AddTypedTextChunk("define");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("macro");
Results.AddResult(Builder.TakeString());
// #define <macro>(<args>)
Builder.AddTypedTextChunk("define");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("macro");
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("args");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Builder.TakeString());
// #undef <macro>
Builder.AddTypedTextChunk("undef");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("macro");
Results.AddResult(Builder.TakeString());
// #line <number>
Builder.AddTypedTextChunk("line");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("number");
Results.AddResult(Builder.TakeString());
// #line <number> "filename"
Builder.AddTypedTextChunk("line");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("number");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTextChunk("\"");
Builder.AddPlaceholderChunk("filename");
Builder.AddTextChunk("\"");
Results.AddResult(Builder.TakeString());
// #error <message>
Builder.AddTypedTextChunk("error");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("message");
Results.AddResult(Builder.TakeString());
// #pragma <arguments>
Builder.AddTypedTextChunk("pragma");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("arguments");
Results.AddResult(Builder.TakeString());
if (getLangOpts().ObjC1) {
// #import "header"
Builder.AddTypedTextChunk("import");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTextChunk("\"");
Builder.AddPlaceholderChunk("header");
Builder.AddTextChunk("\"");
Results.AddResult(Builder.TakeString());
// #import <header>
Builder.AddTypedTextChunk("import");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTextChunk("<");
Builder.AddPlaceholderChunk("header");
Builder.AddTextChunk(">");
Results.AddResult(Builder.TakeString());
}
// #include_next "header"
Builder.AddTypedTextChunk("include_next");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTextChunk("\"");
Builder.AddPlaceholderChunk("header");
Builder.AddTextChunk("\"");
Results.AddResult(Builder.TakeString());
// #include_next <header>
Builder.AddTypedTextChunk("include_next");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddTextChunk("<");
Builder.AddPlaceholderChunk("header");
Builder.AddTextChunk(">");
Results.AddResult(Builder.TakeString());
// #warning <message>
Builder.AddTypedTextChunk("warning");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddPlaceholderChunk("message");
Results.AddResult(Builder.TakeString());
// Note: #ident and #sccs are such crazy anachronisms that we don't provide
// completions for them. And __include_macros is a Clang-internal extension
// that we don't want to encourage anyone to use.
// FIXME: we don't support #assert or #unassert, so don't suggest them.
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_PreprocessorDirective,
Results.data(), Results.size());
}
/// \brief Code completion inside the excluded branch of a preprocessor
/// conditional: recover as if completing an ordinary name.
void Sema::CodeCompleteInPreprocessorConditionalExclusion(Scope *S) {
  // Inside a function body we complete statements; otherwise we complete
  // namespace-scope declarations.
  ParserCompletionContext CompletionContext =
      S->getFnParent() ? Sema::PCC_RecoveryInFunction : Sema::PCC_Namespace;
  CodeCompleteOrdinaryName(S, CompletionContext);
}
void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
IsDefinition? CodeCompletionContext::CCC_MacroName
: CodeCompletionContext::CCC_MacroNameUse);
if (!IsDefinition && (!CodeCompleter || CodeCompleter->includeMacros())) {
// Add just the names of macros, not their arguments.
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
Results.EnterNewScope();
for (Preprocessor::macro_iterator M = PP.macro_begin(),
MEnd = PP.macro_end();
M != MEnd; ++M) {
Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
M->first->getName()));
Results.AddResult(CodeCompletionResult(Builder.TakeString(),
CCP_CodePattern,
CXCursor_MacroDefinition));
}
Results.ExitScope();
} else if (IsDefinition) {
// FIXME: Can we detect when the user just wrote an include guard above?
}
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
Results.data(), Results.size());
}
void Sema::CodeCompletePreprocessorExpression() {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_PreprocessorExpression);
if (!CodeCompleter || CodeCompleter->includeMacros())
AddMacroResults(PP, Results, true);
// defined (<macro>)
Results.EnterNewScope();
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
Builder.AddTypedTextChunk("defined");
Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
Builder.AddPlaceholderChunk("macro");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
Results.AddResult(Builder.TakeString());
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter,
CodeCompletionContext::CCC_PreprocessorExpression,
Results.data(), Results.size());
}
/// \brief Code completion within an argument of a function-like macro
/// invocation. Intentionally produces no results.
///
/// \param S the scope in which completion occurs.
/// \param Macro the name of the macro being invoked.
/// \param MacroInfo the definition of the macro being invoked.
/// \param Argument the index of the argument being completed.
void Sema::CodeCompletePreprocessorMacroArgument(Scope *S,
                                                 IdentifierInfo *Macro,
                                                 MacroInfo *MacroInfo,
                                                 unsigned Argument) {
  // FIXME: In the future, we could provide "overload" results, much like we
  // do for function calls.

  // Now just ignore this. There will be another code-completion callback
  // for the expanded tokens.
}
/// \brief Code completion inside natural-language text (e.g. a comment or
/// string): there is nothing sensible to suggest, so report an empty result
/// set tagged with the natural-language context.
void Sema::CodeCompleteNaturalLanguage() {
  HandleCodeCompleteResults(this, CodeCompleter,
                            CodeCompletionContext::CCC_NaturalLanguage,
                            nullptr, 0);
}
/// Collect a "global" completion set: every declaration visible at
/// translation-unit scope plus (optionally) every macro, copied into the
/// caller-provided \p Results vector.
void Sema::GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
                                       CodeCompletionTUInfo &CCTUInfo,
                                 SmallVectorImpl<CodeCompletionResult> &Results) {
  ResultBuilder Collected(*this, Allocator, CCTUInfo,
                          CodeCompletionContext::CCC_Recovery);

  // Declarations first, then macros, so ordering matches the other
  // completion entry points.
  const bool WantGlobals = !CodeCompleter || CodeCompleter->includeGlobals();
  if (WantGlobals) {
    TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
    CodeCompletionDeclConsumer Consumer(Collected, TU);
    LookupVisibleDecls(TU, LookupAnyName, Consumer);
  }

  const bool WantMacros = !CodeCompleter || CodeCompleter->includeMacros();
  if (WantMacros)
    AddMacroResults(PP, Collected, true);

  // Hand the accumulated results back through the output vector.
  Results.clear();
  Results.insert(Results.end(), Collected.data(),
                 Collected.data() + Collected.size());
}
//===--- SemaCast.cpp - Semantic Analysis for Casts -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements semantic analysis for cast expressions, including
// 1) C-style casts like '(int) x'
// 2) C++ functional casts like 'int(x)'
// 3) C++ named casts like 'static_cast<int>(x)'
//
//===----------------------------------------------------------------------===//
#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Initialization.h"
#include "llvm/ADT/SmallVector.h"
#include "clang/Sema/SemaHLSL.h" // HLSL Change
#include <set>
using namespace clang;
/// Outcome of one of the Try*Cast strategy helpers defined below.
enum TryCastResult {
  TC_NotApplicable, ///< The cast method is not applicable.
  TC_Success,       ///< The cast method is appropriate and successful.
  TC_Failed         ///< The cast method is appropriate, but failed. A
                    ///< diagnostic has been emitted.
};
/// Which cast syntax the user wrote.  Passed as %0 of the shared cast
/// diagnostics so the message can name the offending cast.
enum CastType {
  CT_Const,       ///< const_cast
  CT_Static,      ///< static_cast
  CT_Reinterpret, ///< reinterpret_cast
  CT_Dynamic,     ///< dynamic_cast
  CT_CStyle,      ///< (Type)expr
  CT_Functional   ///< Type(expr)
};
namespace {
  /// Bundles the shared state for checking one explicit cast so that the
  /// per-cast Check* routines can communicate through it.  The routines
  /// signal failure by resetting SrcExpr to an invalid ExprResult.
  struct CastOperation {
    CastOperation(Sema &S, QualType destType, ExprResult src)
      : Self(S), SrcExpr(src), DestType(destType),
        ResultType(destType.getNonLValueExprType(S.Context)),
        ValueKind(Expr::getValueKindForType(destType)),
        Kind(CK_Dependent), IsARCUnbridgedCast(false) {

      // Record whether the source is a placeholder expression; a kind of 0
      // means "no placeholder".
      if (const BuiltinType *placeholder =
            src.get()->getType()->getAsPlaceholderType()) {
        PlaceholderKind = placeholder->getKind();
      } else {
        PlaceholderKind = (BuiltinType::Kind) 0;
      }
    }

    Sema &Self;                        // Semantic-analysis context.
    ExprResult SrcExpr;                // Operand; invalid once checking fails.
    QualType DestType;                 // Destination type as written.
    QualType ResultType;               // DestType adjusted for value category.
    ExprValueKind ValueKind;           // Value category of the cast result.
    CastKind Kind;                     // Computed cast kind for CodeGen.
    BuiltinType::Kind PlaceholderKind; // 0 unless the source is a placeholder.
    CXXCastPath BasePath;              // Base path for hierarchy casts.
    bool IsARCUnbridgedCast;           // ARC: complete() must wrap the result.

    SourceRange OpRange;               // Range of the whole cast expression.
    SourceRange DestRange;             // Range of the destination type.

    // Top-level semantics-checking routines.
    void CheckConstCast();
    void CheckReinterpretCast();
    void CheckStaticCast();
    void CheckDynamicCast();
    void CheckCXXCStyleCast(bool FunctionalCast, bool ListInitialization);
    void CheckCStyleCast();

    /// Complete an apparently-successful cast operation that yields
    /// the given expression.
    ExprResult complete(CastExpr *castExpr) {
      // If this is an unbridged cast, wrap the result in an implicit
      // cast that yields the unbridged-cast placeholder type.
      if (IsARCUnbridgedCast) {
        castExpr = ImplicitCastExpr::Create(Self.Context,
                                            Self.Context.ARCUnbridgedCastTy,
                                            CK_Dependent, castExpr, nullptr,
                                            castExpr->getValueKind());
      }
      return castExpr;
    }

    // Internal convenience methods.

    /// Try to handle the given placeholder expression kind. Return
    /// true if the source expression has the appropriate placeholder
    /// kind. A placeholder can only be claimed once.
    bool claimPlaceholder(BuiltinType::Kind K) {
      if (PlaceholderKind != K) return false;

      PlaceholderKind = (BuiltinType::Kind) 0;
      return true;
    }

    /// True if the (unclaimed) source expression is any placeholder.
    bool isPlaceholder() const {
      return PlaceholderKind != 0;
    }
    /// True if the (unclaimed) source expression is placeholder kind \p K.
    bool isPlaceholder(BuiltinType::Kind K) const {
      return PlaceholderKind == K;
    }

    /// Warn when the cast increases alignment requirements.
    void checkCastAlign() {
      Self.CheckCastAlign(SrcExpr.get(), DestType, OpRange);
    }

    /// Run the ARC conversion check, recording whether the result is an
    /// unbridged cast that complete() must wrap.  Only valid under ARC.
    void checkObjCARCConversion(Sema::CheckedConversionKind CCK) {
      assert(Self.getLangOpts().ObjCAutoRefCount);

      Expr *src = SrcExpr.get();
      if (Self.CheckObjCARCConversion(OpRange, DestType, src, CCK) ==
            Sema::ACR_unbridged)
        IsARCUnbridgedCast = true;
      SrcExpr = src;
    }

    /// Check for and handle non-overload placeholder expressions.
    void checkNonOverloadPlaceholders() {
      if (!isPlaceholder() || isPlaceholder(BuiltinType::Overload))
        return;

      SrcExpr = Self.CheckPlaceholderExpr(SrcExpr.get());
      if (SrcExpr.isInvalid())
        return;
      PlaceholderKind = (BuiltinType::Kind) 0;
    }
  };
}
// The Try functions attempt a specific way of casting. If they succeed, they
// return TC_Success. If their way of casting is not appropriate for the given
// arguments, they return TC_NotApplicable and *may* set diag to a diagnostic
// to emit if no other way succeeds. If their way of casting is appropriate but
// fails, they return TC_Failed and *must* set diag; they can set it to 0 if
// they emit a specialized diagnostic.
// All diagnostics returned by these functions must expect the same three
// arguments:
// %0: Cast Type (a value from the CastType enumeration)
// %1: Source Type
// %2: Destination Type
// Forward declarations of the individual casting strategies, tried in
// sequence by TryStaticCast / CheckConstCast / CheckReinterpretCast below.

/// C++11 [expr.static.cast]p3: bind a glvalue to an rvalue reference of a
/// reference-compatible type (see the call site in TryStaticCast).
static TryCastResult TryLValueToRValueCast(Sema &Self, Expr *SrcExpr,
                                           QualType DestType, bool CStyle,
                                           CastKind &Kind,
                                           CXXCastPath &BasePath,
                                           unsigned &msg);
/// C++ 5.2.9p5: static_cast reference downcast.
static TryCastResult TryStaticReferenceDowncast(Sema &Self, Expr *SrcExpr,
                                               QualType DestType, bool CStyle,
                                               const SourceRange &OpRange,
                                               unsigned &msg,
                                               CastKind &Kind,
                                               CXXCastPath &BasePath);
/// static_cast pointer downcast (see the definition later in this file).
static TryCastResult TryStaticPointerDowncast(Sema &Self, QualType SrcType,
                                              QualType DestType, bool CStyle,
                                              const SourceRange &OpRange,
                                              unsigned &msg,
                                              CastKind &Kind,
                                              CXXCastPath &BasePath);
/// Shared helper for the reference/pointer downcast paths above.
static TryCastResult TryStaticDowncast(Sema &Self, CanQualType SrcType,
                                       CanQualType DestType, bool CStyle,
                                       const SourceRange &OpRange,
                                       QualType OrigSrcType,
                                       QualType OrigDestType, unsigned &msg,
                                       CastKind &Kind,
                                       CXXCastPath &BasePath);
/// Member-pointer upcast, e.g. int Derived::* -> int Base::*.
static TryCastResult TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr,
                                               QualType SrcType,
                                               QualType DestType,bool CStyle,
                                               const SourceRange &OpRange,
                                               unsigned &msg,
                                               CastKind &Kind,
                                               CXXCastPath &BasePath);
/// C++ 5.2.9p2: any conversion for which "T t(e);" is well-formed.
static TryCastResult TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr,
                                           QualType DestType,
                                           Sema::CheckedConversionKind CCK,
                                           const SourceRange &OpRange,
                                           unsigned &msg, CastKind &Kind,
                                           bool ListInitialization);
/// Driver that tries all static_cast interpretations in standard order.
static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
                                   QualType DestType,
                                   Sema::CheckedConversionKind CCK,
                                   const SourceRange &OpRange,
                                   unsigned &msg, CastKind &Kind,
                                   CXXCastPath &BasePath,
                                   bool ListInitialization);
/// const_cast checking; also used as a sub-step of C-style casts.
static TryCastResult TryConstCast(Sema &Self, ExprResult &SrcExpr,
                                  QualType DestType, bool CStyle,
                                  unsigned &msg);
/// reinterpret_cast checking; also used as a sub-step of C-style casts.
static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
                                        QualType DestType, bool CStyle,
                                        const SourceRange &OpRange,
                                        unsigned &msg,
                                        CastKind &Kind);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult
Sema::ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
SourceLocation LAngleBracketLoc, Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc, Expr *E,
SourceLocation RParenLoc) {
assert(!D.isInvalidType());
TypeSourceInfo *TInfo = GetTypeForDeclaratorCast(D, E->getType());
if (D.isInvalidType())
return ExprError();
if (getLangOpts().CPlusPlus) {
// Check that there are no default arguments (C++ only).
CheckExtraCXXDefaultArguments(D);
}
return BuildCXXNamedCast(OpLoc, Kind, TInfo, E,
SourceRange(LAngleBracketLoc, RAngleBracketLoc),
SourceRange(LParenLoc, RParenLoc));
}
/// Perform semantic checking for one of the four C++ named casts and build
/// the corresponding AST node.  For type-dependent operands the checks are
/// deferred to template instantiation, but the node is still created.
ExprResult
Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
                        TypeSourceInfo *DestTInfo, Expr *E,
                        SourceRange AngleBrackets, SourceRange Parens) {
  ExprResult Ex = E;
  QualType DestType = DestTInfo->getType();

  // If the type is dependent, we won't do the semantic analysis now.
  bool TypeDependent =
      DestType->isDependentType() || Ex.get()->isTypeDependent();

  CastOperation Op(*this, DestType, E);
  Op.OpRange = SourceRange(OpLoc, Parens.getEnd());
  Op.DestRange = AngleBrackets;

  // Each arm runs the cast-specific checker (which reports errors by
  // invalidating Op.SrcExpr) and then materializes the matching AST node.
  switch (Kind) {
  default: llvm_unreachable("Unknown C++ cast!");

  case tok::kw_const_cast:
    if (!TypeDependent) {
      Op.CheckConstCast();
      if (Op.SrcExpr.isInvalid())
        return ExprError();
    }
    return Op.complete(CXXConstCastExpr::Create(Context, Op.ResultType,
                                  Op.ValueKind, Op.SrcExpr.get(), DestTInfo,
                                                OpLoc, Parens.getEnd(),
                                                AngleBrackets));

  case tok::kw_dynamic_cast: {
    if (!TypeDependent) {
      Op.CheckDynamicCast();
      if (Op.SrcExpr.isInvalid())
        return ExprError();
    }
    return Op.complete(CXXDynamicCastExpr::Create(Context, Op.ResultType,
                                    Op.ValueKind, Op.Kind, Op.SrcExpr.get(),
                                                  &Op.BasePath, DestTInfo,
                                                  OpLoc, Parens.getEnd(),
                                                  AngleBrackets));
  }
  case tok::kw_reinterpret_cast: {
    if (!TypeDependent) {
      Op.CheckReinterpretCast();
      if (Op.SrcExpr.isInvalid())
        return ExprError();
    }
    // No base path: reinterpret_cast never adjusts through base classes.
    return Op.complete(CXXReinterpretCastExpr::Create(Context, Op.ResultType,
                                    Op.ValueKind, Op.Kind, Op.SrcExpr.get(),
                                                      nullptr, DestTInfo, OpLoc,
                                                      Parens.getEnd(),
                                                      AngleBrackets));
  }
  case tok::kw_static_cast: {
    if (!TypeDependent) {
      Op.CheckStaticCast();
      if (Op.SrcExpr.isInvalid())
        return ExprError();
    }

    return Op.complete(CXXStaticCastExpr::Create(Context, Op.ResultType,
                                   Op.ValueKind, Op.Kind, Op.SrcExpr.get(),
                                                 &Op.BasePath, DestTInfo,
                                                 OpLoc, Parens.getEnd(),
                                                 AngleBrackets));
  }
  }
}
/// Try to diagnose a failed overloaded cast. Returns true if
/// diagnostics were emitted.
static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
SourceRange range, Expr *src,
QualType destType,
bool listInitialization) {
switch (CT) {
// These cast kinds don't consider user-defined conversions.
case CT_Const:
case CT_Reinterpret:
case CT_Dynamic:
return false;
// These do.
case CT_Static:
case CT_CStyle:
case CT_Functional:
break;
}
QualType srcType = src->getType();
if (!destType->isRecordType() && !srcType->isRecordType())
return false;
InitializedEntity entity = InitializedEntity::InitializeTemporary(destType);
InitializationKind initKind
= (CT == CT_CStyle)? InitializationKind::CreateCStyleCast(range.getBegin(),
range, listInitialization)
: (CT == CT_Functional)? InitializationKind::CreateFunctionalCast(range,
listInitialization)
: InitializationKind::CreateCast(/*type range?*/ range);
InitializationSequence sequence(S, entity, initKind, src);
assert(sequence.Failed() && "initialization succeeded on second try?");
switch (sequence.getFailureKind()) {
default: return false;
case InitializationSequence::FK_ConstructorOverloadFailed:
case InitializationSequence::FK_UserConversionOverloadFailed:
break;
}
OverloadCandidateSet &candidates = sequence.getFailedCandidateSet();
unsigned msg = 0;
OverloadCandidateDisplayKind howManyCandidates = OCD_AllCandidates;
switch (sequence.getFailedOverloadResult()) {
case OR_Success: llvm_unreachable("successful failed overload");
case OR_No_Viable_Function:
if (candidates.empty())
msg = diag::err_ovl_no_conversion_in_cast;
else
msg = diag::err_ovl_no_viable_conversion_in_cast;
howManyCandidates = OCD_AllCandidates;
break;
case OR_Ambiguous:
msg = diag::err_ovl_ambiguous_conversion_in_cast;
howManyCandidates = OCD_ViableCandidates;
break;
case OR_Deleted:
msg = diag::err_ovl_deleted_conversion_in_cast;
howManyCandidates = OCD_ViableCandidates;
break;
}
S.Diag(range.getBegin(), msg)
<< CT << srcType << destType
<< range << src->getSourceRange();
candidates.NoteCandidates(S, howManyCandidates, src);
return true;
}
/// Diagnose a failed cast.
static void diagnoseBadCast(Sema &S, unsigned msg, CastType castType,
SourceRange opRange, Expr *src, QualType destType,
bool listInitialization) {
if (msg == diag::err_bad_cxx_cast_generic &&
tryDiagnoseOverloadedCast(S, castType, opRange, src, destType,
listInitialization))
return;
S.Diag(opRange.getBegin(), msg) << castType
<< src->getType() << destType << opRange << src->getSourceRange();
// Detect if both types are (ptr to) class, and note any incompleteness.
int DifferentPtrness = 0;
QualType From = destType;
if (auto Ptr = From->getAs<PointerType>()) {
From = Ptr->getPointeeType();
DifferentPtrness++;
}
QualType To = src->getType();
if (auto Ptr = To->getAs<PointerType>()) {
To = Ptr->getPointeeType();
DifferentPtrness--;
}
if (!DifferentPtrness) {
auto RecFrom = From->getAs<RecordType>();
auto RecTo = To->getAs<RecordType>();
if (RecFrom && RecTo) {
auto DeclFrom = RecFrom->getAsCXXRecordDecl();
if (!DeclFrom->isCompleteDefinition())
S.Diag(DeclFrom->getLocation(), diag::note_type_incomplete)
<< DeclFrom->getDeclName();
auto DeclTo = RecTo->getAsCXXRecordDecl();
if (!DeclTo->isCompleteDefinition())
S.Diag(DeclTo->getLocation(), diag::note_type_incomplete)
<< DeclTo->getDeclName();
}
}
}
/// UnwrapDissimilarPointerTypes - Like Sema::UnwrapSimilarPointerTypes,
/// this removes one level of indirection from both types, provided that they're
/// the same kind of pointer (plain or to-member). Unlike the Sema function,
/// this one doesn't care if the two pointers-to-member don't point into the
/// same class. This is because CastsAwayConstness doesn't care.
static bool UnwrapDissimilarPointerTypes(QualType& T1, QualType& T2) {
const PointerType *T1PtrType = T1->getAs<PointerType>(),
*T2PtrType = T2->getAs<PointerType>();
if (T1PtrType && T2PtrType) {
T1 = T1PtrType->getPointeeType();
T2 = T2PtrType->getPointeeType();
return true;
}
const ObjCObjectPointerType *T1ObjCPtrType =
T1->getAs<ObjCObjectPointerType>(),
*T2ObjCPtrType =
T2->getAs<ObjCObjectPointerType>();
if (T1ObjCPtrType) {
if (T2ObjCPtrType) {
T1 = T1ObjCPtrType->getPointeeType();
T2 = T2ObjCPtrType->getPointeeType();
return true;
}
else if (T2PtrType) {
T1 = T1ObjCPtrType->getPointeeType();
T2 = T2PtrType->getPointeeType();
return true;
}
}
else if (T2ObjCPtrType) {
if (T1PtrType) {
T2 = T2ObjCPtrType->getPointeeType();
T1 = T1PtrType->getPointeeType();
return true;
}
}
const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(),
*T2MPType = T2->getAs<MemberPointerType>();
if (T1MPType && T2MPType) {
T1 = T1MPType->getPointeeType();
T2 = T2MPType->getPointeeType();
return true;
}
const BlockPointerType *T1BPType = T1->getAs<BlockPointerType>(),
*T2BPType = T2->getAs<BlockPointerType>();
if (T1BPType && T2BPType) {
T1 = T1BPType->getPointeeType();
T2 = T2BPType->getPointeeType();
return true;
}
return false;
}
/// CastsAwayConstness - Check if the pointer conversion from SrcType to
/// DestType casts away constness as defined in C++ 5.2.11p8ff. This is used by
/// the cast checkers. Both arguments must denote pointer (possibly to member)
/// types.
///
/// \param CheckCVR Whether to check for const/volatile/restrict qualifiers.
///
/// \param CheckObjCLifetime Whether to check Objective-C lifetime qualifiers.
///
/// \param TheOffendingSrcType,TheOffendingDestType,CastAwayQualifiers If all
/// three are non-null, they receive the level at which the cvr-qualifiers
/// differ (the deepest differing level wins) plus the qualifiers being cast
/// away, for use in diagnostics.
static bool
CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
                   bool CheckCVR, bool CheckObjCLifetime,
                   QualType *TheOffendingSrcType = nullptr,
                   QualType *TheOffendingDestType = nullptr,
                   Qualifiers *CastAwayQualifiers = nullptr) {
  // If the only checking we care about is for Objective-C lifetime qualifiers,
  // and we're not in ARC mode, there's nothing to check.
  if (!CheckCVR && CheckObjCLifetime &&
      !Self.Context.getLangOpts().ObjCAutoRefCount)
    return false;

  // Casting away constness is defined in C++ 5.2.11p8 with reference to
  // C++ 4.4. We piggyback on Sema::IsQualificationConversion for this, since
  // the rules are non-trivial. So first we construct Tcv *...cv* as described
  // in C++ 5.2.11p8.
  assert((SrcType->isAnyPointerType() || SrcType->isMemberPointerType() ||
          SrcType->isBlockPointerType()) &&
         "Source type is not pointer or pointer to member.");
  assert((DestType->isAnyPointerType() || DestType->isMemberPointerType() ||
          DestType->isBlockPointerType()) &&
         "Destination type is not pointer or pointer to member.");

  QualType UnwrappedSrcType = Self.Context.getCanonicalType(SrcType),
           UnwrappedDestType = Self.Context.getCanonicalType(DestType);
  SmallVector<Qualifiers, 8> cv1, cv2;

  // Find the qualifiers. We only care about cvr-qualifiers for the
  // purpose of this check, because other qualifiers (address spaces,
  // Objective-C GC, etc.) are part of the type's identity.
  QualType PrevUnwrappedSrcType = UnwrappedSrcType;
  QualType PrevUnwrappedDestType = UnwrappedDestType;
  while (UnwrapDissimilarPointerTypes(UnwrappedSrcType, UnwrappedDestType)) {
    // Determine the relevant qualifiers at this level.
    Qualifiers SrcQuals, DestQuals;
    Self.Context.getUnqualifiedArrayType(UnwrappedSrcType, SrcQuals);
    Self.Context.getUnqualifiedArrayType(UnwrappedDestType, DestQuals);

    Qualifiers RetainedSrcQuals, RetainedDestQuals;
    if (CheckCVR) {
      RetainedSrcQuals.setCVRQualifiers(SrcQuals.getCVRQualifiers());
      RetainedDestQuals.setCVRQualifiers(DestQuals.getCVRQualifiers());

      // Record the differing level for diagnostics; re-assigned each
      // iteration, so the deepest differing level is what callers see.
      if (RetainedSrcQuals != RetainedDestQuals && TheOffendingSrcType &&
          TheOffendingDestType && CastAwayQualifiers) {
        *TheOffendingSrcType = PrevUnwrappedSrcType;
        *TheOffendingDestType = PrevUnwrappedDestType;
        *CastAwayQualifiers = RetainedSrcQuals - RetainedDestQuals;
      }
    }

    // An ARC lifetime mismatch is an immediate "casts away constness".
    if (CheckObjCLifetime &&
        !DestQuals.compatiblyIncludesObjCLifetime(SrcQuals))
      return true;

    cv1.push_back(RetainedSrcQuals);
    cv2.push_back(RetainedDestQuals);

    PrevUnwrappedSrcType = UnwrappedSrcType;
    PrevUnwrappedDestType = UnwrappedDestType;
  }
  if (cv1.empty())
    return false;

  // Construct void pointers with those qualifiers (in reverse order of
  // unwrapping, of course).
  QualType SrcConstruct = Self.Context.VoidTy;
  QualType DestConstruct = Self.Context.VoidTy;
  ASTContext &Context = Self.Context;
  for (SmallVectorImpl<Qualifiers>::reverse_iterator i1 = cv1.rbegin(),
                                                     i2 = cv2.rbegin();
       i1 != cv1.rend(); ++i1, ++i2) {
    SrcConstruct
      = Context.getPointerType(Context.getQualifiedType(SrcConstruct, *i1));
    DestConstruct
      = Context.getPointerType(Context.getQualifiedType(DestConstruct, *i2));
  }

  // Test if they're compatible.
  bool ObjCLifetimeConversion;
  return SrcConstruct != DestConstruct &&
    !Self.IsQualificationConversion(SrcConstruct, DestConstruct, false,
                                    ObjCLifetimeConversion);
}
/// CheckDynamicCast - Check that a dynamic_cast\<DestType\>(SrcExpr) is valid.
/// Refer to C++ 5.2.7 for details. Dynamic casts are used mostly for runtime-
/// checked downcasts in class hierarchies.
///
/// On success sets Kind (CK_NoOp / CK_DerivedToBase / CK_Dynamic); on
/// failure emits a diagnostic and invalidates SrcExpr.
void CastOperation::CheckDynamicCast() {
  // Decay arrays/functions for prvalue destinations, or resolve placeholders.
  if (ValueKind == VK_RValue)
    SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
  else if (isPlaceholder())
    SrcExpr = Self.CheckPlaceholderExpr(SrcExpr.get());
  if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
    return;

  QualType OrigSrcType = SrcExpr.get()->getType();
  QualType DestType = Self.Context.getCanonicalType(this->DestType);

  // C++ 5.2.7p1: T shall be a pointer or reference to a complete class type,
  // or "pointer to cv void".
  QualType DestPointee;
  const PointerType *DestPointer = DestType->getAs<PointerType>();
  const ReferenceType *DestReference = nullptr;
  if (DestPointer) {
    DestPointee = DestPointer->getPointeeType();
  } else if ((DestReference = DestType->getAs<ReferenceType>())) {
    DestPointee = DestReference->getPointeeType();
  } else {
    Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_ref_or_ptr)
      << this->DestType << DestRange;
    SrcExpr = ExprError();
    return;
  }

  // The pointee must be void (pointer destinations only) or a complete
  // class type.
  const RecordType *DestRecord = DestPointee->getAs<RecordType>();
  if (DestPointee->isVoidType()) {
    assert(DestPointer && "Reference to void is not possible");
  } else if (DestRecord) {
    if (Self.RequireCompleteType(OpRange.getBegin(), DestPointee,
                                 diag::err_bad_dynamic_cast_incomplete,
                                 DestRange)) {
      SrcExpr = ExprError();
      return;
    }
  } else {
    Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_class)
      << DestPointee.getUnqualifiedType() << DestRange;
    SrcExpr = ExprError();
    return;
  }

  // C++0x 5.2.7p2: If T is a pointer type, v shall be an rvalue of a pointer to
  //   complete class type, [...]. If T is an lvalue reference type, v shall be
  //   an lvalue of a complete class type, [...]. If T is an rvalue reference
  //   type, v shall be an expression having a complete class type, [...]
  QualType SrcType = Self.Context.getCanonicalType(OrigSrcType);
  QualType SrcPointee;
  if (DestPointer) {
    if (const PointerType *SrcPointer = SrcType->getAs<PointerType>()) {
      SrcPointee = SrcPointer->getPointeeType();
    } else {
      Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_ptr)
        << OrigSrcType << SrcExpr.get()->getSourceRange();
      SrcExpr = ExprError();
      return;
    }
  } else if (DestReference->isLValueReferenceType()) {
    if (!SrcExpr.get()->isLValue()) {
      Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_rvalue)
        << CT_Dynamic << OrigSrcType << this->DestType << OpRange;
    }
    SrcPointee = SrcType;
  } else {
    // If we're dynamic_casting from a prvalue to an rvalue reference, we need
    // to materialize the prvalue before we bind the reference to it.
    if (SrcExpr.get()->isRValue())
      SrcExpr = new (Self.Context) MaterializeTemporaryExpr(
          SrcType, SrcExpr.get(), /*IsLValueReference*/false);
    SrcPointee = SrcType;
  }

  // The source pointee must also be a complete class type.
  const RecordType *SrcRecord = SrcPointee->getAs<RecordType>();
  if (SrcRecord) {
    if (Self.RequireCompleteType(OpRange.getBegin(), SrcPointee,
                                 diag::err_bad_dynamic_cast_incomplete,
                                 SrcExpr.get())) {
      SrcExpr = ExprError();
      return;
    }
  } else {
    Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_class)
      << SrcPointee.getUnqualifiedType() << SrcExpr.get()->getSourceRange();
    SrcExpr = ExprError();
    return;
  }

  assert((DestPointer || DestReference) &&
    "Bad destination non-ptr/ref slipped through.");
  assert((DestRecord || DestPointee->isVoidType()) &&
    "Bad destination pointee slipped through.");
  assert(SrcRecord && "Bad source pointee slipped through.");

  // C++ 5.2.7p1: The dynamic_cast operator shall not cast away constness.
  if (!DestPointee.isAtLeastAsQualifiedAs(SrcPointee)) {
    Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_qualifiers_away)
      << CT_Dynamic << OrigSrcType << this->DestType << OpRange;
    SrcExpr = ExprError();
    return;
  }

  // C++ 5.2.7p3: If the type of v is the same as the required result type,
  //   [except for cv].
  if (DestRecord == SrcRecord) {
    Kind = CK_NoOp;
    return;
  }

  // C++ 5.2.7p5
  // Upcasts are resolved statically.
  if (DestRecord && Self.IsDerivedFrom(SrcPointee, DestPointee)) {
    if (Self.CheckDerivedToBaseConversion(SrcPointee, DestPointee,
                                          OpRange.getBegin(), OpRange,
                                          &BasePath)) {
      SrcExpr = ExprError();
      return;
    }

    Kind = CK_DerivedToBase;
    return;
  }

  // C++ 5.2.7p6: Otherwise, v shall be [polymorphic].
  const RecordDecl *SrcDecl = SrcRecord->getDecl()->getDefinition();
  assert(SrcDecl && "Definition missing");
  if (!cast<CXXRecordDecl>(SrcDecl)->isPolymorphic()) {
    Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_polymorphic)
      << SrcPointee.getUnqualifiedType() << SrcExpr.get()->getSourceRange();
    SrcExpr = ExprError();
    // NOTE(review): unlike the other error paths above there is no early
    // return here, so the -fno-rtti check below may still fire and Kind is
    // still set.  The caller bails out on the invalid SrcExpr, so this looks
    // harmless -- confirm whether the fall-through is intentional.
  }

  // dynamic_cast is not available with -fno-rtti.
  // As an exception, dynamic_cast to void* is available because it doesn't
  // use RTTI.
  if (!Self.getLangOpts().RTTI && !DestPointee->isVoidType()) {
    Self.Diag(OpRange.getBegin(), diag::err_no_dynamic_cast_with_fno_rtti);
    SrcExpr = ExprError();
    return;
  }

  // Done. Everything else is run-time checks.
  Kind = CK_Dynamic;
}
/// CheckConstCast - Check that a const_cast\<DestType\>(SrcExpr) is valid.
/// Refer to C++ 5.2.11 for details. const_cast is typically used in code
/// like this:
/// const char *str = "literal";
/// legacy_function(const_cast\<char*\>(str));
void CastOperation::CheckConstCast() {
if (ValueKind == VK_RValue)
SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
else if (isPlaceholder())
SrcExpr = Self.CheckPlaceholderExpr(SrcExpr.get());
if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
return;
unsigned msg = diag::err_bad_cxx_cast_generic;
if (TryConstCast(Self, SrcExpr, DestType, /*CStyle*/false, msg) != TC_Success
&& msg != 0) {
Self.Diag(OpRange.getBegin(), msg) << CT_Const
<< SrcExpr.get()->getType() << DestType << OpRange;
SrcExpr = ExprError();
}
}
/// Check that a reinterpret_cast\<DestType\>(SrcExpr) is not used as upcast
/// or downcast between respective pointers or references.
///
/// Warns (with a static_cast fix-it) when a reinterpret_cast between related
/// class types would yield a different address than the corresponding
/// static_cast -- i.e. when every derivation path is virtual or at a
/// non-zero offset.
static void DiagnoseReinterpretUpDownCast(Sema &Self, const Expr *SrcExpr,
                                          QualType DestType,
                                          SourceRange OpRange) {
  QualType SrcType = SrcExpr->getType();
  // When casting from pointer or reference, get pointee type; use original
  // type otherwise.
  const CXXRecordDecl *SrcPointeeRD = SrcType->getPointeeCXXRecordDecl();
  const CXXRecordDecl *SrcRD =
      SrcPointeeRD ? SrcPointeeRD : SrcType->getAsCXXRecordDecl();

  // Examining subobjects for records is only possible if the complete and
  // valid definition is available.  Also, template instantiation is not
  // allowed here.
  if (!SrcRD || !SrcRD->isCompleteDefinition() || SrcRD->isInvalidDecl())
    return;

  const CXXRecordDecl *DestRD = DestType->getPointeeCXXRecordDecl();

  if (!DestRD || !DestRD->isCompleteDefinition() || DestRD->isInvalidDecl())
    return;

  // Determine the direction of the cast within the hierarchy, if any.
  enum {
    ReinterpretUpcast,
    ReinterpretDowncast
  } ReinterpretKind;

  CXXBasePaths BasePaths;

  if (SrcRD->isDerivedFrom(DestRD, BasePaths))
    ReinterpretKind = ReinterpretUpcast;
  else if (DestRD->isDerivedFrom(SrcRD, BasePaths))
    ReinterpretKind = ReinterpretDowncast;
  else
    return;

  // Walk every derivation path, accumulating the non-virtual base-class
  // offset along each.  A single non-virtual path at offset zero means the
  // reinterpret_cast agrees with static_cast, so there is nothing to warn
  // about.
  bool VirtualBase = true;
  bool NonZeroOffset = false;
  for (CXXBasePaths::const_paths_iterator I = BasePaths.begin(),
                                          E = BasePaths.end();
       I != E; ++I) {
    const CXXBasePath &Path = *I;
    CharUnits Offset = CharUnits::Zero();
    bool IsVirtual = false;
    for (CXXBasePath::const_iterator IElem = Path.begin(), EElem = Path.end();
         IElem != EElem; ++IElem) {
      IsVirtual = IElem->Base->isVirtual();
      if (IsVirtual)
        break;
      const CXXRecordDecl *BaseRD = IElem->Base->getType()->getAsCXXRecordDecl();
      assert(BaseRD && "Base type should be a valid unqualified class type");
      // Don't check if any base has invalid declaration or has no definition
      // since it has no layout info.
      const CXXRecordDecl *Class = IElem->Class,
                          *ClassDefinition = Class->getDefinition();
      if (Class->isInvalidDecl() || !ClassDefinition ||
          !ClassDefinition->isCompleteDefinition())
        return;

      const ASTRecordLayout &DerivedLayout =
          Self.Context.getASTRecordLayout(Class);
      Offset += DerivedLayout.getBaseClassOffset(BaseRD);
    }
    if (!IsVirtual) {
      // Don't warn if any path is a non-virtually derived base at offset zero.
      if (Offset.isZero())
        return;
      // Offset makes sense only for non-virtual bases.
      else
        NonZeroOffset = true;
    }
    VirtualBase = VirtualBase && IsVirtual;
  }

  (void) NonZeroOffset; // Silence set but not used warning.
  assert((VirtualBase || NonZeroOffset) &&
         "Should have returned if has non-virtual base with zero offset");

  QualType BaseType =
      ReinterpretKind == ReinterpretUpcast? DestType : SrcType;
  QualType DerivedType =
      ReinterpretKind == ReinterpretUpcast? SrcType : DestType;

  SourceLocation BeginLoc = OpRange.getBegin();
  Self.Diag(BeginLoc, diag::warn_reinterpret_different_from_static)
    << DerivedType << BaseType << !VirtualBase << int(ReinterpretKind)
    << OpRange;
  Self.Diag(BeginLoc, diag::note_reinterpret_updowncast_use_static)
    << int(ReinterpretKind)
    << FixItHint::CreateReplacement(BeginLoc, "static_cast");
}
/// CheckReinterpretCast - Check that a reinterpret_cast\<DestType\>(SrcExpr) is
/// valid.
/// Refer to C++ 5.2.10 for details. reinterpret_cast is typically used in code
/// like this:
///   char *bytes = reinterpret_cast\<char*\>(int_ptr);
///
/// On failure, emits a diagnostic and invalidates SrcExpr; on success it may
/// additionally warn about up/down casts that differ from static_cast.
void CastOperation::CheckReinterpretCast() {
  // Decay for prvalue destinations; otherwise resolve non-overload
  // placeholders (an overloaded function name is handled specially below).
  if (ValueKind == VK_RValue && !isPlaceholder(BuiltinType::Overload))
    SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
  else
    checkNonOverloadPlaceholders();
  if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
    return;

  unsigned msg = diag::err_bad_cxx_cast_generic;
  TryCastResult tcr =
    TryReinterpretCast(Self, SrcExpr, DestType,
                       /*CStyle*/false, OpRange, msg, Kind);
  if (tcr != TC_Success && msg != 0)
  {
    if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
      return;
    if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
      // An unresolved overloaded function name gets its own diagnostic plus
      // candidate notes.
      //FIXME: &f<int>; is overloaded and resolvable
      Self.Diag(OpRange.getBegin(), diag::err_bad_reinterpret_cast_overload)
        << OverloadExpr::find(SrcExpr.get()).Expression->getName()
        << DestType << OpRange;
      Self.NoteAllOverloadCandidates(SrcExpr.get());
    } else {
      diagnoseBadCast(Self, msg, CT_Reinterpret, OpRange, SrcExpr.get(),
                      DestType, /*listInitialization=*/false);
    }
    SrcExpr = ExprError();
  } else if (tcr == TC_Success) {
    if (Self.getLangOpts().ObjCAutoRefCount)
      checkObjCARCConversion(Sema::CCK_OtherCast);
    DiagnoseReinterpretUpDownCast(Self, SrcExpr.get(), DestType, OpRange);
  }
}
/// CheckStaticCast - Check that a static_cast\<DestType\>(SrcExpr) is valid.
/// Refer to C++ 5.2.9 for details. Static casts are mostly used for making
/// implicit conversions explicit and getting rid of data loss warnings.
///
/// On failure, emits a diagnostic and invalidates SrcExpr.
void CastOperation::CheckStaticCast() {
  if (isPlaceholder()) {
    checkNonOverloadPlaceholders();
    if (SrcExpr.isInvalid())
      return;
  }

  // This test is outside everything else because it's the only case where
  // a non-lvalue-reference target type does not lead to decay.
  // C++ 5.2.9p4: Any expression can be explicitly converted to type "cv void".
  if (DestType->isVoidType()) {
    Kind = CK_ToVoid;

    // A cast to void can still pin down an overloaded function name.
    if (claimPlaceholder(BuiltinType::Overload)) {
      Self.ResolveAndFixSingleFunctionTemplateSpecialization(SrcExpr,
                false, // Decay Function to ptr
                true, // Complain
                OpRange, DestType, diag::err_bad_static_cast_overload);
      if (SrcExpr.isInvalid())
        return;
    }

    SrcExpr = Self.IgnoredValueConversions(SrcExpr.get());
    return;
  }

  // Decay the operand for non-class prvalue destinations, except for the
  // HLSL vector-shrinking case guarded below.
  if (ValueKind == VK_RValue && !DestType->isRecordType() &&
      !isPlaceholder(BuiltinType::Overload)
      && !(Self.getLangOpts().HLSL && !hlsl::IsConversionToLessOrEqualElements(&Self, SrcExpr, DestType, true)) // HLSL Change: check for HLSL vector-shrinking.
      ) {
    SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
    if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
      return;
  }

  unsigned msg = diag::err_bad_cxx_cast_generic;
  TryCastResult tcr
    = TryStaticCast(Self, SrcExpr, DestType, Sema::CCK_OtherCast, OpRange, msg,
                    Kind, BasePath, /*ListInitialization=*/false);
  if (tcr != TC_Success && msg != 0) {
    if (SrcExpr.isInvalid())
      return;
    if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
      // An unresolved overloaded function name gets its own diagnostic plus
      // candidate notes.
      OverloadExpr* oe = OverloadExpr::find(SrcExpr.get()).Expression;
      Self.Diag(OpRange.getBegin(), diag::err_bad_static_cast_overload)
        << oe->getName() << DestType << OpRange
        << oe->getQualifierLoc().getSourceRange();
      Self.NoteAllOverloadCandidates(SrcExpr.get());
    } else {
      diagnoseBadCast(Self, msg, CT_Static, OpRange, SrcExpr.get(), DestType,
                      /*listInitialization=*/false);
    }
    SrcExpr = ExprError();
  } else if (tcr == TC_Success) {
    if (Kind == CK_BitCast)
      checkCastAlign();
    if (Self.getLangOpts().ObjCAutoRefCount)
      checkObjCARCConversion(Sema::CCK_OtherCast);
  } else if (Kind == CK_BitCast) {
    // TryStaticCast failed but chose a bit-cast; still check alignment.
    checkCastAlign();
  }
}
/// TryStaticCast - Check if a static cast can be performed, and do so if
/// possible. If @p CStyle, ignore access restrictions on hierarchy casting
/// and casting away constness.
///
/// Tries every interpretation of static_cast permitted by C++
/// [expr.static.cast] in the standard-mandated order, plus the HLSL
/// extension. Returns TC_NotApplicable only after all interpretations are
/// exhausted. On a hard failure, \p msg names the diagnostic to report
/// (0 means a diagnostic was already emitted); \p Kind and \p BasePath are
/// meaningful only when TC_Success is returned.
static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
                                   QualType DestType,
                                   Sema::CheckedConversionKind CCK,
                                   const SourceRange &OpRange, unsigned &msg,
                                   CastKind &Kind, CXXCastPath &BasePath,
                                   bool ListInitialization) {
  // Determine whether we have the semantics of a C-style cast.
  bool CStyle
    = (CCK == Sema::CCK_CStyleCast || CCK == Sema::CCK_FunctionalCast);

  // The order the tests is not entirely arbitrary. There is one conversion
  // that can be handled in two different ways. Given:
  // struct A {};
  // struct B : public A {
  //   B(); B(const A&);
  // };
  // const A &a = B();
  // the cast static_cast<const B&>(a) could be seen as either a static
  // reference downcast, or an explicit invocation of the user-defined
  // conversion using B's conversion constructor.
  // DR 427 specifies that the downcast is to be applied here.

  // C++ 5.2.9p4: Any expression can be explicitly converted to type "cv void".
  // Done outside this function.

  TryCastResult tcr;

  // C++ 5.2.9p5, reference downcast.
  // See the function for details.
  // DR 427 specifies that this is to be applied before paragraph 2.
  tcr = TryStaticReferenceDowncast(Self, SrcExpr.get(), DestType, CStyle,
                                   OpRange, msg, Kind, BasePath);
  if (tcr != TC_NotApplicable)
    return tcr;

  // C++11 [expr.static.cast]p3:
  //   A glvalue of type "cv1 T1" can be cast to type "rvalue reference to cv2
  //   T2" if "cv2 T2" is reference-compatible with "cv1 T1".
  tcr = TryLValueToRValueCast(Self, SrcExpr.get(), DestType, CStyle, Kind,
                              BasePath, msg);
  if (tcr != TC_NotApplicable)
    return tcr;

  // C++ 5.2.9p2: An expression e can be explicitly converted to a type T
  //   [...] if the declaration "T t(e);" is well-formed, [...].
  tcr = TryStaticImplicitCast(Self, SrcExpr, DestType, CCK, OpRange, msg,
                              Kind, ListInitialization);
  if (SrcExpr.isInvalid())
    return TC_Failed;
  if (tcr != TC_NotApplicable)
    return tcr;

  // C++ 5.2.9p6: May apply the reverse of any standard conversion, except
  // lvalue-to-rvalue, array-to-pointer, function-to-pointer, and boolean
  // conversions, subject to further restrictions.
  // Also, C++ 5.2.9p1 forbids casting away constness, which makes reversal
  // of qualification conversions impossible.
  // In the CStyle case, the earlier attempt to const_cast should have taken
  // care of reverse qualification conversions.

  QualType SrcType = Self.Context.getCanonicalType(SrcExpr.get()->getType());

  // C++0x 5.2.9p9: A value of a scoped enumeration type can be explicitly
  // converted to an integral type. [...] A value of a scoped enumeration type
  // can also be explicitly converted to a floating-point type [...].
  if (const EnumType *Enum = SrcType->getAs<EnumType>()) {
    if (Enum->getDecl()->isScoped()) {
      if (DestType->isBooleanType()) {
        Kind = CK_IntegralToBoolean;
        return TC_Success;
      } else if (DestType->isIntegralType(Self.Context)) {
        Kind = CK_IntegralCast;
        return TC_Success;
      } else if (DestType->isRealFloatingType()) {
        Kind = CK_IntegralToFloating;
        return TC_Success;
      }
    }
  }

  // Reverse integral promotion/conversion. All such conversions are themselves
  // again integral promotions or conversions and are thus already handled by
  // p2 (TryDirectInitialization above).
  // (Note: any data loss warnings should be suppressed.)
  // The exception is the reverse of enum->integer, i.e. integer->enum (and
  // enum->enum). See also C++ 5.2.9p7.
  // The same goes for reverse floating point promotion/conversion and
  // floating-integral conversions. Again, only floating->enum is relevant.
  if (DestType->isEnumeralType()) {
    if (SrcType->isIntegralOrEnumerationType()) {
      Kind = CK_IntegralCast;
      return TC_Success;
    } else if (SrcType->isRealFloatingType()) {
      Kind = CK_FloatingToIntegral;
      return TC_Success;
    }
  }

  // Reverse pointer upcast. C++ 4.10p3 specifies pointer upcast.
  // C++ 5.2.9p8 additionally disallows a cast path through virtual inheritance.
  tcr = TryStaticPointerDowncast(Self, SrcType, DestType, CStyle, OpRange, msg,
                                 Kind, BasePath);
  if (tcr != TC_NotApplicable)
    return tcr;

  // Reverse member pointer conversion. C++ 4.11 specifies member pointer
  // conversion. C++ 5.2.9p9 has additional information.
  // DR54's access restrictions apply here also.
  tcr = TryStaticMemberPointerUpcast(Self, SrcExpr, SrcType, DestType, CStyle,
                                     OpRange, msg, Kind, BasePath);
  if (tcr != TC_NotApplicable)
    return tcr;

  // Reverse pointer conversion to void*. C++ 4.10.p2 specifies conversion to
  // void*. C++ 5.2.9p10 specifies additional restrictions, which really is
  // just the usual constness stuff.
  if (const PointerType *SrcPointer = SrcType->getAs<PointerType>()) {
    QualType SrcPointee = SrcPointer->getPointeeType();
    if (SrcPointee->isVoidType()) {
      if (const PointerType *DestPointer = DestType->getAs<PointerType>()) {
        QualType DestPointee = DestPointer->getPointeeType();
        if (DestPointee->isIncompleteOrObjectType()) {
          // This is definitely the intended conversion, but it might fail due
          // to a qualifier violation. Note that we permit Objective-C lifetime
          // and GC qualifier mismatches here.
          if (!CStyle) {
            Qualifiers DestPointeeQuals = DestPointee.getQualifiers();
            Qualifiers SrcPointeeQuals = SrcPointee.getQualifiers();
            DestPointeeQuals.removeObjCGCAttr();
            DestPointeeQuals.removeObjCLifetime();
            SrcPointeeQuals.removeObjCGCAttr();
            SrcPointeeQuals.removeObjCLifetime();
            if (DestPointeeQuals != SrcPointeeQuals &&
                !DestPointeeQuals.compatiblyIncludes(SrcPointeeQuals)) {
              msg = diag::err_bad_cxx_cast_qualifiers_away;
              return TC_Failed;
            }
          }
          Kind = CK_BitCast;
          return TC_Success;
        }

        // Microsoft permits static_cast from 'pointer-to-void' to
        // 'pointer-to-function'.
        if (!CStyle && Self.getLangOpts().MSVCCompat &&
            DestPointee->isFunctionType()) {
          Self.Diag(OpRange.getBegin(), diag::ext_ms_cast_fn_obj) << OpRange;
          Kind = CK_BitCast;
          return TC_Success;
        }
      }
      else if (DestType->isObjCObjectPointerType()) {
        // allow both c-style cast and static_cast of objective-c pointers as
        // they are pervasive.
        Kind = CK_CPointerToObjCPointerCast;
        return TC_Success;
      }
      else if (CStyle && DestType->isBlockPointerType()) {
        // allow c-style cast of void * to block pointers.
        Kind = CK_AnyPointerToBlockPointerCast;
        return TC_Success;
      }
    }
  }
  // Allow arbitrary objective-c pointer conversion with static casts.
  if (SrcType->isObjCObjectPointerType() &&
      DestType->isObjCObjectPointerType()) {
    Kind = CK_BitCast;
    return TC_Success;
  }
  // Allow ns-pointer to cf-pointer conversion in either direction
  // with static casts.
  if (!CStyle &&
      Self.CheckTollFreeBridgeStaticCast(DestType, SrcExpr.get(), Kind))
    return TC_Success;

  // See if it looks like the user is trying to convert between
  // related record types, and select a better diagnostic if so.
  if (auto SrcPointer = SrcType->getAs<PointerType>())
    if (auto DestPointer = DestType->getAs<PointerType>())
      if (SrcPointer->getPointeeType()->getAs<RecordType>() &&
          DestPointer->getPointeeType()->getAs<RecordType>())
        msg = diag::err_bad_cxx_cast_unrelated_class;

  // HLSL Change Starts
  if (Self.getLangOpts().HLSL) {
    const bool SuppressDiagFalse = false;
    if (hlsl::TryStaticCastForHLSL(&Self, SrcExpr, DestType, CCK, OpRange, msg,
                                   Kind, BasePath, ListInitialization,
                                   SuppressDiagFalse, nullptr)) {
      return TC_Success;
    }
  }
  // HLSL Change Ends

  // We tried everything. Everything! Nothing works! :-(
  return TC_NotApplicable;
}
/// Tests whether a conversion according to N2844 is valid.
///
/// N2844 is the rvalue-reference proposal; the corresponding rule is C++11
/// [expr.static.cast]p3: a glvalue may be cast to an rvalue reference to a
/// reference-compatible type. On failure, \p msg is set (unless CStyle, in
/// which case other cast interpretations may still apply).
TryCastResult
TryLValueToRValueCast(Sema &Self, Expr *SrcExpr, QualType DestType,
                      bool CStyle, CastKind &Kind, CXXCastPath &BasePath,
                      unsigned &msg) {
  // C++11 [expr.static.cast]p3:
  //   A glvalue of type "cv1 T1" can be cast to type "rvalue reference to
  //   cv2 T2" if "cv2 T2" is reference-compatible with "cv1 T1".
  const RValueReferenceType *R = DestType->getAs<RValueReferenceType>();
  if (!R)
    return TC_NotApplicable;

  if (!SrcExpr->isGLValue())
    return TC_NotApplicable;

  // Because we try the reference downcast before this function, from now on
  // this is the only cast possibility, so we issue an error if we fail now.
  // FIXME: Should allow casting away constness if CStyle.
  bool DerivedToBase;
  bool ObjCConversion;
  bool ObjCLifetimeConversion;
  QualType FromType = SrcExpr->getType();
  QualType ToType = R->getPointeeType();
  if (CStyle) {
    // C-style casts may combine a const_cast, so qualifier differences are
    // ignored here by comparing unqualified types.
    FromType = FromType.getUnqualifiedType();
    ToType = ToType.getUnqualifiedType();
  }

  if (Self.CompareReferenceRelationship(SrcExpr->getLocStart(),
                                        ToType, FromType,
                                        DerivedToBase, ObjCConversion,
                                        ObjCLifetimeConversion)
        < Sema::Ref_Compatible_With_Added_Qualification) {
    if (CStyle)
      return TC_NotApplicable;
    msg = diag::err_bad_lvalue_to_rvalue_cast;
    return TC_Failed;
  }

  if (DerivedToBase) {
    // Record the inheritance path so CodeGen can adjust the pointer.
    Kind = CK_DerivedToBase;
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/true);
    if (!Self.IsDerivedFrom(SrcExpr->getType(), R->getPointeeType(), Paths))
      return TC_NotApplicable;

    Self.BuildBasePathArray(Paths, BasePath);
  } else
    Kind = CK_NoOp;

  return TC_Success;
}
/// Tests whether a conversion according to C++ 5.2.9p5 is valid.
///
/// C++ 5.2.9p5: An lvalue of type "cv1 B", where B is a class type, can be
/// cast to type "reference to cv2 D", where D is a class derived from B, if
/// a valid standard conversion from "pointer to D" to "pointer to B" exists,
/// cv2 >= cv1, and B is not a virtual base class of D. Per DR54, the base
/// must also be accessible in the current context.
TryCastResult
TryStaticReferenceDowncast(Sema &Self, Expr *SrcExpr, QualType DestType,
                           bool CStyle, const SourceRange &OpRange,
                           unsigned &msg, CastKind &Kind,
                           CXXCastPath &BasePath) {
  const ReferenceType *DestRef = DestType->getAs<ReferenceType>();
  if (!DestRef)
    return TC_NotApplicable;

  // Binding an lvalue reference requires an lvalue source; if the target is
  // an rvalue reference, any value category is acceptable here.
  if (!DestRef->isRValueReferenceType() && !SrcExpr->isLValue()) {
    // Suggest a reason the lvalue-reference form cannot apply, but keep
    // searching for other interpretations of the cast.
    msg = diag::err_bad_cxx_cast_rvalue;
    return TC_NotApplicable;
  }

  // FIXME: If the source is a prvalue, we should issue a warning (because the
  // cast always has undefined behavior), and for AST consistency, we should
  // materialize a temporary.
  CanQualType CanonSrc =
      Self.Context.getCanonicalType(SrcExpr->getType());
  CanQualType CanonDestPointee =
      Self.Context.getCanonicalType(DestRef->getPointeeType());

  // Delegate the actual hierarchy/qualifier/access checking to the common
  // downcast helper, passing the original types for diagnostics.
  return TryStaticDowncast(Self, CanonSrc, CanonDestPointee, CStyle, OpRange,
                           SrcExpr->getType(), DestType, msg, Kind, BasePath);
}
/// Tests whether a conversion according to C++ 5.2.9p8 is valid.
///
/// C++ 5.2.9p8: An rvalue of type "pointer to cv1 B", where B is a class
/// type, can be converted to an rvalue of type "pointer to cv2 D", where D
/// is a class derived from B, if a valid standard conversion from "pointer
/// to D" to "pointer to B" exists, cv2 >= cv1, and B is not a virtual base
/// class of D. Per DR54, the base must also be accessible.
TryCastResult
TryStaticPointerDowncast(Sema &Self, QualType SrcType, QualType DestType,
                         bool CStyle, const SourceRange &OpRange,
                         unsigned &msg, CastKind &Kind,
                         CXXCastPath &BasePath) {
  const PointerType *DestPtr = DestType->getAs<PointerType>();
  if (!DestPtr)
    return TC_NotApplicable;

  const PointerType *SrcPtr = SrcType->getAs<PointerType>();
  if (!SrcPtr) {
    // Hint at the reason, but allow other cast interpretations to proceed.
    msg = diag::err_bad_static_cast_pointer_nonpointer;
    return TC_NotApplicable;
  }

  CanQualType CanonSrcPointee =
      Self.Context.getCanonicalType(SrcPtr->getPointeeType());
  CanQualType CanonDestPointee =
      Self.Context.getCanonicalType(DestPtr->getPointeeType());

  // Common downcast helper performs the hierarchy, qualifier, and access
  // checks; the original pointer types are forwarded for diagnostics.
  return TryStaticDowncast(Self, CanonSrcPointee, CanonDestPointee, CStyle,
                           OpRange, SrcType, DestType, msg, Kind, BasePath);
}
/// TryStaticDowncast - Common functionality of TryStaticReferenceDowncast and
/// TryStaticPointerDowncast. Tests whether a static downcast from SrcType to
/// DestType is possible and allowed.
///
/// SrcType/DestType are the canonical pointee (or referent) class types;
/// OrigSrcType/OrigDestType are the types as written, used only for
/// diagnostics. On TC_Success, Kind is CK_BaseToDerived and BasePath records
/// the inheritance path for CodeGen.
TryCastResult
TryStaticDowncast(Sema &Self, CanQualType SrcType, CanQualType DestType,
                  bool CStyle, const SourceRange &OpRange, QualType OrigSrcType,
                  QualType OrigDestType, unsigned &msg,
                  CastKind &Kind, CXXCastPath &BasePath) {
  // We can only work with complete types. But don't complain if it doesn't work
  if (Self.RequireCompleteType(OpRange.getBegin(), SrcType, 0) ||
      Self.RequireCompleteType(OpRange.getBegin(), DestType, 0))
    return TC_NotApplicable;

  // Downcast can only happen in class hierarchies, so we need classes.
  if (!DestType->getAs<RecordType>() || !SrcType->getAs<RecordType>()) {
    return TC_NotApplicable;
  }

  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/true);
  if (!Self.IsDerivedFrom(DestType, SrcType, Paths)) {
    return TC_NotApplicable;
  }

  // Target type does derive from source type. Now we're serious. If an error
  // appears now, it's not ignored.
  // This may not be entirely in line with the standard. Take for example:
  // struct A {};
  // struct B : virtual A {
  //   B(A&);
  // };
  //
  // void f()
  // {
  //   (void)static_cast<const B&>(*((A*)0));
  // }
  // As far as the standard is concerned, p5 does not apply (A is virtual), so
  // p2 should be used instead - "const B& t(*((A*)0));" is perfectly valid.
  // However, both GCC and Comeau reject this example, and accepting it would
  // mean more complex code if we're to preserve the nice error message.
  // FIXME: Being 100% compliant here would be nice to have.

  // Must preserve cv, as always, unless we're in C-style mode.
  if (!CStyle && !DestType.isAtLeastAsQualifiedAs(SrcType)) {
    msg = diag::err_bad_cxx_cast_qualifiers_away;
    return TC_Failed;
  }

  if (Paths.isAmbiguous(SrcType.getUnqualifiedType())) {
    // This code is analogous to that in CheckDerivedToBaseConversion, except
    // that it builds the paths in reverse order.
    // To sum up: record all paths to the base and build a nice string from
    // them. Use it to spice up the error message.
    if (!Paths.isRecordingPaths()) {
      Paths.clear();
      Paths.setRecordingPaths(true);
      Self.IsDerivedFrom(DestType, SrcType, Paths);
    }
    std::string PathDisplayStr;
    std::set<unsigned> DisplayedPaths;
    for (CXXBasePaths::paths_iterator PI = Paths.begin(), PE = Paths.end();
         PI != PE; ++PI) {
      if (DisplayedPaths.insert(PI->back().SubobjectNumber).second) {
        // We haven't displayed a path to this particular base
        // class subobject yet.
        PathDisplayStr += "\n    ";
        for (CXXBasePath::const_reverse_iterator EI = PI->rbegin(),
                                                 EE = PI->rend();
             EI != EE; ++EI)
          PathDisplayStr += EI->Base->getType().getAsString() + " -> ";
        PathDisplayStr += QualType(DestType).getAsString();
      }
    }

    Self.Diag(OpRange.getBegin(), diag::err_ambiguous_base_to_derived_cast)
      << QualType(SrcType).getUnqualifiedType()
      << QualType(DestType).getUnqualifiedType()
      << PathDisplayStr << OpRange;
    // Diagnostic already emitted here; tell the caller not to report another.
    msg = 0;
    return TC_Failed;
  }

  if (Paths.getDetectedVirtual() != nullptr) {
    // C++ 5.2.9p5/p8 forbids a static downcast through a virtual base.
    QualType VirtualBase(Paths.getDetectedVirtual(), 0);
    Self.Diag(OpRange.getBegin(), diag::err_static_downcast_via_virtual)
      << OrigSrcType << OrigDestType << VirtualBase << OpRange;
    msg = 0;
    return TC_Failed;
  }

  if (!CStyle) {
    // DR54: the base class must be accessible in the current context.
    switch (Self.CheckBaseClassAccess(OpRange.getBegin(),
                                      SrcType, DestType,
                                      Paths.front(),
                                diag::err_downcast_from_inaccessible_base)) {
    case Sema::AR_accessible:
    case Sema::AR_delayed:     // be optimistic
    case Sema::AR_dependent:   // be optimistic
      break;

    case Sema::AR_inaccessible:
      msg = 0;
      return TC_Failed;
    }
  }

  Self.BuildBasePathArray(Paths, BasePath);
  Kind = CK_BaseToDerived;
  return TC_Success;
}
/// TryStaticMemberPointerUpcast - Tests whether a conversion according to
/// C++ 5.2.9p9 is valid:
///
///   An rvalue of type "pointer to member of D of type cv1 T" can be
///   converted to an rvalue of type "pointer to member of B of type cv2 T",
///   where B is a base class of D [...].
///
/// SrcExpr may be an unresolved overload set, in which case it is resolved
/// against DestType (and, on success, rewritten to the chosen function).
/// On TC_Success, Kind is CK_DerivedToBaseMemberPointer and BasePath records
/// the inheritance path.
TryCastResult
TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr, QualType SrcType,
                             QualType DestType, bool CStyle,
                             const SourceRange &OpRange,
                             unsigned &msg, CastKind &Kind,
                             CXXCastPath &BasePath) {
  const MemberPointerType *DestMemPtr = DestType->getAs<MemberPointerType>();
  if (!DestMemPtr)
    return TC_NotApplicable;

  bool WasOverloadedFunction = false;
  DeclAccessPair FoundOverload;
  if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
    // Tentatively resolve the overload set against the destination type;
    // diagnostics are suppressed (third argument false) for now.
    if (FunctionDecl *Fn
          = Self.ResolveAddressOfOverloadedFunction(SrcExpr.get(), DestType, false,
                                                    FoundOverload)) {
      CXXMethodDecl *M = cast<CXXMethodDecl>(Fn);
      SrcType = Self.Context.getMemberPointerType(Fn->getType(),
                      Self.Context.getTypeDeclType(M->getParent()).getTypePtr());
      WasOverloadedFunction = true;
    }
  }

  const MemberPointerType *SrcMemPtr = SrcType->getAs<MemberPointerType>();
  if (!SrcMemPtr) {
    msg = diag::err_bad_static_cast_member_pointer_nonmp;
    return TC_NotApplicable;
  }

  // T == T, modulo cv
  if (!Self.Context.hasSameUnqualifiedType(SrcMemPtr->getPointeeType(),
                                           DestMemPtr->getPointeeType()))
    return TC_NotApplicable;

  // B base of D
  QualType SrcClass(SrcMemPtr->getClass(), 0);
  QualType DestClass(DestMemPtr->getClass(), 0);
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                  /*DetectVirtual=*/true);
  if (Self.RequireCompleteType(OpRange.getBegin(), SrcClass, 0) ||
      !Self.IsDerivedFrom(SrcClass, DestClass, Paths)) {
    return TC_NotApplicable;
  }

  // B is a base of D. But is it an allowed base? If not, it's a hard error.
  if (Paths.isAmbiguous(Self.Context.getCanonicalType(DestClass))) {
    // Re-run the derivation with path recording enabled so we can print the
    // ambiguous paths in the diagnostic.
    Paths.clear();
    Paths.setRecordingPaths(true);
    bool StillOkay = Self.IsDerivedFrom(SrcClass, DestClass, Paths);
    assert(StillOkay);
    (void)StillOkay;
    std::string PathDisplayStr = Self.getAmbiguousPathsDisplayString(Paths);
    Self.Diag(OpRange.getBegin(), diag::err_ambiguous_memptr_conv)
      << 1 << SrcClass << DestClass << PathDisplayStr << OpRange;
    msg = 0;
    return TC_Failed;
  }

  if (const RecordType *VBase = Paths.getDetectedVirtual()) {
    // C++ 5.2.9p9 disallows a conversion path through a virtual base.
    Self.Diag(OpRange.getBegin(), diag::err_memptr_conv_via_virtual)
      << SrcClass << DestClass << QualType(VBase, 0) << OpRange;
    msg = 0;
    return TC_Failed;
  }

  if (!CStyle) {
    // DR54: access restrictions apply here as well.
    switch (Self.CheckBaseClassAccess(OpRange.getBegin(),
                                      DestClass, SrcClass,
                                      Paths.front(),
                                      diag::err_upcast_to_inaccessible_base)) {
    case Sema::AR_accessible:
    case Sema::AR_delayed:
    case Sema::AR_dependent:
      // Optimistically assume that the delayed and dependent cases
      // will work out.
      break;

    case Sema::AR_inaccessible:
      msg = 0;
      return TC_Failed;
    }
  }

  if (WasOverloadedFunction) {
    // Resolve the address of the overloaded function again, this time
    // allowing complaints if something goes wrong.
    FunctionDecl *Fn = Self.ResolveAddressOfOverloadedFunction(SrcExpr.get(),
                                                               DestType,
                                                               true,
                                                               FoundOverload);
    if (!Fn) {
      msg = 0;
      return TC_Failed;
    }

    SrcExpr = Self.FixOverloadedFunctionReference(SrcExpr, FoundOverload, Fn);
    if (!SrcExpr.isUsable()) {
      msg = 0;
      return TC_Failed;
    }
  }

  Self.BuildBasePathArray(Paths, BasePath);
  Kind = CK_DerivedToBaseMemberPointer;
  return TC_Success;
}
/// TryStaticImplicitCast - Tests whether a conversion according to C++ 5.2.9p2
/// is valid:
///
///   An expression e can be explicitly converted to a type T using a
///   @c static_cast if the declaration "T t(e);" is well-formed [...].
///
/// Runs the full direct-initialization machinery against a temporary of
/// DestType. On TC_Success, SrcExpr is replaced by the initialized result
/// and Kind reflects whether a constructor was invoked.
TryCastResult
TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
                      Sema::CheckedConversionKind CCK,
                      const SourceRange &OpRange, unsigned &msg,
                      CastKind &Kind, bool ListInitialization) {
  if (DestType->isRecordType()) {
    // Constructing a class temporary requires a complete, non-abstract type.
    if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
                                 diag::err_bad_dynamic_cast_incomplete) ||
        Self.RequireNonAbstractType(OpRange.getBegin(), DestType,
                                    diag::err_allocation_of_abstract_type)) {
      msg = 0;
      return TC_Failed;
    }
  } else if (DestType->isMemberPointerType()) {
    if (Self.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
      // The MS ABI needs the class complete to pick an inheritance model.
      Self.RequireCompleteType(OpRange.getBegin(), DestType, 0);
    }
  }

  InitializedEntity Entity = InitializedEntity::InitializeTemporary(DestType);
  InitializationKind InitKind
    = (CCK == Sema::CCK_CStyleCast)
        ? InitializationKind::CreateCStyleCast(OpRange.getBegin(), OpRange,
                                               ListInitialization)
    : (CCK == Sema::CCK_FunctionalCast)
        ? InitializationKind::CreateFunctionalCast(OpRange, ListInitialization)
    : InitializationKind::CreateCast(OpRange);
  Expr *SrcExprRaw = SrcExpr.get();
  InitializationSequence InitSeq(Self, Entity, InitKind, SrcExprRaw);

  // At this point of CheckStaticCast, if the destination is a reference,
  // or the expression is an overload expression this has to work.
  // There is no other way that works.
  // On the other hand, if we're checking a C-style cast, we've still got
  // the reinterpret_cast way.
  bool CStyle
    = (CCK == Sema::CCK_CStyleCast || CCK == Sema::CCK_FunctionalCast);
  if (InitSeq.Failed() && (CStyle || !DestType->isReferenceType()))
    return TC_NotApplicable;

  ExprResult Result = InitSeq.Perform(Self, Entity, InitKind, SrcExprRaw);
  if (Result.isInvalid()) {
    // Perform already issued its own diagnostics.
    msg = 0;
    return TC_Failed;
  }

  if (InitSeq.isConstructorInitialization())
    Kind = CK_ConstructorConversion;
  else
    Kind = CK_NoOp;

  SrcExpr = Result;
  return TC_Success;
}
/// TryConstCast - See if a const_cast from source to destination is allowed,
/// and perform it if it is.
///
/// Implements C++ 5.2.11: reference destinations are rewritten as the
/// corresponding pointer conversion (p4), then the multi-level pointer rule
/// (p3) is checked level by level. On TC_Success, SrcExpr may have been
/// wrapped in a MaterializeTemporaryExpr for the class-prvalue-to-&&& case.
static TryCastResult TryConstCast(Sema &Self, ExprResult &SrcExpr,
                                  QualType DestType, bool CStyle,
                                  unsigned &msg) {
  DestType = Self.Context.getCanonicalType(DestType);
  QualType SrcType = SrcExpr.get()->getType();
  bool NeedToMaterializeTemporary = false;

  if (const ReferenceType *DestTypeTmp =DestType->getAs<ReferenceType>()) {
    // C++11 5.2.11p4:
    //   if a pointer to T1 can be explicitly converted to the type "pointer to
    //   T2" using a const_cast, then the following conversions can also be
    //   made:
    //    -- an lvalue of type T1 can be explicitly converted to an lvalue of
    //       type T2 using the cast const_cast<T2&>;
    //    -- a glvalue of type T1 can be explicitly converted to an xvalue of
    //       type T2 using the cast const_cast<T2&&>; and
    //    -- if T1 is a class type, a prvalue of type T1 can be explicitly
    //       converted to an xvalue of type T2 using the cast const_cast<T2&&>.

    if (isa<LValueReferenceType>(DestTypeTmp) && !SrcExpr.get()->isLValue()) {
      // Cannot const_cast non-lvalue to lvalue reference type. But if this
      // is C-style, static_cast might find a way, so we simply suggest a
      // message and tell the parent to keep searching.
      msg = diag::err_bad_cxx_cast_rvalue;
      return TC_NotApplicable;
    }

    if (isa<RValueReferenceType>(DestTypeTmp) && SrcExpr.get()->isRValue()) {
      if (!SrcType->isRecordType()) {
        // Cannot const_cast non-class prvalue to rvalue reference type. But if
        // this is C-style, static_cast can do this.
        msg = diag::err_bad_cxx_cast_rvalue;
        return TC_NotApplicable;
      }

      // Materialize the class prvalue so that the const_cast can bind a
      // reference to it.
      NeedToMaterializeTemporary = true;
    }

    // It's not completely clear under the standard whether we can
    // const_cast bit-field gl-values.  Doing so would not be
    // intrinsically complicated, but for now, we say no for
    // consistency with other compilers and await the word of the
    // committee.
    if (SrcExpr.get()->refersToBitField()) {
      msg = diag::err_bad_cxx_cast_bitfield;
      return TC_NotApplicable;
    }

    // Reduce the reference case to the pointer case (per p4 above).
    DestType = Self.Context.getPointerType(DestTypeTmp->getPointeeType());
    SrcType = Self.Context.getPointerType(SrcType);
  }

  // C++ 5.2.11p5: For a const_cast involving pointers to data members [...]
  //   the rules for const_cast are the same as those used for pointers.

  if (!DestType->isPointerType() &&
      !DestType->isMemberPointerType() &&
      !DestType->isObjCObjectPointerType()) {
    // Cannot cast to non-pointer, non-reference type. Note that, if DestType
    // was a reference type, we converted it to a pointer above.
    // The status of rvalue references isn't entirely clear, but it looks like
    // conversion to them is simply invalid.
    // C++ 5.2.11p3: For two pointer types [...]
    if (!CStyle)
      msg = diag::err_bad_const_cast_dest;
    return TC_NotApplicable;
  }
  if (DestType->isFunctionPointerType() ||
      DestType->isMemberFunctionPointerType()) {
    // Cannot cast direct function pointers.
    // C++ 5.2.11p2: [...] where T is any object type or the void type [...]
    // T is the ultimate pointee of source and target type.
    if (!CStyle)
      msg = diag::err_bad_const_cast_dest;
    return TC_NotApplicable;
  }
  SrcType = Self.Context.getCanonicalType(SrcType);

  // Unwrap the pointers. Ignore qualifiers. Terminate early if the types are
  // completely equal.
  // C++ 5.2.11p3 describes the core semantics of const_cast. All cv specifiers
  // in multi-level pointers may change, but the level count must be the same,
  // as must be the final pointee type.
  while (SrcType != DestType &&
         Self.Context.UnwrapSimilarPointerTypes(SrcType, DestType)) {
    Qualifiers SrcQuals, DestQuals;
    SrcType = Self.Context.getUnqualifiedArrayType(SrcType, SrcQuals);
    DestType = Self.Context.getUnqualifiedArrayType(DestType, DestQuals);

    // const_cast is permitted to strip cvr-qualifiers, only. Make sure that
    // the other qualifiers (e.g., address spaces) are identical.
    SrcQuals.removeCVRQualifiers();
    DestQuals.removeCVRQualifiers();
    if (SrcQuals != DestQuals)
      return TC_NotApplicable;
  }

  // Since we're dealing in canonical types, the remainder must be the same.
  if (SrcType != DestType)
    return TC_NotApplicable;

  if (NeedToMaterializeTemporary)
    // This is a const_cast from a class prvalue to an rvalue reference type.
    // Materialize a temporary to store the result of the conversion.
    SrcExpr = new (Self.Context) MaterializeTemporaryExpr(
        SrcType, SrcExpr.get(), /*IsLValueReference*/ false);

  return TC_Success;
}
// Checks for undefined behavior in reinterpret_cast.
// The cases that are checked for are:
//   *reinterpret_cast<T*>(&a)
//   reinterpret_cast<T&>(a)
// where accessing 'a' as type 'T' would violate strict aliasing and thus
// result in undefined behavior.
void Sema::CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                          bool IsDereference,
                                          SourceRange Range) {
  const unsigned DiagID = IsDereference
      ? diag::warn_pointer_indirection_from_incompatible_type
      : diag::warn_undefined_reinterpret_cast;

  // Skip all the work if the warning is disabled at this location.
  if (Diags.isIgnored(DiagID, Range.getBegin()))
    return;

  // Strip one level of indirection to get at the types actually accessed.
  QualType SrcTy, DestTy;
  if (IsDereference) {
    if (!SrcType->getAs<PointerType>() || !DestType->getAs<PointerType>())
      return;
    SrcTy = SrcType->getPointeeType();
    DestTy = DestType->getPointeeType();
  } else {
    if (!DestType->getAs<ReferenceType>())
      return;
    SrcTy = SrcType;
    DestTy = DestType->getPointeeType();
  }

  // Identical (modulo qualifiers) types are always compatible.
  if (Context.hasSameUnqualifiedType(DestTy, SrcTy))
    return;

  // Character and void types may legitimately alias any object.
  if (DestTy->isAnyCharacterType() || DestTy->isVoidType() ||
      SrcTy->isAnyCharacterType() || SrcTy->isVoidType())
    return;

  // Stay silent when a tag type is involved.
  if (SrcTy->getAs<TagType>() || DestTy->getAs<TagType>())
    return;

  // Signed/unsigned variants of the same width may alias each other.
  // FIXME: Scoped enums?
  if (Context.getTypeSize(DestTy) == Context.getTypeSize(SrcTy) &&
      ((SrcTy->isUnsignedIntegerType() && DestTy->isSignedIntegerType()) ||
       (SrcTy->isSignedIntegerType() && DestTy->isUnsignedIntegerType())))
    return;

  Diag(Range.getBegin(), DiagID) << SrcType << DestType << Range;
}
/// Warn when an expression of Objective-C SEL type is cast to a pointer to
/// anything other than (possibly qualified) void, since SEL is opaque.
static void DiagnoseCastOfObjCSEL(Sema &Self, const ExprResult &SrcExpr,
                                  QualType DestType) {
  QualType SrcType = SrcExpr.get()->getType();
  if (Self.Context.hasSameType(SrcType, DestType))
    return;

  // Only interested in a source that is a pointer to SEL.
  const PointerType *SrcPtrTy = SrcType->getAs<PointerType>();
  if (!SrcPtrTy || !SrcPtrTy->isObjCSelType())
    return;

  // Look through one pointer level on the destination, if any.
  QualType Pointee = DestType;
  if (isa<PointerType>(DestType))
    Pointee = DestType->getPointeeType();

  // Casting SEL to (cv) void* is fine; anything else gets a warning.
  if (Pointee.getUnqualifiedType()->isVoidType())
    return;

  Self.Diag(SrcExpr.get()->getExprLoc(), diag::warn_cast_pointer_from_sel)
      << SrcType << DestType << SrcExpr.get()->getSourceRange();
}
/// Warn on a C-style cast of a non-constant integer to a strictly wider
/// pointer type, which typically indicates a bug (the integer cannot hold a
/// round-tripped pointer value).
static void checkIntToPointerCast(bool CStyle, SourceLocation Loc,
                                  const Expr *SrcExpr, QualType DestType,
                                  Sema &Self) {
  QualType SrcType = SrcExpr->getType();

  // Not warning on reinterpret_cast, boolean, constant expressions, etc.
  // are not explicit design choices, but consistent with GCC's behavior.
  // Feel free to modify them if you've reason/evidence for an alternative.
  if (!CStyle)
    return;
  if (!SrcType->isIntegralType(Self.Context) || SrcType->isBooleanType() ||
      SrcType->isEnumeralType())
    return;
  if (SrcExpr->isIntegerConstantExpr(Self.Context))
    return;
  // Only warn when information would actually be fabricated, i.e. the
  // pointer is wider than the integer.
  if (Self.Context.getTypeSize(DestType) <= Self.Context.getTypeSize(SrcType))
    return;

  // Separate between casts to void* and non-void* pointers.
  // Some APIs use (abuse) void* for something like a user context,
  // and often that value is an integer even if it isn't a pointer itself.
  // Having a separate warning flag allows users to control the warning
  // for their workflow.
  unsigned DiagKind = DestType->isVoidPointerType()
                          ? diag::warn_int_to_void_pointer_cast
                          : diag::warn_int_to_pointer_cast;
  Self.Diag(Loc, DiagKind) << SrcType << DestType;
}
static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType, bool CStyle,
const SourceRange &OpRange,
unsigned &msg,
CastKind &Kind) {
bool IsLValueCast = false;
DestType = Self.Context.getCanonicalType(DestType);
QualType SrcType = SrcExpr.get()->getType();
// Is the source an overloaded name? (i.e. &foo)
// If so, reinterpret_cast can not help us here (13.4, p1, bullet 5) ...
if (SrcType == Self.Context.OverloadTy) {
// ... unless foo<int> resolves to an lvalue unambiguously.
// TODO: what if this fails because of DiagnoseUseOfDecl or something
// like it?
ExprResult SingleFunctionExpr = SrcExpr;
if (Self.ResolveAndFixSingleFunctionTemplateSpecialization(
SingleFunctionExpr,
Expr::getValueKindForType(DestType) == VK_RValue // Convert Fun to Ptr
) && SingleFunctionExpr.isUsable()) {
SrcExpr = SingleFunctionExpr;
SrcType = SrcExpr.get()->getType();
} else {
return TC_NotApplicable;
}
}
if (const ReferenceType *DestTypeTmp = DestType->getAs<ReferenceType>()) {
if (!SrcExpr.get()->isGLValue()) {
// Cannot cast non-glvalue to (lvalue or rvalue) reference type. See the
// similar comment in const_cast.
msg = diag::err_bad_cxx_cast_rvalue;
return TC_NotApplicable;
}
if (!CStyle) {
Self.CheckCompatibleReinterpretCast(SrcType, DestType,
/*isDereference=*/false, OpRange);
}
// C++ 5.2.10p10: [...] a reference cast reinterpret_cast<T&>(x) has the
// same effect as the conversion *reinterpret_cast<T*>(&x) with the
// built-in & and * operators.
const char *inappropriate = nullptr;
switch (SrcExpr.get()->getObjectKind()) {
case OK_Ordinary:
break;
case OK_BitField: inappropriate = "bit-field"; break;
case OK_VectorComponent: inappropriate = "vector element"; break;
case OK_ObjCProperty: inappropriate = "property expression"; break;
case OK_ObjCSubscript: inappropriate = "container subscripting expression";
break;
}
if (inappropriate) {
Self.Diag(OpRange.getBegin(), diag::err_bad_reinterpret_cast_reference)
<< inappropriate << DestType
<< OpRange << SrcExpr.get()->getSourceRange();
msg = 0; SrcExpr = ExprError();
return TC_NotApplicable;
}
// This code does this transformation for the checked types.
DestType = Self.Context.getPointerType(DestTypeTmp->getPointeeType());
SrcType = Self.Context.getPointerType(SrcType);
IsLValueCast = true;
}
// Canonicalize source for comparison.
SrcType = Self.Context.getCanonicalType(SrcType);
const MemberPointerType *DestMemPtr = DestType->getAs<MemberPointerType>(),
*SrcMemPtr = SrcType->getAs<MemberPointerType>();
if (DestMemPtr && SrcMemPtr) {
// C++ 5.2.10p9: An rvalue of type "pointer to member of X of type T1"
// can be explicitly converted to an rvalue of type "pointer to member
// of Y of type T2" if T1 and T2 are both function types or both object
// types.
if (DestMemPtr->isMemberFunctionPointer() !=
SrcMemPtr->isMemberFunctionPointer())
return TC_NotApplicable;
// C++ 5.2.10p2: The reinterpret_cast operator shall not cast away
// constness.
// A reinterpret_cast followed by a const_cast can, though, so in C-style,
// we accept it.
if (CastsAwayConstness(Self, SrcType, DestType, /*CheckCVR=*/!CStyle,
/*CheckObjCLifetime=*/CStyle)) {
msg = diag::err_bad_cxx_cast_qualifiers_away;
return TC_Failed;
}
if (Self.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
// We need to determine the inheritance model that the class will use if
// haven't yet.
Self.RequireCompleteType(OpRange.getBegin(), SrcType, 0);
Self.RequireCompleteType(OpRange.getBegin(), DestType, 0);
}
// Don't allow casting between member pointers of different sizes.
if (Self.Context.getTypeSize(DestMemPtr) !=
Self.Context.getTypeSize(SrcMemPtr)) {
msg = diag::err_bad_cxx_cast_member_pointer_size;
return TC_Failed;
}
// A valid member pointer cast.
assert(!IsLValueCast);
Kind = CK_ReinterpretMemberPointer;
return TC_Success;
}
// See below for the enumeral issue.
if (SrcType->isNullPtrType() && DestType->isIntegralType(Self.Context)) {
// C++0x 5.2.10p4: A pointer can be explicitly converted to any integral
// type large enough to hold it. A value of std::nullptr_t can be
// converted to an integral type; the conversion has the same meaning
// and validity as a conversion of (void*)0 to the integral type.
if (Self.Context.getTypeSize(SrcType) >
Self.Context.getTypeSize(DestType)) {
msg = diag::err_bad_reinterpret_cast_small_int;
return TC_Failed;
}
Kind = CK_PointerToIntegral;
return TC_Success;
}
bool destIsVector = DestType->isVectorType();
bool srcIsVector = SrcType->isVectorType();
if (srcIsVector || destIsVector) {
// FIXME: Should this also apply to floating point types?
bool srcIsScalar = SrcType->isIntegralType(Self.Context);
bool destIsScalar = DestType->isIntegralType(Self.Context);
// Check if this is a cast between a vector and something else.
if (!(srcIsScalar && destIsVector) && !(srcIsVector && destIsScalar) &&
!(srcIsVector && destIsVector))
return TC_NotApplicable;
// If both types have the same size, we can successfully cast.
if (Self.Context.getTypeSize(SrcType)
== Self.Context.getTypeSize(DestType)) {
Kind = CK_BitCast;
return TC_Success;
}
if (destIsScalar)
msg = diag::err_bad_cxx_cast_vector_to_scalar_different_size;
else if (srcIsScalar)
msg = diag::err_bad_cxx_cast_scalar_to_vector_different_size;
else
msg = diag::err_bad_cxx_cast_vector_to_vector_different_size;
return TC_Failed;
}
if (SrcType == DestType) {
// C++ 5.2.10p2 has a note that mentions that, subject to all other
// restrictions, a cast to the same type is allowed so long as it does not
// cast away constness. In C++98, the intent was not entirely clear here,
// since all other paragraphs explicitly forbid casts to the same type.
// C++11 clarifies this case with p2.
//
// The only allowed types are: integral, enumeration, pointer, or
// pointer-to-member types. We also won't restrict Obj-C pointers either.
Kind = CK_NoOp;
TryCastResult Result = TC_NotApplicable;
if (SrcType->isIntegralOrEnumerationType() ||
SrcType->isAnyPointerType() ||
SrcType->isMemberPointerType() ||
SrcType->isBlockPointerType()) {
Result = TC_Success;
}
return Result;
}
bool destIsPtr = DestType->isAnyPointerType() ||
DestType->isBlockPointerType();
bool srcIsPtr = SrcType->isAnyPointerType() ||
SrcType->isBlockPointerType();
if (!destIsPtr && !srcIsPtr) {
// Except for std::nullptr_t->integer and lvalue->reference, which are
// handled above, at least one of the two arguments must be a pointer.
return TC_NotApplicable;
}
if (DestType->isIntegralType(Self.Context)) {
assert(srcIsPtr && "One type must be a pointer");
// C++ 5.2.10p4: A pointer can be explicitly converted to any integral
// type large enough to hold it; except in Microsoft mode, where the
// integral type size doesn't matter (except we don't allow bool).
bool MicrosoftException = Self.getLangOpts().MicrosoftExt &&
!DestType->isBooleanType();
if ((Self.Context.getTypeSize(SrcType) >
Self.Context.getTypeSize(DestType)) &&
!MicrosoftException) {
msg = diag::err_bad_reinterpret_cast_small_int;
return TC_Failed;
}
Kind = CK_PointerToIntegral;
return TC_Success;
}
if (SrcType->isIntegralOrEnumerationType()) {
assert(destIsPtr && "One type must be a pointer");
checkIntToPointerCast(CStyle, OpRange.getBegin(), SrcExpr.get(), DestType,
Self);
// C++ 5.2.10p5: A value of integral or enumeration type can be explicitly
// converted to a pointer.
// C++ 5.2.10p9: [Note: ...a null pointer constant of integral type is not
// necessarily converted to a null pointer value.]
Kind = CK_IntegralToPointer;
return TC_Success;
}
if (!destIsPtr || !srcIsPtr) {
// With the valid non-pointer conversions out of the way, we can be even
// more stringent.
return TC_NotApplicable;
}
// C++ 5.2.10p2: The reinterpret_cast operator shall not cast away constness.
// The C-style cast operator can.
if (CastsAwayConstness(Self, SrcType, DestType, /*CheckCVR=*/!CStyle,
/*CheckObjCLifetime=*/CStyle)) {
msg = diag::err_bad_cxx_cast_qualifiers_away;
return TC_Failed;
}
// Cannot convert between block pointers and Objective-C object pointers.
if ((SrcType->isBlockPointerType() && DestType->isObjCObjectPointerType()) ||
(DestType->isBlockPointerType() && SrcType->isObjCObjectPointerType()))
return TC_NotApplicable;
if (IsLValueCast) {
Kind = CK_LValueBitCast;
} else if (DestType->isObjCObjectPointerType()) {
Kind = Self.PrepareCastToObjCObjectPointer(SrcExpr);
} else if (DestType->isBlockPointerType()) {
if (!SrcType->isBlockPointerType()) {
Kind = CK_AnyPointerToBlockPointerCast;
} else {
Kind = CK_BitCast;
}
} else {
Kind = CK_BitCast;
}
// Any pointer can be cast to an Objective-C pointer type with a C-style
// cast.
if (CStyle && DestType->isObjCObjectPointerType()) {
return TC_Success;
}
if (CStyle)
DiagnoseCastOfObjCSEL(Self, SrcExpr, DestType);
// Not casting away constness, so the only remaining check is for compatible
// pointer categories.
if (SrcType->isFunctionPointerType()) {
if (DestType->isFunctionPointerType()) {
// C++ 5.2.10p6: A pointer to a function can be explicitly converted to
// a pointer to a function of a different type.
return TC_Success;
}
// C++0x 5.2.10p8: Converting a pointer to a function into a pointer to
// an object type or vice versa is conditionally-supported.
// Compilers support it in C++03 too, though, because it's necessary for
// casting the return value of dlsym() and GetProcAddress().
// FIXME: Conditionally-supported behavior should be configurable in the
// TargetInfo or similar.
Self.Diag(OpRange.getBegin(),
Self.getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_cast_fn_obj : diag::ext_cast_fn_obj)
<< OpRange;
return TC_Success;
}
if (DestType->isFunctionPointerType()) {
// See above.
Self.Diag(OpRange.getBegin(),
Self.getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_cast_fn_obj : diag::ext_cast_fn_obj)
<< OpRange;
return TC_Success;
}
// C++ 5.2.10p7: A pointer to an object can be explicitly converted to
// a pointer to an object of different type.
// Void pointers are not specified, but supported by every compiler out there.
// So we finish by allowing everything that remains - it's got to be two
// object pointers.
return TC_Success;
}
/// Check the semantics of a C-style or functional-style cast in C++.
/// Per C++ [expr.cast]p5 the named casts are tried in order: const_cast,
/// then static_cast (ignoring const), then reinterpret_cast (ignoring
/// const). On failure a diagnostic is emitted and SrcExpr is set to
/// ExprError().
///
/// \param FunctionalStyle true for T(expr) notation, false for (T)expr.
/// \param ListInitialization true when the operand is a braced init list.
void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
                                       bool ListInitialization) {
  // Handle placeholders.
  if (isPlaceholder()) {
    // C-style casts can resolve __unknown_any types.
    if (claimPlaceholder(BuiltinType::UnknownAny)) {
      SrcExpr = Self.checkUnknownAnyCast(DestRange, DestType,
                                         SrcExpr.get(), Kind,
                                         ValueKind, BasePath);
      return;
    }

    checkNonOverloadPlaceholders();
    if (SrcExpr.isInvalid())
      return;
  }

  // C++ 5.2.9p4: Any expression can be explicitly converted to type "cv void".
  // This test is outside everything else because it's the only case where
  // a non-lvalue-reference target type does not lead to decay.
  if (DestType->isVoidType()) {
    Kind = CK_ToVoid;

    // An overload set cast to void must resolve to a unique specialization.
    if (claimPlaceholder(BuiltinType::Overload)) {
      Self.ResolveAndFixSingleFunctionTemplateSpecialization(
                  SrcExpr, /* Decay Function to ptr */ false,
                  /* Complain */ true, DestRange, DestType,
                  diag::err_bad_cstyle_cast_overload);
      if (SrcExpr.isInvalid())
        return;
    }

    SrcExpr = Self.IgnoredValueConversions(SrcExpr.get());
    return;
  }

  // If the type is dependent, we won't do any other semantic analysis now.
  if (DestType->isDependentType() || SrcExpr.get()->isTypeDependent() ||
      SrcExpr.get()->isValueDependent()) {
    assert(Kind == CK_Dependent);
    return;
  }

  // HLSL Change Starts
  // Check for HLSL vector/matrix/array/struct shrinking.
  if (ValueKind == VK_RValue &&
      !FunctionalStyle &&
      !isPlaceholder(BuiltinType::Overload) &&
      Self.getLangOpts().HLSL &&
      SrcExpr.get()->isLValue() &&
      // Cannot use casts on basic type l-values
      !SrcExpr.get()->getType().getCanonicalType()->isBuiltinType() &&
      hlsl::IsConversionToLessOrEqualElements(&Self, SrcExpr, DestType, true)) {
    // A shrinking cast on an aggregate lvalue keeps the result an lvalue,
    // which also suppresses the lvalue-to-rvalue decay below.
    ValueKind = VK_LValue;
  }
  // HLSL Change Ends

  if (ValueKind == VK_RValue && !DestType->isRecordType() &&
      !isPlaceholder(BuiltinType::Overload)) {
    SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
    if (SrcExpr.isInvalid())
      return;
  }

  // AltiVec vector initialization with a single literal.
  if (const VectorType *vecTy = DestType->getAs<VectorType>())
    if (vecTy->getVectorKind() == VectorType::AltiVecVector
        && (SrcExpr.get()->getType()->isIntegerType()
            || SrcExpr.get()->getType()->isFloatingType())) {
      Kind = CK_VectorSplat;
      return;
    }

  // C++ [expr.cast]p5: The conversions performed by
  //   - a const_cast,
  //   - a static_cast,
  //   - a static_cast followed by a const_cast,
  //   - a reinterpret_cast, or
  //   - a reinterpret_cast followed by a const_cast,
  //   can be performed using the cast notation of explicit type conversion.
  //   [...] If a conversion can be interpreted in more than one of the ways
  //   listed above, the interpretation that appears first in the list is used,
  //   even if a cast resulting from that interpretation is ill-formed.
  // In plain language, this means trying a const_cast ...
  unsigned msg = diag::err_bad_cxx_cast_generic;
  TryCastResult tcr = TryConstCast(Self, SrcExpr, DestType,
                                   /*CStyle*/true, msg);
  if (SrcExpr.isInvalid())
    return;
  if (tcr == TC_Success)
    Kind = CK_NoOp;

  Sema::CheckedConversionKind CCK
    = FunctionalStyle? Sema::CCK_FunctionalCast
                     : Sema::CCK_CStyleCast;
  if (tcr == TC_NotApplicable) {
    // ... or if that is not possible, a static_cast, ignoring const, ...
    tcr = TryStaticCast(Self, SrcExpr, DestType, CCK, OpRange,
                        msg, Kind, BasePath, ListInitialization);
    if (SrcExpr.isInvalid())
      return;

    if (tcr == TC_NotApplicable) {
      // ... and finally a reinterpret_cast, ignoring const.
      tcr = TryReinterpretCast(Self, SrcExpr, DestType, /*CStyle*/true,
                               OpRange, msg, Kind);
      if (SrcExpr.isInvalid())
        return;
    }
  }

  if (Self.getLangOpts().ObjCAutoRefCount && tcr == TC_Success)
    checkObjCARCConversion(CCK);

  // None of the named casts applied: emit the most specific diagnostic
  // (msg) the failed attempts recorded, or the overload-set variant.
  if (tcr != TC_Success && msg != 0) {
    if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
      DeclAccessPair Found;
      FunctionDecl *Fn = Self.ResolveAddressOfOverloadedFunction(SrcExpr.get(),
                                DestType,
                                /*Complain*/ true,
                                Found);
      if (Fn) {
        // If DestType is a function type (not to be confused with the function
        // pointer type), it will be possible to resolve the function address,
        // but the type cast should be considered as failure.
        OverloadExpr *OE = OverloadExpr::find(SrcExpr.get()).Expression;
        Self.Diag(OpRange.getBegin(), diag::err_bad_cstyle_cast_overload)
          << OE->getName() << DestType << OpRange
          << OE->getQualifierLoc().getSourceRange();
        Self.NoteAllOverloadCandidates(SrcExpr.get());
      }
    } else {
      diagnoseBadCast(Self, msg, (FunctionalStyle ? CT_Functional : CT_CStyle),
                      OpRange, SrcExpr.get(), DestType, ListInitialization);
    }
  } else if (Kind == CK_BitCast) {
    checkCastAlign();
  }

  // Clear out SrcExpr if there was a fatal error.
  if (tcr != TC_Success)
    SrcExpr = ExprError();
}
/// DiagnoseBadFunctionCast - Warn whenever a function call is cast to a
/// non-matching type. Such as enum function call to int, int call to
/// pointer; etc. Cast to 'void' is an exception.
static void DiagnoseBadFunctionCast(Sema &Self, const ExprResult &SrcExpr,
                                    QualType DestType) {
  const Expr *Src = SrcExpr.get();
  if (Self.Diags.isIgnored(diag::warn_bad_function_cast, Src->getExprLoc()))
    return;

  // Only the result of a function call is of interest here.
  if (!isa<CallExpr>(Src))
    return;

  QualType SrcType = Src->getType();

  // Casting a call result to (cv) void is always acceptable.
  if (DestType.getUnqualifiedType()->isVoidType())
    return;

  // Pointer-ish to pointer-ish counts as a matching category.
  bool SrcIsPtrLike = SrcType->isAnyPointerType() ||
                      SrcType->isBlockPointerType();
  bool DestIsPtrLike = DestType->isAnyPointerType() ||
                       DestType->isBlockPointerType();
  if (SrcIsPtrLike && DestIsPtrLike)
    return;

  // Integer to integer is fine as long as bool-ness and enum-ness agree.
  if (SrcType->isIntegerType() && DestType->isIntegerType() &&
      SrcType->isBooleanType() == DestType->isBooleanType() &&
      SrcType->isEnumeralType() == DestType->isEnumeralType())
    return;

  // Remaining same-category pairs are also fine.
  if (SrcType->isRealFloatingType() && DestType->isRealFloatingType())
    return;
  if (SrcType->isEnumeralType() && DestType->isEnumeralType())
    return;
  if (SrcType->isComplexType() && DestType->isComplexType())
    return;
  if (SrcType->isComplexIntegerType() && DestType->isComplexIntegerType())
    return;

  // Everything else is a suspicious cast of a call result.
  Self.Diag(Src->getExprLoc(), diag::warn_bad_function_cast)
      << SrcType << DestType << Src->getSourceRange();
}
/// Check the semantics of a C-style cast operation, in C.
/// Validates the cast per C99 6.5.4 (cast to void, scalar-to-scalar, and
/// vector casts) plus the GCC struct/union extensions, OpenCL address-space
/// rules, and ARC ownership restrictions. On failure a diagnostic is
/// emitted and SrcExpr is set to ExprError().
void CastOperation::CheckCStyleCast() {
  assert(!Self.getLangOpts().CPlusPlus);

  // C-style casts can resolve __unknown_any types.
  if (claimPlaceholder(BuiltinType::UnknownAny)) {
    SrcExpr = Self.checkUnknownAnyCast(DestRange, DestType,
                                       SrcExpr.get(), Kind,
                                       ValueKind, BasePath);
    return;
  }

  // C99 6.5.4p2: the cast type needs to be void or scalar and the expression
  // type needs to be scalar.
  if (DestType->isVoidType()) {
    // We don't necessarily do lvalue-to-rvalue conversions on this.
    SrcExpr = Self.IgnoredValueConversions(SrcExpr.get());
    if (SrcExpr.isInvalid())
      return;

    // Cast to void allows any expr type.
    Kind = CK_ToVoid;
    return;
  }

  SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
  if (SrcExpr.isInvalid())
    return;
  QualType SrcType = SrcExpr.get()->getType();

  // Placeholders were handled above or by the conversion.
  assert(!SrcType->isPlaceholderType());

  // OpenCL v1 s6.5: Casting a pointer to address space A to a pointer to
  // address space B is illegal.
  if (Self.getLangOpts().OpenCL && DestType->isPointerType() &&
      SrcType->isPointerType()) {
    const PointerType *DestPtr = DestType->getAs<PointerType>();
    if (!DestPtr->isAddressSpaceOverlapping(*SrcType->getAs<PointerType>())) {
      Self.Diag(OpRange.getBegin(),
                diag::err_typecheck_incompatible_address_space)
          << SrcType << DestType << Sema::AA_Casting
          << SrcExpr.get()->getSourceRange();
      SrcExpr = ExprError();
      return;
    }
  }

  if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
                               diag::err_typecheck_cast_to_incomplete)) {
    SrcExpr = ExprError();
    return;
  }

  if (!DestType->isScalarType() && !DestType->isVectorType()) {
    const RecordType *DestRecordTy = DestType->getAs<RecordType>();

    if (DestRecordTy && Self.Context.hasSameUnqualifiedType(DestType, SrcType)){
      // GCC struct/union extension: allow cast to self.
      Self.Diag(OpRange.getBegin(), diag::ext_typecheck_cast_nonscalar)
        << DestType << SrcExpr.get()->getSourceRange();
      Kind = CK_NoOp;
      return;
    }

    // GCC's cast to union extension.
    if (DestRecordTy && DestRecordTy->getDecl()->isUnion()) {
      RecordDecl *RD = DestRecordTy->getDecl();
      RecordDecl::field_iterator Field, FieldEnd;
      // Find a named union member whose type matches the source expression.
      for (Field = RD->field_begin(), FieldEnd = RD->field_end();
           Field != FieldEnd; ++Field) {
        if (Self.Context.hasSameUnqualifiedType(Field->getType(), SrcType) &&
            !Field->isUnnamedBitfield()) {
          Self.Diag(OpRange.getBegin(), diag::ext_typecheck_cast_to_union)
            << SrcExpr.get()->getSourceRange();
          break;
        }
      }
      if (Field == FieldEnd) {
        Self.Diag(OpRange.getBegin(), diag::err_typecheck_cast_to_union_no_type)
          << SrcType << SrcExpr.get()->getSourceRange();
        SrcExpr = ExprError();
        return;
      }
      Kind = CK_ToUnion;
      return;
    }

    // Reject any other conversions to non-scalar types.
    Self.Diag(OpRange.getBegin(), diag::err_typecheck_cond_expect_scalar)
      << DestType << SrcExpr.get()->getSourceRange();
    SrcExpr = ExprError();
    return;
  }

  // The type we're casting to is known to be a scalar or vector.

  // Require the operand to be a scalar or vector.
  if (!SrcType->isScalarType() && !SrcType->isVectorType()) {
    Self.Diag(SrcExpr.get()->getExprLoc(),
              diag::err_typecheck_expect_scalar_operand)
      << SrcType << SrcExpr.get()->getSourceRange();
    SrcExpr = ExprError();
    return;
  }

  if (DestType->isExtVectorType()) {
    SrcExpr = Self.CheckExtVectorCast(OpRange, DestType, SrcExpr.get(), Kind);
    return;
  }

  if (const VectorType *DestVecTy = DestType->getAs<VectorType>()) {
    // AltiVec: a scalar literal splats across the vector lanes.
    if (DestVecTy->getVectorKind() == VectorType::AltiVecVector &&
          (SrcType->isIntegerType() || SrcType->isFloatingType())) {
      Kind = CK_VectorSplat;
    } else if (Self.CheckVectorCast(OpRange, DestType, SrcType, Kind)) {
      SrcExpr = ExprError();
    }
    return;
  }

  if (SrcType->isVectorType()) {
    if (Self.CheckVectorCast(OpRange, SrcType, DestType, Kind))
      SrcExpr = ExprError();
    return;
  }

  // The source and target types are both scalars, i.e.
  //   - arithmetic types (fundamental, enum, and complex)
  //   - all kinds of pointers
  // Note that member pointers were filtered out with C++, above.

  if (isa<ObjCSelectorExpr>(SrcExpr.get())) {
    Self.Diag(SrcExpr.get()->getExprLoc(), diag::err_cast_selector_expr);
    SrcExpr = ExprError();
    return;
  }

  // If either type is a pointer, the other type has to be either an
  // integer or a pointer.
  if (!DestType->isArithmeticType()) {
    if (!SrcType->isIntegralType(Self.Context) && SrcType->isArithmeticType()) {
      Self.Diag(SrcExpr.get()->getExprLoc(),
                diag::err_cast_pointer_from_non_pointer_int)
        << SrcType << SrcExpr.get()->getSourceRange();
      SrcExpr = ExprError();
      return;
    }
    checkIntToPointerCast(/* CStyle */ true, OpRange.getBegin(), SrcExpr.get(),
                          DestType, Self);
  } else if (!SrcType->isArithmeticType()) {
    if (!DestType->isIntegralType(Self.Context) &&
        DestType->isArithmeticType()) {
      Self.Diag(SrcExpr.get()->getLocStart(),
                diag::err_cast_pointer_to_non_pointer_int)
        << DestType << SrcExpr.get()->getSourceRange();
      SrcExpr = ExprError();
      return;
    }
  }

  // OpenCL: casting to half requires the cl_khr_fp16 extension.
  if (Self.getLangOpts().OpenCL && !Self.getOpenCLOptions().cl_khr_fp16) {
    if (DestType->isHalfType()) {
      Self.Diag(SrcExpr.get()->getLocStart(), diag::err_opencl_cast_to_half)
        << DestType << SrcExpr.get()->getSourceRange();
      SrcExpr = ExprError();
      return;
    }
  }

  // ARC imposes extra restrictions on casts.
  if (Self.getLangOpts().ObjCAutoRefCount) {
    checkObjCARCConversion(Sema::CCK_CStyleCast);
    if (SrcExpr.isInvalid())
      return;

    // Reject pointer casts whose ownership qualifiers are incompatible.
    if (const PointerType *CastPtr = DestType->getAs<PointerType>()) {
      if (const PointerType *ExprPtr = SrcType->getAs<PointerType>()) {
        Qualifiers CastQuals = CastPtr->getPointeeType().getQualifiers();
        Qualifiers ExprQuals = ExprPtr->getPointeeType().getQualifiers();
        if (CastPtr->getPointeeType()->isObjCLifetimeType() &&
            ExprPtr->getPointeeType()->isObjCLifetimeType() &&
            !CastQuals.compatiblyIncludesObjCLifetime(ExprQuals)) {
          Self.Diag(SrcExpr.get()->getLocStart(),
                    diag::err_typecheck_incompatible_ownership)
            << SrcType << DestType << Sema::AA_Casting
            << SrcExpr.get()->getSourceRange();
          return;
        }
      }
    }
    else if (!Self.CheckObjCARCUnavailableWeakConversion(DestType, SrcType)) {
      Self.Diag(SrcExpr.get()->getLocStart(),
                diag::err_arc_convesion_of_weak_unavailable)
        << 1 << SrcType << DestType << SrcExpr.get()->getSourceRange();
      SrcExpr = ExprError();
      return;
    }
  }

  DiagnoseCastOfObjCSEL(Self, SrcExpr, DestType);
  DiagnoseBadFunctionCast(Self, SrcExpr, DestType);
  Kind = Self.PrepareScalarCast(SrcExpr, DestType);

  if (SrcExpr.isInvalid())
    return;

  if (Kind == CK_BitCast)
    checkCastAlign();

  // -Wcast-qual
  QualType TheOffendingSrcType, TheOffendingDestType;
  Qualifiers CastAwayQualifiers;
  if (SrcType->isAnyPointerType() && DestType->isAnyPointerType() &&
      CastsAwayConstness(Self, SrcType, DestType, true, false,
                         &TheOffendingSrcType, &TheOffendingDestType,
                         &CastAwayQualifiers)) {
    // Encode which qualifiers were dropped for the diagnostic:
    // 0 = const and volatile, 1 = const only, 2 = volatile only,
    // -1 = neither directly (the nested-pointer variant below).
    int qualifiers = -1;
    if (CastAwayQualifiers.hasConst() && CastAwayQualifiers.hasVolatile()) {
      qualifiers = 0;
    } else if (CastAwayQualifiers.hasConst()) {
      qualifiers = 1;
    } else if (CastAwayQualifiers.hasVolatile()) {
      qualifiers = 2;
    }
    // This is a variant of int **x; const int **y = (const int **)x;
    if (qualifiers == -1)
      Self.Diag(SrcExpr.get()->getLocStart(), diag::warn_cast_qual2) <<
        SrcType << DestType;
    else
      Self.Diag(SrcExpr.get()->getLocStart(), diag::warn_cast_qual) <<
        TheOffendingSrcType << TheOffendingDestType << qualifiers;
  }
}
/// Build a C-style cast expression '(T)expr', dispatching to the C or C++
/// semantic checks as appropriate for the current language mode.
ExprResult Sema::BuildCStyleCastExpr(SourceLocation LPLoc,
                                     TypeSourceInfo *CastTypeInfo,
                                     SourceLocation RPLoc,
                                     Expr *CastExpr) {
  // Set up the cast operation over the written destination type.
  CastOperation Op(*this, CastTypeInfo->getType(), CastExpr);
  Op.DestRange = CastTypeInfo->getTypeLoc().getSourceRange();
  Op.OpRange = SourceRange(LPLoc, CastExpr->getLocEnd());

  // Run the language-appropriate semantic check.
  if (!getLangOpts().CPlusPlus)
    Op.CheckCStyleCast();
  else
    Op.CheckCXXCStyleCast(/*FunctionalStyle=*/false,
                          /*ListInitialization=*/isa<InitListExpr>(CastExpr));

  if (Op.SrcExpr.isInvalid())
    return ExprError();

  CStyleCastExpr *CE = CStyleCastExpr::Create(
      Context, Op.ResultType, Op.ValueKind, Op.Kind, Op.SrcExpr.get(),
      &Op.BasePath, CastTypeInfo, LPLoc, RPLoc);
  return Op.complete(CE);
}
/// Build a functional-style cast expression 'T(expr)'.
ExprResult Sema::BuildCXXFunctionalCastExpr(TypeSourceInfo *CastTypeInfo,
                                            SourceLocation LPLoc,
                                            Expr *CastExpr,
                                            SourceLocation RPLoc) {
  assert(LPLoc.isValid() && "List-initialization shouldn't get here.");
  CastOperation Op(*this, CastTypeInfo->getType(), CastExpr);
  Op.DestRange = CastTypeInfo->getTypeLoc().getSourceRange();
  Op.OpRange = SourceRange(Op.DestRange.getBegin(), CastExpr->getLocEnd());

  Op.CheckCXXCStyleCast(/*FunctionalStyle=*/true, /*ListInit=*/false);
  if (Op.SrcExpr.isInvalid())
    return ExprError();

  // If the cast resolved to a constructor call, record the paren range on it.
  if (auto *ConstructExpr = dyn_cast<CXXConstructExpr>(Op.SrcExpr.get()))
    ConstructExpr->setParenOrBraceRange(SourceRange(LPLoc, RPLoc));

  CXXFunctionalCastExpr *CE = CXXFunctionalCastExpr::Create(
      Context, Op.ResultType, Op.ValueKind, CastTypeInfo, Op.Kind,
      Op.SrcExpr.get(), &Op.BasePath, LPLoc, RPLoc);
  return Op.complete(CE);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaStmtAsm.cpp | //===--- SemaStmtAsm.cpp - Semantic Analysis for Asm Statements -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements semantic analysis for inline asm statements.
//
//===----------------------------------------------------------------------===//
#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
using namespace clang;
using namespace sema;
/// CheckAsmLValue - GNU C has an extremely ugly extension whereby they silently
/// ignore "noop" casts in places where an lvalue is required by an inline asm.
/// We emulate this behavior when -fheinous-gnu-extensions is specified, but
/// provide a strong guidance to not use it.
///
/// This method checks to see if the argument is an acceptable l-value and
/// returns false if it is a case we can handle.
static bool CheckAsmLValue(const Expr *E, Sema &S) {
// Type dependent expressions will be checked during instantiation.
if (E->isTypeDependent())
return false;
if (E->isLValue())
return false; // Cool, this is an lvalue.
// Okay, this is not an lvalue, but perhaps it is the result of a cast that we
// are supposed to allow.
const Expr *E2 = E->IgnoreParenNoopCasts(S.Context);
if (E != E2 && E2->isLValue()) {
if (!S.getLangOpts().HeinousExtensions)
S.Diag(E2->getLocStart(), diag::err_invalid_asm_cast_lvalue)
<< E->getSourceRange();
else
S.Diag(E2->getLocStart(), diag::warn_invalid_asm_cast_lvalue)
<< E->getSourceRange();
// Accept, even if we emitted an error diagnostic.
return false;
}
// None of the above, just randomly invalid non-lvalue.
return true;
}
/// isOperandMentioned - Return true if the specified operand # is mentioned
/// anywhere in the decomposed asm string.
static bool isOperandMentioned(unsigned OpNo,
ArrayRef<GCCAsmStmt::AsmStringPiece> AsmStrPieces) {
for (unsigned p = 0, e = AsmStrPieces.size(); p != e; ++p) {
const GCCAsmStmt::AsmStringPiece &Piece = AsmStrPieces[p];
if (!Piece.isOperand()) continue;
// If this is a reference to the input and if the input was the smaller
// one, then we have to reject this asm.
if (Piece.getOperandNo() == OpNo)
return true;
}
return false;
}
/// Returns true (and emits diagnostics) if the given expression refers to a
/// function parameter or 'this' from inside a function marked with the
/// 'naked' attribute; such functions have no prologue, so neither is
/// materialized and referencing them is an error.
static bool CheckNakedParmReference(Expr *E, Sema &S) {
  FunctionDecl *Func = dyn_cast<FunctionDecl>(S.CurContext);
  if (!Func)
    return false;
  if (!Func->hasAttr<NakedAttr>())
    return false;

  // Iteratively walk the whole expression tree looking for offending
  // references. (Use !empty() rather than size() for the loop condition,
  // and avoid shadowing the parameter E inside the loop.)
  SmallVector<Expr*, 4> WorkList;
  WorkList.push_back(E);
  while (!WorkList.empty()) {
    Expr *Cur = WorkList.pop_back_val();
    if (isa<CXXThisExpr>(Cur)) {
      S.Diag(Cur->getLocStart(), diag::err_asm_naked_this_ref);
      S.Diag(Func->getAttr<NakedAttr>()->getLocation(), diag::note_attribute);
      return true;
    }
    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cur)) {
      if (isa<ParmVarDecl>(DRE->getDecl())) {
        S.Diag(DRE->getLocStart(), diag::err_asm_naked_parm_ref);
        S.Diag(Func->getAttr<NakedAttr>()->getLocation(),
               diag::note_attribute);
        return true;
      }
    }
    for (Stmt *Child : Cur->children()) {
      if (Expr *ChildE = dyn_cast_or_null<Expr>(Child))
        WorkList.push_back(ChildE);
    }
  }
  return false;
}
StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg constraints, MultiExprArg Exprs,
Expr *asmString, MultiExprArg clobbers,
SourceLocation RParenLoc) {
unsigned NumClobbers = clobbers.size();
StringLiteral **Constraints =
reinterpret_cast<StringLiteral**>(constraints.data());
StringLiteral *AsmString = cast<StringLiteral>(asmString);
StringLiteral **Clobbers = reinterpret_cast<StringLiteral**>(clobbers.data());
SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
// The parser verifies that there is a string literal here.
assert(AsmString->isAscii());
bool ValidateConstraints =
DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl());
for (unsigned i = 0; i != NumOutputs; i++) {
StringLiteral *Literal = Constraints[i];
assert(Literal->isAscii());
StringRef OutputName;
if (Names[i])
OutputName = Names[i]->getName();
TargetInfo::ConstraintInfo Info(Literal->getString(), OutputName);
if (ValidateConstraints &&
!Context.getTargetInfo().validateOutputConstraint(Info))
return StmtError(Diag(Literal->getLocStart(),
diag::err_asm_invalid_output_constraint)
<< Info.getConstraintStr());
ExprResult ER = CheckPlaceholderExpr(Exprs[i]);
if (ER.isInvalid())
return StmtError();
Exprs[i] = ER.get();
// Check that the output exprs are valid lvalues.
Expr *OutputExpr = Exprs[i];
// Referring to parameters is not allowed in naked functions.
if (CheckNakedParmReference(OutputExpr, *this))
return StmtError();
// Bitfield can't be referenced with a pointer.
if (Info.allowsMemory() && OutputExpr->refersToBitField())
return StmtError(Diag(OutputExpr->getLocStart(),
diag::err_asm_bitfield_in_memory_constraint)
<< 1
<< Info.getConstraintStr()
<< OutputExpr->getSourceRange());
OutputConstraintInfos.push_back(Info);
// If this is dependent, just continue.
if (OutputExpr->isTypeDependent())
continue;
Expr::isModifiableLvalueResult IsLV =
OutputExpr->isModifiableLvalue(Context, /*Loc=*/nullptr);
switch (IsLV) {
case Expr::MLV_Valid:
// Cool, this is an lvalue.
break;
case Expr::MLV_ArrayType:
// This is OK too.
break;
case Expr::MLV_LValueCast: {
const Expr *LVal = OutputExpr->IgnoreParenNoopCasts(Context);
if (!getLangOpts().HeinousExtensions) {
Diag(LVal->getLocStart(), diag::err_invalid_asm_cast_lvalue)
<< OutputExpr->getSourceRange();
} else {
Diag(LVal->getLocStart(), diag::warn_invalid_asm_cast_lvalue)
<< OutputExpr->getSourceRange();
}
// Accept, even if we emitted an error diagnostic.
break;
}
case Expr::MLV_IncompleteType:
case Expr::MLV_IncompleteVoidType:
if (RequireCompleteType(OutputExpr->getLocStart(), Exprs[i]->getType(),
diag::err_dereference_incomplete_type))
return StmtError();
LLVM_FALLTHROUGH; // HLSL Change
default:
return StmtError(Diag(OutputExpr->getLocStart(),
diag::err_asm_invalid_lvalue_in_output)
<< OutputExpr->getSourceRange());
}
unsigned Size = Context.getTypeSize(OutputExpr->getType());
if (!Context.getTargetInfo().validateOutputSize(Literal->getString(),
Size))
return StmtError(Diag(OutputExpr->getLocStart(),
diag::err_asm_invalid_output_size)
<< Info.getConstraintStr());
}
SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
for (unsigned i = NumOutputs, e = NumOutputs + NumInputs; i != e; i++) {
StringLiteral *Literal = Constraints[i];
assert(Literal->isAscii());
StringRef InputName;
if (Names[i])
InputName = Names[i]->getName();
TargetInfo::ConstraintInfo Info(Literal->getString(), InputName);
if (ValidateConstraints &&
!Context.getTargetInfo().validateInputConstraint(
OutputConstraintInfos.data(), NumOutputs, Info)) {
return StmtError(Diag(Literal->getLocStart(),
diag::err_asm_invalid_input_constraint)
<< Info.getConstraintStr());
}
ExprResult ER = CheckPlaceholderExpr(Exprs[i]);
if (ER.isInvalid())
return StmtError();
Exprs[i] = ER.get();
Expr *InputExpr = Exprs[i];
// Referring to parameters is not allowed in naked functions.
if (CheckNakedParmReference(InputExpr, *this))
return StmtError();
// Bitfield can't be referenced with a pointer.
if (Info.allowsMemory() && InputExpr->refersToBitField())
return StmtError(Diag(InputExpr->getLocStart(),
diag::err_asm_bitfield_in_memory_constraint)
<< 0
<< Info.getConstraintStr()
<< InputExpr->getSourceRange());
// Only allow void types for memory constraints.
if (Info.allowsMemory() && !Info.allowsRegister()) {
if (CheckAsmLValue(InputExpr, *this))
return StmtError(Diag(InputExpr->getLocStart(),
diag::err_asm_invalid_lvalue_in_input)
<< Info.getConstraintStr()
<< InputExpr->getSourceRange());
} else if (Info.requiresImmediateConstant() && !Info.allowsRegister()) {
if (!InputExpr->isValueDependent()) {
llvm::APSInt Result;
if (!InputExpr->EvaluateAsInt(Result, Context))
return StmtError(
Diag(InputExpr->getLocStart(), diag::err_asm_immediate_expected)
<< Info.getConstraintStr() << InputExpr->getSourceRange());
if (Result.slt(Info.getImmConstantMin()) ||
Result.sgt(Info.getImmConstantMax()))
return StmtError(Diag(InputExpr->getLocStart(),
diag::err_invalid_asm_value_for_constraint)
<< Result.toString(10) << Info.getConstraintStr()
<< InputExpr->getSourceRange());
}
} else {
ExprResult Result = DefaultFunctionArrayLvalueConversion(Exprs[i]);
if (Result.isInvalid())
return StmtError();
Exprs[i] = Result.get();
}
if (Info.allowsRegister()) {
if (InputExpr->getType()->isVoidType()) {
return StmtError(Diag(InputExpr->getLocStart(),
diag::err_asm_invalid_type_in_input)
<< InputExpr->getType() << Info.getConstraintStr()
<< InputExpr->getSourceRange());
}
}
InputConstraintInfos.push_back(Info);
const Type *Ty = Exprs[i]->getType().getTypePtr();
if (Ty->isDependentType())
continue;
if (!Ty->isVoidType() || !Info.allowsMemory())
if (RequireCompleteType(InputExpr->getLocStart(), Exprs[i]->getType(),
diag::err_dereference_incomplete_type))
return StmtError();
unsigned Size = Context.getTypeSize(Ty);
if (!Context.getTargetInfo().validateInputSize(Literal->getString(),
Size))
return StmtError(Diag(InputExpr->getLocStart(),
diag::err_asm_invalid_input_size)
<< Info.getConstraintStr());
}
// Check that the clobbers are valid.
for (unsigned i = 0; i != NumClobbers; i++) {
StringLiteral *Literal = Clobbers[i];
assert(Literal->isAscii());
StringRef Clobber = Literal->getString();
if (!Context.getTargetInfo().isValidClobber(Clobber))
return StmtError(Diag(Literal->getLocStart(),
diag::err_asm_unknown_register_name) << Clobber);
}
GCCAsmStmt *NS =
new (Context) GCCAsmStmt(Context, AsmLoc, IsSimple, IsVolatile, NumOutputs,
NumInputs, Names, Constraints, Exprs.data(),
AsmString, NumClobbers, Clobbers, RParenLoc);
// Validate the asm string, ensuring it makes sense given the operands we
// have.
SmallVector<GCCAsmStmt::AsmStringPiece, 8> Pieces;
unsigned DiagOffs;
if (unsigned DiagID = NS->AnalyzeAsmString(Pieces, Context, DiagOffs)) {
Diag(getLocationOfStringLiteralByte(AsmString, DiagOffs), DiagID)
<< AsmString->getSourceRange();
return StmtError();
}
// Validate constraints and modifiers.
for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
GCCAsmStmt::AsmStringPiece &Piece = Pieces[i];
if (!Piece.isOperand()) continue;
// Look for the correct constraint index.
unsigned ConstraintIdx = Piece.getOperandNo();
unsigned NumOperands = NS->getNumOutputs() + NS->getNumInputs();
// Look for the (ConstraintIdx - NumOperands + 1)th constraint with
// modifier '+'.
if (ConstraintIdx >= NumOperands) {
unsigned I = 0, E = NS->getNumOutputs();
for (unsigned Cnt = ConstraintIdx - NumOperands; I != E; ++I)
if (OutputConstraintInfos[I].isReadWrite() && Cnt-- == 0) {
ConstraintIdx = I;
break;
}
assert(I != E && "Invalid operand number should have been caught in "
" AnalyzeAsmString");
}
// Now that we have the right indexes go ahead and check.
StringLiteral *Literal = Constraints[ConstraintIdx];
const Type *Ty = Exprs[ConstraintIdx]->getType().getTypePtr();
if (Ty->isDependentType() || Ty->isIncompleteType())
continue;
unsigned Size = Context.getTypeSize(Ty);
std::string SuggestedModifier;
if (!Context.getTargetInfo().validateConstraintModifier(
Literal->getString(), Piece.getModifier(), Size,
SuggestedModifier)) {
Diag(Exprs[ConstraintIdx]->getLocStart(),
diag::warn_asm_mismatched_size_modifier);
if (!SuggestedModifier.empty()) {
auto B = Diag(Piece.getRange().getBegin(),
diag::note_asm_missing_constraint_modifier)
<< SuggestedModifier;
SuggestedModifier = "%" + SuggestedModifier + Piece.getString();
B.AddFixItHint(FixItHint::CreateReplacement(Piece.getRange(),
SuggestedModifier));
}
}
}
// Validate tied input operands for type mismatches.
unsigned NumAlternatives = ~0U;
for (unsigned i = 0, e = OutputConstraintInfos.size(); i != e; ++i) {
TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
StringRef ConstraintStr = Info.getConstraintStr();
unsigned AltCount = ConstraintStr.count(',') + 1;
if (NumAlternatives == ~0U)
NumAlternatives = AltCount;
else if (NumAlternatives != AltCount)
return StmtError(Diag(NS->getOutputExpr(i)->getLocStart(),
diag::err_asm_unexpected_constraint_alternatives)
<< NumAlternatives << AltCount);
}
for (unsigned i = 0, e = InputConstraintInfos.size(); i != e; ++i) {
TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
StringRef ConstraintStr = Info.getConstraintStr();
unsigned AltCount = ConstraintStr.count(',') + 1;
if (NumAlternatives == ~0U)
NumAlternatives = AltCount;
else if (NumAlternatives != AltCount)
return StmtError(Diag(NS->getInputExpr(i)->getLocStart(),
diag::err_asm_unexpected_constraint_alternatives)
<< NumAlternatives << AltCount);
// If this is a tied constraint, verify that the output and input have
// either exactly the same type, or that they are int/ptr operands with the
// same size (int/long, int*/long, are ok etc).
if (!Info.hasTiedOperand()) continue;
unsigned TiedTo = Info.getTiedOperand();
unsigned InputOpNo = i+NumOutputs;
Expr *OutputExpr = Exprs[TiedTo];
Expr *InputExpr = Exprs[InputOpNo];
if (OutputExpr->isTypeDependent() || InputExpr->isTypeDependent())
continue;
QualType InTy = InputExpr->getType();
QualType OutTy = OutputExpr->getType();
if (Context.hasSameType(InTy, OutTy))
continue; // All types can be tied to themselves.
// Decide if the input and output are in the same domain (integer/ptr or
// floating point.
enum AsmDomain {
AD_Int, AD_FP, AD_Other
} InputDomain, OutputDomain;
if (InTy->isIntegerType() || InTy->isPointerType())
InputDomain = AD_Int;
else if (InTy->isRealFloatingType())
InputDomain = AD_FP;
else
InputDomain = AD_Other;
if (OutTy->isIntegerType() || OutTy->isPointerType())
OutputDomain = AD_Int;
else if (OutTy->isRealFloatingType())
OutputDomain = AD_FP;
else
OutputDomain = AD_Other;
// They are ok if they are the same size and in the same domain. This
// allows tying things like:
// void* to int*
// void* to int if they are the same size.
// double to long double if they are the same size.
//
uint64_t OutSize = Context.getTypeSize(OutTy);
uint64_t InSize = Context.getTypeSize(InTy);
if (OutSize == InSize && InputDomain == OutputDomain &&
InputDomain != AD_Other)
continue;
// If the smaller input/output operand is not mentioned in the asm string,
// then we can promote the smaller one to a larger input and the asm string
// won't notice.
bool SmallerValueMentioned = false;
// If this is a reference to the input and if the input was the smaller
// one, then we have to reject this asm.
if (isOperandMentioned(InputOpNo, Pieces)) {
// This is a use in the asm string of the smaller operand. Since we
// codegen this by promoting to a wider value, the asm will get printed
// "wrong".
SmallerValueMentioned |= InSize < OutSize;
}
if (isOperandMentioned(TiedTo, Pieces)) {
// If this is a reference to the output, and if the output is the larger
// value, then it's ok because we'll promote the input to the larger type.
SmallerValueMentioned |= OutSize < InSize;
}
// If the smaller value wasn't mentioned in the asm string, and if the
// output was a register, just extend the shorter one to the size of the
// larger one.
if (!SmallerValueMentioned && InputDomain != AD_Other &&
OutputConstraintInfos[TiedTo].allowsRegister())
continue;
// Either both of the operands were mentioned or the smaller one was
// mentioned. One more special case that we'll allow: if the tied input is
// integer, unmentioned, and is a constant, then we'll allow truncating it
// down to the size of the destination.
if (InputDomain == AD_Int && OutputDomain == AD_Int &&
!isOperandMentioned(InputOpNo, Pieces) &&
InputExpr->isEvaluatable(Context)) {
CastKind castKind =
(OutTy->isBooleanType() ? CK_IntegralToBoolean : CK_IntegralCast);
InputExpr = ImpCastExprToType(InputExpr, OutTy, castKind).get();
Exprs[InputOpNo] = InputExpr;
NS->setInputExpr(i, InputExpr);
continue;
}
Diag(InputExpr->getLocStart(),
diag::err_asm_tying_incompatible_types)
<< InTy << OutTy << OutputExpr->getSourceRange()
<< InputExpr->getSourceRange();
return StmtError();
}
return NS;
}
/// Resolve an identifier appearing inside an MS-style inline asm block and
/// fill \p Info with its size/type/length; returns the resolved expression,
/// or an error for dependent/incomplete types or naked-function parameters.
ExprResult Sema::LookupInlineAsmIdentifier(CXXScopeSpec &SS,
                                           SourceLocation TemplateKWLoc,
                                           UnqualifiedId &Id,
                                           llvm::InlineAsmIdentifierInfo &Info,
                                           bool IsUnevaluatedContext) {
  Info.clear();

  // When requested, resolve the name in an unevaluated context so merely
  // naming the entity has no semantic side effects.
  if (IsUnevaluatedContext)
    PushExpressionEvaluationContext(UnevaluatedAbstract,
                                    ReuseLambdaContextDecl);

  ExprResult Res = ActOnIdExpression(getCurScope(), SS, TemplateKWLoc, Id,
                                     /*trailing lparen*/ false,
                                     /*is & operand*/ false,
                                     /*CorrectionCandidateCallback=*/nullptr,
                                     /*IsInlineAsmIdentifier=*/ true);

  if (IsUnevaluatedContext)
    PopExpressionEvaluationContext();

  if (!Res.isUsable())
    return Res;

  Res = CheckPlaceholderExpr(Res.get());
  if (!Res.isUsable())
    return Res;

  // Referring to parameters is not allowed in naked functions.
  if (CheckNakedParmReference(Res.get(), *this))
    return ExprError();

  QualType T = Res.get()->getType();

  // For now, reject dependent types.
  if (T->isDependentType()) {
    Diag(Id.getLocStart(), diag::err_asm_incomplete_type) << T;
    return ExprError();
  }

  // Any sort of function type is fine.
  if (T->isFunctionType())
    return Res;

  // Anything else must be a complete type.
  if (RequireCompleteExprType(Res.get(), diag::err_asm_incomplete_type))
    return ExprError();

  // Record the operand size in bytes. For arrays, Type becomes the element
  // size and Length the element count.
  Info.Type = Info.Size = Context.getTypeSizeInChars(T).getQuantity();
  if (T->isArrayType()) {
    const ArrayType *ArrTy = Context.getAsArrayType(T);
    Info.Type =
        Context.getTypeSizeInChars(ArrTy->getElementType()).getQuantity();
    Info.Length = Info.Size / Info.Type;
  }

  // We can work with the expression as long as it's not an r-value.
  if (!Res.get()->isRValue())
    Info.IsVarDecl = true;

  return Res;
}
/// Compute the byte offset of field \p Member inside the record named by
/// \p Base (a variable of record type, a typedef of one, or the record type
/// itself). Returns true on failure; on success \p Offset holds the result.
bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
                                unsigned &Offset, SourceLocation AsmLoc) {
  Offset = 0;

  // Resolve the base name to a single declaration.
  LookupResult BaseLookup(*this, &Context.Idents.get(Base), SourceLocation(),
                          LookupOrdinaryName);
  if (!LookupName(BaseLookup, getCurScope()) || !BaseLookup.isSingleResult())
    return true;

  // Extract the underlying record type. Note that TypedefNameDecl must be
  // tried before the more general TypeDecl case, since it is a subclass.
  NamedDecl *Found = BaseLookup.getFoundDecl();
  const RecordType *RecTy = nullptr;
  if (VarDecl *Var = dyn_cast<VarDecl>(Found))
    RecTy = Var->getType()->getAs<RecordType>();
  else if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
    MarkAnyDeclReferenced(Typedef->getLocation(), Typedef, /*OdrUse=*/false);
    RecTy = Typedef->getUnderlyingType()->getAs<RecordType>();
  } else if (TypeDecl *Type = dyn_cast<TypeDecl>(Found))
    RecTy = Type->getTypeForDecl()->getAs<RecordType>();

  // A layout only exists for a complete record type.
  if (!RecTy || RequireCompleteType(AsmLoc, QualType(RecTy, 0), 0))
    return true;

  // Look the member up inside the record.
  LookupResult FieldLookup(*this, &Context.Idents.get(Member),
                           SourceLocation(), LookupMemberName);
  if (!LookupQualifiedName(FieldLookup, RecTy->getDecl()))
    return true;

  // FIXME: Handle IndirectFieldDecl?
  FieldDecl *Field = dyn_cast<FieldDecl>(FieldLookup.getFoundDecl());
  if (!Field)
    return true;

  // Convert the field's bit offset in the record layout into bytes.
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RecTy->getDecl());
  CharUnits Chars = Context.toCharUnitsFromBits(
      Layout.getFieldOffset(Field->getFieldIndex()));
  Offset = (unsigned)Chars.getQuantity();
  return false;
}
/// Build the AST node for a fully-parsed MS-style inline asm statement.
StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
                                ArrayRef<Token> AsmToks,
                                StringRef AsmString,
                                unsigned NumOutputs, unsigned NumInputs,
                                ArrayRef<StringRef> Constraints,
                                ArrayRef<StringRef> Clobbers,
                                ArrayRef<Expr*> Exprs,
                                SourceLocation EndLoc) {
  // The statement is flagged "simple" whenever it binds any input or output
  // expressions.
  bool IsSimple = NumOutputs != 0 || NumInputs != 0;

  getCurFunction()->setHasBranchProtectedScope();

  // MS asm statements are always treated as volatile.
  return new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, IsSimple,
                                 /*IsVolatile*/ true, AsmToks, NumOutputs,
                                 NumInputs, Constraints, Exprs, AsmString,
                                 Clobbers, EndLoc);
}
/// Find or create the LabelDecl for a label referenced from MS inline asm,
/// assigning it a unique internal name on first sight.
LabelDecl *Sema::GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
                                       SourceLocation Location,
                                       bool AlwaysCreate) {
  LabelDecl *Label =
      LookupOrCreateLabel(PP.getIdentifierInfo(ExternalLabelName), Location);

  if (!Label->isMSAsmLabel()) {
    // First time we see this label: give it a unique internal name. The dot
    // makes the name invalid as a mangled name, so it cannot collide with a
    // real symbol.
    std::string InternalName;
    llvm::raw_string_ostream OS(InternalName);
    OS << "__MSASMLABEL_." << MSAsmLabelNameCounter++ << "__"
       << ExternalLabelName;
    Label->setMSAsmLabel(OS.str());
  } else {
    // We created this label implicitly earlier; mark it as used now.
    Label->markUsed(Context);
  }

  // The label might have been created implicitly from a previously seen goto;
  // once we encounter the label itself, mark it resolved.
  if (AlwaysCreate)
    Label->setMSAsmLabelResolved();

  // Re-point the label at this occurrence for accurate diagnostics.
  Label->setLocation(Location);
  return Label;
}
//===------- SemaTemplateInstantiate.cpp - C++ Template Instantiation ------===/
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===/
//
// This file implements C++ template instantiation.
//
//===----------------------------------------------------------------------===/
#include "clang/Sema/SemaInternal.h"
#include "TreeTransform.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "llvm/Support/TimeProfiler.h" // HLSL Change
using namespace clang;
using namespace sema;
//===----------------------------------------------------------------------===/
// Template Instantiation Support
//===----------------------------------------------------------------------===/
/// \brief Retrieve the template argument list(s) that should be used to
/// instantiate the definition of the given declaration.
///
/// \param D the declaration for which we are computing template instantiation
/// arguments.
///
/// \param Innermost if non-NULL, the innermost template argument list.
///
/// \param RelativeToPrimary true if we should get the template
/// arguments relative to the primary template, even when we're
/// dealing with a specialization. This is only relevant for function
/// template specializations.
///
/// \param Pattern If non-NULL, indicates the pattern from which we will be
/// instantiating the definition of the given declaration, \p D. This is
/// used to determine the proper set of template instantiation arguments for
/// friend function template specializations.
///
/// \returns the accumulated multi-level argument list, innermost level first.
MultiLevelTemplateArgumentList
Sema::getTemplateInstantiationArgs(NamedDecl *D,
                                   const TemplateArgumentList *Innermost,
                                   bool RelativeToPrimary,
                                   const FunctionDecl *Pattern) {
  // Accumulate the set of template argument lists in this structure.
  MultiLevelTemplateArgumentList Result;

  if (Innermost)
    Result.addOuterTemplateArguments(Innermost);

  // If D is itself a DeclContext we walk outward from D; otherwise we start
  // from its semantic parent (after handling D-specific cases below).
  DeclContext *Ctx = dyn_cast<DeclContext>(D);
  if (!Ctx) {
    Ctx = D->getDeclContext();

    // Add template arguments from a variable template instantiation.
    if (VarTemplateSpecializationDecl *Spec =
            dyn_cast<VarTemplateSpecializationDecl>(D)) {
      // We're done when we hit an explicit specialization.
      if (Spec->getSpecializationKind() == TSK_ExplicitSpecialization &&
          !isa<VarTemplatePartialSpecializationDecl>(Spec))
        return Result;

      Result.addOuterTemplateArguments(&Spec->getTemplateInstantiationArgs());

      // If this variable template specialization was instantiated from a
      // specialized member that is a variable template, we're done.
      assert(Spec->getSpecializedTemplate() && "No variable template?");
      llvm::PointerUnion<VarTemplateDecl*,
                         VarTemplatePartialSpecializationDecl*> Specialized
          = Spec->getSpecializedTemplateOrPartial();
      if (VarTemplatePartialSpecializationDecl *Partial =
              Specialized.dyn_cast<VarTemplatePartialSpecializationDecl *>()) {
        if (Partial->isMemberSpecialization())
          return Result;
      } else {
        VarTemplateDecl *Tmpl = Specialized.get<VarTemplateDecl *>();
        if (Tmpl->isMemberSpecialization())
          return Result;
      }
    }

    // If we have a template template parameter with translation unit context,
    // then we're performing substitution into a default template argument of
    // this template template parameter before we've constructed the template
    // that will own this template template parameter. In this case, we
    // use empty template parameter lists for all of the outer templates
    // to avoid performing any substitutions.
    if (Ctx->isTranslationUnit()) {
      if (TemplateTemplateParmDecl *TTP
            = dyn_cast<TemplateTemplateParmDecl>(D)) {
        for (unsigned I = 0, N = TTP->getDepth() + 1; I != N; ++I)
          Result.addOuterTemplateArguments(None);
        return Result;
      }
    }
  }

  // Walk up the enclosing contexts, collecting one argument level per
  // templated scope, until we reach file scope.
  while (!Ctx->isFileContext()) {
    // Add template arguments from a class template instantiation.
    if (ClassTemplateSpecializationDecl *Spec
          = dyn_cast<ClassTemplateSpecializationDecl>(Ctx)) {
      // We're done when we hit an explicit specialization.
      if (Spec->getSpecializationKind() == TSK_ExplicitSpecialization &&
          !isa<ClassTemplatePartialSpecializationDecl>(Spec))
        break;

      Result.addOuterTemplateArguments(&Spec->getTemplateInstantiationArgs());

      // If this class template specialization was instantiated from a
      // specialized member that is a class template, we're done.
      assert(Spec->getSpecializedTemplate() && "No class template?");
      if (Spec->getSpecializedTemplate()->isMemberSpecialization())
        break;
    }
    // Add template arguments from a function template specialization.
    else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Ctx)) {
      if (!RelativeToPrimary &&
          (Function->getTemplateSpecializationKind() ==
                                                  TSK_ExplicitSpecialization &&
           !Function->getClassScopeSpecializationPattern()))
        break;

      if (const TemplateArgumentList *TemplateArgs
            = Function->getTemplateSpecializationArgs()) {
        // Add the template arguments for this specialization.
        Result.addOuterTemplateArguments(TemplateArgs);

        // If this function was instantiated from a specialized member that is
        // a function template, we're done.
        assert(Function->getPrimaryTemplate() && "No function template?");
        if (Function->getPrimaryTemplate()->isMemberSpecialization())
          break;

        // If this function is a generic lambda specialization, we are done.
        if (isGenericLambdaCallOperatorSpecialization(Function))
          break;

      } else if (FunctionTemplateDecl *FunTmpl
                   = Function->getDescribedFunctionTemplate()) {
        // Add the "injected" template arguments.
        Result.addOuterTemplateArguments(FunTmpl->getInjectedTemplateArgs());
      }

      // If this is a friend declaration and it declares an entity at
      // namespace scope, take arguments from its lexical parent
      // instead of its semantic parent, unless of course the pattern we're
      // instantiating actually comes from the file's context!
      if (Function->getFriendObjectKind() &&
          Function->getDeclContext()->isFileContext() &&
          (!Pattern || !Pattern->getLexicalDeclContext()->isFileContext())) {
        Ctx = Function->getLexicalDeclContext();
        RelativeToPrimary = false;
        continue;
      }
    } else if (CXXRecordDecl *Rec = dyn_cast<CXXRecordDecl>(Ctx)) {
      // For a class template definition, add its injected template arguments.
      if (ClassTemplateDecl *ClassTemplate = Rec->getDescribedClassTemplate()) {
        QualType T = ClassTemplate->getInjectedClassNameSpecialization();
        const TemplateSpecializationType *TST =
            cast<TemplateSpecializationType>(Context.getCanonicalType(T));
        Result.addOuterTemplateArguments(
            llvm::makeArrayRef(TST->getArgs(), TST->getNumArgs()));
        if (ClassTemplate->isMemberSpecialization())
          break;
      }
    }

    Ctx = Ctx->getParent();
    RelativeToPrimary = false;
  }

  return Result;
}
bool Sema::ActiveTemplateInstantiation::isInstantiationRecord() const {
  // Default-argument checking is pure bookkeeping rather than a real
  // instantiation record; every other kind is a record.
  switch (Kind) {
  case DefaultTemplateArgumentChecking:
    return false;

  case TemplateInstantiation:
  case ExceptionSpecInstantiation:
  case DefaultTemplateArgumentInstantiation:
  case DefaultFunctionArgumentInstantiation:
  case ExplicitTemplateArgumentSubstitution:
  case DeducedTemplateArgumentSubstitution:
  case PriorTemplateArgumentSubstitution:
    return true;
  }

  llvm_unreachable("Invalid InstantiationKind!");
}
/// Generic constructor: pushes an entry describing this instantiation onto
/// Sema's active-instantiation stack. If a fatal error has already occurred
/// or the instantiation depth limit is exceeded, the object is marked
/// invalid and nothing is pushed (Clear()/the destructor then do nothing).
Sema::InstantiatingTemplate::InstantiatingTemplate(
    Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind,
    SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
    Decl *Entity, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs,
    sema::TemplateDeductionInfo *DeductionInfo)
    : SemaRef(SemaRef), SavedInNonInstantiationSFINAEContext(
                            SemaRef.InNonInstantiationSFINAEContext) {
  // Don't allow further instantiation if a fatal error has occurred. Any
  // diagnostics we might have raised will not be visible.
  if (SemaRef.Diags.hasFatalErrorOccurred()) {
    Invalid = true;
    return;
  }
  Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
  if (!Invalid) {
    // Record the details of this instantiation on the stack.
    ActiveTemplateInstantiation Inst;
    Inst.Kind = Kind;
    Inst.PointOfInstantiation = PointOfInstantiation;
    Inst.Entity = Entity;
    Inst.Template = Template;
    Inst.TemplateArgs = TemplateArgs.data();
    Inst.NumTemplateArgs = TemplateArgs.size();
    Inst.DeductionInfo = DeductionInfo;
    Inst.InstantiationRange = InstantiationRange;
    SemaRef.InNonInstantiationSFINAEContext = false;
    SemaRef.ActiveTemplateInstantiations.push_back(Inst);
    // Keep the count of non-record entries in sync with the stack.
    if (!Inst.isInstantiationRecord())
      ++SemaRef.NonInstantiationEntries;
  }
}
// Instantiation of a declaration (class, function, variable, etc.).
Sema::InstantiatingTemplate::InstantiatingTemplate(
    Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity,
    SourceRange InstantiationRange)
    : InstantiatingTemplate(SemaRef,
                            ActiveTemplateInstantiation::TemplateInstantiation,
                            PointOfInstantiation, InstantiationRange, Entity) {}

// Instantiation of a function's exception specification. The unnamed
// ExceptionSpecification parameter is a tag used only to select this overload.
Sema::InstantiatingTemplate::InstantiatingTemplate(
    Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity,
    ExceptionSpecification, SourceRange InstantiationRange)
    : InstantiatingTemplate(
          SemaRef, ActiveTemplateInstantiation::ExceptionSpecInstantiation,
          PointOfInstantiation, InstantiationRange, Entity) {}

// Instantiation of a default template argument of a template.
Sema::InstantiatingTemplate::InstantiatingTemplate(
    Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template,
    ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange)
    : InstantiatingTemplate(
          SemaRef,
          ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation,
          PointOfInstantiation, InstantiationRange, Template, nullptr,
          TemplateArgs) {}

// Substitution of template arguments into a function template; the caller
// supplies the exact Kind (explicit or deduced substitution) and the
// deduction info used for SFINAE.
Sema::InstantiatingTemplate::InstantiatingTemplate(
    Sema &SemaRef, SourceLocation PointOfInstantiation,
    FunctionTemplateDecl *FunctionTemplate,
    ArrayRef<TemplateArgument> TemplateArgs,
    ActiveTemplateInstantiation::InstantiationKind Kind,
    sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange)
    : InstantiatingTemplate(SemaRef, Kind, PointOfInstantiation,
                            InstantiationRange, FunctionTemplate, nullptr,
                            TemplateArgs, &DeductionInfo) {}

// Substitution of deduced template arguments into a class template partial
// specialization.
Sema::InstantiatingTemplate::InstantiatingTemplate(
    Sema &SemaRef, SourceLocation PointOfInstantiation,
    ClassTemplatePartialSpecializationDecl *PartialSpec,
    ArrayRef<TemplateArgument> TemplateArgs,
    sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange)
    : InstantiatingTemplate(
          SemaRef,
          ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution,
          PointOfInstantiation, InstantiationRange, PartialSpec, nullptr,
          TemplateArgs, &DeductionInfo) {}

// Substitution of deduced template arguments into a variable template
// partial specialization.
Sema::InstantiatingTemplate::InstantiatingTemplate(
    Sema &SemaRef, SourceLocation PointOfInstantiation,
    VarTemplatePartialSpecializationDecl *PartialSpec,
    ArrayRef<TemplateArgument> TemplateArgs,
    sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange)
    : InstantiatingTemplate(
          SemaRef,
          ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution,
          PointOfInstantiation, InstantiationRange, PartialSpec, nullptr,
          TemplateArgs, &DeductionInfo) {}

// Instantiation of a default argument of a function parameter.
Sema::InstantiatingTemplate::InstantiatingTemplate(
    Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param,
    ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange)
    : InstantiatingTemplate(
          SemaRef,
          ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation,
          PointOfInstantiation, InstantiationRange, Param, nullptr,
          TemplateArgs) {}

// Substitution of prior template arguments into a non-type template
// parameter.
Sema::InstantiatingTemplate::InstantiatingTemplate(
    Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template,
    NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs,
    SourceRange InstantiationRange)
    : InstantiatingTemplate(
          SemaRef,
          ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution,
          PointOfInstantiation, InstantiationRange, Param, Template,
          TemplateArgs) {}

// Substitution of prior template arguments into a template template
// parameter.
Sema::InstantiatingTemplate::InstantiatingTemplate(
    Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template,
    TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs,
    SourceRange InstantiationRange)
    : InstantiatingTemplate(
          SemaRef,
          ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution,
          PointOfInstantiation, InstantiationRange, Param, Template,
          TemplateArgs) {}

// Checking of a default template argument against template parameter
// \p Param of \p Template (not an instantiation record; see
// isInstantiationRecord()).
Sema::InstantiatingTemplate::InstantiatingTemplate(
    Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template,
    NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs,
    SourceRange InstantiationRange)
    : InstantiatingTemplate(
          SemaRef, ActiveTemplateInstantiation::DefaultTemplateArgumentChecking,
          PointOfInstantiation, InstantiationRange, Param, Template,
          TemplateArgs) {}
/// Pop the entry this object pushed off the active-instantiation stack and
/// restore Sema's saved SFINAE state. Guarded by Invalid, so the pop happens
/// at most once.
void Sema::InstantiatingTemplate::Clear() {
  if (!Invalid) {
    // Keep the non-record entry count in sync with the stack.
    if (!SemaRef.ActiveTemplateInstantiations.back().isInstantiationRecord()) {
      assert(SemaRef.NonInstantiationEntries > 0);
      --SemaRef.NonInstantiationEntries;
    }
    SemaRef.InNonInstantiationSFINAEContext
      = SavedInNonInstantiationSFINAEContext;

    // Name lookup no longer looks in this template's defining module.
    assert(SemaRef.ActiveTemplateInstantiations.size() >=
           SemaRef.ActiveTemplateInstantiationLookupModules.size() &&
           "forgot to remove a lookup module for a template instantiation");
    if (SemaRef.ActiveTemplateInstantiations.size() ==
        SemaRef.ActiveTemplateInstantiationLookupModules.size()) {
      // Also drop any cached lookup results for that module.
      if (Module *M = SemaRef.ActiveTemplateInstantiationLookupModules.back())
        SemaRef.LookupModulesCache.erase(M);
      SemaRef.ActiveTemplateInstantiationLookupModules.pop_back();
    }

    SemaRef.ActiveTemplateInstantiations.pop_back();
    // Mark ourselves done so a second Clear() (or the destructor) is a no-op.
    Invalid = true;
  }
}
/// Check whether starting another instantiation would exceed the configured
/// instantiation depth limit; if so, emit diagnostics and return true.
bool Sema::InstantiatingTemplate::CheckInstantiationDepth(
                                        SourceLocation PointOfInstantiation,
                                        SourceRange InstantiationRange) {
  assert(SemaRef.NonInstantiationEntries <=
         SemaRef.ActiveTemplateInstantiations.size());

  // Entries that are not instantiation records do not count toward the limit.
  unsigned Depth = SemaRef.ActiveTemplateInstantiations.size() -
                   SemaRef.NonInstantiationEntries;
  if (Depth <= SemaRef.getLangOpts().InstantiationDepth)
    return false;

  // Too deep: report the error and note how to raise the limit.
  SemaRef.Diag(PointOfInstantiation,
               diag::err_template_recursion_depth_exceeded)
    << SemaRef.getLangOpts().InstantiationDepth
    << InstantiationRange;
  SemaRef.Diag(PointOfInstantiation, diag::note_template_recursion_depth)
    << SemaRef.getLangOpts().InstantiationDepth;
  return true;
}
/// \brief Prints the current instantiation stack through a series of
/// notes, walking from the innermost active instantiation outward. When the
/// stack is deeper than the diagnostic backtrace limit, the middle of the
/// stack is elided and a single "contexts suppressed" note is emitted.
void Sema::PrintInstantiationStack() {
  // Determine which template instantiations to skip, if any.
  unsigned SkipStart = ActiveTemplateInstantiations.size(), SkipEnd = SkipStart;
  unsigned Limit = Diags.getTemplateBacktraceLimit();
  if (Limit && Limit < ActiveTemplateInstantiations.size()) {
    // Keep the first half of the limit at the top and the second half at the
    // bottom; skip everything in between.
    SkipStart = Limit / 2 + Limit % 2;
    SkipEnd = ActiveTemplateInstantiations.size() - Limit / 2;
  }

  // FIXME: In all of these cases, we need to show the template arguments
  unsigned InstantiationIdx = 0;
  for (SmallVectorImpl<ActiveTemplateInstantiation>::reverse_iterator
         Active = ActiveTemplateInstantiations.rbegin(),
         ActiveEnd = ActiveTemplateInstantiations.rend();
       Active != ActiveEnd;
       ++Active, ++InstantiationIdx) {
    // Skip this instantiation?
    if (InstantiationIdx >= SkipStart && InstantiationIdx < SkipEnd) {
      if (InstantiationIdx == SkipStart) {
        // Note that we're skipping instantiations.
        Diags.Report(Active->PointOfInstantiation,
                     diag::note_instantiation_contexts_suppressed)
          << unsigned(ActiveTemplateInstantiations.size() - Limit);
      }
      continue;
    }

    // Emit a note tailored to the kind of entity this stack entry describes.
    switch (Active->Kind) {
    case ActiveTemplateInstantiation::TemplateInstantiation: {
      Decl *D = Active->Entity;
      if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
        unsigned DiagID = diag::note_template_member_class_here;
        if (isa<ClassTemplateSpecializationDecl>(Record))
          DiagID = diag::note_template_class_instantiation_here;
        Diags.Report(Active->PointOfInstantiation, DiagID)
          << Context.getTypeDeclType(Record)
          << Active->InstantiationRange;
      } else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
        unsigned DiagID;
        if (Function->getPrimaryTemplate())
          DiagID = diag::note_function_template_spec_here;
        else
          DiagID = diag::note_template_member_function_here;
        Diags.Report(Active->PointOfInstantiation, DiagID)
          << Function
          << Active->InstantiationRange;
      } else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
        Diags.Report(Active->PointOfInstantiation,
                     VD->isStaticDataMember()?
                       diag::note_template_static_data_member_def_here
                     : diag::note_template_variable_def_here)
          << VD
          << Active->InstantiationRange;
      } else if (EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
        Diags.Report(Active->PointOfInstantiation,
                     diag::note_template_enum_def_here)
          << ED
          << Active->InstantiationRange;
      } else if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
        // Non-static data member initializer (NSDMI) instantiation.
        Diags.Report(Active->PointOfInstantiation,
                     diag::note_template_nsdmi_here)
            << FD << Active->InstantiationRange;
      } else {
        Diags.Report(Active->PointOfInstantiation,
                     diag::note_template_type_alias_instantiation_here)
          << cast<TypeAliasTemplateDecl>(D)
          << Active->InstantiationRange;
      }
      break;
    }

    case ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation: {
      TemplateDecl *Template = cast<TemplateDecl>(Active->Entity);
      // Render "Template<Args...>" into a string for the note.
      SmallVector<char, 128> TemplateArgsStr;
      llvm::raw_svector_ostream OS(TemplateArgsStr);
      Template->printName(OS);
      TemplateSpecializationType::PrintTemplateArgumentList(OS,
                                                          Active->TemplateArgs,
                                                       Active->NumTemplateArgs,
                                                          getPrintingPolicy());
      Diags.Report(Active->PointOfInstantiation,
                   diag::note_default_arg_instantiation_here)
        << OS.str()
        << Active->InstantiationRange;
      break;
    }

    case ActiveTemplateInstantiation::ExplicitTemplateArgumentSubstitution: {
      FunctionTemplateDecl *FnTmpl = cast<FunctionTemplateDecl>(Active->Entity);
      Diags.Report(Active->PointOfInstantiation,
                   diag::note_explicit_template_arg_substitution_here)
        << FnTmpl
        << getTemplateArgumentBindingsText(FnTmpl->getTemplateParameters(),
                                           Active->TemplateArgs,
                                           Active->NumTemplateArgs)
        << Active->InstantiationRange;
      break;
    }

    case ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution:
      // The entity is either a class template partial specialization or a
      // function template.
      if (ClassTemplatePartialSpecializationDecl *PartialSpec =
            dyn_cast<ClassTemplatePartialSpecializationDecl>(Active->Entity)) {
        Diags.Report(Active->PointOfInstantiation,
                     diag::note_partial_spec_deduct_instantiation_here)
          << Context.getTypeDeclType(PartialSpec)
          << getTemplateArgumentBindingsText(
                                         PartialSpec->getTemplateParameters(),
                                             Active->TemplateArgs,
                                             Active->NumTemplateArgs)
          << Active->InstantiationRange;
      } else {
        FunctionTemplateDecl *FnTmpl
          = cast<FunctionTemplateDecl>(Active->Entity);
        Diags.Report(Active->PointOfInstantiation,
                     diag::note_function_template_deduction_instantiation_here)
          << FnTmpl
          << getTemplateArgumentBindingsText(FnTmpl->getTemplateParameters(),
                                             Active->TemplateArgs,
                                             Active->NumTemplateArgs)
          << Active->InstantiationRange;
      }
      break;

    case ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation: {
      ParmVarDecl *Param = cast<ParmVarDecl>(Active->Entity);
      FunctionDecl *FD = cast<FunctionDecl>(Param->getDeclContext());
      // Render the enclosing function plus its template arguments.
      SmallVector<char, 128> TemplateArgsStr;
      llvm::raw_svector_ostream OS(TemplateArgsStr);
      FD->printName(OS);
      TemplateSpecializationType::PrintTemplateArgumentList(OS,
                                                          Active->TemplateArgs,
                                                       Active->NumTemplateArgs,
                                                          getPrintingPolicy());
      Diags.Report(Active->PointOfInstantiation,
                   diag::note_default_function_arg_instantiation_here)
        << OS.str()
        << Active->InstantiationRange;
      break;
    }

    case ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution: {
      NamedDecl *Parm = cast<NamedDecl>(Active->Entity);
      std::string Name;
      if (!Parm->getName().empty())
        Name = std::string(" '") + Parm->getName().str() + "'";
      // Active->Template is either a TemplateDecl or a class template
      // partial specialization.
      TemplateParameterList *TemplateParams = nullptr;
      if (TemplateDecl *Template = dyn_cast<TemplateDecl>(Active->Template))
        TemplateParams = Template->getTemplateParameters();
      else
        TemplateParams =
          cast<ClassTemplatePartialSpecializationDecl>(Active->Template)
                                                      ->getTemplateParameters();
      Diags.Report(Active->PointOfInstantiation,
                   diag::note_prior_template_arg_substitution)
        << isa<TemplateTemplateParmDecl>(Parm)
        << Name
        << getTemplateArgumentBindingsText(TemplateParams,
                                           Active->TemplateArgs,
                                           Active->NumTemplateArgs)
        << Active->InstantiationRange;
      break;
    }

    case ActiveTemplateInstantiation::DefaultTemplateArgumentChecking: {
      TemplateParameterList *TemplateParams = nullptr;
      if (TemplateDecl *Template = dyn_cast<TemplateDecl>(Active->Template))
        TemplateParams = Template->getTemplateParameters();
      else
        TemplateParams =
          cast<ClassTemplatePartialSpecializationDecl>(Active->Template)
                                                      ->getTemplateParameters();

      Diags.Report(Active->PointOfInstantiation,
                   diag::note_template_default_arg_checking)
        << getTemplateArgumentBindingsText(TemplateParams,
                                           Active->TemplateArgs,
                                           Active->NumTemplateArgs)
        << Active->InstantiationRange;
      break;
    }

    case ActiveTemplateInstantiation::ExceptionSpecInstantiation:
      Diags.Report(Active->PointOfInstantiation,
                   diag::note_template_exception_spec_instantiation_here)
        << cast<FunctionDecl>(Active->Entity)
        << Active->InstantiationRange;
      break;
    }
  }
}
/// \brief Determine whether we are currently in a SFINAE context.
///
/// Walks the active-instantiation stack from innermost to outermost. Returns
/// None if some entry makes this a hard (non-SFINAE) instantiation;
/// otherwise returns the deduction info of the innermost substitution (or a
/// null TemplateDeductionInfo* for a non-instantiation SFINAE context).
Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
  if (InNonInstantiationSFINAEContext)
    return Optional<TemplateDeductionInfo *>(nullptr);

  for (SmallVectorImpl<ActiveTemplateInstantiation>::const_reverse_iterator
         Active = ActiveTemplateInstantiations.rbegin(),
         ActiveEnd = ActiveTemplateInstantiations.rend();
       Active != ActiveEnd;
       ++Active)
  {
    switch(Active->Kind) {
    case ActiveTemplateInstantiation::TemplateInstantiation:
      // An instantiation of an alias template may or may not be a SFINAE
      // context, depending on what else is on the stack.
      if (isa<TypeAliasTemplateDecl>(Active->Entity))
        break;
      LLVM_FALLTHROUGH; // HLSL Change

    case ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation:
    case ActiveTemplateInstantiation::ExceptionSpecInstantiation:
      // This is a template instantiation, so there is no SFINAE.
      return None;

    case ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation:
    case ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution:
    case ActiveTemplateInstantiation::DefaultTemplateArgumentChecking:
      // A default template argument instantiation and substitution into
      // template parameters with arguments for prior parameters may or may
      // not be a SFINAE context; look further up the stack.
      break;

    case ActiveTemplateInstantiation::ExplicitTemplateArgumentSubstitution:
    case ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution:
      // We're either substituting explicitly-specified template arguments
      // or deduced template arguments, so SFINAE applies.
      assert(Active->DeductionInfo && "Missing deduction info pointer");
      return Active->DeductionInfo;
    }
  }

  return None;
}
/// \brief Retrieve the (depth, index) position of a template parameter.
/// \p ND must be one of the three template-parameter declaration kinds.
static std::pair<unsigned, unsigned>
getDepthAndIndex(NamedDecl *ND) {
  if (const auto *TypeParm = dyn_cast<TemplateTypeParmDecl>(ND))
    return std::make_pair(TypeParm->getDepth(), TypeParm->getIndex());
  if (const auto *NonTypeParm = dyn_cast<NonTypeTemplateParmDecl>(ND))
    return std::make_pair(NonTypeParm->getDepth(), NonTypeParm->getIndex());
  const auto *TemplateParm = cast<TemplateTemplateParmDecl>(ND);
  return std::make_pair(TemplateParm->getDepth(), TemplateParm->getIndex());
}
//===----------------------------------------------------------------------===/
// Template Instantiation for Types
//===----------------------------------------------------------------------===/
namespace {
  /// \brief Tree transformation that performs template instantiation by
  /// substituting the supplied template arguments for template parameters
  /// while rebuilding types, expressions, and declarations.
  class TemplateInstantiator : public TreeTransform<TemplateInstantiator> {
    // Template arguments for every enclosing level of the instantiation.
    const MultiLevelTemplateArgumentList &TemplateArgs;
    // Location of the entity being instantiated; used for diagnostics.
    SourceLocation Loc;
    // Name of the entity being instantiated, if any.
    DeclarationName Entity;

  public:
    typedef TreeTransform<TemplateInstantiator> inherited;

    TemplateInstantiator(Sema &SemaRef,
                         const MultiLevelTemplateArgumentList &TemplateArgs,
                         SourceLocation Loc,
                         DeclarationName Entity)
      : inherited(SemaRef), TemplateArgs(TemplateArgs), Loc(Loc),
        Entity(Entity) { }

    /// \brief Determine whether the given type \p T has already been
    /// transformed.
    ///
    /// For the purposes of template instantiation, a type has already been
    /// transformed if it is NULL or if it is not dependent.
    bool AlreadyTransformed(QualType T);

    /// \brief Returns the location of the entity being instantiated, if known.
    SourceLocation getBaseLocation() { return Loc; }

    /// \brief Returns the name of the entity being instantiated, if any.
    DeclarationName getBaseEntity() { return Entity; }

    /// \brief Sets the "base" location and entity when that
    /// information is known based on another transformation.
    void setBase(SourceLocation Loc, DeclarationName Entity) {
      this->Loc = Loc;
      this->Entity = Entity;
    }

    /// \brief Ask Sema whether the pack expansion over \p Unexpanded can be
    /// expanded now, given the current template arguments.
    bool TryExpandParameterPacks(SourceLocation EllipsisLoc,
                                 SourceRange PatternRange,
                                 ArrayRef<UnexpandedParameterPack> Unexpanded,
                                 bool &ShouldExpand, bool &RetainExpansion,
                                 Optional<unsigned> &NumExpansions) {
      return getSema().CheckParameterPacksForExpansion(EllipsisLoc,
                                                       PatternRange, Unexpanded,
                                                       TemplateArgs,
                                                       ShouldExpand,
                                                       RetainExpansion,
                                                       NumExpansions);
    }

    /// \brief Record that the function parameter pack \p Pack is being
    /// expanded in the current local instantiation scope.
    void ExpandingFunctionParameterPack(ParmVarDecl *Pack) {
      SemaRef.CurrentInstantiationScope->MakeInstantiatedLocalArgPack(Pack);
    }

    /// \brief Temporarily clear (and return) the template argument for the
    /// partially-substituted parameter pack, so the pack itself is not
    /// substituted while its pattern is being rebuilt.
    /// NOTE: mutates the shared argument list via const_cast; callers must
    /// pair this with RememberPartiallySubstitutedPack().
    TemplateArgument ForgetPartiallySubstitutedPack() {
      TemplateArgument Result;
      if (NamedDecl *PartialPack
            = SemaRef.CurrentInstantiationScope->getPartiallySubstitutedPack()){
        MultiLevelTemplateArgumentList &TemplateArgs
          = const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs);
        unsigned Depth, Index;
        std::tie(Depth, Index) = getDepthAndIndex(PartialPack);
        if (TemplateArgs.hasTemplateArgument(Depth, Index)) {
          Result = TemplateArgs(Depth, Index);
          TemplateArgs.setArgument(Depth, Index, TemplateArgument());
        }
      }

      return Result;
    }

    /// \brief Restore the template argument saved by
    /// ForgetPartiallySubstitutedPack(). A null \p Arg means nothing was
    /// forgotten, so there is nothing to restore.
    void RememberPartiallySubstitutedPack(TemplateArgument Arg) {
      if (Arg.isNull())
        return;

      if (NamedDecl *PartialPack
            = SemaRef.CurrentInstantiationScope->getPartiallySubstitutedPack()){
        MultiLevelTemplateArgumentList &TemplateArgs
          = const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs);
        unsigned Depth, Index;
        std::tie(Depth, Index) = getDepthAndIndex(PartialPack);
        TemplateArgs.setArgument(Depth, Index, Arg);
      }
    }

    /// \brief Transform the given declaration by instantiating a reference to
    /// this declaration.
    Decl *TransformDecl(SourceLocation Loc, Decl *D);

    /// \brief Instantiate the attributes of \p Old onto \p New.
    void transformAttrs(Decl *Old, Decl *New) {
      SemaRef.InstantiateAttrs(TemplateArgs, Old, New);
    }

    void transformedLocalDecl(Decl *Old, Decl *New) {
      // If we've instantiated the call operator of a lambda or the call
      // operator template of a generic lambda, update the "instantiation of"
      // information.
      auto *NewMD = dyn_cast<CXXMethodDecl>(New);
      if (NewMD && isLambdaCallOperator(NewMD)) {
        auto *OldMD = dyn_cast<CXXMethodDecl>(Old);
        if (auto *NewTD = NewMD->getDescribedFunctionTemplate())
          NewTD->setInstantiatedFromMemberTemplate(
              OldMD->getDescribedFunctionTemplate());
        else
          NewMD->setInstantiationOfMemberFunction(OldMD,
                                                  TSK_ImplicitInstantiation);
      }

      // Record the old->new mapping in the current local scope.
      SemaRef.CurrentInstantiationScope->InstantiatedLocal(Old, New);
    }

    /// \brief Transform the definition of the given declaration by
    /// instantiating it.
    Decl *TransformDefinition(SourceLocation Loc, Decl *D);

    /// \brief Transform the first qualifier within a scope by instantiating the
    /// declaration.
    NamedDecl *TransformFirstQualifierInScope(NamedDecl *D, SourceLocation Loc);

    /// \brief Rebuild the exception declaration and register the declaration
    /// as an instantiated local.
    VarDecl *RebuildExceptionDecl(VarDecl *ExceptionDecl,
                                  TypeSourceInfo *Declarator,
                                  SourceLocation StartLoc,
                                  SourceLocation NameLoc,
                                  IdentifierInfo *Name);

    /// \brief Rebuild the Objective-C exception declaration and register the
    /// declaration as an instantiated local.
    VarDecl *RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
                                      TypeSourceInfo *TSInfo, QualType T);

    /// \brief Check for tag mismatches when instantiating an
    /// elaborated type.
    QualType RebuildElaboratedType(SourceLocation KeywordLoc,
                                   ElaboratedTypeKeyword Keyword,
                                   NestedNameSpecifierLoc QualifierLoc,
                                   QualType T);

    TemplateName
    TransformTemplateName(CXXScopeSpec &SS, TemplateName Name,
                          SourceLocation NameLoc,
                          QualType ObjectType = QualType(),
                          NamedDecl *FirstQualifierInScope = nullptr);

    const LoopHintAttr *TransformLoopHintAttr(const LoopHintAttr *LH);

    ExprResult TransformPredefinedExpr(PredefinedExpr *E);
    ExprResult TransformDeclRefExpr(DeclRefExpr *E);
    ExprResult TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E);

    ExprResult TransformTemplateParmRefExpr(DeclRefExpr *E,
                                            NonTypeTemplateParmDecl *D);
    ExprResult TransformSubstNonTypeTemplateParmPackExpr(
                                           SubstNonTypeTemplateParmPackExpr *E);

    /// \brief Rebuild a DeclRefExpr for a ParmVarDecl reference.
    ExprResult RebuildParmVarDeclRefExpr(ParmVarDecl *PD, SourceLocation Loc);

    /// \brief Transform a reference to a function parameter pack.
    ExprResult TransformFunctionParmPackRefExpr(DeclRefExpr *E,
                                                ParmVarDecl *PD);

    /// \brief Transform a FunctionParmPackExpr which was built when we couldn't
    /// expand a function parameter pack reference which refers to an expanded
    /// pack.
    ExprResult TransformFunctionParmPackExpr(FunctionParmPackExpr *E);

    QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
                                        FunctionProtoTypeLoc TL) {
      // Call the base version; it will forward to our overridden version below.
      return inherited::TransformFunctionProtoType(TLB, TL);
    }

    template<typename Fn>
    QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
                                        FunctionProtoTypeLoc TL,
                                        CXXRecordDecl *ThisContext,
                                        unsigned ThisTypeQuals,
                                        Fn TransformExceptionSpec);

    ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm,
                                            int indexAdjustment,
                                            Optional<unsigned> NumExpansions,
                                            bool ExpectParameterPack);

    /// \brief Transforms a template type parameter type by performing
    /// substitution of the corresponding template type argument.
    QualType TransformTemplateTypeParmType(TypeLocBuilder &TLB,
                                           TemplateTypeParmTypeLoc TL);

    /// \brief Transforms an already-substituted template type parameter pack
    /// into either itself (if we aren't substituting into its pack expansion)
    /// or the appropriate substituted argument.
    QualType TransformSubstTemplateTypeParmPackType(TypeLocBuilder &TLB,
                                           SubstTemplateTypeParmPackTypeLoc TL);

    /// \brief Track calls currently undergoing instantiation so diagnostics
    /// can refer to them.
    ExprResult TransformCallExpr(CallExpr *CE) {
      getSema().CallsUndergoingInstantiation.push_back(CE);
      ExprResult Result =
          TreeTransform<TemplateInstantiator>::TransformCallExpr(CE);
      getSema().CallsUndergoingInstantiation.pop_back();
      return Result;
    }

    /// \brief Lambdas get a fresh local instantiation scope, combined with
    /// the enclosing one so outer locals stay visible.
    ExprResult TransformLambdaExpr(LambdaExpr *E) {
      LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
      return TreeTransform<TemplateInstantiator>::TransformLambdaExpr(E);
    }

    /// \brief Substitute into a template parameter list; an empty or null
    /// list is returned unchanged.
    TemplateParameterList *TransformTemplateParameterList(
                              TemplateParameterList *OrigTPL)  {
      if (!OrigTPL || !OrigTPL->size()) return OrigTPL;

      DeclContext *Owner = OrigTPL->getParam(0)->getDeclContext();
      TemplateDeclInstantiator  DeclInstantiator(getSema(),
                        /* DeclContext *Owner */ Owner, TemplateArgs);
      return DeclInstantiator.SubstTemplateParams(OrigTPL);
    }

  private:
    ExprResult transformNonTypeTemplateParmRef(NonTypeTemplateParmDecl *parm,
                                               SourceLocation loc,
                                               TemplateArgument arg);
  };
}
// A type needs no further transformation when it is null, or when it is
// neither instantiation-dependent nor variably modified. Types that are
// already transformed still get their referenced declarations marked used.
bool TemplateInstantiator::AlreadyTransformed(QualType T) {
  if (T.isNull())
    return true;

  bool NeedsTransform =
      T->isInstantiationDependentType() || T->isVariablyModifiedType();
  if (!NeedsTransform)
    getSema().MarkDeclarationsReferencedInType(Loc, T);
  return !NeedsTransform;
}
/// Select the element of template argument pack \p Arg at the current
/// pack-substitution index, unwrapping a pack expansion to its pattern.
static TemplateArgument
getPackSubstitutedTemplateArgument(Sema &S, TemplateArgument Arg) {
  const int Index = S.ArgumentPackSubstitutionIndex;
  assert(Index >= 0);
  assert(Index < (int)Arg.pack_size());

  TemplateArgument Selected = Arg.pack_begin()[Index];
  return Selected.isPackExpansion() ? Selected.getPackExpansionPattern()
                                    : Selected;
}
/// Transform a reference to declaration \p D: a template template parameter
/// at a depth we have arguments for is replaced by its template argument;
/// anything else is mapped through FindInstantiatedDecl.
Decl *TemplateInstantiator::TransformDecl(SourceLocation Loc, Decl *D) {
  if (!D)
    return nullptr;

  if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(D)) {
    if (TTP->getDepth() < TemplateArgs.getNumLevels()) {
      // If the corresponding template argument is NULL or non-existent, it's
      // because we are performing instantiation from explicitly-specified
      // template arguments in a function template, but there were some
      // arguments left unspecified.
      if (!TemplateArgs.hasTemplateArgument(TTP->getDepth(),
                                            TTP->getPosition()))
        return D;

      TemplateArgument Arg = TemplateArgs(TTP->getDepth(), TTP->getPosition());

      if (TTP->isParameterPack()) {
        assert(Arg.getKind() == TemplateArgument::Pack &&
               "Missing argument pack");
        // Select the pack element at the current substitution index.
        Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
      }

      TemplateName Template = Arg.getAsTemplate();
      assert(!Template.isNull() && Template.getAsTemplateDecl() &&
             "Wrong kind of template template argument");
      return Template.getAsTemplateDecl();
    }

    // Fall through to find the instantiated declaration for this template
    // template parameter.
  }

  return SemaRef.FindInstantiatedDecl(Loc, cast<NamedDecl>(D), TemplateArgs);
}
/// Instantiate the definition of \p D, recording the mapping from the
/// original declaration to its instantiation in the current local scope.
Decl *TemplateInstantiator::TransformDefinition(SourceLocation Loc, Decl *D) {
  Sema &S = getSema();
  if (Decl *Instantiated = S.SubstDecl(D, S.CurContext, TemplateArgs)) {
    S.CurrentInstantiationScope->InstantiatedLocal(D, Instantiated);
    return Instantiated;
  }
  return nullptr;
}
/// Transform the first component of a nested-name-specifier: a template
/// type parameter with a known argument is reduced to the tag declaration
/// that the substituted type names.
NamedDecl *
TemplateInstantiator::TransformFirstQualifierInScope(NamedDecl *D,
                                                     SourceLocation Loc) {
  // If the first part of the nested-name-specifier was a template type
  // parameter, instantiate that type parameter down to a tag type.
  if (TemplateTypeParmDecl *TTPD = dyn_cast_or_null<TemplateTypeParmDecl>(D)) {
    const TemplateTypeParmType *TTP
      = cast<TemplateTypeParmType>(getSema().Context.getTypeDeclType(TTPD));

    if (TTP->getDepth() < TemplateArgs.getNumLevels()) {
      // FIXME: This needs testing w/ member access expressions.
      TemplateArgument Arg = TemplateArgs(TTP->getDepth(), TTP->getIndex());

      if (TTP->isParameterPack()) {
        assert(Arg.getKind() == TemplateArgument::Pack &&
               "Missing argument pack");

        // Not expanding the enclosing pack expansion yet: no single
        // declaration can be named.
        if (getSema().ArgumentPackSubstitutionIndex == -1)
          return nullptr;

        Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
      }

      QualType T = Arg.getAsType();
      if (T.isNull())
        return cast_or_null<NamedDecl>(TransformDecl(Loc, D));

      if (const TagType *Tag = T->getAs<TagType>())
        return Tag->getDecl();

      // The resulting type is not a tag; complain.
      getSema().Diag(Loc, diag::err_nested_name_spec_non_tag) << T;
      return nullptr;
    }
  }

  return cast_or_null<NamedDecl>(TransformDecl(Loc, D));
}
/// Rebuild a catch-handler exception declaration via the base transform,
/// then register it as the local instantiation of the original.
VarDecl *
TemplateInstantiator::RebuildExceptionDecl(VarDecl *ExceptionDecl,
                                           TypeSourceInfo *Declarator,
                                           SourceLocation StartLoc,
                                           SourceLocation NameLoc,
                                           IdentifierInfo *Name) {
  VarDecl *Rebuilt = inherited::RebuildExceptionDecl(ExceptionDecl, Declarator,
                                                     StartLoc, NameLoc, Name);
  if (Rebuilt)
    getSema().CurrentInstantiationScope->InstantiatedLocal(ExceptionDecl,
                                                           Rebuilt);
  return Rebuilt;
}
/// Rebuild an Objective-C @catch parameter declaration via the base
/// transform, then register it as the local instantiation of the original.
VarDecl *TemplateInstantiator::RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
                                                        TypeSourceInfo *TSInfo,
                                                        QualType T) {
  VarDecl *Rebuilt =
      inherited::RebuildObjCExceptionDecl(ExceptionDecl, TSInfo, T);
  if (Rebuilt)
    getSema().CurrentInstantiationScope->InstantiatedLocal(ExceptionDecl,
                                                           Rebuilt);
  return Rebuilt;
}
/// Rebuild an elaborated type, diagnosing a tag keyword (e.g. 'struct')
/// that no longer matches the declaration the substituted type refers to.
QualType
TemplateInstantiator::RebuildElaboratedType(SourceLocation KeywordLoc,
                                            ElaboratedTypeKeyword Keyword,
                                            NestedNameSpecifierLoc QualifierLoc,
                                            QualType T) {
  if (const TagType *TT = T->getAs<TagType>()) {
    TagDecl* TD = TT->getDecl();

    SourceLocation TagLocation = KeywordLoc;

    IdentifierInfo *Id = TD->getIdentifier();

    // TODO: should we even warn on struct/class mismatches for this? Seems
    // like it's likely to produce a lot of spurious errors.
    if (Id && Keyword != ETK_None && Keyword != ETK_Typename) {
      TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForKeyword(Keyword);
      if (!SemaRef.isAcceptableTagRedeclaration(TD, Kind, /*isDefinition*/false,
                                                TagLocation, Id)) {
        SemaRef.Diag(TagLocation, diag::err_use_with_wrong_tag)
          << Id
          << FixItHint::CreateReplacement(SourceRange(TagLocation),
                                          TD->getKindName());
        SemaRef.Diag(TD->getLocation(), diag::note_previous_use);
      }
    }
  }

  return TreeTransform<TemplateInstantiator>::RebuildElaboratedType(KeywordLoc,
                                                                    Keyword,
                                                                    QualifierLoc,
                                                                    T);
}
/// Transform a template name, substituting template template parameters
/// (and previously-saved argument packs) with the corresponding template
/// arguments.
TemplateName TemplateInstantiator::TransformTemplateName(CXXScopeSpec &SS,
                                                         TemplateName Name,
                                                         SourceLocation NameLoc,
                                                         QualType ObjectType,
                                             NamedDecl *FirstQualifierInScope) {
  if (TemplateTemplateParmDecl *TTP
       = dyn_cast_or_null<TemplateTemplateParmDecl>(Name.getAsTemplateDecl())) {
    if (TTP->getDepth() < TemplateArgs.getNumLevels()) {
      // If the corresponding template argument is NULL or non-existent, it's
      // because we are performing instantiation from explicitly-specified
      // template arguments in a function template, but there were some
      // arguments left unspecified.
      if (!TemplateArgs.hasTemplateArgument(TTP->getDepth(),
                                            TTP->getPosition()))
        return Name;

      TemplateArgument Arg = TemplateArgs(TTP->getDepth(), TTP->getPosition());

      if (TTP->isParameterPack()) {
        assert(Arg.getKind() == TemplateArgument::Pack &&
               "Missing argument pack");

        if (getSema().ArgumentPackSubstitutionIndex == -1) {
          // We have the template argument pack to substitute, but we're not
          // actually expanding the enclosing pack expansion yet. So, just
          // keep the entire argument pack.
          return getSema().Context.getSubstTemplateTemplateParmPack(TTP, Arg);
        }

        Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
      }

      TemplateName Template = Arg.getAsTemplate();
      assert(!Template.isNull() && "Null template template argument");

      // We don't ever want to substitute for a qualified template name, since
      // the qualifier is handled separately. So, look through the qualified
      // template name to its underlying declaration.
      if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
        Template = TemplateName(QTN->getTemplateDecl());

      // Wrap in a node that remembers which parameter was replaced.
      Template = getSema().Context.getSubstTemplateTemplateParm(TTP, Template);
      return Template;
    }
  }

  if (SubstTemplateTemplateParmPackStorage *SubstPack
      = Name.getAsSubstTemplateTemplateParmPack()) {
    if (getSema().ArgumentPackSubstitutionIndex == -1)
      return Name;

    TemplateArgument Arg = SubstPack->getArgumentPack();
    Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
    return Arg.getAsTemplate();
  }

  return inherited::TransformTemplateName(SS, Name, NameLoc, ObjectType,
                                          FirstQualifierInScope);
}
/// Rebuild a predefined expression (e.g. __func__) whose type was
/// dependent; non-dependent ones are returned unchanged.
ExprResult
TemplateInstantiator::TransformPredefinedExpr(PredefinedExpr *E) {
  if (E->isTypeDependent())
    return getSema().BuildPredefinedExpr(E->getLocation(), E->getIdentType());
  return E;
}
/// Substitute a reference to the non-type template parameter \p NTTP with
/// its corresponding template argument, handling parameter packs.
ExprResult
TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
                                               NonTypeTemplateParmDecl *NTTP) {
  // If the corresponding template argument is NULL or non-existent, it's
  // because we are performing instantiation from explicitly-specified
  // template arguments in a function template, but there were some
  // arguments left unspecified.
  if (!TemplateArgs.hasTemplateArgument(NTTP->getDepth(),
                                        NTTP->getPosition()))
    return E;

  TemplateArgument Arg = TemplateArgs(NTTP->getDepth(), NTTP->getPosition());
  if (NTTP->isParameterPack()) {
    assert(Arg.getKind() == TemplateArgument::Pack &&
           "Missing argument pack");

    if (getSema().ArgumentPackSubstitutionIndex == -1) {
      // We have an argument pack, but we can't select a particular argument
      // out of it yet. Therefore, we'll build an expression to hold on to that
      // argument pack.
      QualType TargetType = SemaRef.SubstType(NTTP->getType(), TemplateArgs,
                                              E->getLocation(),
                                              NTTP->getDeclName());
      if (TargetType.isNull())
        return ExprError();

      return new (SemaRef.Context) SubstNonTypeTemplateParmPackExpr(TargetType,
                                                                    NTTP,
                                                              E->getLocation(),
                                                                    Arg);
    }

    Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
  }

  return transformNonTypeTemplateParmRef(NTTP, E->getLocation(), Arg);
}
/// Instantiate the value expression of a loop-hint attribute. The original
/// attribute is reused when the expression is unchanged, or when the
/// transformed value fails CheckLoopHintExpr (which emits the diagnostic).
const LoopHintAttr *
TemplateInstantiator::TransformLoopHintAttr(const LoopHintAttr *LH) {
  Expr *TransformedExpr = getDerived().TransformExpr(LH->getValue()).get();

  if (TransformedExpr == LH->getValue())
    return LH;

  // Generate error if there is a problem with the value.
  if (getSema().CheckLoopHintExpr(TransformedExpr, LH->getLocation()))
    return LH;

  // Create new LoopHintValueAttr with integral expression in place of the
  // non-type template parameter.
  return LoopHintAttr::CreateImplicit(
      getSema().Context, LH->getSemanticSpelling(), LH->getOption(),
      LH->getState(), TransformedExpr, LH->getRange());
}
/// Build the expression replacing a reference to non-type template
/// parameter \p parm with template argument \p arg; the result is wrapped
/// in a SubstNonTypeTemplateParmExpr so the substitution remains visible.
ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
                                                 NonTypeTemplateParmDecl *parm,
                                                 SourceLocation loc,
                                                 TemplateArgument arg) {
  ExprResult result;
  QualType type;

  // The template argument itself might be an expression, in which
  // case we just return that expression.
  if (arg.getKind() == TemplateArgument::Expression) {
    Expr *argExpr = arg.getAsExpr();
    result = argExpr;
    type = argExpr->getType();
  } else if (arg.getKind() == TemplateArgument::Declaration ||
             arg.getKind() == TemplateArgument::NullPtr) {
    ValueDecl *VD;
    if (arg.getKind() == TemplateArgument::Declaration) {
      VD = cast<ValueDecl>(arg.getAsDecl());

      // Find the instantiation of the template argument. This is
      // required for nested templates.
      VD = cast_or_null<ValueDecl>(
             getSema().FindInstantiatedDecl(loc, VD, TemplateArgs));
      if (!VD)
        return ExprError();
    } else {
      // Propagate NULL template argument.
      VD = nullptr;
    }

    // Derive the type we want the substituted decl to have. This had
    // better be non-dependent, or these checks will have serious problems.
    if (parm->isExpandedParameterPack()) {
      type = parm->getExpansionType(SemaRef.ArgumentPackSubstitutionIndex);
    } else if (parm->isParameterPack() &&
               isa<PackExpansionType>(parm->getType())) {
      // Substitute into the pack expansion's pattern to get the element type.
      type = SemaRef.SubstType(
                        cast<PackExpansionType>(parm->getType())->getPattern(),
                        TemplateArgs, loc, parm->getDeclName());
    } else {
      type = SemaRef.SubstType(parm->getType(), TemplateArgs,
                               loc, parm->getDeclName());
    }
    assert(!type.isNull() && "type substitution failed for param type");
    assert(!type->isDependentType() && "param type still dependent");
    result = SemaRef.BuildExpressionFromDeclTemplateArgument(arg, type, loc);

    if (!result.isInvalid()) type = result.get()->getType();
  } else {
    result = SemaRef.BuildExpressionFromIntegralTemplateArgument(arg, loc);

    // Note that this type can be different from the type of 'result',
    // e.g. if it's an enum type.
    type = arg.getIntegralType();
  }

  if (result.isInvalid()) return ExprError();

  Expr *resultExpr = result.get();
  return new (SemaRef.Context) SubstNonTypeTemplateParmExpr(
      type, resultExpr->getValueKind(), loc, parm, resultExpr);
}
/// Expand a previously-saved non-type template argument pack: when a
/// pack-substitution index is active, substitute the selected element;
/// otherwise leave the expression untouched.
ExprResult
TemplateInstantiator::TransformSubstNonTypeTemplateParmPackExpr(
                                          SubstNonTypeTemplateParmPackExpr *E) {
  // Not expanding the pack yet: the expression stays as-is.
  if (getSema().ArgumentPackSubstitutionIndex == -1)
    return E;

  TemplateArgument Selected =
      getPackSubstitutedTemplateArgument(getSema(), E->getArgumentPack());
  return transformNonTypeTemplateParmRef(E->getParameterPack(),
                                         E->getParameterPackLocation(),
                                         Selected);
}
/// Build an unqualified reference to the parameter \p PD at \p Loc.
ExprResult
TemplateInstantiator::RebuildParmVarDeclRefExpr(ParmVarDecl *PD,
                                                SourceLocation Loc) {
  CXXScopeSpec EmptySS;
  return getSema().BuildDeclarationNameExpr(
      EmptySS, DeclarationNameInfo(PD->getDeclName(), Loc), PD);
}
/// Transform a FunctionParmPackExpr: expand it to a single parameter
/// reference when a pack-substitution index is active; otherwise rebuild it
/// over the instantiated parameter declarations.
ExprResult
TemplateInstantiator::TransformFunctionParmPackExpr(FunctionParmPackExpr *E) {
  if (getSema().ArgumentPackSubstitutionIndex != -1) {
    // We can expand this parameter pack now.
    ParmVarDecl *D = E->getExpansion(getSema().ArgumentPackSubstitutionIndex);
    ValueDecl *VD = cast_or_null<ValueDecl>(TransformDecl(E->getExprLoc(), D));
    if (!VD)
      return ExprError();
    return RebuildParmVarDeclRefExpr(cast<ParmVarDecl>(VD), E->getExprLoc());
  }

  QualType T = TransformType(E->getType());
  if (T.isNull())
    return ExprError();

  // Transform each of the parameter expansions into the corresponding
  // parameters in the instantiation of the function decl.
  SmallVector<Decl *, 8> Parms;
  Parms.reserve(E->getNumExpansions());
  for (FunctionParmPackExpr::iterator I = E->begin(), End = E->end();
       I != End; ++I) {
    ParmVarDecl *D =
        cast_or_null<ParmVarDecl>(TransformDecl(E->getExprLoc(), *I));
    if (!D)
      return ExprError();
    Parms.push_back(D);
  }

  return FunctionParmPackExpr::Create(getSema().Context, T,
                                      E->getParameterPack(),
                                      E->getParameterPackLocation(), Parms);
}
/// Transform a reference to the function parameter pack \p PD: select the
/// expansion at the current pack-substitution index, or build a
/// FunctionParmPackExpr to defer the choice when no index is active.
ExprResult
TemplateInstantiator::TransformFunctionParmPackRefExpr(DeclRefExpr *E,
                                                       ParmVarDecl *PD) {
  typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
  llvm::PointerUnion<Decl *, DeclArgumentPack *> *Found
    = getSema().CurrentInstantiationScope->findInstantiationOf(PD);
  assert(Found && "no instantiation for parameter pack");

  Decl *TransformedDecl;
  if (DeclArgumentPack *Pack = Found->dyn_cast<DeclArgumentPack *>()) {
    // If this is a reference to a function parameter pack which we can
    // substitute but can't yet expand, build a FunctionParmPackExpr for it.
    if (getSema().ArgumentPackSubstitutionIndex == -1) {
      QualType T = TransformType(E->getType());
      if (T.isNull())
        return ExprError();
      return FunctionParmPackExpr::Create(getSema().Context, T, PD,
                                          E->getExprLoc(), *Pack);
    }

    TransformedDecl = (*Pack)[getSema().ArgumentPackSubstitutionIndex];
  } else {
    // The pack was not expanded: a single declaration was recorded.
    TransformedDecl = Found->get<Decl*>();
  }

  // We have either an unexpanded pack or a specific expansion.
  return RebuildParmVarDeclRefExpr(cast<ParmVarDecl>(TransformedDecl),
                                   E->getExprLoc());
}
/// Transform a declaration reference, giving special handling to non-type
/// template parameters and function parameter packs.
ExprResult
TemplateInstantiator::TransformDeclRefExpr(DeclRefExpr *E) {
  NamedDecl *Referenced = E->getDecl();

  // A non-type template parameter at a depth we have arguments for is
  // substituted directly. Parameters from deeper levels are left to
  // FindInstantiatedDecl via the generic path below.
  if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Referenced))
    if (NTTP->getDepth() < TemplateArgs.getNumLevels())
      return TransformTemplateParmRefExpr(E, NTTP);

  // References to function parameter packs get pack-specific handling.
  if (auto *Parm = dyn_cast<ParmVarDecl>(Referenced))
    if (Parm->isParameterPack())
      return TransformFunctionParmPackRefExpr(E, Parm);

  return TreeTransform<TemplateInstantiator>::TransformDeclRefExpr(E);
}
/// Rebuild a use of a default argument; by this point the owning function
/// must be non-dependent (not a template pattern).
ExprResult TemplateInstantiator::TransformCXXDefaultArgExpr(
    CXXDefaultArgExpr *E) {
  ParmVarDecl *Param = E->getParam();
  FunctionDecl *Owner = cast<FunctionDecl>(Param->getDeclContext());
  assert(!Owner->getDescribedFunctionTemplate() &&
         "Default arg expressions are never formed in dependent cases.");
  return SemaRef.BuildCXXDefaultArgExpr(E->getUsedLocation(), Owner, Param);
}
/// Transform a function prototype, delegating to the base implementation
/// after opening a local instantiation scope so substituted parameters are
/// tracked separately; \p TransformExceptionSpec controls how the exception
/// specification is handled.
template<typename Fn>
QualType TemplateInstantiator::TransformFunctionProtoType(TypeLocBuilder &TLB,
                                 FunctionProtoTypeLoc TL,
                                 CXXRecordDecl *ThisContext,
                                 unsigned ThisTypeQuals,
                                 Fn TransformExceptionSpec) {
  // We need a local instantiation scope for this function prototype.
  LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
  return inherited::TransformFunctionProtoType(
      TLB, TL, ThisContext, ThisTypeQuals, TransformExceptionSpec);
}
/// Substitute into a single function parameter while rebuilding a function
/// prototype; forwards to Sema::SubstParmVarDecl.
ParmVarDecl *
TemplateInstantiator::TransformFunctionTypeParam(ParmVarDecl *OldParm,
                                                 int indexAdjustment,
                                               Optional<unsigned> NumExpansions,
                                                 bool ExpectParameterPack) {
  return SemaRef.SubstParmVarDecl(OldParm, TemplateArgs, indexAdjustment,
                                  NumExpansions, ExpectParameterPack);
}
/// Replace a template type parameter with its corresponding template type
/// argument; parameters from inner templates have their depth reduced by
/// the number of levels being substituted.
QualType
TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB,
                                                TemplateTypeParmTypeLoc TL) {
  const TemplateTypeParmType *T = TL.getTypePtr();
  if (T->getDepth() < TemplateArgs.getNumLevels()) {
    // Replace the template type parameter with its corresponding
    // template argument.

    // If the corresponding template argument is NULL or doesn't exist, it's
    // because we are performing instantiation from explicitly-specified
    // template arguments in a function template, but there were some
    // arguments left unspecified.
    if (!TemplateArgs.hasTemplateArgument(T->getDepth(), T->getIndex())) {
      TemplateTypeParmTypeLoc NewTL
        = TLB.push<TemplateTypeParmTypeLoc>(TL.getType());
      NewTL.setNameLoc(TL.getNameLoc());
      return TL.getType();
    }

    TemplateArgument Arg = TemplateArgs(T->getDepth(), T->getIndex());

    if (T->isParameterPack()) {
      assert(Arg.getKind() == TemplateArgument::Pack &&
             "Missing argument pack");

      if (getSema().ArgumentPackSubstitutionIndex == -1) {
        // We have the template argument pack, but we're not expanding the
        // enclosing pack expansion yet. Just save the template argument
        // pack for later substitution.
        QualType Result
          = getSema().Context.getSubstTemplateTypeParmPackType(T, Arg);
        SubstTemplateTypeParmPackTypeLoc NewTL
          = TLB.push<SubstTemplateTypeParmPackTypeLoc>(Result);
        NewTL.setNameLoc(TL.getNameLoc());
        return Result;
      }

      Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
    }

    assert(Arg.getKind() == TemplateArgument::Type &&
           "Template argument kind mismatch");

    QualType Replacement = Arg.getAsType();

    // TODO: only do this uniquing once, at the start of instantiation.
    QualType Result
      = getSema().Context.getSubstTemplateTypeParmType(T, Replacement);
    SubstTemplateTypeParmTypeLoc NewTL
      = TLB.push<SubstTemplateTypeParmTypeLoc>(Result);
    NewTL.setNameLoc(TL.getNameLoc());
    return Result;
  }

  // The template type parameter comes from an inner template (e.g.,
  // the template parameter list of a member template inside the
  // template we are instantiating). Create a new template type
  // parameter with the template "level" reduced by one.
  TemplateTypeParmDecl *NewTTPDecl = nullptr;
  if (TemplateTypeParmDecl *OldTTPDecl = T->getDecl())
    NewTTPDecl = cast_or_null<TemplateTypeParmDecl>(
                   TransformDecl(TL.getNameLoc(), OldTTPDecl));

  QualType Result
    = getSema().Context.getTemplateTypeParmType(T->getDepth()
                                                - TemplateArgs.getNumLevels(),
                                                T->getIndex(),
                                                T->isParameterPack(),
                                                NewTTPDecl);
  TemplateTypeParmTypeLoc NewTL = TLB.push<TemplateTypeParmTypeLoc>(Result);
  NewTL.setNameLoc(TL.getNameLoc());
  return Result;
}
/// Expand an already-saved template type argument pack: when a
/// pack-substitution index is active, select that element of the pack;
/// otherwise return the type unchanged.
QualType
TemplateInstantiator::TransformSubstTemplateTypeParmPackType(
                                                          TypeLocBuilder &TLB,
                                         SubstTemplateTypeParmPackTypeLoc TL) {
  if (getSema().ArgumentPackSubstitutionIndex == -1) {
    // We aren't expanding the parameter pack, so just return ourselves.
    SubstTemplateTypeParmPackTypeLoc NewTL
      = TLB.push<SubstTemplateTypeParmPackTypeLoc>(TL.getType());
    NewTL.setNameLoc(TL.getNameLoc());
    return TL.getType();
  }

  TemplateArgument Arg = TL.getTypePtr()->getArgumentPack();
  Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
  QualType Result = Arg.getAsType();

  // Wrap the selected type so the replaced parameter remains visible.
  Result = getSema().Context.getSubstTemplateTypeParmType(
                                      TL.getTypePtr()->getReplacedParameter(),
                                                          Result);
  SubstTemplateTypeParmTypeLoc NewTL
    = TLB.push<SubstTemplateTypeParmTypeLoc>(Result);
  NewTL.setNameLoc(TL.getNameLoc());
  return Result;
}
/// \brief Perform substitution on the type T with a given set of template
/// arguments.
///
/// This routine substitutes the given template arguments into the
/// type T and produces the instantiated type.
///
/// \param T the type into which the template arguments will be
/// substituted. If this type is not dependent, it will be returned
/// immediately.
///
/// \param Args the template arguments that will be
/// substituted for the top-level template parameters within T.
///
/// \param Loc the location in the source code where this substitution
/// is being performed. It will typically be the location of the
/// declarator (if we're instantiating the type of some declaration)
/// or the location of the type in the source code (if, e.g., we're
/// instantiating the type of a cast expression).
///
/// \param Entity the name of the entity associated with a declaration
/// being instantiated (if any). May be empty to indicate that there
/// is no such entity (if, e.g., this is a type that occurs as part of
/// a cast expression) or that the entity has no name (e.g., an
/// unnamed function parameter).
///
/// \returns If the instantiation succeeds, the instantiated
/// type. Otherwise, produces diagnostics and returns a NULL type.
TypeSourceInfo *Sema::SubstType(TypeSourceInfo *T,
                                const MultiLevelTemplateArgumentList &Args,
                                SourceLocation Loc,
                                DeclarationName Entity) {
  assert(!ActiveTemplateInstantiations.empty() &&
         "Cannot perform an instantiation without some context on the "
         "instantiation stack");

  // Substitution is only needed for instantiation-dependent or
  // variably-modified types; anything else is returned unchanged.
  QualType Ty = T->getType();
  if (Ty->isInstantiationDependentType() || Ty->isVariablyModifiedType()) {
    TemplateInstantiator Instantiator(*this, Args, Loc, Entity);
    return Instantiator.TransformType(T);
  }
  return T;
}
/// Substitute template arguments into the type described by \p TL,
/// producing a fresh TypeSourceInfo; returns null for a null type or a
/// failed substitution. See the SubstType overload above for parameter
/// semantics.
TypeSourceInfo *Sema::SubstType(TypeLoc TL,
                                const MultiLevelTemplateArgumentList &Args,
                                SourceLocation Loc,
                                DeclarationName Entity) {
  assert(!ActiveTemplateInstantiations.empty() &&
         "Cannot perform an instantiation without some context on the "
         "instantiation stack");

  if (TL.getType().isNull())
    return nullptr;

  if (!TL.getType()->isInstantiationDependentType() &&
      !TL.getType()->isVariablyModifiedType()) {
    // FIXME: Make a copy of the TypeLoc data here, so that we can
    // return a new TypeSourceInfo. Inefficient!
    TypeLocBuilder TLB;
    TLB.pushFullCopy(TL);
    return TLB.getTypeSourceInfo(Context, TL.getType());
  }

  TemplateInstantiator Instantiator(*this, Args, Loc, Entity);
  TypeLocBuilder TLB;
  TLB.reserve(TL.getFullDataSize());
  QualType Result = Instantiator.TransformType(TLB, TL);
  if (Result.isNull())
    return nullptr;

  return TLB.getTypeSourceInfo(Context, Result);
}
/// Deprecated form of the above, operating on a bare QualType with no
/// source-location information.
QualType Sema::SubstType(QualType T,
                         const MultiLevelTemplateArgumentList &TemplateArgs,
                         SourceLocation Loc, DeclarationName Entity) {
  assert(!ActiveTemplateInstantiations.empty() &&
         "Cannot perform an instantiation without some context on the "
         "instantiation stack");

  // Only dependent or variably-modified types require substitution.
  bool NeedsSubst =
      T->isInstantiationDependentType() || T->isVariablyModifiedType();
  if (!NeedsSubst)
    return T;

  TemplateInstantiator Instantiator(*this, TemplateArgs, Loc, Entity);
  return Instantiator.TransformType(T);
}
/// Determine whether the type of a function declaration must be rebuilt
/// during instantiation: either the type itself is dependent or variably
/// modified, or one of its parameters has a dependent as-written type or a
/// default argument.
static bool NeedsInstantiationAsFunctionType(TypeSourceInfo *T) {
  QualType Ty = T->getType();
  if (Ty->isInstantiationDependentType() || Ty->isVariablyModifiedType())
    return true;

  TypeLoc TL = T->getTypeLoc().IgnoreParens();
  FunctionProtoTypeLoc FP = TL.getAs<FunctionProtoTypeLoc>();
  if (!FP)
    return false;

  for (unsigned Idx = 0, NumParams = FP.getNumParams(); Idx != NumParams;
       ++Idx) {
    ParmVarDecl *Param = FP.getParam(Idx);

    // A null parameter was synthesized from a typedef; nothing to inspect.
    if (!Param)
      continue;

    // The parameter's type as written might be dependent even if the
    // decayed type was not dependent.
    TypeSourceInfo *Written = Param->getTypeSourceInfo();
    if (Written && Written->getType()->isInstantiationDependentType())
      return true;

    // TODO: currently we always rebuild expressions. When we
    // properly get lazier about this, we should use the same
    // logic to avoid rebuilding prototypes here.
    if (Param->hasDefaultArg())
      return true;
  }

  return false;
}
/// A form of SubstType intended specifically for instantiating the
/// type of a FunctionDecl. Its purpose is solely to force the
/// instantiation of default-argument expressions and to avoid
/// instantiating an exception-specification.
/// The exception specification is deliberately preserved unchanged here;
/// it is substituted separately once the FunctionDecl exists (see FIXME
/// below).
TypeSourceInfo *Sema::SubstFunctionDeclType(TypeSourceInfo *T,
                                const MultiLevelTemplateArgumentList &Args,
                                SourceLocation Loc,
                                DeclarationName Entity,
                                CXXRecordDecl *ThisContext,
                                unsigned ThisTypeQuals) {
  assert(!ActiveTemplateInstantiations.empty() &&
         "Cannot perform an instantiation without some context on the "
         "instantiation stack");

  if (!NeedsInstantiationAsFunctionType(T))
    return T;

  TemplateInstantiator Instantiator(*this, Args, Loc, Entity);

  TypeLocBuilder TLB;

  TypeLoc TL = T->getTypeLoc();
  TLB.reserve(TL.getFullDataSize());

  QualType Result;

  if (FunctionProtoTypeLoc Proto =
          TL.IgnoreParens().getAs<FunctionProtoTypeLoc>()) {
    // Instantiate the type, other than its exception specification. The
    // exception specification is instantiated in InitFunctionInstantiation
    // once we've built the FunctionDecl.
    // FIXME: Set the exception specification to EST_Uninstantiated here,
    // instead of rebuilding the function type again later.
    Result = Instantiator.TransformFunctionProtoType(
        TLB, Proto, ThisContext, ThisTypeQuals,
        [](FunctionProtoType::ExceptionSpecInfo &ESI,
           bool &Changed) { return false; });
  } else {
    Result = Instantiator.TransformType(TLB, TL);
  }

  if (Result.isNull())
    return nullptr;

  return TLB.getTypeSourceInfo(Context, Result);
}
/// Substitute template arguments into the exception specification of
/// \p Proto and install the result on the instantiated function \p New.
void Sema::SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                              const MultiLevelTemplateArgumentList &Args) {
  FunctionProtoType::ExceptionSpecInfo SpecInfo =
      Proto->getExtProtoInfo().ExceptionSpec;
  assert(SpecInfo.Type != EST_Uninstantiated);

  TemplateInstantiator Transformer(*this, Args, New->getLocation(),
                                   New->getDeclName());

  SmallVector<QualType, 4> ExceptionTypes;
  bool SpecChanged = false;
  SourceLocation SpecLoc = New->getTypeSourceInfo()->getTypeLoc().getLocEnd();
  bool Failed = Transformer.TransformExceptionSpec(SpecLoc, SpecInfo,
                                                   ExceptionTypes, SpecChanged);
  // On error, recover by dropping the exception specification.
  if (Failed)
    SpecInfo.Type = EST_None;

  UpdateExceptionSpec(New, SpecInfo);
}
/// \brief Substitute template arguments into a function parameter
/// declaration, producing the parameter for the instantiation.
///
/// \param OldParm the parameter from the template pattern.
/// \param TemplateArgs the template arguments to substitute.
/// \param indexAdjustment adjustment applied to the parameter's
///        function-scope index (accounts for packs expanded earlier in the
///        parameter list).
/// \param NumExpansions if the parameter remains a pack expansion, the
///        number of expansions, if known.
/// \param ExpectParameterPack whether the substituted parameter is required
///        to still be a parameter pack.
///
/// \returns the new parameter declaration, or null on error.
ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                                    int indexAdjustment,
                                    Optional<unsigned> NumExpansions,
                                    bool ExpectParameterPack) {
  TypeSourceInfo *OldDI = OldParm->getTypeSourceInfo();
  TypeSourceInfo *NewDI = nullptr;

  TypeLoc OldTL = OldDI->getTypeLoc();
  if (PackExpansionTypeLoc ExpansionTL = OldTL.getAs<PackExpansionTypeLoc>()) {

    // We have a function parameter pack. Substitute into the pattern of the
    // expansion.
    NewDI = SubstType(ExpansionTL.getPatternLoc(), TemplateArgs,
                      OldParm->getLocation(), OldParm->getDeclName());
    if (!NewDI)
      return nullptr;

    if (NewDI->getType()->containsUnexpandedParameterPack()) {
      // We still have unexpanded parameter packs, which means that
      // our function parameter is still a function parameter pack.
      // Therefore, make its type a pack expansion type.
      NewDI = CheckPackExpansion(NewDI, ExpansionTL.getEllipsisLoc(),
                                 NumExpansions);
    } else if (ExpectParameterPack) {
      // We expected to get a parameter pack but didn't (because the type
      // itself is not a pack expansion type), so complain. This can occur when
      // the substitution goes through an alias template that "loses" the
      // pack expansion.
      Diag(OldParm->getLocation(),
           diag::err_function_parameter_pack_without_parameter_packs)
        << NewDI->getType();
      return nullptr;
    }
  } else {
    // Not a pack: substitute directly into the written type.
    NewDI = SubstType(OldDI, TemplateArgs, OldParm->getLocation(),
                      OldParm->getDeclName());
  }

  if (!NewDI)
    return nullptr;

  // A function parameter may not have void type.
  if (NewDI->getType()->isVoidType()) {
    Diag(OldParm->getLocation(), diag::err_param_with_void_type);
    return nullptr;
  }

  // Build the new parameter. It is initially parented to the translation
  // unit; the real declaration context is installed further below.
  ParmVarDecl *NewParm = CheckParameter(Context.getTranslationUnitDecl(),
                                        OldParm->getInnerLocStart(),
                                        OldParm->getLocation(),
                                        OldParm->getIdentifier(),
                                        NewDI->getType(), NewDI,
                                        OldParm->getStorageClass(),
                                        OldParm->getParamModifiers()); // HLSL Change - add param mod
  if (!NewParm)
    return nullptr;

  // Mark the (new) default argument as uninstantiated (if any).
  if (OldParm->hasUninstantiatedDefaultArg()) {
    Expr *Arg = OldParm->getUninstantiatedDefaultArg();
    NewParm->setUninstantiatedDefaultArg(Arg);
  } else if (OldParm->hasUnparsedDefaultArg()) {
    // The default argument hasn't been parsed yet; record the mapping so it
    // can be propagated to the instantiation when parsing completes.
    NewParm->setUnparsedDefaultArg();
    UnparsedDefaultArgInstantiations[OldParm].push_back(NewParm);
  } else if (Expr *Arg = OldParm->getDefaultArg()) {
    FunctionDecl *OwningFunc = cast<FunctionDecl>(OldParm->getDeclContext());
    CXXRecordDecl *ClassD = dyn_cast<CXXRecordDecl>(OwningFunc->getDeclContext());
    if (ClassD && ClassD->isLocalClass() && !ClassD->isLambda()) {
      // If this is a method of a local class, as per DR1484 its default
      // arguments must be instantiated.
      Sema::ContextRAII SavedContext(*this, ClassD);
      LocalInstantiationScope Local(*this);
      ExprResult NewArg = SubstExpr(Arg, TemplateArgs);
      if (NewArg.isUsable())
        NewParm->setDefaultArg(NewArg.get());
    } else {
      // FIXME: if we non-lazily instantiated non-dependent default args for
      // non-dependent parameter types we could remove a bunch of duplicate
      // conversion warnings for such arguments.
      NewParm->setUninstantiatedDefaultArg(Arg);
    }
  }

  NewParm->setHasInheritedDefaultArg(OldParm->hasInheritedDefaultArg());

  if (OldParm->isParameterPack() && !NewParm->isParameterPack()) {
    // Add the new parameter to the instantiated parameter pack.
    CurrentInstantiationScope->InstantiatedLocalPackArg(OldParm, NewParm);
  } else {
    // Introduce an Old -> New mapping
    CurrentInstantiationScope->InstantiatedLocal(OldParm, NewParm);
  }

  // FIXME: OldParm may come from a FunctionProtoType, in which case CurContext
  // can be anything, is this right ?
  NewParm->setDeclContext(CurContext);

  // Preserve the parameter's position, shifted by any earlier pack expansion.
  NewParm->setScopeInfo(OldParm->getFunctionScopeDepth(),
                        OldParm->getFunctionScopeIndex() + indexAdjustment);

  InstantiateAttrs(TemplateArgs, OldParm, NewParm);

  return NewParm;
}
/// \brief Substitute the given template arguments into the given set of
/// parameters, producing the set of parameter types that would be generated
/// from such a substitution.
bool Sema::SubstParmTypes(SourceLocation Loc,
                          ParmVarDecl **Params, unsigned NumParams,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SmallVectorImpl<QualType> &ParamTypes,
                          SmallVectorImpl<ParmVarDecl *> *OutParams) {
  assert(!ActiveTemplateInstantiations.empty() &&
         "Cannot perform an instantiation without some context on the "
         "instantiation stack");

  // Delegate the actual work to a TemplateInstantiator; no entity name is
  // associated with this substitution.
  TemplateInstantiator Transformer(*this, TemplateArgs, Loc,
                                   DeclarationName());
  bool Failed = Transformer.TransformFunctionTypeParams(
      Loc, Params, NumParams, /*ParamTypes=*/nullptr, ParamTypes, OutParams);
  return Failed;
}
/// HLSL Change Begin - back ported from llvm-project/4409a83c2935.
/// Substitute the given template arguments into the default argument.
///
/// \param Loc the point at which the default argument is being used.
/// \param Param the parameter whose uninstantiated default argument is
///        substituted into; on success its default argument is updated.
/// \param TemplateArgs the template arguments to substitute.
/// \param ForCallExpr true when instantiating because the default argument
///        is used in a call expression (adds parameter-scope setup and full
///        initialization checking).
///
/// \returns true on error, false on success.
bool Sema::SubstDefaultArgument(
    SourceLocation Loc,
    ParmVarDecl *Param,
    const MultiLevelTemplateArgumentList &TemplateArgs,
    bool ForCallExpr) {
  FunctionDecl *FD = cast<FunctionDecl>(Param->getDeclContext());
  Expr *PatternExpr = Param->getUninstantiatedDefaultArg();

  EnterExpressionEvaluationContext EvalContext(
      *this, ExpressionEvaluationContext::PotentiallyEvaluated, Param);

  InstantiatingTemplate Inst(*this, Loc, Param, TemplateArgs.getInnermost());
  if (Inst.isInvalid())
    return true;

  ExprResult Result;
  {
    // C++ [dcl.fct.default]p5:
    //   The names in the [default argument] expression are bound, and
    //   the semantic constraints are checked, at the point where the
    //   default argument expression appears.
    ContextRAII SavedContext(*this, FD);
    std::unique_ptr<LocalInstantiationScope> LIS;

    if (ForCallExpr) {
      // When instantiating a default argument due to use in a call expression,
      // an instantiation scope that includes the parameters of the callee is
      // required to satisfy references from the default argument. For example:
      //   template<typename T> void f(T a, int = decltype(a)());
      //   void g() { f(0); }
      LIS = std::make_unique<LocalInstantiationScope>(*this);
      FunctionDecl *PatternFD = FD->getTemplateInstantiationPattern();
      if (addInstantiatedParametersToScope(FD, PatternFD, *LIS, TemplateArgs))
        return true;
    }

    Result = SubstInitializer(PatternExpr, TemplateArgs,
                              /*DirectInit*/false);
  }
  if (Result.isInvalid())
    return true;

  if (ForCallExpr) {
    // Check the expression as an initializer for the parameter.
    if (RequireCompleteType(Param->getLocation(), Param->getType(),
                            diag::err_typecheck_decl_incomplete_type))
      return true;

    InitializedEntity Entity
      = InitializedEntity::InitializeParameter(Context, Param);
    InitializationKind Kind = InitializationKind::CreateCopy(
        Param->getLocation(),
        /*FIXME:EqualLoc*/ PatternExpr->getLocStart());
    Expr *ResultE = Result.getAs<Expr>();

    InitializationSequence InitSeq(*this, Entity, Kind, ResultE);
    Result = InitSeq.Perform(*this, Entity, Kind, ResultE);
    if (Result.isInvalid())
      return true;

    Result =
        ActOnFinishFullExpr(Result.getAs<Expr>(), Param->getOuterLocStart(),
                            /*DiscardedValue*/ false);
  } else {
    // FIXME: Obtain the source location for the '=' token.
    SourceLocation EqualLoc = PatternExpr->getLocStart();
    Result = SetParamDefaultArgument(Param, Result.getAs<Expr>(), EqualLoc);
  }
  if (Result.isInvalid())
    return true;

  // Remember the instantiated default argument.
  Param->setDefaultArg(Result.getAs<Expr>());

  return false;
}
/// HLSL Change End - back ported from llvm-project/4409a83c2935.
/// \brief Perform substitution on the base class specifiers of the
/// given class template specialization.
///
/// Produces a diagnostic and returns true on error, returns false and
/// attaches the instantiated base classes to the class template
/// specialization if successful.
bool
Sema::SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
                          CXXRecordDecl *Pattern,
                          const MultiLevelTemplateArgumentList &TemplateArgs) {
  bool Invalid = false;
  SmallVector<CXXBaseSpecifier*, 4> InstantiatedBases;
  for (const auto &Base : Pattern->bases()) {
    if (!Base.getType()->isDependentType()) {
      // Non-dependent base: copy it through unchanged, but propagate
      // invalidity from the base class to the instantiation.
      if (const CXXRecordDecl *RD = Base.getType()->getAsCXXRecordDecl()) {
        if (RD->isInvalidDecl())
          Instantiation->setInvalidDecl();
      }
      InstantiatedBases.push_back(new (Context) CXXBaseSpecifier(Base));
      continue;
    }

    SourceLocation EllipsisLoc;
    TypeSourceInfo *BaseTypeLoc;
    if (Base.isPackExpansion()) {
      // This is a pack expansion. See whether we should expand it now, or
      // wait until later.
      SmallVector<UnexpandedParameterPack, 2> Unexpanded;
      collectUnexpandedParameterPacks(Base.getTypeSourceInfo()->getTypeLoc(),
                                      Unexpanded);
      bool ShouldExpand = false;
      bool RetainExpansion = false;
      Optional<unsigned> NumExpansions;
      if (CheckParameterPacksForExpansion(Base.getEllipsisLoc(),
                                          Base.getSourceRange(),
                                          Unexpanded,
                                          TemplateArgs, ShouldExpand,
                                          RetainExpansion,
                                          NumExpansions)) {
        Invalid = true;
        continue;
      }

      // If we should expand this pack expansion now, do so.
      if (ShouldExpand) {
        // Substitute once per expansion, producing one base specifier each.
        for (unsigned I = 0; I != *NumExpansions; ++I) {
          Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I);

          TypeSourceInfo *BaseTypeLoc = SubstType(Base.getTypeSourceInfo(),
                                                  TemplateArgs,
                                              Base.getSourceRange().getBegin(),
                                                  DeclarationName());
          if (!BaseTypeLoc) {
            Invalid = true;
            continue;
          }

          if (CXXBaseSpecifier *InstantiatedBase
                = CheckBaseSpecifier(Instantiation,
                                     Base.getSourceRange(),
                                     Base.isVirtual(),
                                     Base.getAccessSpecifierAsWritten(),
                                     BaseTypeLoc,
                                     SourceLocation()))
            InstantiatedBases.push_back(InstantiatedBase);
          else
            Invalid = true;
        }

        continue;
      }

      // The resulting base specifier will (still) be a pack expansion.
      EllipsisLoc = Base.getEllipsisLoc();
      // Index -1 means "substitute the pattern, not a specific element".
      Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, -1);
      BaseTypeLoc = SubstType(Base.getTypeSourceInfo(),
                              TemplateArgs,
                              Base.getSourceRange().getBegin(),
                              DeclarationName());
    } else {
      BaseTypeLoc = SubstType(Base.getTypeSourceInfo(),
                              TemplateArgs,
                              Base.getSourceRange().getBegin(),
                              DeclarationName());
    }

    if (!BaseTypeLoc) {
      Invalid = true;
      continue;
    }

    if (CXXBaseSpecifier *InstantiatedBase
          = CheckBaseSpecifier(Instantiation,
                               Base.getSourceRange(),
                               Base.isVirtual(),
                               Base.getAccessSpecifierAsWritten(),
                               BaseTypeLoc,
                               EllipsisLoc))
      InstantiatedBases.push_back(InstantiatedBase);
    else
      Invalid = true;
  }

  if (!Invalid &&
      AttachBaseSpecifiers(Instantiation, InstantiatedBases.data(),
                           InstantiatedBases.size()))
    Invalid = true;

  return Invalid;
}
// Defined via #include from SemaTemplateInstantiateDecl.cpp
namespace clang {
  namespace sema {
    // Forward declaration: instantiates a template-dependent attribute so it
    // can be attached to an instantiated declaration (used for late-parsed
    // attribute instantiation below).
    Attr *instantiateTemplateAttribute(const Attr *At, ASTContext &C, Sema &S,
                            const MultiLevelTemplateArgumentList &TemplateArgs);
  }
}
/// Determine whether we would be unable to instantiate this template (because
/// it either has no definition, or is in the process of being instantiated).
///
/// \param PointOfInstantiation where the instantiation was required.
/// \param Instantiation the declaration being instantiated.
/// \param InstantiatedFromMember whether this is an instantiation of a
///        member of a class template (selects a different diagnostic).
/// \param Pattern the templated declaration the instantiation comes from.
/// \param PatternDef the pattern's definition, if any.
/// \param TSK the kind of instantiation being performed.
/// \param Complain whether to emit diagnostics when instantiation is
///        impossible.
///
/// \returns true if instantiation cannot proceed, false otherwise.
static bool DiagnoseUninstantiableTemplate(Sema &S,
                                           SourceLocation PointOfInstantiation,
                                           TagDecl *Instantiation,
                                           bool InstantiatedFromMember,
                                           TagDecl *Pattern,
                                           TagDecl *PatternDef,
                                           TemplateSpecializationKind TSK,
                                           bool Complain = true) {
  // A complete, not-currently-being-defined pattern can be instantiated.
  if (PatternDef && !PatternDef->isBeingDefined())
    return false;

  if (!Complain || (PatternDef && PatternDef->isInvalidDecl())) {
    // Say nothing
  } else if (PatternDef) {
    // The pattern exists but is still being defined: we're inside its own
    // definition, so instantiating it now would recurse.
    assert(PatternDef->isBeingDefined());
    S.Diag(PointOfInstantiation,
           diag::err_template_instantiate_within_definition)
      << (TSK != TSK_ImplicitInstantiation)
      << S.Context.getTypeDeclType(Instantiation);
    // Not much point in noting the template declaration here, since
    // we're lexically inside it.
    Instantiation->setInvalidDecl();
  } else if (InstantiatedFromMember) {
    S.Diag(PointOfInstantiation,
           diag::err_implicit_instantiate_member_undefined)
      << S.Context.getTypeDeclType(Instantiation);
    S.Diag(Pattern->getLocation(), diag::note_member_declared_at);
  } else {
    S.Diag(PointOfInstantiation, diag::err_template_instantiate_undefined)
      << (TSK != TSK_ImplicitInstantiation)
      << S.Context.getTypeDeclType(Instantiation);
    if (Pattern->getLocation().isValid()) { // HLSL Change - ellide location notes for built-ins
      S.Diag(Pattern->getLocation(), diag::note_template_decl_here);
    }
  }

  // In general, Instantiation isn't marked invalid to get more than one
  // error for multiple undefined instantiations. But the code that does
  // explicit declaration -> explicit definition conversion can't handle
  // invalid declarations, so mark as invalid in that case.
  if (TSK == TSK_ExplicitInstantiationDeclaration)
    Instantiation->setInvalidDecl();

  return true;
}
/// \brief Instantiate the definition of a class from a given pattern.
///
/// \param PointOfInstantiation The point of instantiation within the
/// source code.
///
/// \param Instantiation is the declaration whose definition is being
/// instantiated. This will be either a class template specialization
/// or a member class of a class template specialization.
///
/// \param Pattern is the pattern from which the instantiation
/// occurs. This will be either the declaration of a class template or
/// the declaration of a member class of a class template.
///
/// \param TemplateArgs The template arguments to be substituted into
/// the pattern.
///
/// \param TSK the kind of implicit or explicit instantiation to perform.
///
/// \param Complain whether to complain if the class cannot be instantiated due
/// to the lack of a definition.
///
/// \returns true if an error occurred, false otherwise.
bool
Sema::InstantiateClass(SourceLocation PointOfInstantiation,
                       CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
                       const MultiLevelTemplateArgumentList &TemplateArgs,
                       TemplateSpecializationKind TSK,
                       bool Complain) {
  CXXRecordDecl *PatternDef
    = cast_or_null<CXXRecordDecl>(Pattern->getDefinition());
  if (DiagnoseUninstantiableTemplate(*this, PointOfInstantiation, Instantiation,
                                Instantiation->getInstantiatedFromMemberClass(),
                                     Pattern, PatternDef, TSK, Complain))
    return true;

  // HLSL Change Begin - Support hierarchial time tracing.
  llvm::TimeTraceScope TimeScope("InstantiateClass", [&]() {
    return Instantiation->getQualifiedNameAsString();
  });
  // HLSL Change End - Support hierarchial time tracing.

  // Instantiate from the definition, not merely a declaration.
  Pattern = PatternDef;

  // \brief Record the point of instantiation.
  if (MemberSpecializationInfo *MSInfo
        = Instantiation->getMemberSpecializationInfo()) {
    MSInfo->setTemplateSpecializationKind(TSK);
    MSInfo->setPointOfInstantiation(PointOfInstantiation);
  } else if (ClassTemplateSpecializationDecl *Spec
        = dyn_cast<ClassTemplateSpecializationDecl>(Instantiation)) {
    Spec->setTemplateSpecializationKind(TSK);
    Spec->setPointOfInstantiation(PointOfInstantiation);
  }

  InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
  if (Inst.isInvalid())
    return true;

  // Enter the scope of this instantiation. We don't use
  // PushDeclContext because we don't have a scope.
  ContextRAII SavedContext(*this, Instantiation);
  EnterExpressionEvaluationContext EvalContext(*this,
                                               Sema::PotentiallyEvaluated);

  // If this is an instantiation of a local class, merge this local
  // instantiation scope with the enclosing scope. Otherwise, every
  // instantiation of a class has its own local instantiation scope.
  bool MergeWithParentScope = !Instantiation->isDefinedOutsideFunctionOrMethod();
  LocalInstantiationScope Scope(*this, MergeWithParentScope);

  // Pull attributes from the pattern onto the instantiation.
  InstantiateAttrs(TemplateArgs, Pattern, Instantiation);

  // Start the definition of this instantiation.
  Instantiation->startDefinition();

  // The instantiation is visible here, even if it was first declared in an
  // unimported module.
  Instantiation->setHidden(false);

  // FIXME: This loses the as-written tag kind for an explicit instantiation.
  Instantiation->setTagKind(Pattern->getTagKind());

  // Do substitution on the base class specifiers.
  if (SubstBaseSpecifiers(Instantiation, Pattern, TemplateArgs))
    Instantiation->setInvalidDecl();

  TemplateDeclInstantiator Instantiator(*this, Instantiation, TemplateArgs);
  SmallVector<Decl*, 4> Fields;
  // Delay instantiation of late parsed attributes.
  LateInstantiatedAttrVec LateAttrs;
  Instantiator.enableLateAttributeInstantiation(&LateAttrs);

  for (auto *Member : Pattern->decls()) {
    // Don't instantiate members not belonging in this semantic context.
    // e.g. for:
    // @code
    //    template <int i> class A {
    //      class B *g;
    //    };
    // @endcode
    // 'class B' has the template as lexical context but semantically it is
    // introduced in namespace scope.
    if (Member->getDeclContext() != Pattern)
      continue;

    if (Member->isInvalidDecl()) {
      Instantiation->setInvalidDecl();
      continue;
    }

    Decl *NewMember = Instantiator.Visit(Member);
    if (NewMember) {
      if (FieldDecl *Field = dyn_cast<FieldDecl>(NewMember)) {
        // Collect fields for the ActOnFields call below.
        Fields.push_back(Field);
      } else if (EnumDecl *Enum = dyn_cast<EnumDecl>(NewMember)) {
        // C++11 [temp.inst]p1: The implicit instantiation of a class template
        // specialization causes the implicit instantiation of the definitions
        // of unscoped member enumerations.
        // Record a point of instantiation for this implicit instantiation.
        if (TSK == TSK_ImplicitInstantiation && !Enum->isScoped() &&
            Enum->isCompleteDefinition()) {
          MemberSpecializationInfo *MSInfo =Enum->getMemberSpecializationInfo();
          assert(MSInfo && "no spec info for member enum specialization");
          MSInfo->setTemplateSpecializationKind(TSK_ImplicitInstantiation);
          MSInfo->setPointOfInstantiation(PointOfInstantiation);
        }
      } else if (StaticAssertDecl *SA = dyn_cast<StaticAssertDecl>(NewMember)) {
        if (SA->isFailed()) {
          // A static_assert failed. Bail out; instantiating this
          // class is probably not meaningful.
          Instantiation->setInvalidDecl();
          break;
        }
      }

      if (NewMember->isInvalidDecl())
        Instantiation->setInvalidDecl();
    } else {
      // FIXME: Eventually, a NULL return will mean that one of the
      // instantiations was a semantic disaster, and we'll want to mark the
      // declaration invalid.
      // For now, we expect to skip some members that we can't yet handle.
    }
  }

  // Finish checking fields.
  ActOnFields(nullptr, Instantiation->getLocation(), Instantiation, Fields,
              SourceLocation(), SourceLocation(), nullptr);
  CheckCompletedCXXClass(Instantiation);

  // Default arguments are parsed, if not instantiated. We can go instantiate
  // default arg exprs for default constructors if necessary now.
  ActOnFinishCXXMemberDefaultArgs(Instantiation);

  // Instantiate late parsed attributes, and attach them to their decls.
  // See Sema::InstantiateAttrs
  for (LateInstantiatedAttrVec::iterator I = LateAttrs.begin(),
       E = LateAttrs.end(); I != E; ++I) {
    assert(CurrentInstantiationScope == Instantiator.getStartingScope());
    CurrentInstantiationScope = I->Scope;

    // Allow 'this' within late-parsed attributes.
    NamedDecl *ND = dyn_cast<NamedDecl>(I->NewDecl);
    CXXRecordDecl *ThisContext =
        dyn_cast_or_null<CXXRecordDecl>(ND->getDeclContext());
    CXXThisScopeRAII ThisScope(*this, ThisContext, /*TypeQuals*/0,
                               ND && ND->isCXXInstanceMember());

    Attr *NewAttr =
      instantiateTemplateAttribute(I->TmplAttr, Context, *this, TemplateArgs);
    I->NewDecl->addAttr(NewAttr);
    LocalInstantiationScope::deleteScopes(I->Scope,
                                          Instantiator.getStartingScope());
  }
  Instantiator.disableLateAttributeInstantiation();
  LateAttrs.clear();

  ActOnFinishDelayedMemberInitializers(Instantiation);

  // FIXME: We should do something similar for explicit instantiations so they
  // end up in the right module.
  if (TSK == TSK_ImplicitInstantiation) {
    Instantiation->setLocation(Pattern->getLocation());
    Instantiation->setLocStart(Pattern->getInnerLocStart());
    Instantiation->setRBraceLoc(Pattern->getRBraceLoc());
  }

  if (!Instantiation->isInvalidDecl()) {
    // Perform any dependent diagnostics from the pattern.
    PerformDependentDiagnostics(Pattern, TemplateArgs);

    // Instantiate any out-of-line class template partial
    // specializations now.
    for (TemplateDeclInstantiator::delayed_partial_spec_iterator
              P = Instantiator.delayed_partial_spec_begin(),
           PEnd = Instantiator.delayed_partial_spec_end();
         P != PEnd; ++P) {
      if (!Instantiator.InstantiateClassTemplatePartialSpecialization(
              P->first, P->second)) {
        Instantiation->setInvalidDecl();
        break;
      }
    }

    // Instantiate any out-of-line variable template partial
    // specializations now.
    for (TemplateDeclInstantiator::delayed_var_partial_spec_iterator
              P = Instantiator.delayed_var_partial_spec_begin(),
           PEnd = Instantiator.delayed_var_partial_spec_end();
         P != PEnd; ++P) {
      if (!Instantiator.InstantiateVarTemplatePartialSpecialization(
              P->first, P->second)) {
        Instantiation->setInvalidDecl();
        break;
      }
    }
  }

  // Exit the scope of this instantiation.
  SavedContext.pop();

  if (!Instantiation->isInvalidDecl()) {
    Consumer.HandleTagDeclDefinition(Instantiation);

    // Always emit the vtable for an explicit instantiation definition
    // of a polymorphic class template specialization.
    if (TSK == TSK_ExplicitInstantiationDefinition)
      MarkVTableUsed(PointOfInstantiation, Instantiation, true);
  }

  return Instantiation->isInvalidDecl();
}
/// \brief Instantiate the definition of an enum from a given pattern.
///
/// \param PointOfInstantiation The point of instantiation within the
///        source code.
/// \param Instantiation is the declaration whose definition is being
///        instantiated. This will be a member enumeration of a class
///        temploid specialization, or a local enumeration within a
///        function temploid specialization.
/// \param Pattern The templated declaration from which the instantiation
///        occurs.
/// \param TemplateArgs The template arguments to be substituted into
///        the pattern.
/// \param TSK The kind of implicit or explicit instantiation to perform.
///
/// \return \c true if an error occurred, \c false otherwise.
bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
                           EnumDecl *Instantiation, EnumDecl *Pattern,
                           const MultiLevelTemplateArgumentList &TemplateArgs,
                           TemplateSpecializationKind TSK) {
  EnumDecl *PatternDef = Pattern->getDefinition();
  if (DiagnoseUninstantiableTemplate(*this, PointOfInstantiation, Instantiation,
                                 Instantiation->getInstantiatedFromMemberEnum(),
                                     Pattern, PatternDef, TSK,/*Complain*/true))
    return true;
  // Instantiate from the definition.
  Pattern = PatternDef;

  // Record the point of instantiation.
  if (MemberSpecializationInfo *MSInfo
        = Instantiation->getMemberSpecializationInfo()) {
    MSInfo->setTemplateSpecializationKind(TSK);
    MSInfo->setPointOfInstantiation(PointOfInstantiation);
  }

  InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
  if (Inst.isInvalid())
    return true;

  // The instantiation is visible here, even if it was first declared in an
  // unimported module.
  Instantiation->setHidden(false);

  // Enter the scope of this instantiation. We don't use
  // PushDeclContext because we don't have a scope.
  ContextRAII SavedContext(*this, Instantiation);
  EnterExpressionEvaluationContext EvalContext(*this,
                                               Sema::PotentiallyEvaluated);

  LocalInstantiationScope Scope(*this, /*MergeWithParentScope*/true);

  // Pull attributes from the pattern onto the instantiation.
  InstantiateAttrs(TemplateArgs, Pattern, Instantiation);

  TemplateDeclInstantiator Instantiator(*this, Instantiation, TemplateArgs);
  Instantiator.InstantiateEnumDefinition(Instantiation, Pattern);

  // Exit the scope of this instantiation.
  SavedContext.pop();

  return Instantiation->isInvalidDecl();
}
/// \brief Instantiate the in-class initializer of a field from the given
/// pattern.
///
/// \param PointOfInstantiation The point of instantiation within the
///        source code.
/// \param Instantiation is the field whose in-class initializer is being
///        instantiated; it belongs to a class temploid specialization.
/// \param Pattern The templated field from which the instantiation occurs.
/// \param TemplateArgs The template arguments to be substituted into
///        the pattern.
///
/// \return \c true if an error occurred, \c false otherwise.
bool Sema::InstantiateInClassInitializer(
    SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
    FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs) {
  // If there is no initializer, we don't need to do anything.
  if (!Pattern->hasInClassInitializer())
    return false;

  assert(Instantiation->getInClassInitStyle() ==
             Pattern->getInClassInitStyle() &&
         "pattern and instantiation disagree about init style");

  // Error out if we haven't parsed the initializer of the pattern yet because
  // we are waiting for the closing brace of the outer class.
  Expr *OldInit = Pattern->getInClassInitializer();
  if (!OldInit) {
    RecordDecl *PatternRD = Pattern->getParent();
    RecordDecl *OutermostClass = PatternRD->getOuterLexicalRecordContext();
    if (OutermostClass == PatternRD) {
      Diag(Pattern->getLocEnd(), diag::err_in_class_initializer_not_yet_parsed)
          << PatternRD << Pattern;
    } else {
      Diag(Pattern->getLocEnd(),
           diag::err_in_class_initializer_not_yet_parsed_outer_class)
          << PatternRD << OutermostClass << Pattern;
    }
    Instantiation->setInvalidDecl();
    return true;
  }

  InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
  if (Inst.isInvalid())
    return true;

  // Enter the scope of this instantiation. We don't use PushDeclContext because
  // we don't have a scope.
  ContextRAII SavedContext(*this, Instantiation->getParent());
  EnterExpressionEvaluationContext EvalContext(*this,
                                               Sema::PotentiallyEvaluated);

  LocalInstantiationScope Scope(*this, true);

  // Instantiate the initializer.
  ActOnStartCXXInClassMemberInitializer();
  // 'this' refers to the instantiated class while substituting.
  CXXThisScopeRAII ThisScope(*this, Instantiation->getParent(), /*TypeQuals=*/0);

  ExprResult NewInit = SubstInitializer(OldInit, TemplateArgs,
                                        /*CXXDirectInit=*/false);
  Expr *Init = NewInit.get();
  assert((!Init || !isa<ParenListExpr>(Init)) && "call-style init in class");
  ActOnFinishCXXInClassMemberInitializer(
      Instantiation, Init ? Init->getLocStart() : SourceLocation(), Init);

  // Exit the scope of this instantiation.
  SavedContext.pop();

  // Return true if the in-class initializer is still missing.
  return !Instantiation->getInClassInitializer();
}
namespace {
  /// \brief A partial specialization whose template arguments have matched
  /// a given template-id.
  struct PartialSpecMatchResult {
    ClassTemplatePartialSpecializationDecl *Partial; // the matching partial
    TemplateArgumentList *Args; // arguments deduced for that partial
  };
}
/// \brief Instantiate the definition of a class template specialization,
/// selecting the pattern from either the primary template or the best
/// matching partial specialization.
///
/// \returns true if an error occurred, false otherwise.
bool Sema::InstantiateClassTemplateSpecialization(
    SourceLocation PointOfInstantiation,
    ClassTemplateSpecializationDecl *ClassTemplateSpec,
    TemplateSpecializationKind TSK, bool Complain) {
  // Perform the actual instantiation on the canonical declaration.
  ClassTemplateSpec = cast<ClassTemplateSpecializationDecl>(
                                         ClassTemplateSpec->getCanonicalDecl());
  if (ClassTemplateSpec->isInvalidDecl())
    return true;

  ClassTemplateDecl *Template = ClassTemplateSpec->getSpecializedTemplate();
  CXXRecordDecl *Pattern = nullptr;

  // C++ [temp.class.spec.match]p1:
  //   When a class template is used in a context that requires an
  //   instantiation of the class, it is necessary to determine
  //   whether the instantiation is to be generated using the primary
  //   template or one of the partial specializations. This is done by
  //   matching the template arguments of the class template
  //   specialization with the template argument lists of the partial
  //   specializations.
  typedef PartialSpecMatchResult MatchResult;
  SmallVector<MatchResult, 4> Matched;
  SmallVector<ClassTemplatePartialSpecializationDecl *, 4> PartialSpecs;
  Template->getPartialSpecializations(PartialSpecs);
  TemplateSpecCandidateSet FailedCandidates(PointOfInstantiation);
  for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) {
    ClassTemplatePartialSpecializationDecl *Partial = PartialSpecs[I];
    TemplateDeductionInfo Info(FailedCandidates.getLocation());
    if (TemplateDeductionResult Result
          = DeduceTemplateArguments(Partial,
                                    ClassTemplateSpec->getTemplateArgs(),
                                    Info)) {
      // Store the failed-deduction information for use in diagnostics, later.
      // TODO: Actually use the failed-deduction info?
      FailedCandidates.addCandidate()
          .set(Partial, MakeDeductionFailureInfo(Context, Result, Info));
      (void)Result;
    } else {
      Matched.push_back(PartialSpecMatchResult());
      Matched.back().Partial = Partial;
      Matched.back().Args = Info.take();
    }
  }

  // If we're dealing with a member template where the template parameters
  // have been instantiated, this provides the original template parameters
  // from which the member template's parameters were instantiated.

  if (Matched.size() >= 1) {
    SmallVectorImpl<MatchResult>::iterator Best = Matched.begin();
    if (Matched.size() == 1) {
      //   -- If exactly one matching specialization is found, the
      //      instantiation is generated from that specialization.
      // We don't need to do anything for this.
    } else {
      //   -- If more than one matching specialization is found, the
      //      partial order rules (14.5.4.2) are used to determine
      //      whether one of the specializations is more specialized
      //      than the others. If none of the specializations is more
      //      specialized than all of the other matching
      //      specializations, then the use of the class template is
      //      ambiguous and the program is ill-formed.
      // First pass: find a candidate that beats every earlier candidate.
      for (SmallVectorImpl<MatchResult>::iterator P = Best + 1,
                                               PEnd = Matched.end();
           P != PEnd; ++P) {
        if (getMoreSpecializedPartialSpecialization(P->Partial, Best->Partial,
                                                    PointOfInstantiation)
              == P->Partial)
          Best = P;
      }

      // Determine if the best partial specialization is more specialized than
      // the others.
      bool Ambiguous = false;
      for (SmallVectorImpl<MatchResult>::iterator P = Matched.begin(),
                                               PEnd = Matched.end();
           P != PEnd; ++P) {
        if (P != Best &&
            getMoreSpecializedPartialSpecialization(P->Partial, Best->Partial,
                                                    PointOfInstantiation)
              != Best->Partial) {
          Ambiguous = true;
          break;
        }
      }

      if (Ambiguous) {
        // Partial ordering did not produce a clear winner. Complain.
        ClassTemplateSpec->setInvalidDecl();
        Diag(PointOfInstantiation, diag::err_partial_spec_ordering_ambiguous)
          << ClassTemplateSpec;

        // Print the matching partial specializations.
        for (SmallVectorImpl<MatchResult>::iterator P = Matched.begin(),
                                                 PEnd = Matched.end();
             P != PEnd; ++P)
          Diag(P->Partial->getLocation(), diag::note_partial_spec_match)
            << getTemplateArgumentBindingsText(
                                            P->Partial->getTemplateParameters(),
                                               *P->Args);

        return true;
      }
    }

    // Instantiate using the best class template partial specialization.
    ClassTemplatePartialSpecializationDecl *OrigPartialSpec = Best->Partial;
    while (OrigPartialSpec->getInstantiatedFromMember()) {
      // If we've found an explicit specialization of this class template,
      // stop here and use that as the pattern.
      if (OrigPartialSpec->isMemberSpecialization())
        break;

      OrigPartialSpec = OrigPartialSpec->getInstantiatedFromMember();
    }

    Pattern = OrigPartialSpec;
    ClassTemplateSpec->setInstantiationOf(Best->Partial, Best->Args);
  } else {
    //   -- If no matches are found, the instantiation is generated
    //      from the primary template.
    ClassTemplateDecl *OrigTemplate = Template;
    while (OrigTemplate->getInstantiatedFromMemberTemplate()) {
      // If we've found an explicit specialization of this class template,
      // stop here and use that as the pattern.
      if (OrigTemplate->isMemberSpecialization())
        break;

      OrigTemplate = OrigTemplate->getInstantiatedFromMemberTemplate();
    }

    Pattern = OrigTemplate->getTemplatedDecl();
  }

  bool Result = InstantiateClass(PointOfInstantiation, ClassTemplateSpec,
                                 Pattern,
                                getTemplateInstantiationArgs(ClassTemplateSpec),
                                 TSK,
                                 Complain);

  return Result;
}
/// \brief Instantiates the definitions of all of the members
/// of the given class, which is an instantiation of a class template
/// or a member class of a template.
///
/// \param PointOfInstantiation the location recorded as the point of
/// instantiation for each member that gets instantiated.
/// \param Instantiation the already-instantiated class whose members are
/// to be instantiated.
/// \param TemplateArgs the template arguments of the enclosing instantiation.
/// \param TSK the kind of instantiation being performed (explicit
/// declaration/definition, or implicit for local classes only).
void
Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
                              CXXRecordDecl *Instantiation,
                        const MultiLevelTemplateArgumentList &TemplateArgs,
                              TemplateSpecializationKind TSK) {
  // FIXME: We need to notify the ASTMutationListener that we did all of these
  // things, in case we have an explicit instantiation definition in a PCM, a
  // module, or preamble, and the declaration is in an imported AST.
  assert(
      (TSK == TSK_ExplicitInstantiationDefinition ||
       TSK == TSK_ExplicitInstantiationDeclaration ||
       (TSK == TSK_ImplicitInstantiation && Instantiation->isLocalClass())) &&
      "Unexpected template specialization kind!");
  // Walk every member declaration and dispatch on its kind.  In each case the
  // member is skipped if it was explicitly specialized, or if
  // CheckSpecializationInstantiationRedecl asks us to suppress a new
  // instantiation (e.g. it was already explicitly instantiated).
  for (auto *D : Instantiation->decls()) {
    bool SuppressNew = false;
    if (auto *Function = dyn_cast<FunctionDecl>(D)) {
      // Member function.
      if (FunctionDecl *Pattern
            = Function->getInstantiatedFromMemberFunction()) {
        MemberSpecializationInfo *MSInfo
          = Function->getMemberSpecializationInfo();
        assert(MSInfo && "No member specialization information?");
        if (MSInfo->getTemplateSpecializationKind()
                                                == TSK_ExplicitSpecialization)
          continue;
        if (CheckSpecializationInstantiationRedecl(PointOfInstantiation, TSK,
                                                   Function,
                                        MSInfo->getTemplateSpecializationKind(),
                                              MSInfo->getPointOfInstantiation(),
                                                   SuppressNew) ||
            SuppressNew)
          continue;
        // C++11 [temp.explicit]p8:
        //   An explicit instantiation definition that names a class template
        //   specialization explicitly instantiates the class template
        //   specialization and is only an explicit instantiation definition
        //   of members whose definition is visible at the point of
        //   instantiation.
        if (TSK == TSK_ExplicitInstantiationDefinition && !Pattern->isDefined())
          continue;
        Function->setTemplateSpecializationKind(TSK, PointOfInstantiation);
        if (Function->isDefined()) {
          // Let the ASTConsumer know that this function has been explicitly
          // instantiated now, and its linkage might have changed.
          Consumer.HandleTopLevelDecl(DeclGroupRef(Function));
        } else if (TSK == TSK_ExplicitInstantiationDefinition) {
          InstantiateFunctionDefinition(PointOfInstantiation, Function);
        } else if (TSK == TSK_ImplicitInstantiation) {
          // Local-class member: defer until the surrounding function body
          // instantiation drains the pending list.
          PendingLocalImplicitInstantiations.push_back(
              std::make_pair(Function, PointOfInstantiation));
        }
      }
    } else if (auto *Var = dyn_cast<VarDecl>(D)) {
      // Static data member (variable template specializations are handled
      // through their own instantiation path).
      if (isa<VarTemplateSpecializationDecl>(Var))
        continue;
      if (Var->isStaticDataMember()) {
        MemberSpecializationInfo *MSInfo = Var->getMemberSpecializationInfo();
        assert(MSInfo && "No member specialization information?");
        if (MSInfo->getTemplateSpecializationKind()
                                                == TSK_ExplicitSpecialization)
          continue;
        if (CheckSpecializationInstantiationRedecl(PointOfInstantiation, TSK,
                                                   Var,
                                        MSInfo->getTemplateSpecializationKind(),
                                              MSInfo->getPointOfInstantiation(),
                                                   SuppressNew) ||
            SuppressNew)
          continue;
        if (TSK == TSK_ExplicitInstantiationDefinition) {
          // C++0x [temp.explicit]p8:
          //   An explicit instantiation definition that names a class template
          //   specialization explicitly instantiates the class template
          //   specialization and is only an explicit instantiation definition
          //   of members whose definition is visible at the point of
          //   instantiation.
          if (!Var->getInstantiatedFromStaticDataMember()
                                                     ->getOutOfLineDefinition())
            continue;
          Var->setTemplateSpecializationKind(TSK, PointOfInstantiation);
          InstantiateStaticDataMemberDefinition(PointOfInstantiation, Var);
        } else {
          // Declaration only: just record the new specialization kind.
          Var->setTemplateSpecializationKind(TSK, PointOfInstantiation);
        }
      }
    } else if (auto *Record = dyn_cast<CXXRecordDecl>(D)) {
      // Always skip the injected-class-name, along with any
      // redeclarations of nested classes, since both would cause us
      // to try to instantiate the members of a class twice.
      // Skip closure types; they'll get instantiated when we instantiate
      // the corresponding lambda-expression.
      if (Record->isInjectedClassName() || Record->getPreviousDecl() ||
          Record->isLambda())
        continue;
      MemberSpecializationInfo *MSInfo = Record->getMemberSpecializationInfo();
      assert(MSInfo && "No member specialization information?");
      if (MSInfo->getTemplateSpecializationKind()
                                                == TSK_ExplicitSpecialization)
        continue;
      if (CheckSpecializationInstantiationRedecl(PointOfInstantiation, TSK,
                                                 Record,
                                        MSInfo->getTemplateSpecializationKind(),
                                              MSInfo->getPointOfInstantiation(),
                                                 SuppressNew) ||
          SuppressNew)
        continue;
      CXXRecordDecl *Pattern = Record->getInstantiatedFromMemberClass();
      assert(Pattern && "Missing instantiated-from-template information");
      if (!Record->getDefinition()) {
        if (!Pattern->getDefinition()) {
          // C++0x [temp.explicit]p8:
          //   An explicit instantiation definition that names a class template
          //   specialization explicitly instantiates the class template
          //   specialization and is only an explicit instantiation definition
          //   of members whose definition is visible at the point of
          //   instantiation.
          if (TSK == TSK_ExplicitInstantiationDeclaration) {
            MSInfo->setTemplateSpecializationKind(TSK);
            MSInfo->setPointOfInstantiation(PointOfInstantiation);
          }
          continue;
        }
        InstantiateClass(PointOfInstantiation, Record, Pattern,
                         TemplateArgs,
                         TSK);
      } else {
        if (TSK == TSK_ExplicitInstantiationDefinition &&
            Record->getTemplateSpecializationKind() ==
                TSK_ExplicitInstantiationDeclaration) {
          // Promote a prior explicit instantiation declaration to a
          // definition; that may now require the vtable.
          Record->setTemplateSpecializationKind(TSK);
          MarkVTableUsed(PointOfInstantiation, Record, true);
        }
      }
      // Recurse into the nested class's members.
      Pattern = cast_or_null<CXXRecordDecl>(Record->getDefinition());
      if (Pattern)
        InstantiateClassMembers(PointOfInstantiation, Pattern, TemplateArgs,
                                TSK);
    } else if (auto *Enum = dyn_cast<EnumDecl>(D)) {
      // Member enumeration.
      MemberSpecializationInfo *MSInfo = Enum->getMemberSpecializationInfo();
      assert(MSInfo && "No member specialization information?");
      if (MSInfo->getTemplateSpecializationKind()
            == TSK_ExplicitSpecialization)
        continue;
      if (CheckSpecializationInstantiationRedecl(
              PointOfInstantiation, TSK, Enum,
              MSInfo->getTemplateSpecializationKind(),
              MSInfo->getPointOfInstantiation(), SuppressNew) ||
          SuppressNew)
        continue;
      if (Enum->getDefinition())
        continue;
      EnumDecl *Pattern = Enum->getInstantiatedFromMemberEnum();
      assert(Pattern && "Missing instantiated-from-template information");
      if (TSK == TSK_ExplicitInstantiationDefinition) {
        if (!Pattern->getDefinition())
          continue;
        InstantiateEnum(PointOfInstantiation, Enum, Pattern, TemplateArgs, TSK);
      } else {
        MSInfo->setTemplateSpecializationKind(TSK);
        MSInfo->setPointOfInstantiation(PointOfInstantiation);
      }
    } else if (auto *Field = dyn_cast<FieldDecl>(D)) {
      // No need to instantiate in-class initializers during explicit
      // instantiation.
      if (Field->hasInClassInitializer() && TSK == TSK_ImplicitInstantiation) {
        // Find the corresponding field in the instantiation pattern and
        // instantiate its initializer.
        CXXRecordDecl *ClassPattern =
            Instantiation->getTemplateInstantiationPattern();
        DeclContext::lookup_result Lookup =
            ClassPattern->lookup(Field->getDeclName());
        assert(Lookup.size() == 1);
        FieldDecl *Pattern = cast<FieldDecl>(Lookup[0]);
        InstantiateInClassInitializer(PointOfInstantiation, Field, Pattern,
                                      TemplateArgs);
      }
    }
  }
}
/// \brief Instantiate the definitions of all of the members of the
/// given class template specialization, which was named as part of an
/// explicit instantiation.
void
Sema::InstantiateClassTemplateSpecializationMembers(
                                           SourceLocation PointOfInstantiation,
                            ClassTemplateSpecializationDecl *ClassTemplateSpec,
                                               TemplateSpecializationKind TSK) {
  // C++0x [temp.explicit]p7: an explicit instantiation that names a class
  // template specialization is an explicit instantiation of the same kind
  // (declaration or definition) of each of its own members -- excluding
  // members inherited from base classes and members that were already
  // explicitly specialized in this translation unit.
  InstantiateClassMembers(PointOfInstantiation, ClassTemplateSpec,
                          getTemplateInstantiationArgs(ClassTemplateSpec), TSK);
}
/// \brief Perform template-argument substitution on a statement.
StmtResult
Sema::SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs) {
  // A null statement trivially substitutes to itself.
  if (!S)
    return S;

  TemplateInstantiator Instantiator(*this, TemplateArgs, SourceLocation(),
                                    DeclarationName());
  return Instantiator.TransformStmt(S);
}
/// \brief Perform template-argument substitution on an expression.
ExprResult
Sema::SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs) {
  // A null expression trivially substitutes to itself.
  if (!E)
    return E;

  TemplateInstantiator Instantiator(*this, TemplateArgs, SourceLocation(),
                                    DeclarationName());
  return Instantiator.TransformExpr(E);
}
/// \brief Perform template-argument substitution on an initializer
/// expression.  \p CXXDirectInit indicates whether this was written as a
/// C++ direct-initializer.
ExprResult Sema::SubstInitializer(
    Expr *Init, const MultiLevelTemplateArgumentList &TemplateArgs,
    bool CXXDirectInit) {
  TemplateInstantiator Instantiator(*this, TemplateArgs, SourceLocation(),
                                    DeclarationName());
  return Instantiator.TransformInitializer(Init, CXXDirectInit);
}
/// \brief Substitute template arguments into a sequence of expressions,
/// appending the results to \p Outputs.  Returns true on error.
bool Sema::SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
                      const MultiLevelTemplateArgumentList &TemplateArgs,
                      SmallVectorImpl<Expr *> &Outputs) {
  // An empty sequence substitutes to an empty sequence.
  if (NumExprs == 0)
    return false;

  TemplateInstantiator Instantiator(*this, TemplateArgs, SourceLocation(),
                                    DeclarationName());
  return Instantiator.TransformExprs(Exprs, NumExprs, IsCall, Outputs);
}
/// \brief Substitute template arguments into a nested-name-specifier
/// (with source-location information).
NestedNameSpecifierLoc
Sema::SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
                        const MultiLevelTemplateArgumentList &TemplateArgs) {
  // An absent specifier stays absent.
  if (!NNS)
    return NestedNameSpecifierLoc();

  TemplateInstantiator Instantiator(*this, TemplateArgs, NNS.getBeginLoc(),
                                    DeclarationName());
  return Instantiator.TransformNestedNameSpecifierLoc(NNS);
}
/// \brief Do template substitution on declaration name info.
DeclarationNameInfo
Sema::SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
                        const MultiLevelTemplateArgumentList &TemplateArgs) {
  // Anchor diagnostics at the name being substituted.
  TemplateInstantiator Instantiator(*this, TemplateArgs, NameInfo.getLoc(),
                                    NameInfo.getName());
  return Instantiator.TransformDeclarationNameInfo(NameInfo);
}
/// \brief Substitute template arguments into a (possibly qualified)
/// template name.
TemplateName
Sema::SubstTemplateName(NestedNameSpecifierLoc QualifierLoc,
                        TemplateName Name, SourceLocation Loc,
                        const MultiLevelTemplateArgumentList &TemplateArgs) {
  TemplateInstantiator Instantiator(*this, TemplateArgs, Loc,
                                    DeclarationName());
  // Package the qualifier into a scope specifier for the transform.
  CXXScopeSpec SS;
  SS.Adopt(QualifierLoc);
  return Instantiator.TransformTemplateName(SS, Name, Loc);
}
/// \brief Substitute template arguments into a list of template argument
/// locations, collecting the results in \p Result.  Returns true on error.
bool Sema::Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
                 TemplateArgumentListInfo &Result,
                 const MultiLevelTemplateArgumentList &TemplateArgs) {
  TemplateInstantiator Instantiator(*this, TemplateArgs, SourceLocation(),
                                    DeclarationName());
  return Instantiator.TransformTemplateArguments(Args, NumArgs, Result);
}
/// Map a ParmVarDecl to the corresponding parameter of its function's
/// canonical declaration.  The local instantiation scope always stores
/// parameters keyed by the canonical declaration so the map remains valid
/// across redeclarations and the definition of that function.
static const Decl *getCanonicalParmVarDecl(const Decl *D) {
  const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D);
  if (!Parm)
    return D;

  const FunctionDecl *Fn = dyn_cast<FunctionDecl>(Parm->getDeclContext());
  if (!Fn)
    return D;

  // The parameter might come from a freestanding function type written
  // inside Fn rather than from Fn itself; only remap when it really is one
  // of Fn's own parameters.
  unsigned Idx = Parm->getFunctionScopeIndex();
  if (Fn->getParamDecl(Idx) != Parm)
    return D;

  return Fn->getCanonicalDecl()->getParamDecl(Idx);
}
/// Find the instantiation of \p D in this scope (or a combined outer scope).
/// Returns the stored declaration-or-pack, or null for the specific cases
/// where an uninstantiated declaration is acceptable (partial substitution,
/// forward-referenced local classes/enums, labels).
llvm::PointerUnion<Decl *, LocalInstantiationScope::DeclArgumentPack *> *
LocalInstantiationScope::findInstantiationOf(const Decl *D) {
  // Parameters are stored keyed by their canonical declaration.
  D = getCanonicalParmVarDecl(D);
  for (LocalInstantiationScope *Current = this; Current;
       Current = Current->Outer) {
    // Check if we found something within this scope.
    const Decl *CheckD = D;
    do {
      LocalDeclsMap::iterator Found = Current->LocalDecls.find(CheckD);
      if (Found != Current->LocalDecls.end())
        return &Found->second;
      // If this is a tag declaration, it's possible that we need to look for
      // a previous declaration.
      if (const TagDecl *Tag = dyn_cast<TagDecl>(CheckD))
        CheckD = Tag->getPreviousDecl();
      else
        CheckD = nullptr;
    } while (CheckD);
    // If we aren't combined with our outer scope, we're done.
    if (!Current->CombineWithOuterScope)
      break;
  }
  // If we're performing a partial substitution during template argument
  // deduction, we may not have values for template parameters yet.
  if (isa<NonTypeTemplateParmDecl>(D) || isa<TemplateTypeParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return nullptr;
  // Local types referenced prior to definition may require instantiation.
  if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
    if (RD->isLocalClass())
      return nullptr;
  // Enumeration types referenced prior to definition may appear as a result of
  // error recovery.
  if (isa<EnumDecl>(D))
    return nullptr;
  // If we didn't find the decl, then we either have a sema bug, or we have a
  // forward reference to a label declaration. Return null to indicate that
  // we have an uninstantiated label.
  assert(isa<LabelDecl>(D) && "declaration not instantiated in this scope");
  return nullptr;
}
void LocalInstantiationScope::InstantiatedLocal(const Decl *D, Decl *Inst) {
D = getCanonicalParmVarDecl(D);
llvm::PointerUnion<Decl *, DeclArgumentPack *> &Stored = LocalDecls[D];
if (Stored.isNull()) {
#ifndef NDEBUG
// It should not be present in any surrounding scope either.
LocalInstantiationScope *Current = this;
while (Current->CombineWithOuterScope && Current->Outer) {
Current = Current->Outer;
assert(Current->LocalDecls.find(D) == Current->LocalDecls.end() &&
"Instantiated local in inner and outer scopes");
}
#endif
Stored = Inst;
} else if (DeclArgumentPack *Pack = Stored.dyn_cast<DeclArgumentPack *>()) {
Pack->push_back(Inst);
} else {
assert(Stored.get<Decl *>() == Inst && "Already instantiated this local");
}
}
/// Append \p Inst to the argument pack previously registered for \p D via
/// MakeInstantiatedLocalArgPack.  D must already map to a pack.
void LocalInstantiationScope::InstantiatedLocalPackArg(const Decl *D,
                                                       Decl *Inst) {
  DeclArgumentPack *Pack =
      LocalDecls[getCanonicalParmVarDecl(D)].get<DeclArgumentPack *>();
  Pack->push_back(Inst);
}
void LocalInstantiationScope::MakeInstantiatedLocalArgPack(const Decl *D) {
#ifndef NDEBUG
// This should be the first time we've been told about this decl.
for (LocalInstantiationScope *Current = this;
Current && Current->CombineWithOuterScope; Current = Current->Outer)
assert(Current->LocalDecls.find(D) == Current->LocalDecls.end() &&
"Creating local pack after instantiation of local");
#endif
D = getCanonicalParmVarDecl(D);
llvm::PointerUnion<Decl *, DeclArgumentPack *> &Stored = LocalDecls[D];
DeclArgumentPack *Pack = new DeclArgumentPack;
Stored = Pack;
ArgumentPacks.push_back(Pack);
}
/// Note that \p Pack is a parameter pack for which only the given
/// explicitly-specified arguments have been substituted so far.
void LocalInstantiationScope::SetPartiallySubstitutedPack(
    NamedDecl *Pack, const TemplateArgument *ExplicitArgs,
    unsigned NumExplicitArgs) {
  // At most one partially-substituted pack may be registered per scope, and
  // re-registering it must not change the argument count.
  assert((!PartiallySubstitutedPack || PartiallySubstitutedPack == Pack) &&
         "Already have a partially-substituted pack");
  assert((!PartiallySubstitutedPack ||
          NumArgsInPartiallySubstitutedPack == NumExplicitArgs) &&
         "Wrong number of arguments in partially-substituted pack");

  PartiallySubstitutedPack = Pack;
  ArgsInPartiallySubstitutedPack = ExplicitArgs;
  NumArgsInPartiallySubstitutedPack = NumExplicitArgs;
}
/// Retrieve the partially-substituted parameter pack registered in this
/// scope chain, if any, optionally returning its already-substituted
/// explicit arguments through the out-parameters.
NamedDecl *LocalInstantiationScope::getPartiallySubstitutedPack(
    const TemplateArgument **ExplicitArgs, unsigned *NumExplicitArgs) const {
  // Default the out-parameters to "no explicit arguments".
  if (ExplicitArgs)
    *ExplicitArgs = nullptr;
  if (NumExplicitArgs)
    *NumExplicitArgs = 0;

  // Walk outwards while scopes are combined with their outer scope.
  const LocalInstantiationScope *S = this;
  while (S) {
    if (S->PartiallySubstitutedPack) {
      if (ExplicitArgs)
        *ExplicitArgs = S->ArgsInPartiallySubstitutedPack;
      if (NumExplicitArgs)
        *NumExplicitArgs = S->NumArgsInPartiallySubstitutedPack;
      return S->PartiallySubstitutedPack;
    }
    if (!S->CombineWithOuterScope)
      return nullptr;
    S = S->Outer;
  }
  return nullptr;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Sema/DelayedDiagnostic.cpp | //===--- DelayedDiagnostic.cpp - Delayed declarator diagnostics -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DelayedDiagnostic class implementation, which
// is used to record diagnostics that are being conditionally produced
// during declarator parsing.
//
// This file also defines AccessedEntity.
//
//===----------------------------------------------------------------------===//
#include "clang/Sema/DelayedDiagnostic.h"
#include <string.h>
using namespace clang;
using namespace sema;
// //
///////////////////////////////////////////////////////////////////////////////
/// Build a delayed availability diagnostic (deprecation or unavailability)
/// for declaration \p D at location \p Loc.  The message text is copied into
/// heap storage owned by the diagnostic and released in Destroy().
DelayedDiagnostic
DelayedDiagnostic::makeAvailability(Sema::AvailabilityDiagnostic AD,
                                    SourceLocation Loc,
                                    const NamedDecl *D,
                                    const ObjCInterfaceDecl *UnknownObjCClass,
                                    const ObjCPropertyDecl *ObjCProperty,
                                    StringRef Msg,
                                    bool ObjCPropertyAccess) {
  DelayedDiagnostic DD;

  // Map the availability kind onto the delayed-diagnostic kind.
  switch (AD) {
  case Sema::AD_Deprecation:
    DD.Kind = Deprecation;
    break;
  case Sema::AD_Unavailable:
    DD.Kind = Unavailable;
    break;
  case Sema::AD_Partial:
    llvm_unreachable("AD_Partial diags should not be delayed");
  }

  DD.Triggered = false;
  DD.Loc = Loc;
  DD.DeprecationData.Decl = D;
  DD.DeprecationData.UnknownObjCClass = UnknownObjCClass;
  DD.DeprecationData.ObjCProperty = ObjCProperty;

  // Copy the (unterminated) message bytes; the length is stored separately.
  char *MessageData = nullptr;
  if (!Msg.empty()) {
    MessageData = new char[Msg.size()];
    memcpy(MessageData, Msg.data(), Msg.size());
  }
  DD.DeprecationData.Message = MessageData;
  DD.DeprecationData.MessageLen = Msg.size();
  DD.DeprecationData.ObjCPropertyAccess = ObjCPropertyAccess;
  return DD;
}
void DelayedDiagnostic::Destroy() {
switch (static_cast<DDKind>(Kind)) {
case Access:
getAccessData().~AccessedEntity();
break;
case Deprecation:
case Unavailable:
delete [] DeprecationData.Message;
break;
case ForbiddenType:
break;
}
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Sema/TypeLocBuilder.h | //===--- TypeLocBuilder.h - Type Source Info collector ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This files defines TypeLocBuilder, a class for building TypeLocs
// bottom-up.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_SEMA_TYPELOCBUILDER_H
#define LLVM_CLANG_LIB_SEMA_TYPELOCBUILDER_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/TypeLoc.h"
namespace clang {
/// Builds the source-location information ("TypeLoc") for a type
/// incrementally, innermost type first.  Location data grows from the END of
/// the buffer backwards, so each push prepends the new local data.
class TypeLocBuilder {
  enum { InlineCapacity = 8 * sizeof(SourceLocation) };

  /// The underlying location-data buffer. Data grows from the end
  /// of the buffer backwards.
  char *Buffer;

  /// The capacity of the current buffer.
  size_t Capacity;

  /// The index of the first occupied byte in the buffer.
  size_t Index;

#ifndef NDEBUG
  /// The last type pushed on this builder.
  QualType LastTy;
#endif

  /// The inline buffer.
  enum { BufferMaxAlignment = llvm::AlignOf<void*>::Alignment };
  llvm::AlignedCharArray<BufferMaxAlignment, InlineCapacity> InlineBuffer;
  // Byte counts pushed at each alignment class; presumably used by
  // pushImpl/grow to keep local data correctly aligned — defined out of line.
  unsigned NumBytesAtAlign4, NumBytesAtAlign8;

public:
  TypeLocBuilder()
    : Buffer(InlineBuffer.buffer), Capacity(InlineCapacity),
      Index(InlineCapacity), NumBytesAtAlign4(0), NumBytesAtAlign8(0)
  {
  }

  ~TypeLocBuilder() {
    // Only a heap buffer obtained from grow() needs freeing.
    if (Buffer != InlineBuffer.buffer)
      delete[] Buffer;
  }

  /// Ensures that this buffer has at least as much capacity as described.
  void reserve(size_t Requested) {
    if (Requested > Capacity)
      // For now, match the request exactly.
      grow(Requested);
  }

  /// Pushes a copy of the given TypeLoc onto this builder. The builder
  /// must be empty for this to work.
  void pushFullCopy(TypeLoc L);

  /// Pushes space for a typespec TypeLoc. Invalidates any TypeLocs
  /// previously retrieved from this builder.
  TypeSpecTypeLoc pushTypeSpec(QualType T) {
    size_t LocalSize = TypeSpecTypeLoc::LocalDataSize;
    unsigned LocalAlign = TypeSpecTypeLoc::LocalDataAlignment;
    return pushImpl(T, LocalSize, LocalAlign).castAs<TypeSpecTypeLoc>();
  }

  /// Resets this builder to the newly-initialized state.
  void clear() {
#ifndef NDEBUG
    LastTy = QualType();
#endif
    // An empty buffer has Index == Capacity (data grows backwards).
    Index = Capacity;
    NumBytesAtAlign4 = NumBytesAtAlign8 = 0;
  }

  /// \brief Tell the TypeLocBuilder that the type it is storing has been
  /// modified in some safe way that doesn't affect type-location information.
  void TypeWasModifiedSafely(QualType T) {
#ifndef NDEBUG
    LastTy = T;
#endif
  }

  /// Pushes space for a new TypeLoc of the given type. Invalidates
  /// any TypeLocs previously retrieved from this builder.
  template <class TyLocType> TyLocType push(QualType T) {
    // Create a throwaway TypeLoc just to query the local size/alignment
    // required for this TypeLoc kind.
    TyLocType Loc = TypeLoc(T, nullptr).castAs<TyLocType>();
    size_t LocalSize = Loc.getLocalDataSize();
    unsigned LocalAlign = Loc.getLocalDataAlignment();
    return pushImpl(T, LocalSize, LocalAlign).castAs<TyLocType>();
  }

  /// Creates a TypeSourceInfo for the given type.
  TypeSourceInfo *getTypeSourceInfo(ASTContext& Context, QualType T) {
#ifndef NDEBUG
    assert(T == LastTy && "type doesn't match last type pushed!");
#endif

    // Copy the accumulated location data into an AST-owned TypeSourceInfo.
    size_t FullDataSize = Capacity - Index;
    TypeSourceInfo *DI = Context.CreateTypeSourceInfo(T, FullDataSize);
    memcpy(DI->getTypeLoc().getOpaqueData(), &Buffer[Index], FullDataSize);
    return DI;
  }

  /// \brief Copies the type-location information to the given AST context and
  /// returns a \c TypeLoc referring into the AST context.
  TypeLoc getTypeLocInContext(ASTContext &Context, QualType T) {
#ifndef NDEBUG
    assert(T == LastTy && "type doesn't match last type pushed!");
#endif

    size_t FullDataSize = Capacity - Index;
    void *Mem = Context.Allocate(FullDataSize);
    memcpy(Mem, &Buffer[Index], FullDataSize);
    return TypeLoc(T, Mem);
  }

private:
  TypeLoc pushImpl(QualType T, size_t LocalSize, unsigned LocalAlignment);

  /// Grow to the given capacity.
  void grow(size_t NewCapacity);

  /// \brief Retrieve a temporary TypeLoc that refers into this \c TypeLocBuilder
  /// object.
  ///
  /// The resulting \c TypeLoc should only be used so long as the
  /// \c TypeLocBuilder is active and has not had more type information
  /// pushed into it.
  TypeLoc getTemporaryTypeLoc(QualType T) {
#ifndef NDEBUG
    assert(LastTy == T && "type doesn't match last type pushed!");
#endif
    return TypeLoc(T, &Buffer[Index]);
  }
};
}
#endif
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaConsumer.cpp | //===-- SemaConsumer.cpp - Abstract interface for AST semantics -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/Sema/SemaConsumer.h"
using namespace clang;
// Out-of-line anchor so the vtable for SemaConsumer is emitted in this
// translation unit (standard LLVM "-Wweak-vtables" idiom).
void SemaConsumer::anchor() { }
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaStmt.cpp | //===--- SemaStmt.cpp - Semantic Analysis for Statements ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements semantic analysis for statements.
//
//===----------------------------------------------------------------------===//
#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "clang/Sema/SemaHLSL.h" // HLSL Change
using namespace clang;
using namespace sema;
/// Form an expression statement from a full expression.
StmtResult Sema::ActOnExprStmt(ExprResult FE) {
  if (FE.isInvalid())
    return StmtError();

  // Finish the full-expression; the value is discarded.
  FE = ActOnFinishFullExpr(FE.get(), FE.get()->getExprLoc(),
                           /*DiscardedValue*/ true);
  if (FE.isInvalid())
    return StmtError();

  // C99 6.8.3p2: The expression in an expression statement is evaluated as a
  // void expression for its side effects.  Conversion to void allows any
  // operand, even incomplete types.  The same applies to the first (when an
  // expression) and third clauses of a for statement.
  return StmtResult(FE.getAs<Stmt>());
}
// Called when parsing of an expression statement failed: drop any cleanups
// queued for the abandoned full-expression and propagate the error.
StmtResult Sema::ActOnExprStmtError() {
  DiscardCleanupsInEvaluationContext();
  return StmtError();
}
// Build a null statement (';').  HasLeadingEmptyMacro records whether the
// semicolon was preceded by an empty macro expansion, which suppresses some
// empty-body warnings later.
StmtResult Sema::ActOnNullStmt(SourceLocation SemiLoc,
                               bool HasLeadingEmptyMacro) {
  return new (Context) NullStmt(SemiLoc, HasLeadingEmptyMacro);
}
/// Wrap a declaration group in a DeclStmt.
StmtResult Sema::ActOnDeclStmt(DeclGroupPtrTy dg, SourceLocation StartLoc,
                               SourceLocation EndLoc) {
  DeclGroupRef Group = dg.get();

  // A null group means the declaration was invalid; propagate the error.
  if (Group.isNull())
    return StmtError();

  return new (Context) DeclStmt(Group, StartLoc, EndLoc);
}
/// Finish semantic processing of the element declaration of an Objective-C
/// fast-enumeration ("for-each") statement.
void Sema::ActOnForEachDeclStmt(DeclGroupPtrTy dg) {
  DeclGroupRef Group = dg.get();

  // Nothing to do without exactly one valid declaration.
  if (Group.isNull() || !Group.isSingleDecl())
    return;
  Decl *D = Group.getSingleDecl();
  if (!D || D->isInvalidDecl())
    return;

  // Only variable declarations are permitted as the element.
  VarDecl *Var = dyn_cast<VarDecl>(D);
  if (!Var) {
    Diag(D->getLocation(), diag::err_non_variable_decl_in_for);
    D->setInvalidDecl();
    return;
  }

  // The parser fabricated an initializer for the foreach variable; the
  // variable is never actually initialized that way, so drop it.
  Var->setInit(nullptr);

  // Under ARC, the iteration variable of a fast-enumeration loop need not be
  // retained.  Rather than catching that during declaration processing, undo
  // the consequences here.
  if (getLangOpts().ObjCAutoRefCount) {
    QualType Ty = Var->getType();

    // Only act when the lifetime was inferred: inferred lifetime shows up as
    // a local qualifier, whereas explicit lifetime would have produced an
    // AttributedType instead.
    if (Ty.getLocalQualifiers().getObjCLifetime() == Qualifiers::OCL_Strong) {
      // Add 'const' and mark the variable as pseudo-strong.
      Var->setType(Ty.withConst());
      Var->setARCPseudoStrong(true);
    }
  }
}
/// \brief Diagnose unused comparisons, both builtin and overloaded operators.
/// For '==' and '!=', suggest fixits for '=' or '|='.
///
/// Adding a cast to void (or other expression wrappers) will prevent the
/// warning from firing.
///
/// \returns true if a diagnostic was emitted for \p E.
static bool DiagnoseUnusedComparison(Sema &S, const Expr *E) {
  SourceLocation Loc;
  bool IsNotEqual, CanAssign, IsRelational;

  // Recognize both builtin comparisons and overloaded comparison operators.
  if (const BinaryOperator *Op = dyn_cast<BinaryOperator>(E)) {
    if (!Op->isComparisonOp())
      return false;

    IsRelational = Op->isRelationalOp();
    Loc = Op->getOperatorLoc();
    IsNotEqual = Op->getOpcode() == BO_NE;
    CanAssign = Op->getLHS()->IgnoreParenImpCasts()->isLValue();
  } else if (const CXXOperatorCallExpr *Op = dyn_cast<CXXOperatorCallExpr>(E)) {
    switch (Op->getOperator()) {
    default:
      return false;
    case OO_EqualEqual:
    case OO_ExclaimEqual:
      IsRelational = false;
      break;
    case OO_Less:
    case OO_Greater:
    case OO_GreaterEqual:
    case OO_LessEqual:
      IsRelational = true;
      break;
    }

    Loc = Op->getOperatorLoc();
    IsNotEqual = Op->getOperator() == OO_ExclaimEqual;
    CanAssign = Op->getArg(0)->IgnoreParenImpCasts()->isLValue();
  } else {
    // Not a typo-prone comparison.
    return false;
  }

  // Suppress warnings when the operator, suspicious as it may be, comes from
  // a macro expansion.
  if (S.SourceMgr.isMacroBodyExpansion(Loc))
    return false;

  S.Diag(Loc, diag::warn_unused_comparison)
    << (unsigned)IsRelational << (unsigned)IsNotEqual << E->getSourceRange();

  // If the LHS is a plausible entity to assign to, provide a fixit hint to
  // correct common typos.
  if (!IsRelational && CanAssign) {
    if (IsNotEqual)
      S.Diag(Loc, diag::note_inequality_comparison_to_or_assign)
        << FixItHint::CreateReplacement(Loc, "|=");
    else
      S.Diag(Loc, diag::note_equality_comparison_to_assign)
        << FixItHint::CreateReplacement(Loc, "=");
  }

  return true;
}
/// Emit a -Wunused-value (or a more specific) warning when the statement is
/// an expression whose result is computed but never used.  The chain of
/// special cases below is order-sensitive: each case either refines the
/// diagnostic ID or suppresses the warning entirely.
void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
  // Look through labels to the labeled statement.
  if (const LabelStmt *Label = dyn_cast_or_null<LabelStmt>(S))
    return DiagnoseUnusedExprResult(Label->getSubStmt());

  const Expr *E = dyn_cast_or_null<Expr>(S);
  if (!E)
    return;

  // If we are in an unevaluated expression context, then there can be no unused
  // results because the results aren't expected to be used in the first place.
  if (isUnevaluatedContext())
    return;

  SourceLocation ExprLoc = E->IgnoreParens()->getExprLoc();
  // In most cases, we don't want to warn if the expression is written in a
  // macro body, or if the macro comes from a system header. If the offending
  // expression is a call to a function with the warn_unused_result attribute,
  // we warn no matter the location. Because of the order in which the various
  // checks need to happen, we factor out the macro-related test here.
  bool ShouldSuppress =
      SourceMgr.isMacroBodyExpansion(ExprLoc) ||
      SourceMgr.isInSystemMacro(ExprLoc);

  const Expr *WarnExpr;
  SourceLocation Loc;
  SourceRange R1, R2;
  if (!E->isUnusedResultAWarning(WarnExpr, Loc, R1, R2, Context))
    return;

  // If this is a GNU statement expression expanded from a macro, it is probably
  // unused because it is a function-like macro that can be used as either an
  // expression or statement. Don't warn, because it is almost certainly a
  // false positive.
  if (isa<StmtExpr>(E) && Loc.isMacroID())
    return;

  // Okay, we have an unused result. Depending on what the base expression is,
  // we might want to make a more specific diagnostic. Check for one of these
  // cases now.
  unsigned DiagID = diag::warn_unused_expr;
  if (const ExprWithCleanups *Temps = dyn_cast<ExprWithCleanups>(E))
    E = Temps->getSubExpr();
  if (const CXXBindTemporaryExpr *TempExpr = dyn_cast<CXXBindTemporaryExpr>(E))
    E = TempExpr->getSubExpr();

  if (DiagnoseUnusedComparison(*this, E))
    return;

  E = WarnExpr;
  if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
    // Calls returning void produce no unused value.
    if (E->getType()->isVoidType())
      return;

    // If the callee has attribute pure, const, or warn_unused_result, warn with
    // a more specific message to make it clear what is happening. If the call
    // is written in a macro body, only warn if it has the warn_unused_result
    // attribute.
    if (const Decl *FD = CE->getCalleeDecl()) {
      const FunctionDecl *Func = dyn_cast<FunctionDecl>(FD);
      if (Func ? Func->hasUnusedResultAttr()
               : FD->hasAttr<WarnUnusedResultAttr>()) {
        Diag(Loc, diag::warn_unused_result) << R1 << R2;
        return;
      }
      if (ShouldSuppress)
        return;
      if (FD->hasAttr<PureAttr>()) {
        Diag(Loc, diag::warn_hlsl_unused_call) << R1 << R2 << "pure"; // HLSL Change: rather than warn_unused_call
        return;
      }
      if (FD->hasAttr<ConstAttr>()) {
        Diag(Loc, diag::warn_unused_call) << R1 << R2 << "const";
        return;
      }
    }
  } else if (ShouldSuppress)
    return;

  if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E)) {
    // Under ARC, an unused [self init]-style call is an error, not a warning.
    if (getLangOpts().ObjCAutoRefCount && ME->isDelegateInitCall()) {
      Diag(Loc, diag::err_arc_unused_init_message) << R1;
      return;
    }
    const ObjCMethodDecl *MD = ME->getMethodDecl();
    if (MD) {
      if (MD->hasAttr<WarnUnusedResultAttr>()) {
        Diag(Loc, diag::warn_unused_result) << R1 << R2;
        return;
      }
    }
  } else if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
    // Property/subscript accesses get their own diagnostic wording.
    const Expr *Source = POE->getSyntacticForm();
    if (isa<ObjCSubscriptRefExpr>(Source))
      DiagID = diag::warn_unused_container_subscript_expr;
    else
      DiagID = diag::warn_unused_property_expr;
  } else if (const CXXFunctionalCastExpr *FC
                                       = dyn_cast<CXXFunctionalCastExpr>(E)) {
    // Constructing a temporary just for its constructor's side effects is
    // deliberate; don't warn.
    if (isa<CXXConstructExpr>(FC->getSubExpr()) ||
        isa<CXXTemporaryObjectExpr>(FC->getSubExpr()))
      return;
  }
  // Diagnose "(void*) blah" as a typo for "(void) blah".
  else if (const CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(E)) {
    TypeSourceInfo *TI = CE->getTypeInfoAsWritten();
    QualType T = TI->getType();

    // We really do want to use the non-canonical type here.
    if (T == Context.VoidPtrTy) {
      PointerTypeLoc TL = TI->getTypeLoc().castAs<PointerTypeLoc>();

      Diag(Loc, diag::warn_unused_voidptr)
        << FixItHint::CreateRemoval(TL.getStarLoc());
      return;
    }
  }

  // Reading a volatile glvalue is itself a side effect worth flagging.
  if (E->isGLValue() && E->getType().isVolatileQualified()) {
    Diag(Loc, diag::warn_unused_volatile) << R1 << R2;
    return;
  }

  DiagRuntimeBehavior(Loc, nullptr, PDiag(DiagID) << R1 << R2);
}
// Entering a compound statement: open a new compound scope on the current
// function's scope stack.
void Sema::ActOnStartOfCompoundStmt() {
  PushCompoundScope();
}
/// Called when a compound statement is exited: closes the compound scope
/// opened by ActOnStartOfCompoundStmt.
void Sema::ActOnFinishOfCompoundStmt() {
  PopCompoundScope();
}
/// Returns the innermost compound-statement scope of the current function.
sema::CompoundScopeInfo &Sema::getCurCompoundScope() const {
  return getCurFunction()->CompoundScopes.back();
}
/// Build a CompoundStmt from the parsed sub-statements, emitting the
/// C89 mixed-declarations extension diagnostic, unused-result warnings,
/// and empty-loop-body warnings along the way.
StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                                   ArrayRef<Stmt *> Elts, bool isStmtExpr) {
  const unsigned Count = Elts.size();

  // In C89, declarations must precede all statements in a block; a
  // declaration following a statement is diagnosed as an extension.
  if (!getLangOpts().C99 && !getLangOpts().CPlusPlus) {
    // Note that __extension__ can be around a decl, so DeclStmt covers it.
    unsigned Idx = 0;
    // Skip the leading run of declarations.
    while (Idx != Count && isa<DeclStmt>(Elts[Idx]))
      ++Idx;
    // Skip the statements that follow; stop at the next declaration, if any.
    while (Idx != Count && !isa<DeclStmt>(Elts[Idx]))
      ++Idx;
    if (Idx != Count) {
      // Report at the first entity declared by the offending DeclStmt.
      Decl *FirstDecl = *cast<DeclStmt>(Elts[Idx])->decl_begin();
      Diag(FirstDecl->getLocation(), diag::ext_mixed_decls_code);
    }
  }

  // Warn about expression statements whose result is unused.  The final
  // expression of a GNU statement expression is its value, so skip it.
  for (unsigned Idx = 0; Idx != Count; ++Idx) {
    if (isStmtExpr && Idx == Count - 1)
      continue;
    DiagnoseUnusedExprResult(Elts[Idx]);
  }

  // Check for suspicious empty body (null statement) in `for' and `while'
  // statements.  Don't do anything for template instantiations, this just
  // adds noise.
  if (Count != 0 && !CurrentInstantiationScope &&
      getCurCompoundScope().HasEmptyLoopBodies) {
    for (unsigned Idx = 0; Idx + 1 != Count; ++Idx)
      DiagnoseEmptyLoopBody(Elts[Idx], Elts[Idx + 1]);
  }

  return new (Context) CompoundStmt(Context, Elts, L, R);
}
/// Build a CaseStmt for a 'case' label and register it with the innermost
/// enclosing switch statement.
///
/// \param LHSVal the case value (never null; asserted below).
/// \param RHSVal the upper bound of a GNU case range ('case 1 ... 4:'),
///        or null for an ordinary case label.
StmtResult
Sema::ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
                    SourceLocation DotDotDotLoc, Expr *RHSVal,
                    SourceLocation ColonLoc) {
  assert(LHSVal && "missing expression in case statement");

  // A case label is only valid inside a switch statement.
  if (getCurFunction()->SwitchStack.empty()) {
    Diag(CaseLoc, diag::err_case_not_in_switch);
    return StmtError();
  }

  // Resolve delayed typos in the case value; the filter enforces the
  // applicable constant-expression rule (ICE pre-C++11, converted constant
  // expression of the switch condition's type for C++11).
  ExprResult LHS =
      CorrectDelayedTyposInExpr(LHSVal, [this](class Expr *E) {
        if (!getLangOpts().CPlusPlus11)
          return VerifyIntegerConstantExpression(E);
        if (Expr *CondExpr =
                getCurFunction()->SwitchStack.back()->getCond()) {
          QualType CondType = CondExpr->getType();
          llvm::APSInt TempVal;
          return CheckConvertedConstantExpression(E, CondType, TempVal,
                                                  CCEK_CaseValue);
        }
        return ExprError();
      });
  if (LHS.isInvalid())
    return StmtError();
  LHSVal = LHS.get();

  // HLSL change: HLSL 2017+ uses the C++11-style converted-constant checking
  // done in ActOnFinishSwitchStmt instead of the ICE check below.
  if (!getLangOpts().CPlusPlus11 &&
      getLangOpts().HLSLVersion < hlsl::LangStd::v2017) {
    // C99 6.8.4.2p3: The expression shall be an integer constant.
    // However, GCC allows any evaluatable integer expression.
    if (!LHSVal->isTypeDependent() && !LHSVal->isValueDependent()) {
      LHSVal = VerifyIntegerConstantExpression(LHSVal).get();
      if (!LHSVal)
        return StmtError();
    }

    // GCC case-range extension: the upper bound shall also be an integer
    // constant.
    if (RHSVal && !RHSVal->isTypeDependent() && !RHSVal->isValueDependent()) {
      RHSVal = VerifyIntegerConstantExpression(RHSVal).get();
      // Recover from an error by just forgetting about it.
    }
  }

  LHS = ActOnFinishFullExpr(LHSVal, LHSVal->getExprLoc(), false,
                            getLangOpts().CPlusPlus11);
  if (LHS.isInvalid())
    return StmtError();

  auto RHS = RHSVal ? ActOnFinishFullExpr(RHSVal, RHSVal->getExprLoc(), false,
                                          getLangOpts().CPlusPlus11)
                    : ExprResult();
  if (RHS.isInvalid())
    return StmtError();

  CaseStmt *CS = new (Context)
      CaseStmt(LHS.get(), RHS.get(), CaseLoc, DotDotDotLoc, ColonLoc);
  getCurFunction()->SwitchStack.back()->addSwitchCase(CS);
  return CS;
}
/// ActOnCaseStmtBody - This installs a statement as the body of a case.
void Sema::ActOnCaseStmtBody(Stmt *caseStmt, Stmt *SubStmt) {
  // Warn if the case body is an expression whose value goes unused.
  DiagnoseUnusedExprResult(SubStmt);

  // The parser guarantees caseStmt is a CaseStmt, so no checked cast needed.
  CaseStmt *TheCase = static_cast<CaseStmt *>(caseStmt);
  TheCase->setSubStmt(SubStmt);
}
/// Build a 'default:' label and register it with the innermost enclosing
/// switch; outside of a switch, diagnose and return the body for recovery.
StmtResult
Sema::ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc,
                       Stmt *SubStmt, Scope *CurScope) {
  // Flag an unused-expression body just as for any other statement.
  DiagnoseUnusedExprResult(SubStmt);

  // 'default' is only meaningful inside a switch; recover by returning the
  // bare body so parsing can continue.
  if (getCurFunction()->SwitchStack.empty()) {
    Diag(DefaultLoc, diag::err_default_not_in_switch);
    return SubStmt;
  }

  DefaultStmt *Default =
      new (Context) DefaultStmt(DefaultLoc, ColonLoc, SubStmt);
  getCurFunction()->SwitchStack.back()->addSwitchCase(Default);
  return Default;
}
/// Attach a label to a statement, rejecting redefinition of the label.
StmtResult
Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                     SourceLocation ColonLoc, Stmt *SubStmt) {
  // A label may be defined at most once per function; on redefinition, point
  // at the previous definition and hand back the statement unlabeled.
  if (TheDecl->getStmt() != nullptr) {
    Diag(IdentLoc, diag::err_redefinition_of_label) << TheDecl->getDeclName();
    Diag(TheDecl->getLocation(), diag::note_previous_definition);
    return SubStmt;
  }

  // Bind the statement to the label declaration.
  LabelStmt *Labeled = new (Context) LabelStmt(IdentLoc, TheDecl, SubStmt);
  TheDecl->setStmt(Labeled);
  if (!TheDecl->isGnuLocal()) {
    TheDecl->setLocStart(IdentLoc);
    // MS inline-asm labels keep their original location: they will get a
    // diagnostic elsewhere, and moving the location here would corrupt it.
    if (!TheDecl->isMSAsmLabel())
      TheDecl->setLocation(IdentLoc);
  }
  return Labeled;
}
/// Wrap a statement together with its attributes, diagnosing mutually
/// exclusive HLSL loop attributes ([loop]/[unroll], [fastopt]/[unroll]).
StmtResult Sema::ActOnAttributedStmt(SourceLocation AttrLoc,
                                     ArrayRef<const Attr*> Attrs,
                                     Stmt *SubStmt) {
  // HLSL Change - Validate inconsistent attributes being set on statements.
  if (getLangOpts().HLSL) {
    bool HasLoop = false, HasUnroll = false, HasFastOpt = false;
    for (const Attr *A : Attrs) {
      switch (A->getKind()) {
      case attr::HLSLLoop:    HasLoop = true;    break;
      case attr::HLSLUnroll:  HasUnroll = true;  break;
      case attr::HLSLFastOpt: HasFastOpt = true; break;
      default:                                   break;
      }
    }
    if (HasLoop && HasUnroll)
      Diag(AttrLoc, diag::err_attributes_are_not_compatible) << "loop" << "unroll";
    if (HasFastOpt && HasUnroll)
      Diag(AttrLoc, diag::err_attributes_are_not_compatible) << "fastopt" << "unroll";
  }
  // HLSL Change Ends

  // Fill in the declaration and return it.
  return AttributedStmt::Create(Context, AttrLoc, Attrs, SubStmt);
}
/// Build an 'if' statement, checking the condition (or condition variable)
/// and diagnosing unused expression results and suspicious empty then-bodies.
StmtResult
Sema::ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar,
                  Stmt *thenStmt, SourceLocation ElseLoc,
                  Stmt *elseStmt) {
  // If the condition was invalid, discard the if statement. We could recover
  // better by replacing it with a valid expr, but don't do that yet.
  if (!CondVal.get() && !CondVar) {
    getCurFunction()->setHasDroppedStmt();
    return StmtError();
  }

  ExprResult CondResult(CondVal.release());

  // Condition declared as a variable: "if (int x = f())".  Check the
  // declaration and rebuild the condition as a finished full expression.
  VarDecl *ConditionVar = nullptr;
  if (CondVar) {
    ConditionVar = cast<VarDecl>(CondVar);
    CondResult = CheckConditionVariable(ConditionVar, IfLoc, true);
    CondResult = ActOnFinishFullExpr(CondResult.get(), IfLoc);
    if (CondResult.isInvalid())
      return StmtError();
  }
  Expr *ConditionExpr = CondResult.getAs<Expr>();
  if (!ConditionExpr)
    return StmtError();
  // HLSL Change Begin.
  hlsl::DiagnoseControlFlowConditionForHLSL(this, ConditionExpr, "if");
  // HLSL Change End.
  DiagnoseUnusedExprResult(thenStmt);

  // Only warn about "if (x);" when there is no else branch.
  if (!elseStmt) {
    DiagnoseEmptyStmtBody(ConditionExpr->getLocEnd(), thenStmt,
                          diag::warn_empty_if_body);
  }

  DiagnoseUnusedExprResult(elseStmt);

  return new (Context) IfStmt(Context, IfLoc, ConditionVar, ConditionExpr,
                              thenStmt, ElseLoc, elseStmt);
}
namespace {
  /// Heterogeneous comparator for std::lower_bound/std::upper_bound over a
  /// sorted vector of (case value, CaseStmt*) pairs: supports comparing a
  /// pair against a bare APSInt in either argument order.
  struct CaseCompareFunctor {
    bool operator()(const std::pair<llvm::APSInt, CaseStmt*> &LHS,
                    const llvm::APSInt &RHS) {
      return LHS.first < RHS;
    }
    bool operator()(const std::pair<llvm::APSInt, CaseStmt*> &LHS,
                    const std::pair<llvm::APSInt, CaseStmt*> &RHS) {
      return LHS.first < RHS.first;
    }
    bool operator()(const llvm::APSInt &LHS,
                    const std::pair<llvm::APSInt, CaseStmt*> &RHS) {
      return LHS < RHS.first;
    }
  };
}
/// CmpCaseVals - Comparison predicate for sorting case values.
///
static bool CmpCaseVals(const std::pair<llvm::APSInt, CaseStmt*>& lhs,
                        const std::pair<llvm::APSInt, CaseStmt*>& rhs) {
  // Order primarily by case value; break ties by source order so that
  // duplicate-case diagnostics point at the later occurrence.
  if (lhs.first != rhs.first)
    return lhs.first < rhs.first;
  return lhs.second->getCaseLoc().getRawEncoding()
       < rhs.second->getCaseLoc().getRawEncoding();
}
/// CmpEnumVals - Comparison predicate for sorting enumeration values.
///
static bool CmpEnumVals(const std::pair<llvm::APSInt, EnumConstantDecl*>& lhs,
                        const std::pair<llvm::APSInt, EnumConstantDecl*>& rhs)
{
  // Order by enumerator value only; the declaration pointer is ignored.
  return lhs.first < rhs.first;
}
/// EqEnumVals - Comparison predicate for uniquing enumeration values.
///
static bool EqEnumVals(const std::pair<llvm::APSInt, EnumConstantDecl*>& lhs,
                       const std::pair<llvm::APSInt, EnumConstantDecl*>& rhs)
{
  // Two entries are duplicates when their values match, even if they name
  // different enumerators.
  return lhs.first == rhs.first;
}
/// GetTypeBeforeIntegralPromotion - Returns the pre-promotion type of
/// potentially integral-promoted expression @p expr.  As a side effect,
/// \p expr is rewritten to point at the expression beneath any cleanups and
/// integral-cast wrappers.
static QualType GetTypeBeforeIntegralPromotion(Expr *&expr) {
  if (auto *cleanups = dyn_cast<ExprWithCleanups>(expr))
    expr = cleanups->getSubExpr();
  // Peel off implicit integral casts, one layer at a time.
  for (auto *impcast = dyn_cast<ImplicitCastExpr>(expr);
       impcast && impcast->getCastKind() == CK_IntegralCast;
       impcast = dyn_cast<ImplicitCastExpr>(expr))
    expr = impcast->getSubExpr();
  return expr->getType();
}
/// Begin a switch statement: check the condition (or condition variable),
/// contextually convert it to an integral or enumeration type, apply the
/// integer promotions, and push the new SwitchStmt onto the switch stack so
/// that subsequent case/default labels can register with it.
StmtResult
Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond,
                             Decl *CondVar) {
  ExprResult CondResult;

  // Condition declared as a variable: "switch (int x = f())".
  VarDecl *ConditionVar = nullptr;
  if (CondVar) {
    ConditionVar = cast<VarDecl>(CondVar);
    CondResult = CheckConditionVariable(ConditionVar, SourceLocation(), false);
    if (CondResult.isInvalid())
      return StmtError();
    Cond = CondResult.get();
  }

  if (!Cond)
    return StmtError();

  // HLSL Change Begins.
  if (Cond->getType()->isFloatingType()) {
    // Cast float to int for switch.
    QualType intTy = Context.getIntTypeForBitwidth(32, /*isSigned*/true);
    ExprResult Res = ImplicitCastExpr::Create(Context, intTy,
                                              CK_FloatingToIntegral, Cond,
                                              nullptr, Cond->getValueKind());
    Cond = Res.get();
  }
  // HLSL Change Ends.

  // Diagnoser for the contextual implicit conversion below: reports
  // non-integral, incomplete, ambiguous, or explicit-only conversions of the
  // switch condition.
  class SwitchConvertDiagnoser : public ICEConvertDiagnoser {
    Expr *Cond;

  public:
    SwitchConvertDiagnoser(Expr *Cond)
        : ICEConvertDiagnoser(/*AllowScopedEnumerations*/true, false, true),
          Cond(Cond) {}

    SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                         QualType T) override {
      return S.Diag(Loc, diag::err_typecheck_statement_requires_integer) << T;
    }

    SemaDiagnosticBuilder diagnoseIncomplete(
        Sema &S, SourceLocation Loc, QualType T) override {
      return S.Diag(Loc, diag::err_switch_incomplete_class_type)
               << T << Cond->getSourceRange();
    }

    SemaDiagnosticBuilder diagnoseExplicitConv(
        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
      return S.Diag(Loc, diag::err_switch_explicit_conversion) << T << ConvTy;
    }

    SemaDiagnosticBuilder noteExplicitConv(
        Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
      return S.Diag(Conv->getLocation(), diag::note_switch_conversion)
               << ConvTy->isEnumeralType() << ConvTy;
    }

    SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
                                            QualType T) override {
      return S.Diag(Loc, diag::err_switch_multiple_conversions) << T;
    }

    SemaDiagnosticBuilder noteAmbiguous(
        Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override {
      return S.Diag(Conv->getLocation(), diag::note_switch_conversion)
               << ConvTy->isEnumeralType() << ConvTy;
    }

    SemaDiagnosticBuilder diagnoseConversion(
        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
      llvm_unreachable("conversion functions are permitted");
    }
  } SwitchDiagnoser(Cond);

  CondResult =
      PerformContextualImplicitConversion(SwitchLoc, Cond, SwitchDiagnoser);
  if (CondResult.isInvalid()) return StmtError();
  Cond = CondResult.get();

  // C99 6.8.4.2p5 - Integer promotions are performed on the controlling expr.
  CondResult = UsualUnaryConversions(Cond);
  if (CondResult.isInvalid()) return StmtError();
  Cond = CondResult.get();

  CondResult = ActOnFinishFullExpr(Cond, SwitchLoc);
  if (CondResult.isInvalid())
    return StmtError();
  Cond = CondResult.get();

  // Record that the enclosing function contains a branch into a scope.
  getCurFunction()->setHasBranchIntoScope();

  SwitchStmt *SS = new (Context) SwitchStmt(Context, ConditionVar, Cond);
  getCurFunction()->SwitchStack.push_back(SS);
  return SS;
}
/// Adjust \p Val to the given bit width and signedness.  The width change is
/// applied first, so any extension uses Val's *original* signedness before
/// the new signedness is imposed.
static void AdjustAPSInt(llvm::APSInt &Val, unsigned BitWidth, bool IsSigned) {
  Val = Val.extOrTrunc(BitWidth);
  Val.setIsSigned(IsSigned);
}
/// Check the specified case value is in range for the given unpromoted switch
/// type.
///
/// \param Val              the case value in the promoted condition type.
/// \param UnpromotedWidth  bit width of the condition before promotion.
/// \param UnpromotedSign   signedness of the condition before promotion.
static void checkCaseValue(Sema &S, SourceLocation Loc, const llvm::APSInt &Val,
                           unsigned UnpromotedWidth, bool UnpromotedSign) {
  // If the case value was signed and negative and the switch expression is
  // unsigned, don't bother to warn: this is implementation-defined behavior.
  // FIXME: Introduce a second, default-ignored warning for this case?
  if (UnpromotedWidth < Val.getBitWidth()) {
    // Round-trip the value through the unpromoted width; if it changes, the
    // case value cannot be represented in the switch's original type.
    llvm::APSInt ConvVal(Val);
    AdjustAPSInt(ConvVal, UnpromotedWidth, UnpromotedSign);
    AdjustAPSInt(ConvVal, Val.getBitWidth(), Val.isSigned());
    // FIXME: Use different diagnostics for overflow in conversion to promoted
    // type versus "switch expression cannot have this value". Use proper
    // IntRange checking rather than just looking at the unpromoted type here.
    if (ConvVal != Val)
      S.Diag(Loc, diag::warn_case_value_overflow) << Val.toString(10)
                                                  << ConvVal.toString(10);
  }
}
// Sorted (value, enumerator) pairs used by the enum-coverage checks below.
typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl*>, 64> EnumValsTy;

/// Returns true if we should emit a diagnostic about this case expression not
/// being a part of the enum used in the switch controlling expression.
///
/// \param EI iterator into the sorted, uniqued enumerator list; it is
///        advanced past values below \p Val as a side effect, so successive
///        calls over sorted case values scan the list only once.
static bool ShouldDiagnoseSwitchCaseNotInEnum(const Sema &S,
                                              const EnumDecl *ED,
                                              const Expr *CaseExpr,
                                              EnumValsTy::iterator &EI,
                                              EnumValsTy::iterator &EIEnd,
                                              const llvm::APSInt &Val) {
  bool FlagType = ED->hasAttr<FlagEnumAttr>();

  // Suppress the warning when the case value is a global const variable of
  // the same enum type.
  if (const DeclRefExpr *DRE =
          dyn_cast<DeclRefExpr>(CaseExpr->IgnoreParenImpCasts())) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      QualType VarType = VD->getType();
      QualType EnumType = S.Context.getTypeDeclType(ED);
      if (VD->hasGlobalStorage() && VarType.isConstQualified() &&
          S.Context.hasSameUnqualifiedType(EnumType, VarType))
        return false;
    }
  }

  if (FlagType) {
    // For flag enums, defer to the flag-combination check.
    return !S.IsValueInFlagEnum(ED, Val, false);
  } else {
    // Advance to the first enumerator value >= Val; an exact match means the
    // case value is a declared enumerator.
    while (EI != EIEnd && EI->first < Val)
      EI++;

    if (EI != EIEnd && EI->first == Val)
      return false;
  }

  return true;
}
/// Finish a switch statement: pop it from the switch stack, convert and
/// range-check every case value against the (promoted) condition type,
/// diagnose duplicate cases and overlapping GNU case ranges, and perform
/// enum-coverage analysis when switching over an enumeration.
StmtResult
Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
                            Stmt *BodyStmt) {
  SwitchStmt *SS = cast<SwitchStmt>(Switch);
  assert(SS == getCurFunction()->SwitchStack.back() &&
         "switch stack missing push/pop!");
  getCurFunction()->SwitchStack.pop_back();

  if (!BodyStmt) return StmtError();
  SS->setBody(BodyStmt, SwitchLoc);

  Expr *CondExpr = SS->getCond();
  if (!CondExpr) return StmtError();

  QualType CondType = CondExpr->getType();

  Expr *CondExprBeforePromotion = CondExpr;
  QualType CondTypeBeforePromotion =
      GetTypeBeforeIntegralPromotion(CondExprBeforePromotion);

  // C++ 6.4.2.p2:
  // Integral promotions are performed (on the switch condition).
  //
  // A case value unrepresentable by the original switch condition
  // type (before the promotion) doesn't make sense, even when it can
  // be represented by the promoted type. Therefore we need to find
  // the pre-promotion type of the switch condition.
  if (!CondExpr->isTypeDependent()) {
    // We have already converted the expression to an integral or enumeration
    // type, when we started the switch statement. If we don't have an
    // appropriate type now, just return an error.
    if (!CondType->isIntegralOrEnumerationType())
      return StmtError();

    if (CondExpr->isKnownToHaveBooleanValue()) {
      // switch(bool_expr) {...} is often a programmer error, e.g.
      //   switch(n && mask) { ... }  // Doh - should be "n & mask".
      // One can always use an if statement instead of switch(bool_expr).
      Diag(SwitchLoc, diag::warn_bool_switch_condition)
          << CondExpr->getSourceRange();
    }
  }

  // Get the bitwidth of the switched-on value after promotions. We must
  // convert the integer case values to this width before comparison.
  bool HasDependentValue
    = CondExpr->isTypeDependent() || CondExpr->isValueDependent();
  unsigned CondWidth = HasDependentValue ? 0 : Context.getIntWidth(CondType);
  bool CondIsSigned = CondType->isSignedIntegerOrEnumerationType();

  // Get the width and signedness that the condition might actually have, for
  // warning purposes.
  // FIXME: Grab an IntRange for the condition rather than using the unpromoted
  // type.
  unsigned CondWidthBeforePromotion
    = HasDependentValue ? 0 : Context.getIntWidth(CondTypeBeforePromotion);
  bool CondIsSignedBeforePromotion
    = CondTypeBeforePromotion->isSignedIntegerOrEnumerationType();

  // Accumulate all of the case values in a vector so that we can sort them
  // and detect duplicates. This vector contains the APInt for the case after
  // it has been converted to the condition type.
  typedef SmallVector<std::pair<llvm::APSInt, CaseStmt*>, 64> CaseValsTy;
  CaseValsTy CaseVals;

  // Keep track of any GNU case ranges we see. The APSInt is the low value.
  typedef std::vector<std::pair<llvm::APSInt, CaseStmt*> > CaseRangesTy;
  CaseRangesTy CaseRanges;

  DefaultStmt *TheDefaultStmt = nullptr;

  bool CaseListIsErroneous = false;

  for (SwitchCase *SC = SS->getSwitchCaseList(); SC && !HasDependentValue;
       SC = SC->getNextSwitchCase()) {

    if (DefaultStmt *DS = dyn_cast<DefaultStmt>(SC)) {
      if (TheDefaultStmt) {
        Diag(DS->getDefaultLoc(), diag::err_multiple_default_labels_defined);
        Diag(TheDefaultStmt->getDefaultLoc(), diag::note_duplicate_case_prev);

        // FIXME: Remove the default statement from the switch block so that
        // we'll return a valid AST. This requires recursing down the AST and
        // finding it, not something we are set up to do right now. For now,
        // just lop the entire switch stmt out of the AST.
        CaseListIsErroneous = true;
      }
      TheDefaultStmt = DS;

    } else {
      CaseStmt *CS = cast<CaseStmt>(SC);

      Expr *Lo = CS->getLHS();

      if (Lo->isTypeDependent() || Lo->isValueDependent()) {
        HasDependentValue = true;
        break;
      }

      llvm::APSInt LoVal;

      // HLSL change: HLSL 2017+ follows the C++11 converted-constant path.
      if (getLangOpts().CPlusPlus11 ||
          getLangOpts().HLSLVersion >= hlsl::LangStd::v2017) {
        // C++11 [stmt.switch]p2: the constant-expression shall be a converted
        // constant expression of the promoted type of the switch condition.
        ExprResult ConvLo =
          CheckConvertedConstantExpression(Lo, CondType, LoVal, CCEK_CaseValue);
        if (ConvLo.isInvalid()) {
          CaseListIsErroneous = true;
          continue;
        }
        Lo = ConvLo.get();
      } else {
        // We already verified that the expression has a i-c-e value (C99
        // 6.8.4.2p3) - get that value now.
        LoVal = Lo->EvaluateKnownConstInt(Context);

        // If the LHS is not the same type as the condition, insert an
        // implicit cast.
        Lo = DefaultLvalueConversion(Lo).get();
        Lo = ImpCastExprToType(Lo, CondType, CK_IntegralCast).get();
      }

      // Check the unconverted value is within the range of possible values of
      // the switch expression.
      checkCaseValue(*this, Lo->getLocStart(), LoVal,
                     CondWidthBeforePromotion, CondIsSignedBeforePromotion);

      // Convert the value to the same width/sign as the condition.
      AdjustAPSInt(LoVal, CondWidth, CondIsSigned);

      CS->setLHS(Lo);

      // If this is a case range, remember it in CaseRanges, otherwise
      // CaseVals.
      if (CS->getRHS()) {
        if (CS->getRHS()->isTypeDependent() ||
            CS->getRHS()->isValueDependent()) {
          HasDependentValue = true;
          break;
        }
        CaseRanges.push_back(std::make_pair(LoVal, CS));
      } else
        CaseVals.push_back(std::make_pair(LoVal, CS));
    }
  }

  if (!HasDependentValue) {
    // If we don't have a default statement, check whether the
    // condition is constant.
    llvm::APSInt ConstantCondValue;
    bool HasConstantCond = false;
    if (!HasDependentValue && !TheDefaultStmt) {
      HasConstantCond = CondExpr->EvaluateAsInt(ConstantCondValue, Context,
                                                Expr::SE_AllowSideEffects);
      assert(!HasConstantCond ||
             (ConstantCondValue.getBitWidth() == CondWidth &&
              ConstantCondValue.isSigned() == CondIsSigned));
    }
    bool ShouldCheckConstantCond = HasConstantCond;

    // Sort all the scalar case values so we can easily detect duplicates.
    std::stable_sort(CaseVals.begin(), CaseVals.end(), CmpCaseVals);

    if (!CaseVals.empty()) {
      for (unsigned i = 0, e = CaseVals.size(); i != e; ++i) {
        if (ShouldCheckConstantCond &&
            CaseVals[i].first == ConstantCondValue)
          ShouldCheckConstantCond = false;

        if (i != 0 && CaseVals[i].first == CaseVals[i-1].first) {
          // If we have a duplicate, report it.
          // First, determine if either case value has a name
          StringRef PrevString, CurrString;
          Expr *PrevCase = CaseVals[i-1].second->getLHS()->IgnoreParenCasts();
          Expr *CurrCase = CaseVals[i].second->getLHS()->IgnoreParenCasts();
          if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(PrevCase)) {
            PrevString = DeclRef->getDecl()->getName();
          }
          if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(CurrCase)) {
            CurrString = DeclRef->getDecl()->getName();
          }
          SmallString<16> CaseValStr;
          CaseVals[i-1].first.toString(CaseValStr);

          if (PrevString == CurrString)
            Diag(CaseVals[i].second->getLHS()->getLocStart(),
                 diag::err_duplicate_case) <<
                 (PrevString.empty() ? StringRef(CaseValStr) : PrevString);
          else
            Diag(CaseVals[i].second->getLHS()->getLocStart(),
                 diag::err_duplicate_case_differing_expr) <<
                 (PrevString.empty() ? StringRef(CaseValStr) : PrevString) <<
                 (CurrString.empty() ? StringRef(CaseValStr) : CurrString) <<
                 CaseValStr;

          Diag(CaseVals[i-1].second->getLHS()->getLocStart(),
               diag::note_duplicate_case_prev);
          // FIXME: We really want to remove the bogus case stmt from the
          // substmt, but we have no way to do this right now.
          CaseListIsErroneous = true;
        }
      }
    }

    // Detect duplicate case ranges, which usually don't exist at all in
    // the first place.
    if (!CaseRanges.empty()) {
      // Sort all the case ranges by their low value so we can easily detect
      // overlaps between ranges.
      std::stable_sort(CaseRanges.begin(), CaseRanges.end());

      // Scan the ranges, computing the high values and removing empty ranges.
      std::vector<llvm::APSInt> HiVals;
      for (unsigned i = 0, e = CaseRanges.size(); i != e; ++i) {
        llvm::APSInt &LoVal = CaseRanges[i].first;
        CaseStmt *CR = CaseRanges[i].second;
        Expr *Hi = CR->getRHS();
        llvm::APSInt HiVal;

        // HLSL change (consistency fix): match the low-bound handling above —
        // HLSL 2017+ also takes the C++11 converted-constant path for the
        // range's high bound.  Previously only CPlusPlus11 was tested here.
        if (getLangOpts().CPlusPlus11 ||
            getLangOpts().HLSLVersion >= hlsl::LangStd::v2017) {
          // C++11 [stmt.switch]p2: the constant-expression shall be a
          // converted constant expression of the promoted type of the switch
          // condition.
          ExprResult ConvHi =
            CheckConvertedConstantExpression(Hi, CondType, HiVal,
                                             CCEK_CaseValue);
          if (ConvHi.isInvalid()) {
            CaseListIsErroneous = true;
            continue;
          }
          Hi = ConvHi.get();
        } else {
          HiVal = Hi->EvaluateKnownConstInt(Context);

          // If the RHS is not the same type as the condition, insert an
          // implicit cast.
          Hi = DefaultLvalueConversion(Hi).get();
          Hi = ImpCastExprToType(Hi, CondType, CK_IntegralCast).get();
        }

        // Check the unconverted value is within the range of possible values
        // of the switch expression.
        checkCaseValue(*this, Hi->getLocStart(), HiVal,
                       CondWidthBeforePromotion, CondIsSignedBeforePromotion);

        // Convert the value to the same width/sign as the condition.
        AdjustAPSInt(HiVal, CondWidth, CondIsSigned);

        CR->setRHS(Hi);

        // If the low value is bigger than the high value, the case is empty.
        if (LoVal > HiVal) {
          Diag(CR->getLHS()->getLocStart(), diag::warn_case_empty_range)
            << SourceRange(CR->getLHS()->getLocStart(),
                           Hi->getLocEnd());
          CaseRanges.erase(CaseRanges.begin()+i);
          --i, --e;
          continue;
        }

        if (ShouldCheckConstantCond &&
            LoVal <= ConstantCondValue &&
            ConstantCondValue <= HiVal)
          ShouldCheckConstantCond = false;

        HiVals.push_back(HiVal);
      }

      // Rescan the ranges, looking for overlap with singleton values and
      // other ranges. Since the range list is sorted, we only need to compare
      // case ranges with their neighbors.
      for (unsigned i = 0, e = CaseRanges.size(); i != e; ++i) {
        llvm::APSInt &CRLo = CaseRanges[i].first;
        llvm::APSInt &CRHi = HiVals[i];
        CaseStmt *CR = CaseRanges[i].second;

        // Check to see whether the case range overlaps with any
        // singleton cases.
        CaseStmt *OverlapStmt = nullptr;
        llvm::APSInt OverlapVal(32);

        // Find the smallest value >= the lower bound. If I is in the
        // case range, then we have overlap.
        CaseValsTy::iterator I = std::lower_bound(CaseVals.begin(),
                                                  CaseVals.end(), CRLo,
                                                  CaseCompareFunctor());
        if (I != CaseVals.end() && I->first < CRHi) {
          OverlapVal  = I->first;   // Found overlap with scalar.
          OverlapStmt = I->second;
        }

        // Find the smallest value bigger than the upper bound.
        I = std::upper_bound(I, CaseVals.end(), CRHi, CaseCompareFunctor());
        if (I != CaseVals.begin() && (I-1)->first >= CRLo) {
          OverlapVal  = (I-1)->first;      // Found overlap with scalar.
          OverlapStmt = (I-1)->second;
        }

        // Check to see if this case stmt overlaps with the subsequent
        // case range.
        if (i && CRLo <= HiVals[i-1]) {
          OverlapVal  = HiVals[i-1];       // Found overlap with range.
          OverlapStmt = CaseRanges[i-1].second;
        }

        if (OverlapStmt) {
          // If we have a duplicate, report it.
          Diag(CR->getLHS()->getLocStart(), diag::err_duplicate_case)
            << OverlapVal.toString(10);
          Diag(OverlapStmt->getLHS()->getLocStart(),
               diag::note_duplicate_case_prev);
          // FIXME: We really want to remove the bogus case stmt from the
          // substmt, but we have no way to do this right now.
          CaseListIsErroneous = true;
        }
      }
    }

    // Complain if we have a constant condition and we didn't find a match.
    if (!CaseListIsErroneous && ShouldCheckConstantCond) {
      // TODO: it would be nice if we printed enums as enums, chars as
      // chars, etc.
      Diag(CondExpr->getExprLoc(), diag::warn_missing_case_for_condition)
        << ConstantCondValue.toString(10)
        << CondExpr->getSourceRange();
    }

    // Check to see if switch is over an Enum and handles all of its
    // values. We only issue a warning if there is not 'default:', but
    // we still do the analysis to preserve this information in the AST
    // (which can be used by flow-based analyses).
    //
    const EnumType *ET = CondTypeBeforePromotion->getAs<EnumType>();

    // If switch has default case, then ignore it.
    if (!CaseListIsErroneous && !HasConstantCond && ET) {
      const EnumDecl *ED = ET->getDecl();
      EnumValsTy EnumVals;

      // Gather all enum values, set their type and sort them,
      // allowing easier comparison with CaseVals.
      for (auto *EDI : ED->enumerators()) {
        llvm::APSInt Val = EDI->getInitVal();
        AdjustAPSInt(Val, CondWidth, CondIsSigned);
        EnumVals.push_back(std::make_pair(Val, EDI));
      }
      std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals);
      auto EI = EnumVals.begin(), EIEnd =
        std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals);

      // See which case values aren't in enum.
      for (CaseValsTy::const_iterator CI = CaseVals.begin();
           CI != CaseVals.end(); CI++) {
        Expr *CaseExpr = CI->second->getLHS();
        if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd,
                                              CI->first))
          Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
            << CondTypeBeforePromotion;
      }

      // See which of case ranges aren't in enum
      EI = EnumVals.begin();
      for (CaseRangesTy::const_iterator RI = CaseRanges.begin();
           RI != CaseRanges.end(); RI++) {
        Expr *CaseExpr = RI->second->getLHS();

        if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd,
                                              RI->first))
          Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
            << CondTypeBeforePromotion;

        llvm::APSInt Hi =
          RI->second->getRHS()->EvaluateKnownConstInt(Context);
        AdjustAPSInt(Hi, CondWidth, CondIsSigned);

        CaseExpr = RI->second->getRHS();
        if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd,
                                              Hi))
          Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum)
            << CondTypeBeforePromotion;
      }

      // Check which enum vals aren't in switch
      auto CI = CaseVals.begin();
      auto RI = CaseRanges.begin();
      bool hasCasesNotInSwitch = false;

      SmallVector<DeclarationName,8> UnhandledNames;

      for (EI = EnumVals.begin(); EI != EIEnd; EI++){
        // Drop unneeded case values
        while (CI != CaseVals.end() && CI->first < EI->first)
          CI++;

        if (CI != CaseVals.end() && CI->first == EI->first)
          continue;

        // Drop unneeded case ranges
        for (; RI != CaseRanges.end(); RI++) {
          llvm::APSInt Hi =
            RI->second->getRHS()->EvaluateKnownConstInt(Context);
          AdjustAPSInt(Hi, CondWidth, CondIsSigned);
          if (EI->first <= Hi)
            break;
        }

        if (RI == CaseRanges.end() || EI->first < RI->first) {
          hasCasesNotInSwitch = true;
          UnhandledNames.push_back(EI->second->getDeclName());
        }
      }

      if (TheDefaultStmt && UnhandledNames.empty())
        Diag(TheDefaultStmt->getDefaultLoc(), diag::warn_unreachable_default);

      // Produce a nice diagnostic if multiple values aren't handled.
      if (!UnhandledNames.empty()) {
        DiagnosticBuilder DB = Diag(CondExpr->getExprLoc(),
                                    TheDefaultStmt ? diag::warn_def_missing_case
                                                   : diag::warn_missing_case)
          << (int)UnhandledNames.size();

        for (size_t I = 0, E = std::min(UnhandledNames.size(), (size_t)3);
             I != E; ++I)
          DB << UnhandledNames[I];
      }

      if (!hasCasesNotInSwitch)
        SS->setAllEnumCasesCovered();
    }
  }

  if (BodyStmt)
    DiagnoseEmptyStmtBody(CondExpr->getLocEnd(), BodyStmt,
                          diag::warn_empty_switch_body);

  // FIXME: If the case list was broken is some way, we don't have a good
  // system to patch it up.  Instead, just return the whole substmt as broken.
  if (CaseListIsErroneous)
    return StmtError();

  return SS;
}
/// Warn when a constant integer is assigned to an enum-typed destination but
/// does not correspond to any enumerator (or, for flag enums, any valid flag
/// combination) of that enum.
void
Sema::DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                             Expr *SrcExpr) {
  if (Diags.isIgnored(diag::warn_not_in_enum_assignment, SrcExpr->getExprLoc()))
    return;

  if (const EnumType *ET = DstType->getAs<EnumType>())
    if (!Context.hasSameUnqualifiedType(SrcType, DstType) &&
        SrcType->isIntegerType()) {
      // Only constant integer sources can be checked against the enumerator
      // list.
      if (!SrcExpr->isTypeDependent() && !SrcExpr->isValueDependent() &&
          SrcExpr->isIntegerConstantExpr(Context)) {
        // Get the bitwidth of the enum value before promotions.
        unsigned DstWidth = Context.getIntWidth(DstType);
        bool DstIsSigned = DstType->isSignedIntegerOrEnumerationType();

        llvm::APSInt RhsVal = SrcExpr->EvaluateKnownConstInt(Context);
        AdjustAPSInt(RhsVal, DstWidth, DstIsSigned);
        const EnumDecl *ED = ET->getDecl();

        if (ED->hasAttr<FlagEnumAttr>()) {
          // Flag enums accept any combination of declared flag bits.
          if (!IsValueInFlagEnum(ED, RhsVal, true))
            Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignment)
              << DstType.getUnqualifiedType();
        } else {
          typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl *>, 64>
              EnumValsTy;
          EnumValsTy EnumVals;

          // Gather all enum values, set their type and sort them,
          // allowing easier comparison with rhs constant.
          for (auto *EDI : ED->enumerators()) {
            llvm::APSInt Val = EDI->getInitVal();
            AdjustAPSInt(Val, DstWidth, DstIsSigned);
            EnumVals.push_back(std::make_pair(Val, EDI));
          }
          if (EnumVals.empty())
            return;
          std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals);
          EnumValsTy::iterator EIend =
              std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals);

          // See which values aren't in the enum.
          EnumValsTy::const_iterator EI = EnumVals.begin();
          while (EI != EIend && EI->first < RhsVal)
            EI++;
          if (EI == EIend || EI->first != RhsVal) {
            Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignment)
              << DstType.getUnqualifiedType();
          }
        }
      }
    }
}
/// Build a 'while' statement, checking the condition (or condition variable)
/// and recording null-statement bodies for the empty-loop-body warning.
StmtResult
Sema::ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond,
                     Decl *CondVar, Stmt *Body) {
  ExprResult CondResult(Cond.release());

  // "while (int x = f())": check the condition variable declaration and
  // rebuild the condition from it as a finished full expression.
  VarDecl *CondVD = nullptr;
  if (CondVar) {
    CondVD = cast<VarDecl>(CondVar);
    CondResult = CheckConditionVariable(CondVD, WhileLoc, true);
    CondResult = ActOnFinishFullExpr(CondResult.get(), WhileLoc);
    if (CondResult.isInvalid())
      return StmtError();
  }

  Expr *CondE = CondResult.get();
  if (!CondE)
    return StmtError();

  // HLSL Change Begin.
  hlsl::DiagnoseControlFlowConditionForHLSL(this, CondE, "while");
  // HLSL Change End.

  CheckBreakContinueBinding(CondE);
  DiagnoseUnusedExprResult(Body);

  // Record a null-statement body so the enclosing compound statement can
  // emit the suspicious-empty-loop-body warning.
  if (isa<NullStmt>(Body))
    getCurCompoundScope().setHasEmptyLoopBodies();

  return new (Context)
      WhileStmt(Context, CondVD, CondE, Body, WhileLoc);
}
/// Build a 'do ... while' statement, converting the condition to bool and
/// finishing it as a full expression.
StmtResult
Sema::ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                  SourceLocation WhileLoc, SourceLocation CondLParen,
                  Expr *Cond, SourceLocation CondRParen) {
  assert(Cond && "ActOnDoStmt(): missing expression");

  CheckBreakContinueBinding(Cond);

  // Contextually convert the controlling expression to a boolean condition;
  // bail out if the conversion fails.
  ExprResult Checked = CheckBooleanCondition(Cond, DoLoc);
  if (Checked.isInvalid())
    return StmtError();
  Cond = Checked.get();

  // Wrap the condition up as a full expression.
  Checked = ActOnFinishFullExpr(Cond, DoLoc);
  if (Checked.isInvalid())
    return StmtError();
  Cond = Checked.get();

  // HLSL Change Begin.
  if (Cond) {
    hlsl::DiagnoseControlFlowConditionForHLSL(this, Cond, "do-while");
  }
  // HLSL Change End.

  DiagnoseUnusedExprResult(Body);

  return new (Context) DoStmt(Body, Cond, DoLoc, WhileLoc, CondRParen);
}
namespace {
// This visitor will traverse a conditional statement and store all
// the evaluated decls into a vector. Simple is set to true if none
// of the excluded constructs are used.
/// Traverses a loop condition, collecting every VarDecl referenced in an
/// evaluated context along with the source range of each reference.
/// \c Simple is cleared as soon as any construct outside the whitelisted set
/// of visitors below is encountered.
class DeclExtractor : public EvaluatedExprVisitor<DeclExtractor> {
  llvm::SmallPtrSetImpl<VarDecl*> &Decls;  // collected condition variables
  SmallVectorImpl<SourceRange> &Ranges;    // source range of each reference
  bool Simple;                             // no excluded constructs seen so far
public:
  typedef EvaluatedExprVisitor<DeclExtractor> Inherited;

  DeclExtractor(Sema &S, llvm::SmallPtrSetImpl<VarDecl*> &Decls,
                SmallVectorImpl<SourceRange> &Ranges) :
      Inherited(S.Context),
      Decls(Decls),
      Ranges(Ranges),
      Simple(true) {}

  /// True if only whitelisted constructs were seen during traversal.
  bool isSimple() { return Simple; }

  // Replaces the method in EvaluatedExprVisitor.
  void VisitMemberExpr(MemberExpr* E) {
    Simple = false;
  }

  // Any Stmt not whitelisted will cause the condition to be marked complex.
  void VisitStmt(Stmt *S) {
    Simple = false;
  }

  void VisitBinaryOperator(BinaryOperator *E) {
    Visit(E->getLHS());
    Visit(E->getRHS());
  }

  void VisitCastExpr(CastExpr *E) {
    Visit(E->getSubExpr());
  }

  void VisitUnaryOperator(UnaryOperator *E) {
    // Skip checking conditionals with dereferences.
    if (E->getOpcode() == UO_Deref)
      Simple = false;
    else
      Visit(E->getSubExpr());
  }

  void VisitConditionalOperator(ConditionalOperator *E) {
    Visit(E->getCond());
    Visit(E->getTrueExpr());
    Visit(E->getFalseExpr());
  }

  void VisitParenExpr(ParenExpr *E) {
    Visit(E->getSubExpr());
  }

  void VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
    Visit(E->getOpaqueValue()->getSourceExpr());
    Visit(E->getFalseExpr());
  }

  // Literals reference no declarations and keep the condition simple.
  void VisitIntegerLiteral(IntegerLiteral *E) { }
  void VisitFloatingLiteral(FloatingLiteral *E) { }
  void VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) { }
  void VisitCharacterLiteral(CharacterLiteral *E) { }
  void VisitGNUNullExpr(GNUNullExpr *E) { }
  void VisitImaginaryLiteral(ImaginaryLiteral *E) { }

  void VisitDeclRefExpr(DeclRefExpr *E) {
    VarDecl *VD = dyn_cast<VarDecl>(E->getDecl());
    if (!VD) return;

    Ranges.push_back(E->getSourceRange());

    Decls.insert(VD);
  }

}; // end class DeclExtractor
// DeclMatcher checks to see if the decls are used in a non-evaluated
// context.
class DeclMatcher : public EvaluatedExprVisitor<DeclMatcher> {
  llvm::SmallPtrSetImpl<VarDecl*> &Decls; // The condition variables to look for.
  bool FoundDecl;                         // Set once a qualifying use is found.
public:
  typedef EvaluatedExprVisitor<DeclMatcher> Inherited;

  // The constructor walks the whole statement eagerly; query
  // FoundDeclInUse() afterwards.
  DeclMatcher(Sema &S, llvm::SmallPtrSetImpl<VarDecl*> &Decls,
              Stmt *Statement) :
      Inherited(S.Context), Decls(Decls), FoundDecl(false) {
    if (!Statement) return;

    Visit(Statement);
  }

  // Conservatively count any early exit from the statement as a use.
  void VisitReturnStmt(ReturnStmt *S) {
    FoundDecl = true;
  }

  void VisitBreakStmt(BreakStmt *S) {
    FoundDecl = true;
  }

  void VisitGotoStmt(GotoStmt *S) {
    FoundDecl = true;
  }

  void VisitCastExpr(CastExpr *E) {
    if (E->getCastKind() == CK_LValueToRValue)
      CheckLValueToRValueCast(E->getSubExpr());
    else
      Visit(E->getSubExpr());
  }

  // An lvalue-to-rvalue conversion of a bare DeclRefExpr is a plain read of
  // the variable, which is deliberately not counted as a use here; anything
  // more complicated is visited normally.
  void CheckLValueToRValueCast(Expr *E) {
    E = E->IgnoreParenImpCasts();
    if (isa<DeclRefExpr>(E)) {
      return;
    }

    if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
      Visit(CO->getCond());
      CheckLValueToRValueCast(CO->getTrueExpr());
      CheckLValueToRValueCast(CO->getFalseExpr());
      return;
    }

    if (BinaryConditionalOperator *BCO =
            dyn_cast<BinaryConditionalOperator>(E)) {
      CheckLValueToRValueCast(BCO->getOpaqueValue()->getSourceExpr());
      CheckLValueToRValueCast(BCO->getFalseExpr());
      return;
    }

    Visit(E);
  }

  // Any other reference to one of the tracked variables counts as a use.
  void VisitDeclRefExpr(DeclRefExpr *E) {
    if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
      if (Decls.count(VD))
        FoundDecl = true;
  }

  bool FoundDeclInUse() { return FoundDecl; }

}; // end class DeclMatcher
// Warn when none of the variables mentioned in a for-loop's condition are
// touched in the loop's increment or body (diag::warn_variables_not_in_loop_body).
void CheckForLoopConditionalStatement(Sema &S, Expr *Second,
                                      Expr *Third, Stmt *Body) {
  // Condition is empty
  if (!Second) return;

  if (S.Diags.isIgnored(diag::warn_variables_not_in_loop_body,
                        Second->getLocStart()))
    return;

  PartialDiagnostic PDiag = S.PDiag(diag::warn_variables_not_in_loop_body);
  llvm::SmallPtrSet<VarDecl*, 8> Decls;
  SmallVector<SourceRange, 10> Ranges;

  // Collect every variable referenced by the condition.
  DeclExtractor DE(S, Decls, Ranges);
  DE.Visit(Second);

  // Don't analyze complex conditionals.
  if (!DE.isSimple()) return;

  // No decls found.
  if (Decls.empty()) return;

  // Don't warn on volatile, static, or global variables.
  for (VarDecl *VD : Decls)
    if (VD->getType().isVolatileQualified() || VD->hasGlobalStorage())
      return;

  // If any condition variable is used in the condition itself (beyond a bare
  // read), the increment, or the body, the loop is presumably not stuck.
  if (DeclMatcher(S, Decls, Second).FoundDeclInUse() ||
      DeclMatcher(S, Decls, Third).FoundDeclInUse() ||
      DeclMatcher(S, Decls, Body).FoundDeclInUse())
    return;

  // Load decl names into diagnostic.
  if (Decls.size() > 4)
    PDiag << 0;
  else {
    PDiag << Decls.size();
    for (VarDecl *VD : Decls)
      PDiag << VD->getDeclName();
  }

  // Load SourceRanges into diagnostic if there is room.
  // Otherwise, load the SourceRange of the conditional expression.
  if (Ranges.size() <= PartialDiagnostic::MaxArguments)
    for (const SourceRange &R : Ranges)
      PDiag << R;
  else
    PDiag << Second->getSourceRange();

  S.Diag(Ranges.begin()->getBegin(), PDiag);
}
// If Statement is an increment or decrement of a variable reference, return
// true and set the out-parameters Increment and DRE accordingly.
bool ProcessIterationStmt(Sema &S, Stmt* Statement, bool &Increment,
                          DeclRefExpr *&DRE) {
  // Built-in prefix/postfix ++ or -- applied to a variable reference.
  if (UnaryOperator *UO = dyn_cast<UnaryOperator>(Statement)) {
    switch (UO->getOpcode()) {
    case UO_PostInc:
    case UO_PreInc:
      Increment = true;
      break;
    case UO_PostDec:
    case UO_PreDec:
      Increment = false;
      break;
    default:
      return false;
    }
    DRE = dyn_cast<DeclRefExpr>(UO->getSubExpr());
    return DRE;
  }

  // Overloaded operator++/operator-- applied to a variable reference.
  if (CXXOperatorCallExpr *Call = dyn_cast<CXXOperatorCallExpr>(Statement)) {
    FunctionDecl *FD = Call->getDirectCallee();
    if (!FD || !FD->isOverloadedOperator())
      return false;
    switch (FD->getOverloadedOperator()) {
    case OO_PlusPlus:
      Increment = true;
      break;
    case OO_MinusMinus:
      Increment = false;
      break;
    default:
      return false;
    }
    DRE = dyn_cast<DeclRefExpr>(Call->getArg(0));
    return DRE;
  }

  return false;
}
// A visitor to determine if a continue or break statement is a
// subexpression.
class BreakContinueFinder : public EvaluatedExprVisitor<BreakContinueFinder> {
  SourceLocation BreakLoc;    // Invalid until a break is visited.
  SourceLocation ContinueLoc; // Invalid until a continue is visited.
public:
  typedef EvaluatedExprVisitor<BreakContinueFinder> Inherited;

  // The constructor walks the statement eagerly; query the results below.
  BreakContinueFinder(Sema &S, Stmt* Body) : Inherited(S.Context) {
    Visit(Body);
  }

  void VisitContinueStmt(ContinueStmt* E) { ContinueLoc = E->getContinueLoc(); }
  void VisitBreakStmt(BreakStmt* E) { BreakLoc = E->getBreakLoc(); }

  bool ContinueFound() { return ContinueLoc.isValid(); }
  bool BreakFound() { return BreakLoc.isValid(); }
  SourceLocation GetContinueLoc() { return ContinueLoc; }
  SourceLocation GetBreakLoc() { return BreakLoc; }
}; // end class BreakContinueFinder
// Emit a warning when a loop increment/decrement appears twice per loop
// iteration. The conditions which trigger this warning are:
// 1) The last statement in the loop body and the third expression in the
//    for loop are both increment or both decrement of the same variable
// 2) No continue statements in the loop body.
void CheckForRedundantIteration(Sema &S, Expr *Third, Stmt *Body) {
  // Nothing to do without both an iteration expression and a body.
  if (!Body || !Third) return;

  if (S.Diags.isIgnored(diag::warn_redundant_loop_iteration,
                        Third->getLocStart()))
    return;

  // Only inspect compound bodies that have a final statement to look at.
  CompoundStmt *CompoundBody = dyn_cast<CompoundStmt>(Body);
  if (!CompoundBody || CompoundBody->body_empty()) return;

  Stmt *Tail = CompoundBody->body_back();
  if (!Tail) return;

  // Both the for-increment and the body's final statement must be a simple
  // ++/-- of a variable reference.
  bool LoopIncrement, LastIncrement;
  DeclRefExpr *LoopDRE, *LastDRE;
  if (!ProcessIterationStmt(S, Third, LoopIncrement, LoopDRE)) return;
  if (!ProcessIterationStmt(S, Tail, LastIncrement, LastDRE)) return;

  // Check that the two statements are both increments or both decrements
  // on the same variable.
  if (LoopIncrement != LastIncrement ||
      LoopDRE->getDecl() != LastDRE->getDecl()) return;

  // A continue statement would skip the trailing statement, so the double
  // step is only conditional; stay silent in that case.
  if (BreakContinueFinder(S, Body).ContinueFound()) return;

  S.Diag(LastDRE->getLocation(), diag::warn_redundant_loop_iteration)
      << LastDRE->getDecl() << LastIncrement;
  S.Diag(LoopDRE->getLocation(), diag::note_loop_iteration_here)
      << LoopIncrement;
}
} // end namespace
/// Warn when a break/continue found inside the given expression binds to an
/// enclosing switch or inner loop. This check is only performed for C; for
/// C++ it returns immediately.
void Sema::CheckBreakContinueBinding(Expr *E) {
  if (!E || getLangOpts().CPlusPlus)
    return;

  BreakContinueFinder Finder(*this, E);

  // 'break' binds either to an enclosing switch or to an inner loop.
  if (Scope *BreakParent = CurScope->getBreakParent()) {
    if (Finder.BreakFound()) {
      if (BreakParent->getFlags() & Scope::SwitchScope)
        Diag(Finder.GetBreakLoc(), diag::warn_break_binds_to_switch);
      else
        Diag(Finder.GetBreakLoc(), diag::warn_loop_ctrl_binds_to_inner)
            << "break";
      return;
    }
  }

  if (Finder.ContinueFound() && CurScope->getContinueParent())
    Diag(Finder.GetContinueLoc(), diag::warn_loop_ctrl_binds_to_inner)
        << "continue";
}
/// Build a C/C++ 'for' statement from its parsed pieces. 'second' is the
/// condition (possibly declared via 'secondVar' in C++), 'third' is the
/// iteration expression.
StmtResult
Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
                   Stmt *First, FullExprArg second, Decl *secondVar,
                   FullExprArg third,
                   SourceLocation RParenLoc, Stmt *Body) {
  if (!getLangOpts().CPlusPlus) {
    if (DeclStmt *DS = dyn_cast_or_null<DeclStmt>(First)) {
      // C99 6.8.5p3: The declaration part of a 'for' statement shall only
      // declare identifiers for objects having storage class 'auto' or
      // 'register'.
      for (auto *DI : DS->decls()) {
        VarDecl *VD = dyn_cast<VarDecl>(DI);
        if (VD && VD->isLocalVarDecl() && !VD->hasLocalStorage())
          VD = nullptr;
        if (!VD) {
          Diag(DI->getLocation(), diag::err_non_local_variable_decl_in_for);
          DI->setInvalidDecl();
        }
      }
    }
  }

  // Run loop-related diagnostics on the condition, increment, and body
  // before the condition is wrapped up below.
  CheckBreakContinueBinding(second.get());
  CheckBreakContinueBinding(third.get());

  CheckForLoopConditionalStatement(*this, second.get(), third.get(), Body);
  CheckForRedundantIteration(*this, third.get(), Body);

  // If the condition was declared as a variable (C++ only), check it and
  // derive the converted condition expression from the declaration.
  ExprResult SecondResult(second.release());
  VarDecl *ConditionVar = nullptr;
  if (secondVar) {
    ConditionVar = cast<VarDecl>(secondVar);
    SecondResult = CheckConditionVariable(ConditionVar, ForLoc, true);
    SecondResult = ActOnFinishFullExpr(SecondResult.get(), ForLoc);
    if (SecondResult.isInvalid())
      return StmtError();
  }

  // HLSL Change Begin.
  Expr *Cond = SecondResult.get();
  if (Cond) {
    hlsl::DiagnoseControlFlowConditionForHLSL(this, Cond, "for");
  }
  // HLSL Change End.

  Expr *Third = third.release().getAs<Expr>();

  DiagnoseUnusedExprResult(First);
  DiagnoseUnusedExprResult(Third);
  DiagnoseUnusedExprResult(Body);

  if (isa<NullStmt>(Body))
    getCurCompoundScope().setHasEmptyLoopBodies();

  return new (Context) ForStmt(Context, First, Cond, ConditionVar,
                               Third, Body, ForLoc, LParenLoc, RParenLoc);
}
/// In an Objective C collection iteration statement:
///   for (x in y)
/// x can be an arbitrary l-value expression. Bind it up as a
/// full-expression.
StmtResult Sema::ActOnForEachLValueExpr(Expr *E) {
  // Reduce placeholder expressions here. Note that this rejects the
  // use of pseudo-object l-values in this position.
  ExprResult Result = CheckPlaceholderExpr(E);
  if (Result.isInvalid())
    return StmtError();

  // Wrap the element expression up as a full-expression.
  Result = ActOnFinishFullExpr(Result.get());
  if (Result.isInvalid())
    return StmtError();

  return StmtResult(static_cast<Stmt*>(Result.get()));
}
/// Check and convert the collection operand of an Objective-C for-in
/// statement; returns the converted collection expression, or an error.
ExprResult
Sema::CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) {
  if (!collection)
    return ExprError();

  // Fix up any delayed typos in the collection expression first.
  ExprResult result = CorrectDelayedTyposInExpr(collection);
  if (!result.isUsable())
    return ExprError();
  collection = result.get();

  // Bail out early if we've got a type-dependent expression.
  if (collection->isTypeDependent()) return collection;

  // Perform normal l-value conversion.
  result = DefaultFunctionArrayLvalueConversion(collection);
  if (result.isInvalid())
    return ExprError();
  collection = result.get();

  // The operand needs to have object-pointer type.
  // TODO: should we do a contextual conversion?
  const ObjCObjectPointerType *pointerType =
    collection->getType()->getAs<ObjCObjectPointerType>();
  if (!pointerType)
    return Diag(forLoc, diag::err_collection_expr_type)
             << collection->getType() << collection->getSourceRange();

  // Check that the operand provides
  //   - countByEnumeratingWithState:objects:count:
  const ObjCObjectType *objectType = pointerType->getObjectType();
  ObjCInterfaceDecl *iface = objectType->getInterface();

  // If we have a forward-declared type, we can't do this check.
  // Under ARC, it is an error not to have a forward-declared class.
  if (iface &&
      RequireCompleteType(forLoc, QualType(objectType, 0),
                          getLangOpts().ObjCAutoRefCount
                            ? diag::err_arc_collection_forward
                            : 0,
                          collection)) {
    // Otherwise, if we have any useful type information, check that
    // the type declares the appropriate method.
  } else if (iface || !objectType->qual_empty()) {
    IdentifierInfo *selectorIdents[] = {
      &Context.Idents.get("countByEnumeratingWithState"),
      &Context.Idents.get("objects"),
      &Context.Idents.get("count")
    };
    Selector selector = Context.Selectors.getSelector(3, &selectorIdents[0]);

    ObjCMethodDecl *method = nullptr;

    // If there's an interface, look in both the public and private APIs.
    if (iface) {
      method = iface->lookupInstanceMethod(selector);
      if (!method) method = iface->lookupPrivateMethod(selector);
    }

    // Also check protocol qualifiers.
    if (!method)
      method = LookupMethodInQualifiedType(selector, pointerType,
                                           /*instance*/ true);

    // If we didn't find it anywhere, give up.
    if (!method) {
      Diag(forLoc, diag::warn_collection_expr_type)
        << collection->getType() << selector << collection->getSourceRange();
    }

    // TODO: check for an incompatible signature?
  }

  // Wrap up any cleanups in the expression.
  return collection;
}
/// Build an Objective-C fast enumeration statement 'for (First in
/// collection)'. 'First' is either a single element declaration or a
/// pre-existing l-value expression.
StmtResult
Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
                                 Stmt *First, Expr *collection,
                                 SourceLocation RParenLoc) {
  // Check the collection first, but defer acting on its failure until the
  // element has also been checked, so both problems get diagnosed.
  ExprResult CollectionExprResult =
    CheckObjCForCollectionOperand(ForLoc, collection);

  if (First) {
    QualType FirstType;
    if (DeclStmt *DS = dyn_cast<DeclStmt>(First)) {
      // The element must be exactly one declaration.
      if (!DS->isSingleDecl())
        return StmtError(Diag((*DS->decl_begin())->getLocation(),
                         diag::err_toomany_element_decls));

      VarDecl *D = dyn_cast<VarDecl>(DS->getSingleDecl());
      if (!D || D->isInvalidDecl())
        return StmtError();

      FirstType = D->getType();
      // C99 6.8.5p3: The declaration part of a 'for' statement shall only
      // declare identifiers for objects having storage class 'auto' or
      // 'register'.
      if (!D->hasLocalStorage())
        return StmtError(Diag(D->getLocation(),
                              diag::err_non_local_variable_decl_in_for));

      // If the type contained 'auto', deduce the 'auto' to 'id'.
      if (FirstType->getContainedAutoType()) {
        OpaqueValueExpr OpaqueId(D->getLocation(), Context.getObjCIdType(),
                                 VK_RValue);
        Expr *DeducedInit = &OpaqueId;
        if (DeduceAutoType(D->getTypeSourceInfo(), DeducedInit, FirstType) ==
                DAR_Failed)
          DiagnoseAutoDeductionFailure(D, DeducedInit);
        if (FirstType.isNull()) {
          D->setInvalidDecl();
          return StmtError();
        }

        D->setType(FirstType);

        // Only warn in user code, not inside template instantiations.
        if (ActiveTemplateInstantiations.empty()) {
          SourceLocation Loc =
              D->getTypeSourceInfo()->getTypeLoc().getBeginLoc();
          Diag(Loc, diag::warn_auto_var_is_id)
            << D->getDeclName();
        }
      }

    } else {
      // The element is an expression; it must be a non-const l-value.
      Expr *FirstE = cast<Expr>(First);
      if (!FirstE->isTypeDependent() && !FirstE->isLValue())
        return StmtError(Diag(First->getLocStart(),
                   diag::err_selector_element_not_lvalue)
          << First->getSourceRange());

      FirstType = static_cast<Expr*>(First)->getType();
      if (FirstType.isConstQualified())
        Diag(ForLoc, diag::err_selector_element_const_type)
          << FirstType << First->getSourceRange();
    }

    // The element must be an Objective-C object pointer or block pointer.
    if (!FirstType->isDependentType() &&
        !FirstType->isObjCObjectPointerType() &&
        !FirstType->isBlockPointerType())
        return StmtError(Diag(ForLoc, diag::err_selector_element_type)
                           << FirstType << First->getSourceRange());
  }

  if (CollectionExprResult.isInvalid())
    return StmtError();

  CollectionExprResult = ActOnFinishFullExpr(CollectionExprResult.get());
  if (CollectionExprResult.isInvalid())
    return StmtError();

  return new (Context) ObjCForCollectionStmt(First, CollectionExprResult.get(),
                                             nullptr, ForLoc, RParenLoc);
}
/// Finish building a variable declaration for a for-range statement.
/// Deduces the variable's type from \p Init, attaches the initializer, and
/// registers the (hidden) declaration in the current context.
/// \return true if an error occurs.
static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init,
                                  SourceLocation Loc, int DiagID) {
  // Correct any delayed typos in the initializer while the variable's type
  // is still undeduced.
  if (Decl->getType()->isUndeducedType()) {
    ExprResult Res = SemaRef.CorrectDelayedTyposInExpr(Init);
    if (!Res.isUsable()) {
      Decl->setInvalidDecl();
      return true;
    }
    Init = Res.get();
  }

  // Deduce the type for the iterator variable now rather than leaving it to
  // AddInitializerToDecl, so we can produce a more suitable diagnostic.
  QualType InitType;
  if ((!isa<InitListExpr>(Init) && Init->getType()->isVoidType()) ||
      SemaRef.DeduceAutoType(Decl->getTypeSourceInfo(), Init, InitType) ==
          Sema::DAR_Failed)
    SemaRef.Diag(Loc, DiagID) << Init->getType();
  if (InitType.isNull()) {
    Decl->setInvalidDecl();
    return true;
  }
  Decl->setType(InitType);

  // In ARC, infer lifetime.
  // FIXME: ARC may want to turn this into 'const __unsafe_unretained' if
  // we're doing the equivalent of fast iteration.
  if (SemaRef.getLangOpts().ObjCAutoRefCount &&
      SemaRef.inferObjCARCLifetime(Decl))
    Decl->setInvalidDecl();

  SemaRef.AddInitializerToDecl(Decl, Init, /*DirectInit=*/false,
                               /*TypeMayContainAuto=*/false);
  SemaRef.FinalizeDeclaration(Decl);
  SemaRef.CurContext->addHiddenDecl(Decl);
  return false;
}
namespace {
/// Produce a note indicating which begin/end function was implicitly called
/// by a C++11 for-range statement. This is often not obvious from the code,
/// nor from the diagnostics produced when analysing the implicit expressions
/// required in a for-range statement.
void NoteForRangeBeginEndFunction(Sema &SemaRef, Expr *E,
                                  Sema::BeginEndFunction BEF) {
  CallExpr *CE = dyn_cast<CallExpr>(E);
  if (!CE)
    return;
  // Expr::getCalleeDecl() may return null (e.g. for a call whose callee does
  // not resolve to a declaration); dyn_cast requires a non-null operand, so
  // use dyn_cast_or_null to avoid dereferencing null here.
  FunctionDecl *D = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
  if (!D)
    return;
  SourceLocation Loc = D->getLocation();

  // For a template specialization, show the deduced template arguments too.
  std::string Description;
  bool IsTemplate = false;
  if (FunctionTemplateDecl *FunTmpl = D->getPrimaryTemplate()) {
    Description = SemaRef.getTemplateArgumentBindingsText(
      FunTmpl->getTemplateParameters(), *D->getTemplateSpecializationArgs());
    IsTemplate = true;
  }

  SemaRef.Diag(Loc, diag::note_for_range_begin_end)
    << BEF << IsTemplate << Description << E->getType();
}
/// Build a variable declaration for a for-range statement.
VarDecl *BuildForRangeVarDecl(Sema &SemaRef, SourceLocation Loc,
                              QualType Type, const char *Name) {
  // Synthesize an implicit variable (__range, __begin, __end) in the current
  // context with trivial type-source information.
  IdentifierInfo *Ident = &SemaRef.PP.getIdentifierTable().get(Name);
  TypeSourceInfo *TSI = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
  VarDecl *VD =
      VarDecl::Create(SemaRef.Context, SemaRef.CurContext, Loc, Loc, Ident,
                      Type, TSI, SC_None);
  VD->setImplicit();
  return VD;
}
}
/// Returns true if the expression can serve as the collection of an
/// Objective-C fast-enumeration loop: a non-dependent expression of
/// Objective-C object-pointer type.
static bool ObjCEnumerationCollection(Expr *Collection) {
  if (Collection->isTypeDependent())
    return false;
  return Collection->getType()->getAs<ObjCObjectPointerType>() != nullptr;
}
/// ActOnCXXForRangeStmt - Check and build a C++11 for-range statement.
///
/// C++11 [stmt.ranged]:
///   A range-based for statement is equivalent to
///
///   {
///     auto && __range = range-init;
///     for ( auto __begin = begin-expr,
///           __end = end-expr;
///           __begin != __end;
///           ++__begin ) {
///       for-range-declaration = *__begin;
///       statement
///     }
///   }
///
/// The body of the loop is not available yet, since it cannot be analysed until
/// we have determined the type of the for-range-declaration.
StmtResult
Sema::ActOnCXXForRangeStmt(SourceLocation ForLoc,
                           Stmt *First, SourceLocation ColonLoc, Expr *Range,
                           SourceLocation RParenLoc, BuildForRangeKind Kind) {
  if (!First)
    return StmtError();

  // 'for (x in collection)' over an ObjC collection is handled as an
  // Objective-C fast-enumeration loop, not as a C++ range-for.
  if (Range && ObjCEnumerationCollection(Range))
    return ActOnObjCForCollectionStmt(ForLoc, First, Range, RParenLoc);

  DeclStmt *DS = dyn_cast<DeclStmt>(First);
  assert(DS && "first part of for range not a decl stmt");

  // The for-range-declaration must declare exactly one variable.
  if (!DS->isSingleDecl()) {
    Diag(DS->getStartLoc(), diag::err_type_defined_in_for_range);
    return StmtError();
  }

  Decl *LoopVar = DS->getSingleDecl();
  if (LoopVar->isInvalidDecl() || !Range ||
      DiagnoseUnexpandedParameterPack(Range, UPPC_Expression)) {
    LoopVar->setInvalidDecl();
    return StmtError();
  }

  // Build  auto && __range = range-init
  SourceLocation RangeLoc = Range->getLocStart();
  VarDecl *RangeVar = BuildForRangeVarDecl(*this, RangeLoc,
                                           Context.getAutoRRefDeductType(),
                                           "__range");
  if (FinishForRangeVarDecl(*this, RangeVar, Range, RangeLoc,
                            diag::err_for_range_deduction_failure)) {
    LoopVar->setInvalidDecl();
    return StmtError();
  }

  // Claim the type doesn't contain auto: we've already done the checking.
  DeclGroupPtrTy RangeGroup =
      BuildDeclaratorGroup(MutableArrayRef<Decl *>((Decl **)&RangeVar, 1),
                           /*TypeMayContainAuto=*/ false);
  StmtResult RangeDecl = ActOnDeclStmt(RangeGroup, RangeLoc, RangeLoc);
  if (RangeDecl.isInvalid()) {
    LoopVar->setInvalidDecl();
    return StmtError();
  }

  // Build the remaining pieces (begin/end declarations, condition,
  // increment) from the range declaration.
  return BuildCXXForRangeStmt(ForLoc, ColonLoc, RangeDecl.get(),
                              /*BeginEndDecl=*/nullptr, /*Cond=*/nullptr,
                              /*Inc=*/nullptr, DS, RParenLoc, Kind);
}
/// \brief Create the initialization, compare, and increment steps for
/// the range-based for loop expression.
/// This function does not handle array-based for loops,
/// which are created in Sema::BuildCXXForRangeStmt.
///
/// \returns a ForRangeStatus indicating success or what kind of error occurred.
/// BeginExpr and EndExpr are set and FRS_Success is returned on success;
/// CandidateSet and BEF are set and some non-success value is returned on
/// failure.
static Sema::ForRangeStatus BuildNonArrayForRange(Sema &SemaRef, Scope *S,
                                                  Expr *BeginRange, Expr *EndRange,
                                                  QualType RangeType,
                                                  VarDecl *BeginVar,
                                                  VarDecl *EndVar,
                                                  SourceLocation ColonLoc,
                                                  OverloadCandidateSet *CandidateSet,
                                                  ExprResult *BeginExpr,
                                                  ExprResult *EndExpr,
                                                  Sema::BeginEndFunction *BEF) {
  DeclarationNameInfo BeginNameInfo(
      &SemaRef.PP.getIdentifierTable().get("begin"), ColonLoc);
  DeclarationNameInfo EndNameInfo(&SemaRef.PP.getIdentifierTable().get("end"),
                                  ColonLoc);

  LookupResult BeginMemberLookup(SemaRef, BeginNameInfo,
                                 Sema::LookupMemberName);
  LookupResult EndMemberLookup(SemaRef, EndNameInfo, Sema::LookupMemberName);

  if (CXXRecordDecl *D = RangeType->getAsCXXRecordDecl()) {
    // - if _RangeT is a class type, the unqualified-ids begin and end are
    //   looked up in the scope of class _RangeT as if by class member access
    //   lookup (3.4.5), and if either (or both) finds at least one
    //   declaration, begin-expr and end-expr are __range.begin() and
    //   __range.end(), respectively;
    SemaRef.LookupQualifiedName(BeginMemberLookup, D);
    SemaRef.LookupQualifiedName(EndMemberLookup, D);

    // Finding only one of the two members is an error; report which one is
    // missing.
    if (BeginMemberLookup.empty() != EndMemberLookup.empty()) {
      SourceLocation RangeLoc = BeginVar->getLocation();
      *BEF = BeginMemberLookup.empty() ? Sema::BEF_end : Sema::BEF_begin;

      SemaRef.Diag(RangeLoc, diag::err_for_range_member_begin_end_mismatch)
          << RangeLoc << BeginRange->getType() << *BEF;
      return Sema::FRS_DiagnosticIssued;
    }
  } else {
    // - otherwise, begin-expr and end-expr are begin(__range) and
    //   end(__range), respectively, where begin and end are looked up with
    //   argument-dependent lookup (3.4.2). For the purposes of this name
    //   lookup, namespace std is an associated namespace.
  }

  // Track which call is currently being built so that a failure can be
  // attributed to the right function in the caller's diagnostics.
  *BEF = Sema::BEF_begin;
  Sema::ForRangeStatus RangeStatus =
      SemaRef.BuildForRangeBeginEndCall(S, ColonLoc, ColonLoc, BeginVar,
                                        Sema::BEF_begin, BeginNameInfo,
                                        BeginMemberLookup, CandidateSet,
                                        BeginRange, BeginExpr);

  if (RangeStatus != Sema::FRS_Success)
    return RangeStatus;
  if (FinishForRangeVarDecl(SemaRef, BeginVar, BeginExpr->get(), ColonLoc,
                            diag::err_for_range_iter_deduction_failure)) {
    NoteForRangeBeginEndFunction(SemaRef, BeginExpr->get(), *BEF);
    return Sema::FRS_DiagnosticIssued;
  }

  *BEF = Sema::BEF_end;
  RangeStatus =
      SemaRef.BuildForRangeBeginEndCall(S, ColonLoc, ColonLoc, EndVar,
                                        Sema::BEF_end, EndNameInfo,
                                        EndMemberLookup, CandidateSet,
                                        EndRange, EndExpr);
  if (RangeStatus != Sema::FRS_Success)
    return RangeStatus;
  if (FinishForRangeVarDecl(SemaRef, EndVar, EndExpr->get(), ColonLoc,
                            diag::err_for_range_iter_deduction_failure)) {
    NoteForRangeBeginEndFunction(SemaRef, EndExpr->get(), *BEF);
    return Sema::FRS_DiagnosticIssued;
  }
  return Sema::FRS_Success;
}
/// Speculatively attempt to dereference an invalid range expression.
/// If the attempt fails, this function will return a valid, null StmtResult
/// and emit no diagnostics.
static StmtResult RebuildForRangeWithDereference(Sema &SemaRef, Scope *S,
                                                 SourceLocation ForLoc,
                                                 Stmt *LoopVarDecl,
                                                 SourceLocation ColonLoc,
                                                 Expr *Range,
                                                 SourceLocation RangeLoc,
                                                 SourceLocation RParenLoc) {
  // Determine whether we can rebuild the for-range statement with a
  // dereferenced range expression.
  ExprResult AdjustedRange;
  {
    // Trap diagnostics while probing: this rebuild is only a trial run.
    Sema::SFINAETrap Trap(SemaRef);

    AdjustedRange = SemaRef.BuildUnaryOp(S, RangeLoc, UO_Deref, Range);
    if (AdjustedRange.isInvalid())
      return StmtResult();

    // BFRK_Check validates the loop without committing any AST changes.
    StmtResult SR =
      SemaRef.ActOnCXXForRangeStmt(ForLoc, LoopVarDecl, ColonLoc,
                                   AdjustedRange.get(), RParenLoc,
                                   Sema::BFRK_Check);
    if (SR.isInvalid())
      return StmtResult();
  }

  // The attempt to dereference worked well enough that it could produce a valid
  // loop. Produce a fixit, and rebuild the loop with diagnostics enabled, in
  // case there are any other (non-fatal) problems with it.
  SemaRef.Diag(RangeLoc, diag::err_for_range_dereference)
    << Range->getType() << FixItHint::CreateInsertion(RangeLoc, "*");
  return SemaRef.ActOnCXXForRangeStmt(ForLoc, LoopVarDecl, ColonLoc,
                                      AdjustedRange.get(), RParenLoc,
                                      Sema::BFRK_Rebuild);
}
namespace {
/// RAII object to automatically invalidate a declaration if an error occurs.
struct InvalidateOnErrorScope {
InvalidateOnErrorScope(Sema &SemaRef, Decl *D, bool Enabled)
: Trap(SemaRef.Diags), D(D), Enabled(Enabled) {}
~InvalidateOnErrorScope() {
if (Enabled && Trap.hasErrorOccurred())
D->setInvalidDecl();
}
DiagnosticErrorTrap Trap;
Decl *D;
bool Enabled;
};
}
/// BuildCXXForRangeStmt - Build or instantiate a C++11 for-range statement.
StmtResult
Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
                           Stmt *RangeDecl, Stmt *BeginEnd, Expr *Cond,
                           Expr *Inc, Stmt *LoopVarDecl,
                           SourceLocation RParenLoc, BuildForRangeKind Kind) {
  Scope *S = getCurScope();

  // Unpack the __range and loop-variable declarations built earlier.
  DeclStmt *RangeDS = cast<DeclStmt>(RangeDecl);
  VarDecl *RangeVar = cast<VarDecl>(RangeDS->getSingleDecl());
  QualType RangeVarType = RangeVar->getType();

  DeclStmt *LoopVarDS = cast<DeclStmt>(LoopVarDecl);
  VarDecl *LoopVar = cast<VarDecl>(LoopVarDS->getSingleDecl());

  // If we hit any errors, mark the loop variable as invalid if its type
  // contains 'auto'.
  InvalidateOnErrorScope Invalidate(*this, LoopVar,
                                    LoopVar->getType()->isUndeducedType());

  StmtResult BeginEndDecl = BeginEnd;
  ExprResult NotEqExpr = Cond, IncrExpr = Inc;

  if (RangeVarType->isDependentType()) {
    // The range is implicitly used as a placeholder when it is dependent.
    RangeVar->markUsed(Context);

    // Deduce any 'auto's in the loop variable as 'DependentTy'. We'll fill
    // them in properly when we instantiate the loop.
    if (!LoopVar->isInvalidDecl() && Kind != BFRK_Check)
      LoopVar->setType(SubstAutoType(LoopVar->getType(), Context.DependentTy));
  } else if (!BeginEndDecl.get()) {
    // The caller didn't supply begin/end declarations, so build them here.
    SourceLocation RangeLoc = RangeVar->getLocation();

    const QualType RangeVarNonRefType = RangeVarType.getNonReferenceType();

    ExprResult BeginRangeRef = BuildDeclRefExpr(RangeVar, RangeVarNonRefType,
                                                VK_LValue, ColonLoc);
    if (BeginRangeRef.isInvalid())
      return StmtError();

    ExprResult EndRangeRef = BuildDeclRefExpr(RangeVar, RangeVarNonRefType,
                                              VK_LValue, ColonLoc);
    if (EndRangeRef.isInvalid())
      return StmtError();

    QualType AutoType = Context.getAutoDeductType();
    Expr *Range = RangeVar->getInit();
    if (!Range)
      return StmtError();
    QualType RangeType = Range->getType();

    if (RequireCompleteType(RangeLoc, RangeType,
                            diag::err_for_range_incomplete_type))
      return StmtError();

    // Build auto __begin = begin-expr, __end = end-expr.
    VarDecl *BeginVar = BuildForRangeVarDecl(*this, ColonLoc, AutoType,
                                             "__begin");
    VarDecl *EndVar = BuildForRangeVarDecl(*this, ColonLoc, AutoType,
                                           "__end");

    // Build begin-expr and end-expr and attach to __begin and __end variables.
    ExprResult BeginExpr, EndExpr;
    if (const ArrayType *UnqAT = RangeType->getAsArrayTypeUnsafe()) {
      // - if _RangeT is an array type, begin-expr and end-expr are __range and
      //   __range + __bound, respectively, where __bound is the array bound. If
      //   _RangeT is an array of unknown size or an array of incomplete type,
      //   the program is ill-formed;

      // begin-expr is __range.
      BeginExpr = BeginRangeRef;
      if (FinishForRangeVarDecl(*this, BeginVar, BeginRangeRef.get(), ColonLoc,
                                diag::err_for_range_iter_deduction_failure)) {
        NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
        return StmtError();
      }

      // Find the array bound.
      ExprResult BoundExpr;
      if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(UnqAT))
        BoundExpr = IntegerLiteral::Create(
            Context, CAT->getSize(), Context.getPointerDiffType(), RangeLoc);
      else if (const VariableArrayType *VAT =
               dyn_cast<VariableArrayType>(UnqAT))
        BoundExpr = VAT->getSizeExpr();
      else {
        // Can't be a DependentSizedArrayType or an IncompleteArrayType since
        // UnqAT is not incomplete and Range is not type-dependent.
        llvm_unreachable("Unexpected array type in for-range");
      }

      // end-expr is __range + __bound.
      EndExpr = ActOnBinOp(S, ColonLoc, tok::plus, EndRangeRef.get(),
                           BoundExpr.get());
      if (EndExpr.isInvalid())
        return StmtError();
      if (FinishForRangeVarDecl(*this, EndVar, EndExpr.get(), ColonLoc,
                                diag::err_for_range_iter_deduction_failure)) {
        NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
        return StmtError();
      }
    } else {
      // Non-array ranges go through begin/end member or ADL lookup.
      OverloadCandidateSet CandidateSet(RangeLoc,
                                        OverloadCandidateSet::CSK_Normal);
      Sema::BeginEndFunction BEFFailure;
      ForRangeStatus RangeStatus =
          BuildNonArrayForRange(*this, S, BeginRangeRef.get(),
                                EndRangeRef.get(), RangeType,
                                BeginVar, EndVar, ColonLoc, &CandidateSet,
                                &BeginExpr, &EndExpr, &BEFFailure);

      if (Kind == BFRK_Build && RangeStatus == FRS_NoViableFunction &&
          BEFFailure == BEF_begin) {
        // If the range is being built from an array parameter, emit a
        // a diagnostic that it is being treated as a pointer.
        if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Range)) {
          if (ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
            QualType ArrayTy = PVD->getOriginalType();
            QualType PointerTy = PVD->getType();
            if (PointerTy->isPointerType() && ArrayTy->isArrayType()) {
              Diag(Range->getLocStart(), diag::err_range_on_array_parameter)
                << RangeLoc << PVD << ArrayTy << PointerTy;
              Diag(PVD->getLocation(), diag::note_declared_at);
              return StmtError();
            }
          }
        }

        // If building the range failed, try dereferencing the range expression
        // unless a diagnostic was issued or the end function is problematic.
        StmtResult SR = RebuildForRangeWithDereference(*this, S, ForLoc,
                                                       LoopVarDecl, ColonLoc,
                                                       Range, RangeLoc,
                                                       RParenLoc);
        if (SR.isInvalid() || SR.isUsable())
          return SR;
      }

      // Otherwise, emit diagnostics if we haven't already.
      if (RangeStatus == FRS_NoViableFunction) {
        Expr *Range = BEFFailure ? EndRangeRef.get() : BeginRangeRef.get();
        Diag(Range->getLocStart(), diag::err_for_range_invalid)
            << RangeLoc << Range->getType() << BEFFailure;
        CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Range);
      }
      // Return an error if no fix was discovered.
      if (RangeStatus != FRS_Success)
        return StmtError();
    }

    assert(!BeginExpr.isInvalid() && !EndExpr.isInvalid() &&
           "invalid range expression in for loop");

    // C++11 [dcl.spec.auto]p7: BeginType and EndType must be the same.
    QualType BeginType = BeginVar->getType(), EndType = EndVar->getType();
    if (!Context.hasSameType(BeginType, EndType)) {
      Diag(RangeLoc, diag::err_for_range_begin_end_types_differ)
        << BeginType << EndType;
      NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
      NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
    }

    Decl *BeginEndDecls[] = { BeginVar, EndVar };
    // Claim the type doesn't contain auto: we've already done the checking.
    DeclGroupPtrTy BeginEndGroup =
        BuildDeclaratorGroup(MutableArrayRef<Decl *>(BeginEndDecls, 2),
                             /*TypeMayContainAuto=*/ false);
    BeginEndDecl = ActOnDeclStmt(BeginEndGroup, ColonLoc, ColonLoc);

    const QualType BeginRefNonRefType = BeginType.getNonReferenceType();
    ExprResult BeginRef = BuildDeclRefExpr(BeginVar, BeginRefNonRefType,
                                           VK_LValue, ColonLoc);
    if (BeginRef.isInvalid())
      return StmtError();

    ExprResult EndRef = BuildDeclRefExpr(EndVar, EndType.getNonReferenceType(),
                                         VK_LValue, ColonLoc);
    if (EndRef.isInvalid())
      return StmtError();

    // Build and check __begin != __end expression.
    NotEqExpr = ActOnBinOp(S, ColonLoc, tok::exclaimequal,
                           BeginRef.get(), EndRef.get());
    NotEqExpr = ActOnBooleanCondition(S, ColonLoc, NotEqExpr.get());
    NotEqExpr = ActOnFinishFullExpr(NotEqExpr.get());
    if (NotEqExpr.isInvalid()) {
      Diag(RangeLoc, diag::note_for_range_invalid_iterator)
        << RangeLoc << 0 << BeginRangeRef.get()->getType();
      NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
      if (!Context.hasSameType(BeginType, EndType))
        NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
      return StmtError();
    }

    // Build and check ++__begin expression.
    BeginRef = BuildDeclRefExpr(BeginVar, BeginRefNonRefType,
                                VK_LValue, ColonLoc);
    if (BeginRef.isInvalid())
      return StmtError();

    IncrExpr = ActOnUnaryOp(S, ColonLoc, tok::plusplus, BeginRef.get());
    IncrExpr = ActOnFinishFullExpr(IncrExpr.get());
    if (IncrExpr.isInvalid()) {
      Diag(RangeLoc, diag::note_for_range_invalid_iterator)
        << RangeLoc << 2 << BeginRangeRef.get()->getType() ;
      NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
      return StmtError();
    }

    // Build and check *__begin expression.
    BeginRef = BuildDeclRefExpr(BeginVar, BeginRefNonRefType,
                                VK_LValue, ColonLoc);
    if (BeginRef.isInvalid())
      return StmtError();

    ExprResult DerefExpr = ActOnUnaryOp(S, ColonLoc, tok::star, BeginRef.get());
    if (DerefExpr.isInvalid()) {
      Diag(RangeLoc, diag::note_for_range_invalid_iterator)
        << RangeLoc << 1 << BeginRangeRef.get()->getType();
      NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
      return StmtError();
    }

    // Attach *__begin as initializer for VD. Don't touch it if we're just
    // trying to determine whether this would be a valid range.
    if (!LoopVar->isInvalidDecl() && Kind != BFRK_Check) {
      AddInitializerToDecl(LoopVar, DerefExpr.get(), /*DirectInit=*/false,
                           /*TypeMayContainAuto=*/true);
      if (LoopVar->isInvalidDecl())
        NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
    }
  }

  // Don't bother to actually allocate the result if we're just trying to
  // determine whether it would be valid.
  if (Kind == BFRK_Check)
    return StmtResult();

  return new (Context) CXXForRangeStmt(
      RangeDS, cast_or_null<DeclStmt>(BeginEndDecl.get()), NotEqExpr.get(),
      IncrExpr.get(), LoopVarDS, /*Body=*/nullptr, ForLoc, ColonLoc, RParenLoc);
}
/// FinishObjCForCollectionStmt - Attach the loop body to an Objective-C
/// fast-enumeration (for-in) statement that was built earlier.
StmtResult Sema::FinishObjCForCollectionStmt(Stmt *S, Stmt *B) {
  // Either piece may be null after a parse/sema error; bail out in that case.
  if (!B || !S)
    return StmtError();
  cast<ObjCForCollectionStmt>(S)->setBody(B);
  return S;
}
// Warn when the loop variable is a const reference that creates a copy.
// Suggest using the non-reference type for copies. If a copy can be prevented
// suggest the const reference type that would do so.
// For instance, given "for (const &Foo : Range)", suggest
// "for (const Foo : Range)" to denote a copy is made for the loop. If
// possible, also suggest "for (const &Bar : Range)" if this type prevents
// the copy altogether.
static void DiagnoseForRangeReferenceVariableCopies(Sema &SemaRef,
                                                    const VarDecl *VD,
                                                    QualType RangeInitType) {
  const Expr *InitExpr = VD->getInit();
  if (!InitExpr)
    return;
  QualType VariableType = VD->getType();
  // A copy is only made when the reference binds to a materialized temporary.
  const MaterializeTemporaryExpr *MTE =
      dyn_cast<MaterializeTemporaryExpr>(InitExpr);
  // No copy made.
  if (!MTE)
    return;
  const Expr *E = MTE->GetTemporaryExpr()->IgnoreImpCasts();
  // Searching for either UnaryOperator for dereference of a pointer or
  // CXXOperatorCallExpr for handling iterators.
  while (!isa<CXXOperatorCallExpr>(E) && !isa<UnaryOperator>(E)) {
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(E)) {
      // Step through a constructor call to its source argument.
      E = CCE->getArg(0);
    } else if (const CXXMemberCallExpr *Call = dyn_cast<CXXMemberCallExpr>(E)) {
      // Step through a member call (e.g. a conversion function) to its base.
      const MemberExpr *ME = cast<MemberExpr>(Call->getCallee());
      E = ME->getBase();
    } else {
      // Otherwise unwrap a nested materialized temporary.
      const MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(E);
      E = MTE->GetTemporaryExpr();
    }
    E = E->IgnoreImpCasts();
  }
  bool ReturnsReference = false;
  if (isa<UnaryOperator>(E)) {
    // Built-in dereference of a pointer always yields an lvalue reference.
    ReturnsReference = true;
  } else {
    // For iterator-style ranges, inspect the declared return type of the
    // overloaded operator (typically operator*).
    const CXXOperatorCallExpr *Call = cast<CXXOperatorCallExpr>(E);
    const FunctionDecl *FD = Call->getDirectCallee();
    QualType ReturnType = FD->getReturnType();
    ReturnsReference = ReturnType->isReferenceType();
  }
  if (ReturnsReference) {
    // Loop variable creates a temporary. Suggest either to go with a
    // non-reference loop variable to indicate a copy is made, or
    // the correct type to bind a const reference.
    SemaRef.Diag(VD->getLocation(), diag::warn_for_range_const_reference_copy)
        << VD << VariableType << E->getType();
    QualType NonReferenceType = VariableType.getNonReferenceType();
    NonReferenceType.removeLocalConst();
    QualType NewReferenceType =
        SemaRef.Context.getLValueReferenceType(E->getType().withConst());
    SemaRef.Diag(VD->getLocStart(), diag::note_use_type_or_non_reference)
        << NonReferenceType << NewReferenceType << VD->getSourceRange();
  } else {
    // The range always returns a copy, so a temporary is always created.
    // Suggest removing the reference from the loop variable.
    SemaRef.Diag(VD->getLocation(), diag::warn_for_range_variable_always_copy)
        << VD << RangeInitType;
    QualType NonReferenceType = VariableType.getNonReferenceType();
    NonReferenceType.removeLocalConst();
    SemaRef.Diag(VD->getLocStart(), diag::note_use_non_reference_type)
        << NonReferenceType << VD->getSourceRange();
  }
}
// Warns when the loop variable can be changed to a reference type to
// prevent a copy. For instance, given "for (const Foo x : Range)" suggest
// "for (const Foo &x : Range)" if that form would not make a copy.
static void DiagnoseForRangeConstVariableCopies(Sema &SemaRef,
                                                const VarDecl *VD) {
  const Expr *Init = VD->getInit();
  if (!Init)
    return;
  QualType LoopVarType = VD->getType();
  // Only diagnose initializers that actually perform a copy: either a call
  // to a copy constructor, or an lvalue-to-rvalue load.
  bool MakesCopy = false;
  if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
    MakesCopy = Construct->getConstructor()->isCopyConstructor();
  else if (const CastExpr *Cast = dyn_cast<CastExpr>(Init))
    MakesCopy = Cast->getCastKind() == CK_LValueToRValue;
  if (!MakesCopy)
    return;
  // TODO: Determine a maximum size that a POD type can be before a diagnostic
  // should be emitted. Also, only ignore POD types with trivial copy
  // constructors.
  if (LoopVarType.isPODType(SemaRef.Context))
    return;
  // Suggest switching to a const reference variable if doing so would
  // prevent the copy.
  SemaRef.Diag(VD->getLocation(), diag::warn_for_range_copy)
      << VD << LoopVarType << Init->getType();
  SemaRef.Diag(VD->getLocStart(), diag::note_use_reference_type)
      << SemaRef.Context.getLValueReferenceType(LoopVarType)
      << VD->getSourceRange();
}
/// DiagnoseForRangeVariableCopies - Diagnose three cases and fixes for them.
/// 1) for (const foo &x : foos) where foos only returns a copy. Suggest
///    using "const foo x" to show that a copy is made
/// 2) for (const bar &x : foos) where bar is a temporary initialized by bar.
///    Suggest either "const bar x" to keep the copying or "const foo& x" to
///    prevent the copy.
/// 3) for (const foo x : foos) where x is constructed from a reference foo.
///    Suggest "const foo &x" to prevent the copy.
static void DiagnoseForRangeVariableCopies(Sema &SemaRef,
                                           const CXXForRangeStmt *ForStmt) {
  // Skip all analysis when every related warning is disabled at this location.
  SourceLocation Loc = ForStmt->getLocStart();
  bool AllIgnored =
      SemaRef.Diags.isIgnored(diag::warn_for_range_const_reference_copy, Loc) &&
      SemaRef.Diags.isIgnored(diag::warn_for_range_variable_always_copy, Loc) &&
      SemaRef.Diags.isIgnored(diag::warn_for_range_copy, Loc);
  if (AllIgnored)
    return;
  const VarDecl *LoopVar = ForStmt->getLoopVariable();
  if (!LoopVar)
    return;
  // Nothing sensible to say about incomplete types or uninitialized variables.
  QualType LoopVarType = LoopVar->getType();
  if (LoopVarType->isIncompleteType())
    return;
  if (!LoopVar->getInit())
    return;
  if (LoopVarType->isReferenceType())
    DiagnoseForRangeReferenceVariableCopies(
        SemaRef, LoopVar, ForStmt->getRangeInit()->getType());
  else if (LoopVarType.isConstQualified())
    DiagnoseForRangeConstVariableCopies(SemaRef, LoopVar);
}
/// FinishCXXForRangeStmt - Attach the body to a C++0x for-range statement.
/// This is a separate step from ActOnCXXForRangeStmt because analysis of the
/// body cannot be performed until after the type of the range variable is
/// determined.
StmtResult Sema::FinishCXXForRangeStmt(Stmt *S, Stmt *B) {
  if (!B || !S)
    return StmtError();
  // An Objective-C collection loop can arrive here through the shared
  // for-statement path; hand it off to the ObjC-specific finisher.
  if (isa<ObjCForCollectionStmt>(S))
    return FinishObjCForCollectionStmt(S, B);
  CXXForRangeStmt *RangeFor = cast<CXXForRangeStmt>(S);
  RangeFor->setBody(B);
  // Body-dependent diagnostics run now that the statement is complete.
  DiagnoseEmptyStmtBody(RangeFor->getRParenLoc(), B,
                        diag::warn_empty_range_based_for_body);
  DiagnoseForRangeVariableCopies(*this, RangeFor);
  return S;
}
/// Build a direct 'goto' statement targeting \p TheDecl.
StmtResult Sema::ActOnGotoStmt(SourceLocation GotoLoc,
                               SourceLocation LabelLoc,
                               LabelDecl *TheDecl) {
  // A goto uses its label even if nothing else references it.
  TheDecl->markUsed(Context);
  // Record the potential branch-into-scope so jump diagnostics run later.
  getCurFunction()->setHasBranchIntoScope();
  return new (Context) GotoStmt(TheDecl, GotoLoc, LabelLoc);
}
/// Build an indirect 'goto *expr' statement (GNU address-of-label extension).
StmtResult
Sema::ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc,
                            Expr *E) {
  if (!E->isTypeDependent()) {
    // The operand must be convertible to 'const void *'.
    QualType OperandTy = E->getType();
    QualType VoidPtrTy = Context.getPointerType(Context.VoidTy.withConst());
    ExprResult Converted = E;
    AssignConvertType ConvTy =
      CheckSingleAssignmentConstraints(VoidPtrTy, Converted);
    if (Converted.isInvalid())
      return StmtError();
    E = Converted.get();
    if (DiagnoseAssignmentResult(ConvTy, StarLoc, VoidPtrTy, OperandTy, E,
                                 AA_Passing))
      return StmtError();
  }
  // Wrap the operand in a full-expression so temporaries are cleaned up.
  ExprResult Finished = ActOnFinishFullExpr(E);
  if (Finished.isInvalid())
    return StmtError();
  E = Finished.get();
  // Note the indirect goto so label scoping checks know about it.
  getCurFunction()->setHasIndirectGoto();
  return new (Context) IndirectGotoStmt(GotoLoc, StarLoc, E);
}
/// Warn when a break/continue/return would transfer control out of the
/// innermost __finally block; leaving a __finally abnormally has
/// implementation-defined behavior in SEH.
static void CheckJumpOutOfSEHFinally(Sema &S, SourceLocation Loc,
                                     const Scope &DestScope) {
  if (S.CurrentSEHFinally.empty())
    return;
  // The jump escapes the __finally if the destination scope encloses it.
  if (DestScope.Contains(*S.CurrentSEHFinally.back()))
    S.Diag(Loc, diag::warn_jump_out_of_seh_finally);
}
/// Build a 'continue' statement, verifying that a loop encloses it.
StmtResult
Sema::ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope) {
  Scope *Target = CurScope->getContinueParent();
  if (!Target) {
    // C99 6.8.6.2p1: A continue shall appear only in or as a loop body.
    return StmtError(Diag(ContinueLoc, diag::err_continue_not_in_loop));
  }
  CheckJumpOutOfSEHFinally(*this, ContinueLoc, *Target);
  return new (Context) ContinueStmt(ContinueLoc);
}
/// Build a 'break' statement, verifying that a loop or switch encloses it.
StmtResult
Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
  Scope *Target = CurScope->getBreakParent();
  if (!Target) {
    // C99 6.8.6.3p1: A break shall appear only in or as a switch/loop body.
    return StmtError(Diag(BreakLoc, diag::err_break_not_in_loop_or_switch));
  }
  // An OpenMP canonical loop body may not be exited with 'break'.
  if (Target->isOpenMPLoopScope())
    return StmtError(Diag(BreakLoc, diag::err_omp_loop_cannot_use_stmt)
                     << "break");
  CheckJumpOutOfSEHFinally(*this, BreakLoc, *Target);
  return new (Context) BreakStmt(BreakLoc);
}
/// \brief Determine whether the given expression is a candidate for
/// copy elision in either a return statement or a throw expression.
///
/// \param ReturnType If we're determining the copy elision candidate for
/// a return statement, this is the return type of the function. If we're
/// determining the copy elision candidate for a throw expression, this will
/// be a NULL type.
///
/// \param E The expression being returned from the function or block, or
/// being thrown.
///
/// \param AllowFunctionParameter Whether we allow function parameters to
/// be considered NRVO candidates. C++ prohibits this for NRVO itself, but
/// we re-use this logic to determine whether we should try to move as part of
/// a return or throw (which does allow function parameters).
///
/// \returns The NRVO candidate variable, if the return statement may use the
/// NRVO, or NULL if there is no such candidate.
VarDecl *Sema::getCopyElisionCandidate(QualType ReturnType,
                                       Expr *E,
                                       bool AllowFunctionParameter) {
  // Copy elision / NRVO is a C++ notion; there is nothing to do in C.
  if (!getLangOpts().CPlusPlus)
    return nullptr;
  // - in a return statement in a function [where] ...
  // ... the expression is the name of a non-volatile automatic object ...
  DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E->IgnoreParens());
  if (!DR || DR->refersToEnclosingVariableOrCapture())
    return nullptr;
  VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
  if (!VD)
    return nullptr;
  // HLSL Change Begins: NRVO unsafe for a variety of cases in HLSL
  if (getLangOpts().HLSL && hlsl::ShouldSkipNRVO(*this, ReturnType, VD, getCurFunctionDecl()))
    return nullptr;
  // HLSL Change Ends
  // Delegate the detailed [class.copy] criteria checks.
  if (isCopyElisionCandidate(ReturnType, VD, AllowFunctionParameter))
    return VD;
  return nullptr;
}
/// Determine whether \p VD satisfies the C++ [class.copy] criteria for copy
/// elision / NRVO with respect to a function returning \p ReturnType.
///
/// \param ReturnType the function's return type, or a null type when checking
/// a throw expression (which skips the return-type-related checks).
/// \param AllowFunctionParameter when true, function parameters also qualify
/// (used to decide whether to try a move on return/throw).
bool Sema::isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
                                  bool AllowFunctionParameter) {
  QualType VDType = VD->getType();
  // - in a return statement in a function with ...
  // ... a class return type ...
  if (!ReturnType.isNull() && !ReturnType->isDependentType()) {
    if (!ReturnType->isRecordType())
      return false;
    // ... the same cv-unqualified type as the function return type ...
    if (!VDType->isDependentType() &&
        !Context.hasSameUnqualifiedType(ReturnType, VDType))
      return false;
  }
  // ...object (other than a function or catch-clause parameter)...
  if (VD->getKind() != Decl::Var &&
      !(AllowFunctionParameter && VD->getKind() == Decl::ParmVar))
    return false;
  if (VD->isExceptionVariable()) return false;
  // ...automatic...
  if (!VD->hasLocalStorage()) return false;
  // ...non-volatile...
  if (VD->getType().isVolatileQualified()) return false;
  // __block variables can't be allocated in a way that permits NRVO.
  if (VD->hasAttr<BlocksAttr>()) return false;
  // Variables with higher required alignment than their type's ABI
  // alignment cannot use NRVO.
  if (!VD->getType()->isDependentType() && VD->hasAttr<AlignedAttr>() &&
      Context.getDeclAlign(VD) > Context.getTypeAlignInChars(VD->getType()))
    return false;
  return true;
}
/// \brief Perform the initialization of a potentially-movable value, which
/// is the result of return value.
///
/// This routine implements C++0x [class.copy]p33, which attempts to treat
/// returned lvalues as rvalues in certain cases (to prefer move construction),
/// then falls back to treating them as lvalues if that failed.
///
/// \param Entity the entity being initialized (the function/block result).
/// \param NRVOCandidate the already-computed NRVO candidate, if any; when
/// null and \p AllowNRVO is set, one is computed here.
/// \param ResultType the declared result type being initialized.
/// \param Value the expression being returned (or thrown).
/// \param AllowNRVO whether the try-move-first treatment may be applied.
ExprResult
Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                      const VarDecl *NRVOCandidate,
                                      QualType ResultType,
                                      Expr *Value,
                                      bool AllowNRVO) {
  // C++0x [class.copy]p33:
  //   When the criteria for elision of a copy operation are met or would
  //   be met save for the fact that the source object is a function
  //   parameter, and the object to be copied is designated by an lvalue,
  //   overload resolution to select the constructor for the copy is first
  //   performed as if the object were designated by an rvalue.
  ExprResult Res = ExprError();
  if (AllowNRVO &&
      (NRVOCandidate || getCopyElisionCandidate(ResultType, Value, true))) {
    // Build a stack-allocated xvalue cast so overload resolution can see the
    // operand as an rvalue without yet committing an AST node to the heap.
    ImplicitCastExpr AsRvalue(ImplicitCastExpr::OnStack,
                              Value->getType(), CK_NoOp, Value, VK_XValue);
    Expr *InitExpr = &AsRvalue;
    InitializationKind Kind
      = InitializationKind::CreateCopy(Value->getLocStart(),
                                       Value->getLocStart());
    InitializationSequence Seq(*this, Entity, Kind, InitExpr);
    //   [...] If overload resolution fails, or if the type of the first
    //   parameter of the selected constructor is not an rvalue reference
    //   to the object's type (possibly cv-qualified), overload resolution
    //   is performed again, considering the object as an lvalue.
    if (Seq) {
      for (InitializationSequence::step_iterator Step = Seq.step_begin(),
           StepEnd = Seq.step_end();
           Step != StepEnd; ++Step) {
        if (Step->Kind != InitializationSequence::SK_ConstructorInitialization)
          continue;
        CXXConstructorDecl *Constructor
        = cast<CXXConstructorDecl>(Step->Function.Function);
        const RValueReferenceType *RRefType
          = Constructor->getParamDecl(0)->getType()
                                                 ->getAs<RValueReferenceType>();
        // If we don't meet the criteria, break out now.
        if (!RRefType ||
            !Context.hasSameUnqualifiedType(RRefType->getPointeeType(),
                            Context.getTypeDeclType(Constructor->getParent())))
          break;
        // Promote "AsRvalue" to the heap, since we now need this
        // expression node to persist.
        Value = ImplicitCastExpr::Create(Context, Value->getType(),
                                         CK_NoOp, Value, nullptr, VK_XValue);
        // Complete type-checking the initialization of the return type
        // using the constructor we found.
        Res = Seq.Perform(*this, Entity, Kind, Value);
      }
    }
  }
  // Either we didn't meet the criteria for treating an lvalue as an rvalue,
  // above, or overload resolution failed. Either way, we need to try
  // (again) now with the return value expression as written.
  if (Res.isInvalid())
    Res = PerformCopyInitialization(Entity, SourceLocation(), Value);
  return Res;
}
/// \brief Determine whether the declared return type of the specified function
/// contains 'auto', i.e. still awaits return type deduction.
static bool hasDeducedReturnType(FunctionDecl *FD) {
  // Inspect the type as written in the source, not the (possibly already
  // deduced) semantic type of the declaration.
  return FD->getTypeSourceInfo()
      ->getType()
      ->castAs<FunctionProtoType>()
      ->getReturnType()
      ->isUndeducedType();
}
/// ActOnCapScopeReturnStmt - Utility routine to type-check return statements
/// for capturing scopes (blocks, lambdas, and captured statement regions).
///
StmtResult
Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
  // If this is the first return we've seen, infer the return type.
  // [expr.prim.lambda]p4 in C++11; block literals follow the same rules.
  CapturingScopeInfo *CurCap = cast<CapturingScopeInfo>(getCurFunction());
  QualType FnRetType = CurCap->ReturnType;
  LambdaScopeInfo *CurLambda = dyn_cast<LambdaScopeInfo>(CurCap);
  if (CurLambda && hasDeducedReturnType(CurLambda->CallOperator)) {
    // In C++1y, the return type may involve 'auto'.
    // FIXME: Blocks might have a return type of 'auto' explicitly specified.
    FunctionDecl *FD = CurLambda->CallOperator;
    if (CurCap->ReturnType.isNull())
      CurCap->ReturnType = FD->getReturnType();
    AutoType *AT = CurCap->ReturnType->getContainedAutoType();
    assert(AT && "lost auto type from lambda return type");
    if (DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) {
      FD->setInvalidDecl();
      return StmtError();
    }
    CurCap->ReturnType = FnRetType = FD->getReturnType();
  } else if (CurCap->HasImplicitReturnType) {
    // For blocks/lambdas with implicit return types, we check each return
    // statement individually, and deduce the common return type when the block
    // or lambda is completed.
    // FIXME: Fold this into the 'auto' codepath above.
    if (RetValExp && !isa<InitListExpr>(RetValExp)) {
      ExprResult Result = DefaultFunctionArrayLvalueConversion(RetValExp);
      if (Result.isInvalid())
        return StmtError();
      RetValExp = Result.get();
      // DR1048: even prior to C++14, we should use the 'auto' deduction rules
      // when deducing a return type for a lambda-expression (or by extension
      // for a block). These rules differ from the stated C++11 rules only in
      // that they remove top-level cv-qualifiers.
      if (!CurContext->isDependentContext())
        FnRetType = RetValExp->getType().getUnqualifiedType();
      else
        FnRetType = CurCap->ReturnType = Context.DependentTy;
    } else {
      if (RetValExp) {
        // C++11 [expr.lambda.prim]p4 bans inferring the result from an
        // initializer list, because it is not an expression (even
        // though we represent it as one). We still deduce 'void'.
        Diag(ReturnLoc, diag::err_lambda_return_init_list)
          << RetValExp->getSourceRange();
      }
      FnRetType = Context.VoidTy;
    }
    // Although we'll properly infer the type of the block once it's completed,
    // make sure we provide a return type now for better error recovery.
    if (CurCap->ReturnType.isNull())
      CurCap->ReturnType = FnRetType;
  }
  assert(!FnRetType.isNull());
  // Returning a value is invalid in noreturn blocks/lambdas, and any return
  // at all is invalid inside a captured statement region.
  if (BlockScopeInfo *CurBlock = dyn_cast<BlockScopeInfo>(CurCap)) {
    if (CurBlock->FunctionType->getAs<FunctionType>()->getNoReturnAttr()) {
      Diag(ReturnLoc, diag::err_noreturn_block_has_return_expr);
      return StmtError();
    }
  } else if (CapturedRegionScopeInfo *CurRegion =
                 dyn_cast<CapturedRegionScopeInfo>(CurCap)) {
    Diag(ReturnLoc, diag::err_return_in_captured_stmt) << CurRegion->getRegionName();
    return StmtError();
  } else {
    assert(CurLambda && "unknown kind of captured scope");
    if (CurLambda->CallOperator->getType()->getAs<FunctionType>()
            ->getNoReturnAttr()) {
      Diag(ReturnLoc, diag::err_noreturn_lambda_has_return_expr);
      return StmtError();
    }
  }
  // Otherwise, verify that this result type matches the previous one. We are
  // pickier with blocks than for normal functions because we don't have GCC
  // compatibility to worry about here.
  const VarDecl *NRVOCandidate = nullptr;
  if (FnRetType->isDependentType()) {
    // Delay processing for now. TODO: there are lots of dependent
    // types we can conclusively prove aren't void.
  } else if (FnRetType->isVoidType()) {
    if (RetValExp && !isa<InitListExpr>(RetValExp) &&
        !(getLangOpts().CPlusPlus &&
          (RetValExp->isTypeDependent() ||
           RetValExp->getType()->isVoidType()))) {
      if (!getLangOpts().CPlusPlus &&
          RetValExp->getType()->isVoidType())
        Diag(ReturnLoc, diag::ext_return_has_void_expr) << "literal" << 2;
      else {
        Diag(ReturnLoc, diag::err_return_block_has_expr);
        RetValExp = nullptr;
      }
    }
  } else if (!RetValExp) {
    return StmtError(Diag(ReturnLoc, diag::err_block_return_missing_expr));
  } else if (!RetValExp->isTypeDependent()) {
    // we have a non-void block with an expression, continue checking
    // C99 6.8.6.4p3(136): The return statement is not an assignment. The
    // overlap restriction of subclause 6.5.16.1 does not apply to the case of
    // function return.
    // In C++ the return statement is handled via a copy initialization.
    // the C version of which boils down to CheckSingleAssignmentConstraints.
    NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, false);
    InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
                                                                   FnRetType,
                                                      NRVOCandidate != nullptr);
    ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRVOCandidate,
                                                     FnRetType, RetValExp);
    if (Res.isInvalid()) {
      // FIXME: Cleanup temporaries here, anyway?
      return StmtError();
    }
    RetValExp = Res.get();
    CheckReturnValExpr(RetValExp, FnRetType, ReturnLoc);
  } else {
    // Type-dependent operand: just record the NRVO candidate for later.
    NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, false);
  }
  if (RetValExp) {
    // Wrap the operand in a full-expression so temporaries are cleaned up.
    ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc);
    if (ER.isInvalid())
      return StmtError();
    RetValExp = ER.get();
  }
  ReturnStmt *Result = new (Context) ReturnStmt(ReturnLoc, RetValExp,
                                                NRVOCandidate);
  // If we need to check for the named return value optimization,
  // or if we need to infer the return type,
  // save the return statement in our scope for later processing.
  if (CurCap->HasImplicitReturnType || NRVOCandidate)
    FunctionScopes.back()->Returns.push_back(Result);
  return Result;
}
namespace {
/// \brief Marks all typedefs in all local classes in a type referenced.
///
/// In a function like
/// auto f() {
///   struct S { typedef int a; };
///   return S();
/// }
///
/// the local type escapes and could be referenced in some TUs but not in
/// others. Pretend that all local typedefs are always referenced, to not warn
/// on this. This isn't necessary if f has internal linkage, or the typedef
/// is private.
class LocalTypedefNameReferencer
    : public RecursiveASTVisitor<LocalTypedefNameReferencer> {
public:
  LocalTypedefNameReferencer(Sema &S) : S(S) {}
  // Visitor hook: fires for every RecordType reachable from the traversed type.
  bool VisitRecordType(const RecordType *RT);
private:
  Sema &S;
};
bool LocalTypedefNameReferencer::VisitRecordType(const RecordType *RT) {
  // Only externally-visible, non-dependent local classes need this treatment;
  // isLocalClass() returns the enclosing function (or null), whose visibility
  // determines whether the type can escape the TU.
  auto *R = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!R || !R->isLocalClass() || !R->isLocalClass()->isExternallyVisible() ||
      R->isDependentType())
    return true;
  // Mark each typedef as referenced; private typedefs are only included when
  // the class has friends that could name them.
  for (auto *TmpD : R->decls())
    if (auto *T = dyn_cast<TypedefNameDecl>(TmpD))
      if (T->getAccess() != AS_private || R->hasFriends())
        S.MarkAnyDeclReferenced(T->getLocation(), T, /*OdrUse=*/false);
  return true;
}
}
/// Retrieve the source location information for the return type as written
/// in the declaration of \p FD, looking through parens and type attributes.
TypeLoc Sema::getReturnTypeLoc(FunctionDecl *FD) const {
  TypeLoc Loc = FD->getTypeSourceInfo()->getTypeLoc().IgnoreParens();
  // Peel off any layers of type attributes (and parens they may wrap) until
  // the underlying function prototype is exposed.
  for (auto Attributed = Loc.getAs<AttributedTypeLoc>(); Attributed;
       Attributed = Loc.getAs<AttributedTypeLoc>())
    Loc = Attributed.getModifiedLoc().IgnoreParens();
  return Loc.castAs<FunctionProtoTypeLoc>().getReturnLoc();
}
/// Deduce the return type for a function from a returned expression, per
/// C++1y [dcl.spec.auto]p6.
///
/// \param FD the function whose 'auto'-containing return type is deduced.
/// \param ReturnLoc the location of the return statement driving deduction.
/// \param RetExpr the returned expression, or null for a bare 'return;'.
/// \param AT the contained AutoType being deduced.
/// \returns true on error (a diagnostic has been emitted), false on success.
bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
                                            SourceLocation ReturnLoc,
                                            Expr *&RetExpr,
                                            AutoType *AT) {
  TypeLoc OrigResultType = getReturnTypeLoc(FD);
  QualType Deduced;
  if (RetExpr && isa<InitListExpr>(RetExpr)) {
    //  If the deduction is for a return statement and the initializer is
    //  a braced-init-list, the program is ill-formed.
    Diag(RetExpr->getExprLoc(),
         getCurLambda() ? diag::err_lambda_return_init_list
                        : diag::err_auto_fn_return_init_list)
        << RetExpr->getSourceRange();
    return true;
  }
  if (FD->isDependentContext()) {
    // C++1y [dcl.spec.auto]p12:
    //   Return type deduction [...] occurs when the definition is
    //   instantiated even if the function body contains a return
    //   statement with a non-type-dependent operand.
    assert(AT->isDeduced() && "should have deduced to dependent type");
    return false;
  } else if (RetExpr) {
    //  If the deduction is for a return statement and the initializer is
    //  a braced-init-list, the program is ill-formed.
    if (isa<InitListExpr>(RetExpr)) {
      Diag(RetExpr->getExprLoc(), diag::err_auto_fn_return_init_list);
      return true;
    }
    //  Otherwise, [...] deduce a value for U using the rules of template
    //  argument deduction.
    DeduceAutoResult DAR = DeduceAutoType(OrigResultType, RetExpr, Deduced);
    if (DAR == DAR_Failed && !FD->isInvalidDecl())
      Diag(RetExpr->getExprLoc(), diag::err_auto_fn_deduction_failure)
        << OrigResultType.getType() << RetExpr->getType();
    if (DAR != DAR_Succeeded)
      return true;
    // If a local type is part of the returned type, mark its fields as
    // referenced to suppress spurious unused-local-typedef warnings.
    LocalTypedefNameReferencer Referencer(*this);
    Referencer.TraverseType(RetExpr->getType());
  } else {
    //  In the case of a return with no operand, the initializer is considered
    //  to be void().
    //
    // Deduction here can only succeed if the return type is exactly 'cv auto'
    // or 'decltype(auto)', so just check for that case directly.
    if (!OrigResultType.getType()->getAs<AutoType>()) {
      Diag(ReturnLoc, diag::err_auto_fn_return_void_but_not_auto)
        << OrigResultType.getType();
      return true;
    }
    // We always deduce U = void in this case.
    Deduced = SubstAutoType(OrigResultType.getType(), Context.VoidTy);
    if (Deduced.isNull())
      return true;
  }
  //  If a function with a declared return type that contains a placeholder
  //  type has multiple return statements, the return type is deduced for each
  //  return statement. [...] if the type deduced is not the same in each
  //  deduction, the program is ill-formed.
  if (AT->isDeduced() && !FD->isInvalidDecl()) {
    AutoType *NewAT = Deduced->getContainedAutoType();
    if (!FD->isDependentContext() &&
        !Context.hasSameType(AT->getDeducedType(), NewAT->getDeducedType())) {
      const LambdaScopeInfo *LambdaSI = getCurLambda();
      if (LambdaSI && LambdaSI->HasImplicitReturnType) {
        Diag(ReturnLoc, diag::err_typecheck_missing_return_type_incompatible)
          << NewAT->getDeducedType() << AT->getDeducedType()
          << true /*IsLambda*/;
      } else {
        Diag(ReturnLoc, diag::err_auto_fn_different_deductions)
          << (AT->isDecltypeAuto() ? 1 : 0)
          << NewAT->getDeducedType() << AT->getDeducedType();
      }
      return true;
    }
  } else if (!FD->isInvalidDecl()) {
    // Update all declarations of the function to have the deduced return type.
    Context.adjustDeducedFunctionResultType(FD, Deduced);
  }
  return false;
}
/// Parser entry point for a 'return' statement: build it, then record NRVO
/// information on the enclosing scope.
StmtResult
Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                      Scope *CurScope) {
  StmtResult Result = BuildReturnStmt(ReturnLoc, RetValExp);
  if (Result.isInvalid())
    return Result;
  // Propagate the NRVO candidate (if any) to the scope; a return without a
  // candidate disables NRVO for the enclosing function scope.
  const VarDecl *Candidate = cast<ReturnStmt>(Result.get())->getNRVOCandidate();
  if (Candidate)
    CurScope->addNRVOCandidate(const_cast<VarDecl *>(Candidate));
  else
    CurScope->setNoNRVO();
  CheckJumpOutOfSEHFinally(*this, ReturnLoc, *CurScope->getFnParent());
  return Result;
}
/// Type-check a 'return' statement in the current function, Objective-C
/// method, or capturing scope, producing the ReturnStmt node.
StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
  // Check for unexpanded parameter packs.
  if (RetValExp && DiagnoseUnexpandedParameterPack(RetValExp))
    return StmtError();
  // Returns inside blocks/lambdas/captured regions follow their own rules.
  if (isa<CapturingScopeInfo>(getCurFunction()))
    return ActOnCapScopeReturnStmt(ReturnLoc, RetValExp);
  QualType FnRetType;
  QualType RelatedRetType;
  const AttrVec *Attrs = nullptr;
  bool isObjCMethod = false;
  if (const FunctionDecl *FD = getCurFunctionDecl()) {
    FnRetType = FD->getReturnType();
    if (FD->hasAttrs())
      Attrs = &FD->getAttrs();
    if (FD->isNoReturn())
      Diag(ReturnLoc, diag::warn_noreturn_function_has_return_expr)
        << FD->getDeclName();
  } else if (ObjCMethodDecl *MD = getCurMethodDecl()) {
    FnRetType = MD->getReturnType();
    isObjCMethod = true;
    if (MD->hasAttrs())
      Attrs = &MD->getAttrs();
    if (MD->hasRelatedResultType() && MD->getClassInterface()) {
      // In the implementation of a method with a related return type, the
      // type used to type-check the validity of return statements within the
      // method body is a pointer to the type of the class being implemented.
      RelatedRetType = Context.getObjCInterfaceType(MD->getClassInterface());
      RelatedRetType = Context.getObjCObjectPointerType(RelatedRetType);
    }
  } else // If we don't have a function/method context, bail.
    return StmtError();
  // FIXME: Add a flag to the ScopeInfo to indicate whether we're performing
  // deduction.
  if (getLangOpts().CPlusPlus14) {
    if (AutoType *AT = FnRetType->getContainedAutoType()) {
      FunctionDecl *FD = cast<FunctionDecl>(CurContext);
      if (DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) {
        FD->setInvalidDecl();
        return StmtError();
      } else {
        FnRetType = FD->getReturnType();
      }
    }
  }
  // HLSL Change begin - Diagnose mismatched globallycoherent attrs on return.
  if (RetValExp)
    DiagnoseGloballyCoherentMismatch(RetValExp, FnRetType, ReturnLoc);
  // HLSL Change end
  bool HasDependentReturnType = FnRetType->isDependentType();
  ReturnStmt *Result = nullptr;
  if (FnRetType->isVoidType()) {
    // Void return type: a value operand is at best an extension.
    if (RetValExp) {
      if (isa<InitListExpr>(RetValExp)) {
        // We simply never allow init lists as the return value of void
        // functions. This is compatible because this was never allowed before,
        // so there's no legacy code to deal with.
        NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
        int FunctionKind = 0;
        if (isa<ObjCMethodDecl>(CurDecl))
          FunctionKind = 1;
        else if (isa<CXXConstructorDecl>(CurDecl))
          FunctionKind = 2;
        else if (isa<CXXDestructorDecl>(CurDecl))
          FunctionKind = 3;
        Diag(ReturnLoc, diag::err_return_init_list)
          << CurDecl->getDeclName() << FunctionKind
          << RetValExp->getSourceRange();
        // Drop the expression.
        RetValExp = nullptr;
      } else if (!RetValExp->isTypeDependent()) {
        // C99 6.8.6.4p1 (ext_ since GCC warns)
        unsigned D = diag::ext_return_has_expr;
        if (RetValExp->getType()->isVoidType()) {
          NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
          if (isa<CXXConstructorDecl>(CurDecl) ||
              isa<CXXDestructorDecl>(CurDecl))
            D = diag::err_ctor_dtor_returns_void;
          else
            D = diag::ext_return_has_void_expr;
        }
        else {
          // A non-void expression in a void function: still fully check it,
          // then discard its value with a cast to void.
          ExprResult Result = RetValExp;
          Result = IgnoredValueConversions(Result.get());
          if (Result.isInvalid())
            return StmtError();
          RetValExp = Result.get();
          RetValExp = ImpCastExprToType(RetValExp,
                                        Context.VoidTy, CK_ToVoid).get();
        }
        // return of void in constructor/destructor is illegal in C++.
        if (D == diag::err_ctor_dtor_returns_void) {
          NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
          Diag(ReturnLoc, D)
            << CurDecl->getDeclName() << isa<CXXDestructorDecl>(CurDecl)
            << RetValExp->getSourceRange();
        }
        // return (some void expression); is legal in C++.
        else if (D != diag::ext_return_has_void_expr ||
                 !getLangOpts().CPlusPlus) {
          NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
          int FunctionKind = 0;
          if (isa<ObjCMethodDecl>(CurDecl))
            FunctionKind = 1;
          else if (isa<CXXConstructorDecl>(CurDecl))
            FunctionKind = 2;
          else if (isa<CXXDestructorDecl>(CurDecl))
            FunctionKind = 3;
          Diag(ReturnLoc, D)
            << CurDecl->getDeclName() << FunctionKind
            << RetValExp->getSourceRange();
        }
      }
      if (RetValExp) {
        // Wrap the (possibly adjusted) operand in a full-expression.
        ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc);
        if (ER.isInvalid())
          return StmtError();
        RetValExp = ER.get();
      }
    }
    Result = new (Context) ReturnStmt(ReturnLoc, RetValExp, nullptr);
  } else if (!RetValExp && !HasDependentReturnType) {
    // Non-void return type but a bare 'return;'.
    FunctionDecl *FD = getCurFunctionDecl();
    unsigned DiagID;
    if (getLangOpts().CPlusPlus11 && FD && FD->isConstexpr()) {
      // C++11 [stmt.return]p2
      DiagID = diag::err_constexpr_return_missing_expr;
      FD->setInvalidDecl();
    } else if (getLangOpts().C99) {
      // C99 6.8.6.4p1 (ext_ since GCC warns)
      DiagID = diag::ext_return_missing_expr;
    } else {
      // C90 6.6.6.4p4
      DiagID = diag::warn_return_missing_expr;
    }
    if (FD)
      Diag(ReturnLoc, DiagID) << FD->getIdentifier() << 0/*fn*/;
    else
      Diag(ReturnLoc, DiagID) << getCurMethodDecl()->getDeclName() << 1/*meth*/;
    Result = new (Context) ReturnStmt(ReturnLoc);
  } else {
    assert(RetValExp || HasDependentReturnType);
    const VarDecl *NRVOCandidate = nullptr;
    QualType RetType = RelatedRetType.isNull() ? FnRetType : RelatedRetType;
    // C99 6.8.6.4p3(136): The return statement is not an assignment. The
    // overlap restriction of subclause 6.5.16.1 does not apply to the case of
    // function return.
    // In C++ the return statement is handled via a copy initialization,
    // the C version of which boils down to CheckSingleAssignmentConstraints.
    if (RetValExp)
      NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, false);
    if (!HasDependentReturnType && !RetValExp->isTypeDependent()) {
      // we have a non-void function with an expression, continue checking
      InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
                                                                     RetType,
                                                      NRVOCandidate != nullptr);
      ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRVOCandidate,
                                                       RetType, RetValExp);
      if (Res.isInvalid()) {
        // FIXME: Clean up temporaries here anyway?
        return StmtError();
      }
      RetValExp = Res.getAs<Expr>();
      // If we have a related result type, we need to implicitly
      // convert back to the formal result type. We can't pretend to
      // initialize the result again --- we might end double-retaining
      // --- so instead we initialize a notional temporary.
      if (!RelatedRetType.isNull()) {
        Entity = InitializedEntity::InitializeRelatedResult(getCurMethodDecl(),
                                                            FnRetType);
        Res = PerformCopyInitialization(Entity, ReturnLoc, RetValExp);
        if (Res.isInvalid()) {
          // FIXME: Clean up temporaries here anyway?
          return StmtError();
        }
        RetValExp = Res.getAs<Expr>();
      }
      CheckReturnValExpr(RetValExp, FnRetType, ReturnLoc, isObjCMethod, Attrs,
                         getCurFunctionDecl());
    }
    if (RetValExp) {
      // Wrap the operand in a full-expression so temporaries are cleaned up.
      ExprResult ER = ActOnFinishFullExpr(RetValExp, ReturnLoc);
      if (ER.isInvalid())
        return StmtError();
      RetValExp = ER.get();
    }
    Result = new (Context) ReturnStmt(ReturnLoc, RetValExp, NRVOCandidate);
  }
  // If we need to check for the named return value optimization, save the
  // return statement in our scope for later processing.
  if (Result->getNRVOCandidate())
    FunctionScopes.back()->Returns.push_back(Result);
  return Result;
}
/// Build an Objective-C \@catch statement from its parsed pieces.
StmtResult
Sema::ActOnObjCAtCatchStmt(SourceLocation AtLoc,
                           SourceLocation RParen, Decl *Parm,
                           Stmt *Body) {
  // The exception declaration may be null (a catch-all handler).
  VarDecl *ExceptionVar = cast_or_null<VarDecl>(Parm);
  if (ExceptionVar && ExceptionVar->isInvalidDecl())
    return StmtError();
  return new (Context) ObjCAtCatchStmt(AtLoc, RParen, ExceptionVar, Body);
}
/// Build an Objective-C \@finally statement wrapping \p Body.
StmtResult
Sema::ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body) {
  ObjCAtFinallyStmt *Result = new (Context) ObjCAtFinallyStmt(AtLoc, Body);
  return Result;
}
/// Build an Objective-C \@try statement from the try body, its \@catch
/// clauses, and an optional \@finally clause.
StmtResult
Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
                         MultiStmtArg CatchStmts, Stmt *Finally) {
  // Diagnose use of ObjC exceptions when they have been disabled.
  if (!getLangOpts().ObjCExceptions)
    Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@try";
  // Jumps may not enter the protected scope of the try.
  getCurFunction()->setHasBranchProtectedScope();
  return ObjCAtTryStmt::Create(Context, AtLoc, Try, CatchStmts.data(),
                               CatchStmts.size(), Finally);
}
/// Build an Objective-C \@throw statement, checking that the (optional)
/// thrown expression has Objective-C object pointer type or 'void *'.
/// A null \p Throw designates a rethrow; its validity is checked by the
/// caller (ActOnObjCAtThrowStmt).
StmtResult Sema::BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw) {
  if (Throw) {
    // The operand undergoes lvalue-to-rvalue conversion and is finished as
    // a full-expression so temporaries are handled here.
    ExprResult Result = DefaultLvalueConversion(Throw);
    if (Result.isInvalid())
      return StmtError();
    Result = ActOnFinishFullExpr(Result.get());
    if (Result.isInvalid())
      return StmtError();
    Throw = Result.get();
    QualType ThrowType = Throw->getType();
    // Make sure the expression type is an ObjC pointer or "void *".
    if (!ThrowType->isDependentType() &&
        !ThrowType->isObjCObjectPointerType()) {
      const PointerType *PT = ThrowType->getAs<PointerType>();
      if (!PT || !PT->getPointeeType()->isVoidType())
        return StmtError(Diag(AtLoc, diag::error_objc_throw_expects_object)
                         << Throw->getType() << Throw->getSourceRange());
    }
  }
  return new (Context) ObjCAtThrowStmt(AtLoc, Throw);
}
/// Semantic entry point for \@throw. A \@throw with no operand is a
/// rethrow and is only valid lexically inside an \@catch clause.
StmtResult
Sema::ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
                           Scope *CurScope) {
  if (!getLangOpts().ObjCExceptions)
    Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@throw";
  if (!Throw) {
    // @throw without an expression designates a rethrow (which must occur
    // in the context of an @catch clause). Walk outward looking for one.
    Scope *Enclosing = CurScope;
    while (Enclosing) {
      if (Enclosing->isAtCatchScope())
        break;
      Enclosing = Enclosing->getParent();
    }
    if (!Enclosing)
      return StmtError(Diag(AtLoc, diag::error_rethrow_used_outside_catch));
  }
  return BuildObjCAtThrowStmt(AtLoc, Throw);
}
/// Check and convert the operand of an \@synchronized statement.
/// The operand must (after conversion) be an Objective-C object pointer
/// or 'void *'; in C++ a contextual conversion to an ObjC pointer is
/// attempted first.
ExprResult
Sema::ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand) {
  ExprResult result = DefaultLvalueConversion(operand);
  if (result.isInvalid())
    return ExprError();
  operand = result.get();
  // Make sure the expression type is an ObjC pointer or "void *".
  QualType type = operand->getType();
  if (!type->isDependentType() &&
      !type->isObjCObjectPointerType()) {
    const PointerType *pointerType = type->getAs<PointerType>();
    if (!pointerType || !pointerType->getPointeeType()->isVoidType()) {
      if (getLangOpts().CPlusPlus) {
        // A conversion to an ObjC pointer needs a complete class type.
        if (RequireCompleteType(atLoc, type,
                                diag::err_incomplete_receiver_type))
          return Diag(atLoc, diag::error_objc_synchronized_expects_object)
                   << type << operand->getSourceRange();
        // Try a user-defined conversion to an ObjC object pointer.
        ExprResult result = PerformContextuallyConvertToObjCPointer(operand);
        if (!result.isUsable())
          return Diag(atLoc, diag::error_objc_synchronized_expects_object)
                   << type << operand->getSourceRange();
        operand = result.get();
      } else {
        return Diag(atLoc, diag::error_objc_synchronized_expects_object)
                 << type << operand->getSourceRange();
      }
    }
  }
  // The operand to @synchronized is a full-expression.
  return ActOnFinishFullExpr(operand);
}
/// Build an \@synchronized statement from its (already checked) operand
/// and body.
StmtResult
Sema::ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SyncExpr,
                                  Stmt *SyncBody) {
  // We can't jump into or indirect-jump out of a @synchronized block.
  getCurFunction()->setHasBranchProtectedScope();
  ObjCAtSynchronizedStmt *Result =
      new (Context) ObjCAtSynchronizedStmt(AtLoc, SyncExpr, SyncBody);
  return Result;
}
/// ActOnCXXCatchBlock - Takes an exception declaration and a handler block
/// and creates a proper catch handler from them.
StmtResult
Sema::ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl,
                         Stmt *HandlerBlock) {
  // There's nothing to test that ActOnExceptionDecl didn't already test.
  VarDecl *ExceptionVar = cast_or_null<VarDecl>(ExDecl);
  return new (Context) CXXCatchStmt(CatchLoc, ExceptionVar, HandlerBlock);
}
/// Build an Objective-C \@autoreleasepool statement wrapping \p Body.
StmtResult
Sema::ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body) {
  // Jumps may not enter the pool's protected scope.
  getCurFunction()->setHasBranchProtectedScope();
  return new (Context) ObjCAutoreleasePoolStmt(AtLoc, Body);
}
namespace {
/// Key type used to detect duplicate/shadowed C++ catch handlers: a
/// canonical handler type with top-level pointer/reference and
/// cv-qualifiers stripped, plus a flag recording whether the handler
/// caught by pointer.
class CatchHandlerType {
  QualType QT;
  unsigned IsPointer : 1;

  // This is a special constructor to be used only with DenseMapInfo's
  // getEmptyKey() and getTombstoneKey() functions.
  friend struct llvm::DenseMapInfo<CatchHandlerType>;
  enum Unique { ForDenseMap };
  CatchHandlerType(QualType QT, Unique) : QT(QT), IsPointer(false) {}

public:
  /// Used when creating a CatchHandlerType from a handler type; will determine
  /// whether the type is a pointer or reference and will strip off the top
  /// level pointer and cv-qualifiers.
  CatchHandlerType(QualType Q) : QT(Q), IsPointer(false) {
    if (QT->isPointerType())
      IsPointer = true;

    // References compare equal to the referenced type; pointers to the
    // pointee (with IsPointer remembering the distinction).
    if (IsPointer || QT->isReferenceType())
      QT = QT->getPointeeType();
    QT = QT.getUnqualifiedType();
  }

  /// Used when creating a CatchHandlerType from a base class type; pretends the
  /// type passed in had the pointer qualifier, does not need to get an
  /// unqualified type.
  CatchHandlerType(QualType QT, bool IsPointer)
      : QT(QT), IsPointer(IsPointer) {}

  QualType underlying() const { return QT; }
  bool isPointer() const { return IsPointer; }

  friend bool operator==(const CatchHandlerType &LHS,
                         const CatchHandlerType &RHS) {
    // If the pointer qualification does not match, we can return early.
    if (LHS.IsPointer != RHS.IsPointer)
      return false;

    // Otherwise, check the underlying type without cv-qualifiers.
    return LHS.QT == RHS.QT;
  }
};
} // namespace
namespace llvm {
// DenseMapInfo specialization so CatchHandlerType can key a DenseMap of
// already-seen handlers. Hashing ignores the pointer flag; equality (via
// operator==) does not.
template <> struct DenseMapInfo<CatchHandlerType> {
  static CatchHandlerType getEmptyKey() {
    return CatchHandlerType(DenseMapInfo<QualType>::getEmptyKey(),
                            CatchHandlerType::ForDenseMap);
  }

  static CatchHandlerType getTombstoneKey() {
    return CatchHandlerType(DenseMapInfo<QualType>::getTombstoneKey(),
                            CatchHandlerType::ForDenseMap);
  }

  static unsigned getHashValue(const CatchHandlerType &Base) {
    return DenseMapInfo<QualType>::getHashValue(Base.underlying());
  }

  static bool isEqual(const CatchHandlerType &LHS,
                      const CatchHandlerType &RHS) {
    return LHS == RHS;
  }
};

// It's OK to treat CatchHandlerType as a POD type.
template <> struct isPodLike<CatchHandlerType> {
  static const bool value = true;
};
}
namespace {
/// Visitor used with CXXRecordDecl::lookupInBases to find a public base
/// class of a caught type that an *earlier* handler already handles
/// ([except.handle]p1 shadowing). Records the first matching handler and
/// its (canonical) base type.
class CatchTypePublicBases {
  ASTContext &Ctx;
  /// Map of handler types seen so far in this try block.
  const llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> &TypesToCheck;
  /// Whether the current handler catches by pointer; base types are given
  /// the same top-level pointerness for comparison.
  const bool CheckAgainstPointer;

  CXXCatchStmt *FoundHandler;
  CanQualType FoundHandlerType;

public:
  CatchTypePublicBases(
      ASTContext &Ctx,
      const llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> &T, bool C)
      : Ctx(Ctx), TypesToCheck(T), CheckAgainstPointer(C),
        FoundHandler(nullptr) {}

  CXXCatchStmt *getFoundHandler() const { return FoundHandler; }
  CanQualType getFoundHandlerType() const { return FoundHandlerType; }

  /// lookupInBases callback: returns true (stop) when a public base of the
  /// caught type matches an earlier handler.
  static bool FindPublicBasesOfType(const CXXBaseSpecifier *S, CXXBasePath &,
                                    void *User) {
    auto &PBOT = *reinterpret_cast<CatchTypePublicBases *>(User);
    if (S->getAccessSpecifier() == AccessSpecifier::AS_public) {
      CatchHandlerType Check(S->getType(), PBOT.CheckAgainstPointer);
      // Bind by reference; 'auto M = ...' would deep-copy the whole
      // DenseMap on every visited base specifier.
      const auto &M = PBOT.TypesToCheck;
      auto I = M.find(Check);
      if (I != M.end()) {
        PBOT.FoundHandler = I->second;
        PBOT.FoundHandlerType = PBOT.Ctx.getCanonicalType(S->getType());
        return true;
      }
    }
    return false;
  }
};
}
/// ActOnCXXTryBlock - Takes a try compound-statement and a number of
/// handlers and creates a try statement from them.
///
/// Also diagnoses: exceptions disabled, 'try' inside an OpenMP simd region,
/// mixing C++ try with SEH __try, a catch-all that is not last, and
/// handlers shadowed by an earlier handler (exact duplicate or a handler
/// for a public base class).
StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
                                  ArrayRef<Stmt *> Handlers) {
  // Don't report an error if 'try' is used in system headers.
  if (!getLangOpts().CXXExceptions &&
      !getSourceManager().isInSystemHeader(TryLoc))
    Diag(TryLoc, diag::err_exceptions_disabled) << "try";

  if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope())
    Diag(TryLoc, diag::err_omp_simd_region_cannot_use_stmt) << "try";

  sema::FunctionScopeInfo *FSI = getCurFunction();

  // C++ try is incompatible with SEH __try.
  if (!getLangOpts().Borland && FSI->FirstSEHTryLoc.isValid()) {
    Diag(TryLoc, diag::err_mixing_cxx_try_seh_try);
    Diag(FSI->FirstSEHTryLoc, diag::note_conflicting_try_here) << "'__try'";
  }

  const unsigned NumHandlers = Handlers.size();
  assert(!Handlers.empty() &&
         "The parser shouldn't call this if there are no handlers.");

  // Handler types seen so far, used to diagnose shadowed handlers.
  llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> HandledTypes;
  for (unsigned i = 0; i < NumHandlers; ++i) {
    CXXCatchStmt *H = cast<CXXCatchStmt>(Handlers[i]);

    // Diagnose when the handler is a catch-all handler, but it isn't the last
    // handler for the try block. [except.handle]p5. Also, skip exception
    // declarations that are invalid, since we can't usefully report on them.
    if (!H->getExceptionDecl()) {
      if (i < NumHandlers - 1)
        return StmtError(Diag(H->getLocStart(), diag::err_early_catch_all));
      continue;
    } else if (H->getExceptionDecl()->isInvalidDecl())
      continue;

    // Walk the type hierarchy to diagnose when this type has already been
    // handled (duplication), or cannot be handled (derivation inversion). We
    // ignore top-level cv-qualifiers, per [except.handle]p3
    CatchHandlerType HandlerCHT =
        (QualType)Context.getCanonicalType(H->getCaughtType());

    // We can ignore whether the type is a reference or a pointer; we need the
    // underlying declaration type in order to get at the underlying record
    // decl, if there is one.
    QualType Underlying = HandlerCHT.underlying();
    if (auto *RD = Underlying->getAsCXXRecordDecl()) {
      if (!RD->hasDefinition())
        continue;
      // Check that none of the public, unambiguous base classes are in the
      // map ([except.handle]p1). Give the base classes the same pointer
      // qualification as the original type we are basing off of. This allows
      // comparison against the handler type using the same top-level pointer
      // as the original type.
      CXXBasePaths Paths;
      Paths.setOrigin(RD);
      CatchTypePublicBases CTPB(Context, HandledTypes, HandlerCHT.isPointer());
      if (RD->lookupInBases(CatchTypePublicBases::FindPublicBasesOfType, &CTPB,
                            Paths)) {
        const CXXCatchStmt *Problem = CTPB.getFoundHandler();
        // Only warn if the base is reachable unambiguously.
        if (!Paths.isAmbiguous(CTPB.getFoundHandlerType())) {
          Diag(H->getExceptionDecl()->getTypeSpecStartLoc(),
               diag::warn_exception_caught_by_earlier_handler)
              << H->getCaughtType();
          Diag(Problem->getExceptionDecl()->getTypeSpecStartLoc(),
               diag::note_previous_exception_handler)
              << Problem->getCaughtType();
        }
      }
    }

    // Add the type the list of ones we have handled; diagnose if we've already
    // handled it.
    auto R = HandledTypes.insert(std::make_pair(H->getCaughtType(), H));
    if (!R.second) {
      const CXXCatchStmt *Problem = R.first->second;
      Diag(H->getExceptionDecl()->getTypeSpecStartLoc(),
           diag::warn_exception_caught_by_earlier_handler)
          << H->getCaughtType();
      Diag(Problem->getExceptionDecl()->getTypeSpecStartLoc(),
           diag::note_previous_exception_handler)
          << Problem->getCaughtType();
    }
  }

  // Record the try for later C++-try/SEH-try conflict diagnostics.
  FSI->setHasCXXTry(TryLoc);

  return CXXTryStmt::Create(Context, TryLoc, TryBlock, Handlers);
}
/// Build a SEH __try statement (or a C++ 'try' lowered via SEH when
/// \p IsCXXTry). Diagnoses mixing with C++ try (unless Borland mode),
/// use outside a function, and unsupported targets.
StmtResult Sema::ActOnSEHTryBlock(bool IsCXXTry, SourceLocation TryLoc,
                                  Stmt *TryBlock, Stmt *Handler) {
  assert(TryBlock && Handler);

  sema::FunctionScopeInfo *FSI = getCurFunction();

  // SEH __try is incompatible with C++ try. Borland appears to support this,
  // however.
  if (!getLangOpts().Borland) {
    if (FSI->FirstCXXTryLoc.isValid()) {
      Diag(TryLoc, diag::err_mixing_cxx_try_seh_try);
      Diag(FSI->FirstCXXTryLoc, diag::note_conflicting_try_here) << "'try'";
    }
  }

  FSI->setHasSEHTry(TryLoc);

  // Reject __try in Obj-C methods, blocks, and captured decls, since we don't
  // track if they use SEH.
  DeclContext *DC = CurContext;
  while (DC && !DC->isFunctionOrMethod())
    DC = DC->getParent();

  FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(DC);
  if (FD)
    FD->setUsesSEHTry(true);
  else
    Diag(TryLoc, diag::err_seh_try_outside_functions);

  // Reject __try on unsupported targets.
  if (!Context.getTargetInfo().isSEHTrySupported())
    Diag(TryLoc, diag::err_seh_try_unsupported);

  return SEHTryStmt::Create(Context, IsCXXTry, TryLoc, TryBlock, Handler);
}
/// Build a SEH __except block; the filter expression must be integral.
StmtResult
Sema::ActOnSEHExceptBlock(SourceLocation Loc,
                          Expr *FilterExpr,
                          Stmt *Block) {
  assert(FilterExpr && Block);

  QualType FilterTy = FilterExpr->getType();
  if (!FilterTy->isIntegerType())
    return StmtError(Diag(FilterExpr->getExprLoc(),
                          diag::err_filter_expression_integral)
                     << FilterTy);

  return SEHExceptStmt::Create(Context, Loc, FilterExpr, Block);
}
/// Note that we are entering a __finally block, remembering its scope so
/// jumps out of it can be diagnosed.
void Sema::ActOnStartSEHFinallyBlock() {
  CurrentSEHFinally.emplace_back(CurScope);
}
/// Unwind the __finally-scope stack when parsing of the block fails.
void Sema::ActOnAbortSEHFinallyBlock() { CurrentSEHFinally.pop_back(); }
/// Finish a successfully parsed __finally block, popping its scope entry
/// and building the AST node.
StmtResult Sema::ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block) {
  assert(Block);
  CurrentSEHFinally.pop_back();
  SEHFinallyStmt *Result = SEHFinallyStmt::Create(Context, Loc, Block);
  return Result;
}
/// Build a '__leave' statement, which must appear lexically inside a
/// __try block.
StmtResult
Sema::ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope) {
  // Search outward for the enclosing __try scope.
  Scope *EnclosingTry = CurScope;
  for (; EnclosingTry; EnclosingTry = EnclosingTry->getParent())
    if (EnclosingTry->isSEHTryScope())
      break;
  if (!EnclosingTry)
    return StmtError(Diag(Loc, diag::err_ms___leave_not_in___try));
  // __leave may not jump out of an intervening __finally.
  CheckJumpOutOfSEHFinally(*this, Loc, *EnclosingTry);

  return new (Context) SEHLeaveStmt(Loc);
}
/// Build a Microsoft __if_exists/__if_not_exists statement whose name is
/// dependent and must be resolved at instantiation time.
StmtResult Sema::BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
                                            bool IsIfExists,
                                            NestedNameSpecifierLoc QualifierLoc,
                                            DeclarationNameInfo NameInfo,
                                            Stmt *Nested) {
  CompoundStmt *Body = cast<CompoundStmt>(Nested);
  return new (Context)
      MSDependentExistsStmt(KeywordLoc, IsIfExists, QualifierLoc, NameInfo,
                            Body);
}
/// Parser entry point for dependent __if_exists/__if_not_exists: converts
/// the parsed scope/name into semantic form and delegates to the builder.
StmtResult Sema::ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
                                            bool IsIfExists,
                                            CXXScopeSpec &SS,
                                            UnqualifiedId &Name,
                                            Stmt *Nested) {
  NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
  DeclarationNameInfo NameInfo = GetNameFromUnqualifiedId(Name);
  return BuildMSDependentExistsStmt(KeywordLoc, IsIfExists, QualifierLoc,
                                    NameInfo, Nested);
}
/// Create the implicit record type that holds the captures of a
/// CapturedStmt, plus the CapturedDecl (with \p NumParams parameters)
/// that owns the outlined body. On return \p CD is set; the record's
/// definition is started but not completed, and both decls are added to
/// the nearest function/record/file context.
RecordDecl*
Sema::CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc,
                                   unsigned NumParams) {
  // Find a context that can own the record (skip e.g. enum contexts).
  DeclContext *DC = CurContext;
  while (!(DC->isFunctionOrMethod() || DC->isRecord() || DC->isFileContext()))
    DC = DC->getParent();

  RecordDecl *RD = nullptr;
  if (getLangOpts().CPlusPlus)
    RD = CXXRecordDecl::Create(Context, TTK_Struct, DC, Loc, Loc,
                               /*Id=*/nullptr);
  else
    RD = RecordDecl::Create(Context, TTK_Struct, DC, Loc, Loc, /*Id=*/nullptr);

  RD->setCapturedRecord();
  DC->addDecl(RD);
  RD->setImplicit();
  RD->startDefinition();

  // The first parameter of the outlined function is always the context
  // pointer into this record.
  assert(NumParams > 0 && "CapturedStmt requires context parameter");
  CD = CapturedDecl::Create(Context, CurContext, NumParams);
  DC->addDecl(CD);
  return RD;
}
/// Translate the semantic capture candidates recorded while analyzing a
/// captured region into the parallel Capture/initializer lists that
/// CapturedStmt::Create expects. Only 'this', VLA-type, and by-reference
/// captures are supported; VLA captures have no initializer expression.
static void buildCapturedStmtCaptureList(
    SmallVectorImpl<CapturedStmt::Capture> &Captures,
    SmallVectorImpl<Expr *> &CaptureInits,
    ArrayRef<CapturingScopeInfo::Capture> Candidates) {
  // Range-for over the candidates; each produces exactly one entry in
  // both output lists (kept in lock-step).
  for (const CapturingScopeInfo::Capture &Cap : Candidates) {
    if (Cap.isThisCapture()) {
      Captures.push_back(CapturedStmt::Capture(Cap.getLocation(),
                                               CapturedStmt::VCK_This));
      CaptureInits.push_back(Cap.getInitExpr());
      continue;
    }
    if (Cap.isVLATypeCapture()) {
      // VLA type captures carry no initializer.
      Captures.push_back(
          CapturedStmt::Capture(Cap.getLocation(), CapturedStmt::VCK_VLAType));
      CaptureInits.push_back(nullptr);
      continue;
    }

    assert(Cap.isReferenceCapture() &&
           "non-reference capture not yet implemented");

    Captures.push_back(CapturedStmt::Capture(Cap.getLocation(),
                                             CapturedStmt::VCK_ByRef,
                                             Cap.getVariable()));
    CaptureInits.push_back(Cap.getInitExpr());
  }
}
/// Begin analysis of a captured region with \p NumParams parameters; only
/// the implicit '__context' parameter (a pointer to the capture record)
/// is created here, at parameter slot 0.
void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                                    CapturedRegionKind Kind,
                                    unsigned NumParams) {
  CapturedDecl *CD = nullptr;
  RecordDecl *RD = CreateCapturedStmtRecordDecl(CD, Loc, NumParams);

  // Build the context parameter
  DeclContext *DC = CapturedDecl::castToDeclContext(CD);
  IdentifierInfo *ParamName = &Context.Idents.get("__context");
  QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD));
  ImplicitParamDecl *Param
    = ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType);
  DC->addDecl(Param);

  CD->setContextParam(0, Param);

  // Enter the capturing scope for this captured region.
  PushCapturedRegionScope(CurScope, CD, RD, Kind);

  // CurScope may be null during template instantiation; then the
  // CapturedDecl itself becomes the current context.
  if (CurScope)
    PushDeclContext(CurScope, CD);
  else
    CurContext = CD;

  PushExpressionEvaluationContext(PotentiallyEvaluated);
}
/// Begin analysis of a captured region with explicitly named parameters.
/// A (name, null-type) entry in \p Params designates the implicit
/// '__context' parameter, whose type is a pointer to the capture record.
void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                                    CapturedRegionKind Kind,
                                    ArrayRef<CapturedParamNameType> Params) {
  CapturedDecl *CD = nullptr;
  RecordDecl *RD = CreateCapturedStmtRecordDecl(CD, Loc, Params.size());

  // Build the context parameter
  DeclContext *DC = CapturedDecl::castToDeclContext(CD);
  bool ContextIsFound = false;
  unsigned ParamNum = 0;
  for (ArrayRef<CapturedParamNameType>::iterator I = Params.begin(),
                                                 E = Params.end();
       I != E; ++I, ++ParamNum) {
    if (I->second.isNull()) {
      // A null type marks the '__context' slot; at most one is allowed.
      assert(!ContextIsFound &&
             "null type has been found already for '__context' parameter");
      IdentifierInfo *ParamName = &Context.Idents.get("__context");
      QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD));
      ImplicitParamDecl *Param
        = ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType);
      DC->addDecl(Param);
      CD->setContextParam(ParamNum, Param);
      ContextIsFound = true;
    } else {
      IdentifierInfo *ParamName = &Context.Idents.get(I->first);
      ImplicitParamDecl *Param
        = ImplicitParamDecl::Create(Context, DC, Loc, ParamName, I->second);
      DC->addDecl(Param);
      CD->setParam(ParamNum, Param);
    }
  }
  // Callers are expected to always supply a '__context' entry (asserted in
  // debug builds); the block below is a release-mode fallback that appends
  // one if it was missing.
  assert(ContextIsFound && "no null type for '__context' parameter");
  if (!ContextIsFound) {
    // Add __context implicitly if it is not specified.
    IdentifierInfo *ParamName = &Context.Idents.get("__context");
    QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD));
    ImplicitParamDecl *Param =
        ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType);
    DC->addDecl(Param);
    CD->setContextParam(ParamNum, Param);
  }
  // Enter the capturing scope for this captured region.
  PushCapturedRegionScope(CurScope, CD, RD, Kind);

  // CurScope may be null during template instantiation.
  if (CurScope)
    PushDeclContext(CurScope, CD);
  else
    CurContext = CD;

  PushExpressionEvaluationContext(PotentiallyEvaluated);
}
/// Abandon analysis of the current captured region after an error:
/// discards pending cleanups, marks the capture record invalid (while
/// still completing its field list), and pops the scopes pushed by
/// ActOnCapturedRegionStart.
void Sema::ActOnCapturedRegionError() {
  DiscardCleanupsInEvaluationContext();
  PopExpressionEvaluationContext();

  CapturedRegionScopeInfo *RSI = getCurCapturedRegion();
  RecordDecl *Record = RSI->TheRecordDecl;
  Record->setInvalidDecl();

  // Finish the record definition even on error so later passes see a
  // complete (if invalid) type.
  SmallVector<Decl*, 4> Fields(Record->fields());
  ActOnFields(/*Scope=*/nullptr, Record->getLocation(), Record, Fields,
              SourceLocation(), SourceLocation(), /*AttributeList=*/nullptr);

  PopDeclContext();
  PopFunctionScopeInfo();
}
/// Finish analysis of a captured region: materialize the capture list,
/// build the CapturedStmt around body \p S, attach the outlined body to
/// the CapturedDecl, complete the capture record, and pop the scopes
/// pushed by ActOnCapturedRegionStart.
StmtResult Sema::ActOnCapturedRegionEnd(Stmt *S) {
  CapturedRegionScopeInfo *RSI = getCurCapturedRegion();

  SmallVector<CapturedStmt::Capture, 4> Captures;
  SmallVector<Expr *, 4> CaptureInits;
  buildCapturedStmtCaptureList(Captures, CaptureInits, RSI->Captures);

  CapturedDecl *CD = RSI->TheCapturedDecl;
  RecordDecl *RD = RSI->TheRecordDecl;

  CapturedStmt *Res = CapturedStmt::Create(getASTContext(), S,
                                           RSI->CapRegionKind, Captures,
                                           CaptureInits, CD, RD);

  CD->setBody(Res->getCapturedStmt());
  RD->completeDefinition();

  DiscardCleanupsInEvaluationContext();
  PopExpressionEvaluationContext();

  PopDeclContext();
  PopFunctionScopeInfo();

  return Res;
}
// HLSL Change: adding hlsl support
/// Build an HLSL 'discard' statement.
StmtResult Sema::ActOnHlslDiscardStmt(SourceLocation Loc) {
  return new (Context) DiscardStmt(Loc);
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Sema/AttributeList.cpp | //===--- AttributeList.cpp --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the AttributeList class implementation
//
//===----------------------------------------------------------------------===//
#include "clang/Sema/AttributeList.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
/// Allocate an IdentifierLoc (identifier + its source location) out of the
/// ASTContext so it lives as long as the AST.
IdentifierLoc *IdentifierLoc::create(ASTContext &Ctx, SourceLocation Loc,
                                     IdentifierInfo *Ident) {
  IdentifierLoc *IL = new (Ctx) IdentifierLoc;
  IL->Loc = Loc;
  IL->Ident = Ident;
  return IL;
}
/// Compute the number of bytes this attribute node was allocated with, so
/// AttributeFactory can place it on the matching free list.
size_t AttributeList::allocated_size() const {
  // The specialized attribute kinds use fixed-size allocations.
  if (IsAvailability)
    return AttributeFactory::AvailabilityAllocSize;
  if (IsTypeTagForDatatype)
    return AttributeFactory::TypeTagForDatatypeAllocSize;
  if (IsProperty)
    return AttributeFactory::PropertyAllocSize;
  // Generic attributes store their arguments inline after the node.
  return sizeof(AttributeList) + NumArgs * sizeof(ArgsUnion);
}
// Pre-size the free-list table for the common small allocation sizes;
// entries are null AttributeList pointers, so resize() is just a memset.
AttributeFactory::AttributeFactory() {
  // Go ahead and configure all the inline capacity. This is just a memset.
  FreeLists.resize(InlineFreeListsCapacity);
}
// The BumpPtrAllocator member releases all attribute memory in bulk.
AttributeFactory::~AttributeFactory() {}
/// Map an allocation size to its free-list slot: sizes grow from
/// sizeof(AttributeList) in pointer-size increments, so the index is the
/// number of extra pointer-sized words.
static size_t getFreeListIndexForSize(size_t size) {
  assert(size >= sizeof(AttributeList));
  assert((size % sizeof(void*)) == 0);
  return ((size - sizeof(AttributeList)) / sizeof(void*));
}
/// Allocate \p size bytes for an attribute node, preferring to recycle a
/// previously reclaimed node of the same size over hitting the bump
/// allocator.
void *AttributeFactory::allocate(size_t size) {
  // Check for a previously reclaimed attribute.
  size_t index = getFreeListIndexForSize(size);
  if (index < FreeLists.size()) {
    if (AttributeList *attr = FreeLists[index]) {
      // Pop the head of the matching free list.
      FreeLists[index] = attr->NextInPool;
      return attr;
    }
  }

  // Otherwise, allocate something new.
  return Alloc.Allocate(size, llvm::AlignOf<AttributeFactory>::Alignment);
}
/// Return every attribute on the pool list headed by \p cur to the
/// per-size free lists for reuse by later allocate() calls.
void AttributeFactory::reclaimPool(AttributeList *cur) {
  assert(cur && "reclaiming empty pool!");
  while (cur) {
    // Save the link now; pushing 'cur' onto a free list overwrites it.
    AttributeList *next = cur->NextInPool;

    size_t freeListIndex = getFreeListIndexForSize(cur->allocated_size());

    // Expand FreeLists to the appropriate size, if required.
    if (freeListIndex >= FreeLists.size())
      FreeLists.resize(freeListIndex + 1);

    // Push 'cur' onto the head of the matching free list.
    cur->NextInPool = FreeLists[freeListIndex];
    FreeLists[freeListIndex] = cur;

    cur = next;
  }
}
/// Absorb another pool's attribute list into this pool.
void AttributePool::takePool(AttributeList *pool) {
  assert(pool);

  // Fast path: this pool is empty, so just adopt the incoming list.
  if (!Head) {
    Head = pool;
    return;
  }

  // Otherwise prepend the incoming pool node-by-node, reversing it onto
  // Head. This optimizes for the pattern of pulling a lot of pools into a
  // single pool.
  while (pool) {
    AttributeList *rest = pool->NextInPool;
    pool->NextInPool = Head;
    Head = pool;
    pool = rest;
  }
}
#include "clang/Sema/AttrParsedAttrKinds.inc"
/// Resolve a parsed attribute name (plus optional scope and spelling
/// syntax) to its semantic Kind, using the tablegen-generated getAttrKind.
/// GNU-style '__name__' spellings are normalized to 'name', and C++11
/// attributes are looked up with an explicit '::' scope separator.
AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name,
                                           const IdentifierInfo *ScopeName,
                                           Syntax SyntaxUsed) {
  StringRef AttrName = Name->getName();

  SmallString<64> FullName;
  if (ScopeName)
    FullName += ScopeName->getName();

  // Normalize the attribute name, __foo__ becomes foo. This is only allowable
  // for GNU attributes.
  bool IsGNU = SyntaxUsed == AS_GNU || (SyntaxUsed == AS_CXX11 &&
                                        FullName == "gnu");
  if (IsGNU && AttrName.size() >= 4 && AttrName.startswith("__") &&
      AttrName.endswith("__"))
    AttrName = AttrName.slice(2, AttrName.size() - 2);

  // Ensure that in the case of C++11 attributes, we look for '::foo' if it is
  // unscoped.
  if (ScopeName || SyntaxUsed == AS_CXX11)
    FullName += "::";
  FullName += AttrName;

  // HLSL Change Starts: - support case-insensitive variant
#if 1
  // First try an exact match; on failure, retry with the lowercased name
  // so HLSL's case-insensitive attribute spellings resolve.
  AttributeList::Kind Result = ::getAttrKind(FullName, SyntaxUsed);
  if (Result == AttributeList::UnknownAttribute) {
    std::string lower = FullName.str().lower();
    Result = ::getAttrKind(StringRef(lower), SyntaxUsed);
  }
  return Result;
#else
  return ::getAttrKind(FullName, SyntaxUsed);
#endif
  // HLSL Change Ends
}
/// Return this attribute's index into its spelling list. The body is
/// generated by tablegen and expands in terms of the locals 'Name' and
/// 'Scope' below, so their exact names must not change.
unsigned AttributeList::getAttributeSpellingListIndex() const {
  // Both variables will be used in tablegen generated
  // attribute spell list index matching code.
  StringRef Name = AttrName->getName();
  StringRef Scope = ScopeName ? ScopeName->getName() : "";

#include "clang/Sema/AttrSpellingListIndex.inc"

}
/// Per-attribute metadata record; one instance per AttributeList::Kind is
/// generated by tablegen into AttrInfoMap (AttrParsedAttrImpl.inc).
struct ParsedAttrInfo {
  unsigned NumArgs : 4;          // Required argument count.
  unsigned OptArgs : 4;          // Optional argument count (15 => variadic).
  unsigned HasCustomParsing : 1; // Attribute parses its own arguments.
  unsigned IsTargetSpecific : 1; // Only valid for certain targets.
  unsigned IsType : 1;           // Applies to types rather than decls.
  unsigned IsKnownToGCC : 1;     // GCC also understands this spelling.

  // Generated callbacks: subject appertainment check, language-option
  // check, target check, and spelling-index-to-semantic-spelling mapping.
  bool (*DiagAppertainsToDecl)(Sema &S, const AttributeList &Attr,
                               const Decl *);
  bool (*DiagLangOpts)(Sema &S, const AttributeList &Attr);
  bool (*ExistsInTarget)(const llvm::Triple &T);
  unsigned (*SpellingIndexToSemanticSpelling)(const AttributeList &Attr);
};
namespace {
#include "clang/Sema/AttrParsedAttrImpl.inc"
}
/// Look up the tablegen-generated metadata record for a parsed attribute.
static const ParsedAttrInfo &getInfo(const AttributeList &A) {
  return AttrInfoMap[A.getKind()];
}

// The accessors below are thin forwarding wrappers over the per-kind
// ParsedAttrInfo metadata record.

/// Minimum (required) number of arguments.
unsigned AttributeList::getMinArgs() const {
  return getInfo(*this).NumArgs;
}

/// Maximum number of arguments (required + optional).
unsigned AttributeList::getMaxArgs() const {
  return getMinArgs() + getInfo(*this).OptArgs;
}

bool AttributeList::hasCustomParsing() const {
  return getInfo(*this).HasCustomParsing;
}

/// Check (and diagnose) that this attribute may appertain to decl \p D.
bool AttributeList::diagnoseAppertainsTo(Sema &S, const Decl *D) const {
  return getInfo(*this).DiagAppertainsToDecl(S, *this, D);
}

/// Check (and diagnose) that the active language options permit this
/// attribute.
bool AttributeList::diagnoseLangOpts(Sema &S) const {
  return getInfo(*this).DiagLangOpts(S, *this);
}

bool AttributeList::isTargetSpecificAttr() const {
  return getInfo(*this).IsTargetSpecific;
}

bool AttributeList::isTypeAttr() const {
  return getInfo(*this).IsType;
}

bool AttributeList::existsInTarget(const llvm::Triple &T) const {
  return getInfo(*this).ExistsInTarget(T);
}

bool AttributeList::isKnownToGCC() const {
  return getInfo(*this).IsKnownToGCC;
}

/// Map this attribute's spelling-list index to its semantic spelling enum.
unsigned AttributeList::getSemanticSpelling() const {
  return getInfo(*this).SpellingIndexToSemanticSpelling(*this);
}

bool AttributeList::hasVariadicArg() const {
  // If the attribute has the maximum number of optional arguments, we will
  // claim that as being variadic. If we someday get an attribute that
  // legitimately bumps up against that maximum, we can use another bit to track
  // whether it's truly variadic or not.
  return getInfo(*this).OptArgs == 15;
}
|
0 | repos/DirectXShaderCompiler/tools/clang/lib | repos/DirectXShaderCompiler/tools/clang/lib/Sema/JumpDiagnostics.cpp | //===--- JumpDiagnostics.cpp - Protected scope jump analysis ------*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the JumpScopeChecker class, which is used to diagnose
// jumps that enter a protected scope in an invalid way.
//
//===----------------------------------------------------------------------===//
#include "clang/Sema/SemaInternal.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "llvm/ADT/BitVector.h"
using namespace clang;
namespace {
/// JumpScopeChecker - This object is used by Sema to diagnose invalid jumps
/// into VLA and other protected scopes. For example, this rejects:
/// goto L;
/// int a[n];
/// L:
///
class JumpScopeChecker {
Sema &S;
/// Permissive - True when recovering from errors, in which case precautions
/// are taken to handle incomplete scope information.
const bool Permissive;
/// GotoScope - This is a record that we use to keep track of all of the
/// scopes that are introduced by VLAs and other things that scope jumps like
/// gotos. This scope tree has nothing to do with the source scope tree,
/// because you can have multiple VLA scopes per compound statement, and most
/// compound statements don't introduce any scopes.
struct GotoScope {
/// ParentScope - The index in ScopeMap of the parent scope. This is 0 for
/// the parent scope is the function body.
unsigned ParentScope;
/// InDiag - The note to emit if there is a jump into this scope.
unsigned InDiag;
/// OutDiag - The note to emit if there is an indirect jump out
/// of this scope. Direct jumps always clean up their current scope
/// in an orderly way.
unsigned OutDiag;
/// Loc - Location to emit the diagnostic.
SourceLocation Loc;
GotoScope(unsigned parentScope, unsigned InDiag, unsigned OutDiag,
SourceLocation L)
: ParentScope(parentScope), InDiag(InDiag), OutDiag(OutDiag), Loc(L) {}
};
/// All protected scopes, in creation order; entry 0 is the function body.
SmallVector<GotoScope, 48> Scopes;
/// Scope index each label/goto statement was seen in.
llvm::DenseMap<Stmt*, unsigned> LabelAndGotoScopes;
/// Direct jumps (goto, switch cases, etc.) to verify.
SmallVector<Stmt*, 16> Jumps;
/// Indirect gotos and their possible label targets, verified separately.
SmallVector<IndirectGotoStmt*, 4> IndirectJumps;
SmallVector<LabelDecl*, 4> IndirectJumpTargets;
public:
JumpScopeChecker(Stmt *Body, Sema &S);
private:
void BuildScopeInformation(Decl *D, unsigned &ParentScope);
void BuildScopeInformation(VarDecl *D, const BlockDecl *BDecl,
unsigned &ParentScope);
void BuildScopeInformation(Stmt *S, unsigned &origParentScope);
void VerifyJumps();
void VerifyIndirectJumps();
void NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes);
void DiagnoseIndirectJump(IndirectGotoStmt *IG, unsigned IGScope,
LabelDecl *Target, unsigned TargetScope);
void CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
unsigned JumpDiag, unsigned JumpDiagWarning,
unsigned JumpDiagCXX98Compat);
void CheckGotoStmt(GotoStmt *GS);
unsigned GetDeepestCommonScope(unsigned A, unsigned B);
};
} // end anonymous namespace
#define CHECK_PERMISSIVE(x) (assert(Permissive || !(x)), (Permissive && (x)))
/// Construct the checker and immediately run the analysis: build the scope
/// tree for \p Body, then verify all direct and indirect jumps against it.
JumpScopeChecker::JumpScopeChecker(Stmt *Body, Sema &s)
    : S(s), Permissive(s.hasAnyUnrecoverableErrorsInThisFunction()) {
  // Add a scope entry for function scope.
  Scopes.push_back(GotoScope(~0U, ~0U, ~0U, SourceLocation()));

  // Build information for the top level compound statement, so that we have a
  // defined scope record for every "goto" and label.
  unsigned BodyParentScope = 0;
  BuildScopeInformation(Body, BodyParentScope);

  // Check that all jumps we saw are kosher.
  VerifyJumps();
  VerifyIndirectJumps();
}
/// GetDeepestCommonScope - Finds the innermost scope enclosing the
/// two scopes.
unsigned JumpScopeChecker::GetDeepestCommonScope(unsigned A, unsigned B) {
  // Inner scopes are created after outer scopes and therefore have higher
  // indices, so repeatedly hoist whichever scope is deeper to its parent
  // until the two meet.
  while (A != B) {
    unsigned &Deeper = (A < B) ? B : A;
    assert(Scopes[Deeper].ParentScope < Deeper);
    Deeper = Scopes[Deeper].ParentScope;
  }
  return A;
}
typedef std::pair<unsigned,unsigned> ScopePair;
/// GetDiagForGotoScopeDecl - If this decl induces a new goto scope, return a
/// diagnostic that should be emitted if control goes over it. If not, return 0.
///
/// The pair is (InDiag, OutDiag): the note for jumping *into* the scope and
/// the note for indirectly jumping *out* of it, respectively; either may be 0.
static ScopePair GetDiagForGotoScopeDecl(Sema &S, const Decl *D) {
  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    unsigned InDiag = 0;
    unsigned OutDiag = 0;

    // Jumping over a VLA declaration is always protected.
    if (VD->getType()->isVariablyModifiedType())
      InDiag = diag::note_protected_by_vla;

    // __block and cleanup-attribute variables protect in both directions.
    if (VD->hasAttr<BlocksAttr>())
      return ScopePair(diag::note_protected_by___block,
                       diag::note_exits___block);

    if (VD->hasAttr<CleanupAttr>())
      return ScopePair(diag::note_protected_by_cleanup,
                       diag::note_exits_cleanup);

    if (VD->hasLocalStorage()) {
      switch (VD->getType().isDestructedType()) {
      case QualType::DK_objc_strong_lifetime:
      case QualType::DK_objc_weak_lifetime:
        return ScopePair(diag::note_protected_by_objc_ownership,
                         diag::note_exits_objc_ownership);

      case QualType::DK_cxx_destructor:
        // Non-trivially-destructed locals protect indirect jumps out.
        OutDiag = diag::note_exits_dtor;
        break;

      case QualType::DK_none:
        break;
      }
    }

    const Expr *Init = VD->getInit();
    if (S.Context.getLangOpts().CPlusPlus && VD->hasLocalStorage() && Init) {
      // C++11 [stmt.dcl]p3:
      //   A program that jumps from a point where a variable with automatic
      //   storage duration is not in scope to a point where it is in scope
      //   is ill-formed unless the variable has scalar type, class type with
      //   a trivial default constructor and a trivial destructor, a
      //   cv-qualified version of one of these types, or an array of one of
      //   the preceding types and is declared without an initializer.

      // C++03 [stmt.dcl.p3:
      //   A program that jumps from a point where a local variable
      //   with automatic storage duration is not in scope to a point
      //   where it is in scope is ill-formed unless the variable has
      //   POD type and is declared without an initializer.

      InDiag = diag::note_protected_by_variable_init;

      // For a variable of (array of) class type declared without an
      // initializer, we will have call-style initialization and the initializer
      // will be the CXXConstructExpr with no intervening nodes.
      if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
        const CXXConstructorDecl *Ctor = CCE->getConstructor();
        if (Ctor->isTrivial() && Ctor->isDefaultConstructor() &&
            VD->getInitStyle() == VarDecl::CallInit) {
          // Refine the note: the hazard is the destructor or non-POD-ness,
          // not the (trivial) initialization itself.
          if (OutDiag)
            InDiag = diag::note_protected_by_variable_nontriv_destructor;
          else if (!Ctor->getParent()->isPOD())
            InDiag = diag::note_protected_by_variable_non_pod;
          else
            InDiag = 0;
        }
      }
    }

    return ScopePair(InDiag, OutDiag);
  }

  if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
    // Typedefs/aliases of VLA types also protect jumps into their scope.
    if (TD->getUnderlyingType()->isVariablyModifiedType())
      return ScopePair(isa<TypedefDecl>(TD)
                           ? diag::note_protected_by_vla_typedef
                           : diag::note_protected_by_vla_type_alias,
                       0);
  }

  return ScopePair(0U, 0U);
}
/// \brief Build scope information for a declaration that is part of a DeclStmt.
void JumpScopeChecker::BuildScopeInformation(Decl *D, unsigned &ParentScope) {
  // A declaration that induces a protected scope (VLA, __block, cleanup
  // attribute, nontrivial initialization, ...) opens a new scope; record it
  // and make it the current parent for everything that follows.
  const ScopePair Diags = GetDiagForGotoScopeDecl(S, D);
  if (Diags.first != 0 || Diags.second != 0) {
    Scopes.push_back(GotoScope(ParentScope, Diags.first, Diags.second,
                               D->getLocation()));
    ParentScope = Scopes.size() - 1;
  }

  // If there is an initializer, walk it under the (possibly new) scope,
  // since it may itself contain labels, gotos, or scope-inducing constructs.
  VarDecl *VD = dyn_cast<VarDecl>(D);
  if (!VD)
    return;
  if (Expr *Init = VD->getInit())
    BuildScopeInformation(Init, ParentScope);
}
/// \brief Build scope information for a variable captured by a block literal.
void JumpScopeChecker::BuildScopeInformation(VarDecl *D,
                                             const BlockDecl *BDecl,
                                             unsigned &ParentScope) {
  // Captured __block variables are excluded: the block literal holds no
  // destructor for them.
  if (D->hasAttr<BlocksAttr>())
    return;

  const QualType::DestructionKind DK = D->getType().isDestructedType();
  if (DK == QualType::DK_none)
    return;

  // Pick the enter/exit notes matching the capture's destruction kind.
  unsigned InDiag = 0, OutDiag = 0;
  switch (DK) {
  case QualType::DK_cxx_destructor:
    InDiag = diag::note_enters_block_captures_cxx_obj;
    OutDiag = diag::note_exits_block_captures_cxx_obj;
    break;
  case QualType::DK_objc_strong_lifetime:
    InDiag = diag::note_enters_block_captures_strong;
    OutDiag = diag::note_exits_block_captures_strong;
    break;
  case QualType::DK_objc_weak_lifetime:
    InDiag = diag::note_enters_block_captures_weak;
    OutDiag = diag::note_exits_block_captures_weak;
    break;
  case QualType::DK_none:
    llvm_unreachable("non-lifetime captured variable");
  }

  // Prefer the variable's own location; fall back to the block's location
  // when the variable has none.
  SourceLocation Loc =
      D->getLocation().isInvalid() ? BDecl->getLocation() : D->getLocation();
  Scopes.push_back(GotoScope(ParentScope, InDiag, OutDiag, Loc));
  ParentScope = Scopes.size() - 1;
}
/// BuildScopeInformation - The statements from CI to CE are known to form a
/// coherent VLA scope with a specified parent node. Walk through the
/// statements, adding any labels or gotos to LabelAndGotoScopes and recursively
/// walking the AST as needed.  Records each new protected scope in Scopes;
/// \p origParentScope is updated in place when S is an expression whose
/// scopes (e.g. block-literal captures) must leak into the enclosing
/// statement.
void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned &origParentScope) {
  // If this is a statement, rather than an expression, scopes within it don't
  // propagate out into the enclosing scope.  Otherwise we have to worry
  // about block literals, which have the lifetime of their enclosing statement.
  unsigned independentParentScope = origParentScope;
  unsigned &ParentScope = ((isa<Expr>(S) && !isa<StmtExpr>(S))
                            ? origParentScope : independentParentScope);

  bool SkipFirstSubStmt = false;

  // If we found a label, remember that it is in ParentScope scope.
  switch (S->getStmtClass()) {
  case Stmt::AddrLabelExprClass:
    IndirectJumpTargets.push_back(cast<AddrLabelExpr>(S)->getLabel());
    break;

  case Stmt::IndirectGotoStmtClass:
    // "goto *&&lbl;" is a special case which we treat as equivalent
    // to a normal goto.  In addition, we don't calculate scope in the
    // operand (to avoid recording the address-of-label use), which
    // works only because of the restricted set of expressions which
    // we detect as constant targets.
    if (cast<IndirectGotoStmt>(S)->getConstantTarget()) {
      LabelAndGotoScopes[S] = ParentScope;
      Jumps.push_back(S);
      return;
    }

    LabelAndGotoScopes[S] = ParentScope;
    IndirectJumps.push_back(cast<IndirectGotoStmt>(S));
    break;

  case Stmt::SwitchStmtClass:
    // Evaluate the condition variable before entering the scope of the switch
    // statement.
    if (VarDecl *Var = cast<SwitchStmt>(S)->getConditionVariable()) {
      BuildScopeInformation(Var, ParentScope);
      SkipFirstSubStmt = true;  // don't re-walk the condition as a child
    }
    LLVM_FALLTHROUGH; // HLSL Change
  case Stmt::GotoStmtClass:
    // Remember both what scope a goto is in as well as the fact that we have
    // it.  This makes the second scan not have to walk the AST again.
    LabelAndGotoScopes[S] = ParentScope;
    Jumps.push_back(S);
    break;

  case Stmt::CXXTryStmtClass: {
    CXXTryStmt *TS = cast<CXXTryStmt>(S);
    unsigned newParentScope;
    // The try body is a protected scope: jumps into it are ill-formed.
    Scopes.push_back(GotoScope(ParentScope,
                               diag::note_protected_by_cxx_try,
                               diag::note_exits_cxx_try,
                               TS->getSourceRange().getBegin()));
    if (Stmt *TryBlock = TS->getTryBlock())
      BuildScopeInformation(TryBlock, (newParentScope = Scopes.size()-1));

    // Jump from the catch into the try is not allowed either.
    for (unsigned I = 0, E = TS->getNumHandlers(); I != E; ++I) {
      CXXCatchStmt *CS = TS->getHandler(I);
      Scopes.push_back(GotoScope(ParentScope,
                                 diag::note_protected_by_cxx_catch,
                                 diag::note_exits_cxx_catch,
                                 CS->getSourceRange().getBegin()));
      BuildScopeInformation(CS->getHandlerBlock(),
                            (newParentScope = Scopes.size()-1));
    }
    // All children have been walked with their own scopes; don't fall
    // through to the generic child loop.
    return;
  }

  case Stmt::SEHTryStmtClass: {
    SEHTryStmt *TS = cast<SEHTryStmt>(S);
    unsigned newParentScope;
    Scopes.push_back(GotoScope(ParentScope,
                               diag::note_protected_by_seh_try,
                               diag::note_exits_seh_try,
                               TS->getSourceRange().getBegin()));
    if (Stmt *TryBlock = TS->getTryBlock())
      BuildScopeInformation(TryBlock, (newParentScope = Scopes.size()-1));

    // Jump from __except or __finally into the __try are not allowed either.
    if (SEHExceptStmt *Except = TS->getExceptHandler()) {
      Scopes.push_back(GotoScope(ParentScope,
                                 diag::note_protected_by_seh_except,
                                 diag::note_exits_seh_except,
                                 Except->getSourceRange().getBegin()));
      BuildScopeInformation(Except->getBlock(),
                            (newParentScope = Scopes.size()-1));
    } else if (SEHFinallyStmt *Finally = TS->getFinallyHandler()) {
      Scopes.push_back(GotoScope(ParentScope,
                                 diag::note_protected_by_seh_finally,
                                 diag::note_exits_seh_finally,
                                 Finally->getSourceRange().getBegin()));
      BuildScopeInformation(Finally->getBlock(),
                            (newParentScope = Scopes.size()-1));
    }

    return;
  }

  default:
    break;
  }

  // Walk the substatements, tracking any scope each one introduces.
  for (Stmt *SubStmt : S->children()) {
    if (SkipFirstSubStmt) {
      SkipFirstSubStmt = false;
      continue;
    }

    if (!SubStmt) continue;

    // Cases, labels, and defaults aren't "scope parents".  It's also
    // important to handle these iteratively instead of recursively in
    // order to avoid blowing out the stack.
    while (true) {
      Stmt *Next;
      if (CaseStmt *CS = dyn_cast<CaseStmt>(SubStmt))
        Next = CS->getSubStmt();
      else if (DefaultStmt *DS = dyn_cast<DefaultStmt>(SubStmt))
        Next = DS->getSubStmt();
      else if (LabelStmt *LS = dyn_cast<LabelStmt>(SubStmt))
        Next = LS->getSubStmt();
      else
        break;

      LabelAndGotoScopes[SubStmt] = ParentScope;
      SubStmt = Next;
    }

    // If this is a declstmt with a VLA definition, it defines a scope from here
    // to the end of the containing context.
    if (DeclStmt *DS = dyn_cast<DeclStmt>(SubStmt)) {
      // The decl statement creates a scope if any of the decls in it are VLAs
      // or have the cleanup attribute.
      for (auto *I : DS->decls())
        BuildScopeInformation(I, ParentScope);
      continue;
    }

    // Disallow jumps into any part of an @try statement by pushing a scope and
    // walking all sub-stmts in that scope.
    if (ObjCAtTryStmt *AT = dyn_cast<ObjCAtTryStmt>(SubStmt)) {
      unsigned newParentScope;
      // Recursively walk the AST for the @try part.
      Scopes.push_back(GotoScope(ParentScope,
                                 diag::note_protected_by_objc_try,
                                 diag::note_exits_objc_try,
                                 AT->getAtTryLoc()));
      if (Stmt *TryPart = AT->getTryBody())
        BuildScopeInformation(TryPart, (newParentScope = Scopes.size()-1));

      // Jump from the catch to the finally or try is not valid.
      for (unsigned I = 0, N = AT->getNumCatchStmts(); I != N; ++I) {
        ObjCAtCatchStmt *AC = AT->getCatchStmt(I);
        Scopes.push_back(GotoScope(ParentScope,
                                   diag::note_protected_by_objc_catch,
                                   diag::note_exits_objc_catch,
                                   AC->getAtCatchLoc()));
        // @catches are nested and it isn't
        BuildScopeInformation(AC->getCatchBody(),
                              (newParentScope = Scopes.size()-1));
      }

      // Jump from the finally to the try or catch is not valid.
      if (ObjCAtFinallyStmt *AF = AT->getFinallyStmt()) {
        Scopes.push_back(GotoScope(ParentScope,
                                   diag::note_protected_by_objc_finally,
                                   diag::note_exits_objc_finally,
                                   AF->getAtFinallyLoc()));
        BuildScopeInformation(AF, (newParentScope = Scopes.size()-1));
      }

      continue;
    }

    unsigned newParentScope;
    // Disallow jumps into the protected statement of an @synchronized, but
    // allow jumps into the object expression it protects.
    if (ObjCAtSynchronizedStmt *AS =
            dyn_cast<ObjCAtSynchronizedStmt>(SubStmt)) {
      // Recursively walk the AST for the @synchronized object expr, it is
      // evaluated in the normal scope.
      BuildScopeInformation(AS->getSynchExpr(), ParentScope);

      // Recursively walk the AST for the @synchronized part, protected by a new
      // scope.
      Scopes.push_back(GotoScope(ParentScope,
                                 diag::note_protected_by_objc_synchronized,
                                 diag::note_exits_objc_synchronized,
                                 AS->getAtSynchronizedLoc()));
      BuildScopeInformation(AS->getSynchBody(),
                            (newParentScope = Scopes.size()-1));
      continue;
    }

    // Disallow jumps into the protected statement of an @autoreleasepool.
    if (ObjCAutoreleasePoolStmt *AS =
            dyn_cast<ObjCAutoreleasePoolStmt>(SubStmt)) {
      // Recursively walk the AST for the @autoreleasepool part, protected by a
      // new scope.
      Scopes.push_back(GotoScope(ParentScope,
                                 diag::note_protected_by_objc_autoreleasepool,
                                 diag::note_exits_objc_autoreleasepool,
                                 AS->getAtLoc()));
      BuildScopeInformation(AS->getSubStmt(),
                            (newParentScope = Scopes.size() - 1));
      continue;
    }

    // Disallow jumps past full-expressions that use blocks with
    // non-trivial cleanups of their captures.  This is theoretically
    // implementable but a lot of work which we haven't felt up to doing.
    if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(SubStmt)) {
      for (unsigned i = 0, e = EWC->getNumObjects(); i != e; ++i) {
        const BlockDecl *BDecl = EWC->getObject(i);
        for (const auto &CI : BDecl->captures()) {
          VarDecl *variable = CI.getVariable();
          BuildScopeInformation(variable, BDecl, ParentScope);
        }
      }
    }

    // Disallow jumps out of scopes containing temporaries lifetime-extended to
    // automatic storage duration.
    if (MaterializeTemporaryExpr *MTE =
            dyn_cast<MaterializeTemporaryExpr>(SubStmt)) {
      if (MTE->getStorageDuration() == SD_Automatic) {
        SmallVector<const Expr *, 4> CommaLHS;
        SmallVector<SubobjectAdjustment, 4> Adjustments;
        const Expr *ExtendedObject =
            MTE->GetTemporaryExpr()->skipRValueSubobjectAdjustments(
                CommaLHS, Adjustments);
        if (ExtendedObject->getType().isDestructedType()) {
          Scopes.push_back(GotoScope(ParentScope, 0,
                                     diag::note_exits_temporary_dtor,
                                     ExtendedObject->getExprLoc()));
          ParentScope = Scopes.size()-1;
        }
      }
    }

    // Recursively walk the AST.
    BuildScopeInformation(SubStmt, ParentScope);
  }
}
/// VerifyJumps - Verify each element of the Jumps array to see if they are
/// valid, emitting diagnostics if not.
void JumpScopeChecker::VerifyJumps() {
while (!Jumps.empty()) {
Stmt *Jump = Jumps.pop_back_val();
// With a goto,
if (GotoStmt *GS = dyn_cast<GotoStmt>(Jump)) {
// The label may not have a statement if it's coming from inline MS ASM.
if (GS->getLabel()->getStmt()) {
CheckJump(GS, GS->getLabel()->getStmt(), GS->getGotoLoc(),
diag::err_goto_into_protected_scope,
diag::ext_goto_into_protected_scope,
diag::warn_cxx98_compat_goto_into_protected_scope);
}
CheckGotoStmt(GS);
continue;
}
// We only get indirect gotos here when they have a constant target.
if (IndirectGotoStmt *IGS = dyn_cast<IndirectGotoStmt>(Jump)) {
LabelDecl *Target = IGS->getConstantTarget();
CheckJump(IGS, Target->getStmt(), IGS->getGotoLoc(),
diag::err_goto_into_protected_scope,
diag::ext_goto_into_protected_scope,
diag::warn_cxx98_compat_goto_into_protected_scope);
continue;
}
SwitchStmt *SS = cast<SwitchStmt>(Jump);
for (SwitchCase *SC = SS->getSwitchCaseList(); SC;
SC = SC->getNextSwitchCase()) {
if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(SC)))
continue;
SourceLocation Loc;
if (CaseStmt *CS = dyn_cast<CaseStmt>(SC))
Loc = CS->getLocStart();
else if (DefaultStmt *DS = dyn_cast<DefaultStmt>(SC))
Loc = DS->getLocStart();
else
Loc = SC->getLocStart();
CheckJump(SS, SC, Loc, diag::err_switch_into_protected_scope, 0,
diag::warn_cxx98_compat_switch_into_protected_scope);
}
}
}
/// VerifyIndirectJumps - Verify whether any possible indirect jump
/// might cross a protection boundary.  Unlike direct jumps, indirect
/// jumps count cleanups as protection boundaries:  since there's no
/// way to know where the jump is going, we can't implicitly run the
/// right cleanups the way we can with direct jumps.
///
/// Thus, an indirect jump is "trivial" if it bypasses no
/// initializations and no teardowns.  More formally, an indirect jump
/// from A to B is trivial if the path out from A to DCA(A,B) is
/// trivial and the path in from DCA(A,B) to B is trivial, where
/// DCA(A,B) is the deepest common ancestor of A and B.
/// Jump-triviality is transitive but asymmetric.
///
/// A path in is trivial if none of the entered scopes have an InDiag.
/// A path out is trivial is none of the exited scopes have an OutDiag.
///
/// Under these definitions, this function checks that the indirect
/// jump between A and B is trivial for every indirect goto statement A
/// and every label B whose address was taken in the function.
void JumpScopeChecker::VerifyIndirectJumps() {
  if (IndirectJumps.empty()) return;

  // If there aren't any address-of-label expressions in this function,
  // complain about the first indirect goto.
  if (IndirectJumpTargets.empty()) {
    S.Diag(IndirectJumps[0]->getGotoLoc(),
           diag::err_indirect_goto_without_addrlabel);
    return;
  }

  // Collect a single representative of every scope containing an
  // indirect goto.  For most code bases, this substantially cuts
  // down on the number of jump sites we'll have to consider later.
  typedef std::pair<unsigned, IndirectGotoStmt*> JumpScope;
  SmallVector<JumpScope, 32> JumpScopes;
  {
    llvm::DenseMap<unsigned, IndirectGotoStmt*> JumpScopesMap;
    for (SmallVectorImpl<IndirectGotoStmt*>::iterator
           I = IndirectJumps.begin(), E = IndirectJumps.end(); I != E; ++I) {
      IndirectGotoStmt *IG = *I;
      if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(IG)))
        continue;
      unsigned IGScope = LabelAndGotoScopes[IG];
      // Keep only the first indirect goto seen per scope.
      IndirectGotoStmt *&Entry = JumpScopesMap[IGScope];
      if (!Entry) Entry = IG;
    }
    JumpScopes.reserve(JumpScopesMap.size());
    for (llvm::DenseMap<unsigned, IndirectGotoStmt*>::iterator
           I = JumpScopesMap.begin(), E = JumpScopesMap.end(); I != E; ++I)
      JumpScopes.push_back(*I);
  }

  // Collect a single representative of every scope containing a
  // label whose address was taken somewhere in the function.
  // For most code bases, there will be only one such scope.
  llvm::DenseMap<unsigned, LabelDecl*> TargetScopes;
  for (SmallVectorImpl<LabelDecl*>::iterator
         I = IndirectJumpTargets.begin(), E = IndirectJumpTargets.end();
       I != E; ++I) {
    LabelDecl *TheLabel = *I;
    if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(TheLabel->getStmt())))
      continue;
    unsigned LabelScope = LabelAndGotoScopes[TheLabel->getStmt()];
    LabelDecl *&Target = TargetScopes[LabelScope];
    if (!Target) Target = TheLabel;
  }

  // For each target scope, make sure it's trivially reachable from
  // every scope containing a jump site.
  //
  // A path between scopes always consists of exiting zero or more
  // scopes, then entering zero or more scopes.  We build a set of
  // scopes S from which the target scope can be trivially
  // entered, then verify that every jump scope can be trivially
  // exited to reach a scope in S.
  llvm::BitVector Reachable(Scopes.size(), false);
  for (llvm::DenseMap<unsigned,LabelDecl*>::iterator
         TI = TargetScopes.begin(), TE = TargetScopes.end(); TI != TE; ++TI) {
    unsigned TargetScope = TI->first;
    LabelDecl *TargetLabel = TI->second;

    // The Reachable set is reused (and reset) per target scope.
    Reachable.reset();

    // Mark all the enclosing scopes from which you can safely jump
    // into the target scope.  'Min' will end up being the index of
    // the shallowest such scope.
    unsigned Min = TargetScope;
    while (true) {
      Reachable.set(Min);

      // Don't go beyond the outermost scope.
      if (Min == 0) break;

      // Stop if we can't trivially enter the current scope.
      if (Scopes[Min].InDiag) break;

      Min = Scopes[Min].ParentScope;
    }

    // Walk through all the jump sites, checking that they can trivially
    // reach this label scope.
    for (SmallVectorImpl<JumpScope>::iterator
           I = JumpScopes.begin(), E = JumpScopes.end(); I != E; ++I) {
      unsigned Scope = I->first;

      // Walk out the "scope chain" for this scope, looking for a scope
      // we've marked reachable.  For well-formed code this amortizes
      // to O(JumpScopes.size() / Scopes.size()):  we only iterate
      // when we see something unmarked, and in well-formed code we
      // mark everything we iterate past.
      bool IsReachable = false;
      while (true) {
        if (Reachable.test(Scope)) {
          // If we find something reachable, mark all the scopes we just
          // walked through as reachable.
          for (unsigned S = I->first; S != Scope; S = Scopes[S].ParentScope)
            Reachable.set(S);
          IsReachable = true;
          break;
        }

        // Don't walk out if we've reached the top-level scope or we've
        // gotten shallower than the shallowest reachable scope.
        if (Scope == 0 || Scope < Min) break;

        // Don't walk out through an out-diagnostic.
        if (Scopes[Scope].OutDiag) break;

        Scope = Scopes[Scope].ParentScope;
      }

      // Only diagnose if we didn't find something.
      if (IsReachable) continue;

      DiagnoseIndirectJump(I->second, I->first, TargetLabel, TargetScope);
    }
  }
}
/// Return true if a particular error+note combination must be downgraded to a
/// warning in Microsoft mode.
static bool IsMicrosoftJumpWarning(unsigned JumpDiag, unsigned InDiagNote) {
  // Only direct goto-into-protected-scope errors are eligible.
  if (JumpDiag != diag::err_goto_into_protected_scope)
    return false;
  // MSVC accepts jumps past variable initialization (even with a
  // non-trivial destructor), so those two notes downgrade the error.
  return InDiagNote == diag::note_protected_by_variable_init ||
         InDiagNote == diag::note_protected_by_variable_nontriv_destructor;
}
/// Return true if a particular note should be downgraded to a compatibility
/// warning in C++11 mode (jumping past a non-POD variable was ill-formed in
/// C++98 but is allowed in C++11 for trivially constructed/destroyed types).
static bool IsCXX98CompatWarning(Sema &S, unsigned InDiagNote) {
  if (InDiagNote != diag::note_protected_by_variable_non_pod)
    return false;
  return S.getLangOpts().CPlusPlus11;
}
/// Produce the primary diagnostic for an indirect jump statement, at most
/// once per jump (\p Diagnosed latches after the first emission).
static void DiagnoseIndirectJumpStmt(Sema &S, IndirectGotoStmt *Jump,
                                     LabelDecl *Target, bool &Diagnosed) {
  if (!Diagnosed) {
    S.Diag(Jump->getGotoLoc(), diag::err_indirect_goto_in_protected_scope);
    S.Diag(Target->getStmt()->getIdentLoc(), diag::note_indirect_goto_target);
    Diagnosed = true;
  }
}
/// Produce note diagnostics for a jump into each of the given protected
/// scopes (skipping scopes with no entry note).
void JumpScopeChecker::NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes) {
  if (CHECK_PERMISSIVE(ToScopes.empty()))
    return;
  for (unsigned ScopeIdx : ToScopes) {
    const GotoScope &Entered = Scopes[ScopeIdx];
    if (Entered.InDiag)
      S.Diag(Entered.Loc, Entered.InDiag);
  }
}
/// Diagnose an indirect jump which is known to cross scopes.  Emits the
/// primary error once, plus a note for every non-trivial scope exited on
/// the way out of the jump's scope and every non-trivial scope entered on
/// the way into the target label's scope.
void JumpScopeChecker::DiagnoseIndirectJump(IndirectGotoStmt *Jump,
                                            unsigned JumpScope,
                                            LabelDecl *Target,
                                            unsigned TargetScope) {
  if (CHECK_PERMISSIVE(JumpScope == TargetScope))
    return;

  unsigned Common = GetDeepestCommonScope(JumpScope, TargetScope);
  bool Diagnosed = false;

  // Walk out the scope chain until we reach the common ancestor.
  for (unsigned I = JumpScope; I != Common; I = Scopes[I].ParentScope)
    if (Scopes[I].OutDiag) {
      DiagnoseIndirectJumpStmt(S, Jump, Target, Diagnosed);
      S.Diag(Scopes[I].Loc, Scopes[I].OutDiag);
    }

  SmallVector<unsigned, 10> ToScopesCXX98Compat;

  // Now walk into the scopes containing the label whose address was taken.
  // Scopes that are only ill-formed in C++98 are collected rather than
  // diagnosed immediately.
  for (unsigned I = TargetScope; I != Common; I = Scopes[I].ParentScope)
    if (IsCXX98CompatWarning(S, Scopes[I].InDiag))
      ToScopesCXX98Compat.push_back(I);
    else if (Scopes[I].InDiag) {
      DiagnoseIndirectJumpStmt(S, Jump, Target, Diagnosed);
      S.Diag(Scopes[I].Loc, Scopes[I].InDiag);
    }

  // Diagnose this jump if it would be ill-formed in C++98.
  if (!Diagnosed && !ToScopesCXX98Compat.empty()) {
    S.Diag(Jump->getGotoLoc(),
           diag::warn_cxx98_compat_indirect_goto_in_protected_scope);
    S.Diag(Target->getStmt()->getIdentLoc(), diag::note_indirect_goto_target);
    NoteJumpIntoScopes(ToScopesCXX98Compat);
  }
}
/// CheckJump - Validate that the specified jump statement is valid: that it is
/// jumping within or out of its current scope, not into a deeper one.
/// \p JumpDiagError / \p JumpDiagWarning / \p JumpDiagCXX98Compat are the
/// diagnostic IDs used depending on how severe the crossing is (a warning ID
/// of 0 disables the Microsoft-mode downgrade).
void JumpScopeChecker::CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
                                 unsigned JumpDiagError, unsigned JumpDiagWarning,
                                 unsigned JumpDiagCXX98Compat) {
  if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(From)))
    return;
  if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(To)))
    return;

  unsigned FromScope = LabelAndGotoScopes[From];
  unsigned ToScope = LabelAndGotoScopes[To];

  // Common case: exactly the same scope, which is fine.
  if (FromScope == ToScope) return;

  // Warn on gotos out of __finally blocks.
  if (isa<GotoStmt>(From) || isa<IndirectGotoStmt>(From)) {
    // If FromScope > ToScope, FromScope is more nested and the jump goes to a
    // less nested scope.  Check if it crosses a __finally along the way.
    for (unsigned I = FromScope; I > ToScope; I = Scopes[I].ParentScope) {
      if (Scopes[I].InDiag == diag::note_protected_by_seh_finally) {
        S.Diag(From->getLocStart(), diag::warn_jump_out_of_seh_finally);
        break;
      }
    }
  }

  unsigned CommonScope = GetDeepestCommonScope(FromScope, ToScope);

  // It's okay to jump out from a nested scope.
  if (CommonScope == ToScope) return;

  // Pull out (and reverse) any scopes we might need to diagnose skipping.
  // Each entered scope is bucketed by how severely entering it should be
  // reported: MS downgrade-to-warning, C++98-compat warning, or hard error.
  SmallVector<unsigned, 10> ToScopesCXX98Compat;
  SmallVector<unsigned, 10> ToScopesError;
  SmallVector<unsigned, 10> ToScopesWarning;
  for (unsigned I = ToScope; I != CommonScope; I = Scopes[I].ParentScope) {
    if (S.getLangOpts().MSVCCompat && JumpDiagWarning != 0 &&
        IsMicrosoftJumpWarning(JumpDiagError, Scopes[I].InDiag))
      ToScopesWarning.push_back(I);
    else if (IsCXX98CompatWarning(S, Scopes[I].InDiag))
      ToScopesCXX98Compat.push_back(I);
    else if (Scopes[I].InDiag)
      ToScopesError.push_back(I);
  }

  // Handle warnings.
  if (!ToScopesWarning.empty()) {
    S.Diag(DiagLoc, JumpDiagWarning);
    NoteJumpIntoScopes(ToScopesWarning);
  }

  // Handle errors.
  if (!ToScopesError.empty()) {
    S.Diag(DiagLoc, JumpDiagError);
    NoteJumpIntoScopes(ToScopesError);
  }

  // Handle -Wc++98-compat warnings if the jump is well-formed.
  if (ToScopesError.empty() && !ToScopesCXX98Compat.empty()) {
    S.Diag(DiagLoc, JumpDiagCXX98Compat);
    NoteJumpIntoScopes(ToScopesCXX98Compat);
  }
}
/// Reject direct gotos whose label was defined inside MS-style inline
/// assembly, emitting an error at the goto and a note at the label.
void JumpScopeChecker::CheckGotoStmt(GotoStmt *GS) {
  LabelDecl *Label = GS->getLabel();
  if (!Label->isMSAsmLabel())
    return;
  S.Diag(GS->getGotoLoc(), diag::err_goto_ms_asm_label)
      << Label->getIdentifier();
  S.Diag(Label->getLocation(), diag::note_goto_ms_asm_label)
      << Label->getIdentifier();
}
/// DiagnoseInvalidJumps - Verify that all gotos, indirect gotos, and switch
/// cases in \p Body target locations they may legally reach, emitting
/// diagnostics otherwise.  All the work happens in the JumpScopeChecker
/// constructor; the (void) cast discards the temporary.
void Sema::DiagnoseInvalidJumps(Stmt *Body) {
  (void)JumpScopeChecker(Body, *this);
}
//===--- SemaHLSL.cpp - HLSL support for AST nodes and operations ---===//
///////////////////////////////////////////////////////////////////////////////
// //
// SemaHLSL.cpp //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
// This file implements the semantic support for HLSL. //
// //
///////////////////////////////////////////////////////////////////////////////
#include "clang/Sema/SemaHLSL.h"
#include "VkConstantsTables.h"
#include "dxc/DXIL/DxilFunctionProps.h"
#include "dxc/DXIL/DxilShaderModel.h"
#include "dxc/HLSL/HLOperations.h"
#include "dxc/HlslIntrinsicOp.h"
#include "dxc/Support/Global.h"
#include "dxc/Support/WinIncludes.h"
#include "dxc/dxcapi.internal.h"
#include "gen_intrin_main_tables_15.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/HlslTypes.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <array>
#include <bitset>
#include <float.h>
// ArBasicKind - Enumerates every "basic" kind tracked by the HLSL type
// system: primitive scalar kinds first, then pseudo-entries used by the
// intrinsic tables, then the built-in object types (textures, samplers,
// buffers, streams, ray tracing and work-graph objects, ...).
// NOTE(review): the case-group macros below (AR_BASIC_TEXTURE_MS_CASES etc.)
// reference these enumerators by name; reordering entries here presumably
// also affects table indexing elsewhere — confirm before reordering.
enum ArBasicKind {
  AR_BASIC_BOOL,
  AR_BASIC_LITERAL_FLOAT,
  AR_BASIC_FLOAT16,
  AR_BASIC_FLOAT32_PARTIAL_PRECISION,
  AR_BASIC_FLOAT32,
  AR_BASIC_FLOAT64,
  AR_BASIC_LITERAL_INT,
  AR_BASIC_INT8,
  AR_BASIC_UINT8,
  AR_BASIC_INT16,
  AR_BASIC_UINT16,
  AR_BASIC_INT32,
  AR_BASIC_UINT32,
  AR_BASIC_INT64,
  AR_BASIC_UINT64,

  AR_BASIC_MIN10FLOAT,
  AR_BASIC_MIN16FLOAT,
  AR_BASIC_MIN12INT,
  AR_BASIC_MIN16INT,
  AR_BASIC_MIN16UINT,
  AR_BASIC_INT8_4PACKED,
  AR_BASIC_UINT8_4PACKED,

  AR_BASIC_ENUM,
  // Number of primitive scalar kinds above this point.
  AR_BASIC_COUNT,

  //
  // Pseudo-entries for intrinsic tables and such.
  //

  AR_BASIC_NONE,
  AR_BASIC_UNKNOWN,
  AR_BASIC_NOCAST,
  AR_BASIC_DEPENDENT,

  //
  // The following pseudo-entries represent higher-level
  // object types that are treated as units.
  //

  AR_BASIC_POINTER,
  AR_BASIC_ENUM_CLASS,

  AR_OBJECT_NULL,
  AR_OBJECT_STRING_LITERAL,
  AR_OBJECT_STRING,

  // AR_OBJECT_TEXTURE,
  AR_OBJECT_TEXTURE1D,
  AR_OBJECT_TEXTURE1D_ARRAY,
  AR_OBJECT_TEXTURE2D,
  AR_OBJECT_TEXTURE2D_ARRAY,
  AR_OBJECT_TEXTURE3D,
  AR_OBJECT_TEXTURECUBE,
  AR_OBJECT_TEXTURECUBE_ARRAY,
  AR_OBJECT_TEXTURE2DMS,
  AR_OBJECT_TEXTURE2DMS_ARRAY,

  AR_OBJECT_SAMPLER,
  AR_OBJECT_SAMPLER1D,
  AR_OBJECT_SAMPLER2D,
  AR_OBJECT_SAMPLER3D,
  AR_OBJECT_SAMPLERCUBE,
  AR_OBJECT_SAMPLERCOMPARISON,

  AR_OBJECT_BUFFER,

  //
  // View objects are only used as variable/types within the Effects
  // framework, for example in calls to OMSetRenderTargets.
  //

  AR_OBJECT_RENDERTARGETVIEW,
  AR_OBJECT_DEPTHSTENCILVIEW,

  //
  // Shader objects are only used as variable/types within the Effects
  // framework, for example as a result of CompileShader().
  //

  AR_OBJECT_COMPUTESHADER,
  AR_OBJECT_DOMAINSHADER,
  AR_OBJECT_GEOMETRYSHADER,
  AR_OBJECT_HULLSHADER,
  AR_OBJECT_PIXELSHADER,
  AR_OBJECT_VERTEXSHADER,
  AR_OBJECT_PIXELFRAGMENT,
  AR_OBJECT_VERTEXFRAGMENT,

  AR_OBJECT_STATEBLOCK,

  AR_OBJECT_RASTERIZER,
  AR_OBJECT_DEPTHSTENCIL,
  AR_OBJECT_BLEND,

  AR_OBJECT_POINTSTREAM,
  AR_OBJECT_LINESTREAM,
  AR_OBJECT_TRIANGLESTREAM,

  AR_OBJECT_INPUTPATCH,
  AR_OBJECT_OUTPUTPATCH,

  AR_OBJECT_RWTEXTURE1D,
  AR_OBJECT_RWTEXTURE1D_ARRAY,
  AR_OBJECT_RWTEXTURE2D,
  AR_OBJECT_RWTEXTURE2D_ARRAY,
  AR_OBJECT_RWTEXTURE3D,
  AR_OBJECT_RWBUFFER,

  AR_OBJECT_BYTEADDRESS_BUFFER,
  AR_OBJECT_RWBYTEADDRESS_BUFFER,
  AR_OBJECT_STRUCTURED_BUFFER,
  AR_OBJECT_RWSTRUCTURED_BUFFER,
  AR_OBJECT_RWSTRUCTURED_BUFFER_ALLOC,
  AR_OBJECT_RWSTRUCTURED_BUFFER_CONSUME,
  AR_OBJECT_APPEND_STRUCTURED_BUFFER,
  AR_OBJECT_CONSUME_STRUCTURED_BUFFER,

  AR_OBJECT_CONSTANT_BUFFER,
  AR_OBJECT_TEXTURE_BUFFER,

  // Rasterizer-ordered views.
  AR_OBJECT_ROVBUFFER,
  AR_OBJECT_ROVBYTEADDRESS_BUFFER,
  AR_OBJECT_ROVSTRUCTURED_BUFFER,
  AR_OBJECT_ROVTEXTURE1D,
  AR_OBJECT_ROVTEXTURE1D_ARRAY,
  AR_OBJECT_ROVTEXTURE2D,
  AR_OBJECT_ROVTEXTURE2D_ARRAY,
  AR_OBJECT_ROVTEXTURE3D,

  AR_OBJECT_FEEDBACKTEXTURE2D,
  AR_OBJECT_FEEDBACKTEXTURE2D_ARRAY,

  // SPIRV change starts
#ifdef ENABLE_SPIRV_CODEGEN
  AR_OBJECT_VK_SUBPASS_INPUT,
  AR_OBJECT_VK_SUBPASS_INPUT_MS,
  AR_OBJECT_VK_SPIRV_TYPE,
  AR_OBJECT_VK_SPIRV_OPAQUE_TYPE,
  AR_OBJECT_VK_INTEGRAL_CONSTANT,
  AR_OBJECT_VK_LITERAL,
  AR_OBJECT_VK_SPV_INTRINSIC_TYPE,
  AR_OBJECT_VK_SPV_INTRINSIC_RESULT_ID,
#endif // ENABLE_SPIRV_CODEGEN
  // SPIRV change ends

  AR_OBJECT_INNER, // Used for internal type object

  AR_OBJECT_LEGACY_EFFECT,

  AR_OBJECT_WAVE,

  AR_OBJECT_RAY_DESC,
  AR_OBJECT_ACCELERATION_STRUCT,
  AR_OBJECT_USER_DEFINED_TYPE,
  AR_OBJECT_TRIANGLE_INTERSECTION_ATTRIBUTES,

  // subobjects
  AR_OBJECT_STATE_OBJECT_CONFIG,
  AR_OBJECT_GLOBAL_ROOT_SIGNATURE,
  AR_OBJECT_LOCAL_ROOT_SIGNATURE,
  AR_OBJECT_SUBOBJECT_TO_EXPORTS_ASSOC,
  AR_OBJECT_RAYTRACING_SHADER_CONFIG,
  AR_OBJECT_RAYTRACING_PIPELINE_CONFIG,
  AR_OBJECT_TRIANGLE_HIT_GROUP,
  AR_OBJECT_PROCEDURAL_PRIMITIVE_HIT_GROUP,
  AR_OBJECT_RAYTRACING_PIPELINE_CONFIG1,

  // RayQuery
  AR_OBJECT_RAY_QUERY,

  // Heap Resource
  AR_OBJECT_HEAP_RESOURCE,
  AR_OBJECT_HEAP_SAMPLER,

  AR_OBJECT_RWTEXTURE2DMS,
  AR_OBJECT_RWTEXTURE2DMS_ARRAY,

  // Work Graphs
  AR_OBJECT_EMPTY_NODE_INPUT,
  AR_OBJECT_DISPATCH_NODE_INPUT_RECORD,
  AR_OBJECT_RWDISPATCH_NODE_INPUT_RECORD,
  AR_OBJECT_GROUP_NODE_INPUT_RECORDS,
  AR_OBJECT_RWGROUP_NODE_INPUT_RECORDS,
  AR_OBJECT_THREAD_NODE_INPUT_RECORD,
  AR_OBJECT_RWTHREAD_NODE_INPUT_RECORD,

  AR_OBJECT_NODE_OUTPUT,
  AR_OBJECT_EMPTY_NODE_OUTPUT,
  AR_OBJECT_NODE_OUTPUT_ARRAY,
  AR_OBJECT_EMPTY_NODE_OUTPUT_ARRAY,

  AR_OBJECT_THREAD_NODE_OUTPUT_RECORDS,
  AR_OBJECT_GROUP_NODE_OUTPUT_RECORDS,

  // Total number of entries (including pseudo-entries and objects).
  AR_BASIC_MAXIMUM_COUNT
};
// Convenience case-label groups for switch statements over ArBasicKind
// values.  Each macro expands to a run of 'case' labels; the final label is
// intentionally written without a trailing colon, which the use site (or
// the next macro in a chain) supplies.
#define AR_BASIC_TEXTURE_MS_CASES                                              \
  case AR_OBJECT_TEXTURE2DMS:                                                  \
  case AR_OBJECT_TEXTURE2DMS_ARRAY:                                            \
  case AR_OBJECT_RWTEXTURE2DMS:                                                \
  case AR_OBJECT_RWTEXTURE2DMS_ARRAY

#define AR_BASIC_NON_TEXTURE_MS_CASES                                          \
  case AR_OBJECT_TEXTURE1D:                                                    \
  case AR_OBJECT_TEXTURE1D_ARRAY:                                              \
  case AR_OBJECT_TEXTURE2D:                                                    \
  case AR_OBJECT_TEXTURE2D_ARRAY:                                              \
  case AR_OBJECT_TEXTURE3D:                                                    \
  case AR_OBJECT_TEXTURECUBE:                                                  \
  case AR_OBJECT_TEXTURECUBE_ARRAY

// All texture kinds: multisample plus non-multisample.
#define AR_BASIC_TEXTURE_CASES                                                 \
  AR_BASIC_TEXTURE_MS_CASES:                                                   \
  AR_BASIC_NON_TEXTURE_MS_CASES

#define AR_BASIC_NON_CMP_SAMPLER_CASES                                         \
  case AR_OBJECT_SAMPLER:                                                      \
  case AR_OBJECT_SAMPLER1D:                                                    \
  case AR_OBJECT_SAMPLER2D:                                                    \
  case AR_OBJECT_SAMPLER3D:                                                    \
  case AR_OBJECT_SAMPLERCUBE

#define AR_BASIC_ROBJECT_CASES                                                 \
  case AR_OBJECT_BLEND:                                                        \
  case AR_OBJECT_RASTERIZER:                                                   \
  case AR_OBJECT_DEPTHSTENCIL:                                                 \
  case AR_OBJECT_STATEBLOCK

//
// Properties of entries in the ArBasicKind enumeration.
// These properties are intended to allow easy identification
// of classes of basic kinds. More specific checks on the
// actual kind values could then be done.
//

// The first four bits are used as a subtype indicator,
// such as bit count for primitive kinds or specific
// types for non-primitive-data kinds.
#define BPROP_SUBTYPE_MASK 0x0000000f

// Bit counts must be ordered from smaller to larger.
#define BPROP_BITS0 0x00000000
#define BPROP_BITS8 0x00000001
#define BPROP_BITS10 0x00000002
#define BPROP_BITS12 0x00000003
#define BPROP_BITS16 0x00000004
#define BPROP_BITS32 0x00000005
#define BPROP_BITS64 0x00000006
#define BPROP_BITS_NON_PRIM 0x00000007

// Subtype and bit count share the same 4-bit field, so these two accessors
// currently extract the identical value.
#define GET_BPROP_SUBTYPE(_Props) ((_Props)&BPROP_SUBTYPE_MASK)
#define GET_BPROP_BITS(_Props) ((_Props)&BPROP_SUBTYPE_MASK)

#define BPROP_BOOLEAN 0x00000010 // Whether the type is bool
#define BPROP_INTEGER 0x00000020 // Whether the type is an integer
#define BPROP_UNSIGNED \
  0x00000040 // Whether the type is an unsigned numeric (its absence implies
             // signed)
#define BPROP_NUMERIC 0x00000080 // Whether the type is numeric or boolean
#define BPROP_LITERAL \
  0x00000100 // Whether the type is a literal float or integer
#define BPROP_FLOATING 0x00000200 // Whether the type is a float
#define BPROP_OBJECT \
  0x00000400 // Whether the type is an object (including null or stream)
#define BPROP_OTHER \
  0x00000800 // Whether the type is a pseudo-entry in another table.
#define BPROP_PARTIAL_PRECISION \
  0x00001000 // Whether the type has partial precision for calculations (i.e.,
             // is this 'half')
#define BPROP_POINTER 0x00002000 // Whether the type is a basic pointer.
#define BPROP_TEXTURE 0x00004000 // Whether the type is any kind of texture.
#define BPROP_SAMPLER \
  0x00008000 // Whether the type is any kind of sampler object.
#define BPROP_STREAM \
  0x00010000 // Whether the type is a point, line or triangle stream.
#define BPROP_PATCH 0x00020000 // Whether the type is an input or output patch.
#define BPROP_RBUFFER 0x00040000 // Whether the type acts as a read-only buffer.
#define BPROP_RWBUFFER \
  0x00080000 // Whether the type acts as a read-write buffer.
#define BPROP_PRIMITIVE \
  0x00100000 // Whether the type is a primitive scalar type.
#define BPROP_MIN_PRECISION \
  0x00200000 // Whether the type is qualified with a minimum precision.
#define BPROP_ROVBUFFER 0x00400000 // Whether the type is a ROV object.
#define BPROP_FEEDBACKTEXTURE \
  0x00800000 // Whether the type is a feedback texture.
#define BPROP_ENUM 0x01000000 // Whether the type is a enum

// Extract the primitive-kind bits, without/with the signedness bit.
#define GET_BPROP_PRIM_KIND(_Props) \
  ((_Props) & (BPROP_BOOLEAN | BPROP_INTEGER | BPROP_FLOATING))
#define GET_BPROP_PRIM_KIND_SU(_Props) \
  ((_Props) & (BPROP_BOOLEAN | BPROP_INTEGER | BPROP_FLOATING | BPROP_UNSIGNED))

#define IS_BPROP_PRIMITIVE(_Props) (((_Props)&BPROP_PRIMITIVE) != 0)
#define IS_BPROP_BOOL(_Props) (((_Props)&BPROP_BOOLEAN) != 0)
#define IS_BPROP_FLOAT(_Props) (((_Props)&BPROP_FLOATING) != 0)
// Signed integer: integer bit set, unsigned and boolean bits clear.
#define IS_BPROP_SINT(_Props) \
  (((_Props) & (BPROP_INTEGER | BPROP_UNSIGNED | BPROP_BOOLEAN)) == \
   BPROP_INTEGER)
// Unsigned integer: integer and unsigned bits set, boolean bit clear.
#define IS_BPROP_UINT(_Props) \
  (((_Props) & (BPROP_INTEGER | BPROP_UNSIGNED | BPROP_BOOLEAN)) == \
   (BPROP_INTEGER | BPROP_UNSIGNED))
// Any (signed or unsigned) integer that is not bool.
#define IS_BPROP_AINT(_Props) \
  (((_Props) & (BPROP_INTEGER | BPROP_BOOLEAN)) == BPROP_INTEGER)
#define IS_BPROP_STREAM(_Props) (((_Props)&BPROP_STREAM) != 0)
#define IS_BPROP_SAMPLER(_Props) (((_Props)&BPROP_SAMPLER) != 0)
#define IS_BPROP_TEXTURE(_Props) (((_Props)&BPROP_TEXTURE) != 0)
#define IS_BPROP_OBJECT(_Props) (((_Props)&BPROP_OBJECT) != 0)
#define IS_BPROP_MIN_PRECISION(_Props) (((_Props)&BPROP_MIN_PRECISION) != 0)
#define IS_BPROP_UNSIGNABLE(_Props) \
(IS_BPROP_AINT(_Props) && GET_BPROP_BITS(_Props) != BPROP_BITS12)
#define IS_BPROP_ENUM(_Props) (((_Props)&BPROP_ENUM) != 0)
// Property mask for each ArBasicKind value, in enum declaration order.
// This table must stay in exact one-to-one correspondence with the
// ArBasicKind enumeration; the C_ASSERT below checks the element count.
const UINT g_uBasicKindProps[] = {
    BPROP_PRIMITIVE | BPROP_BOOLEAN | BPROP_INTEGER | BPROP_NUMERIC |
        BPROP_BITS0, // AR_BASIC_BOOL
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_FLOATING | BPROP_LITERAL |
        BPROP_BITS0, // AR_BASIC_LITERAL_FLOAT
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_FLOATING |
        BPROP_BITS16, // AR_BASIC_FLOAT16
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_FLOATING | BPROP_BITS32 |
        BPROP_PARTIAL_PRECISION, // AR_BASIC_FLOAT32_PARTIAL_PRECISION
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_FLOATING |
        BPROP_BITS32, // AR_BASIC_FLOAT32
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_FLOATING |
        BPROP_BITS64, // AR_BASIC_FLOAT64
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_INTEGER | BPROP_LITERAL |
        BPROP_BITS0, // AR_BASIC_LITERAL_INT
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_INTEGER |
        BPROP_BITS8, // AR_BASIC_INT8
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_INTEGER | BPROP_UNSIGNED |
        BPROP_BITS8, // AR_BASIC_UINT8
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_INTEGER |
        BPROP_BITS16, // AR_BASIC_INT16
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_INTEGER | BPROP_UNSIGNED |
        BPROP_BITS16, // AR_BASIC_UINT16
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_INTEGER |
        BPROP_BITS32, // AR_BASIC_INT32
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_INTEGER | BPROP_UNSIGNED |
        BPROP_BITS32, // AR_BASIC_UINT32
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_INTEGER |
        BPROP_BITS64, // AR_BASIC_INT64
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_INTEGER | BPROP_UNSIGNED |
        BPROP_BITS64, // AR_BASIC_UINT64
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_FLOATING | BPROP_BITS10 |
        BPROP_MIN_PRECISION, // AR_BASIC_MIN10FLOAT
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_FLOATING | BPROP_BITS16 |
        BPROP_MIN_PRECISION, // AR_BASIC_MIN16FLOAT
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_INTEGER | BPROP_BITS12 |
        BPROP_MIN_PRECISION, // AR_BASIC_MIN12INT
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_INTEGER | BPROP_BITS16 |
        BPROP_MIN_PRECISION, // AR_BASIC_MIN16INT
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_INTEGER | BPROP_UNSIGNED |
        BPROP_BITS16 | BPROP_MIN_PRECISION, // AR_BASIC_MIN16UINT
    // The packed kinds are carried in a 32-bit unsigned scalar.
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_INTEGER | BPROP_UNSIGNED |
        BPROP_BITS32, // AR_BASIC_INT8_4PACKED
    BPROP_PRIMITIVE | BPROP_NUMERIC | BPROP_INTEGER | BPROP_UNSIGNED |
        BPROP_BITS32, // AR_BASIC_UINT8_4PACKED
    BPROP_ENUM | BPROP_NUMERIC | BPROP_INTEGER, // AR_BASIC_ENUM
    BPROP_OTHER,                                // AR_BASIC_COUNT
    //
    // Pseudo-entries for intrinsic tables and such.
    //
    0,           // AR_BASIC_NONE
    BPROP_OTHER, // AR_BASIC_UNKNOWN
    BPROP_OTHER, // AR_BASIC_NOCAST
    0,           // AR_BASIC_DEPENDENT
    //
    // The following pseudo-entries represent higher-level
    // object types that are treated as units.
    //
    BPROP_POINTER,                // AR_BASIC_POINTER
    BPROP_ENUM,                   // AR_BASIC_ENUM_CLASS
    BPROP_OBJECT | BPROP_RBUFFER, // AR_OBJECT_NULL
    BPROP_OBJECT | BPROP_RBUFFER, // AR_OBJECT_STRING_LITERAL
    BPROP_OBJECT | BPROP_RBUFFER, // AR_OBJECT_STRING
    // BPROP_OBJECT | BPROP_TEXTURE, // AR_OBJECT_TEXTURE
    BPROP_OBJECT | BPROP_TEXTURE, // AR_OBJECT_TEXTURE1D
    BPROP_OBJECT | BPROP_TEXTURE, // AR_OBJECT_TEXTURE1D_ARRAY
    BPROP_OBJECT | BPROP_TEXTURE, // AR_OBJECT_TEXTURE2D
    BPROP_OBJECT | BPROP_TEXTURE, // AR_OBJECT_TEXTURE2D_ARRAY
    BPROP_OBJECT | BPROP_TEXTURE, // AR_OBJECT_TEXTURE3D
    BPROP_OBJECT | BPROP_TEXTURE, // AR_OBJECT_TEXTURECUBE
    BPROP_OBJECT | BPROP_TEXTURE, // AR_OBJECT_TEXTURECUBE_ARRAY
    BPROP_OBJECT | BPROP_TEXTURE, // AR_OBJECT_TEXTURE2DMS
    BPROP_OBJECT | BPROP_TEXTURE, // AR_OBJECT_TEXTURE2DMS_ARRAY
    BPROP_OBJECT | BPROP_SAMPLER, // AR_OBJECT_SAMPLER
    BPROP_OBJECT | BPROP_SAMPLER, // AR_OBJECT_SAMPLER1D
    BPROP_OBJECT | BPROP_SAMPLER, // AR_OBJECT_SAMPLER2D
    BPROP_OBJECT | BPROP_SAMPLER, // AR_OBJECT_SAMPLER3D
    BPROP_OBJECT | BPROP_SAMPLER, // AR_OBJECT_SAMPLERCUBE
    BPROP_OBJECT | BPROP_SAMPLER, // AR_OBJECT_SAMPLERCOMPARISON
    BPROP_OBJECT | BPROP_RBUFFER, // AR_OBJECT_BUFFER
    BPROP_OBJECT,                 // AR_OBJECT_RENDERTARGETVIEW
    BPROP_OBJECT,                 // AR_OBJECT_DEPTHSTENCILVIEW
    BPROP_OBJECT,                 // AR_OBJECT_COMPUTESHADER
    BPROP_OBJECT,                 // AR_OBJECT_DOMAINSHADER
    BPROP_OBJECT,                 // AR_OBJECT_GEOMETRYSHADER
    BPROP_OBJECT,                 // AR_OBJECT_HULLSHADER
    BPROP_OBJECT,                 // AR_OBJECT_PIXELSHADER
    BPROP_OBJECT,                 // AR_OBJECT_VERTEXSHADER
    BPROP_OBJECT,                 // AR_OBJECT_PIXELFRAGMENT
    BPROP_OBJECT,                 // AR_OBJECT_VERTEXFRAGMENT
    BPROP_OBJECT,                 // AR_OBJECT_STATEBLOCK
    BPROP_OBJECT,                 // AR_OBJECT_RASTERIZER
    BPROP_OBJECT,                 // AR_OBJECT_DEPTHSTENCIL
    BPROP_OBJECT,                 // AR_OBJECT_BLEND
    BPROP_OBJECT | BPROP_STREAM,  // AR_OBJECT_POINTSTREAM
    BPROP_OBJECT | BPROP_STREAM,  // AR_OBJECT_LINESTREAM
    BPROP_OBJECT | BPROP_STREAM,  // AR_OBJECT_TRIANGLESTREAM
    BPROP_OBJECT | BPROP_PATCH,   // AR_OBJECT_INPUTPATCH
    BPROP_OBJECT | BPROP_PATCH,   // AR_OBJECT_OUTPUTPATCH
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_RWTEXTURE1D
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_RWTEXTURE1D_ARRAY
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_RWTEXTURE2D
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_RWTEXTURE2D_ARRAY
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_RWTEXTURE3D
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_RWBUFFER
    BPROP_OBJECT | BPROP_RBUFFER,  // AR_OBJECT_BYTEADDRESS_BUFFER
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_RWBYTEADDRESS_BUFFER
    BPROP_OBJECT | BPROP_RBUFFER,  // AR_OBJECT_STRUCTURED_BUFFER
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_RWSTRUCTURED_BUFFER
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_RWSTRUCTURED_BUFFER_ALLOC
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_RWSTRUCTURED_BUFFER_CONSUME
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_APPEND_STRUCTURED_BUFFER
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_CONSUME_STRUCTURED_BUFFER
    BPROP_OBJECT | BPROP_RBUFFER,  // AR_OBJECT_CONSTANT_BUFFER
    BPROP_OBJECT | BPROP_RBUFFER,  // AR_OBJECT_TEXTURE_BUFFER
    BPROP_OBJECT | BPROP_RWBUFFER | BPROP_ROVBUFFER, // AR_OBJECT_ROVBUFFER
    BPROP_OBJECT | BPROP_RWBUFFER |
        BPROP_ROVBUFFER, // AR_OBJECT_ROVBYTEADDRESS_BUFFER
    BPROP_OBJECT | BPROP_RWBUFFER |
        BPROP_ROVBUFFER, // AR_OBJECT_ROVSTRUCTURED_BUFFER
    BPROP_OBJECT | BPROP_RWBUFFER | BPROP_ROVBUFFER, // AR_OBJECT_ROVTEXTURE1D
    BPROP_OBJECT | BPROP_RWBUFFER |
        BPROP_ROVBUFFER, // AR_OBJECT_ROVTEXTURE1D_ARRAY
    BPROP_OBJECT | BPROP_RWBUFFER | BPROP_ROVBUFFER, // AR_OBJECT_ROVTEXTURE2D
    BPROP_OBJECT | BPROP_RWBUFFER |
        BPROP_ROVBUFFER, // AR_OBJECT_ROVTEXTURE2D_ARRAY
    BPROP_OBJECT | BPROP_RWBUFFER | BPROP_ROVBUFFER, // AR_OBJECT_ROVTEXTURE3D
    BPROP_OBJECT | BPROP_TEXTURE |
        BPROP_FEEDBACKTEXTURE, // AR_OBJECT_FEEDBACKTEXTURE2D
    BPROP_OBJECT | BPROP_TEXTURE |
        BPROP_FEEDBACKTEXTURE, // AR_OBJECT_FEEDBACKTEXTURE2D_ARRAY
    // SPIRV change starts
#ifdef ENABLE_SPIRV_CODEGEN
    BPROP_OBJECT | BPROP_RBUFFER, // AR_OBJECT_VK_SUBPASS_INPUT
    BPROP_OBJECT | BPROP_RBUFFER, // AR_OBJECT_VK_SUBPASS_INPUT_MS
    BPROP_OBJECT,                 // AR_OBJECT_VK_SPIRV_TYPE
    BPROP_OBJECT,                 // AR_OBJECT_VK_SPIRV_OPAQUE_TYPE
    BPROP_OBJECT,                 // AR_OBJECT_VK_INTEGRAL_CONSTANT,
    BPROP_OBJECT,                 // AR_OBJECT_VK_LITERAL,
    BPROP_OBJECT, // AR_OBJECT_VK_SPV_INTRINSIC_TYPE use recordType
    BPROP_OBJECT, // AR_OBJECT_VK_SPV_INTRINSIC_RESULT_ID use recordType
#endif // ENABLE_SPIRV_CODEGEN
    // SPIRV change ends
    BPROP_OBJECT, // AR_OBJECT_INNER
    BPROP_OBJECT, // AR_OBJECT_LEGACY_EFFECT
    BPROP_OBJECT, // AR_OBJECT_WAVE
    // NOTE(review): the next three entries use LICOMPTYPE_* values rather
    // than BPROP_* flags, mixing two unrelated enumerations in this table;
    // confirm no BPROP_* query is ever made on these kinds before changing.
    LICOMPTYPE_RAYDESC,             // AR_OBJECT_RAY_DESC
    LICOMPTYPE_ACCELERATION_STRUCT, // AR_OBJECT_ACCELERATION_STRUCT
    LICOMPTYPE_USER_DEFINED_TYPE,   // AR_OBJECT_USER_DEFINED_TYPE
    0, // AR_OBJECT_TRIANGLE_INTERSECTION_ATTRIBUTES
    // subobjects
    0, // AR_OBJECT_STATE_OBJECT_CONFIG,
    0, // AR_OBJECT_GLOBAL_ROOT_SIGNATURE,
    0, // AR_OBJECT_LOCAL_ROOT_SIGNATURE,
    0, // AR_OBJECT_SUBOBJECT_TO_EXPORTS_ASSOC,
    0, // AR_OBJECT_RAYTRACING_SHADER_CONFIG,
    0, // AR_OBJECT_RAYTRACING_PIPELINE_CONFIG,
    0, // AR_OBJECT_TRIANGLE_HIT_GROUP,
    0, // AR_OBJECT_PROCEDURAL_PRIMITIVE_HIT_GROUP,
    0, // AR_OBJECT_RAYTRACING_PIPELINE_CONFIG1,
    BPROP_OBJECT, // AR_OBJECT_RAY_QUERY,
    BPROP_OBJECT, // AR_OBJECT_HEAP_RESOURCE,
    BPROP_OBJECT, // AR_OBJECT_HEAP_SAMPLER,
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_RWTEXTURE2DMS
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_RWTEXTURE2DMS_ARRAY
    // WorkGraphs
    BPROP_OBJECT, // AR_OBJECT_EMPTY_NODE_INPUT
    BPROP_OBJECT, // AR_OBJECT_DISPATCH_NODE_INPUT_RECORD
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_RWDISPATCH_NODE_INPUT_RECORD
    BPROP_OBJECT, // AR_OBJECT_GROUP_NODE_INPUT_RECORDS
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_RWGROUP_NODE_INPUT_RECORDS
    BPROP_OBJECT, // AR_OBJECT_THREAD_NODE_INPUT_RECORD
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_RWTHREAD_NODE_INPUT_RECORD
    BPROP_OBJECT, // AR_OBJECT_NODE_OUTPUT
    BPROP_OBJECT, // AR_OBJECT_EMPTY_NODE_OUTPUT
    BPROP_OBJECT, // AR_OBJECT_NODE_OUTPUT_ARRAY
    BPROP_OBJECT, // AR_OBJECT_EMPTY_NODE_OUTPUT_ARRAY
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_THREAD_NODE_OUTPUT_RECORDS,
    BPROP_OBJECT | BPROP_RWBUFFER, // AR_OBJECT_GROUP_NODE_OUTPUT_RECORDS,
    // AR_BASIC_MAXIMUM_COUNT
};
// Keep the table in lock-step with the ArBasicKind enumeration.
C_ASSERT(ARRAYSIZE(g_uBasicKindProps) == AR_BASIC_MAXIMUM_COUNT);
// Per-kind wrappers: look up the property mask for an ArBasicKind value and
// apply the corresponding BPROP_* query macro defined above.
#define GetBasicKindProps(_Kind) g_uBasicKindProps[(_Kind)]
#define GET_BASIC_BITS(_Kind) GET_BPROP_BITS(GetBasicKindProps(_Kind))
#define GET_BASIC_PRIM_KIND(_Kind) GET_BPROP_PRIM_KIND(GetBasicKindProps(_Kind))
#define GET_BASIC_PRIM_KIND_SU(_Kind) \
  GET_BPROP_PRIM_KIND_SU(GetBasicKindProps(_Kind))
#define IS_BASIC_PRIMITIVE(_Kind) IS_BPROP_PRIMITIVE(GetBasicKindProps(_Kind))
#define IS_BASIC_BOOL(_Kind) IS_BPROP_BOOL(GetBasicKindProps(_Kind))
#define IS_BASIC_FLOAT(_Kind) IS_BPROP_FLOAT(GetBasicKindProps(_Kind))
#define IS_BASIC_SINT(_Kind) IS_BPROP_SINT(GetBasicKindProps(_Kind))
#define IS_BASIC_UINT(_Kind) IS_BPROP_UINT(GetBasicKindProps(_Kind))
#define IS_BASIC_AINT(_Kind) IS_BPROP_AINT(GetBasicKindProps(_Kind))
#define IS_BASIC_STREAM(_Kind) IS_BPROP_STREAM(GetBasicKindProps(_Kind))
#define IS_BASIC_SAMPLER(_Kind) IS_BPROP_SAMPLER(GetBasicKindProps(_Kind))
#define IS_BASIC_TEXTURE(_Kind) IS_BPROP_TEXTURE(GetBasicKindProps(_Kind))
#define IS_BASIC_OBJECT(_Kind) IS_BPROP_OBJECT(GetBasicKindProps(_Kind))
#define IS_BASIC_MIN_PRECISION(_Kind) \
  IS_BPROP_MIN_PRECISION(GetBasicKindProps(_Kind))
#define IS_BASIC_UNSIGNABLE(_Kind) IS_BPROP_UNSIGNABLE(GetBasicKindProps(_Kind))
#define IS_BASIC_ENUM(_Kind) IS_BPROP_ENUM(GetBasicKindProps(_Kind))
// Defines bitwise |, &, |=, and &= for a flag-style enum type so that its
// values can be combined without explicit casts at each use site.
#define BITWISE_ENUM_OPS(_Type) \
  inline _Type operator|(_Type F1, _Type F2) { \
    return (_Type)((UINT)F1 | (UINT)F2); \
  } \
  inline _Type operator&(_Type F1, _Type F2) { \
    return (_Type)((UINT)F1 & (UINT)F2); \
  } \
  inline _Type &operator|=(_Type &F1, _Type F2) { \
    F1 = F1 | F2; \
    return F1; \
  } \
  inline _Type &operator&=(_Type &F1, _Type F2) { \
    F1 = F1 & F2; \
    return F1; \
  } \
  inline _Type &operator&=(_Type &F1, UINT F2) { \
    F1 = (_Type)((UINT)F1 & F2); \
    return F1; \
  }
// Structural ("shape") classification of an HLSL type, orthogonal to the
// element-level classification carried by ArBasicKind.
enum ArTypeObjectKind {
  AR_TOBJ_INVALID, // Flag for an unassigned / unavailable object type.
  AR_TOBJ_VOID,    // Represents the type for functions with not returned valued.
  AR_TOBJ_BASIC,   // Represents a primitive type.
  AR_TOBJ_COMPOUND, // Represents a struct or class.
  AR_TOBJ_INTERFACE, // Represents an interface.
  AR_TOBJ_POINTER,   // Represents a pointer to another type.
  AR_TOBJ_OBJECT,    // Represents a built-in object.
  AR_TOBJ_ARRAY,     // Represents an array of other types.
  AR_TOBJ_MATRIX,    // Represents a matrix of basic types.
  AR_TOBJ_VECTOR,    // Represents a vector of basic types.
  AR_TOBJ_QUALIFIER, // Represents another type plus an ArTypeQualifier.
  AR_TOBJ_INNER_OBJ, // Represents a built-in inner object, such as an
                     // indexer object used to implement .mips[1].
  AR_TOBJ_STRING,    // Represents a string
  AR_TOBJ_DEPENDENT, // Dependent type for template.
};
// Input flags controlling how a type conversion attempt is performed.
enum TYPE_CONVERSION_FLAGS {
  TYPE_CONVERSION_DEFAULT =
      0x00000000, // Indicates an implicit conversion is done.
  TYPE_CONVERSION_EXPLICIT =
      0x00000001, // Indicates a conversion is done through an explicit cast.
  TYPE_CONVERSION_BY_REFERENCE =
      0x00000002, // Indicates a conversion is done to an output parameter.
};
// Output flags describing what happened during a conversion attempt
// (may be combined; see BITWISE_ENUM_OPS below).
enum TYPE_CONVERSION_REMARKS {
  TYPE_CONVERSION_NONE = 0x00000000,
  TYPE_CONVERSION_PRECISION_LOSS = 0x00000001,
  TYPE_CONVERSION_IDENTICAL = 0x00000002,
  TYPE_CONVERSION_TO_VOID = 0x00000004,
  TYPE_CONVERSION_ELT_TRUNCATION = 0x00000008,
};
BITWISE_ENUM_OPS(TYPE_CONVERSION_REMARKS)
// Aliases used when a shape kind is referenced under a different role.
#define AR_TOBJ_SCALAR AR_TOBJ_BASIC
#define AR_TOBJ_UNKNOWN AR_TOBJ_INVALID
// 64-bit aggregate properties of a full type (as opposed to the per-kind
// BPROP_* masks above); the HAS_* bits record what a compound type contains
// anywhere in its members.
#define AR_TPROP_VOID 0x0000000000000001
#define AR_TPROP_CONST 0x0000000000000002
#define AR_TPROP_IMP_CONST 0x0000000000000004
#define AR_TPROP_OBJECT 0x0000000000000008
#define AR_TPROP_SCALAR 0x0000000000000010
#define AR_TPROP_UNSIGNED 0x0000000000000020
#define AR_TPROP_NUMERIC 0x0000000000000040
#define AR_TPROP_INTEGRAL 0x0000000000000080
#define AR_TPROP_FLOATING 0x0000000000000100
#define AR_TPROP_LITERAL 0x0000000000000200
#define AR_TPROP_POINTER 0x0000000000000400
#define AR_TPROP_INPUT_PATCH 0x0000000000000800
#define AR_TPROP_OUTPUT_PATCH 0x0000000000001000
#define AR_TPROP_INH_IFACE 0x0000000000002000
#define AR_TPROP_HAS_COMPOUND 0x0000000000004000
#define AR_TPROP_HAS_TEXTURES 0x0000000000008000
#define AR_TPROP_HAS_SAMPLERS 0x0000000000010000
#define AR_TPROP_HAS_SAMPLER_CMPS 0x0000000000020000
#define AR_TPROP_HAS_STREAMS 0x0000000000040000
#define AR_TPROP_HAS_OTHER_OBJECTS 0x0000000000080000
#define AR_TPROP_HAS_BASIC 0x0000000000100000
#define AR_TPROP_HAS_BUFFERS 0x0000000000200000
#define AR_TPROP_HAS_ROBJECTS 0x0000000000400000
#define AR_TPROP_HAS_POINTERS 0x0000000000800000
#define AR_TPROP_INDEXABLE 0x0000000001000000
#define AR_TPROP_HAS_MIPS 0x0000000002000000
#define AR_TPROP_WRITABLE_GLOBAL 0x0000000004000000
#define AR_TPROP_HAS_UAVS 0x0000000008000000
#define AR_TPROP_HAS_BYTEADDRESS 0x0000000010000000
#define AR_TPROP_HAS_STRUCTURED 0x0000000020000000
#define AR_TPROP_HAS_SAMPLE 0x0000000040000000
#define AR_TPROP_MIN_PRECISION 0x0000000080000000
// NOTE(review): the two values below carry bit 0x8000
// (AR_TPROP_HAS_TEXTURES) in addition to their own high bit. This may be
// deliberate, but it is not a single-bit flag like its neighbors; confirm
// before performing bit-exact comparisons against these masks.
#define AR_TPROP_HAS_CBUFFERS 0x0000000100008000
#define AR_TPROP_HAS_TBUFFERS 0x0000000200008000
#define AR_TPROP_ALL 0xffffffffffffffff
// Union of every bit that indicates the type contains some object resource.
#define AR_TPROP_HAS_OBJECTS \
  (AR_TPROP_HAS_TEXTURES | AR_TPROP_HAS_SAMPLERS | AR_TPROP_HAS_SAMPLER_CMPS | \
   AR_TPROP_HAS_STREAMS | AR_TPROP_HAS_OTHER_OBJECTS | AR_TPROP_HAS_BUFFERS | \
   AR_TPROP_HAS_ROBJECTS | AR_TPROP_HAS_UAVS | AR_TPROP_HAS_BYTEADDRESS | \
   AR_TPROP_HAS_STRUCTURED)
#define AR_TPROP_HAS_BASIC_RESOURCES \
  (AR_TPROP_HAS_TEXTURES | AR_TPROP_HAS_SAMPLERS | AR_TPROP_HAS_SAMPLER_CMPS | \
   AR_TPROP_HAS_BUFFERS | AR_TPROP_HAS_UAVS)
// Bits that are combined with bitwise-or when aggregating member properties.
#define AR_TPROP_UNION_BITS \
  (AR_TPROP_INH_IFACE | AR_TPROP_HAS_COMPOUND | AR_TPROP_HAS_TEXTURES | \
   AR_TPROP_HAS_SAMPLERS | AR_TPROP_HAS_SAMPLER_CMPS | AR_TPROP_HAS_STREAMS | \
   AR_TPROP_HAS_OTHER_OBJECTS | AR_TPROP_HAS_BASIC | AR_TPROP_HAS_BUFFERS | \
   AR_TPROP_HAS_ROBJECTS | AR_TPROP_HAS_POINTERS | AR_TPROP_WRITABLE_GLOBAL | \
   AR_TPROP_HAS_UAVS | AR_TPROP_HAS_BYTEADDRESS | AR_TPROP_HAS_STRUCTURED | \
   AR_TPROP_MIN_PRECISION)
// Options controlling type-info collection (what is allowed and how
// elements are packed/counted).
#define AR_TINFO_ALLOW_COMPLEX 0x00000001
#define AR_TINFO_ALLOW_OBJECTS 0x00000002
#define AR_TINFO_IGNORE_QUALIFIERS 0x00000004
#define AR_TINFO_OBJECTS_AS_ELEMENTS 0x00000008
#define AR_TINFO_PACK_SCALAR 0x00000010
#define AR_TINFO_PACK_ROW_MAJOR 0x00000020
#define AR_TINFO_PACK_TEMP_ARRAY 0x00000040
#define AR_TINFO_ALL_VAR_INFO 0x00000080
#define AR_TINFO_ALLOW_ALL (AR_TINFO_ALLOW_COMPLEX | AR_TINFO_ALLOW_OBJECTS)
#define AR_TINFO_PACK_CBUFFER 0
#define AR_TINFO_LAYOUT_PACK_ALL \
  (AR_TINFO_PACK_SCALAR | AR_TINFO_PACK_TEMP_ARRAY)
#define AR_TINFO_SIMPLE_OBJECTS \
  (AR_TINFO_ALLOW_OBJECTS | AR_TINFO_OBJECTS_AS_ELEMENTS)
// Summary of a type produced by type-info collection: its structural shape,
// its element kind/type, and its dimensions.
struct ArTypeInfo {
  ArTypeObjectKind ShapeKind; // The shape of the type (basic, matrix, etc.)
  ArBasicKind EltKind;        // The primitive type of elements in this type.
  const clang::Type *EltTy;   // Canonical element type ptr
  ArBasicKind
      ObjKind; // The object type for this type (textures, buffers, etc.)
  UINT uRows;      // Row count (1 for non-matrix shapes).
  UINT uCols;      // Column count (vector length for vectors).
  UINT uTotalElts; // Total element count across the whole type.
};
using namespace clang;
using namespace clang::sema;
using namespace hlsl;
extern const char *HLSLScalarTypeNames[];
// Named constants that make call sites of verbose clang APIs
// self-documenting; each names the positional argument it stands in for.
static const bool ExplicitConversionFalse =
    false; // a conversion operation is not the result of an explicit cast
static const bool ParameterPackFalse =
    false; // template parameter is not an ellipsis.
// NOTE(review): named ...True but initialized to false — looks inverted;
// confirm the value expected at call sites before changing it.
static const bool TypenameTrue =
    false; // 'typename' specified rather than 'class' for a template argument.
static const bool DelayTypeCreationTrue =
    true; // delay type creation for a declaration
static const SourceLocation NoLoc; // no source location attribution available
static const SourceRange NoRange;  // no source range attribution available
static const bool HasWrittenPrototypeTrue =
    true; // function had the prototype written
static const bool InlineSpecifiedFalse =
    false; // function was not specified as inline
static const bool IsConstexprFalse = false; // function is not constexpr
static const bool ListInitializationFalse =
    false; // not performing a list initialization
static const bool SuppressWarningsFalse =
    false; // do not suppress warning diagnostics
static const bool SuppressErrorsTrue = true; // suppress error diagnostics
static const bool SuppressErrorsFalse =
    false; // do not suppress error diagnostics
static const int OneRow = 1; // a single row for a type
static const bool MipsFalse = false; // a type does not support the .mips member
static const bool MipsTrue = true;   // a type supports the .mips member
static const bool SampleFalse =
    false; // a type does not support the .sample member
static const bool SampleTrue = true; // a type supports the .sample member
static const size_t MaxVectorSize = 4; // maximum size for a vector
/// <summary>Finds the specialization of templateDecl matching templateArgs,
/// creating and implicitly instantiating it if it does not exist yet, and
/// returns the sugared TemplateSpecializationType for it.</summary>
static QualType
GetOrCreateTemplateSpecialization(ASTContext &context, Sema &sema,
                                  ClassTemplateDecl *templateDecl,
                                  ArrayRef<TemplateArgument> templateArgs) {
  DXASSERT_NOMSG(templateDecl);
  DeclContext *currentDeclContext = context.getTranslationUnitDecl();
  SmallVector<TemplateArgument, 3> templateArgsForDecl;
  for (const TemplateArgument &Arg : templateArgs) {
    if (Arg.getKind() == TemplateArgument::Type) {
      // the class template need to use CanonicalType
      templateArgsForDecl.emplace_back(
          TemplateArgument(Arg.getAsType().getCanonicalType()));
    } else
      templateArgsForDecl.emplace_back(Arg);
  }
  // First, try looking up existing specialization.
  // InsertPos is filled in here and reused by AddSpecialization below when
  // no existing specialization is found - do not reorder these calls.
  void *InsertPos = nullptr;
  ClassTemplateSpecializationDecl *specializationDecl =
      templateDecl->findSpecialization(templateArgsForDecl, InsertPos);
  if (specializationDecl) {
    // Instantiate the class template if not yet.
    if (specializationDecl->getInstantiatedFrom().isNull()) {
      // InstantiateClassTemplateSpecialization returns true if it finds an
      // error.
      DXVERIFY_NOMSG(false ==
                     sema.InstantiateClassTemplateSpecialization(
                         NoLoc, specializationDecl,
                         TemplateSpecializationKind::TSK_ImplicitInstantiation,
                         true));
    }
    // Return a sugared type that spells the original (non-canonicalized)
    // template arguments over the existing specialization.
    return context.getTemplateSpecializationType(
        TemplateName(templateDecl), templateArgs.data(), templateArgs.size(),
        context.getTypeDeclType(specializationDecl));
  }
  // No existing specialization: create one and instantiate it implicitly.
  specializationDecl = ClassTemplateSpecializationDecl::Create(
      context, TagDecl::TagKind::TTK_Class, currentDeclContext, NoLoc, NoLoc,
      templateDecl, templateArgsForDecl.data(), templateArgsForDecl.size(),
      nullptr);
  // InstantiateClassTemplateSpecialization returns true if it finds an error.
  DXVERIFY_NOMSG(false ==
                 sema.InstantiateClassTemplateSpecialization(
                     NoLoc, specializationDecl,
                     TemplateSpecializationKind::TSK_ImplicitInstantiation,
                     true));
  templateDecl->AddSpecialization(specializationDecl, InsertPos);
  specializationDecl->setImplicit(true);
  QualType canonType = context.getTypeDeclType(specializationDecl);
  DXASSERT(isa<RecordType>(canonType),
           "type of non-dependent specialization is not a RecordType");
  // Build a written-arguments list so the returned type carries the
  // caller-provided (sugared) template arguments.
  TemplateArgumentListInfo templateArgumentList(NoLoc, NoLoc);
  TemplateArgumentLocInfo NoTemplateArgumentLocInfo;
  for (unsigned i = 0; i < templateArgs.size(); i++) {
    templateArgumentList.addArgument(
        TemplateArgumentLoc(templateArgs[i], NoTemplateArgumentLocInfo));
  }
  return context.getTemplateSpecializationType(TemplateName(templateDecl),
                                               templateArgumentList, canonType);
}
/// <summary>Returns the template specialization type for a matrix with the
/// given element type and dimensions, creating and instantiating it in the
/// AST if it does not already exist.</summary>
static QualType GetOrCreateMatrixSpecialization(
    ASTContext &context, Sema *sema, ClassTemplateDecl *matrixTemplateDecl,
    QualType elementType, uint64_t rowCount, uint64_t colCount) {
  DXASSERT_NOMSG(sema);
  // Wraps an integral dimension as a non-type template argument of type
  // 'int'.
  auto MakeIntArg = [&context](uint64_t value) {
    return TemplateArgument(
        context,
        llvm::APSInt(llvm::APInt(context.getIntWidth(context.IntTy), value),
                     false),
        context.IntTy);
  };
  TemplateArgument args[3] = {TemplateArgument(elementType),
                              MakeIntArg(rowCount), MakeIntArg(colCount)};
  QualType specialization = GetOrCreateTemplateSpecialization(
      context, *sema, matrixTemplateDecl, ArrayRef<TemplateArgument>(args));
#ifndef NDEBUG
  // Sanity check: the instantiated record must expose the 'h' handle field.
  CXXRecordDecl *recordDecl = specialization->getAsCXXRecordDecl();
  DXASSERT(recordDecl,
           "type of non-dependent specialization is not a RecordType");
  DXASSERT(!recordDecl
                ->lookup(DeclarationName(&context.Idents.get(StringRef("h"))))
                .empty(),
           "otherwise matrix handle cannot be looked up");
#endif
  return specialization;
}
/// <summary>Returns the template specialization type for a vector with the
/// given element type and column count, creating and instantiating it in the
/// AST if it does not already exist.</summary>
static QualType
GetOrCreateVectorSpecialization(ASTContext &context, Sema *sema,
                                ClassTemplateDecl *vectorTemplateDecl,
                                QualType elementType, uint64_t colCount) {
  DXASSERT_NOMSG(sema);
  DXASSERT_NOMSG(vectorTemplateDecl);
  // Express the column count as a non-type template argument of type 'int'.
  llvm::APSInt colCountValue(
      llvm::APInt(context.getIntWidth(context.IntTy), colCount), false);
  TemplateArgument args[2] = {
      TemplateArgument(elementType),
      TemplateArgument(context, colCountValue, context.IntTy)};
  QualType specialization = GetOrCreateTemplateSpecialization(
      context, *sema, vectorTemplateDecl, ArrayRef<TemplateArgument>(args));
#ifndef NDEBUG
  // Sanity check: the instantiated record must expose the 'h' handle field.
  CXXRecordDecl *recordDecl = specialization->getAsCXXRecordDecl();
  DXASSERT(recordDecl,
           "type of non-dependent specialization is not a RecordType");
  DXASSERT(!recordDecl
                ->lookup(DeclarationName(&context.Idents.get(StringRef("h"))))
                .empty(),
           "otherwise vector handle cannot be looked up");
#endif
  return specialization;
}
/// <summary>Instantiates a new *NodeOutputRecords type specialization or gets
/// an existing one from the AST.</summary>
/// Mirrors GetOrCreateMatrixSpecialization/GetOrCreateVectorSpecialization,
/// but with a single element-type template argument.
static QualType
GetOrCreateNodeOutputRecordSpecialization(ASTContext &context, Sema *sema,
                                          _In_ ClassTemplateDecl *templateDecl,
                                          QualType elementType) {
  DXASSERT_NOMSG(sema);
  DXASSERT_NOMSG(templateDecl);
  TemplateArgument templateArgs[1] = {TemplateArgument(elementType)};
  QualType specializationType = GetOrCreateTemplateSpecialization(
      context, *sema, templateDecl, ArrayRef<TemplateArgument>(templateArgs));
// Guarded by NDEBUG (not DBG) so this check runs in all debug builds,
// matching the equivalent checks in the matrix/vector specialization
// helpers; the previous DBG guard only fired in MSVC-style checked builds.
#ifndef NDEBUG
  // Verify that we can read the field member from the template record.
  DXASSERT(specializationType->getAsCXXRecordDecl(),
           "type of non-dependent specialization is not a RecordType");
  DeclContext::lookup_result lookupResult =
      specializationType->getAsCXXRecordDecl()->lookup(
          DeclarationName(&context.Idents.get(StringRef("h"))));
  DXASSERT(!lookupResult.empty(),
           "otherwise *NodeOutputRecords handle cannot be looked up");
#endif
  return specializationType;
}
// Decls.cpp constants start here - these should be refactored or, better,
// replaced with clang::Type-based constructs.
static const LPCSTR kBuiltinIntrinsicTableName = "op";
// Shape-kind lists for intrinsic argument matching; each list is terminated
// by AR_TOBJ_UNKNOWN.
static const ArTypeObjectKind g_ScalarTT[] = {AR_TOBJ_SCALAR, AR_TOBJ_UNKNOWN};
static const ArTypeObjectKind g_VectorTT[] = {AR_TOBJ_VECTOR, AR_TOBJ_UNKNOWN};
static const ArTypeObjectKind g_MatrixTT[] = {AR_TOBJ_MATRIX, AR_TOBJ_UNKNOWN};
static const ArTypeObjectKind g_AnyTT[] = {AR_TOBJ_SCALAR, AR_TOBJ_VECTOR,
                                           AR_TOBJ_MATRIX, AR_TOBJ_UNKNOWN};
static const ArTypeObjectKind g_ObjectTT[] = {AR_TOBJ_OBJECT, AR_TOBJ_STRING,
                                              AR_TOBJ_UNKNOWN};
static const ArTypeObjectKind g_NullTT[] = {AR_TOBJ_VOID, AR_TOBJ_UNKNOWN};
static const ArTypeObjectKind g_ArrayTT[] = {AR_TOBJ_ARRAY, AR_TOBJ_UNKNOWN};
// Indexed by LEGAL_INTRINSIC_TEMPLATES (LITEMPLATE_*) values; the C_ASSERT
// below keeps the table in lock-step with that enumeration.
const ArTypeObjectKind *g_LegalIntrinsicTemplates[] = {
    g_NullTT, g_ScalarTT, g_VectorTT, g_MatrixTT,
    g_AnyTT, g_ObjectTT, g_ArrayTT,
};
C_ASSERT(ARRAYSIZE(g_LegalIntrinsicTemplates) == LITEMPLATE_COUNT);
//
// Component-type (CT) lists used for intrinsic argument matching. Each list
// is terminated by AR_BASIC_UNKNOWN; AR_BASIC_NOCAST, where present, marks
// that no implicit cast to other kinds is permitted.
//
// The first one is used to name the representative group, so make
// sure its name will make sense in error messages.
//
static const ArBasicKind g_BoolCT[] = {AR_BASIC_BOOL, AR_BASIC_UNKNOWN};
static const ArBasicKind g_IntCT[] = {AR_BASIC_INT32, AR_BASIC_LITERAL_INT,
                                      AR_BASIC_UNKNOWN};
static const ArBasicKind g_UIntCT[] = {AR_BASIC_UINT32, AR_BASIC_LITERAL_INT,
                                       AR_BASIC_UNKNOWN};
// We use the first element for default if matching kind is missing in the list.
// AR_BASIC_INT32 should be the default for any int since min precision integers
// should map to int32, not int16 or int64
static const ArBasicKind g_AnyIntCT[] = {
    AR_BASIC_INT32, AR_BASIC_INT16, AR_BASIC_UINT32, AR_BASIC_UINT16,
    AR_BASIC_INT64, AR_BASIC_UINT64, AR_BASIC_LITERAL_INT, AR_BASIC_UNKNOWN};
static const ArBasicKind g_AnyInt32CT[] = {
    AR_BASIC_INT32, AR_BASIC_UINT32, AR_BASIC_LITERAL_INT, AR_BASIC_UNKNOWN};
static const ArBasicKind g_UIntOnlyCT[] = {AR_BASIC_UINT32, AR_BASIC_UINT64,
                                           AR_BASIC_LITERAL_INT,
                                           AR_BASIC_NOCAST, AR_BASIC_UNKNOWN};
static const ArBasicKind g_FloatCT[] = {
    AR_BASIC_FLOAT32, AR_BASIC_FLOAT32_PARTIAL_PRECISION,
    AR_BASIC_LITERAL_FLOAT, AR_BASIC_UNKNOWN};
static const ArBasicKind g_AnyFloatCT[] = {
    AR_BASIC_FLOAT32, AR_BASIC_FLOAT32_PARTIAL_PRECISION,
    AR_BASIC_FLOAT16, AR_BASIC_FLOAT64,
    AR_BASIC_LITERAL_FLOAT, AR_BASIC_MIN10FLOAT,
    AR_BASIC_MIN16FLOAT, AR_BASIC_UNKNOWN};
static const ArBasicKind g_FloatLikeCT[] = {
    AR_BASIC_FLOAT32, AR_BASIC_FLOAT32_PARTIAL_PRECISION,
    AR_BASIC_FLOAT16, AR_BASIC_LITERAL_FLOAT,
    AR_BASIC_MIN10FLOAT, AR_BASIC_MIN16FLOAT,
    AR_BASIC_UNKNOWN};
static const ArBasicKind g_FloatDoubleCT[] = {
    AR_BASIC_FLOAT32, AR_BASIC_FLOAT32_PARTIAL_PRECISION, AR_BASIC_FLOAT64,
    AR_BASIC_LITERAL_FLOAT, AR_BASIC_UNKNOWN};
static const ArBasicKind g_DoubleCT[] = {
    AR_BASIC_FLOAT64, AR_BASIC_LITERAL_FLOAT, AR_BASIC_UNKNOWN};
static const ArBasicKind g_DoubleOnlyCT[] = {AR_BASIC_FLOAT64,
                                             AR_BASIC_LITERAL_FLOAT,
                                             AR_BASIC_NOCAST, AR_BASIC_UNKNOWN};
static const ArBasicKind g_NumericCT[] = {
    AR_BASIC_FLOAT32, AR_BASIC_FLOAT32_PARTIAL_PRECISION,
    AR_BASIC_FLOAT16, AR_BASIC_FLOAT64,
    AR_BASIC_LITERAL_FLOAT, AR_BASIC_MIN10FLOAT,
    AR_BASIC_MIN16FLOAT, AR_BASIC_LITERAL_INT,
    AR_BASIC_INT16, AR_BASIC_INT32,
    AR_BASIC_UINT16, AR_BASIC_UINT32,
    AR_BASIC_MIN12INT, AR_BASIC_MIN16INT,
    AR_BASIC_MIN16UINT, AR_BASIC_INT64,
    AR_BASIC_UINT64, AR_BASIC_UNKNOWN};
static const ArBasicKind g_Numeric32CT[] = {
    AR_BASIC_FLOAT32, AR_BASIC_FLOAT32_PARTIAL_PRECISION,
    AR_BASIC_LITERAL_FLOAT, AR_BASIC_LITERAL_INT,
    AR_BASIC_INT32, AR_BASIC_UINT32,
    AR_BASIC_UNKNOWN};
static const ArBasicKind g_Numeric32OnlyCT[] = {
    AR_BASIC_FLOAT32, AR_BASIC_FLOAT32_PARTIAL_PRECISION,
    AR_BASIC_LITERAL_FLOAT, AR_BASIC_LITERAL_INT,
    AR_BASIC_INT32, AR_BASIC_UINT32,
    AR_BASIC_NOCAST, AR_BASIC_UNKNOWN};
static const ArBasicKind g_AnyCT[] = {
    AR_BASIC_FLOAT32, AR_BASIC_FLOAT32_PARTIAL_PRECISION,
    AR_BASIC_FLOAT16, AR_BASIC_FLOAT64,
    AR_BASIC_LITERAL_FLOAT, AR_BASIC_MIN10FLOAT,
    AR_BASIC_MIN16FLOAT, AR_BASIC_INT16,
    AR_BASIC_UINT16, AR_BASIC_LITERAL_INT,
    AR_BASIC_INT32, AR_BASIC_UINT32,
    AR_BASIC_MIN12INT, AR_BASIC_MIN16INT,
    AR_BASIC_MIN16UINT, AR_BASIC_BOOL,
    AR_BASIC_INT64, AR_BASIC_UINT64,
    AR_BASIC_UNKNOWN};
// Object-kind lists (samplers, textures, buffers, node records, ...).
static const ArBasicKind g_AnySamplerCT[] = {
    AR_OBJECT_SAMPLER, AR_OBJECT_SAMPLERCOMPARISON, AR_BASIC_UNKNOWN};
static const ArBasicKind g_Sampler1DCT[] = {AR_OBJECT_SAMPLER1D,
                                            AR_BASIC_UNKNOWN};
static const ArBasicKind g_Sampler2DCT[] = {AR_OBJECT_SAMPLER2D,
                                            AR_BASIC_UNKNOWN};
static const ArBasicKind g_Sampler3DCT[] = {AR_OBJECT_SAMPLER3D,
                                            AR_BASIC_UNKNOWN};
static const ArBasicKind g_SamplerCUBECT[] = {AR_OBJECT_SAMPLERCUBE,
                                              AR_BASIC_UNKNOWN};
static const ArBasicKind g_SamplerCmpCT[] = {AR_OBJECT_SAMPLERCOMPARISON,
                                             AR_BASIC_UNKNOWN};
static const ArBasicKind g_SamplerCT[] = {AR_OBJECT_SAMPLER, AR_BASIC_UNKNOWN};
static const ArBasicKind g_Texture2DCT[] = {AR_OBJECT_TEXTURE2D,
                                            AR_BASIC_UNKNOWN};
static const ArBasicKind g_Texture2DArrayCT[] = {AR_OBJECT_TEXTURE2D_ARRAY,
                                                 AR_BASIC_UNKNOWN};
static const ArBasicKind g_ResourceCT[] = {AR_OBJECT_HEAP_RESOURCE,
                                           AR_BASIC_UNKNOWN};
static const ArBasicKind g_RayDescCT[] = {AR_OBJECT_RAY_DESC, AR_BASIC_UNKNOWN};
static const ArBasicKind g_AccelerationStructCT[] = {
    AR_OBJECT_ACCELERATION_STRUCT, AR_BASIC_UNKNOWN};
static const ArBasicKind g_UDTCT[] = {AR_OBJECT_USER_DEFINED_TYPE,
                                      AR_BASIC_UNKNOWN};
static const ArBasicKind g_StringCT[] = {AR_OBJECT_STRING_LITERAL,
                                         AR_OBJECT_STRING, AR_BASIC_UNKNOWN};
static const ArBasicKind g_NullCT[] = {AR_OBJECT_NULL, AR_BASIC_UNKNOWN};
static const ArBasicKind g_WaveCT[] = {AR_OBJECT_WAVE, AR_BASIC_UNKNOWN};
static const ArBasicKind g_UInt64CT[] = {AR_BASIC_UINT64, AR_BASIC_UNKNOWN};
static const ArBasicKind g_Float16CT[] = {
    AR_BASIC_FLOAT16, AR_BASIC_LITERAL_FLOAT, AR_BASIC_UNKNOWN};
static const ArBasicKind g_Int16CT[] = {AR_BASIC_INT16, AR_BASIC_LITERAL_INT,
                                        AR_BASIC_UNKNOWN};
static const ArBasicKind g_UInt16CT[] = {AR_BASIC_UINT16, AR_BASIC_LITERAL_INT,
                                         AR_BASIC_UNKNOWN};
static const ArBasicKind g_Numeric16OnlyCT[] = {
    AR_BASIC_FLOAT16, AR_BASIC_INT16, AR_BASIC_UINT16,
    AR_BASIC_LITERAL_FLOAT, AR_BASIC_LITERAL_INT, AR_BASIC_NOCAST,
    AR_BASIC_UNKNOWN};
static const ArBasicKind g_Int32OnlyCT[] = {AR_BASIC_INT32, AR_BASIC_UINT32,
                                            AR_BASIC_LITERAL_INT,
                                            AR_BASIC_NOCAST, AR_BASIC_UNKNOWN};
static const ArBasicKind g_Float32OnlyCT[] = {
    AR_BASIC_FLOAT32, AR_BASIC_LITERAL_FLOAT, AR_BASIC_NOCAST,
    AR_BASIC_UNKNOWN};
static const ArBasicKind g_Int64OnlyCT[] = {AR_BASIC_UINT64, AR_BASIC_INT64,
                                            AR_BASIC_LITERAL_INT,
                                            AR_BASIC_NOCAST, AR_BASIC_UNKNOWN};
static const ArBasicKind g_AnyInt64CT[] = {
    AR_BASIC_INT64, AR_BASIC_UINT64, AR_BASIC_LITERAL_INT, AR_BASIC_UNKNOWN};
static const ArBasicKind g_Int8_4PackedCT[] = {
    AR_BASIC_INT8_4PACKED, AR_BASIC_UINT32, AR_BASIC_LITERAL_INT,
    AR_BASIC_UNKNOWN};
static const ArBasicKind g_UInt8_4PackedCT[] = {
    AR_BASIC_UINT8_4PACKED, AR_BASIC_UINT32, AR_BASIC_LITERAL_INT,
    AR_BASIC_UNKNOWN};
static const ArBasicKind g_AnyInt16Or32CT[] = {
    AR_BASIC_INT32, AR_BASIC_UINT32, AR_BASIC_INT16,
    AR_BASIC_UINT16, AR_BASIC_LITERAL_INT, AR_BASIC_UNKNOWN};
static const ArBasicKind g_SInt16Or32OnlyCT[] = {
    AR_BASIC_INT32, AR_BASIC_INT16, AR_BASIC_LITERAL_INT, AR_BASIC_NOCAST,
    AR_BASIC_UNKNOWN};
static const ArBasicKind g_ByteAddressBufferCT[] = {
    AR_OBJECT_BYTEADDRESS_BUFFER, AR_BASIC_UNKNOWN};
static const ArBasicKind g_RWByteAddressBufferCT[] = {
    AR_OBJECT_RWBYTEADDRESS_BUFFER, AR_BASIC_UNKNOWN};
static const ArBasicKind g_NodeRecordOrUAVCT[] = {
    AR_OBJECT_DISPATCH_NODE_INPUT_RECORD,
    AR_OBJECT_RWDISPATCH_NODE_INPUT_RECORD,
    AR_OBJECT_GROUP_NODE_INPUT_RECORDS,
    AR_OBJECT_RWGROUP_NODE_INPUT_RECORDS,
    AR_OBJECT_THREAD_NODE_INPUT_RECORD,
    AR_OBJECT_RWTHREAD_NODE_INPUT_RECORD,
    AR_OBJECT_NODE_OUTPUT,
    AR_OBJECT_THREAD_NODE_OUTPUT_RECORDS,
    AR_OBJECT_GROUP_NODE_OUTPUT_RECORDS,
    AR_OBJECT_RWBUFFER,
    AR_OBJECT_RWTEXTURE1D,
    AR_OBJECT_RWTEXTURE1D_ARRAY,
    AR_OBJECT_RWTEXTURE2D,
    AR_OBJECT_RWTEXTURE2D_ARRAY,
    AR_OBJECT_RWTEXTURE3D,
    AR_OBJECT_RWSTRUCTURED_BUFFER,
    AR_OBJECT_RWBYTEADDRESS_BUFFER,
    AR_OBJECT_APPEND_STRUCTURED_BUFFER,
    AR_BASIC_UNKNOWN};
static const ArBasicKind g_GroupNodeOutputRecordsCT[] = {
    AR_OBJECT_GROUP_NODE_OUTPUT_RECORDS, AR_BASIC_UNKNOWN};
static const ArBasicKind g_ThreadNodeOutputRecordsCT[] = {
    AR_OBJECT_THREAD_NODE_OUTPUT_RECORDS, AR_BASIC_UNKNOWN};
static const ArBasicKind g_AnyOutputRecordCT[] = {
    AR_OBJECT_GROUP_NODE_OUTPUT_RECORDS, AR_OBJECT_THREAD_NODE_OUTPUT_RECORDS,
    AR_BASIC_UNKNOWN};
// Basic kinds, indexed by a LEGAL_INTRINSIC_COMPTYPES value.
// NOTE: entry order must exactly mirror the LICOMPTYPE_* enumerant order;
// the static_assert below only checks the count, not the ordering, so keep
// the per-entry comments in sync when inserting new rows.
const ArBasicKind *g_LegalIntrinsicCompTypes[] = {
g_NullCT, // LICOMPTYPE_VOID
g_BoolCT, // LICOMPTYPE_BOOL
g_IntCT, // LICOMPTYPE_INT
g_UIntCT, // LICOMPTYPE_UINT
g_AnyIntCT, // LICOMPTYPE_ANY_INT
g_AnyInt32CT, // LICOMPTYPE_ANY_INT32
g_UIntOnlyCT, // LICOMPTYPE_UINT_ONLY
g_FloatCT, // LICOMPTYPE_FLOAT
g_AnyFloatCT, // LICOMPTYPE_ANY_FLOAT
g_FloatLikeCT, // LICOMPTYPE_FLOAT_LIKE
g_FloatDoubleCT, // LICOMPTYPE_FLOAT_DOUBLE
g_DoubleCT, // LICOMPTYPE_DOUBLE
g_DoubleOnlyCT, // LICOMPTYPE_DOUBLE_ONLY
g_NumericCT, // LICOMPTYPE_NUMERIC
g_Numeric32CT, // LICOMPTYPE_NUMERIC32
g_Numeric32OnlyCT, // LICOMPTYPE_NUMERIC32_ONLY
g_AnyCT, // LICOMPTYPE_ANY
g_Sampler1DCT, // LICOMPTYPE_SAMPLER1D
g_Sampler2DCT, // LICOMPTYPE_SAMPLER2D
g_Sampler3DCT, // LICOMPTYPE_SAMPLER3D
g_SamplerCUBECT, // LICOMPTYPE_SAMPLERCUBE
g_SamplerCmpCT, // LICOMPTYPE_SAMPLERCMP
g_SamplerCT, // LICOMPTYPE_SAMPLER
g_StringCT, // LICOMPTYPE_STRING
g_WaveCT, // LICOMPTYPE_WAVE
g_UInt64CT, // LICOMPTYPE_UINT64
g_Float16CT, // LICOMPTYPE_FLOAT16
g_Int16CT, // LICOMPTYPE_INT16
g_UInt16CT, // LICOMPTYPE_UINT16
g_Numeric16OnlyCT, // LICOMPTYPE_NUMERIC16_ONLY
g_RayDescCT, // LICOMPTYPE_RAYDESC
g_AccelerationStructCT, // LICOMPTYPE_ACCELERATION_STRUCT,
g_UDTCT, // LICOMPTYPE_USER_DEFINED_TYPE
g_Texture2DCT, // LICOMPTYPE_TEXTURE2D
g_Texture2DArrayCT, // LICOMPTYPE_TEXTURE2DARRAY
g_ResourceCT, // LICOMPTYPE_RESOURCE
g_Int32OnlyCT, // LICOMPTYPE_INT32_ONLY
g_Int64OnlyCT, // LICOMPTYPE_INT64_ONLY
g_AnyInt64CT, // LICOMPTYPE_ANY_INT64
g_Float32OnlyCT, // LICOMPTYPE_FLOAT32_ONLY
g_Int8_4PackedCT, // LICOMPTYPE_INT8_4PACKED
g_UInt8_4PackedCT, // LICOMPTYPE_UINT8_4PACKED
g_AnyInt16Or32CT, // LICOMPTYPE_ANY_INT16_OR_32
g_SInt16Or32OnlyCT, // LICOMPTYPE_SINT16_OR_32_ONLY
g_AnySamplerCT, // LICOMPTYPE_ANY_SAMPLER
g_ByteAddressBufferCT, // LICOMPTYPE_BYTEADDRESSBUFFER
g_RWByteAddressBufferCT, // LICOMPTYPE_RWBYTEADDRESSBUFFER
g_NodeRecordOrUAVCT, // LICOMPTYPE_NODE_RECORD_OR_UAV
g_AnyOutputRecordCT, // LICOMPTYPE_ANY_NODE_OUTPUT_RECORD
g_GroupNodeOutputRecordsCT, // LICOMPTYPE_GROUP_NODE_OUTPUT_RECORDS
g_ThreadNodeOutputRecordsCT, // LICOMPTYPE_THREAD_NODE_OUTPUT_RECORDS
};
static_assert(
ARRAYSIZE(g_LegalIntrinsicCompTypes) == LICOMPTYPE_COUNT,
"Intrinsic comp type table must be updated when new enumerants are added.");
// Decls.cpp constants ends here - these should be refactored or, better,
// replaced with clang::Type-based constructs.
// Basic kind objects that are represented as HLSL structures or templates.
// NOTE: the order of entries here is the master order; the parallel tables
// g_ArBasicKindsTemplateCount and g_ArBasicKindsSubscripts are indexed by the
// same position (the C_ASSERTs after them check only the counts).
static const ArBasicKind g_ArBasicKindsAsTypes[] = {
AR_OBJECT_BUFFER, // Buffer
// AR_OBJECT_TEXTURE,
AR_OBJECT_TEXTURE1D, // Texture1D
AR_OBJECT_TEXTURE1D_ARRAY, // Texture1DArray
AR_OBJECT_TEXTURE2D, // Texture2D
AR_OBJECT_TEXTURE2D_ARRAY, // Texture2DArray
AR_OBJECT_TEXTURE3D, // Texture3D
AR_OBJECT_TEXTURECUBE, // TextureCube
AR_OBJECT_TEXTURECUBE_ARRAY, // TextureCubeArray
AR_OBJECT_TEXTURE2DMS, // Texture2DMS
AR_OBJECT_TEXTURE2DMS_ARRAY, // Texture2DMSArray
AR_OBJECT_SAMPLER,
// AR_OBJECT_SAMPLER1D,
// AR_OBJECT_SAMPLER2D,
// AR_OBJECT_SAMPLER3D,
// AR_OBJECT_SAMPLERCUBE,
AR_OBJECT_SAMPLERCOMPARISON,
AR_OBJECT_CONSTANT_BUFFER, AR_OBJECT_TEXTURE_BUFFER,
AR_OBJECT_POINTSTREAM, AR_OBJECT_LINESTREAM, AR_OBJECT_TRIANGLESTREAM,
AR_OBJECT_INPUTPATCH, AR_OBJECT_OUTPUTPATCH,
AR_OBJECT_RWTEXTURE1D, AR_OBJECT_RWTEXTURE1D_ARRAY, AR_OBJECT_RWTEXTURE2D,
AR_OBJECT_RWTEXTURE2D_ARRAY, AR_OBJECT_RWTEXTURE3D, AR_OBJECT_RWBUFFER,
AR_OBJECT_BYTEADDRESS_BUFFER, AR_OBJECT_RWBYTEADDRESS_BUFFER,
AR_OBJECT_STRUCTURED_BUFFER, AR_OBJECT_RWSTRUCTURED_BUFFER,
// AR_OBJECT_RWSTRUCTURED_BUFFER_ALLOC,
// AR_OBJECT_RWSTRUCTURED_BUFFER_CONSUME,
AR_OBJECT_APPEND_STRUCTURED_BUFFER, AR_OBJECT_CONSUME_STRUCTURED_BUFFER,
AR_OBJECT_ROVBUFFER, AR_OBJECT_ROVBYTEADDRESS_BUFFER,
AR_OBJECT_ROVSTRUCTURED_BUFFER, AR_OBJECT_ROVTEXTURE1D,
AR_OBJECT_ROVTEXTURE1D_ARRAY, AR_OBJECT_ROVTEXTURE2D,
AR_OBJECT_ROVTEXTURE2D_ARRAY, AR_OBJECT_ROVTEXTURE3D,
AR_OBJECT_FEEDBACKTEXTURE2D, AR_OBJECT_FEEDBACKTEXTURE2D_ARRAY,
// SPIRV change starts
#ifdef ENABLE_SPIRV_CODEGEN
AR_OBJECT_VK_SUBPASS_INPUT, AR_OBJECT_VK_SUBPASS_INPUT_MS,
AR_OBJECT_VK_SPIRV_TYPE, AR_OBJECT_VK_SPIRV_OPAQUE_TYPE,
AR_OBJECT_VK_INTEGRAL_CONSTANT, AR_OBJECT_VK_LITERAL,
AR_OBJECT_VK_SPV_INTRINSIC_TYPE, AR_OBJECT_VK_SPV_INTRINSIC_RESULT_ID,
#endif // ENABLE_SPIRV_CODEGEN
// SPIRV change ends
AR_OBJECT_LEGACY_EFFECT, // Used for all unsupported but ignored legacy
// effect types
AR_OBJECT_WAVE, AR_OBJECT_RAY_DESC, AR_OBJECT_ACCELERATION_STRUCT,
AR_OBJECT_TRIANGLE_INTERSECTION_ATTRIBUTES,
// subobjects
AR_OBJECT_STATE_OBJECT_CONFIG, AR_OBJECT_GLOBAL_ROOT_SIGNATURE,
AR_OBJECT_LOCAL_ROOT_SIGNATURE, AR_OBJECT_SUBOBJECT_TO_EXPORTS_ASSOC,
AR_OBJECT_RAYTRACING_SHADER_CONFIG, AR_OBJECT_RAYTRACING_PIPELINE_CONFIG,
AR_OBJECT_TRIANGLE_HIT_GROUP, AR_OBJECT_PROCEDURAL_PRIMITIVE_HIT_GROUP,
AR_OBJECT_RAYTRACING_PIPELINE_CONFIG1,
AR_OBJECT_RAY_QUERY, AR_OBJECT_HEAP_RESOURCE, AR_OBJECT_HEAP_SAMPLER,
AR_OBJECT_RWTEXTURE2DMS, // RWTexture2DMS
AR_OBJECT_RWTEXTURE2DMS_ARRAY, // RWTexture2DMSArray
// Work Graphs
AR_OBJECT_EMPTY_NODE_INPUT, AR_OBJECT_DISPATCH_NODE_INPUT_RECORD,
AR_OBJECT_RWDISPATCH_NODE_INPUT_RECORD, AR_OBJECT_GROUP_NODE_INPUT_RECORDS,
AR_OBJECT_RWGROUP_NODE_INPUT_RECORDS, AR_OBJECT_THREAD_NODE_INPUT_RECORD,
AR_OBJECT_RWTHREAD_NODE_INPUT_RECORD,
AR_OBJECT_NODE_OUTPUT, AR_OBJECT_EMPTY_NODE_OUTPUT,
AR_OBJECT_NODE_OUTPUT_ARRAY, AR_OBJECT_EMPTY_NODE_OUTPUT_ARRAY,
AR_OBJECT_THREAD_NODE_OUTPUT_RECORDS, AR_OBJECT_GROUP_NODE_OUTPUT_RECORDS};
// Count of template arguments for basic kind of objects that look like
// templates (one or more type arguments).
// Parallel to g_ArBasicKindsAsTypes: entry i gives the template-argument
// count for g_ArBasicKindsAsTypes[i]; 0 means the object is not a template.
static const uint8_t g_ArBasicKindsTemplateCount[] = {
1, // AR_OBJECT_BUFFER
// AR_OBJECT_TEXTURE,
1, // AR_OBJECT_TEXTURE1D
1, // AR_OBJECT_TEXTURE1D_ARRAY
1, // AR_OBJECT_TEXTURE2D
1, // AR_OBJECT_TEXTURE2D_ARRAY
1, // AR_OBJECT_TEXTURE3D
1, // AR_OBJECT_TEXTURECUBE
1, // AR_OBJECT_TEXTURECUBE_ARRAY
2, // AR_OBJECT_TEXTURE2DMS
2, // AR_OBJECT_TEXTURE2DMS_ARRAY
0, // AR_OBJECT_SAMPLER
// AR_OBJECT_SAMPLER1D,
// AR_OBJECT_SAMPLER2D,
// AR_OBJECT_SAMPLER3D,
// AR_OBJECT_SAMPLERCUBE,
0, // AR_OBJECT_SAMPLERCOMPARISON
1, // AR_OBJECT_CONSTANT_BUFFER,
1, // AR_OBJECT_TEXTURE_BUFFER,
1, // AR_OBJECT_POINTSTREAM
1, // AR_OBJECT_LINESTREAM
1, // AR_OBJECT_TRIANGLESTREAM
2, // AR_OBJECT_INPUTPATCH
2, // AR_OBJECT_OUTPUTPATCH
1, // AR_OBJECT_RWTEXTURE1D
1, // AR_OBJECT_RWTEXTURE1D_ARRAY
1, // AR_OBJECT_RWTEXTURE2D
1, // AR_OBJECT_RWTEXTURE2D_ARRAY
1, // AR_OBJECT_RWTEXTURE3D
1, // AR_OBJECT_RWBUFFER
0, // AR_OBJECT_BYTEADDRESS_BUFFER
0, // AR_OBJECT_RWBYTEADDRESS_BUFFER
1, // AR_OBJECT_STRUCTURED_BUFFER
1, // AR_OBJECT_RWSTRUCTURED_BUFFER
// 1, // AR_OBJECT_RWSTRUCTURED_BUFFER_ALLOC
// 1, // AR_OBJECT_RWSTRUCTURED_BUFFER_CONSUME
1, // AR_OBJECT_APPEND_STRUCTURED_BUFFER
1, // AR_OBJECT_CONSUME_STRUCTURED_BUFFER
1, // AR_OBJECT_ROVBUFFER
0, // AR_OBJECT_ROVBYTEADDRESS_BUFFER
1, // AR_OBJECT_ROVSTRUCTURED_BUFFER
1, // AR_OBJECT_ROVTEXTURE1D
1, // AR_OBJECT_ROVTEXTURE1D_ARRAY
1, // AR_OBJECT_ROVTEXTURE2D
1, // AR_OBJECT_ROVTEXTURE2D_ARRAY
1, // AR_OBJECT_ROVTEXTURE3D
1, // AR_OBJECT_FEEDBACKTEXTURE2D
1, // AR_OBJECT_FEEDBACKTEXTURE2D_ARRAY
// SPIRV change starts
#ifdef ENABLE_SPIRV_CODEGEN
1, // AR_OBJECT_VK_SUBPASS_INPUT
1, // AR_OBJECT_VK_SUBPASS_INPUT_MS,
1, // AR_OBJECT_VK_SPIRV_TYPE
1, // AR_OBJECT_VK_SPIRV_OPAQUE_TYPE
1, // AR_OBJECT_VK_INTEGRAL_CONSTANT,
1, // AR_OBJECT_VK_LITERAL,
1, // AR_OBJECT_VK_SPV_INTRINSIC_TYPE
1, // AR_OBJECT_VK_SPV_INTRINSIC_RESULT_ID
#endif // ENABLE_SPIRV_CODEGEN
// SPIRV change ends
0, // AR_OBJECT_LEGACY_EFFECT // Used for all unsupported but ignored
// legacy effect types
0, // AR_OBJECT_WAVE
0, // AR_OBJECT_RAY_DESC
0, // AR_OBJECT_ACCELERATION_STRUCT
0, // AR_OBJECT_TRIANGLE_INTERSECTION_ATTRIBUTES
0, // AR_OBJECT_STATE_OBJECT_CONFIG,
0, // AR_OBJECT_GLOBAL_ROOT_SIGNATURE,
0, // AR_OBJECT_LOCAL_ROOT_SIGNATURE,
0, // AR_OBJECT_SUBOBJECT_TO_EXPORTS_ASSOC,
0, // AR_OBJECT_RAYTRACING_SHADER_CONFIG,
0, // AR_OBJECT_RAYTRACING_PIPELINE_CONFIG,
0, // AR_OBJECT_TRIANGLE_HIT_GROUP,
0, // AR_OBJECT_PROCEDURAL_PRIMITIVE_HIT_GROUP,
0, // AR_OBJECT_RAYTRACING_PIPELINE_CONFIG1,
1, // AR_OBJECT_RAY_QUERY,
0, // AR_OBJECT_HEAP_RESOURCE,
0, // AR_OBJECT_HEAP_SAMPLER,
2, // AR_OBJECT_RWTEXTURE2DMS
2, // AR_OBJECT_RWTEXTURE2DMS_ARRAY
// WorkGraphs
0, // AR_OBJECT_EMPTY_NODE_INPUT,
1, // AR_OBJECT_DISPATCH_NODE_INPUT_RECORD,
1, // AR_OBJECT_RWDISPATCH_NODE_INPUT_RECORD,
1, // AR_OBJECT_GROUP_NODE_INPUT_RECORDS,
1, // AR_OBJECT_RWGROUP_NODE_INPUT_RECORDS,
1, // AR_OBJECT_THREAD_NODE_INPUT_RECORD,
1, // AR_OBJECT_RWTHREAD_NODE_INPUT_RECORD,
1, // AR_OBJECT_NODE_OUTPUT,
0, // AR_OBJECT_EMPTY_NODE_OUTPUT,
1, // AR_OBJECT_NODE_OUTPUT_ARRAY,
0, // AR_OBJECT_EMPTY_NODE_OUTPUT_ARRAY,
1, // AR_OBJECT_THREAD_NODE_OUTPUT_RECORDS,
1, // AR_OBJECT_GROUP_NODE_OUTPUT_RECORDS
};
C_ASSERT(_countof(g_ArBasicKindsAsTypes) ==
_countof(g_ArBasicKindsTemplateCount));
/// <summary>Describes the how the subscript or indexing operators work on a
/// given type.</summary>
/// Packed into a single byte's worth of bitfields; one record per entry of
/// g_ArBasicKindsAsTypes (see g_ArBasicKindsSubscripts below).
struct SubscriptOperatorRecord {
unsigned int
SubscriptCardinality : 4; // Number of elements expected in subscript -
// zero if operator not supported.
bool HasMips : 1; // true if the kind has a mips member; false otherwise
bool HasSample : 1; // true if the kind has a sample member; false otherwise
};
// Subscript operators for objects that are represented as HLSL structures or
// templates.
// Parallel to g_ArBasicKindsAsTypes: entry i describes operator[] for
// g_ArBasicKindsAsTypes[i]. Keep positions (not just counts) in sync.
static const SubscriptOperatorRecord g_ArBasicKindsSubscripts[] = {
{1, MipsFalse, SampleFalse}, // AR_OBJECT_BUFFER (Buffer)
// AR_OBJECT_TEXTURE,
{1, MipsTrue, SampleFalse}, // AR_OBJECT_TEXTURE1D (Texture1D)
{2, MipsTrue, SampleFalse}, // AR_OBJECT_TEXTURE1D_ARRAY (Texture1DArray)
{2, MipsTrue, SampleFalse}, // AR_OBJECT_TEXTURE2D (Texture2D)
{3, MipsTrue, SampleFalse}, // AR_OBJECT_TEXTURE2D_ARRAY (Texture2DArray)
{3, MipsTrue, SampleFalse}, // AR_OBJECT_TEXTURE3D (Texture3D)
{0, MipsFalse, SampleFalse}, // AR_OBJECT_TEXTURECUBE (TextureCube)
{0, MipsFalse,
SampleFalse}, // AR_OBJECT_TEXTURECUBE_ARRAY (TextureCubeArray)
{2, MipsFalse, SampleTrue}, // AR_OBJECT_TEXTURE2DMS (Texture2DMS)
{3, MipsFalse,
SampleTrue}, // AR_OBJECT_TEXTURE2DMS_ARRAY (Texture2DMSArray)
{0, MipsFalse, SampleFalse}, // AR_OBJECT_SAMPLER (SamplerState)
// AR_OBJECT_SAMPLER1D,
// AR_OBJECT_SAMPLER2D,
// AR_OBJECT_SAMPLER3D,
// AR_OBJECT_SAMPLERCUBE,
{0, MipsFalse,
SampleFalse}, // AR_OBJECT_SAMPLERCOMPARISON (SamplerComparison)
{0, MipsFalse, SampleFalse}, // AR_OBJECT_CONSTANT_BUFFER
{0, MipsFalse, SampleFalse}, // AR_OBJECT_TEXTURE_BUFFER
{0, MipsFalse, SampleFalse}, // AR_OBJECT_POINTSTREAM (PointStream)
{0, MipsFalse, SampleFalse}, // AR_OBJECT_LINESTREAM (LineStream)
{0, MipsFalse, SampleFalse}, // AR_OBJECT_TRIANGLESTREAM (TriangleStream)
{1, MipsFalse, SampleFalse}, // AR_OBJECT_INPUTPATCH (InputPatch)
{1, MipsFalse, SampleFalse}, // AR_OBJECT_OUTPUTPATCH (OutputPatch)
{1, MipsFalse, SampleFalse}, // AR_OBJECT_RWTEXTURE1D (RWTexture1D)
{2, MipsFalse,
SampleFalse}, // AR_OBJECT_RWTEXTURE1D_ARRAY (RWTexture1DArray)
{2, MipsFalse, SampleFalse}, // AR_OBJECT_RWTEXTURE2D (RWTexture2D)
{3, MipsFalse,
SampleFalse}, // AR_OBJECT_RWTEXTURE2D_ARRAY (RWTexture2DArray)
{3, MipsFalse, SampleFalse}, // AR_OBJECT_RWTEXTURE3D (RWTexture3D)
{1, MipsFalse, SampleFalse}, // AR_OBJECT_RWBUFFER (RWBuffer)
{0, MipsFalse,
SampleFalse}, // AR_OBJECT_BYTEADDRESS_BUFFER (ByteAddressBuffer)
{0, MipsFalse,
SampleFalse}, // AR_OBJECT_RWBYTEADDRESS_BUFFER (RWByteAddressBuffer)
{1, MipsFalse,
SampleFalse}, // AR_OBJECT_STRUCTURED_BUFFER (StructuredBuffer)
{1, MipsFalse,
SampleFalse}, // AR_OBJECT_RWSTRUCTURED_BUFFER (RWStructuredBuffer)
// AR_OBJECT_RWSTRUCTURED_BUFFER_ALLOC,
// AR_OBJECT_RWSTRUCTURED_BUFFER_CONSUME,
{0, MipsFalse, SampleFalse}, // AR_OBJECT_APPEND_STRUCTURED_BUFFER
// (AppendStructuredBuffer)
{0, MipsFalse, SampleFalse}, // AR_OBJECT_CONSUME_STRUCTURED_BUFFER
// (ConsumeStructuredBuffer)
{1, MipsFalse, SampleFalse}, // AR_OBJECT_ROVBUFFER (ROVBuffer)
{0, MipsFalse,
SampleFalse}, // AR_OBJECT_ROVBYTEADDRESS_BUFFER (ROVByteAddressBuffer)
{1, MipsFalse,
SampleFalse}, // AR_OBJECT_ROVSTRUCTURED_BUFFER (ROVStructuredBuffer)
{1, MipsFalse, SampleFalse}, // AR_OBJECT_ROVTEXTURE1D (ROVTexture1D)
{2, MipsFalse,
SampleFalse}, // AR_OBJECT_ROVTEXTURE1D_ARRAY (ROVTexture1DArray)
{2, MipsFalse, SampleFalse}, // AR_OBJECT_ROVTEXTURE2D (ROVTexture2D)
{3, MipsFalse,
SampleFalse}, // AR_OBJECT_ROVTEXTURE2D_ARRAY (ROVTexture2DArray)
{3, MipsFalse, SampleFalse}, // AR_OBJECT_ROVTEXTURE3D (ROVTexture3D)
{0, MipsFalse, SampleFalse}, // AR_OBJECT_FEEDBACKTEXTURE2D
{0, MipsFalse, SampleFalse}, // AR_OBJECT_FEEDBACKTEXTURE2D_ARRAY
// SPIRV change starts
#ifdef ENABLE_SPIRV_CODEGEN
{0, MipsFalse, SampleFalse}, // AR_OBJECT_VK_SUBPASS_INPUT (SubpassInput)
{0, MipsFalse,
SampleFalse}, // AR_OBJECT_VK_SUBPASS_INPUT_MS (SubpassInputMS)
{0, MipsFalse, SampleFalse}, // AR_OBJECT_VK_SPIRV_TYPE
{0, MipsFalse, SampleFalse}, // AR_OBJECT_VK_SPIRV_OPAQUE_TYPE
{0, MipsFalse, SampleFalse}, // AR_OBJECT_VK_INTEGRAL_CONSTANT,
{0, MipsFalse, SampleFalse}, // AR_OBJECT_VK_LITERAL,
{0, MipsFalse, SampleFalse}, // AR_OBJECT_VK_SPV_INTRINSIC_TYPE
{0, MipsFalse, SampleFalse}, // AR_OBJECT_VK_SPV_INTRINSIC_RESULT_ID
#endif // ENABLE_SPIRV_CODEGEN
// SPIRV change ends
{0, MipsFalse,
SampleFalse}, // AR_OBJECT_LEGACY_EFFECT (legacy effect objects)
{0, MipsFalse, SampleFalse}, // AR_OBJECT_WAVE
{0, MipsFalse, SampleFalse}, // AR_OBJECT_RAY_DESC
{0, MipsFalse, SampleFalse}, // AR_OBJECT_ACCELERATION_STRUCT
{0, MipsFalse, SampleFalse}, // AR_OBJECT_TRIANGLE_INTERSECTION_ATTRIBUTES
{0, MipsFalse, SampleFalse}, // AR_OBJECT_STATE_OBJECT_CONFIG,
{0, MipsFalse, SampleFalse}, // AR_OBJECT_GLOBAL_ROOT_SIGNATURE,
{0, MipsFalse, SampleFalse}, // AR_OBJECT_LOCAL_ROOT_SIGNATURE,
{0, MipsFalse, SampleFalse}, // AR_OBJECT_SUBOBJECT_TO_EXPORTS_ASSOC,
{0, MipsFalse, SampleFalse}, // AR_OBJECT_RAYTRACING_SHADER_CONFIG,
{0, MipsFalse, SampleFalse}, // AR_OBJECT_RAYTRACING_PIPELINE_CONFIG,
{0, MipsFalse, SampleFalse}, // AR_OBJECT_TRIANGLE_HIT_GROUP,
{0, MipsFalse, SampleFalse}, // AR_OBJECT_PROCEDURAL_PRIMITIVE_HIT_GROUP,
{0, MipsFalse, SampleFalse}, // AR_OBJECT_RAYTRACING_PIPELINE_CONFIG1,
{0, MipsFalse, SampleFalse}, // AR_OBJECT_RAY_QUERY,
{0, MipsFalse, SampleFalse}, // AR_OBJECT_HEAP_RESOURCE,
{0, MipsFalse, SampleFalse}, // AR_OBJECT_HEAP_SAMPLER,
{2, MipsFalse, SampleTrue}, // AR_OBJECT_RWTEXTURE2DMS (RWTexture2DMS)
{3, MipsFalse,
SampleTrue}, // AR_OBJECT_RWTEXTURE2DMS_ARRAY (RWTexture2DMSArray)
// WorkGraphs
{0, MipsFalse, SampleFalse}, // AR_OBJECT_EMPTY_NODE_INPUT
{0, MipsFalse, SampleFalse}, // AR_OBJECT_DISPATCH_NODE_INPUT_RECORD
{0, MipsFalse, SampleFalse}, // AR_OBJECT_RWDISPATCH_NODE_INPUT_RECORD
{1, MipsFalse, SampleFalse}, // AR_OBJECT_GROUP_NODE_INPUT_RECORDS
{1, MipsFalse, SampleFalse}, // AR_OBJECT_RWGROUP_NODE_INPUT_RECORDS
{0, MipsFalse, SampleFalse}, // AR_OBJECT_THREAD_NODE_INPUT_RECORD
{0, MipsFalse, SampleFalse}, // AR_OBJECT_RWTHREAD_NODE_INPUT_RECORD
{0, MipsFalse, SampleFalse}, // AR_OBJECT_NODE_OUTPUT
{0, MipsFalse, SampleFalse}, // AR_OBJECT_EMPTY_NODE_OUTPUT
{1, MipsFalse, SampleFalse}, // AR_OBJECT_NODE_OUTPUT_ARRAY
{1, MipsFalse, SampleFalse}, // AR_OBJECT_EMPTY_NODE_OUTPUT_ARRAY
{1, MipsFalse, SampleFalse}, // AR_OBJECT_THREAD_NODE_OUTPUT_RECORDS
{1, MipsFalse, SampleFalse}, // AR_OBJECT_GROUP_NODE_OUTPUT_RECORDS
};
C_ASSERT(_countof(g_ArBasicKindsAsTypes) == _countof(g_ArBasicKindsSubscripts));
// Type names for ArBasicKind values.
// Indexed directly by ArBasicKind; order must match the enumerant order
// (the C_ASSERT below checks only the count against AR_BASIC_MAXIMUM_COUNT).
static const char *g_ArBasicTypeNames[] = {
"bool", "float", "half", "half", "float", "double", "int", "sbyte", "byte",
"short", "ushort", "int", "uint", "long", "ulong", "min10float",
"min16float", "min12int", "min16int", "min16uint", "int8_t4_packed",
"uint8_t4_packed", "enum",
"<count>", "<none>", "<unknown>", "<nocast>", "<dependent>", "<pointer>",
"enum class",
"null", "literal string", "string",
// "texture",
"Texture1D", "Texture1DArray", "Texture2D", "Texture2DArray", "Texture3D",
"TextureCube", "TextureCubeArray", "Texture2DMS", "Texture2DMSArray",
"SamplerState", "sampler1D", "sampler2D", "sampler3D", "samplerCUBE",
"SamplerComparisonState", "Buffer", "RenderTargetView", "DepthStencilView",
"ComputeShader", "DomainShader", "GeometryShader", "HullShader",
"PixelShader", "VertexShader", "pixelfragment", "vertexfragment",
"StateBlock", "Rasterizer", "DepthStencil", "Blend", "PointStream",
"LineStream", "TriangleStream", "InputPatch", "OutputPatch", "RWTexture1D",
"RWTexture1DArray", "RWTexture2D", "RWTexture2DArray", "RWTexture3D",
"RWBuffer", "ByteAddressBuffer", "RWByteAddressBuffer", "StructuredBuffer",
"RWStructuredBuffer", "RWStructuredBuffer(Incrementable)",
"RWStructuredBuffer(Decrementable)", "AppendStructuredBuffer",
"ConsumeStructuredBuffer",
"ConstantBuffer", "TextureBuffer",
"RasterizerOrderedBuffer", "RasterizerOrderedByteAddressBuffer",
"RasterizerOrderedStructuredBuffer", "RasterizerOrderedTexture1D",
"RasterizerOrderedTexture1DArray", "RasterizerOrderedTexture2D",
"RasterizerOrderedTexture2DArray", "RasterizerOrderedTexture3D",
"FeedbackTexture2D", "FeedbackTexture2DArray",
// SPIRV change starts
#ifdef ENABLE_SPIRV_CODEGEN
"SubpassInput", "SubpassInputMS", "SpirvType", "SpirvOpaqueType",
"integral_constant", "Literal", "ext_type", "ext_result_id",
#endif // ENABLE_SPIRV_CODEGEN
// SPIRV change ends
"<internal inner type object>",
"deprecated effect object", "wave_t", "RayDesc",
"RaytracingAccelerationStructure", "user defined type",
"BuiltInTriangleIntersectionAttributes",
// subobjects
"StateObjectConfig", "GlobalRootSignature", "LocalRootSignature",
"SubobjectToExportsAssociation", "RaytracingShaderConfig",
"RaytracingPipelineConfig", "TriangleHitGroup",
"ProceduralPrimitiveHitGroup", "RaytracingPipelineConfig1",
"RayQuery", "HEAP_Resource", "HEAP_Sampler",
"RWTexture2DMS", "RWTexture2DMSArray",
// Workgraphs
"EmptyNodeInput", "DispatchNodeInputRecord", "RWDispatchNodeInputRecord",
"GroupNodeInputRecords", "RWGroupNodeInputRecords", "ThreadNodeInputRecord",
"RWThreadNodeInputRecord",
"NodeOutput", "EmptyNodeOutput", "NodeOutputArray", "EmptyNodeOutputArray",
"ThreadNodeOutputRecords", "GroupNodeOutputRecords"};
C_ASSERT(_countof(g_ArBasicTypeNames) == AR_BASIC_MAXIMUM_COUNT);
/// Returns true unless `kind` is an internal marker value (count/none/
/// unknown/nocast/pointer) or one of the unsupported effects-framework
/// shader-object kinds.
static bool IsValidBasicKind(ArBasicKind kind) {
  switch (kind) {
  case AR_BASIC_COUNT:
  case AR_BASIC_NONE:
  case AR_BASIC_UNKNOWN:
  case AR_BASIC_NOCAST:
  case AR_BASIC_POINTER:
  case AR_OBJECT_RENDERTARGETVIEW:
  case AR_OBJECT_DEPTHSTENCILVIEW:
  case AR_OBJECT_COMPUTESHADER:
  case AR_OBJECT_DOMAINSHADER:
  case AR_OBJECT_GEOMETRYSHADER:
  case AR_OBJECT_HULLSHADER:
  case AR_OBJECT_PIXELSHADER:
  case AR_OBJECT_VERTEXSHADER:
  case AR_OBJECT_PIXELFRAGMENT:
  case AR_OBJECT_VERTEXFRAGMENT:
    return false;
  default:
    return true;
  }
}
// kind should never be a flag value or effects framework type - we simply do
// not expect to deal with these
// (wraps IsValidBasicKind above in the project's DXASSERT machinery so
// callers get a descriptive message on failure)
#define DXASSERT_VALIDBASICKIND(kind) \
DXASSERT(IsValidBasicKind(kind), "otherwise caller is using a special flag " \
"or an unsupported kind value");
// Names of legacy effects-framework object types that are recognized (and
// then ignored/diagnosed) rather than treated as user identifiers.
static const char *g_DeprecatedEffectObjectNames[] = {
// These are case insensitive in fxc, but we'll just create two case aliases
// to capture the majority of cases
"texture", "Texture", "pixelshader", "PixelShader", "vertexshader",
"VertexShader",
// These are case sensitive in fxc
// (the trailing number on each line is the string's length)
"pixelfragment", // 13
"vertexfragment", // 14
"ComputeShader", // 13
"DomainShader", // 12
"GeometryShader", // 14
"HullShader", // 10
"BlendState", // 10
"DepthStencilState", // 17
"DepthStencilView", // 16
"RasterizerState", // 15
"RenderTargetView", // 16
};
static bool IsVariadicIntrinsicFunction(const HLSL_INTRINSIC *fn) {
return fn->pArgs[fn->uNumArgs - 1].uTemplateId == INTRIN_TEMPLATE_VARARGS;
}
/// Returns true if `arg` is the vararg marker record; its template id field
/// carries the INTRIN_TEMPLATE_VARARGS sentinel.
static bool IsVariadicArgument(const HLSL_INTRINSIC_ARGUMENT &arg) {
  return INTRIN_TEMPLATE_VARARGS == arg.uTemplateId;
}
/// Maps an intrinsic argument's usage qualifier bits to a
/// hlsl::ParameterModifier (InOut / Out / Ref / In).
static hlsl::ParameterModifier
ParamModsFromIntrinsicArg(const HLSL_INTRINSIC_ARGUMENT *pArg) {
  // Only the in/out bits participate in the InOut/Out classification.
  const UINT64 InOutBits = pArg->qwUsage & AR_QUAL_IN_OUT;
  switch (InOutBits) {
  case AR_QUAL_IN_OUT:
    return hlsl::ParameterModifier(hlsl::ParameterModifier::Kind::InOut);
  case AR_QUAL_OUT:
    return hlsl::ParameterModifier(hlsl::ParameterModifier::Kind::Out);
  default:
    break;
  }
  // A usage of exactly AR_QUAL_REF (note: full equality on the unmasked
  // qualifier, matching the original logic) maps to Ref.
  if (pArg->qwUsage == AR_QUAL_REF)
    return hlsl::ParameterModifier(hlsl::ParameterModifier::Kind::Ref);
  DXASSERT(InOutBits & AR_QUAL_IN, "else usage is incorrect");
  return hlsl::ParameterModifier(hlsl::ParameterModifier::Kind::In);
}
static void InitParamMods(const HLSL_INTRINSIC *pIntrinsic,
SmallVectorImpl<hlsl::ParameterModifier> ¶mMods,
size_t VariadicArgumentCount = 0u) {
// The first argument is the return value, which isn't included.
for (unsigned i = 1; i < pIntrinsic->uNumArgs; ++i) {
// Once we reach varargs we can break out of this loop.
if (IsVariadicArgument(pIntrinsic->pArgs[i]))
break;
paramMods.push_back(ParamModsFromIntrinsicArg(&pIntrinsic->pArgs[i]));
}
// For variadic functions, any argument not explicitly specified will be
// considered an input argument.
for (unsigned i = 0; i < VariadicArgumentCount; ++i) {
paramMods.push_back(
hlsl::ParameterModifier(hlsl::ParameterModifier::Kind::In));
}
}
/// Returns true when `tableName` names the builtin intrinsic table.
static bool IsBuiltinTable(StringRef tableName) {
  return tableName.equals(kBuiltinIntrinsicTableName);
}
/// Returns true when `opcode` comes from the builtin table AND has a distinct
/// unsigned variant; only builtin opcodes participate in unsigned remapping.
static bool HasUnsignedOpcode(LPCSTR tableName, IntrinsicOp opcode) {
  if (!IsBuiltinTable(tableName))
    return false;
  return HasUnsignedIntrinsicOpcode(opcode);
}
// Attaches the HLSLIntrinsicAttr (and, as flagged on the intrinsic record,
// Const/Pure/WaveSensitive attributes) to an intrinsic FunctionDecl.
// For builtin-table opcodes with an unsigned variant, the opcode is remapped
// to its unsigned form when the relevant type (return type, or the overload
// parameter named by iOverloadParamIndex) is an unsigned integer type.
static void AddHLSLIntrinsicAttr(FunctionDecl *FD, ASTContext &context,
LPCSTR tableName, LPCSTR lowering,
const HLSL_INTRINSIC *pIntrinsic) {
unsigned opcode = pIntrinsic->Op;
if (HasUnsignedOpcode(tableName, static_cast<IntrinsicOp>(opcode))) {
// Default to the return type; -1 means "no overload parameter".
QualType Ty = FD->getReturnType();
if (pIntrinsic->iOverloadParamIndex != -1) {
const FunctionProtoType *FT =
FD->getFunctionType()->getAs<FunctionProtoType>();
Ty = FT->getParamType(pIntrinsic->iOverloadParamIndex);
// To go thru reference type.
if (Ty->isReferenceType())
Ty = Ty.getNonReferenceType();
}
// TODO: refine the code for getting element type
if (const ExtVectorType *VecTy =
hlsl::ConvertHLSLVecMatTypeToExtVectorType(context, Ty)) {
Ty = VecTy->getElementType();
}
// Make sure to use unsigned op when return type is 'unsigned' matrix
bool isUnsignedMatOp =
IsHLSLMatType(Ty) && GetHLSLMatElementType(Ty)->isUnsignedIntegerType();
if (Ty->isUnsignedIntegerType() || isUnsignedMatOp) {
opcode = hlsl::GetUnsignedOpcode(opcode);
}
}
FD->addAttr(
HLSLIntrinsicAttr::CreateImplicit(context, tableName, lowering, opcode));
// Propagate the intrinsic record's side-effect/wave flags as attributes.
if (pIntrinsic->bReadNone)
FD->addAttr(ConstAttr::CreateImplicit(context));
if (pIntrinsic->bReadOnly)
FD->addAttr(PureAttr::CreateImplicit(context));
if (pIntrinsic->bIsWave)
FD->addAttr(HLSLWaveSensitiveAttr::CreateImplicit(context));
}
// Creates and returns an implicit FunctionDecl for an HLSL intrinsic.
// functionArgQualTypesVector[0] is the return type; elements 1..N are the
// parameter types (mutated in place here: out/inout/ref non-aggregate
// parameters are rewritten to lvalue-reference types). The decl is created in
// the translation unit, then re-parented into the `NS` (hlsl) namespace, and
// gets its intrinsic attributes via AddHLSLIntrinsicAttr.
static FunctionDecl *
AddHLSLIntrinsicFunction(ASTContext &context, NamespaceDecl *NS,
LPCSTR tableName, LPCSTR lowering,
const HLSL_INTRINSIC *pIntrinsic,
std::vector<QualType> *functionArgQualTypesVector) {
DeclContext *currentDeclContext = context.getTranslationUnitDecl();
std::vector<QualType> &functionArgQualTypes = *functionArgQualTypesVector;
const size_t functionArgTypeCount = functionArgQualTypes.size();
const bool isVariadic = IsVariadicIntrinsicFunction(pIntrinsic);
// For variadic functions, the number of arguments is larger than the
// function declaration signature.
const size_t VariadicArgumentCount =
isVariadic ? (functionArgTypeCount - (pIntrinsic->uNumArgs - 1)) : 0;
DXASSERT(isVariadic || functionArgTypeCount - 1 <= g_MaxIntrinsicParamCount,
"otherwise g_MaxIntrinsicParamCount should be larger");
SmallVector<hlsl::ParameterModifier, g_MaxIntrinsicParamCount> paramMods;
InitParamMods(pIntrinsic, paramMods, VariadicArgumentCount);
for (size_t i = 1; i < functionArgTypeCount; i++) {
// Change out/inout param to reference type.
if (paramMods[i - 1].isAnyOut() ||
paramMods[i - 1].GetKind() == hlsl::ParameterModifier::Kind::Ref) {
QualType Ty = functionArgQualTypes[i];
// Aggregate type will be indirect param convert to pointer type.
// Don't need add reference for it.
if ((!Ty->isArrayType() && !Ty->isRecordType()) ||
hlsl::IsHLSLVecMatType(Ty)) {
functionArgQualTypes[i] = context.getLValueReferenceType(Ty);
}
}
}
// The function's name is stored as the name of argument 0 (the return slot).
IdentifierInfo &functionId = context.Idents.get(
StringRef(pIntrinsic->pArgs[0].pName), tok::TokenKind::identifier);
DeclarationName functionName(&functionId);
auto protoInfo = clang::FunctionProtoType::ExtProtoInfo();
protoInfo.Variadic = isVariadic;
// functionArgQualTypes first element is the function return type, and
// function argument types start at index 1.
const QualType fnReturnType = functionArgQualTypes[0];
std::vector<QualType> fnArgTypes(functionArgQualTypes.begin() + 1,
functionArgQualTypes.end());
QualType functionType =
context.getFunctionType(fnReturnType, fnArgTypes, protoInfo, paramMods);
FunctionDecl *functionDecl = FunctionDecl::Create(
context, currentDeclContext, NoLoc,
DeclarationNameInfo(functionName, NoLoc), functionType, nullptr,
StorageClass::SC_Extern, InlineSpecifiedFalse, HasWrittenPrototypeTrue);
currentDeclContext->addDecl(functionDecl);
functionDecl->setLexicalDeclContext(currentDeclContext);
// put under hlsl namespace
functionDecl->setDeclContext(NS);
// Add intrinsic attribute
AddHLSLIntrinsicAttr(functionDecl, context, tableName, lowering, pIntrinsic);
llvm::SmallVector<ParmVarDecl *, 4> paramDecls;
for (size_t i = 1; i < functionArgTypeCount; i++) {
// For variadic functions all non-explicit arguments will have the same
// name: "..."
std::string name = i < pIntrinsic->uNumArgs - 1
? pIntrinsic->pArgs[i].pName
: pIntrinsic->pArgs[pIntrinsic->uNumArgs - 1].pName;
IdentifierInfo ¶meterId =
context.Idents.get(name, tok::TokenKind::identifier);
ParmVarDecl *paramDecl =
ParmVarDecl::Create(context, functionDecl, NoLoc, NoLoc, ¶meterId,
functionArgQualTypes[i], nullptr,
StorageClass::SC_None, nullptr, paramMods[i - 1]);
functionDecl->addDecl(paramDecl);
paramDecls.push_back(paramDecl);
}
functionDecl->setParams(paramDecls);
functionDecl->setImplicit(true);
return functionDecl;
}
/// <summary>
/// Checks whether the specified expression is a (possibly parenthesized) comma
/// operator.
/// </summary>
static bool IsExpressionBinaryComma(const Expr *expr) {
DXASSERT_NOMSG(expr != nullptr);
expr = expr->IgnoreParens();
return expr->getStmtClass() == Expr::StmtClass::BinaryOperatorClass &&
cast<BinaryOperator>(expr)->getOpcode() ==
BinaryOperatorKind::BO_Comma;
}
/// <summary>
/// Silences diagnostics for the initialization sequence, typically because they
/// have already been emitted.
/// </summary>
/// Marks the sequence as failed with FK_ListInitializationFailed; the
/// specific failure kind appears to be an arbitrary marker here — the point
/// is simply to put the sequence in a failed state (confirm against callers).
static void SilenceSequenceDiagnostics(InitializationSequence *initSequence) {
DXASSERT_NOMSG(initSequence != nullptr);
initSequence->SetFailed(InitializationSequence::FK_ListInitializationFailed);
}
class UsedIntrinsic {
public:
static int compareArgs(const QualType &LHS, const QualType &RHS) {
// The canonical representations are unique'd in an ASTContext, and so these
// should be stable.
return RHS.getTypePtr() - LHS.getTypePtr();
}
static int compareIntrinsic(const HLSL_INTRINSIC *LHS,
const HLSL_INTRINSIC *RHS) {
// The intrinsics are defined in a single static table, and so should be
// stable.
return RHS - LHS;
}
int compare(const UsedIntrinsic &other) const {
// Check whether it's the same instance.
if (this == &other)
return 0;
int result = compareIntrinsic(m_intrinsicSource, other.m_intrinsicSource);
if (result != 0)
return result;
// At this point, it's the exact same intrinsic name.
// Compare the arguments for ordering then.
DXASSERT(IsVariadicIntrinsicFunction(m_intrinsicSource) ||
m_args.size() == other.m_args.size(),
"only variadic intrinsics can be overloaded on argument count");
// For variadic functions with different number of args, order by number of
// arguments.
if (m_args.size() != other.m_args.size())
return m_args.size() - other.m_args.size();
for (size_t i = 0; i < m_args.size(); i++) {
int argComparison = compareArgs(m_args[i], other.m_args[i]);
if (argComparison != 0)
return argComparison;
}
// Exactly the same.
return 0;
}
public:
UsedIntrinsic(const HLSL_INTRINSIC *intrinsicSource,
llvm::ArrayRef<QualType> args)
: m_args(args.begin(), args.end()), m_intrinsicSource(intrinsicSource),
m_functionDecl(nullptr) {}
void setFunctionDecl(FunctionDecl *value) const {
DXASSERT(value != nullptr, "no reason to clear this out");
DXASSERT(m_functionDecl == nullptr,
"otherwise cached value is being invaldiated");
m_functionDecl = value;
}
FunctionDecl *getFunctionDecl() const { return m_functionDecl; }
bool operator==(const UsedIntrinsic &other) const {
return compare(other) == 0;
}
bool operator<(const UsedIntrinsic &other) const {
return compare(other) < 0;
}
private:
std::vector<QualType> m_args;
const HLSL_INTRINSIC *m_intrinsicSource;
mutable FunctionDecl *m_functionDecl;
};
/// Stores `value` through `ptr` when a destination was provided; a null
/// destination means the caller doesn't want the result.
template <typename T> inline void AssignOpt(T value, T *ptr) {
  if (ptr == nullptr)
    return;
  *ptr = value;
}
// Computes the ArBasicKind produced when 'LeftKind' and 'RightKind' are mixed
// in a binary expression (HLSL's usual-arithmetic-conversion rules).
// Returns false when either kind is out of range, or when the kinds differ
// and either one is not a basic numeric kind; on success the combined kind is
// written through 'pOutKind' and true is returned.
static bool CombineBasicTypes(ArBasicKind LeftKind, ArBasicKind RightKind,
                              ArBasicKind *pOutKind) {
  // Make sure the kinds are both valid
  if ((LeftKind < 0 || LeftKind >= AR_BASIC_MAXIMUM_COUNT) ||
      (RightKind < 0 || RightKind >= AR_BASIC_MAXIMUM_COUNT)) {
    return false;
  }
  // If kinds match perfectly, succeed without requiring they be basic
  if (LeftKind == RightKind) {
    *pOutKind = LeftKind;
    return true;
  }
  // More complicated combination requires that the kinds be basic
  if (LeftKind >= AR_BASIC_COUNT || RightKind >= AR_BASIC_COUNT) {
    return false;
  }
  UINT uLeftProps = GetBasicKindProps(LeftKind);
  UINT uRightProps = GetBasicKindProps(RightKind);
  // Result width starts as the wider of the two operand widths.
  UINT uBits = GET_BPROP_BITS(uLeftProps) > GET_BPROP_BITS(uRightProps)
                   ? GET_BPROP_BITS(uLeftProps)
                   : GET_BPROP_BITS(uRightProps);
  UINT uBothFlags = uLeftProps & uRightProps;   // properties shared by both
  UINT uEitherFlags = uLeftProps | uRightProps; // properties held by either
  // Notes: all numeric types have either BPROP_FLOATING or BPROP_INTEGER (even
  // bool)
  //   unsigned only applies to non-literal ints, not bool or enum
  //   literals, bool, and enum are all BPROP_BITS0
  if (uBothFlags & BPROP_BOOLEAN) {
    *pOutKind = AR_BASIC_BOOL;
    return true;
  }
  bool bFloatResult = 0 != (uEitherFlags & BPROP_FLOATING);
  if (uBothFlags & BPROP_LITERAL) {
    *pOutKind = bFloatResult ? AR_BASIC_LITERAL_FLOAT : AR_BASIC_LITERAL_INT;
    return true;
  }
  // Starting approximation of result properties:
  // - float if either are float, otherwise int (see Notes above)
  // - min/partial precision if both have same flag
  // - if not float, add unsigned if either is unsigned
  UINT uResultFlags = (uBothFlags & (BPROP_INTEGER | BPROP_MIN_PRECISION |
                                     BPROP_PARTIAL_PRECISION)) |
                      (uEitherFlags & BPROP_FLOATING) |
                      (!bFloatResult ? (uEitherFlags & BPROP_UNSIGNED) : 0);
  // If one is literal/bool/enum, use min/partial precision from the other
  if (uEitherFlags & (BPROP_LITERAL | BPROP_BOOLEAN | BPROP_ENUM)) {
    uResultFlags |=
        uEitherFlags & (BPROP_MIN_PRECISION | BPROP_PARTIAL_PRECISION);
  }
  // Now if we have partial precision, we know the result must be half
  if (uResultFlags & BPROP_PARTIAL_PRECISION) {
    *pOutKind = AR_BASIC_FLOAT32_PARTIAL_PRECISION;
    return true;
  }
  // uBits are already initialized to max of either side, so now:
  //  if only one is float, get result props from float side
  //  min16float + int -> min16float
  //  also take min precision from that side
  if (bFloatResult && 0 == (uBothFlags & BPROP_FLOATING)) {
    uResultFlags = (uLeftProps & BPROP_FLOATING) ? uLeftProps : uRightProps;
    uBits = GET_BPROP_BITS(uResultFlags);
    uResultFlags &= ~BPROP_LITERAL;
  }
  bool bMinPrecisionResult = uResultFlags & BPROP_MIN_PRECISION;
  // if uBits is 0 here, upgrade to 32-bits
  // this happens if bool, literal or enum on both sides,
  // or if float came from literal side
  if (uBits == BPROP_BITS0)
    uBits = BPROP_BITS32;
  // Sanity-check the width/precision combination before selecting a kind.
  DXASSERT(uBits != BPROP_BITS0,
           "CombineBasicTypes: uBits should not be zero at this point");
  DXASSERT(uBits != BPROP_BITS8,
           "CombineBasicTypes: 8-bit types not supported at this time");
  if (bMinPrecisionResult) {
    DXASSERT(
        uBits < BPROP_BITS32,
        "CombineBasicTypes: min-precision result must be less than 32-bits");
  } else {
    DXASSERT(uBits > BPROP_BITS12,
             "CombineBasicTypes: 10 or 12 bit result must be min precision");
  }
  if (bFloatResult) {
    DXASSERT(uBits != BPROP_BITS12,
             "CombineBasicTypes: 12-bit result must be int");
  } else {
    DXASSERT(uBits != BPROP_BITS10,
             "CombineBasicTypes: 10-bit result must be float");
  }
  if (uBits == BPROP_BITS12) {
    DXASSERT(!(uResultFlags & BPROP_UNSIGNED),
             "CombineBasicTypes: 12-bit result must not be unsigned");
  }
  // Map the (float/int, width, signedness, min-precision) tuple to a kind.
  if (bFloatResult) {
    switch (uBits) {
    case BPROP_BITS10:
      *pOutKind = AR_BASIC_MIN10FLOAT;
      break;
    case BPROP_BITS16:
      *pOutKind = bMinPrecisionResult ? AR_BASIC_MIN16FLOAT : AR_BASIC_FLOAT16;
      break;
    case BPROP_BITS32:
      *pOutKind = AR_BASIC_FLOAT32;
      break;
    case BPROP_BITS64:
      *pOutKind = AR_BASIC_FLOAT64;
      break;
    default:
      DXASSERT(false, "Unexpected bit count for float result");
      break;
    }
  } else {
    // int or unsigned int
    switch (uBits) {
    case BPROP_BITS12:
      *pOutKind = AR_BASIC_MIN12INT;
      break;
    case BPROP_BITS16:
      if (uResultFlags & BPROP_UNSIGNED)
        *pOutKind = bMinPrecisionResult ? AR_BASIC_MIN16UINT : AR_BASIC_UINT16;
      else
        *pOutKind = bMinPrecisionResult ? AR_BASIC_MIN16INT : AR_BASIC_INT16;
      break;
    case BPROP_BITS32:
      *pOutKind =
          (uResultFlags & BPROP_UNSIGNED) ? AR_BASIC_UINT32 : AR_BASIC_INT32;
      break;
    case BPROP_BITS64:
      *pOutKind =
          (uResultFlags & BPROP_UNSIGNED) ? AR_BASIC_UINT64 : AR_BASIC_INT64;
      break;
    default:
      DXASSERT(false, "Unexpected bit count for int result");
      break;
    }
  }
  return true;
}
class UsedIntrinsicStore : public std::set<UsedIntrinsic> {};
// Retrieves the built-in intrinsic method table for the given object kind.
// On return, '*intrinsics' points at the table (or null) and
// '*intrinsicCount' holds its length (or 0) — kinds without methods yield
// the null/0 pair.
static void GetIntrinsicMethods(ArBasicKind kind,
                                const HLSL_INTRINSIC **intrinsics,
                                size_t *intrinsicCount) {
  DXASSERT_NOMSG(intrinsics != nullptr);
  DXASSERT_NOMSG(intrinsicCount != nullptr);
  switch (kind) {
  case AR_OBJECT_TRIANGLESTREAM:
  case AR_OBJECT_POINTSTREAM:
  case AR_OBJECT_LINESTREAM:
    *intrinsics = g_StreamMethods;
    *intrinsicCount = _countof(g_StreamMethods);
    break;
  case AR_OBJECT_TEXTURE1D:
    *intrinsics = g_Texture1DMethods;
    *intrinsicCount = _countof(g_Texture1DMethods);
    break;
  case AR_OBJECT_TEXTURE1D_ARRAY:
    *intrinsics = g_Texture1DArrayMethods;
    *intrinsicCount = _countof(g_Texture1DArrayMethods);
    break;
  case AR_OBJECT_TEXTURE2D:
    *intrinsics = g_Texture2DMethods;
    *intrinsicCount = _countof(g_Texture2DMethods);
    break;
  case AR_OBJECT_TEXTURE2DMS:
    *intrinsics = g_Texture2DMSMethods;
    *intrinsicCount = _countof(g_Texture2DMSMethods);
    break;
  case AR_OBJECT_TEXTURE2D_ARRAY:
    *intrinsics = g_Texture2DArrayMethods;
    *intrinsicCount = _countof(g_Texture2DArrayMethods);
    break;
  case AR_OBJECT_TEXTURE2DMS_ARRAY:
    *intrinsics = g_Texture2DArrayMSMethods;
    *intrinsicCount = _countof(g_Texture2DArrayMSMethods);
    break;
  case AR_OBJECT_TEXTURE3D:
    *intrinsics = g_Texture3DMethods;
    *intrinsicCount = _countof(g_Texture3DMethods);
    break;
  case AR_OBJECT_TEXTURECUBE:
    *intrinsics = g_TextureCUBEMethods;
    *intrinsicCount = _countof(g_TextureCUBEMethods);
    break;
  case AR_OBJECT_TEXTURECUBE_ARRAY:
    *intrinsics = g_TextureCUBEArrayMethods;
    *intrinsicCount = _countof(g_TextureCUBEArrayMethods);
    break;
  case AR_OBJECT_BUFFER:
    *intrinsics = g_BufferMethods;
    *intrinsicCount = _countof(g_BufferMethods);
    break;
  // RW and ROV (rasterizer-ordered) variants share one method table.
  case AR_OBJECT_RWTEXTURE1D:
  case AR_OBJECT_ROVTEXTURE1D:
    *intrinsics = g_RWTexture1DMethods;
    *intrinsicCount = _countof(g_RWTexture1DMethods);
    break;
  case AR_OBJECT_RWTEXTURE1D_ARRAY:
  case AR_OBJECT_ROVTEXTURE1D_ARRAY:
    *intrinsics = g_RWTexture1DArrayMethods;
    *intrinsicCount = _countof(g_RWTexture1DArrayMethods);
    break;
  case AR_OBJECT_RWTEXTURE2D:
  case AR_OBJECT_ROVTEXTURE2D:
    *intrinsics = g_RWTexture2DMethods;
    *intrinsicCount = _countof(g_RWTexture2DMethods);
    break;
  case AR_OBJECT_RWTEXTURE2D_ARRAY:
  case AR_OBJECT_ROVTEXTURE2D_ARRAY:
    *intrinsics = g_RWTexture2DArrayMethods;
    *intrinsicCount = _countof(g_RWTexture2DArrayMethods);
    break;
  case AR_OBJECT_RWTEXTURE3D:
  case AR_OBJECT_ROVTEXTURE3D:
    *intrinsics = g_RWTexture3DMethods;
    *intrinsicCount = _countof(g_RWTexture3DMethods);
    break;
  case AR_OBJECT_FEEDBACKTEXTURE2D:
    *intrinsics = g_FeedbackTexture2DMethods;
    *intrinsicCount = _countof(g_FeedbackTexture2DMethods);
    break;
  case AR_OBJECT_FEEDBACKTEXTURE2D_ARRAY:
    *intrinsics = g_FeedbackTexture2DArrayMethods;
    *intrinsicCount = _countof(g_FeedbackTexture2DArrayMethods);
    break;
  case AR_OBJECT_RWBUFFER:
  case AR_OBJECT_ROVBUFFER:
    *intrinsics = g_RWBufferMethods;
    *intrinsicCount = _countof(g_RWBufferMethods);
    break;
  case AR_OBJECT_BYTEADDRESS_BUFFER:
    *intrinsics = g_ByteAddressBufferMethods;
    *intrinsicCount = _countof(g_ByteAddressBufferMethods);
    break;
  case AR_OBJECT_RWBYTEADDRESS_BUFFER:
  case AR_OBJECT_ROVBYTEADDRESS_BUFFER:
    *intrinsics = g_RWByteAddressBufferMethods;
    *intrinsicCount = _countof(g_RWByteAddressBufferMethods);
    break;
  case AR_OBJECT_STRUCTURED_BUFFER:
    *intrinsics = g_StructuredBufferMethods;
    *intrinsicCount = _countof(g_StructuredBufferMethods);
    break;
  case AR_OBJECT_RWSTRUCTURED_BUFFER:
  case AR_OBJECT_ROVSTRUCTURED_BUFFER:
    *intrinsics = g_RWStructuredBufferMethods;
    *intrinsicCount = _countof(g_RWStructuredBufferMethods);
    break;
  case AR_OBJECT_APPEND_STRUCTURED_BUFFER:
    *intrinsics = g_AppendStructuredBufferMethods;
    *intrinsicCount = _countof(g_AppendStructuredBufferMethods);
    break;
  case AR_OBJECT_CONSUME_STRUCTURED_BUFFER:
    *intrinsics = g_ConsumeStructuredBufferMethods;
    *intrinsicCount = _countof(g_ConsumeStructuredBufferMethods);
    break;
  case AR_OBJECT_RAY_QUERY:
    *intrinsics = g_RayQueryMethods;
    *intrinsicCount = _countof(g_RayQueryMethods);
    break;
  case AR_OBJECT_RWTEXTURE2DMS:
    *intrinsics = g_RWTexture2DMSMethods;
    *intrinsicCount = _countof(g_RWTexture2DMSMethods);
    break;
  case AR_OBJECT_RWTEXTURE2DMS_ARRAY:
    *intrinsics = g_RWTexture2DMSArrayMethods;
    *intrinsicCount = _countof(g_RWTexture2DMSArrayMethods);
    break;
  // Work-graph node input/output objects.
  case AR_OBJECT_EMPTY_NODE_INPUT:
    *intrinsics = g_EmptyNodeInputMethods;
    *intrinsicCount = _countof(g_EmptyNodeInputMethods);
    break;
  case AR_OBJECT_RWDISPATCH_NODE_INPUT_RECORD:
    *intrinsics = g_RWDispatchNodeInputRecordMethods;
    *intrinsicCount = _countof(g_RWDispatchNodeInputRecordMethods);
    break;
  case AR_OBJECT_GROUP_NODE_INPUT_RECORDS:
  case AR_OBJECT_RWGROUP_NODE_INPUT_RECORDS:
    *intrinsics = g_GroupNodeInputRecordsMethods;
    *intrinsicCount = _countof(g_GroupNodeInputRecordsMethods);
    break;
  case AR_OBJECT_NODE_OUTPUT:
    *intrinsics = g_NodeOutputMethods;
    *intrinsicCount = _countof(g_NodeOutputMethods);
    break;
  case AR_OBJECT_EMPTY_NODE_OUTPUT:
    *intrinsics = g_EmptyNodeOutputMethods;
    *intrinsicCount = _countof(g_EmptyNodeOutputMethods);
    break;
  case AR_OBJECT_THREAD_NODE_OUTPUT_RECORDS:
  case AR_OBJECT_GROUP_NODE_OUTPUT_RECORDS:
    *intrinsics = g_GroupOrThreadNodeOutputRecordsMethods;
    *intrinsicCount = _countof(g_GroupOrThreadNodeOutputRecordsMethods);
    break;
// SPIRV change starts
#ifdef ENABLE_SPIRV_CODEGEN
  case AR_OBJECT_VK_SUBPASS_INPUT:
    *intrinsics = g_VkSubpassInputMethods;
    *intrinsicCount = _countof(g_VkSubpassInputMethods);
    break;
  case AR_OBJECT_VK_SUBPASS_INPUT_MS:
    *intrinsics = g_VkSubpassInputMSMethods;
    *intrinsicCount = _countof(g_VkSubpassInputMSMethods);
    break;
#endif // ENABLE_SPIRV_CODEGEN
  // SPIRV change ends
  default:
    // Kind has no intrinsic methods.
    *intrinsics = nullptr;
    *intrinsicCount = 0;
    break;
  }
}
// Returns true when 'value' is one of the special encodings used for a
// variable row/column dimension (i.e. it falls inside the IA_SPECIAL_BASE
// slot range) rather than a literal dimension.
static bool IsRowOrColumnVariable(size_t value) {
  const size_t firstSpecial = IA_SPECIAL_BASE;
  const size_t lastSpecial = IA_SPECIAL_BASE + IA_SPECIAL_SLOTS - 1;
  return firstSpecial <= value && value <= lastSpecial;
}
// Returns true when the legal-component-type class admits more than one
// concrete component type (so it cannot be mapped to a single QualType).
static bool
DoesComponentTypeAcceptMultipleTypes(LEGAL_INTRINSIC_COMPTYPES value) {
  switch (value) {
  case LICOMPTYPE_ANY_INT:        // signed or unsigned ints
  case LICOMPTYPE_ANY_INT32:      // signed or unsigned ints
  case LICOMPTYPE_ANY_FLOAT:      // float or double
  case LICOMPTYPE_FLOAT_LIKE:     // float or min16
  case LICOMPTYPE_FLOAT_DOUBLE:   // float or double
  case LICOMPTYPE_NUMERIC:        // all sorts of numbers
  case LICOMPTYPE_NUMERIC32:      // all sorts of numbers
  case LICOMPTYPE_NUMERIC32_ONLY: // all sorts of numbers
  case LICOMPTYPE_ANY:            // any type
    return true;
  default:
    return false;
  }
}
// BYTE-typed convenience overload; widens and forwards to the enum version.
static bool DoesComponentTypeAcceptMultipleTypes(BYTE value) {
  const auto compType = static_cast<LEGAL_INTRINSIC_COMPTYPES>(value);
  return DoesComponentTypeAcceptMultipleTypes(compType);
}
// Returns true when the legal-template class admits more than one layout.
static bool
DoesLegalTemplateAcceptMultipleTypes(LEGAL_INTRINSIC_TEMPLATES value) {
  // LITEMPLATE_OBJECT can match different types, but it still names a single
  // 'layout'; in practice it is paired with a component type that pins down
  // one object, so only LITEMPLATE_ANY counts as multi-type here.
  return LITEMPLATE_ANY == value;
}
// BYTE-typed convenience overload; widens and forwards to the enum version.
static bool DoesLegalTemplateAcceptMultipleTypes(BYTE value) {
  const auto templateClass = static_cast<LEGAL_INTRINSIC_TEMPLATES>(value);
  return DoesLegalTemplateAcceptMultipleTypes(templateClass);
}
// Returns true when 'kind' is a built-in object template whose element type
// may be omitted in source (it has a default), e.g. Texture2D vs Texture2D<T>.
static bool TemplateHasDefaultType(ArBasicKind kind) {
  switch (kind) {
  case AR_OBJECT_BUFFER:
  case AR_OBJECT_TEXTURE1D:
  case AR_OBJECT_TEXTURE2D:
  case AR_OBJECT_TEXTURE3D:
  case AR_OBJECT_TEXTURE1D_ARRAY:
  case AR_OBJECT_TEXTURE2D_ARRAY:
  case AR_OBJECT_TEXTURECUBE:
  case AR_OBJECT_TEXTURECUBE_ARRAY:
// SPIRV change starts
#ifdef ENABLE_SPIRV_CODEGEN
  case AR_OBJECT_VK_SUBPASS_INPUT:
  case AR_OBJECT_VK_SUBPASS_INPUT_MS:
#endif // ENABLE_SPIRV_CODEGEN
    // SPIRV change ends
    return true;
  default:
    // Objects with default types return true. Everything else is false.
    return false;
  }
}
/// <summary>
/// Use this class to iterate over intrinsic definitions that come from an
/// external source.
/// </summary>
class IntrinsicTableDefIter {
private:
  StringRef _typeName;     // Name of enclosing object type ("" for globals).
  StringRef _functionName; // Intrinsic name being looked up.
  llvm::SmallVector<CComPtr<IDxcIntrinsicTable>, 2> &_tables; // Sources.
  const HLSL_INTRINSIC *_tableIntrinsic; // Current match (null when none).
  UINT64 _tableLookupCookie; // Continuation state for LookupIntrinsic.
  unsigned _tableIndex;      // Index of table currently being scanned.
  unsigned _argCount;        // Argument count to match (excludes return).
  bool _firstChecked;        // Whether a lookup has been attempted yet.

  IntrinsicTableDefIter(
      llvm::SmallVector<CComPtr<IDxcIntrinsicTable>, 2> &tables,
      StringRef typeName, StringRef functionName, unsigned argCount)
      : _typeName(typeName), _functionName(functionName), _tables(tables),
        _tableIntrinsic(nullptr), _tableLookupCookie(0), _tableIndex(0),
        _argCount(argCount), _firstChecked(false) {}

  // Asks the current table for the next intrinsic matching the name pair,
  // clearing the cookie and result on failure.
  void CheckForIntrinsic() {
    if (_tableIndex >= _tables.size()) {
      return;
    }
    _firstChecked = true;

    // TODO: review this - this will allocate at least once per string
    CA2WEX<> typeName(_typeName.str().c_str());
    CA2WEX<> functionName(_functionName.str().c_str());
    if (FAILED(_tables[_tableIndex]->LookupIntrinsic(
            typeName, functionName, &_tableIntrinsic, &_tableLookupCookie))) {
      _tableLookupCookie = 0;
      _tableIntrinsic = nullptr;
    }
  }

  // Advances across tables until a candidate with a matching argument count
  // is found, or every table has been exhausted (iterator == end).
  void MoveToNext() {
    for (;;) {
      // If we don't have an intrinsic, try the following table.
      if (_firstChecked && _tableIntrinsic == nullptr) {
        _tableIndex++;
      }
      CheckForIntrinsic();
      if (_tableIndex == _tables.size() ||
          (_tableIntrinsic != nullptr &&
           _tableIntrinsic->uNumArgs ==
               (_argCount + 1))) // uNumArgs includes return
        break;
    }
  }

public:
  // Creates an iterator positioned before the first candidate.
  static IntrinsicTableDefIter
  CreateStart(llvm::SmallVector<CComPtr<IDxcIntrinsicTable>, 2> &tables,
              StringRef typeName, StringRef functionName, unsigned argCount) {
    IntrinsicTableDefIter result(tables, typeName, functionName, argCount);
    return result;
  }

  // Creates the end sentinel (table index past the last table).
  static IntrinsicTableDefIter
  CreateEnd(llvm::SmallVector<CComPtr<IDxcIntrinsicTable>, 2> &tables) {
    IntrinsicTableDefIter result(tables, StringRef(), StringRef(), 0);
    result._tableIndex = tables.size();
    return result;
  }

  // Note: lazily performs the first lookup on comparison against end.
  bool operator!=(const IntrinsicTableDefIter &other) {
    if (!_firstChecked) {
      MoveToNext();
    }
    return _tableIndex != other._tableIndex; // More things could be compared
                                             // but we only match end.
  }

  const HLSL_INTRINSIC *operator*() const {
    DXASSERT(_firstChecked, "otherwise deref without comparing to end");
    return _tableIntrinsic;
  }

  // Returns the providing table's name, or null on failure.
  LPCSTR GetTableName() const {
    LPCSTR tableName = nullptr;
    if (FAILED(_tables[_tableIndex]->GetTableName(&tableName))) {
      return nullptr;
    }
    return tableName;
  }

  // Returns the lowering strategy string for the current intrinsic, or null.
  LPCSTR GetLoweringStrategy() const {
    LPCSTR lowering = nullptr;
    if (FAILED(_tables[_tableIndex]->GetLoweringStrategy(_tableIntrinsic->Op,
                                                         &lowering))) {
      return nullptr;
    }
    return lowering;
  }

  IntrinsicTableDefIter &operator++() {
    MoveToNext();
    return *this;
  }
};
/// <summary>
/// Use this class to iterate over intrinsic definitions that have the same name
/// and parameter count.
/// </summary>
class IntrinsicDefIter {
  const HLSL_INTRINSIC *_current; // Cursor into the built-in table.
  const HLSL_INTRINSIC *_end;     // One past the last built-in candidate.
  IntrinsicTableDefIter _tableIter; // External tables, visited after built-ins.

  IntrinsicDefIter(const HLSL_INTRINSIC *value, const HLSL_INTRINSIC *end,
                   IntrinsicTableDefIter tableIter)
      : _current(value), _end(end), _tableIter(tableIter) {}

public:
  static IntrinsicDefIter CreateStart(const HLSL_INTRINSIC *table, size_t count,
                                      const HLSL_INTRINSIC *start,
                                      IntrinsicTableDefIter tableIter) {
    return IntrinsicDefIter(start, table + count, tableIter);
  }

  static IntrinsicDefIter CreateEnd(const HLSL_INTRINSIC *table, size_t count,
                                    IntrinsicTableDefIter tableIter) {
    return IntrinsicDefIter(table + count, table + count, tableIter);
  }

  bool operator!=(const IntrinsicDefIter &other) {
    return _current != other._current ||
           _tableIter.operator!=(other._tableIter);
  }

  // Yields built-in entries first, then entries from external tables.
  const HLSL_INTRINSIC *operator*() const {
    return (_current != _end) ? _current : *_tableIter;
  }

  LPCSTR GetTableName() const {
    return (_current != _end) ? kBuiltinIntrinsicTableName
                              : _tableIter.GetTableName();
  }

  LPCSTR GetLoweringStrategy() const {
    return (_current != _end) ? "" : _tableIter.GetLoweringStrategy();
  }

  IntrinsicDefIter &operator++() {
    if (_current != _end) {
      // Built-in overloads with the same name and arg count are adjacent;
      // stop scanning the built-in table once the run ends.
      const HLSL_INTRINSIC *next = _current + 1;
      if (next != _end && _current->uNumArgs == next->uNumArgs &&
          0 == strcmp(_current->pArgs[0].pName, next->pArgs[0].pName)) {
        _current = next;
      } else {
        _current = _end;
      }
    } else {
      ++_tableIter;
    }
    return *this;
  }
};
// Marks 'D' as an HLSL subscript-operator implementation: attaches the
// intrinsic attribute carrying the subscript opcode, plus the attribute that
// flags it as a C++-style overload.
static void AddHLSLSubscriptAttr(Decl *D, ASTContext &context,
                                 HLSubscriptOpcode opcode) {
  const unsigned opcodeValue = static_cast<unsigned>(opcode);
  StringRef groupName = GetHLOpcodeGroupName(HLOpcodeGroup::HLSubscript);
  D->addAttr(
      HLSLIntrinsicAttr::CreateImplicit(context, groupName, "", opcodeValue));
  D->addAttr(HLSLCXXOverloadAttr::CreateImplicit(context));
}
// Appends an implicit field named 'Name' of type 'Ty' to 'recordDecl' with
// the given access (public by default).
static void
CreateSimpleField(clang::ASTContext &context, CXXRecordDecl *recordDecl,
                  StringRef Name, QualType Ty,
                  AccessSpecifier access = AccessSpecifier::AS_public) {
  IdentifierInfo &fieldName =
      context.Idents.get(Name, tok::TokenKind::identifier);
  TypeSourceInfo *fieldTypeSource = context.getTrivialTypeSourceInfo(Ty, NoLoc);
  FieldDecl *fieldDecl = FieldDecl::Create(
      context, recordDecl, NoLoc, NoLoc, &fieldName, Ty, fieldTypeSource,
      /*BitWidth*/ nullptr, /*Mutable*/ false, InClassInitStyle::ICIS_NoInit);
  fieldDecl->setAccess(access);
  fieldDecl->setImplicit(true);
  recordDecl->addDecl(fieldDecl);
}
// struct RayDesc
//{
// float3 Origin;
// float TMin;
// float3 Direction;
// float TMax;
//};
// Builds the implicit 'RayDesc' struct declaration (layout shown above) at
// translation-unit scope and returns it. 'float3Ty' supplies the type used
// for the Origin and Direction fields.
static CXXRecordDecl *CreateRayDescStruct(clang::ASTContext &context,
                                          QualType float3Ty) {
  DeclContext *currentDeclContext = context.getTranslationUnitDecl();
  IdentifierInfo &rayDesc =
      context.Idents.get(StringRef("RayDesc"), tok::TokenKind::identifier);
  CXXRecordDecl *rayDescDecl = CXXRecordDecl::Create(
      context, TagTypeKind::TTK_Struct, currentDeclContext, NoLoc, NoLoc,
      &rayDesc, nullptr, DelayTypeCreationTrue);
  rayDescDecl->addAttr(
      FinalAttr::CreateImplicit(context, FinalAttr::Keyword_final));
  rayDescDecl->startDefinition();

  QualType floatTy = context.FloatTy;
  // float3 Origin;
  CreateSimpleField(context, rayDescDecl, "Origin", float3Ty);
  // float TMin;
  CreateSimpleField(context, rayDescDecl, "TMin", floatTy);
  // float3 Direction;
  CreateSimpleField(context, rayDescDecl, "Direction", float3Ty);
  // float TMax;
  CreateSimpleField(context, rayDescDecl, "TMax", floatTy);

  rayDescDecl->completeDefinition();
  // Both declarations need to be present for correct handling.
  currentDeclContext->addDecl(rayDescDecl);
  rayDescDecl->setImplicit(true);
  return rayDescDecl;
}
// struct BuiltInTriangleIntersectionAttributes
// {
// float2 barycentrics;
// };
// Builds the implicit 'BuiltInTriangleIntersectionAttributes' struct (layout
// shown above) at translation-unit scope; 'baryType' supplies the type of the
// barycentrics field.
static CXXRecordDecl *
AddBuiltInTriangleIntersectionAttributes(ASTContext &context,
                                         QualType baryType) {
  DeclContext *curDC = context.getTranslationUnitDecl();
  IdentifierInfo &attributesId =
      context.Idents.get(StringRef("BuiltInTriangleIntersectionAttributes"),
                         tok::TokenKind::identifier);
  CXXRecordDecl *attributesDecl = CXXRecordDecl::Create(
      context, TagTypeKind::TTK_Struct, curDC, NoLoc, NoLoc, &attributesId,
      nullptr, DelayTypeCreationTrue);
  attributesDecl->addAttr(
      FinalAttr::CreateImplicit(context, FinalAttr::Keyword_final));
  attributesDecl->startDefinition();
  // float2 barycentrics;
  CreateSimpleField(context, attributesDecl, "barycentrics", baryType);
  attributesDecl->completeDefinition();
  attributesDecl->setImplicit(true);
  curDC->addDecl(attributesDecl);
  return attributesDecl;
}
//
// Subobjects
// Begins the definition of an implicit, final subobject struct named 'name'
// at translation-unit scope. Callers add fields and then seal the type with
// FinishSubobjectDecl.
static CXXRecordDecl *StartSubobjectDecl(ASTContext &context,
                                         const char *name) {
  IdentifierInfo &structId =
      context.Idents.get(StringRef(name), tok::TokenKind::identifier);
  CXXRecordDecl *recordDecl = CXXRecordDecl::Create(
      context, TagTypeKind::TTK_Struct, context.getTranslationUnitDecl(), NoLoc,
      NoLoc, &structId, nullptr, DelayTypeCreationTrue);
  recordDecl->addAttr(
      FinalAttr::CreateImplicit(context, FinalAttr::Keyword_final));
  recordDecl->startDefinition();
  return recordDecl;
}
// Completes a subobject struct started with StartSubobjectDecl: seals the
// definition, registers it at translation-unit scope, and marks it implicit.
void FinishSubobjectDecl(ASTContext &context, CXXRecordDecl *decl) {
  decl->completeDefinition();
  context.getTranslationUnitDecl()->addDecl(decl);
  decl->setImplicit(true);
}
// struct StateObjectConfig
// {
// uint32_t Flags;
// };
// Declares the StateObjectConfig subobject struct: { uint32_t Flags; }.
static CXXRecordDecl *CreateSubobjectStateObjectConfig(ASTContext &context) {
  CXXRecordDecl *configDecl = StartSubobjectDecl(context, "StateObjectConfig");
  CreateSimpleField(context, configDecl, "Flags", context.UnsignedIntTy,
                    AccessSpecifier::AS_private);
  FinishSubobjectDecl(context, configDecl);
  return configDecl;
}
// struct GlobalRootSignature
// {
// string signature;
// };
// Declares the Global/LocalRootSignature subobject struct: { string Data; }.
static CXXRecordDecl *CreateSubobjectRootSignature(ASTContext &context,
                                                   bool global) {
  const char *structName =
      global ? "GlobalRootSignature" : "LocalRootSignature";
  CXXRecordDecl *sigDecl = StartSubobjectDecl(context, structName);
  CreateSimpleField(context, sigDecl, "Data", context.HLSLStringTy,
                    AccessSpecifier::AS_private);
  FinishSubobjectDecl(context, sigDecl);
  return sigDecl;
}
// struct SubobjectToExportsAssociation
// {
// string Subobject;
// string Exports;
// };
// Declares the SubobjectToExportsAssociation subobject struct:
// { string Subobject; string Exports; }.
static CXXRecordDecl *
CreateSubobjectSubobjectToExportsAssoc(ASTContext &context) {
  CXXRecordDecl *assocDecl =
      StartSubobjectDecl(context, "SubobjectToExportsAssociation");
  CreateSimpleField(context, assocDecl, "Subobject", context.HLSLStringTy,
                    AccessSpecifier::AS_private);
  CreateSimpleField(context, assocDecl, "Exports", context.HLSLStringTy,
                    AccessSpecifier::AS_private);
  FinishSubobjectDecl(context, assocDecl);
  return assocDecl;
}
// struct RaytracingShaderConfig
// {
// uint32_t MaxPayloadSizeInBytes;
// uint32_t MaxAttributeSizeInBytes;
// };
// Declares the RaytracingShaderConfig subobject struct:
// { uint32_t MaxPayloadSizeInBytes; uint32_t MaxAttributeSizeInBytes; }.
static CXXRecordDecl *
CreateSubobjectRaytracingShaderConfig(ASTContext &context) {
  CXXRecordDecl *shaderConfigDecl =
      StartSubobjectDecl(context, "RaytracingShaderConfig");
  CreateSimpleField(context, shaderConfigDecl, "MaxPayloadSizeInBytes",
                    context.UnsignedIntTy, AccessSpecifier::AS_private);
  CreateSimpleField(context, shaderConfigDecl, "MaxAttributeSizeInBytes",
                    context.UnsignedIntTy, AccessSpecifier::AS_private);
  FinishSubobjectDecl(context, shaderConfigDecl);
  return shaderConfigDecl;
}
// struct RaytracingPipelineConfig
// {
// uint32_t MaxTraceRecursionDepth;
// };
// Declares the RaytracingPipelineConfig subobject struct:
// { uint32_t MaxTraceRecursionDepth; }.
static CXXRecordDecl *
CreateSubobjectRaytracingPipelineConfig(ASTContext &context) {
  CXXRecordDecl *pipelineConfigDecl =
      StartSubobjectDecl(context, "RaytracingPipelineConfig");
  CreateSimpleField(context, pipelineConfigDecl, "MaxTraceRecursionDepth",
                    context.UnsignedIntTy, AccessSpecifier::AS_private);
  FinishSubobjectDecl(context, pipelineConfigDecl);
  return pipelineConfigDecl;
}
// struct RaytracingPipelineConfig1
// {
// uint32_t MaxTraceRecursionDepth;
// uint32_t Flags;
// };
// Declares the RaytracingPipelineConfig1 subobject struct:
// { uint32_t MaxTraceRecursionDepth; uint32_t Flags; }.
static CXXRecordDecl *
CreateSubobjectRaytracingPipelineConfig1(ASTContext &context) {
  CXXRecordDecl *config1Decl =
      StartSubobjectDecl(context, "RaytracingPipelineConfig1");
  CreateSimpleField(context, config1Decl, "MaxTraceRecursionDepth",
                    context.UnsignedIntTy, AccessSpecifier::AS_private);
  CreateSimpleField(context, config1Decl, "Flags", context.UnsignedIntTy,
                    AccessSpecifier::AS_private);
  FinishSubobjectDecl(context, config1Decl);
  return config1Decl;
}
// struct TriangleHitGroup
// {
// string AnyHit;
// string ClosestHit;
// };
// Declares the TriangleHitGroup subobject struct:
// { string AnyHit; string ClosestHit; }.
static CXXRecordDecl *CreateSubobjectTriangleHitGroup(ASTContext &context) {
  CXXRecordDecl *hitGroupDecl = StartSubobjectDecl(context, "TriangleHitGroup");
  CreateSimpleField(context, hitGroupDecl, "AnyHit", context.HLSLStringTy,
                    AccessSpecifier::AS_private);
  CreateSimpleField(context, hitGroupDecl, "ClosestHit", context.HLSLStringTy,
                    AccessSpecifier::AS_private);
  FinishSubobjectDecl(context, hitGroupDecl);
  return hitGroupDecl;
}
// struct ProceduralPrimitiveHitGroup
// {
// string AnyHit;
// string ClosestHit;
// string Intersection;
// };
// Declares the ProceduralPrimitiveHitGroup subobject struct:
// { string AnyHit; string ClosestHit; string Intersection; }.
static CXXRecordDecl *
CreateSubobjectProceduralPrimitiveHitGroup(ASTContext &context) {
  CXXRecordDecl *hitGroupDecl =
      StartSubobjectDecl(context, "ProceduralPrimitiveHitGroup");
  CreateSimpleField(context, hitGroupDecl, "AnyHit", context.HLSLStringTy,
                    AccessSpecifier::AS_private);
  CreateSimpleField(context, hitGroupDecl, "ClosestHit", context.HLSLStringTy,
                    AccessSpecifier::AS_private);
  CreateSimpleField(context, hitGroupDecl, "Intersection", context.HLSLStringTy,
                    AccessSpecifier::AS_private);
  FinishSubobjectDecl(context, hitGroupDecl);
  return hitGroupDecl;
}
/// <summary>Creates an implicit typedef named 'ident' for 'baseType' at
/// translation-unit scope in the specified ASTContext.</summary>
static TypedefDecl *CreateGlobalTypedef(ASTContext *context, const char *ident,
                                        QualType baseType) {
  DXASSERT_NOMSG(context != nullptr);
  DXASSERT_NOMSG(ident != nullptr);
  DXASSERT_NOMSG(!baseType.isNull());

  DeclContext *tuDeclContext = context->getTranslationUnitDecl();
  IdentifierInfo &typedefId = context->Idents.get(ident);
  TypeSourceInfo *baseTypeSource = context->getTrivialTypeSourceInfo(baseType);
  TypedefDecl *typedefDecl = TypedefDecl::Create(
      *context, tuDeclContext, NoLoc, NoLoc, &typedefId, baseTypeSource);
  tuDeclContext->addDecl(typedefDecl);
  typedefDecl->setImplicit(true);
  return typedefDecl;
}
class HLSLExternalSource : public ExternalSemaSource {
private:
// Inner types.
struct FindStructBasicTypeResult {
ArBasicKind Kind; // Kind of struct (eg, AR_OBJECT_TEXTURE2D)
unsigned int BasicKindsAsTypeIndex; // Index into g_ArBasicKinds*
FindStructBasicTypeResult(ArBasicKind kind,
unsigned int basicKindAsTypeIndex)
: Kind(kind), BasicKindsAsTypeIndex(basicKindAsTypeIndex) {}
bool Found() const { return Kind != AR_BASIC_UNKNOWN; }
};
// Declaration for matrix and vector templates.
ClassTemplateDecl *m_matrixTemplateDecl;
ClassTemplateDecl *m_vectorTemplateDecl;
ClassTemplateDecl *m_vkIntegralConstantTemplateDecl;
ClassTemplateDecl *m_vkLiteralTemplateDecl;
// Declarations for Work Graph Output Record types
ClassTemplateDecl *m_GroupNodeOutputRecordsTemplateDecl;
ClassTemplateDecl *m_ThreadNodeOutputRecordsTemplateDecl;
// Namespace decl for hlsl intrinsic functions
NamespaceDecl *m_hlslNSDecl;
// Namespace decl for Vulkan-specific intrinsic functions
NamespaceDecl *m_vkNSDecl;
// Context being processed.
ASTContext *m_context;
// Semantic analyzer being processed.
Sema *m_sema;
// Intrinsic tables available externally.
llvm::SmallVector<CComPtr<IDxcIntrinsicTable>, 2> m_intrinsicTables;
// Scalar types indexed by HLSLScalarType.
QualType m_scalarTypes[HLSLScalarTypeCount];
// Scalar types already built.
TypedefDecl *m_scalarTypeDefs[HLSLScalarTypeCount];
// Matrix types already built indexed by type, row-count, col-count. Should
// probably move to a sparse map. Instrument to figure out best initial size.
QualType m_matrixTypes[HLSLScalarTypeCount][4][4];
// Matrix types already built, in shorthand form.
TypedefDecl *m_matrixShorthandTypes[HLSLScalarTypeCount][4][4];
// Vector types already built.
QualType m_vectorTypes[HLSLScalarTypeCount][4];
TypedefDecl *m_vectorTypedefs[HLSLScalarTypeCount][4];
// BuiltinType for each scalar type.
QualType m_baseTypes[HLSLScalarTypeCount];
// String type
QualType m_hlslStringType;
TypedefDecl *m_hlslStringTypedef;
// Built-in object types declarations, indexed by basic kind constant.
CXXRecordDecl *m_objectTypeDecls[_countof(g_ArBasicKindsAsTypes)];
// Map from object decl to the object index.
using ObjectTypeDeclMapType =
std::array<std::pair<CXXRecordDecl *, unsigned>,
_countof(g_ArBasicKindsAsTypes) +
_countof(g_DeprecatedEffectObjectNames)>;
ObjectTypeDeclMapType m_objectTypeDeclsMap;
UsedIntrinsicStore m_usedIntrinsics;
/// <summary>Add all base QualTypes for each hlsl scalar types.</summary>
void AddBaseTypes();
/// <summary>Adds all supporting declarations to reference scalar
/// types.</summary>
void AddHLSLScalarTypes();
/// <summary>Adds string type QualType for HLSL string declarations</summary>
void AddHLSLStringType();
  // Returns the type of the first template parameter of a built-in object
  // template (e.g. the element type T of Texture2D<T>).
  QualType GetTemplateObjectDataType(CXXRecordDecl *recordDecl) {
    DXASSERT_NOMSG(recordDecl != nullptr);
    TemplateParameterList *parameterList =
        recordDecl->getTemplateParameterList(0);
    NamedDecl *parameterDecl = parameterList->getParam(0);
    DXASSERT(parameterDecl->getKind() == Decl::Kind::TemplateTypeParm,
             "otherwise recordDecl isn't one of the built-in objects with "
             "templates");
    TemplateTypeParmDecl *parmDecl =
        dyn_cast<TemplateTypeParmDecl>(parameterDecl);
    return QualType(parmDecl->getTypeForDecl(), 0);
  }
// Determines whether the given intrinsic parameter type has a single QualType
// mapping.
  // Returns the unique QualType for argument 'index' of 'intrinsic' when its
  // template/component constraints pin down exactly one type; returns a null
  // QualType when the argument is ambiguous (multiple legal types, variable
  // rows/cols, or component type derived from another argument).
  QualType GetSingleQualTypeForMapping(const HLSL_INTRINSIC *intrinsic,
                                       int index) {
    int templateRef = intrinsic->pArgs[index].uTemplateId;
    int componentRef = intrinsic->pArgs[index].uComponentTypeId;
    const HLSL_INTRINSIC_ARGUMENT *templateArg = &intrinsic->pArgs[templateRef];
    const HLSL_INTRINSIC_ARGUMENT *componentArg =
        &intrinsic->pArgs[componentRef];
    const HLSL_INTRINSIC_ARGUMENT *matrixArg = &intrinsic->pArgs[index];

    if (templateRef >= 0 && templateArg->uTemplateId == templateRef &&
        !DoesLegalTemplateAcceptMultipleTypes(templateArg->uLegalTemplates) &&
        componentRef >= 0 && componentRef != INTRIN_COMPTYPE_FROM_TYPE_ELT0 &&
        componentRef != INTRIN_COMPTYPE_FROM_NODEOUTPUT &&
        componentArg->uComponentTypeId == 0 &&
        !DoesComponentTypeAcceptMultipleTypes(
            componentArg->uLegalComponentTypes) &&
        !IsRowOrColumnVariable(matrixArg->uCols) &&
        !IsRowOrColumnVariable(matrixArg->uRows)) {
      // First entry of each legal-type table is the canonical choice.
      ArTypeObjectKind templateKind =
          g_LegalIntrinsicTemplates[templateArg->uLegalTemplates][0];
      ArBasicKind elementKind =
          g_LegalIntrinsicCompTypes[componentArg->uLegalComponentTypes][0];
      return NewSimpleAggregateType(templateKind, elementKind, 0,
                                    matrixArg->uRows, matrixArg->uCols);
    }

    return QualType();
  }
// Adds a new template parameter declaration to the specified array and
// returns the type for the parameter.
  // Creates a TemplateTypeParmDecl named 'name' for 'recordDecl', appends it
  // to 'templateParamNamedDecls' at index '*templateParamNamedDeclsCount'
  // (then increments that count), and returns the QualType that refers to the
  // new parameter.
  QualType AddTemplateParamToArray(
      const char *name, CXXRecordDecl *recordDecl, int templateDepth,
      NamedDecl *(&templateParamNamedDecls)[g_MaxIntrinsicParamCount + 1],
      size_t *templateParamNamedDeclsCount) {
    DXASSERT_NOMSG(name != nullptr);
    DXASSERT_NOMSG(recordDecl != nullptr);
    DXASSERT_NOMSG(templateParamNamedDecls != nullptr);
    DXASSERT_NOMSG(templateParamNamedDeclsCount != nullptr);
    DXASSERT(*templateParamNamedDeclsCount < _countof(templateParamNamedDecls),
             "otherwise constants should be updated");
    assert(*templateParamNamedDeclsCount < _countof(templateParamNamedDecls));

    // Create the declaration for the template parameter.
    IdentifierInfo *id = &m_context->Idents.get(StringRef(name));
    TemplateTypeParmDecl *templateTypeParmDecl = TemplateTypeParmDecl::Create(
        *m_context, recordDecl, NoLoc, NoLoc, templateDepth,
        *templateParamNamedDeclsCount, id, TypenameTrue, ParameterPackFalse);
    templateParamNamedDecls[*templateParamNamedDeclsCount] =
        templateTypeParmDecl;

    // Create the type that the parameter represents.
    QualType result = m_context->getTemplateTypeParmType(
        templateDepth, *templateParamNamedDeclsCount, ParameterPackFalse,
        templateTypeParmDecl);

    // Increment the declaration count for the array; as long as caller passes
    // in both arguments, it need not concern itself with maintaining this
    // value.
    (*templateParamNamedDeclsCount)++;

    return result;
  }
// Adds a function specified by the given intrinsic to a record declaration.
// The template depth will be zero for records that don't have a "template<>"
// line even if conceptual; or one if it does have one.
  // 'recordDecl' is the object type receiving the method; 'templateDepth' is
  // the nesting depth for new template parameters; 'intrinsic' describes the
  // method (pArgs[0] is its name/return, pArgs[1..] its parameters).
  void AddObjectIntrinsicTemplate(CXXRecordDecl *recordDecl, int templateDepth,
                                  const HLSL_INTRINSIC *intrinsic) {
    DXASSERT_NOMSG(recordDecl != nullptr);
    DXASSERT_NOMSG(intrinsic != nullptr);
    DXASSERT(intrinsic->uNumArgs > 0,
             "otherwise there isn't even an intrinsic name");
    DXASSERT(intrinsic->uNumArgs <= (g_MaxIntrinsicParamCount + 1),
             "otherwise g_MaxIntrinsicParamCount should be updated");
    // uNumArgs includes the result type, g_MaxIntrinsicParamCount doesn't,
    // thus the +1.
    assert(intrinsic->uNumArgs <= (g_MaxIntrinsicParamCount + 1));

    // TODO: implement template parameter constraints for HLSL intrinsic
    // methods in declarations
    //
    // Build template parameters, parameter types, and the return type.
    // Parameter declarations are built after the function is created, to use
    // it as their scope.
    //
    unsigned int numParams = intrinsic->uNumArgs - 1;
    NamedDecl *templateParamNamedDecls[g_MaxIntrinsicParamCount + 1];
    size_t templateParamNamedDeclsCount = 0;
    QualType argsQTs[g_MaxIntrinsicParamCount];
    StringRef argNames[g_MaxIntrinsicParamCount];
    QualType functionResultQT = recordDecl->getASTContext().VoidTy;

    DXASSERT(_countof(templateParamNamedDecls) >= numParams + 1,
             "need enough templates for all parameters and the return type, "
             "otherwise constants need updating");

    // Handle the return type.
    // functionResultQT = GetSingleQualTypeForMapping(intrinsic, 0);
    // if (functionResultQT.isNull()) {
    // Workaround for template parameter argument count mismatch.
    // Create template parameter for return type always
    // TODO: reenable the check and skip template argument.
    functionResultQT = AddTemplateParamToArray(
        "TResult", recordDecl, templateDepth, templateParamNamedDecls,
        &templateParamNamedDeclsCount);
    // }

    SmallVector<hlsl::ParameterModifier, g_MaxIntrinsicParamCount> paramMods;
    InitParamMods(intrinsic, paramMods);

    // Consider adding more cases where return type can be handled a priori.
    // Ultimately #260431 should do significantly better.

    // Handle parameters.
    for (unsigned int i = 1; i < intrinsic->uNumArgs; i++) {
      //
      // GetSingleQualTypeForMapping can be used here to remove unnecessary
      // template arguments.
      //
      // However this may produce template instantiations with equivalent
      // template arguments for overloaded methods. It's possible to resolve
      // some of these by generating specializations, but the current intrinsic
      // table has rules that are hard to process in their current form to find
      // all cases.
      //
      // Each parameter gets its own "T<param-name>" template parameter.
      char name[g_MaxIntrinsicParamName + 2];
      name[0] = 'T';
      name[1] = '\0';
      strcat_s(name, intrinsic->pArgs[i].pName);
      argsQTs[i - 1] = AddTemplateParamToArray(name, recordDecl, templateDepth,
                                               templateParamNamedDecls,
                                               &templateParamNamedDeclsCount);
      // Change out/inout param to reference type.
      if (paramMods[i - 1].isAnyOut())
        argsQTs[i - 1] = m_context->getLValueReferenceType(argsQTs[i - 1]);

      argNames[i - 1] = StringRef(intrinsic->pArgs[i].pName);
    }

    // Create the declaration.
    IdentifierInfo *ii =
        &m_context->Idents.get(StringRef(intrinsic->pArgs[0].pName));
    DeclarationName declarationName = DeclarationName(ii);
    CXXMethodDecl *functionDecl = CreateObjectFunctionDeclarationWithParams(
        *m_context, recordDecl, functionResultQT,
        ArrayRef<QualType>(argsQTs, numParams),
        ArrayRef<StringRef>(argNames, numParams), declarationName, true,
        templateParamNamedDeclsCount > 0);
    functionDecl->setImplicit(true);

    // If the function is a template function, create the declaration and
    // cross-reference.
    if (templateParamNamedDeclsCount > 0) {
      hlsl::CreateFunctionTemplateDecl(*m_context, recordDecl, functionDecl,
                                       templateParamNamedDecls,
                                       templateParamNamedDeclsCount);
    }
  }
// Checks whether the two specified intrinsics generate equivalent templates.
// For example: foo (any_int) and foo (any_float) are only unambiguous in the
// context of HLSL intrinsic rules, and their difference can't be expressed
// with C++ templates.
bool AreIntrinsicTemplatesEquivalent(const HLSL_INTRINSIC *left,
const HLSL_INTRINSIC *right) {
if (left == right) {
return true;
}
if (left == nullptr || right == nullptr) {
return false;
}
return (left->uNumArgs == right->uNumArgs &&
0 == strcmp(left->pArgs[0].pName, right->pArgs[0].pName));
}
// Adds all the intrinsic methods that correspond to the specified type.
void AddObjectMethods(ArBasicKind kind, CXXRecordDecl *recordDecl,
int templateDepth) {
DXASSERT_NOMSG(recordDecl != nullptr);
DXASSERT_NOMSG(templateDepth >= 0);
const HLSL_INTRINSIC *intrinsics;
const HLSL_INTRINSIC *prior = nullptr;
size_t intrinsicCount;
GetIntrinsicMethods(kind, &intrinsics, &intrinsicCount);
DXASSERT((intrinsics == nullptr) == (intrinsicCount == 0),
"intrinsic table pointer must match count (null for zero, "
"something valid otherwise");
while (intrinsicCount--) {
if (!AreIntrinsicTemplatesEquivalent(intrinsics, prior)) {
AddObjectIntrinsicTemplate(recordDecl, templateDepth, intrinsics);
prior = intrinsics;
}
intrinsics++;
}
}
  // Builds the nested types needed for double-subscript access such as
  // obj.mips[m][pos] or obj.sample[s][pos].
  //
  // typeDecl/recordDecl: the template and record of the enclosing object.
  // memberName: the field added to the object ("mips" or "sample").
  // elementType: the type returned by the innermost operator[].
  // templateTypeParmDecl: the enclosing object's element template parameter;
  //   the inner operator[] is templated on it.
  // type0Name/type1Name: names of the outer and inner helper classes.
  // indexer0Name/indexer0Type: name/type of the outer subscript parameter
  //   (indexer0Type is also used for the private "handle" fields).
  // indexer1Name/indexer1Type: name/type of the inner subscript parameter.
  void AddDoubleSubscriptSupport(
      ClassTemplateDecl *typeDecl, CXXRecordDecl *recordDecl,
      const char *memberName, QualType elementType,
      TemplateTypeParmDecl *templateTypeParmDecl, const char *type0Name,
      const char *type1Name, const char *indexer0Name, QualType indexer0Type,
      const char *indexer1Name, QualType indexer1Type) {
    DXASSERT_NOMSG(typeDecl != nullptr);
    DXASSERT_NOMSG(recordDecl != nullptr);
    DXASSERT_NOMSG(memberName != nullptr);
    DXASSERT_NOMSG(!elementType.isNull());
    DXASSERT_NOMSG(templateTypeParmDecl != nullptr);
    DXASSERT_NOMSG(type0Name != nullptr);
    DXASSERT_NOMSG(type1Name != nullptr);
    DXASSERT_NOMSG(indexer0Name != nullptr);
    DXASSERT_NOMSG(!indexer0Type.isNull());
    DXASSERT_NOMSG(indexer1Name != nullptr);
    DXASSERT_NOMSG(!indexer1Type.isNull());
    //
    // Add inner types to the templates to represent the following C++ code
    // inside the class. public:
    // class sample_slice_type
    // {
    // public: TElement operator[](uint3 index);
    // };
    // class sample_type
    // {
    // public: sample_slice_type operator[](uint slice);
    // };
    // sample_type sample;
    //
    // Variable names reflect this structure, but this code will also produce
    // the types for .mips access.
    //
    const bool MutableTrue = true;
    DeclarationName subscriptName =
        m_context->DeclarationNames.getCXXOperatorName(OO_Subscript);
    // Inner class (e.g. sample_slice_type): holds a handle and a templated
    // operator[] returning the element type.
    CXXRecordDecl *sampleSliceTypeDecl =
        CXXRecordDecl::Create(*m_context, TTK_Class, recordDecl, NoLoc, NoLoc,
                              &m_context->Idents.get(StringRef(type1Name)));
    sampleSliceTypeDecl->setAccess(AS_public);
    sampleSliceTypeDecl->setImplicit();
    recordDecl->addDecl(sampleSliceTypeDecl);
    sampleSliceTypeDecl->startDefinition();
    const bool MutableFalse = false;
    FieldDecl *sliceHandleDecl = FieldDecl::Create(
        *m_context, sampleSliceTypeDecl, NoLoc, NoLoc,
        &m_context->Idents.get(StringRef("handle")), indexer0Type,
        m_context->CreateTypeSourceInfo(indexer0Type), nullptr, MutableFalse,
        ICIS_NoInit);
    sliceHandleDecl->setAccess(AS_private);
    sampleSliceTypeDecl->addDecl(sliceHandleDecl);
    CXXMethodDecl *sampleSliceSubscriptDecl =
        CreateObjectFunctionDeclarationWithParams(
            *m_context, sampleSliceTypeDecl, elementType,
            ArrayRef<QualType>(indexer1Type),
            ArrayRef<StringRef>(StringRef(indexer1Name)), subscriptName, true);
    // The inner operator[] is templated on the object's element parameter.
    hlsl::CreateFunctionTemplateDecl(
        *m_context, sampleSliceTypeDecl, sampleSliceSubscriptDecl,
        reinterpret_cast<NamedDecl **>(&templateTypeParmDecl), 1);
    sampleSliceTypeDecl->completeDefinition();
    // Outer class (e.g. sample_type): holds a handle and an operator[] that
    // yields a reference to the inner slice type.
    CXXRecordDecl *sampleTypeDecl =
        CXXRecordDecl::Create(*m_context, TTK_Class, recordDecl, NoLoc, NoLoc,
                              &m_context->Idents.get(StringRef(type0Name)));
    sampleTypeDecl->setAccess(AS_public);
    recordDecl->addDecl(sampleTypeDecl);
    sampleTypeDecl->startDefinition();
    sampleTypeDecl->setImplicit();
    FieldDecl *sampleHandleDecl = FieldDecl::Create(
        *m_context, sampleTypeDecl, NoLoc, NoLoc,
        &m_context->Idents.get(StringRef("handle")), indexer0Type,
        m_context->CreateTypeSourceInfo(indexer0Type), nullptr, MutableFalse,
        ICIS_NoInit);
    sampleHandleDecl->setAccess(AS_private);
    sampleTypeDecl->addDecl(sampleHandleDecl);
    QualType sampleSliceType = m_context->getRecordType(sampleSliceTypeDecl);
    CXXMethodDecl *sampleSubscriptDecl =
        CreateObjectFunctionDeclarationWithParams(
            *m_context, sampleTypeDecl,
            m_context->getLValueReferenceType(sampleSliceType),
            ArrayRef<QualType>(indexer0Type),
            ArrayRef<StringRef>(StringRef(indexer0Name)), subscriptName, true);
    sampleTypeDecl->completeDefinition();
    // Add subscript attribute
    AddHLSLSubscriptAttr(sampleSubscriptDecl, *m_context,
                         HLSubscriptOpcode::DoubleSubscript);
    // Finally, the mutable public member of the object itself
    // (e.g. "sample_type sample;").
    QualType sampleTypeQT = m_context->getRecordType(sampleTypeDecl);
    FieldDecl *sampleFieldDecl = FieldDecl::Create(
        *m_context, recordDecl, NoLoc, NoLoc,
        &m_context->Idents.get(StringRef(memberName)), sampleTypeQT,
        m_context->CreateTypeSourceInfo(sampleTypeQT), nullptr, MutableTrue,
        ICIS_NoInit);
    sampleFieldDecl->setAccess(AS_public);
    recordDecl->addDecl(sampleFieldDecl);
  }
  // Adds operator[] (and, when the static table requests them, the .mips and
  // .sample double-subscript members) to the given object record.
  // op.SubscriptCardinality is the number of uint components in the index
  // (0 means the object has no subscript at all).
  void AddObjectSubscripts(ArBasicKind kind, ClassTemplateDecl *typeDecl,
                           CXXRecordDecl *recordDecl,
                           SubscriptOperatorRecord op) {
    DXASSERT_NOMSG(typeDecl != nullptr);
    DXASSERT_NOMSG(recordDecl != nullptr);
    DXASSERT_NOMSG(0 <= op.SubscriptCardinality &&
                   op.SubscriptCardinality <= 3);
    DXASSERT(op.SubscriptCardinality > 0 ||
                 (op.HasMips == false && op.HasSample == false),
             "objects that have .mips or .sample member also have a plain "
             "subscript defined (otherwise static table is "
             "likely incorrect, and this function won't know the cardinality "
             "of the position parameter");
    bool isReadWrite = GetBasicKindProps(kind) & BPROP_RWBUFFER;
    DXASSERT(!isReadWrite || (op.HasMips == false),
             "read/write objects don't have .mips members");
    // Return early if there is no work to be done.
    if (op.SubscriptCardinality == 0) {
      return;
    }
    const unsigned int templateDepth = 1;
    // Add an operator[].
    // The result is (const for read-only objects) TElement&, where TElement
    // is the object's first template parameter.
    TemplateTypeParmDecl *templateTypeParmDecl = cast<TemplateTypeParmDecl>(
        typeDecl->getTemplateParameters()->getParam(0));
    QualType resultType = m_context->getTemplateTypeParmType(
        templateDepth, 0, ParameterPackFalse, templateTypeParmDecl);
    if (!isReadWrite)
      resultType = m_context->getConstType(resultType);
    resultType = m_context->getLValueReferenceType(resultType);
    // Index is 'uint' for cardinality 1, otherwise 'uintN'.
    QualType indexType =
        op.SubscriptCardinality == 1
            ? m_context->UnsignedIntTy
            : NewSimpleAggregateType(AR_TOBJ_VECTOR, AR_BASIC_UINT32, 0, 1,
                                     op.SubscriptCardinality);
    CXXMethodDecl *functionDecl = CreateObjectFunctionDeclarationWithParams(
        *m_context, recordDecl, resultType, ArrayRef<QualType>(indexType),
        ArrayRef<StringRef>(StringRef("index")),
        m_context->DeclarationNames.getCXXOperatorName(OO_Subscript), true,
        true);
    hlsl::CreateFunctionTemplateDecl(
        *m_context, recordDecl, functionDecl,
        reinterpret_cast<NamedDecl **>(&templateTypeParmDecl), 1);
    functionDecl->addAttr(HLSLCXXOverloadAttr::CreateImplicit(*m_context));
    // Add a .mips member if necessary.
    QualType uintType = m_context->UnsignedIntTy;
    if (op.HasMips) {
      AddDoubleSubscriptSupport(typeDecl, recordDecl, "mips", resultType,
                                templateTypeParmDecl, "mips_type",
                                "mips_slice_type", "mipSlice", uintType, "pos",
                                indexType);
    }
    // Add a .sample member if necessary.
    if (op.HasSample) {
      AddDoubleSubscriptSupport(typeDecl, recordDecl, "sample", resultType,
                                templateTypeParmDecl, "sample_type",
                                "sample_slice_type", "sampleSlice", uintType,
                                "pos", indexType);
      // TODO: support operator[][](indexType, uint).
    }
  }
static bool
ObjectTypeDeclMapTypeCmp(const std::pair<CXXRecordDecl *, unsigned> &a,
const std::pair<CXXRecordDecl *, unsigned> &b) {
return a.first < b.first;
};
int FindObjectBasicKindIndex(const CXXRecordDecl *recordDecl) {
auto begin = m_objectTypeDeclsMap.begin();
auto end = m_objectTypeDeclsMap.end();
auto val = std::make_pair(const_cast<CXXRecordDecl *>(recordDecl), 0);
auto low = std::lower_bound(begin, end, val, ObjectTypeDeclMapTypeCmp);
if (low == end)
return -1;
if (recordDecl == low->first)
return low->second;
else
return -1;
}
#ifdef ENABLE_SPIRV_CODEGEN
  // Creates one TemplateTypeParmDecl for each argument of a vk:: intrinsic
  // whose type is templated (INTRIN_TEMPLATE_FROM_FUNCTION or LITEMPLATE_ANY).
  // Each parameter is given the default type 'uint'.
  // NOTE(review): every created parameter uses the name "T" and depth/index
  // (0, 0), while VkIntrinsicFunctionParamTypes references them with index i
  // — presumably only one templated argument occurs in practice; confirm
  // against g_VkIntrinsics before relying on multiple template parameters.
  SmallVector<NamedDecl *, 1> CreateTemplateTypeParmDeclsForVkIntrinsicFunction(
      const HLSL_INTRINSIC *intrinsic) {
    SmallVector<NamedDecl *, 1> templateTypeParmDecls;
    auto &context = m_sema->getASTContext();
    const HLSL_INTRINSIC_ARGUMENT *pArgs = intrinsic->pArgs;
    UINT uNumArgs = intrinsic->uNumArgs;
    // Shared default-argument TypeSourceInfo, created lazily on the first
    // templated argument.
    TypeSourceInfo *TInfo = nullptr;
    for (UINT i = 0; i < uNumArgs; ++i) {
      if (pArgs[i].uTemplateId == INTRIN_TEMPLATE_FROM_FUNCTION ||
          pArgs[i].uLegalTemplates == LITEMPLATE_ANY) {
        IdentifierInfo *id = &context.Idents.get("T");
        TemplateTypeParmDecl *templateTypeParmDecl =
            TemplateTypeParmDecl::Create(context, m_vkNSDecl, NoLoc, NoLoc, 0,
                                         0, id, TypenameTrue,
                                         ParameterPackFalse);
        if (TInfo == nullptr) {
          TInfo = m_sema->getASTContext().CreateTypeSourceInfo(
              m_context->UnsignedIntTy, 0);
        }
        templateTypeParmDecl->setDefaultArgument(TInfo);
        templateTypeParmDecls.push_back(templateTypeParmDecl);
        continue;
      }
    }
    return templateTypeParmDecls;
  }
  // Creates ParmVarDecls for a vk:: intrinsic from the already-computed
  // parameter types and modifiers. Table slot 0 is the function name/return,
  // so table index i maps to paramTypes[i - numVariadicArgs] and
  // paramMods[i - 1 - numVariadicArgs]; variadic arguments get no decl.
  SmallVector<ParmVarDecl *, g_MaxIntrinsicParamCount>
  CreateParmDeclsForVkIntrinsicFunction(
      const HLSL_INTRINSIC *intrinsic,
      const SmallVectorImpl<QualType> &paramTypes,
      const SmallVectorImpl<ParameterModifier> &paramMods) {
    auto &context = m_sema->getASTContext();
    SmallVector<ParmVarDecl *, g_MaxIntrinsicParamCount> paramDecls;
    const HLSL_INTRINSIC_ARGUMENT *pArgs = intrinsic->pArgs;
    UINT uNumArgs = intrinsic->uNumArgs;
    for (UINT i = 1, numVariadicArgs = 0; i < uNumArgs; ++i) {
      // Skip variadic arguments, but remember how many were skipped so the
      // indices into paramTypes/paramMods stay aligned.
      if (IsVariadicArgument(pArgs[i])) {
        ++numVariadicArgs;
        continue;
      }
      IdentifierInfo *id = &context.Idents.get(StringRef(pArgs[i].pName));
      TypeSourceInfo *TInfo = m_sema->getASTContext().CreateTypeSourceInfo(
          paramTypes[i - numVariadicArgs], 0);
      ParmVarDecl *paramDecl = ParmVarDecl::Create(
          context, nullptr, NoLoc, NoLoc, id, paramTypes[i - numVariadicArgs],
          TInfo, StorageClass::SC_None, nullptr,
          paramMods[i - 1 - numVariadicArgs]);
      paramDecls.push_back(paramDecl);
    }
    return paramDecls;
  }
  // Computes the QualTypes for a vk:: intrinsic's signature. Slot 0 of the
  // intrinsic table (and of the returned vector) is the return type; the
  // rest are parameters. Templated arguments consume entries from
  // templateTypeParmDecls in order; variadic arguments are skipped.
  SmallVector<QualType, 2> VkIntrinsicFunctionParamTypes(
      const HLSL_INTRINSIC *intrinsic,
      const SmallVectorImpl<NamedDecl *> &templateTypeParmDecls) {
    auto &context = m_sema->getASTContext();
    const HLSL_INTRINSIC_ARGUMENT *pArgs = intrinsic->pArgs;
    UINT uNumArgs = intrinsic->uNumArgs;
    SmallVector<QualType, 2> paramTypes;
    auto templateParmItr = templateTypeParmDecls.begin();
    for (UINT i = 0; i < uNumArgs; ++i) {
      // Templated argument: reference the matching TemplateTypeParmDecl.
      if (pArgs[i].uTemplateId == INTRIN_TEMPLATE_FROM_FUNCTION ||
          pArgs[i].uLegalTemplates == LITEMPLATE_ANY) {
        DXASSERT(templateParmItr != templateTypeParmDecls.end(),
                 "Missing TemplateTypeParmDecl for a template type parameter");
        TemplateTypeParmDecl *templateParmDecl =
            dyn_cast<TemplateTypeParmDecl>(*templateParmItr);
        DXASSERT(templateParmDecl != nullptr,
                 "TemplateTypeParmDecl is nullptr");
        paramTypes.push_back(context.getTemplateTypeParmType(
            0, i, ParameterPackFalse, templateParmDecl));
        ++templateParmItr;
        continue;
      }
      if (IsVariadicArgument(pArgs[i])) {
        continue;
      }
      // Concrete argument: only a small set of component types is supported
      // for vk:: intrinsics.
      switch (pArgs[i].uLegalComponentTypes) {
      case LICOMPTYPE_UINT64:
        paramTypes.push_back(context.UnsignedLongLongTy);
        break;
      case LICOMPTYPE_UINT:
        paramTypes.push_back(context.UnsignedIntTy);
        break;
      case LICOMPTYPE_VOID:
        paramTypes.push_back(context.VoidTy);
        break;
      default:
        DXASSERT(false, "Argument type of vk:: intrinsic function is not "
                        "supported");
        break;
      }
    }
    return paramTypes;
  }
QualType
VkIntrinsicFunctionType(const SmallVectorImpl<QualType> ¶mTypes,
const SmallVectorImpl<ParameterModifier> ¶mMods) {
DXASSERT(!paramTypes.empty(), "Given param type vector is empty");
ArrayRef<QualType> params({});
if (paramTypes.size() > 1) {
params = ArrayRef<QualType>(¶mTypes[1], paramTypes.size() - 1);
}
FunctionProtoType::ExtProtoInfo EmptyEPI;
return m_sema->getASTContext().getFunctionType(paramTypes[0], params,
EmptyEPI, paramMods);
}
void SetParmDeclsForVkIntrinsicFunction(
TypeSourceInfo *TInfo, FunctionDecl *functionDecl,
const SmallVectorImpl<ParmVarDecl *> ¶mDecls) {
FunctionProtoTypeLoc Proto =
TInfo->getTypeLoc().getAs<FunctionProtoTypeLoc>();
// Attach the parameters
for (unsigned P = 0; P < paramDecls.size(); ++P) {
paramDecls[P]->setOwningFunction(functionDecl);
paramDecls[P]->setScopeInfo(0, P);
Proto.setParam(P, paramDecls[P]);
}
functionDecl->setParams(paramDecls);
}
  // Adds intrinsic function declarations to the "vk" namespace.
  // It does so only if SPIR-V code generation is being done.
  // Assumes the implicit "vk" namespace has already been created.
  void AddVkIntrinsicFunctions() {
    // If not doing SPIR-V CodeGen, return.
    if (!m_sema->getLangOpts().SPIRV)
      return;
    DXASSERT(m_vkNSDecl, "caller has not created the vk namespace yet");
    auto &context = m_sema->getASTContext();
    for (uint32_t i = 0; i < _countof(g_VkIntrinsics); ++i) {
      const HLSL_INTRINSIC *intrinsic = &g_VkIntrinsics[i];
      // Slot 0 of the argument table holds the intrinsic's name.
      const IdentifierInfo &fnII = context.Idents.get(
          intrinsic->pArgs->pName, tok::TokenKind::identifier);
      DeclarationName functionName(&fnII);
      // Create TemplateTypeParmDecl.
      SmallVector<NamedDecl *, 1> templateTypeParmDecls =
          CreateTemplateTypeParmDeclsForVkIntrinsicFunction(intrinsic);
      // Get types for parameters.
      SmallVector<QualType, 2> paramTypes =
          VkIntrinsicFunctionParamTypes(intrinsic, templateTypeParmDecls);
      SmallVector<hlsl::ParameterModifier, g_MaxIntrinsicParamCount> paramMods;
      InitParamMods(intrinsic, paramMods);
      // Create FunctionDecl.
      QualType fnType = VkIntrinsicFunctionType(paramTypes, paramMods);
      TypeSourceInfo *TInfo =
          m_sema->getASTContext().CreateTypeSourceInfo(fnType, 0);
      FunctionDecl *functionDecl = FunctionDecl::Create(
          context, m_vkNSDecl, NoLoc, DeclarationNameInfo(functionName, NoLoc),
          fnType, TInfo, StorageClass::SC_Extern, InlineSpecifiedFalse,
          HasWrittenPrototypeTrue);
      // Create and set ParmVarDecl.
      SmallVector<ParmVarDecl *, g_MaxIntrinsicParamCount> paramDecls =
          CreateParmDeclsForVkIntrinsicFunction(intrinsic, paramTypes,
                                                paramMods);
      SetParmDeclsForVkIntrinsicFunction(TInfo, functionDecl, paramDecls);
      // Templated intrinsics get wrapped in a FunctionTemplateDecl; plain
      // ones are added to the namespace directly.
      if (!templateTypeParmDecls.empty()) {
        TemplateParameterList *templateParmList = TemplateParameterList::Create(
            context, NoLoc, NoLoc, templateTypeParmDecls.data(),
            templateTypeParmDecls.size(), NoLoc);
        functionDecl->setTemplateParameterListsInfo(context, 1,
                                                    &templateParmList);
        FunctionTemplateDecl *functionTemplate = FunctionTemplateDecl::Create(
            context, m_vkNSDecl, NoLoc, functionName, templateParmList,
            functionDecl);
        functionDecl->setDescribedFunctionTemplate(functionTemplate);
        m_vkNSDecl->addDecl(functionTemplate);
        functionTemplate->setDeclContext(m_vkNSDecl);
      } else {
        m_vkNSDecl->addDecl(functionDecl);
        functionDecl->setLexicalDeclContext(m_vkNSDecl);
        functionDecl->setDeclContext(m_vkNSDecl);
      }
      functionDecl->setImplicit(true);
    }
  }
// Adds implicitly defined Vulkan-specific constants to the "vk" namespace.
// It does so only if SPIR-V code generation is being done.
// Assumes the implicit "vk" namespace has already been created.
void AddVkIntrinsicConstants() {
// If not doing SPIR-V CodeGen, return.
if (!m_sema->getLangOpts().SPIRV)
return;
DXASSERT(m_vkNSDecl, "caller has not created the vk namespace yet");
for (auto intConst : GetVkIntegerConstants()) {
const llvm::StringRef name = intConst.first;
const uint32_t value = intConst.second;
auto &context = m_sema->getASTContext();
QualType type = context.getConstType(context.UnsignedIntTy);
IdentifierInfo &Id = context.Idents.get(name, tok::TokenKind::identifier);
VarDecl *varDecl =
VarDecl::Create(context, m_vkNSDecl, NoLoc, NoLoc, &Id, type,
context.getTrivialTypeSourceInfo(type),
clang::StorageClass::SC_Static);
Expr *exprVal = IntegerLiteral::Create(
context, llvm::APInt(context.getIntWidth(type), value), type, NoLoc);
varDecl->setInit(exprVal);
varDecl->setImplicit(true);
m_vkNSDecl->addDecl(varDecl);
}
}
#endif // ENABLE_SPIRV_CODEGEN
  // Adds all built-in HLSL object types.
  // Walks g_ArBasicKindsAsTypes and creates a CXXRecordDecl for each kind,
  // dispatching to the specialized Declare*/Create* helper that matches the
  // kind. Results are recorded in m_objectTypeDecls (by table index) and in
  // m_objectTypeDeclsMap (sorted by decl pointer for FindObjectBasicKindIndex).
  // Also registers the 'sampler' typedef and placeholder records for
  // deprecated effect object names.
  void AddObjectTypes() {
    DXASSERT(m_context != nullptr,
             "otherwise caller hasn't initialized context yet");
    QualType float4Type = LookupVectorType(HLSLScalarType_float, 4);
    TypeSourceInfo *float4TypeSourceInfo =
        m_context->getTrivialTypeSourceInfo(float4Type, NoLoc);
    unsigned effectKindIndex = 0;
    const auto *SM =
        hlsl::ShaderModel::GetByName(m_sema->getLangOpts().HLSLProfile.c_str());
    CXXRecordDecl *nodeOutputDecl = nullptr, *emptyNodeOutputDecl = nullptr;
    for (unsigned i = 0; i < _countof(g_ArBasicKindsAsTypes); i++) {
      ArBasicKind kind = g_ArBasicKindsAsTypes[i];
      if (kind == AR_OBJECT_WAVE) { // wave objects are currently unused
        continue;
      }
      if (kind == AR_OBJECT_LEGACY_EFFECT)
        effectKindIndex = i;
      DXASSERT(kind < _countof(g_ArBasicTypeNames),
               "g_ArBasicTypeNames has the wrong number of entries");
      assert(kind < _countof(g_ArBasicTypeNames));
      const char *typeName = g_ArBasicTypeNames[kind];
      uint8_t templateArgCount = g_ArBasicKindsTemplateCount[i];
      CXXRecordDecl *recordDecl = nullptr;
      // Dispatch: kinds with bespoke layouts first, then raytracing
      // subobjects, then node/record types, then the generic
      // templated-with-handle fallback.
      if (kind == AR_OBJECT_RAY_DESC) {
        QualType float3Ty =
            LookupVectorType(HLSLScalarType::HLSLScalarType_float, 3);
        recordDecl = CreateRayDescStruct(*m_context, float3Ty);
      } else if (kind == AR_OBJECT_TRIANGLE_INTERSECTION_ATTRIBUTES) {
        QualType float2Type =
            LookupVectorType(HLSLScalarType::HLSLScalarType_float, 2);
        recordDecl =
            AddBuiltInTriangleIntersectionAttributes(*m_context, float2Type);
      } else if (IsSubobjectBasicKind(kind)) {
        switch (kind) {
        case AR_OBJECT_STATE_OBJECT_CONFIG:
          recordDecl = CreateSubobjectStateObjectConfig(*m_context);
          break;
        case AR_OBJECT_GLOBAL_ROOT_SIGNATURE:
          recordDecl = CreateSubobjectRootSignature(*m_context, true);
          break;
        case AR_OBJECT_LOCAL_ROOT_SIGNATURE:
          recordDecl = CreateSubobjectRootSignature(*m_context, false);
          break;
        case AR_OBJECT_SUBOBJECT_TO_EXPORTS_ASSOC:
          recordDecl = CreateSubobjectSubobjectToExportsAssoc(*m_context);
          break;
        case AR_OBJECT_RAYTRACING_SHADER_CONFIG:
          recordDecl = CreateSubobjectRaytracingShaderConfig(*m_context);
          break;
        case AR_OBJECT_RAYTRACING_PIPELINE_CONFIG:
          recordDecl = CreateSubobjectRaytracingPipelineConfig(*m_context);
          break;
        case AR_OBJECT_TRIANGLE_HIT_GROUP:
          recordDecl = CreateSubobjectTriangleHitGroup(*m_context);
          break;
        case AR_OBJECT_PROCEDURAL_PRIMITIVE_HIT_GROUP:
          recordDecl = CreateSubobjectProceduralPrimitiveHitGroup(*m_context);
          break;
        case AR_OBJECT_RAYTRACING_PIPELINE_CONFIG1:
          recordDecl = CreateSubobjectRaytracingPipelineConfig1(*m_context);
          break;
        }
      } else if (kind == AR_OBJECT_CONSTANT_BUFFER) {
        recordDecl = DeclareConstantBufferViewType(*m_context, /*bTBuf*/ false);
      } else if (kind == AR_OBJECT_TEXTURE_BUFFER) {
        recordDecl = DeclareConstantBufferViewType(*m_context, /*bTBuf*/ true);
      } else if (kind == AR_OBJECT_RAY_QUERY) {
        recordDecl = DeclareRayQueryType(*m_context);
      } else if (kind == AR_OBJECT_HEAP_RESOURCE) {
        recordDecl = DeclareResourceType(*m_context, /*bSampler*/ false);
        if (SM->IsSM66Plus()) {
          // create Resource ResourceDescriptorHeap;
          DeclareBuiltinGlobal("ResourceDescriptorHeap",
                               m_context->getRecordType(recordDecl),
                               *m_context);
        }
      } else if (kind == AR_OBJECT_HEAP_SAMPLER) {
        recordDecl = DeclareResourceType(*m_context, /*bSampler*/ true);
        if (SM->IsSM66Plus()) {
          // create Resource SamplerDescriptorHeap;
          DeclareBuiltinGlobal("SamplerDescriptorHeap",
                               m_context->getRecordType(recordDecl),
                               *m_context);
        }
      } else if (kind == AR_OBJECT_FEEDBACKTEXTURE2D) {
        recordDecl = DeclareUIntTemplatedTypeWithHandle(
            *m_context, "FeedbackTexture2D", "kind");
      } else if (kind == AR_OBJECT_FEEDBACKTEXTURE2D_ARRAY) {
        recordDecl = DeclareUIntTemplatedTypeWithHandle(
            *m_context, "FeedbackTexture2DArray", "kind");
      } else if (kind == AR_OBJECT_EMPTY_NODE_INPUT) {
        recordDecl = DeclareNodeOrRecordType(
            *m_context, DXIL::NodeIOKind::EmptyInput,
            /*IsRecordTypeTemplate*/ false, /*IsConst*/ true,
            /*HasGetMethods*/ false,
            /*IsArray*/ false, /*IsCompleteType*/ false);
      } else if (kind == AR_OBJECT_DISPATCH_NODE_INPUT_RECORD) {
        recordDecl = DeclareNodeOrRecordType(
            *m_context, DXIL::NodeIOKind::DispatchNodeInputRecord,
            /*IsRecordTypeTemplate*/ true,
            /*IsConst*/ true, /*HasGetMethods*/ true,
            /*IsArray*/ false, /*IsCompleteType*/ true);
      } else if (kind == AR_OBJECT_RWDISPATCH_NODE_INPUT_RECORD) {
        recordDecl = DeclareNodeOrRecordType(
            *m_context, DXIL::NodeIOKind::RWDispatchNodeInputRecord,
            /*IsRecordTypeTemplate*/ true, /*IsConst*/ false,
            /*HasGetMethods*/ true,
            /*IsArray*/ false, /*IsCompleteType*/ false);
      } else if (kind == AR_OBJECT_GROUP_NODE_INPUT_RECORDS) {
        recordDecl = DeclareNodeOrRecordType(
            *m_context, DXIL::NodeIOKind::GroupNodeInputRecords,
            /*IsRecordTypeTemplate*/ true,
            /*IsConst*/ true, /*HasGetMethods*/ true,
            /*IsArray*/ true, /*IsCompleteType*/ false);
      } else if (kind == AR_OBJECT_RWGROUP_NODE_INPUT_RECORDS) {
        recordDecl = DeclareNodeOrRecordType(
            *m_context, DXIL::NodeIOKind::RWGroupNodeInputRecords,
            /*IsRecordTypeTemplate*/ true,
            /*IsConst*/ false, /*HasGetMethods*/ true,
            /*IsArray*/ true, /*IsCompleteType*/ false);
      } else if (kind == AR_OBJECT_THREAD_NODE_INPUT_RECORD) {
        recordDecl = DeclareNodeOrRecordType(
            *m_context, DXIL::NodeIOKind::ThreadNodeInputRecord,
            /*IsRecordTypeTemplate*/ true,
            /*IsConst*/ true, /*HasGetMethods*/ true,
            /*IsArray*/ false, /*IsCompleteType*/ true);
      } else if (kind == AR_OBJECT_RWTHREAD_NODE_INPUT_RECORD) {
        recordDecl = DeclareNodeOrRecordType(
            *m_context, DXIL::NodeIOKind::RWThreadNodeInputRecord,
            /*IsRecordTypeTemplate*/ true,
            /*IsConst*/ false, /*HasGetMethods*/ true,
            /*IsArray*/ false, /*IsCompleteType*/ true);
      } else if (kind == AR_OBJECT_NODE_OUTPUT) {
        // Remember the decl: the NodeOutputArray kind declared later reuses
        // it as the array's item type.
        recordDecl = DeclareNodeOrRecordType(
            *m_context, DXIL::NodeIOKind::NodeOutput,
            /*IsRecordTypeTemplate*/ true, /*IsConst*/ true,
            /*HasGetMethods*/ false,
            /*IsArray*/ false, /*IsCompleteType*/ false);
        nodeOutputDecl = recordDecl;
      } else if (kind == AR_OBJECT_EMPTY_NODE_OUTPUT) {
        recordDecl = DeclareNodeOrRecordType(
            *m_context, DXIL::NodeIOKind::EmptyOutput,
            /*IsRecordTypeTemplate*/ false, /*IsConst*/ true,
            /*HasGetMethods*/ false,
            /*IsArray*/ false, /*IsCompleteType*/ false);
        emptyNodeOutputDecl = recordDecl;
      } else if (kind == AR_OBJECT_NODE_OUTPUT_ARRAY) {
        assert(nodeOutputDecl != nullptr);
        recordDecl = DeclareNodeOutputArray(*m_context,
                                            DXIL::NodeIOKind::NodeOutputArray,
                                            /* ItemType */ nodeOutputDecl,
                                            /*IsRecordTypeTemplate*/ true);
      } else if (kind == AR_OBJECT_EMPTY_NODE_OUTPUT_ARRAY) {
        assert(emptyNodeOutputDecl != nullptr);
        recordDecl = DeclareNodeOutputArray(*m_context,
                                            DXIL::NodeIOKind::EmptyOutputArray,
                                            /* ItemType */ emptyNodeOutputDecl,
                                            /*IsRecordTypeTemplate*/ false);
      } else if (kind == AR_OBJECT_GROUP_NODE_OUTPUT_RECORDS) {
        recordDecl = m_GroupNodeOutputRecordsTemplateDecl->getTemplatedDecl();
      } else if (kind == AR_OBJECT_THREAD_NODE_OUTPUT_RECORDS) {
        recordDecl = m_ThreadNodeOutputRecordsTemplateDecl->getTemplatedDecl();
      }
#ifdef ENABLE_SPIRV_CODEGEN
      // vk:: object kinds are declared inside the vk namespace; they are
      // skipped entirely when the namespace was not created (non-SPIR-V).
      else if (kind == AR_OBJECT_VK_SPIRV_TYPE) {
        if (!m_vkNSDecl)
          continue;
        recordDecl =
            DeclareInlineSpirvType(*m_context, m_vkNSDecl, typeName, false);
        recordDecl->setImplicit(true);
      } else if (kind == AR_OBJECT_VK_SPIRV_OPAQUE_TYPE) {
        if (!m_vkNSDecl)
          continue;
        recordDecl =
            DeclareInlineSpirvType(*m_context, m_vkNSDecl, typeName, true);
        recordDecl->setImplicit(true);
      } else if (kind == AR_OBJECT_VK_INTEGRAL_CONSTANT) {
        if (!m_vkNSDecl)
          continue;
        recordDecl =
            DeclareVkIntegralConstant(*m_context, m_vkNSDecl, typeName,
                                      &m_vkIntegralConstantTemplateDecl);
        recordDecl->setImplicit(true);
      } else if (kind == AR_OBJECT_VK_LITERAL) {
        if (!m_vkNSDecl)
          continue;
        recordDecl = DeclareTemplateTypeWithHandleInDeclContext(
            *m_context, m_vkNSDecl, typeName, 1, nullptr);
        recordDecl->setImplicit(true);
        m_vkLiteralTemplateDecl = recordDecl->getDescribedClassTemplate();
      } else if (kind == AR_OBJECT_VK_SPV_INTRINSIC_TYPE) {
        if (!m_vkNSDecl)
          continue;
        recordDecl = DeclareUIntTemplatedTypeWithHandleInDeclContext(
            *m_context, m_vkNSDecl, typeName, "id");
        recordDecl->setImplicit(true);
      } else if (kind == AR_OBJECT_VK_SPV_INTRINSIC_RESULT_ID) {
        if (!m_vkNSDecl)
          continue;
        recordDecl = DeclareTemplateTypeWithHandleInDeclContext(
            *m_context, m_vkNSDecl, typeName, 1, nullptr);
        recordDecl->setImplicit(true);
      }
#endif
      else if (templateArgCount == 0) {
        recordDecl = DeclareRecordTypeWithHandle(*m_context, typeName,
                                                 /*isCompleteType*/ false);
      } else {
        DXASSERT(templateArgCount == 1 || templateArgCount == 2,
                 "otherwise a new case has been added");
        TypeSourceInfo *typeDefault =
            TemplateHasDefaultType(kind) ? float4TypeSourceInfo : nullptr;
        recordDecl = DeclareTemplateTypeWithHandle(
            *m_context, typeName, templateArgCount, typeDefault);
      }
      m_objectTypeDecls[i] = recordDecl;
      m_objectTypeDeclsMap[i] = std::make_pair(recordDecl, i);
    }
    // Create an alias for SamplerState. 'sampler' is very commonly used.
    {
      DeclContext *currentDeclContext = m_context->getTranslationUnitDecl();
      IdentifierInfo &samplerId = m_context->Idents.get(
          StringRef("sampler"), tok::TokenKind::identifier);
      TypeSourceInfo *samplerTypeSource = m_context->getTrivialTypeSourceInfo(
          GetBasicKindType(AR_OBJECT_SAMPLER));
      TypedefDecl *samplerDecl =
          TypedefDecl::Create(*m_context, currentDeclContext, NoLoc, NoLoc,
                              &samplerId, samplerTypeSource);
      currentDeclContext->addDecl(samplerDecl);
      samplerDecl->setImplicit(true);
      // Create decls for each deprecated effect object type:
      unsigned effectObjBase = _countof(g_ArBasicKindsAsTypes);
      // TypeSourceInfo* effectObjTypeSource =
      // m_context->getTrivialTypeSourceInfo(GetBasicKindType(AR_OBJECT_LEGACY_EFFECT));
      for (unsigned i = 0; i < _countof(g_DeprecatedEffectObjectNames); i++) {
        IdentifierInfo &idInfo =
            m_context->Idents.get(StringRef(g_DeprecatedEffectObjectNames[i]),
                                  tok::TokenKind::identifier);
        // TypedefDecl* effectObjDecl = TypedefDecl::Create(*m_context,
        // currentDeclContext, NoLoc, NoLoc, &idInfo, effectObjTypeSource);
        CXXRecordDecl *effectObjDecl =
            CXXRecordDecl::Create(*m_context, TagTypeKind::TTK_Struct,
                                  currentDeclContext, NoLoc, NoLoc, &idInfo);
        currentDeclContext->addDecl(effectObjDecl);
        effectObjDecl->setImplicit(true);
        // All deprecated effect names map back to the single legacy effect
        // kind recorded above.
        m_objectTypeDeclsMap[i + effectObjBase] =
            std::make_pair(effectObjDecl, effectKindIndex);
      }
    }
    // Make sure it's in order.
    std::sort(m_objectTypeDeclsMap.begin(), m_objectTypeDeclsMap.end(),
              ObjectTypeDeclMapTypeCmp);
  }
  // Declaration only: creates a specialization of the templated operator[]
  // for the given element type (defined elsewhere in this file).
  FunctionDecl *
  AddSubscriptSpecialization(FunctionTemplateDecl *functionTemplate,
                             QualType objectElement,
                             const FindStructBasicTypeResult &findResult);
  // Wraps 'input' in an implicit lvalue-to-rvalue cast of the same type.
  ImplicitCastExpr *CreateLValueToRValueCast(Expr *input) {
    return ImplicitCastExpr::Create(*m_context, input->getType(),
                                    CK_LValueToRValue, input, nullptr,
                                    VK_RValue);
  }
  // Wraps 'input' in an implicit cast of the same type.
  // NOTE(review): despite the name, this uses CK_LValueToRValue (identical to
  // CreateLValueToRValueCast above), not CK_FlatConversion — confirm against
  // callers whether a flat-conversion cast kind was intended here.
  ImplicitCastExpr *CreateFlatConversionCast(Expr *input) {
    return ImplicitCastExpr::Create(*m_context, input->getType(),
                                    CK_LValueToRValue, input, nullptr,
                                    VK_RValue);
  }
  // Sink objects for callers of CombineDimensions that don't care about the
  // conversion kind or remarks out-parameters.
  static TYPE_CONVERSION_REMARKS RemarksUnused;
  static ImplicitConversionKind ImplicitConversionKindUnused;
  // Declaration only: combines the dimensions of two operand types into a
  // result type (defined elsewhere in this file).
  HRESULT CombineDimensions(
      QualType leftType, QualType rightType, QualType *resultType,
      ImplicitConversionKind &convKind = ImplicitConversionKindUnused,
      TYPE_CONVERSION_REMARKS &Remarks = RemarksUnused);
clang::TypedefDecl *LookupMatrixShorthandType(HLSLScalarType scalarType,
UINT rowCount, UINT colCount) {
DXASSERT_NOMSG(scalarType != HLSLScalarType::HLSLScalarType_unknown &&
rowCount <= 4 && colCount <= 4);
TypedefDecl *qts =
m_matrixShorthandTypes[scalarType][rowCount - 1][colCount - 1];
if (qts == nullptr) {
QualType type = LookupMatrixType(scalarType, rowCount, colCount);
qts = CreateMatrixSpecializationShorthand(*m_context, type, scalarType,
rowCount, colCount);
m_matrixShorthandTypes[scalarType][rowCount - 1][colCount - 1] = qts;
}
return qts;
}
clang::TypedefDecl *LookupVectorShorthandType(HLSLScalarType scalarType,
UINT colCount) {
DXASSERT_NOMSG(scalarType != HLSLScalarType::HLSLScalarType_unknown &&
colCount <= 4);
TypedefDecl *qts = m_vectorTypedefs[scalarType][colCount - 1];
if (qts == nullptr) {
QualType type = LookupVectorType(scalarType, colCount);
qts = CreateVectorSpecializationShorthand(*m_context, type, scalarType,
colCount);
m_vectorTypedefs[scalarType][colCount - 1] = qts;
}
return qts;
}
public:
  // Constructs an uninitialized source; m_context/m_sema are wired up later
  // in InitializeSema. All lazy type caches start out empty/null.
  HLSLExternalSource()
      : m_matrixTemplateDecl(nullptr), m_vectorTemplateDecl(nullptr),
        m_vkIntegralConstantTemplateDecl(nullptr),
        m_vkLiteralTemplateDecl(nullptr), m_hlslNSDecl(nullptr),
        m_vkNSDecl(nullptr), m_context(nullptr), m_sema(nullptr),
        m_hlslStringTypedef(nullptr) {
    // Zero the cache arrays so isNull()/nullptr checks detect "not yet
    // created" entries.
    memset(m_matrixTypes, 0, sizeof(m_matrixTypes));
    memset(m_matrixShorthandTypes, 0, sizeof(m_matrixShorthandTypes));
    memset(m_vectorTypes, 0, sizeof(m_vectorTypes));
    memset(m_vectorTypedefs, 0, sizeof(m_vectorTypedefs));
    memset(m_scalarTypes, 0, sizeof(m_scalarTypes));
    memset(m_scalarTypeDefs, 0, sizeof(m_scalarTypeDefs));
    memset(m_baseTypes, 0, sizeof(m_baseTypes));
  }
~HLSLExternalSource() {}
  // Recovers the HLSLExternalSource instance registered on the given Sema
  // (via addExternalSource in InitializeSema).
  // NOTE(review): this downcasts with reinterpret_cast, which is only valid
  // while HLSLExternalSource is the sole external source and the pointer
  // identity matches — confirm before adding further inheritance.
  static HLSLExternalSource *FromSema(Sema *self) {
    DXASSERT_NOMSG(self != nullptr);
    ExternalSemaSource *externalSource = self->getExternalSource();
    DXASSERT(externalSource != nullptr,
             "otherwise caller shouldn't call HLSL-specific function");
    HLSLExternalSource *hlsl =
        reinterpret_cast<HLSLExternalSource *>(externalSource);
    return hlsl;
  }
  // ExternalSemaSource hook: binds this source to the Sema and populates the
  // AST with HLSL built-ins. Order matters: the vk namespace must exist
  // before object types (vk:: kinds) and vk intrinsics are declared.
  void InitializeSema(Sema &S) override {
    auto &context = S.getASTContext();
    m_sema = &S;
    S.addExternalSource(this);
#ifdef ENABLE_SPIRV_CODEGEN
    if (m_sema->getLangOpts().SPIRV) {
      // Create the "vk" namespace which contains Vulkan-specific intrinsics.
      m_vkNSDecl =
          NamespaceDecl::Create(context, context.getTranslationUnitDecl(),
                                /*Inline*/ false, SourceLocation(),
                                SourceLocation(), &context.Idents.get("vk"),
                                /*PrevDecl*/ nullptr);
      context.getTranslationUnitDecl()->addDecl(m_vkNSDecl);
    }
#endif // ENABLE_SPIRV_CODEGEN
    AddObjectTypes();
    AddStdIsEqualImplementation(context, S);
    // Register method intrinsics from every attached intrinsic table.
    for (auto &&intrinsic : m_intrinsicTables) {
      AddIntrinsicTableMethods(intrinsic);
    }
#ifdef ENABLE_SPIRV_CODEGEN
    if (m_sema->getLangOpts().SPIRV) {
      // Add Vulkan-specific intrinsics.
      AddVkIntrinsicFunctions();
      AddVkIntrinsicConstants();
    }
#endif // ENABLE_SPIRV_CODEGEN
  }
  // ExternalSemaSource hook: drops the Sema binding when it goes away.
  void ForgetSema() override { m_sema = nullptr; }
  // Returns the bound Sema (null after ForgetSema / before InitializeSema).
  Sema *getSema() { return m_sema; }
  // Returns (creating on first use) the global typedef for an HLSL scalar
  // type name, caching both the typedef and its QualType.
  TypedefDecl *LookupScalarTypeDef(HLSLScalarType scalarType) {
    // We shouldn't create Typedef for built in scalar types.
    // For built in scalar types, this function may be called for
    // TypoCorrection. In that case, we return a nullptr.
    if (m_scalarTypes[scalarType].isNull()) {
      m_scalarTypeDefs[scalarType] = CreateGlobalTypedef(
          m_context, HLSLScalarTypeNames[scalarType], m_baseTypes[scalarType]);
      m_scalarTypes[scalarType] =
          m_context->getTypeDeclType(m_scalarTypeDefs[scalarType]);
    }
    return m_scalarTypeDefs[scalarType];
  }
QualType LookupMatrixType(HLSLScalarType scalarType, unsigned int rowCount,
unsigned int colCount) {
QualType qt = m_matrixTypes[scalarType][rowCount - 1][colCount - 1];
if (qt.isNull()) {
// lazy initialization of scalar types
if (m_scalarTypes[scalarType].isNull()) {
LookupScalarTypeDef(scalarType);
}
qt = GetOrCreateMatrixSpecialization(
*m_context, m_sema, m_matrixTemplateDecl, m_scalarTypes[scalarType],
rowCount, colCount);
m_matrixTypes[scalarType][rowCount - 1][colCount - 1] = qt;
}
return qt;
}
QualType LookupVectorType(HLSLScalarType scalarType, unsigned int colCount) {
QualType qt = m_vectorTypes[scalarType][colCount - 1];
if (qt.isNull()) {
if (m_scalarTypes[scalarType].isNull()) {
LookupScalarTypeDef(scalarType);
}
qt = GetOrCreateVectorSpecialization(*m_context, m_sema,
m_vectorTemplateDecl,
m_scalarTypes[scalarType], colCount);
m_vectorTypes[scalarType][colCount - 1] = qt;
}
return qt;
}
  // Returns (creating on demand) the typedef for the HLSL 'string' type.
  TypedefDecl *GetStringTypedef() {
    if (m_hlslStringTypedef == nullptr) {
      // NOTE(review): m_hlslStringType is read here before being reassigned
      // below; it is presumably pre-initialized elsewhere (AddHLSLStringType)
      // -- confirm before changing initialization order.
      m_hlslStringTypedef =
          CreateGlobalTypedef(m_context, "string", m_hlslStringType);
      m_hlslStringType = m_context->getTypeDeclType(m_hlslStringTypedef);
    }
    DXASSERT_NOMSG(m_hlslStringTypedef != nullptr);
    return m_hlslStringTypedef;
  }
static bool IsSubobjectBasicKind(ArBasicKind kind) {
return kind >= AR_OBJECT_STATE_OBJECT_CONFIG &&
kind <= AR_OBJECT_RAYTRACING_PIPELINE_CONFIG1;
}
bool IsSubobjectType(QualType type) {
return IsSubobjectBasicKind(GetTypeElementKind(type));
}
bool IsRayQueryBasicKind(ArBasicKind kind) {
return kind == AR_OBJECT_RAY_QUERY;
}
bool IsRayQueryType(QualType type) {
return IsRayQueryBasicKind(GetTypeElementKind(type));
}
  // Emits a warning when a min-precision type (min12int, min10float, and --
  // when min-precision mode is off -- min16*) will be promoted to a
  // different representation. Vector/matrix types are reduced to their
  // element type before checking.
  void WarnMinPrecision(QualType Type, SourceLocation Loc) {
    Type = Type->getCanonicalTypeUnqualified();
    if (IsVectorType(m_sema, Type) || IsMatrixType(m_sema, Type)) {
      Type = GetOriginalMatrixOrVectorElementType(Type);
    }
    // TODO: enable this once we introduce precise master option
    bool UseMinPrecision = m_context->getLangOpts().UseMinPrecision;
    if (Type == m_context->Min12IntTy) {
      QualType PromotedType =
          UseMinPrecision ? m_context->Min16IntTy : m_context->ShortTy;
      m_sema->Diag(Loc, diag::warn_hlsl_sema_minprecision_promotion)
          << Type << PromotedType;
    } else if (Type == m_context->Min10FloatTy) {
      QualType PromotedType =
          UseMinPrecision ? m_context->Min16FloatTy : m_context->HalfTy;
      m_sema->Diag(Loc, diag::warn_hlsl_sema_minprecision_promotion)
          << Type << PromotedType;
    }
    // Without min-precision mode, min16 types map to true 16-bit types.
    if (!UseMinPrecision) {
      if (Type == m_context->Min16FloatTy) {
        m_sema->Diag(Loc, diag::warn_hlsl_sema_minprecision_promotion)
            << Type << m_context->HalfTy;
      } else if (Type == m_context->Min16IntTy) {
        m_sema->Diag(Loc, diag::warn_hlsl_sema_minprecision_promotion)
            << Type << m_context->ShortTy;
      } else if (Type == m_context->Min16UIntTy) {
        m_sema->Diag(Loc, diag::warn_hlsl_sema_minprecision_promotion)
            << Type << m_context->UnsignedShortTy;
      }
    }
  }
  // Diagnoses use of scalar-type keywords that are unavailable under the
  // current language version (pre-2018) or min-precision mode. Returns
  // false when a diagnostic was emitted, true when the keyword is allowed.
  bool DiagnoseHLSLScalarType(HLSLScalarType type, SourceLocation Loc) {
    // Sized scalar keywords (float16/int32/etc.) require HLSL 2018+.
    if (getSema()->getLangOpts().HLSLVersion < hlsl::LangStd::v2018) {
      switch (type) {
      case HLSLScalarType_float16:
      case HLSLScalarType_float32:
      case HLSLScalarType_float64:
      case HLSLScalarType_int16:
      case HLSLScalarType_int32:
      case HLSLScalarType_uint16:
      case HLSLScalarType_uint32:
        m_sema->Diag(Loc, diag::err_hlsl_unsupported_keyword_for_version)
            << HLSLScalarTypeNames[type] << "2018";
        return false;
      default:
        break;
      }
    }
    // True 16-bit scalar keywords conflict with min-precision mode.
    if (getSema()->getLangOpts().UseMinPrecision) {
      switch (type) {
      case HLSLScalarType_float16:
      case HLSLScalarType_int16:
      case HLSLScalarType_uint16:
        m_sema->Diag(Loc, diag::err_hlsl_unsupported_keyword_for_min_precision)
            << HLSLScalarTypeNames[type];
        return false;
      default:
        break;
      }
    }
    return true;
  }
  // Resolves unqualified names that denote lazily-created HLSL types
  // (scalar/vector/matrix shorthands like 'float4x4', and 'string').
  // Returns true when the lookup result was populated with a typedef.
  bool LookupUnqualified(LookupResult &R, Scope *S) override {
    const DeclarationNameInfo declName = R.getLookupNameInfo();
    IdentifierInfo *idInfo = declName.getName().getAsIdentifierInfo();
    if (idInfo == nullptr) {
      return false;
    }
    // Currently template instantiation is blocked when a fatal error is
    // detected. So no faulting-in types at this point, instead we simply
    // back out.
    if (this->m_sema->Diags.hasFatalErrorOccurred()) {
      return false;
    }
    StringRef nameIdentifier = idInfo->getName();
    HLSLScalarType parsedType;
    int rowCount;
    int colCount;
    // Try parsing hlsl scalar types that is not initialized at AST time.
    if (TryParseAny(nameIdentifier.data(), nameIdentifier.size(), &parsedType,
                    &rowCount, &colCount, getSema()->getLangOpts())) {
      assert(parsedType != HLSLScalarType_unknown &&
             "otherwise, TryParseHLSLScalarType should not have succeeded.");
      if (rowCount == 0 && colCount == 0) { // scalar
        if (!DiagnoseHLSLScalarType(parsedType, R.getNameLoc()))
          return false;
        TypedefDecl *typeDecl = LookupScalarTypeDef(parsedType);
        if (!typeDecl)
          return false;
        R.addDecl(typeDecl);
      } else if (rowCount == 0) { // vector
        TypedefDecl *qts = LookupVectorShorthandType(parsedType, colCount);
        R.addDecl(qts);
      } else { // matrix
        TypedefDecl *qts =
            LookupMatrixShorthandType(parsedType, rowCount, colCount);
        R.addDecl(qts);
      }
      return true;
    }
    // string
    else if (TryParseString(nameIdentifier.data(), nameIdentifier.size(),
                            getSema()->getLangOpts())) {
      // NOTE(review): this branch adds the typedef yet still returns false;
      // confirm whether returning true (lookup resolved) was intended.
      TypedefDecl *strDecl = GetStringTypedef();
      R.addDecl(strDecl);
    }
    return false;
  }
  /// <summary>
  /// Determines whether the specify record type is a matrix, another HLSL
  /// object, or a user-defined structure.
  /// </summary>
  ArTypeObjectKind ClassifyRecordType(const RecordType *type) {
    DXASSERT_NOMSG(type != nullptr);
    const CXXRecordDecl *typeRecordDecl = type->getAsCXXRecordDecl();
    const ClassTemplateSpecializationDecl *templateSpecializationDecl =
        dyn_cast<ClassTemplateSpecializationDecl>(typeRecordDecl);
    if (templateSpecializationDecl) {
      ClassTemplateDecl *decl =
          templateSpecializationDecl->getSpecializedTemplate();
      if (decl == m_matrixTemplateDecl)
        return AR_TOBJ_MATRIX;
      else if (decl == m_vectorTemplateDecl)
        return AR_TOBJ_VECTOR;
      // Vulkan helper templates and any user-declared (non-implicit)
      // template are treated as plain compound types.
      else if (decl == m_vkIntegralConstantTemplateDecl ||
               decl == m_vkLiteralTemplateDecl)
        return AR_TOBJ_COMPOUND;
      else if (!decl->isImplicit())
        return AR_TOBJ_COMPOUND;
      return AR_TOBJ_OBJECT;
    }
    if (typeRecordDecl && typeRecordDecl->isImplicit()) {
      if (typeRecordDecl->getDeclContext()->isFileContext()) {
        int index = FindObjectBasicKindIndex(typeRecordDecl);
        if (index != -1) {
          ArBasicKind kind = g_ArBasicKindsAsTypes[index];
          // RayDesc and the built-in intersection attributes are implicit
          // records but behave as ordinary structs, not opaque objects.
          if (AR_OBJECT_RAY_DESC == kind ||
              AR_OBJECT_TRIANGLE_INTERSECTION_ATTRIBUTES == kind)
            return AR_TOBJ_COMPOUND;
        }
        return AR_TOBJ_OBJECT;
      } else
        return AR_TOBJ_INNER_OBJ;
    }
    return AR_TOBJ_COMPOUND;
  }
/// <summary>Given a Clang type, determines whether it is a built-in object
/// type (sampler, texture, etc).</summary>
bool IsBuiltInObjectType(QualType type) {
type = GetStructuralForm(type);
if (!type.isNull() && type->isStructureOrClassType()) {
const RecordType *recordType = type->getAs<RecordType>();
return ClassifyRecordType(recordType) == AR_TOBJ_OBJECT;
}
return false;
}
/// <summary>
/// Given the specified type (typed a DeclContext for convenience), determines
/// its RecordDecl, possibly refering to original template record if it's a
/// specialization; this makes the result suitable for looking up in
/// initialization tables.
/// </summary>
const CXXRecordDecl *
GetRecordDeclForBuiltInOrStruct(const DeclContext *context) {
const CXXRecordDecl *recordDecl;
if (const ClassTemplateSpecializationDecl *decl =
dyn_cast<ClassTemplateSpecializationDecl>(context)) {
recordDecl = decl->getSpecializedTemplate()->getTemplatedDecl();
} else {
recordDecl = dyn_cast<CXXRecordDecl>(context);
}
return recordDecl;
}
  /// <summary>Given a Clang type, return the ArTypeObjectKind classification,
  /// (eg AR_TOBJ_VECTOR).</summary>
  ArTypeObjectKind GetTypeObjectKind(QualType type) {
    DXASSERT_NOMSG(!type.isNull());
    type = GetStructuralForm(type);
    if (type->isVoidType())
      return AR_TOBJ_VOID;
    // Arrays and pointers may actually be HLSL strings (char arrays /
    // char pointers) -- classify those as AR_TOBJ_STRING.
    if (type->isArrayType()) {
      return hlsl::IsArrayConstantStringType(type) ? AR_TOBJ_STRING
                                                   : AR_TOBJ_ARRAY;
    }
    if (type->isPointerType()) {
      return hlsl::IsPointerStringType(type) ? AR_TOBJ_STRING : AR_TOBJ_POINTER;
    }
    if (type->isDependentType()) {
      return AR_TOBJ_DEPENDENT;
    }
    if (type->isStructureOrClassType()) {
      const RecordType *recordType = type->getAs<RecordType>();
      return ClassifyRecordType(recordType);
    } else if (const InjectedClassNameType *ClassNameTy =
                   type->getAs<InjectedClassNameType>()) {
      // Inside a class template definition, the template's own name appears
      // as an InjectedClassNameType; classify it like its template.
      const CXXRecordDecl *typeRecordDecl = ClassNameTy->getDecl();
      const ClassTemplateSpecializationDecl *templateSpecializationDecl =
          dyn_cast<ClassTemplateSpecializationDecl>(typeRecordDecl);
      if (templateSpecializationDecl) {
        ClassTemplateDecl *decl =
            templateSpecializationDecl->getSpecializedTemplate();
        if (decl == m_matrixTemplateDecl)
          return AR_TOBJ_MATRIX;
        else if (decl == m_vectorTemplateDecl)
          return AR_TOBJ_VECTOR;
        DXASSERT(decl->isImplicit(),
                 "otherwise object template decl is not set to implicit");
        return AR_TOBJ_OBJECT;
      }
      if (typeRecordDecl && typeRecordDecl->isImplicit()) {
        if (typeRecordDecl->getDeclContext()->isFileContext())
          return AR_TOBJ_OBJECT;
        else
          return AR_TOBJ_INNER_OBJ;
      }
      return AR_TOBJ_COMPOUND;
    }
    if (type->isBuiltinType())
      return AR_TOBJ_BASIC;
    // Enums are classified as basic types as well.
    if (type->isEnumeralType())
      return AR_TOBJ_BASIC;
    return AR_TOBJ_INVALID;
  }
/// <summary>Gets the element type of a matrix or vector type (eg, the 'float'
/// in 'float4x4' or 'float4').</summary>
QualType GetMatrixOrVectorElementType(QualType type) {
type = GetStructuralForm(type);
const CXXRecordDecl *typeRecordDecl = type->getAsCXXRecordDecl();
DXASSERT_NOMSG(typeRecordDecl);
const ClassTemplateSpecializationDecl *templateSpecializationDecl =
dyn_cast<ClassTemplateSpecializationDecl>(typeRecordDecl);
DXASSERT_NOMSG(templateSpecializationDecl);
DXASSERT_NOMSG(templateSpecializationDecl->getSpecializedTemplate() ==
m_matrixTemplateDecl ||
templateSpecializationDecl->getSpecializedTemplate() ==
m_vectorTemplateDecl);
return templateSpecializationDecl->getTemplateArgs().get(0).getAsType();
}
  /// <summary>Gets the type with structural information (elements and shape)
  /// for the given type.</summary> <remarks>This function will strip
  /// lvalue/rvalue references, attributes and qualifiers.</remarks>
  QualType GetStructuralForm(QualType type) {
    if (type.isNull()) {
      return type;
    }
    // Peel off reference and attributed sugar, in any interleaving.
    // RefType is re-tested each iteration, so its non-null value always
    // reflects the current 'type'; AttrType is only consulted when the
    // reference cast failed for this iteration.
    const ReferenceType *RefType = nullptr;
    const AttributedType *AttrType = nullptr;
    while ((RefType = dyn_cast<ReferenceType>(type)) ||
           (AttrType = dyn_cast<AttributedType>(type))) {
      type =
          RefType ? RefType->getPointeeType() : AttrType->getEquivalentType();
    }
    // Despite its name, getCanonicalTypeUnqualified will preserve const for
    // array elements or something
    return QualType(type->getCanonicalTypeUnqualified()->getTypePtr(), 0);
  }
  /// <summary>Given a Clang type, return the QualType for its element,
  /// drilling through any array/vector/matrix.</summary>
  QualType GetTypeElementType(QualType type) {
    type = GetStructuralForm(type);
    ArTypeObjectKind kind = GetTypeObjectKind(type);
    if (kind == AR_TOBJ_MATRIX || kind == AR_TOBJ_VECTOR) {
      type = GetMatrixOrVectorElementType(type);
    } else if (kind == AR_TOBJ_STRING) {
      // return original type even if it's an array (string literal)
    } else if (type->isArrayType()) {
      // Recurse through nested array dimensions.
      const ArrayType *arrayType = type->getAsArrayTypeUnsafe();
      type = GetTypeElementType(arrayType->getElementType());
    }
    return type;
  }
  /// <summary>Given a Clang type, return the ArBasicKind classification for
  /// its contents.</summary>
  ArBasicKind GetTypeElementKind(QualType type) {
    type = GetStructuralForm(type);
    ArTypeObjectKind kind = GetTypeObjectKind(type);
    // Matrices/vectors classify by their element type.
    if (kind == AR_TOBJ_MATRIX || kind == AR_TOBJ_VECTOR) {
      QualType elementType = GetMatrixOrVectorElementType(type);
      return GetTypeElementKind(elementType);
    }
    if (kind == AR_TOBJ_STRING) {
      // An array form means a string literal; a pointer form means 'string'.
      return type->isArrayType() ? AR_OBJECT_STRING_LITERAL : AR_OBJECT_STRING;
    }
    // Arrays classify by their element type.
    if (type->isArrayType()) {
      const ArrayType *arrayType = type->getAsArrayTypeUnsafe();
      return GetTypeElementKind(arrayType->getElementType());
    }
    if (kind == AR_TOBJ_INNER_OBJ) {
      return AR_OBJECT_INNER;
    } else if (kind == AR_TOBJ_OBJECT) {
      // Classify the object as the element type.
      const CXXRecordDecl *typeRecordDecl =
          GetRecordDeclForBuiltInOrStruct(type->getAsCXXRecordDecl());
      int index = FindObjectBasicKindIndex(typeRecordDecl);
      // NOTE: this will likely need to be updated for specialized records
      DXASSERT(index != -1,
               "otherwise can't find type we already determined was an object");
      return g_ArBasicKindsAsTypes[index];
    }
    // Scalars fall through to the builtin/enum mapping.
    CanQualType canType = type->getCanonicalTypeUnqualified();
    return BasicTypeForScalarType(canType);
  }
  // Maps a canonical Clang builtin or enum type to its ArBasicKind.
  // Returns AR_BASIC_UNKNOWN for builtin kinds with no HLSL equivalent.
  ArBasicKind BasicTypeForScalarType(CanQualType type) {
    if (const BuiltinType *BT = dyn_cast<BuiltinType>(type)) {
      switch (BT->getKind()) {
      case BuiltinType::Bool:
        return AR_BASIC_BOOL;
      case BuiltinType::Double:
        return AR_BASIC_FLOAT64;
      case BuiltinType::Float:
        return AR_BASIC_FLOAT32;
      case BuiltinType::Half:
        return AR_BASIC_FLOAT16;
      case BuiltinType::HalfFloat:
        return AR_BASIC_FLOAT32_PARTIAL_PRECISION;
      case BuiltinType::Int:
        return AR_BASIC_INT32;
      case BuiltinType::UInt:
        return AR_BASIC_UINT32;
      case BuiltinType::Short:
        return AR_BASIC_INT16;
      case BuiltinType::UShort:
        return AR_BASIC_UINT16;
      // 'long' is treated as 32-bit here, matching HLSL's int width.
      case BuiltinType::Long:
        return AR_BASIC_INT32;
      case BuiltinType::ULong:
        return AR_BASIC_UINT32;
      case BuiltinType::LongLong:
        return AR_BASIC_INT64;
      case BuiltinType::ULongLong:
        return AR_BASIC_UINT64;
      case BuiltinType::Min12Int:
        return AR_BASIC_MIN12INT;
      case BuiltinType::Min16Float:
        return AR_BASIC_MIN16FLOAT;
      case BuiltinType::Min16Int:
        return AR_BASIC_MIN16INT;
      case BuiltinType::Min16UInt:
        return AR_BASIC_MIN16UINT;
      case BuiltinType::Min10Float:
        return AR_BASIC_MIN10FLOAT;
      case BuiltinType::LitFloat:
        return AR_BASIC_LITERAL_FLOAT;
      case BuiltinType::LitInt:
        return AR_BASIC_LITERAL_INT;
      case BuiltinType::Int8_4Packed:
        return AR_BASIC_INT8_4PACKED;
      case BuiltinType::UInt8_4Packed:
        return AR_BASIC_UINT8_4PACKED;
      case BuiltinType::Dependent:
        return AR_BASIC_DEPENDENT;
      default:
        // Only builtin types that have basickind equivalents.
        break;
      }
    }
    if (const EnumType *ET = dyn_cast<EnumType>(type)) {
      if (ET->getDecl()->isScopedUsingClassTag())
        return AR_BASIC_ENUM_CLASS;
      return AR_BASIC_ENUM;
    }
    return AR_BASIC_UNKNOWN;
  }
  // Registers every intrinsic the given table declares for each built-in
  // object type, adding one method template per distinct signature.
  void AddIntrinsicTableMethods(IDxcIntrinsicTable *table) {
    DXASSERT_NOMSG(table != nullptr);
    // Function intrinsics are added on-demand, objects get template methods.
    for (unsigned i = 0; i < _countof(g_ArBasicKindsAsTypes); i++) {
      // Grab information already processed by AddObjectTypes.
      ArBasicKind kind = g_ArBasicKindsAsTypes[i];
      const char *typeName = g_ArBasicTypeNames[kind];
      uint8_t templateArgCount = g_ArBasicKindsTemplateCount[i];
      DXASSERT(templateArgCount <= 3, "otherwise a new case has been added");
      int startDepth = (templateArgCount == 0) ? 0 : 1;
      CXXRecordDecl *recordDecl = m_objectTypeDecls[i];
      if (recordDecl == nullptr) {
        continue;
      }
      // This is a variation of AddObjectMethods using the new table.
      const HLSL_INTRINSIC *pIntrinsic = nullptr;
      const HLSL_INTRINSIC *pPrior = nullptr;
      UINT64 lookupCookie = 0;
      CA2W wideTypeName(typeName);
      HRESULT found = table->LookupIntrinsic(wideTypeName, L"*", &pIntrinsic,
                                             &lookupCookie);
      while (pIntrinsic != nullptr && SUCCEEDED(found)) {
        // Skip consecutive entries with equivalent templates to avoid
        // declaring duplicate method templates.
        if (!AreIntrinsicTemplatesEquivalent(pIntrinsic, pPrior)) {
          AddObjectIntrinsicTemplate(recordDecl, startDepth, pIntrinsic);
          // NOTE: this only works with the current implementation because
          // intrinsics are alive as long as the table is alive.
          pPrior = pIntrinsic;
        }
        found = table->LookupIntrinsic(wideTypeName, L"*", &pIntrinsic,
                                       &lookupCookie);
      }
    }
  }
void RegisterIntrinsicTable(IDxcIntrinsicTable *table) {
DXASSERT_NOMSG(table != nullptr);
m_intrinsicTables.push_back(table);
// If already initialized, add methods immediately.
if (m_sema != nullptr) {
AddIntrinsicTableMethods(table);
}
}
  // Maps an ArBasicKind to the corresponding HLSLScalarType used by the
  // scalar/vector/matrix lookup tables. Returns HLSLScalarType_unknown for
  // kinds with no scalar equivalent.
  HLSLScalarType ScalarTypeForBasic(ArBasicKind kind) {
    DXASSERT(kind < AR_BASIC_COUNT,
             "otherwise caller didn't check that the value was in range");
    switch (kind) {
    case AR_BASIC_BOOL:
      return HLSLScalarType_bool;
    case AR_BASIC_LITERAL_FLOAT:
      return HLSLScalarType_float_lit;
    case AR_BASIC_FLOAT16:
      return HLSLScalarType_half;
    case AR_BASIC_FLOAT32_PARTIAL_PRECISION:
      return HLSLScalarType_float;
    case AR_BASIC_FLOAT32:
      return HLSLScalarType_float;
    case AR_BASIC_FLOAT64:
      return HLSLScalarType_double;
    case AR_BASIC_LITERAL_INT:
      return HLSLScalarType_int_lit;
    // 8-bit kinds widen to 32-bit scalar types.
    case AR_BASIC_INT8:
      return HLSLScalarType_int;
    case AR_BASIC_UINT8:
      return HLSLScalarType_uint;
    case AR_BASIC_INT16:
      return HLSLScalarType_int16;
    case AR_BASIC_UINT16:
      return HLSLScalarType_uint16;
    case AR_BASIC_INT32:
      return HLSLScalarType_int;
    case AR_BASIC_UINT32:
      return HLSLScalarType_uint;
    case AR_BASIC_MIN10FLOAT:
      return HLSLScalarType_float_min10;
    case AR_BASIC_MIN16FLOAT:
      return HLSLScalarType_float_min16;
    case AR_BASIC_MIN12INT:
      return HLSLScalarType_int_min12;
    case AR_BASIC_MIN16INT:
      return HLSLScalarType_int_min16;
    case AR_BASIC_MIN16UINT:
      return HLSLScalarType_uint_min16;
    case AR_BASIC_INT8_4PACKED:
      return HLSLScalarType_int8_4packed;
    case AR_BASIC_UINT8_4PACKED:
      return HLSLScalarType_uint8_4packed;
    case AR_BASIC_INT64:
      return HLSLScalarType_int64;
    case AR_BASIC_UINT64:
      return HLSLScalarType_uint64;
    // Unscoped enums behave as int.
    case AR_BASIC_ENUM:
      return HLSLScalarType_int;
    default:
      return HLSLScalarType_unknown;
    }
  }
  // Returns the Clang QualType representing the given ArBasicKind: builtin
  // types for scalars, cached typedef types for min-precision/packed kinds,
  // and the registered record type for built-in objects. Returns a null
  // QualType for kinds with no type representation (eg effect states).
  QualType GetBasicKindType(ArBasicKind kind) {
    DXASSERT_VALIDBASICKIND(kind);
    switch (kind) {
    case AR_OBJECT_NULL:
      return m_context->VoidTy;
    case AR_BASIC_BOOL:
      return m_context->BoolTy;
    case AR_BASIC_LITERAL_FLOAT:
      return m_context->LitFloatTy;
    case AR_BASIC_FLOAT16:
      return m_context->HalfTy;
    case AR_BASIC_FLOAT32_PARTIAL_PRECISION:
      return m_context->HalfFloatTy;
    case AR_BASIC_FLOAT32:
      return m_context->FloatTy;
    case AR_BASIC_FLOAT64:
      return m_context->DoubleTy;
    case AR_BASIC_LITERAL_INT:
      return m_context->LitIntTy;
    // 8-bit kinds are represented with 32-bit types.
    case AR_BASIC_INT8:
      return m_context->IntTy;
    case AR_BASIC_UINT8:
      return m_context->UnsignedIntTy;
    case AR_BASIC_INT16:
      return m_context->ShortTy;
    case AR_BASIC_UINT16:
      return m_context->UnsignedShortTy;
    case AR_BASIC_INT32:
      return m_context->IntTy;
    case AR_BASIC_UINT32:
      return m_context->UnsignedIntTy;
    case AR_BASIC_INT64:
      return m_context->LongLongTy;
    case AR_BASIC_UINT64:
      return m_context->UnsignedLongLongTy;
    // Min-precision and packed kinds use the lazily-created scalar typedefs.
    case AR_BASIC_MIN10FLOAT:
      return m_scalarTypes[HLSLScalarType_float_min10];
    case AR_BASIC_MIN16FLOAT:
      return m_scalarTypes[HLSLScalarType_float_min16];
    case AR_BASIC_MIN12INT:
      return m_scalarTypes[HLSLScalarType_int_min12];
    case AR_BASIC_MIN16INT:
      return m_scalarTypes[HLSLScalarType_int_min16];
    case AR_BASIC_MIN16UINT:
      return m_scalarTypes[HLSLScalarType_uint_min16];
    case AR_BASIC_INT8_4PACKED:
      return m_scalarTypes[HLSLScalarType_int8_4packed];
    case AR_BASIC_UINT8_4PACKED:
      return m_scalarTypes[HLSLScalarType_uint8_4packed];
    case AR_BASIC_ENUM:
      return m_context->IntTy;
    case AR_BASIC_ENUM_CLASS:
      return m_context->IntTy;
    case AR_OBJECT_STRING:
      return m_hlslStringType;
    case AR_OBJECT_STRING_LITERAL:
      // m_hlslStringType is defined as 'char *'.
      // for STRING_LITERAL we should use 'const char *'.
      return m_context->getPointerType(m_context->CharTy.withConst());
    case AR_OBJECT_LEGACY_EFFECT: // used for all legacy effect object types
    case AR_OBJECT_TEXTURE1D:
    case AR_OBJECT_TEXTURE1D_ARRAY:
    case AR_OBJECT_TEXTURE2D:
    case AR_OBJECT_TEXTURE2D_ARRAY:
    case AR_OBJECT_TEXTURE3D:
    case AR_OBJECT_TEXTURECUBE:
    case AR_OBJECT_TEXTURECUBE_ARRAY:
    case AR_OBJECT_TEXTURE2DMS:
    case AR_OBJECT_TEXTURE2DMS_ARRAY:
    case AR_OBJECT_SAMPLER:
    case AR_OBJECT_SAMPLERCOMPARISON:
    case AR_OBJECT_HEAP_RESOURCE:
    case AR_OBJECT_HEAP_SAMPLER:
    case AR_OBJECT_BUFFER:
    case AR_OBJECT_POINTSTREAM:
    case AR_OBJECT_LINESTREAM:
    case AR_OBJECT_TRIANGLESTREAM:
    case AR_OBJECT_INPUTPATCH:
    case AR_OBJECT_OUTPUTPATCH:
    case AR_OBJECT_RWTEXTURE1D:
    case AR_OBJECT_RWTEXTURE1D_ARRAY:
    case AR_OBJECT_RWTEXTURE2D:
    case AR_OBJECT_RWTEXTURE2D_ARRAY:
    case AR_OBJECT_RWTEXTURE3D:
    case AR_OBJECT_RWBUFFER:
    case AR_OBJECT_BYTEADDRESS_BUFFER:
    case AR_OBJECT_RWBYTEADDRESS_BUFFER:
    case AR_OBJECT_STRUCTURED_BUFFER:
    case AR_OBJECT_RWSTRUCTURED_BUFFER:
    case AR_OBJECT_APPEND_STRUCTURED_BUFFER:
    case AR_OBJECT_CONSUME_STRUCTURED_BUFFER:
    case AR_OBJECT_WAVE:
    case AR_OBJECT_ACCELERATION_STRUCT:
    case AR_OBJECT_RAY_DESC:
    case AR_OBJECT_TRIANGLE_INTERSECTION_ATTRIBUTES:
    case AR_OBJECT_RWTEXTURE2DMS:
    case AR_OBJECT_RWTEXTURE2DMS_ARRAY:
    case AR_OBJECT_EMPTY_NODE_INPUT:
    case AR_OBJECT_DISPATCH_NODE_INPUT_RECORD:
    case AR_OBJECT_RWDISPATCH_NODE_INPUT_RECORD:
    case AR_OBJECT_GROUP_NODE_INPUT_RECORDS:
    case AR_OBJECT_RWGROUP_NODE_INPUT_RECORDS:
    case AR_OBJECT_THREAD_NODE_INPUT_RECORD:
    case AR_OBJECT_RWTHREAD_NODE_INPUT_RECORD:
    case AR_OBJECT_NODE_OUTPUT:
    case AR_OBJECT_EMPTY_NODE_OUTPUT:
    case AR_OBJECT_NODE_OUTPUT_ARRAY:
    case AR_OBJECT_EMPTY_NODE_OUTPUT_ARRAY:
    case AR_OBJECT_THREAD_NODE_OUTPUT_RECORDS:
    case AR_OBJECT_GROUP_NODE_OUTPUT_RECORDS: {
      // Object kinds: find the kind's slot in the parallel tables and
      // return the registered record type.
      const ArBasicKind *match = std::find(
          g_ArBasicKindsAsTypes,
          &g_ArBasicKindsAsTypes[_countof(g_ArBasicKindsAsTypes)], kind);
      DXASSERT(match != &g_ArBasicKindsAsTypes[_countof(g_ArBasicKindsAsTypes)],
               "otherwise can't find constant in basic kinds");
      size_t index = match - g_ArBasicKindsAsTypes;
      return m_context->getTagDeclType(this->m_objectTypeDecls[index]);
    }
    case AR_OBJECT_SAMPLER1D:
    case AR_OBJECT_SAMPLER2D:
    case AR_OBJECT_SAMPLER3D:
    case AR_OBJECT_SAMPLERCUBE:
      // Turn dimension-typed samplers into sampler states.
      return GetBasicKindType(AR_OBJECT_SAMPLER);
    case AR_OBJECT_STATEBLOCK:
    case AR_OBJECT_RASTERIZER:
    case AR_OBJECT_DEPTHSTENCIL:
    case AR_OBJECT_BLEND:
    case AR_OBJECT_RWSTRUCTURED_BUFFER_ALLOC:
    case AR_OBJECT_RWSTRUCTURED_BUFFER_CONSUME:
    default:
      return QualType();
    }
  }
  /// <summary>Promotes the specified expression to an integer type if it's a
  /// boolean type.</summary> <param name="E">Expression to typecast.</param>
  /// <returns>E typecast to a integer type if it's a valid boolean type; E
  /// otherwise.</returns>
  ExprResult PromoteToIntIfBool(ExprResult &E);
QualType NewQualifiedType(UINT64 qwUsages, QualType type) {
// NOTE: NewQualifiedType does quite a bit more in the prior compiler
(void)(qwUsages);
return type;
}
  // Builds the QualType for a scalar, vector, or matrix from a component
  // kind and dimensions. ExplicitKind can force vector/matrix shape even
  // when a dimension is 1; qwQual usage flags are currently mostly unused.
  QualType NewSimpleAggregateType(ArTypeObjectKind ExplicitKind,
                                  ArBasicKind componentType, UINT64 qwQual,
                                  UINT uRows, UINT uCols) {
    DXASSERT_VALIDBASICKIND(componentType);
    QualType pType; // The type to return.
    if (componentType < AR_BASIC_COUNT) {
      // If basic numeric, call LookupScalarTypeDef to ensure on-demand
      // initialization
      LookupScalarTypeDef(ScalarTypeForBasic(componentType));
    }
    QualType pEltType = GetBasicKindType(componentType);
    DXASSERT(!pEltType.isNull(),
             "otherwise caller is specifying an incorrect basic kind type");
    // TODO: handle adding qualifications like const
    pType = NewQualifiedType(
        qwQual & ~(UINT64)(AR_QUAL_COLMAJOR | AR_QUAL_ROWMAJOR), pEltType);
    if (uRows > 1 || uCols > 1 || ExplicitKind == AR_TOBJ_VECTOR ||
        ExplicitKind == AR_TOBJ_MATRIX) {
      HLSLScalarType scalarType = ScalarTypeForBasic(componentType);
      DXASSERT(scalarType != HLSLScalarType_unknown,
               "otherwise caller is specifying an incorrect type");
      // A single row yields a vector unless a matrix was explicitly asked.
      if ((uRows == 1 && ExplicitKind != AR_TOBJ_MATRIX) ||
          ExplicitKind == AR_TOBJ_VECTOR) {
        pType = LookupVectorType(scalarType, uCols);
      } else {
        pType = LookupMatrixType(scalarType, uRows, uCols);
      }
      // TODO: handle colmajor/rowmajor
      // if ((qwQual & (AR_QUAL_COLMAJOR | AR_QUAL_ROWMAJOR)) != 0)
      //{
      //  VN(pType = NewQualifiedType(pSrcLoc,
      //                              qwQual & (AR_QUAL_COLMAJOR |
      //                              AR_QUAL_ROWMAJOR),
      //                              pMatrix));
      //}
      // else
      //{
      //  pType = pMatrix;
      //}
    }
    return pType;
  }
  /// <summary>Attempts to match Args to the signature specification in
  /// pIntrinsic.</summary> <param name="cursor">Intrinsic function
  /// iterator.</param> <param name="objectElement">Type element on the class
  /// intrinsic belongs to; possibly null (eg, 'float' in
  /// 'Texture2D<float>').</param> <param name="Args">Invocation arguments to
  /// match.</param> <param name="argTypes">After execution, type of
  /// arguments.</param> <param name="badArgIdx">The first argument to mismatch
  /// if any</param> <remarks>On success, argTypes includes the clang Types to
  /// use for the signature, with the first being the return type.</remarks>
  bool MatchArguments(const IntrinsicDefIter &cursor, QualType objectType,
                      QualType objectElement, QualType functionTemplateTypeArg,
                      ArrayRef<Expr *> Args, std::vector<QualType> *,
                      size_t &badArgIdx);
  /// <summary>Validate object element on intrinsic to catch case like integer
  /// on Sample.</summary> <param name="tableName">Intrinsic function to
  /// validate.</param> <param name="op">Intrinsic opcode to validate.</param>
  /// <param name="objectElement">Type element on the class intrinsic belongs
  /// to; possibly null (eg, 'float' in 'Texture2D<float>').</param>
  bool IsValidObjectElement(LPCSTR tableName, IntrinsicOp op,
                            QualType objectElement);
  // Returns the iterator with the first entry that matches the requirement
  // (name and, for non-variadic intrinsics, exact argument count); returns
  // the end iterator when no built-in entry matches.
  IntrinsicDefIter FindIntrinsicByNameAndArgCount(const HLSL_INTRINSIC *table,
                                                  size_t tableSize,
                                                  StringRef typeName,
                                                  StringRef nameIdentifier,
                                                  size_t argumentCount) {
    // This is implemented by a linear scan for now.
    // We tested binary search on tables, and there was no performance gain on
    // samples probably for the following reasons.
    // 1. The tables are not big enough to make noticeable difference
    // 2. The user of this function assumes that it returns the first entry in
    // the table that matches name and argument count. So even in the binary
    // search, we have to scan backwards until the entry does not match the name
    // or arg count. For linear search this is not a problem
    for (unsigned int i = 0; i < tableSize; i++) {
      const HLSL_INTRINSIC *pIntrinsic = &table[i];
      const bool isVariadicFn = IsVariadicIntrinsicFunction(pIntrinsic);
      // Do some quick checks to verify size and name.
      // uNumArgs counts the return slot, hence the 1 + argumentCount.
      if (!isVariadicFn && pIntrinsic->uNumArgs != 1 + argumentCount) {
        continue;
      }
      if (!nameIdentifier.equals(StringRef(pIntrinsic->pArgs[0].pName))) {
        continue;
      }
      return IntrinsicDefIter::CreateStart(
          table, tableSize, pIntrinsic,
          IntrinsicTableDefIter::CreateStart(m_intrinsicTables, typeName,
                                             nameIdentifier, argumentCount));
    }
    // No built-in match; the iterator may still yield external-table entries.
    return IntrinsicDefIter::CreateStart(
        table, tableSize, table + tableSize,
        IntrinsicTableDefIter::CreateStart(m_intrinsicTables, typeName,
                                           nameIdentifier, argumentCount));
  }
  // Adds intrinsic-function overload candidates for an unresolved call.
  // Returns true when a viable intrinsic candidate was added; non-viable
  // candidates are still recorded so diagnostics can explain the mismatch.
  bool AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                   ArrayRef<Expr *> Args,
                                   OverloadCandidateSet &CandidateSet,
                                   bool PartialOverloading) override {
    DXASSERT_NOMSG(ULE != nullptr);
    const bool isQualified = ULE->getQualifier();
    const bool isGlobalNamespace =
        ULE->getQualifier() &&
        ULE->getQualifier()->getKind() == NestedNameSpecifier::Global;
    const bool isVkNamespace =
        ULE->getQualifier() &&
        ULE->getQualifier()->getKind() == NestedNameSpecifier::Namespace &&
        ULE->getQualifier()->getAsNamespace()->getName() == "vk";
    // Intrinsics live in the global namespace, so references to their names
    // should be either unqualified or '::'-prefixed.
    // Exception: Vulkan-specific intrinsics live in the 'vk::' namespace.
    if (isQualified && !isGlobalNamespace && !isVkNamespace) {
      return false;
    }
    const DeclarationNameInfo declName = ULE->getNameInfo();
    IdentifierInfo *idInfo = declName.getName().getAsIdentifierInfo();
    if (idInfo == nullptr) {
      return false;
    }
    StringRef nameIdentifier = idInfo->getName();
    const HLSL_INTRINSIC *table = g_Intrinsics;
    auto tableCount = _countof(g_Intrinsics);
#ifdef ENABLE_SPIRV_CODEGEN
    if (isVkNamespace) {
      table = g_VkIntrinsics;
      tableCount = _countof(g_VkIntrinsics);
    }
#endif // ENABLE_SPIRV_CODEGEN
    IntrinsicDefIter cursor = FindIntrinsicByNameAndArgCount(
        table, tableCount, StringRef(), nameIdentifier, Args.size());
    IntrinsicDefIter end = IntrinsicDefIter::CreateEnd(
        table, tableCount, IntrinsicTableDefIter::CreateEnd(m_intrinsicTables));
    for (; cursor != end; ++cursor) {
      // If this is the intrinsic we're interested in, build up a representation
      // of the types we need.
      const HLSL_INTRINSIC *pIntrinsic = *cursor;
      LPCSTR tableName = cursor.GetTableName();
      LPCSTR lowering = cursor.GetLoweringStrategy();
      DXASSERT(pIntrinsic->uNumArgs <= g_MaxIntrinsicParamCount + 1,
               "otherwise g_MaxIntrinsicParamCount needs to be updated for "
               "wider signatures");
      std::vector<QualType> functionArgTypes;
      size_t badArgIdx;
      bool argsMatch =
          MatchArguments(cursor, QualType(), QualType(), QualType(), Args,
                         &functionArgTypes, badArgIdx);
      if (!functionArgTypes.size())
        return false;
      // Get or create the overload we're interested in.
      // The intrinsic function decl is cached per (intrinsic, signature).
      FunctionDecl *intrinsicFuncDecl = nullptr;
      std::pair<UsedIntrinsicStore::iterator, bool> insertResult =
          m_usedIntrinsics.insert(UsedIntrinsic(pIntrinsic, functionArgTypes));
      bool insertedNewValue = insertResult.second;
      if (insertedNewValue) {
        DXASSERT(tableName,
                 "otherwise IDxcIntrinsicTable::GetTableName() failed");
        intrinsicFuncDecl = AddHLSLIntrinsicFunction(
            *m_context, isVkNamespace ? m_vkNSDecl : m_hlslNSDecl, tableName,
            lowering, pIntrinsic, &functionArgTypes);
        insertResult.first->setFunctionDecl(intrinsicFuncDecl);
      } else {
        intrinsicFuncDecl = (*insertResult.first).getFunctionDecl();
      }
      OverloadCandidate &candidate = CandidateSet.addCandidate(Args.size());
      candidate.Function = intrinsicFuncDecl;
      candidate.FoundDecl.setDecl(intrinsicFuncDecl);
      candidate.Viable = argsMatch;
      CandidateSet.isNewCandidate(intrinsicFuncDecl); // used to insert into set
      if (argsMatch)
        return true;
      // badArgIdx counts from 1 (slot 0 of functionArgTypes is the return
      // type); 0 presumably means the mismatch was not on a specific
      // argument -- TODO confirm against MatchArguments.
      if (badArgIdx) {
        candidate.FailureKind = ovl_fail_bad_conversion;
        QualType ParamType = functionArgTypes[badArgIdx];
        candidate.Conversions[badArgIdx - 1].setBad(
            BadConversionSequence::no_conversion, Args[badArgIdx - 1],
            ParamType);
      } else {
        // A less informative error. Needed when the failure relates to the
        // return type
        candidate.FailureKind = ovl_fail_bad_final_conversion;
      }
    }
    return false;
  }
  // One-time setup against the ASTContext: creates the implicit 'hlsl'
  // namespace and registers base types, scalar typedefs, string, the
  // vector/matrix templates, built-in constants, and node-record templates.
  bool Initialize(ASTContext &context) {
    m_context = &context;
    m_hlslNSDecl =
        NamespaceDecl::Create(context, context.getTranslationUnitDecl(),
                              /*Inline*/ false, SourceLocation(),
                              SourceLocation(), &context.Idents.get("hlsl"),
                              /*PrevDecl*/ nullptr);
    m_hlslNSDecl->setImplicit();
    AddBaseTypes();
    AddHLSLScalarTypes();
    AddHLSLStringType();
    // The vector template must exist before the matrix template, which
    // builds on it.
    AddHLSLVectorTemplate(*m_context, &m_vectorTemplateDecl);
    DXASSERT(
        m_vectorTemplateDecl != nullptr,
        "AddHLSLVectorTypes failed to return the vector template declaration");
    AddHLSLMatrixTemplate(*m_context, m_vectorTemplateDecl,
                          &m_matrixTemplateDecl);
    DXASSERT(
        m_matrixTemplateDecl != nullptr,
        "AddHLSLMatrixTypes failed to return the matrix template declaration");
    // Initializing built in integers for ray tracing
    AddRaytracingConstants(*m_context);
    AddSamplerFeedbackConstants(*m_context);
    AddBarrierConstants(*m_context);
    AddHLSLNodeOutputRecordTemplate(*m_context,
                                    DXIL::NodeIOKind::GroupNodeOutputRecords,
                                    &m_GroupNodeOutputRecordsTemplateDecl,
                                    /* isCompleteType */ false);
    AddHLSLNodeOutputRecordTemplate(*m_context,
                                    DXIL::NodeIOKind::ThreadNodeOutputRecords,
                                    &m_ThreadNodeOutputRecordsTemplateDecl,
                                    /* isCompleteType */ false);
    return true;
  }
  /// <summary>Checks whether the specified type is numeric or composed of
  /// numeric elements exclusively.</summary>
  /// <param name="count">Out parameter; presumably receives the numeric
  /// element count on success -- see the definition to confirm.</param>
  bool IsTypeNumeric(QualType type, UINT *count);
/// <summary>Checks whether the specified type is a scalar type.</summary>
bool IsScalarType(const QualType &type) {
DXASSERT(!type.isNull(), "caller should validate its type is initialized");
return BasicTypeForScalarType(type->getCanonicalTypeUnqualified()) !=
AR_BASIC_UNKNOWN;
}
/// <summary>Checks whether the specified value is a valid vector
/// size.</summary>
bool IsValidVectorSize(size_t length) { return 1 <= length && length <= 4; }
/// <summary>Checks whether the specified value is a valid matrix row or
/// column size.</summary>
bool IsValidMatrixColOrRowSize(size_t length) {
return 1 <= length && length <= 4;
}
  // Validates a type used as a template argument (eg the 'T' in
  // Texture2D<T>): rejects qualified types, built-in objects, and
  // oversized vectors/matrices, recursing through arrays and struct
  // fields. Emits diagnostics at argLoc and returns false on failure.
  bool IsValidTemplateArgumentType(SourceLocation argLoc, const QualType &type,
                                   bool requireScalar) {
    if (type.isNull()) {
      return false;
    }
    if (type.hasQualifiers()) {
      return false;
    }
    QualType qt = GetStructuralForm(type);
    if (requireScalar) {
      if (!IsScalarType(qt)) {
        m_sema->Diag(argLoc,
                     diag::err_hlsl_typeintemplateargument_requires_scalar)
            << type;
        return false;
      }
      return true;
    } else {
      ArTypeObjectKind objectKind = GetTypeObjectKind(qt);
      if (qt->isArrayType()) {
        // Arrays are valid when their element type is.
        const ArrayType *arrayType = qt->getAsArrayTypeUnsafe();
        return IsValidTemplateArgumentType(argLoc, arrayType->getElementType(),
                                           false);
      } else if (objectKind == AR_TOBJ_VECTOR) {
        // Check both component count and element type, reporting all
        // problems before returning.
        bool valid = true;
        if (!IsValidVectorSize(GetHLSLVecSize(type))) {
          valid = false;
          m_sema->Diag(argLoc, diag::err_hlsl_unsupportedvectorsize)
              << type << GetHLSLVecSize(type);
        }
        if (!IsScalarType(GetMatrixOrVectorElementType(type))) {
          valid = false;
          m_sema->Diag(argLoc, diag::err_hlsl_unsupportedvectortype)
              << type << GetMatrixOrVectorElementType(type);
        }
        return valid;
      } else if (objectKind == AR_TOBJ_MATRIX) {
        bool valid = true;
        UINT rowCount, colCount;
        GetRowsAndCols(type, rowCount, colCount);
        if (!IsValidMatrixColOrRowSize(rowCount) ||
            !IsValidMatrixColOrRowSize(colCount)) {
          valid = false;
          m_sema->Diag(argLoc, diag::err_hlsl_unsupportedmatrixsize)
              << type << rowCount << colCount;
        }
        if (!IsScalarType(GetMatrixOrVectorElementType(type))) {
          valid = false;
          m_sema->Diag(argLoc, diag::err_hlsl_unsupportedvectortype)
              << type << GetMatrixOrVectorElementType(type);
        }
        return valid;
      } else if (qt->isStructureOrClassType()) {
        const RecordType *recordType = qt->getAs<RecordType>();
        objectKind = ClassifyRecordType(recordType);
        switch (objectKind) {
        case AR_TOBJ_OBJECT:
          // Built-in objects (textures, samplers, ...) cannot nest.
          m_sema->Diag(argLoc, diag::err_hlsl_objectintemplateargument) << type;
          return false;
        case AR_TOBJ_COMPOUND: {
          // User structs are valid when all their fields are.
          const RecordDecl *recordDecl = recordType->getDecl();
          if (recordDecl->isInvalidDecl())
            return false;
          RecordDecl::field_iterator begin = recordDecl->field_begin();
          RecordDecl::field_iterator end = recordDecl->field_end();
          bool result = true;
          while (begin != end) {
            const FieldDecl *fieldDecl = *begin;
            if (!IsValidTemplateArgumentType(argLoc, fieldDecl->getType(),
                                             false)) {
              m_sema->Diag(argLoc, diag::note_field_type_usage)
                  << fieldDecl->getType() << fieldDecl->getIdentifier() << type;
              result = false;
            }
            begin++;
          }
          return result;
        }
        default:
          m_sema->Diag(argLoc, diag::err_hlsl_typeintemplateargument) << type;
          return false;
        }
      } else if (IsScalarType(qt)) {
        return true;
      } else {
        m_sema->Diag(argLoc, diag::err_hlsl_typeintemplateargument) << type;
        return false;
      }
    }
  }
/// <summary>Checks whether the source type can be converted to the target
/// type.</summary>
bool CanConvert(SourceLocation loc, Expr *sourceExpr, QualType target,
bool explicitConversion, TYPE_CONVERSION_REMARKS *remarks,
StandardConversionSequence *sequence);
void CollectInfo(QualType type, ArTypeInfo *pTypeInfo);
void GetConversionForm(QualType type, bool explicitConversion,
ArTypeInfo *pTypeInfo);
bool ValidateCast(SourceLocation Loc, Expr *source, QualType target,
bool explicitConversion, bool suppressWarnings,
bool suppressErrors, StandardConversionSequence *sequence);
bool ValidatePrimitiveTypeForOperand(SourceLocation loc, QualType type,
ArTypeObjectKind kind);
bool ValidateTypeRequirements(SourceLocation loc, ArBasicKind elementKind,
ArTypeObjectKind objectKind,
bool requiresIntegrals, bool requiresNumerics);
/// <summary>Validates and adjusts operands for the specified binary
/// operator.</summary> <param name="OpLoc">Source location for
/// operator.</param> <param name="Opc">Kind of binary operator.</param>
/// <param name="LHS">Left-hand-side expression, possibly updated by this
/// function.</param> <param name="RHS">Right-hand-side expression, possibly
/// updated by this function.</param> <param name="ResultTy">Result type for
/// operator expression.</param> <param name="CompLHSTy">Type of LHS after
/// promotions for computation.</param> <param name="CompResultTy">Type of
/// computation result.</param>
void CheckBinOpForHLSL(SourceLocation OpLoc, BinaryOperatorKind Opc,
ExprResult &LHS, ExprResult &RHS, QualType &ResultTy,
QualType &CompLHSTy, QualType &CompResultTy);
/// <summary>Validates and adjusts operands for the specified unary
/// operator.</summary> <param name="OpLoc">Source location for
/// operator.</param> <param name="Opc">Kind of operator.</param> <param
/// name="InputExpr">Input expression to the operator.</param> <param
/// name="VK">Value kind for resulting expression.</param> <param
/// name="OK">Object kind for resulting expression.</param> <returns>The
/// result type for the expression.</returns>
QualType CheckUnaryOpForHLSL(SourceLocation OpLoc, UnaryOperatorKind Opc,
ExprResult &InputExpr, ExprValueKind &VK,
ExprObjectKind &OK);
/// <summary>Checks vector conditional operator (Cond ? LHS : RHS).</summary>
/// <param name="Cond">Vector condition expression.</param>
/// <param name="LHS">Left hand side.</param>
/// <param name="RHS">Right hand side.</param>
/// <param name="QuestionLoc">Location of question mark in operator.</param>
/// <returns>Result type of vector conditional expression.</returns>
clang::QualType CheckVectorConditional(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
clang::QualType ApplyTypeSpecSignToParsedType(clang::QualType &type,
TypeSpecifierSign TSS,
SourceLocation Loc);
/// <summary>Diagnoses a vector/matrix dimension template argument outside
/// the valid range [1, 4].</summary>
/// <returns>true when a diagnostic was emitted (value out of range).</returns>
bool CheckRangedTemplateArgument(SourceLocation diagLoc,
                                 llvm::APSInt &sintValue) {
  const bool inRange =
      sintValue.isStrictlyPositive() && sintValue.getLimitedValue() <= 4;
  if (inRange)
    return false;
  m_sema->Diag(diagLoc, diag::err_hlsl_invalid_range_1_4);
  return true;
}
/// <summary>Performs HLSL-specific processing of template
/// declarations.</summary>
/// <returns>true when a diagnostic was emitted (the argument list is
/// invalid); false when the list is acceptable.</returns>
bool
CheckTemplateArgumentListForHLSL(TemplateDecl *Template,
                                 SourceLocation /* TemplateLoc */,
                                 TemplateArgumentListInfo &TemplateArgList) {
  DXASSERT_NOMSG(Template != nullptr);

  // Determine which object type the template refers to.
  StringRef templateName = Template->getName();

  // NOTE: this 'escape valve' allows unit tests to perform type checks.
  if (templateName.equals(StringRef("is_same"))) {
    return false;
  }

  // Allow object type for Constant/TextureBuffer.
  if (templateName == "ConstantBuffer" || templateName == "TextureBuffer") {
    if (TemplateArgList.size() == 1) {
      const TemplateArgumentLoc &argLoc = TemplateArgList[0];
      const TemplateArgument &arg = argLoc.getArgument();
      DXASSERT(arg.getKind() == TemplateArgument::ArgKind::Type, "");
      QualType argType = arg.getAsType();
      SourceLocation argSrcLoc = argLoc.getLocation();
      // The buffer element must be a struct: scalars, vectors, matrices and
      // arrays are all rejected here.
      if (IsScalarType(argType) || IsVectorType(m_sema, argType) ||
          IsMatrixType(m_sema, argType) || argType->isArrayType()) {
        m_sema->Diag(argSrcLoc,
                     diag::err_hlsl_typeintemplateargument_requires_struct)
            << argType;
        return true;
      }
      if (auto *TST = dyn_cast<TemplateSpecializationType>(argType)) {
        // This is a bit of a special case we need to handle. Because the
        // buffer types don't use their template parameter in a way that would
        // force instantiation, we need to force specialization here.
        GetOrCreateTemplateSpecialization(
            *m_context, *m_sema,
            cast<ClassTemplateDecl>(
                TST->getTemplateName().getAsTemplateDecl()),
            llvm::ArrayRef<TemplateArgument>(TST->getArgs(),
                                             TST->getNumArgs()));
      }
      // The element struct must have a complete definition.
      if (const RecordType *recordType = argType->getAs<RecordType>()) {
        if (!recordType->getDecl()->isCompleteDefinition()) {
          m_sema->Diag(argSrcLoc, diag::err_typecheck_decl_incomplete_type)
              << argType;
          return true;
        }
      }
    }
    return false;
  } else if (Template->getTemplatedDecl()->hasAttr<HLSLNodeObjectAttr>()) {
    DXASSERT(TemplateArgList.size() == 1,
             "otherwise the template has not been declared properly");
    // The first argument must be a user defined struct type that does not
    // contain any HLSL object
    const TemplateArgumentLoc &ArgLoc = TemplateArgList[0];
    const TemplateArgument &Arg = ArgLoc.getArgument();

    // To get here the arg must have been accepted as a type acceptable to
    // HLSL, but that includes HLSL templates without args which we want to
    // disallow here.
    if (Arg.getKind() == TemplateArgument::ArgKind::Template) {
      TemplateDecl *TD = Arg.getAsTemplate().getAsTemplateDecl();
      SourceLocation ArgSrcLoc = ArgLoc.getLocation();
      m_sema->Diag(ArgSrcLoc, diag::err_hlsl_node_record_type)
          << TD->getName();
      return true;
    }

    QualType ArgTy = Arg.getAsType();
    // Ignore dependent types. Dependent argument types get expanded during
    // template instantiation.
    if (ArgTy->isDependentType())
      return false;

    if (auto *recordType = ArgTy->getAs<RecordType>()) {
      if (CXXRecordDecl *cxxRecordDecl =
              dyn_cast<CXXRecordDecl>(recordType->getDecl())) {
        if (ClassTemplateSpecializationDecl *templateSpecializationDecl =
                dyn_cast<ClassTemplateSpecializationDecl>(cxxRecordDecl)) {
          if (templateSpecializationDecl->getSpecializationKind() ==
              TSK_Undeclared) {
            // Make sure specialization is done before IsTypeNumeric.
            // If not, ArgTy might be treat as empty struct.
            m_sema->RequireCompleteType(
                ArgLoc.getLocation(), ArgTy,
                diag::err_typecheck_decl_incomplete_type);
          }
        }
      }
    }

    // The node record type must be compound - error if it is not.
    if (GetTypeObjectKind(ArgTy) != AR_TOBJ_COMPOUND) {
      m_sema->Diag(ArgLoc.getLocation(), diag::err_hlsl_node_record_type)
          << ArgTy << ArgLoc.getSourceRange();
      return true;
    }

    bool EmptyStruct = true;
    if (DiagnoseNodeStructArgument(m_sema, ArgLoc, ArgTy, EmptyStruct))
      return true;

    // a node input/output record can't be empty - EmptyStruct is false if
    // any fields were found by DiagnoseNodeStructArgument()
    if (EmptyStruct) {
      m_sema->Diag(ArgLoc.getLocation(), diag::err_hlsl_zero_sized_record)
          << templateName << ArgLoc.getSourceRange();
      const RecordDecl *RD = ArgTy->getAs<RecordType>()->getDecl();
      m_sema->Diag(RD->getLocation(), diag::note_defined_here)
          << "zero sized record";
      return true;
    }
    return false;
  }

  // Vector/matrix templates require scalar element types and [1,4] sizes.
  bool isMatrix = Template->getCanonicalDecl() ==
                  m_matrixTemplateDecl->getCanonicalDecl();
  bool isVector = Template->getCanonicalDecl() ==
                  m_vectorTemplateDecl->getCanonicalDecl();
  bool requireScalar = isMatrix || isVector;

  // Check constraints on the type.
  for (unsigned int i = 0; i < TemplateArgList.size(); i++) {
    const TemplateArgumentLoc &argLoc = TemplateArgList[i];
    SourceLocation argSrcLoc = argLoc.getLocation();
    const TemplateArgument &arg = argLoc.getArgument();
    if (arg.getKind() == TemplateArgument::ArgKind::Type) {
      QualType argType = arg.getAsType();
      // Skip dependent types. Types will be checked later, when concrete.
      if (!argType->isDependentType()) {
        if (!IsValidTemplateArgumentType(argSrcLoc, argType, requireScalar)) {
          // NOTE: IsValidTemplateArgumentType emits its own diagnostics
          return true;
        }
      }
    } else if (arg.getKind() == TemplateArgument::ArgKind::Expression) {
      if (isMatrix || isVector) {
        // Dimension expressions that fold to a constant are range-checked;
        // non-constant expressions are left for later checking.
        Expr *expr = arg.getAsExpr();
        llvm::APSInt constantResult;
        if (expr != nullptr &&
            expr->isIntegerConstantExpr(constantResult, *m_context)) {
          if (CheckRangedTemplateArgument(argSrcLoc, constantResult)) {
            return true;
          }
        }
      }
    } else if (arg.getKind() == TemplateArgument::ArgKind::Integral) {
      if (isMatrix || isVector) {
        llvm::APSInt Val = arg.getAsIntegral();
        if (CheckRangedTemplateArgument(argSrcLoc, Val)) {
          return true;
        }
      }
    }
  }

  return false;
}
FindStructBasicTypeResult
FindStructBasicType(DeclContext *functionDeclContext);
/// <summary>Finds the table of intrinsics for the declaration context of a
/// member function.</summary> <param name="functionDeclContext">Declaration
/// context of function.</param> <param name="name">After execution, the name
/// of the object to which the table applies.</param> <param
/// name="intrinsics">After execution, the intrinsic table.</param> <param
/// name="intrinsicCount">After execution, the count of elements in the
/// intrinsic table.</param>
void FindIntrinsicTable(DeclContext *functionDeclContext, const char **name,
const HLSL_INTRINSIC **intrinsics,
size_t *intrinsicCount);
/// <summary>Deduces the template arguments by comparing the argument types
/// and the HLSL intrinsic tables.</summary> <param
/// name="FunctionTemplate">The declaration for the function template being
/// deduced.</param> <param name="ExplicitTemplateArgs">Explicitly-provided
/// template arguments. Should be empty for an HLSL program.</param> <param
/// name="Args">Array of expressions being used as arguments.</param> <param
/// name="Specialization">The declaration for the resolved
/// specialization.</param> <param name="Info">Provides information about an
/// attempted template argument deduction.</param> <returns>The result of the
/// template deduction, TDK_Invalid if no HLSL-specific processing
/// done.</returns>
Sema::TemplateDeductionResult DeduceTemplateArgumentsForHLSL(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, TemplateDeductionInfo &Info);
clang::OverloadingResult
GetBestViableFunction(clang::SourceLocation Loc,
clang::OverloadCandidateSet &set,
clang::OverloadCandidateSet::iterator &Best);
/// <summary>
/// Initializes the specified <paramref name="initSequence" /> describing how
/// <paramref name="Entity" /> is initialized with <paramref name="Args" />.
/// </summary>
/// <param name="Entity">Entity being initialized; a variable, return result,
/// etc.</param> <param name="Kind">Kind of initialization: copying,
/// list-initializing, constructing, etc.</param> <param name="Args">Arguments
/// to the initialization.</param> <param name="TopLevelOfInitList">Whether
/// this is the top-level of an initialization list.</param> <param
/// name="initSequence">Initialization sequence description to
/// initialize.</param>
void InitializeInitSequenceForHLSL(const InitializedEntity &Entity,
const InitializationKind &Kind,
MultiExprArg Args, bool TopLevelOfInitList,
InitializationSequence *initSequence);
/// <summary>
/// Checks whether the specified conversion occurs to a type of idential
/// element type but less elements.
/// </summary>
/// <remarks>This is an important case because a cast of this type does not
/// turn an lvalue into an rvalue.</remarks>
bool IsConversionToLessOrEqualElements(const ExprResult &sourceExpr,
const QualType &targetType,
bool explicitConversion);
/// <summary>
/// Checks whether the specified conversion occurs to a type of idential
/// element type but less elements.
/// </summary>
/// <remarks>This is an important case because a cast of this type does not
/// turn an lvalue into an rvalue.</remarks>
bool IsConversionToLessOrEqualElements(const QualType &sourceType,
const QualType &targetType,
bool explicitConversion);
/// <summary>Performs a member lookup on the specified BaseExpr if it's a
/// matrix.</summary> <param name="BaseExpr">Base expression for member
/// access.</param> <param name="MemberName">Name of member to look
/// up.</param> <param name="IsArrow">Whether access is through arrow (a->b)
/// rather than period (a.b).</param> <param name="OpLoc">Location of access
/// operand.</param> <param name="MemberLoc">Location of member.</param>
/// <returns>Result of lookup operation.</returns>
ExprResult LookupMatrixMemberExprForHLSL(Expr &BaseExpr,
DeclarationName MemberName,
bool IsArrow, SourceLocation OpLoc,
SourceLocation MemberLoc);
/// <summary>Performs a member lookup on the specified BaseExpr if it's a
/// vector.</summary> <param name="BaseExpr">Base expression for member
/// access.</param> <param name="MemberName">Name of member to look
/// up.</param> <param name="IsArrow">Whether access is through arrow (a->b)
/// rather than period (a.b).</param> <param name="OpLoc">Location of access
/// operand.</param> <param name="MemberLoc">Location of member.</param>
/// <returns>Result of lookup operation.</returns>
ExprResult LookupVectorMemberExprForHLSL(Expr &BaseExpr,
DeclarationName MemberName,
bool IsArrow, SourceLocation OpLoc,
SourceLocation MemberLoc);
/// <summary>Performs a member lookup on the specified BaseExpr if it's an
/// array.</summary> <param name="BaseExpr">Base expression for member
/// access.</param> <param name="MemberName">Name of member to look
/// up.</param> <param name="IsArrow">Whether access is through arrow (a->b)
/// rather than period (a.b).</param> <param name="OpLoc">Location of access
/// operand.</param> <param name="MemberLoc">Location of member.</param>
/// <returns>Result of lookup operation.</returns>
ExprResult LookupArrayMemberExprForHLSL(Expr &BaseExpr,
DeclarationName MemberName,
bool IsArrow, SourceLocation OpLoc,
SourceLocation MemberLoc);
/// <summary>If E is a scalar, converts it to a 1-element vector. If E is a
/// Constant/TextureBuffer<T>, converts it to const T.</summary>
/// <param name="E">Expression to convert.</param>
/// <returns>The result of the conversion; or E if the type is not a
/// scalar.</returns>
ExprResult MaybeConvertMemberAccess(clang::Expr *E);
clang::Expr *HLSLImpCastToScalar(clang::Sema *self, clang::Expr *From,
ArTypeObjectKind FromShape,
ArBasicKind EltKind);
clang::ExprResult
PerformHLSLConversion(clang::Expr *From, clang::QualType targetType,
const clang::StandardConversionSequence &SCS,
clang::Sema::CheckedConversionKind CCK);
/// <summary>Diagnoses an error when precessing the specified type if nesting
/// is too deep.</summary>
void ReportUnsupportedTypeNesting(SourceLocation loc, QualType type);
/// <summary>
/// Checks if a static cast can be performed, and performs it if possible.
/// </summary>
/// <param name="SrcExpr">Expression to cast.</param>
/// <param name="DestType">Type to cast SrcExpr to.</param>
/// <param name="CCK">Kind of conversion: implicit, C-style, functional,
/// other.</param> <param name="OpRange">Source range for the cast
/// operation.</param> <param name="msg">Error message from the diag::*
/// enumeration to fail with; zero to suppress messages.</param> <param
/// name="Kind">The kind of operation required for a conversion.</param>
/// <param name="BasePath">A simple array of base specifiers.</param>
/// <param name="ListInitialization">Whether the cast is in the context of a
/// list initialization.</param> <param name="SuppressWarnings">Whether
/// warnings should be omitted.</param> <param name="SuppressErrors">Whether
/// errors should be omitted.</param>
bool TryStaticCastForHLSL(ExprResult &SrcExpr, QualType DestType,
Sema::CheckedConversionKind CCK,
const SourceRange &OpRange, unsigned &msg,
CastKind &Kind, CXXCastPath &BasePath,
bool ListInitialization, bool SuppressWarnings,
bool SuppressErrors,
StandardConversionSequence *standard);
/// <summary>
/// Checks if a subscript index argument can be initialized from the given
/// expression.
/// </summary>
/// <param name="SrcExpr">Source expression used as argument.</param>
/// <param name="DestType">Parameter type to initialize.</param>
/// <remarks>
/// Rules for subscript index initialization follow regular implicit casting
/// rules, with the exception that no changes in arity are allowed (i.e., int2
/// can become uint2, but uint or uint3 cannot).
/// </remarks>
ImplicitConversionSequence
TrySubscriptIndexInitialization(clang::Expr *SrcExpr,
clang::QualType DestType);
/// <summary>External-source hook: completes the definition of built-in HLSL
/// object record types on demand by populating their subscripts and
/// methods.</summary>
void CompleteType(TagDecl *Tag) override {
  // Only incomplete C++ record declarations are candidates.
  if (Tag->isCompleteDefinition() || !isa<CXXRecordDecl>(Tag))
    return;

  CXXRecordDecl *recordDecl = cast<CXXRecordDecl>(Tag);
  if (auto TDecl = dyn_cast<ClassTemplateSpecializationDecl>(recordDecl)) {
    // For a specialization, complete the templated pattern declaration
    // instead (once).
    recordDecl = TDecl->getSpecializedTemplate()->getTemplatedDecl();
    if (recordDecl->isCompleteDefinition())
      return;
  }

  // Map the record onto the built-in object-kind tables; -1 means this is
  // an ordinary user record, which we leave alone.
  int idx = FindObjectBasicKindIndex(recordDecl);
  // Not object type.
  if (idx == -1)
    return;
  ArBasicKind kind = g_ArBasicKindsAsTypes[idx];
  uint8_t templateArgCount = g_ArBasicKindsTemplateCount[idx];
  int startDepth = 0;
  if (templateArgCount > 0) {
    DXASSERT(templateArgCount <= 3, "otherwise a new case has been added");
    // Templated objects additionally get operator[] overloads; methods are
    // then added one template-depth deeper.
    ClassTemplateDecl *typeDecl = recordDecl->getDescribedClassTemplate();
    AddObjectSubscripts(kind, typeDecl, recordDecl,
                        g_ArBasicKindsSubscripts[idx]);
    startDepth = 1;
  }
  AddObjectMethods(kind, recordDecl, startDepth);
  recordDecl->completeDefinition();
}
/// <summary>Creates (or finds) the CXXMethodDecl specialization of an
/// intrinsic method template for the given resolved parameter types.</summary>
/// <param name="tableName">Name of the intrinsic table the method comes
/// from.</param>
/// <param name="lowering">Lowering string recorded on the intrinsic
/// attribute.</param>
/// <param name="intrinsic">Intrinsic description driving parameter names and
/// modifiers.</param>
/// <param name="FunctionTemplate">The method template being
/// specialized.</param>
/// <param name="Args">Call arguments (unused here beyond deduction already
/// performed by the caller).</param>
/// <param name="parameterTypes">Resolved types; index 0 is the return type,
/// 1..count-1 the parameters. May be adjusted in place (references added for
/// aggregates and out params).</param>
/// <param name="parameterTypeCount">Number of entries in
/// parameterTypes (>= 1).</param>
/// <returns>The existing or newly created specialization.</returns>
FunctionDecl *AddHLSLIntrinsicMethod(LPCSTR tableName, LPCSTR lowering,
                                     const HLSL_INTRINSIC *intrinsic,
                                     FunctionTemplateDecl *FunctionTemplate,
                                     ArrayRef<Expr *> Args,
                                     QualType *parameterTypes,
                                     size_t parameterTypeCount) {
  DXASSERT_NOMSG(intrinsic != nullptr);
  DXASSERT_NOMSG(FunctionTemplate != nullptr);
  DXASSERT_NOMSG(parameterTypes != nullptr);
  DXASSERT(parameterTypeCount >= 1,
           "otherwise caller didn't initialize - there should be at least a "
           "void return type");

  // Create the template arguments.
  SmallVector<TemplateArgument, g_MaxIntrinsicParamCount + 1> templateArgs;
  for (size_t i = 0; i < parameterTypeCount; i++) {
    templateArgs.push_back(TemplateArgument(parameterTypes[i]));
  }

  // Look for an existing specialization.
  void *InsertPos = nullptr;
  FunctionDecl *SpecFunc =
      FunctionTemplate->findSpecialization(templateArgs, InsertPos);
  if (SpecFunc != nullptr) {
    return SpecFunc;
  }

  // Change return type to lvalue reference type for aggregate types
  QualType retTy = parameterTypes[0];
  if (hlsl::IsHLSLAggregateType(retTy))
    parameterTypes[0] = m_context->getLValueReferenceType(retTy);

  // Create a new specialization.
  SmallVector<hlsl::ParameterModifier, g_MaxIntrinsicParamCount> paramMods;
  InitParamMods(intrinsic, paramMods);

  for (unsigned int i = 1; i < parameterTypeCount; i++) {
    // Change out/inout parameter type to lvalue reference type.
    if (paramMods[i - 1].isAnyOut()) {
      parameterTypes[i] =
          m_context->getLValueReferenceType(parameterTypes[i]);
    }
  }

  IntrinsicOp intrinOp = static_cast<IntrinsicOp>(intrinsic->Op);

  if (IsBuiltinTable(tableName) && intrinOp == IntrinsicOp::MOP_SampleBias) {
    // Remove this when update intrinsic table not affect other things.
    // Change vector<float,1> into float for bias.
    const unsigned biasOperandID = 3; // return type, sampler, coord, bias.
    DXASSERT(parameterTypeCount > biasOperandID,
             "else operation was misrecognized");
    if (const ExtVectorType *VecTy =
            hlsl::ConvertHLSLVecMatTypeToExtVectorType(
                *m_context, parameterTypes[biasOperandID])) {
      if (VecTy->getNumElements() == 1)
        parameterTypes[biasOperandID] = VecTy->getElementType();
    }
  }

  // Build the function type and its source info for the new method.
  DeclContext *owner = FunctionTemplate->getDeclContext();
  TemplateArgumentList templateArgumentList(
      TemplateArgumentList::OnStackType::OnStack, templateArgs.data(),
      templateArgs.size());
  MultiLevelTemplateArgumentList mlTemplateArgumentList(templateArgumentList);
  TemplateDeclInstantiator declInstantiator(*this->m_sema, owner,
                                            mlTemplateArgumentList);
  FunctionProtoType::ExtProtoInfo EmptyEPI;
  QualType functionType = m_context->getFunctionType(
      parameterTypes[0],
      ArrayRef<QualType>(parameterTypes + 1, parameterTypeCount - 1),
      EmptyEPI, paramMods);
  TypeSourceInfo *TInfo = m_context->CreateTypeSourceInfo(functionType, 0);
  FunctionProtoTypeLoc Proto =
      TInfo->getTypeLoc().getAs<FunctionProtoTypeLoc>();

  // Create a ParmVarDecl per parameter, named from the intrinsic table.
  SmallVector<ParmVarDecl *, g_MaxIntrinsicParamCount> Params;
  for (unsigned int i = 1; i < parameterTypeCount; i++) {
    IdentifierInfo *id =
        &m_context->Idents.get(StringRef(intrinsic->pArgs[i - 1].pName));
    ParmVarDecl *paramDecl = ParmVarDecl::Create(
        *m_context, nullptr, NoLoc, NoLoc, id, parameterTypes[i], nullptr,
        StorageClass::SC_None, nullptr, paramMods[i - 1]);
    Params.push_back(paramDecl);
  }

  QualType T = TInfo->getType();
  DeclarationNameInfo NameInfo(FunctionTemplate->getDeclName(), NoLoc);
  CXXMethodDecl *method = CXXMethodDecl::Create(
      *m_context, dyn_cast<CXXRecordDecl>(owner), NoLoc, NameInfo, T, TInfo,
      SC_Extern, InlineSpecifiedFalse, IsConstexprFalse, NoLoc);

  // Add intrinsic attr
  AddHLSLIntrinsicAttr(method, *m_context, tableName, lowering, intrinsic);

  // Record this function template specialization.
  TemplateArgumentList *argListCopy = TemplateArgumentList::CreateCopy(
      *m_context, templateArgs.data(), templateArgs.size());
  method->setFunctionTemplateSpecialization(FunctionTemplate, argListCopy, 0);

  // Attach the parameters
  for (unsigned P = 0; P < Params.size(); ++P) {
    Params[P]->setOwningFunction(method);
    Proto.setParam(P, Params[P]);
  }
  method->setParams(Params);

  // Adjust access.
  method->setAccess(AccessSpecifier::AS_public);
  FunctionTemplate->setAccess(method->getAccess());

  return method;
}
// Overload support.
// Declarations only; the semantics of each helper are established at its
// out-of-line definition. Per their names these appear to implement
// HLSL-specific overload scoring and element inspection - confirm at the
// definition sites.
UINT64 ScoreCast(QualType leftType, QualType rightType);
UINT64 ScoreFunction(OverloadCandidateSet::iterator &Cand);
UINT64 ScoreImplicitConversionSequence(const ImplicitConversionSequence *s);
unsigned GetNumElements(QualType anyType);
unsigned GetNumBasicElements(QualType anyType);
unsigned GetNumConvertCheckElts(QualType leftType, unsigned leftSize,
                                QualType rightType, unsigned rightSize);
QualType GetNthElementType(QualType type, unsigned index);
bool IsPromotion(ArBasicKind leftKind, ArBasicKind rightKind);
bool IsCast(ArBasicKind leftKind, ArBasicKind rightKind);
bool IsIntCast(ArBasicKind leftKind, ArBasicKind rightKind);
};
// Shared sentinel objects (per their "Unused" names) initialized to the
// neutral value of each enumeration, for callers that do not need the
// corresponding out-parameter.
TYPE_CONVERSION_REMARKS HLSLExternalSource::RemarksUnused =
    TYPE_CONVERSION_REMARKS::TYPE_CONVERSION_NONE;
ImplicitConversionKind HLSLExternalSource::ImplicitConversionKindUnused =
    ImplicitConversionKind::ICK_Identity;
// Use this class to flatten a type into HLSL primitives and iterate through
// them.
class FlattenedTypeIterator {
private:
  // Discriminates what a FlattenedTypeTracker is iterating over.
  enum FlattenedIterKind {
    FK_Simple,
    FK_Fields,
    FK_Expressions,
    FK_IncompleteArray,
    FK_Bases,
  };

  // Use this struct to represent a specific point in the tracked tree.
  // Only the members matching IterKind are meaningful for a given tracker;
  // the constructors below initialize the relevant subset.
  struct FlattenedTypeTracker {
    QualType Type;      // Type at this position in the tree.
    unsigned int Count; // Count of consecutive types
    CXXRecordDecl::base_class_iterator
        CurrentBase; // Current base for a structure type.
    CXXRecordDecl::base_class_iterator EndBase; // STL-style end of bases.
    RecordDecl::field_iterator
        CurrentField; // Current field in for a structure type.
    RecordDecl::field_iterator EndField; // STL-style end of fields.
    MultiExprArg::iterator CurrentExpr; // Current expression (advanceable for a
                                        // list of expressions).
    MultiExprArg::iterator EndExpr; // STL-style end of expressions.
    FlattenedIterKind IterKind;     // Kind of tracker.
    bool IsConsidered; // If a FlattenedTypeTracker already been considered.

    // Incomplete-array tracker: type known, element count open-ended.
    FlattenedTypeTracker(QualType type)
        : Type(type), Count(0), CurrentExpr(nullptr),
          IterKind(FK_IncompleteArray), IsConsidered(false) {}
    // Simple tracker: a run of 'count' identical types, optionally tied to
    // an expression.
    FlattenedTypeTracker(QualType type, unsigned int count,
                         MultiExprArg::iterator expression)
        : Type(type), Count(count), CurrentExpr(expression),
          IterKind(FK_Simple), IsConsidered(false) {}
    // Field tracker: walks a record's fields.
    FlattenedTypeTracker(QualType type, RecordDecl::field_iterator current,
                         RecordDecl::field_iterator end)
        : Type(type), Count(0), CurrentField(current), EndField(end),
          CurrentExpr(nullptr), IterKind(FK_Fields), IsConsidered(false) {}
    // Expression tracker: walks a list of initializer expressions.
    FlattenedTypeTracker(MultiExprArg::iterator current,
                         MultiExprArg::iterator end)
        : Count(0), CurrentExpr(current), EndExpr(end),
          IterKind(FK_Expressions), IsConsidered(false) {}
    // Base-class tracker: walks a record's base specifiers.
    FlattenedTypeTracker(QualType type,
                         CXXRecordDecl::base_class_iterator current,
                         CXXRecordDecl::base_class_iterator end)
        : Count(0), CurrentBase(current), EndBase(end), CurrentExpr(nullptr),
          IterKind(FK_Bases), IsConsidered(false) {}

    /// <summary>Gets the current expression if one is available.</summary>
    Expr *getExprOrNull() const { return CurrentExpr ? *CurrentExpr : nullptr; }
    /// <summary>Replaces the current expression.</summary>
    void replaceExpr(Expr *e) { *CurrentExpr = e; }
  };

  HLSLExternalSource &m_source; // Source driving the iteration.
  SmallVector<FlattenedTypeTracker, 4>
      m_typeTrackers; // Active stack of trackers.
  bool m_draining; // Whether the iterator is meant to drain (will not generate
                   // new elements in incomplete arrays).
  bool m_springLoaded; // Whether the current element has been set up by an
                       // incomplete array but hasn't been used yet.
  unsigned int
      m_incompleteCount; // The number of elements in an incomplete array.
  size_t m_typeDepth; // Depth of type analysis, to avoid stack overflows.
  QualType m_firstType; // Name of first type found, used for diagnostics.
  SourceLocation m_loc; // Location used for diagnostics.
  static const size_t MaxTypeDepth = 100;

  void advanceLeafTracker();
  /// <summary>Consumes leaves.</summary>
  void consumeLeaf();
  /// <summary>Considers whether the leaf has a usable expression without
  /// consuming anything.</summary>
  bool considerLeaf();
  /// <summary>Pushes a tracker for the specified expression; returns true if
  /// there is something to evaluate.</summary>
  bool pushTrackerForExpression(MultiExprArg::iterator expression);
  /// <summary>Pushes a tracker for the specified type; returns true if there is
  /// something to evaluate.</summary>
  bool pushTrackerForType(QualType type, MultiExprArg::iterator expression);

public:
  /// <summary>Constructs a FlattenedTypeIterator for the specified
  /// type.</summary>
  FlattenedTypeIterator(SourceLocation loc, QualType type,
                        HLSLExternalSource &source);
  /// <summary>Constructs a FlattenedTypeIterator for the specified
  /// arguments.</summary>
  FlattenedTypeIterator(SourceLocation loc, MultiExprArg args,
                        HLSLExternalSource &source);

  /// <summary>Gets the current element in the flattened type
  /// hierarchy.</summary>
  QualType getCurrentElement() const;
  /// <summary>Get the number of repeated current elements.</summary>
  unsigned int getCurrentElementSize() const;
  /// <summary>Gets the current element's Iterkind.</summary>
  FlattenedIterKind getCurrentElementKind() const {
    return m_typeTrackers.back().IterKind;
  }
  /// <summary>Checks whether the iterator has a current element type to
  /// report.</summary>
  bool hasCurrentElement() const;
  /// <summary>Consumes count elements on this iterator.</summary>
  void advanceCurrentElement(unsigned int count);
  /// <summary>Counts the remaining elements in this iterator (consuming all
  /// elements).</summary>
  unsigned int countRemaining();
  /// <summary>Gets the current expression if one is available.</summary>
  Expr *getExprOrNull() const { return m_typeTrackers.back().getExprOrNull(); }
  /// <summary>Replaces the current expression.</summary>
  void replaceExpr(Expr *e) { m_typeTrackers.back().replaceExpr(e); }

  // Result of comparing two flattened sequences element by element.
  struct ComparisonResult {
    unsigned int LeftCount;
    unsigned int RightCount;

    /// <summary>Whether elements from right sequence are identical into left
    /// sequence elements.</summary>
    bool AreElementsEqual;

    /// <summary>Whether elements from right sequence can be converted into left
    /// sequence elements.</summary>
    bool CanConvertElements;

    /// <summary>Whether the elements can be converted and the sequences have
    /// the same length.</summary>
    bool IsConvertibleAndEqualLength() const {
      return CanConvertElements && LeftCount == RightCount;
    }

    /// <summary>Whether the elements can be converted but the left-hand
    /// sequence is longer.</summary>
    bool IsConvertibleAndLeftLonger() const {
      return CanConvertElements && LeftCount > RightCount;
    }

    bool IsRightLonger() const { return RightCount > LeftCount; }

    bool IsEqualLength() const { return LeftCount == RightCount; }
  };

  static ComparisonResult CompareIterators(HLSLExternalSource &source,
                                           SourceLocation loc,
                                           FlattenedTypeIterator &leftIter,
                                           FlattenedTypeIterator &rightIter);
  static ComparisonResult CompareTypes(HLSLExternalSource &source,
                                       SourceLocation leftLoc,
                                       SourceLocation rightLoc, QualType left,
                                       QualType right);
  // Compares the arguments to initialize the left type, modifying them if
  // necessary.
  static ComparisonResult CompareTypesForInit(HLSLExternalSource &source,
                                              QualType left, MultiExprArg args,
                                              SourceLocation leftLoc,
                                              SourceLocation rightLoc);
};
// Returns the first template type argument of a class template
// specialization declaration, or an empty QualType when the declaration is
// not a specialization, has no arguments, or the first argument is not a
// type.
static QualType GetFirstElementTypeFromDecl(const Decl *decl) {
  const auto *spec = dyn_cast<ClassTemplateSpecializationDecl>(decl);
  if (!spec)
    return QualType();
  const TemplateArgumentList &args = spec->getTemplateArgs();
  if (args.size() == 0 ||
      args[0].getKind() != TemplateArgument::ArgKind::Type)
    return QualType();
  return args[0].getAsType();
}
void HLSLExternalSource::AddBaseTypes() {
DXASSERT(m_baseTypes[HLSLScalarType_unknown].isNull(),
"otherwise unknown was initialized to an actual type");
m_baseTypes[HLSLScalarType_bool] = m_context->BoolTy;
m_baseTypes[HLSLScalarType_int] = m_context->IntTy;
m_baseTypes[HLSLScalarType_uint] = m_context->UnsignedIntTy;
m_baseTypes[HLSLScalarType_dword] = m_context->UnsignedIntTy;
m_baseTypes[HLSLScalarType_half] = m_context->getLangOpts().UseMinPrecision
? m_context->HalfFloatTy
: m_context->HalfTy;
m_baseTypes[HLSLScalarType_float] = m_context->FloatTy;
m_baseTypes[HLSLScalarType_double] = m_context->DoubleTy;
m_baseTypes[HLSLScalarType_float_min10] = m_context->Min10FloatTy;
m_baseTypes[HLSLScalarType_float_min16] = m_context->Min16FloatTy;
m_baseTypes[HLSLScalarType_int_min12] = m_context->Min12IntTy;
m_baseTypes[HLSLScalarType_int_min16] = m_context->Min16IntTy;
m_baseTypes[HLSLScalarType_uint_min16] = m_context->Min16UIntTy;
m_baseTypes[HLSLScalarType_int8_4packed] = m_context->Int8_4PackedTy;
m_baseTypes[HLSLScalarType_uint8_4packed] = m_context->UInt8_4PackedTy;
m_baseTypes[HLSLScalarType_float_lit] = m_context->LitFloatTy;
m_baseTypes[HLSLScalarType_int_lit] = m_context->LitIntTy;
m_baseTypes[HLSLScalarType_int16] = m_context->ShortTy;
m_baseTypes[HLSLScalarType_int32] = m_context->IntTy;
m_baseTypes[HLSLScalarType_int64] = m_context->LongLongTy;
m_baseTypes[HLSLScalarType_uint16] = m_context->UnsignedShortTy;
m_baseTypes[HLSLScalarType_uint32] = m_context->UnsignedIntTy;
m_baseTypes[HLSLScalarType_uint64] = m_context->UnsignedLongLongTy;
m_baseTypes[HLSLScalarType_float16] = m_context->HalfTy;
m_baseTypes[HLSLScalarType_float32] = m_context->FloatTy;
m_baseTypes[HLSLScalarType_float64] = m_context->DoubleTy;
}
// Seeds m_scalarTypes from m_baseTypes for the subset of scalar kinds that
// are copied eagerly (bool/int/float/double and the two literal kinds).
// Other entries presumably remain null until created on demand — TODO confirm
// against the lookup path. Asserts the table has not been initialized yet.
void HLSLExternalSource::AddHLSLScalarTypes() {
  DXASSERT(m_scalarTypes[HLSLScalarType_unknown].isNull(),
           "otherwise unknown was initialized to an actual type");
  m_scalarTypes[HLSLScalarType_bool] = m_baseTypes[HLSLScalarType_bool];
  m_scalarTypes[HLSLScalarType_int] = m_baseTypes[HLSLScalarType_int];
  m_scalarTypes[HLSLScalarType_float] = m_baseTypes[HLSLScalarType_float];
  m_scalarTypes[HLSLScalarType_double] = m_baseTypes[HLSLScalarType_double];
  m_scalarTypes[HLSLScalarType_float_lit] =
      m_baseTypes[HLSLScalarType_float_lit];
  m_scalarTypes[HLSLScalarType_int_lit] = m_baseTypes[HLSLScalarType_int_lit];
}
// Caches the ASTContext's HLSL string type in this source's m_hlslStringType.
void HLSLExternalSource::AddHLSLStringType() {
  m_hlslStringType = m_context->HLSLStringTy;
}
// Creates (or returns an existing) specialization of an object's operator[]
// function template for the given element type.
//
// functionTemplate - the operator[] template on the object (e.g. a buffer).
// objectElement    - the object's element type; becomes the subscript result.
// findResult       - the object's basic-kind lookup; used to pick the
//                    subscript index cardinality from g_ArBasicKindsSubscripts.
//
// The subscript is templated only on its return type: the result is a
// (const, for read-only objects) lvalue reference to objectElement, and the
// index parameter is a uint or a uint vector of SubscriptCardinality.
FunctionDecl *HLSLExternalSource::AddSubscriptSpecialization(
    FunctionTemplateDecl *functionTemplate, QualType objectElement,
    const FindStructBasicTypeResult &findResult) {
  DXASSERT_NOMSG(functionTemplate != nullptr);
  DXASSERT_NOMSG(!objectElement.isNull());
  DXASSERT_NOMSG(findResult.Found());
  DXASSERT(g_ArBasicKindsSubscripts[findResult.BasicKindsAsTypeIndex]
                   .SubscriptCardinality > 0,
           "otherwise the template shouldn't have an operator[] that the "
           "caller is trying to specialize");
  // Subscript is templated only on its return type.
  // Create the template argument.
  // Non-RW objects return a const reference so writes through [] are rejected.
  bool isReadWrite = GetBasicKindProps(findResult.Kind) & BPROP_RWBUFFER;
  QualType resultType = objectElement;
  if (!isReadWrite)
    resultType = m_context->getConstType(resultType);
  resultType = m_context->getLValueReferenceType(resultType);
  TemplateArgument templateArgument(resultType);
  // Index type: plain uint for cardinality 1, otherwise uint<N>.
  unsigned subscriptCardinality =
      g_ArBasicKindsSubscripts[findResult.BasicKindsAsTypeIndex]
          .SubscriptCardinality;
  QualType subscriptIndexType =
      subscriptCardinality == 1
          ? m_context->UnsignedIntTy
          : NewSimpleAggregateType(AR_TOBJ_VECTOR, AR_BASIC_UINT32, 0, 1,
                                   subscriptCardinality);
  // Look for an existing specialization.
  void *InsertPos = nullptr;
  FunctionDecl *SpecFunc = functionTemplate->findSpecialization(
      ArrayRef<TemplateArgument>(&templateArgument, 1), InsertPos);
  if (SpecFunc != nullptr) {
    return SpecFunc;
  }
  // Create a new specialization.
  DeclContext *owner = functionTemplate->getDeclContext();
  TemplateArgumentList templateArgumentList(
      TemplateArgumentList::OnStackType::OnStack, &templateArgument, 1);
  MultiLevelTemplateArgumentList mlTemplateArgumentList(templateArgumentList);
  TemplateDeclInstantiator declInstantiator(*this->m_sema, owner,
                                            mlTemplateArgumentList);
  // Build the specialized function type: resultType operator[](index).
  const FunctionType *templateFnType =
      functionTemplate->getTemplatedDecl()->getType()->getAs<FunctionType>();
  const FunctionProtoType *protoType =
      dyn_cast<FunctionProtoType>(templateFnType);
  FunctionProtoType::ExtProtoInfo templateEPI = protoType->getExtProtoInfo();
  QualType functionType = m_context->getFunctionType(
      resultType, subscriptIndexType, templateEPI, None);
  TypeSourceInfo *TInfo = m_context->CreateTypeSourceInfo(functionType, 0);
  FunctionProtoTypeLoc Proto =
      TInfo->getTypeLoc().getAs<FunctionProtoTypeLoc>();
  // Single parameter named "index".
  IdentifierInfo *id = &m_context->Idents.get(StringRef("index"));
  ParmVarDecl *indexerParam = ParmVarDecl::Create(
      *m_context, nullptr, NoLoc, NoLoc, id, subscriptIndexType, nullptr,
      StorageClass::SC_None, nullptr);
  QualType T = TInfo->getType();
  DeclarationNameInfo NameInfo(functionTemplate->getDeclName(), NoLoc);
  CXXMethodDecl *method = CXXMethodDecl::Create(
      *m_context, dyn_cast<CXXRecordDecl>(owner), NoLoc, NameInfo, T, TInfo,
      SC_Extern, InlineSpecifiedFalse, IsConstexprFalse, NoLoc);
  // Add subscript attribute
  AddHLSLSubscriptAttr(method, *m_context, HLSubscriptOpcode::DefaultSubscript);
  // Record this function template specialization.
  method->setFunctionTemplateSpecialization(
      functionTemplate,
      TemplateArgumentList::CreateCopy(*m_context, &templateArgument, 1), 0);
  // Attach the parameters
  indexerParam->setOwningFunction(method);
  Proto.setParam(0, indexerParam);
  method->setParams(ArrayRef<ParmVarDecl *>(indexerParam));
  // Adjust access.
  method->setAccess(AccessSpecifier::AS_public);
  functionTemplate->setAccess(method->getAccess());
  return method;
}
/// <summary>
/// This routine combines Source into Target. If you have a symmetric operation
/// and want to treat either side equally you should call it twice, swapping the
/// parameter order.
/// </summary>
// Attempts to combine the object kind Source into Target (asymmetric; call
// twice with swapped arguments for symmetric treatment — see comment above).
// On success *pCombined receives Target; on failure it receives
// AR_BASIC_UNKNOWN. Returns whether the combination is legal.
static bool CombineObjectTypes(ArBasicKind Target, ArBasicKind Source,
                               ArBasicKind *pCombined) {
  bool combinable = false;
  if (Target == Source || Source == AR_OBJECT_NULL) {
    // Identical kinds always combine, and NULL is valid for any object type.
    combinable = true;
  } else {
    switch (Target) {
    AR_BASIC_ROBJECT_CASES:
      // Render objects accept a stateblock source.
      combinable = (Source == AR_OBJECT_STATEBLOCK);
      break;
    AR_BASIC_TEXTURE_CASES:
    AR_BASIC_NON_CMP_SAMPLER_CASES:
      // Textures and non-comparison samplers accept samplers or stateblocks.
      combinable =
          (Source == AR_OBJECT_SAMPLER || Source == AR_OBJECT_STATEBLOCK);
      break;
    case AR_OBJECT_SAMPLERCOMPARISON:
      combinable = (Source == AR_OBJECT_STATEBLOCK);
      break;
    default:
      // Not a combinable target.
      break;
    }
  }
  AssignOpt(combinable ? Target : AR_BASIC_UNKNOWN, pCombined);
  return combinable;
}
// Recursively infers a concrete (non-literal) basic kind for a literal
// expression tree: integer/float literals map to the narrowest 32/64-bit (or
// 16/32/64-bit float) kind that fits, and composite expressions combine the
// kinds of their operands via CombineBasicTypes.
static ArBasicKind LiteralToConcrete(Expr *litExpr,
                                     HLSLExternalSource *pHLSLExternalSource) {
  if (IntegerLiteral *intLit = dyn_cast<IntegerLiteral>(litExpr)) {
    llvm::APInt val = intLit->getValue();
    unsigned width = val.getActiveBits();
    bool isNeg = val.isNegative();
    if (isNeg) {
      // Signed.
      if (width <= 32)
        return ArBasicKind::AR_BASIC_INT32;
      else
        return ArBasicKind::AR_BASIC_INT64;
    } else {
      // Unsigned.
      if (width <= 32)
        return ArBasicKind::AR_BASIC_UINT32;
      else
        return ArBasicKind::AR_BASIC_UINT64;
    }
  } else if (FloatingLiteral *floatLit = dyn_cast<FloatingLiteral>(litExpr)) {
    llvm::APFloat val = floatLit->getValue();
    unsigned width = val.getSizeInBits(val.getSemantics());
    if (width <= 16)
      return ArBasicKind::AR_BASIC_FLOAT16;
    else if (width <= 32)
      return ArBasicKind::AR_BASIC_FLOAT32;
    else
      return AR_BASIC_FLOAT64;
  } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(litExpr)) {
    ArBasicKind kind = LiteralToConcrete(UO->getSubExpr(), pHLSLExternalSource);
    // Negation turns an unsigned inference into the signed equivalent.
    if (UO->getOpcode() == UnaryOperator::Opcode::UO_Minus) {
      if (kind == ArBasicKind::AR_BASIC_UINT32)
        kind = ArBasicKind::AR_BASIC_INT32;
      else if (kind == ArBasicKind::AR_BASIC_UINT64)
        kind = ArBasicKind::AR_BASIC_INT64;
    }
    return kind;
  } else if (HLSLVectorElementExpr *VEE =
                 dyn_cast<HLSLVectorElementExpr>(litExpr)) {
    // Swizzle: use the element kind of the swizzled result type.
    return pHLSLExternalSource->GetTypeElementKind(VEE->getType());
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(litExpr)) {
    ArBasicKind kind = LiteralToConcrete(BO->getLHS(), pHLSLExternalSource);
    ArBasicKind kind1 = LiteralToConcrete(BO->getRHS(), pHLSLExternalSource);
    CombineBasicTypes(kind, kind1, &kind);
    return kind;
  } else if (ParenExpr *PE = dyn_cast<ParenExpr>(litExpr)) {
    ArBasicKind kind = LiteralToConcrete(PE->getSubExpr(), pHLSLExternalSource);
    return kind;
  } else if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(litExpr)) {
    // cond ? a : b — combine the kinds of both branches.
    ArBasicKind kind = LiteralToConcrete(CO->getLHS(), pHLSLExternalSource);
    ArBasicKind kind1 = LiteralToConcrete(CO->getRHS(), pHLSLExternalSource);
    CombineBasicTypes(kind, kind1, &kind);
    return kind;
  } else if (ImplicitCastExpr *IC = dyn_cast<ImplicitCastExpr>(litExpr)) {
    // Use target Type for cast.
    ArBasicKind kind = pHLSLExternalSource->GetTypeElementKind(IC->getType());
    return kind;
  } else {
    // Could only be function call.
    CallExpr *CE = cast<CallExpr>(litExpr);
    // TODO: calculate the function call result.
    if (CE->getNumArgs() == 1)
      return LiteralToConcrete(CE->getArg(0), pHLSLExternalSource);
    else {
      // Multiple arguments: fold the argument kinds together left-to-right.
      ArBasicKind kind = LiteralToConcrete(CE->getArg(0), pHLSLExternalSource);
      for (unsigned i = 1; i < CE->getNumArgs(); i++) {
        ArBasicKind kindI =
            LiteralToConcrete(CE->getArg(i), pHLSLExternalSource);
        CombineBasicTypes(kind, kindI, &kind);
      }
      return kind;
    }
  }
}
// Linear scan for 'kind' in the legal-component-type table 'pCT'; the table
// is terminated by AR_BASIC_UNKNOWN or AR_BASIC_NOCAST.
static bool SearchTypeInTable(ArBasicKind kind, const ArBasicKind *pCT) {
  for (; *pCT != AR_BASIC_UNKNOWN && *pCT != AR_BASIC_NOCAST; ++pCT) {
    if (*pCT == kind)
      return true;
  }
  return false;
}
// Resolves a literal intrinsic argument to a concrete basic kind.
//
// litExpr              - the literal-typed argument expression.
// kind                 - the argument's current element kind
//                        (AR_BASIC_LITERAL_INT or AR_BASIC_LITERAL_FLOAT).
// uLegalComponentTypes - index into g_LegalIntrinsicCompTypes selecting the
//                        table of kinds legal for this parameter.
//
// Returns the inferred concrete kind if it is legal for the parameter, and
// otherwise the table's first non-literal entry (falling back to the table's
// first entry when it contains only literal kinds).
static ArBasicKind
ConcreteLiteralType(Expr *litExpr, ArBasicKind kind,
                    unsigned uLegalComponentTypes,
                    HLSLExternalSource *pHLSLExternalSource) {
  const ArBasicKind *pCT = g_LegalIntrinsicCompTypes[uLegalComponentTypes];
  ArBasicKind defaultKind = *pCT;
  // Use first non-literal kind as defaultKind.
  // (Loop variable renamed from 'kind' to 'tableKind': it previously
  // shadowed the parameter of the same name.)
  while (AR_BASIC_UNKNOWN != *pCT && AR_BASIC_NOCAST != *pCT) {
    ArBasicKind tableKind = *pCT;
    pCT++;
    // Skip literal type.
    if (tableKind == AR_BASIC_LITERAL_INT ||
        tableKind == AR_BASIC_LITERAL_FLOAT)
      continue;
    defaultKind = tableKind;
    break;
  }
  // Infer a concrete kind from the literal expression itself.
  ArBasicKind litKind = LiteralToConcrete(litExpr, pHLSLExternalSource);
  if (kind == AR_BASIC_LITERAL_INT) {
    // Search for a match first.
    // For a literal arg which doesn't affect the return type, the search
    // should always succeed -- unless a literal int is used on a
    // float-only parameter.
    if (SearchTypeInTable(litKind,
                          g_LegalIntrinsicCompTypes[uLegalComponentTypes]))
      return litKind;
    // Return the default.
    return defaultKind;
  } else {
    // Search for float32 first.
    if (SearchTypeInTable(AR_BASIC_FLOAT32,
                          g_LegalIntrinsicCompTypes[uLegalComponentTypes]))
      return AR_BASIC_FLOAT32;
    // Search for float64.
    if (SearchTypeInTable(AR_BASIC_FLOAT64,
                          g_LegalIntrinsicCompTypes[uLegalComponentTypes]))
      return AR_BASIC_FLOAT64;
    // Return the default.
    return defaultKind;
  }
}
// Validates that a builtin object's element type is legal for the method 'op'.
// Non-builtin tables are not checked (assumed fine). Sample* methods require
// a non-double float element, or (SM 6.7+) an integer element for the
// non-comparison variants; GatherRaw requires a scalar uint element.
bool HLSLExternalSource::IsValidObjectElement(LPCSTR tableName,
                                              const IntrinsicOp op,
                                              QualType objectElement) {
  // Only meant to exclude builtins, assume others are fine
  if (!IsBuiltinTable(tableName))
    return true;
  switch (op) {
  case IntrinsicOp::MOP_Sample:
  case IntrinsicOp::MOP_SampleBias:
  case IntrinsicOp::MOP_SampleCmp:
  case IntrinsicOp::MOP_SampleCmpLevel:
  case IntrinsicOp::MOP_SampleCmpLevelZero:
  case IntrinsicOp::MOP_SampleGrad:
  case IntrinsicOp::MOP_SampleLevel: {
    ArBasicKind kind = GetTypeElementKind(objectElement);
    UINT uBits = GET_BPROP_BITS(kind);
    // Any float element is legal except 64-bit (double) resources.
    if (IS_BASIC_FLOAT(kind) && uBits != BPROP_BITS64)
      return true;
    // 6.7 adds UINT sampler support
    if (IS_BASIC_UINT(kind) || IS_BASIC_SINT(kind)) {
      bool IsSampleC = (op == IntrinsicOp::MOP_SampleCmp ||
                        op == IntrinsicOp::MOP_SampleCmpLevel ||
                        op == IntrinsicOp::MOP_SampleCmpLevelZero);
      // SampleCmp* cannot support integer resource.
      if (IsSampleC)
        return false;
      // Integer sampling is gated on the active shader model being 6.7+.
      const auto *SM = hlsl::ShaderModel::GetByName(
          m_sema->getLangOpts().HLSLProfile.c_str());
      return SM->IsSM67Plus();
    }
    return false;
  }
  case IntrinsicOp::MOP_GatherRaw: {
    // Raw gather requires a single-component uint element.
    ArBasicKind kind = GetTypeElementKind(objectElement);
    UINT numEles = GetNumElements(objectElement);
    return IS_BASIC_UINT(kind) && numEles == 1;
  } break;
  default:
    return true;
  }
}
// Matches the actual call arguments against one intrinsic signature.
//
// cursor                  - iterator positioned at the candidate intrinsic;
//                           also supplies the owning table name.
// objectType/objectElement- the method's object and its element type (element
//                           may be overwritten for USER_DEFINED_TYPE params).
// functionTemplateTypeArg - explicit template argument, if the call gave one.
// Args                    - the call's argument expressions (no 'this').
// argTypesVector          - out: resolved types; slot 0 is the return type,
//                           slot i (i>=1) is parameter i's type.
// badArgIdx               - out: index of the first mismatching argument, or
//                           MaxIntrinsicArgs when everything matched.
//
// Returns true when every argument (and the return type) can be resolved
// against the intrinsic's template/component-type/size constraints.
//
// The algorithm runs in three passes over the intrinsic's argument records:
// 1) walk the call arguments, accumulating the shape (Template[]), element
//    kind (ComponentType[]) and special row/col sizes per template slot;
// 2) validate/default each accumulated slot against the legal tables;
// 3) materialize the concrete QualType for each parameter and the return.
bool HLSLExternalSource::MatchArguments(
    const IntrinsicDefIter &cursor, QualType objectType, QualType objectElement,
    QualType functionTemplateTypeArg, ArrayRef<Expr *> Args,
    std::vector<QualType> *argTypesVector, size_t &badArgIdx) {
  const HLSL_INTRINSIC *pIntrinsic = *cursor;
  LPCSTR tableName = cursor.GetTableName();
  IntrinsicOp builtinOp = IntrinsicOp::Num_Intrinsics;
  if (IsBuiltinTable(tableName))
    builtinOp = static_cast<IntrinsicOp>(pIntrinsic->Op);
  DXASSERT_NOMSG(pIntrinsic != nullptr);
  DXASSERT_NOMSG(argTypesVector != nullptr);
  std::vector<QualType> &argTypes = *argTypesVector;
  argTypes.clear();
  const bool isVariadic = IsVariadicIntrinsicFunction(pIntrinsic);
  static const UINT UnusedSize = 0xFF;
  static const BYTE MaxIntrinsicArgs = g_MaxIntrinsicParamCount + 1;
// CAB = "check argument bound": on failure records the bad argument index and
// bails out of the whole match.
#define CAB(cond, arg)                                                         \
  {                                                                            \
    if (!(cond)) {                                                             \
      badArgIdx = (arg);                                                       \
      return false;                                                            \
    }                                                                          \
  }
  ArTypeObjectKind
      Template[MaxIntrinsicArgs]; // Template type for each argument,
                                  // AR_TOBJ_UNKNOWN if unspecified.
  ArBasicKind
      ComponentType[MaxIntrinsicArgs]; // Component type for each argument,
                                       // AR_BASIC_UNKNOWN if unspecified.
  UINT uSpecialSize[IA_SPECIAL_SLOTS]; // row/col matching types, UNUSED_INDEX32
                                       // if unspecified.
  badArgIdx = MaxIntrinsicArgs;
  // Reset infos
  std::fill(Template, Template + _countof(Template), AR_TOBJ_UNKNOWN);
  std::fill(ComponentType, ComponentType + _countof(ComponentType),
            AR_BASIC_UNKNOWN);
  std::fill(uSpecialSize, uSpecialSize + _countof(uSpecialSize), UnusedSize);
  const unsigned retArgIdx = 0;
  unsigned retTypeIdx = pIntrinsic->pArgs[retArgIdx].uComponentTypeId;
  // Populate the template for each argument.
  ArrayRef<Expr *>::iterator iterArg = Args.begin();
  ArrayRef<Expr *>::iterator end = Args.end();
  // Note: iArg starts at 1 because slot 0 in pArgs is the return value.
  size_t iArg = 1;
  for (; iterArg != end; ++iterArg) {
    Expr *pCallArg = *iterArg;
    // If vararg is reached, we can break out of this loop.
    if (pIntrinsic->pArgs[iArg].uTemplateId == INTRIN_TEMPLATE_VARARGS)
      break;
    // Check bounds for non-variadic functions.
    if (iArg >= MaxIntrinsicArgs || iArg > pIntrinsic->uNumArgs) {
      // Currently never reached
      badArgIdx = iArg;
      return false;
    }
    const HLSL_INTRINSIC_ARGUMENT *pIntrinsicArg;
    pIntrinsicArg = &pIntrinsic->pArgs[iArg];
    DXASSERT(isVariadic ||
                 pIntrinsicArg->uTemplateId != INTRIN_TEMPLATE_VARARGS,
             "found vararg for non-variadic function");
    QualType pType = pCallArg->getType();
    ArTypeObjectKind TypeInfoShapeKind = GetTypeObjectKind(pType);
    ArBasicKind TypeInfoEltKind = GetTypeElementKind(pType);
    // RayDesc parameters must be exactly the RayDesc struct.
    if (pIntrinsicArg->uLegalComponentTypes == LICOMPTYPE_RAYDESC) {
      if (TypeInfoShapeKind == AR_TOBJ_COMPOUND) {
        if (CXXRecordDecl *pDecl = pType->getAsCXXRecordDecl()) {
          int index = FindObjectBasicKindIndex(pDecl);
          if (index != -1 &&
              AR_OBJECT_RAY_DESC == g_ArBasicKindsAsTypes[index]) {
            ++iArg;
            continue;
          }
        }
      }
      m_sema->Diag(pCallArg->getExprLoc(), diag::err_hlsl_ray_desc_required);
      badArgIdx = iArg;
      return false;
    }
    if (pIntrinsicArg->uLegalComponentTypes == LICOMPTYPE_USER_DEFINED_TYPE) {
      DXASSERT_NOMSG(objectElement.isNull());
      QualType Ty = pCallArg->getType();
      // Must be user define type for LICOMPTYPE_USER_DEFINED_TYPE arg.
      if (TypeInfoShapeKind != AR_TOBJ_COMPOUND) {
        m_sema->Diag(pCallArg->getExprLoc(),
                     diag::err_hlsl_no_struct_user_defined_type);
        badArgIdx = iArg;
        return false;
      }
      // The argument's struct type becomes the object element for later slots.
      objectElement = Ty;
      ++iArg;
      continue;
    }
    // If we are a type and templateID requires one, this isn't a match.
    if (pIntrinsicArg->uTemplateId == INTRIN_TEMPLATE_FROM_TYPE ||
        pIntrinsicArg->uTemplateId == INTRIN_TEMPLATE_FROM_FUNCTION) {
      ++iArg;
      continue;
    }
    // Verify TypeInfoEltKind can be cast to something legal for this param
    if (AR_BASIC_UNKNOWN != TypeInfoEltKind) {
      for (const ArBasicKind *pCT =
               g_LegalIntrinsicCompTypes[pIntrinsicArg->uLegalComponentTypes];
           AR_BASIC_UNKNOWN != *pCT; pCT++) {
        if (TypeInfoEltKind == *pCT)
          break;
        else if ((TypeInfoEltKind == AR_BASIC_LITERAL_INT &&
                  *pCT == AR_BASIC_LITERAL_FLOAT) ||
                 (TypeInfoEltKind == AR_BASIC_LITERAL_FLOAT &&
                  *pCT == AR_BASIC_LITERAL_INT))
          break;
        else if (*pCT == AR_BASIC_NOCAST) {
          // Reached the NOCAST sentinel without a match: no cast allowed.
          badArgIdx = std::min(badArgIdx, iArg);
        }
      }
    }
    if (TypeInfoEltKind == AR_BASIC_LITERAL_INT ||
        TypeInfoEltKind == AR_BASIC_LITERAL_FLOAT) {
      bool affectRetType =
          (iArg != retArgIdx && retTypeIdx == pIntrinsicArg->uComponentTypeId);
      // For literal arg which don't affect return type, find concrete type.
      // For literal arg affect return type,
      // TryEvalIntrinsic in CGHLSLMS.cpp will take care of cases
      // where all arguments are literal.
      // CombineBasicTypes will cover the rest cases.
      if (!affectRetType) {
        TypeInfoEltKind =
            ConcreteLiteralType(pCallArg, TypeInfoEltKind,
                                pIntrinsicArg->uLegalComponentTypes, this);
      }
    }
    UINT TypeInfoCols = 1;
    UINT TypeInfoRows = 1;
    switch (TypeInfoShapeKind) {
    case AR_TOBJ_MATRIX:
      GetRowsAndCols(pType, TypeInfoRows, TypeInfoCols);
      break;
    case AR_TOBJ_VECTOR:
      TypeInfoCols = GetHLSLVecSize(pType);
      break;
    case AR_TOBJ_BASIC:
    case AR_TOBJ_OBJECT:
    case AR_TOBJ_STRING:
    case AR_TOBJ_ARRAY:
      break;
    default:
      badArgIdx = std::min(badArgIdx, iArg); // no struct, arrays or void
    }
    DXASSERT(
        pIntrinsicArg->uTemplateId < MaxIntrinsicArgs,
        "otherwise intrinsic table was modified and g_MaxIntrinsicParamCount "
        "was not updated (or uTemplateId is out of bounds)");
    // Compare template
    if ((AR_TOBJ_UNKNOWN == Template[pIntrinsicArg->uTemplateId]) ||
        ((AR_TOBJ_SCALAR == Template[pIntrinsicArg->uTemplateId]) &&
         (AR_TOBJ_VECTOR == TypeInfoShapeKind ||
          AR_TOBJ_MATRIX == TypeInfoShapeKind))) {
      // Unrestricted or truncation of tuples to scalars are allowed
      Template[pIntrinsicArg->uTemplateId] = TypeInfoShapeKind;
    } else if (AR_TOBJ_SCALAR == TypeInfoShapeKind) {
      if (AR_TOBJ_SCALAR != Template[pIntrinsicArg->uTemplateId] &&
          AR_TOBJ_VECTOR != Template[pIntrinsicArg->uTemplateId] &&
          AR_TOBJ_MATRIX != Template[pIntrinsicArg->uTemplateId]) {
        // Scalars to tuples can be splatted, scalar to anything else is not
        // allowed
        badArgIdx = std::min(badArgIdx, iArg);
      }
    } else {
      if (TypeInfoShapeKind != Template[pIntrinsicArg->uTemplateId]) {
        // Outside of simple splats and truncations, templates must match
        badArgIdx = std::min(badArgIdx, iArg);
      }
    }
    // Process component type from object element after loop
    if (pIntrinsicArg->uComponentTypeId == INTRIN_COMPTYPE_FROM_TYPE_ELT0) {
      ++iArg;
      continue;
    }
    DXASSERT(pIntrinsicArg->uComponentTypeId < MaxIntrinsicArgs,
             "otherwise intrinsic table was modified and MaxIntrinsicArgs was "
             "not updated (or uComponentTypeId is out of bounds)");
    // Merge ComponentTypes
    if (AR_BASIC_UNKNOWN == ComponentType[pIntrinsicArg->uComponentTypeId]) {
      ComponentType[pIntrinsicArg->uComponentTypeId] = TypeInfoEltKind;
    } else {
      if (!CombineBasicTypes(ComponentType[pIntrinsicArg->uComponentTypeId],
                             TypeInfoEltKind,
                             &ComponentType[pIntrinsicArg->uComponentTypeId])) {
        badArgIdx = std::min(badArgIdx, iArg);
      }
    }
    // Rows
    if (AR_TOBJ_SCALAR != TypeInfoShapeKind) {
      if (pIntrinsicArg->uRows >= IA_SPECIAL_BASE) {
        // Special slot: record the minimum row count seen for this slot.
        UINT uSpecialId = pIntrinsicArg->uRows - IA_SPECIAL_BASE;
        CAB(uSpecialId < IA_SPECIAL_SLOTS, iArg);
        if (uSpecialSize[uSpecialId] > TypeInfoRows) {
          uSpecialSize[uSpecialId] = TypeInfoRows;
        }
      } else {
        if (TypeInfoRows < pIntrinsicArg->uRows) {
          badArgIdx = std::min(badArgIdx, iArg);
        }
      }
    }
    // Columns
    if (AR_TOBJ_SCALAR != TypeInfoShapeKind) {
      if (pIntrinsicArg->uCols >= IA_SPECIAL_BASE) {
        // Special slot: record the minimum column count seen for this slot.
        UINT uSpecialId = pIntrinsicArg->uCols - IA_SPECIAL_BASE;
        CAB(uSpecialId < IA_SPECIAL_SLOTS, iArg);
        if (uSpecialSize[uSpecialId] > TypeInfoCols) {
          uSpecialSize[uSpecialId] = TypeInfoCols;
        }
      } else {
        if (TypeInfoCols < pIntrinsicArg->uCols) {
          badArgIdx = std::min(badArgIdx, iArg);
        }
      }
    }
    ASTContext &actx = m_sema->getASTContext();
    // Usage
    // Argument must be non-constant and non-bitfield for out, inout, and ref
    // parameters because they may be treated as pass-by-reference.
    // This is hacky. We should actually be handling this by failing reference
    // binding in sema init with SK_BindReference*. That code path is currently
    // hacked off for HLSL and less trivial to fix.
    if (pIntrinsicArg->qwUsage & AR_QUAL_OUT ||
        pIntrinsicArg->qwUsage & AR_QUAL_REF) {
      if (pType.isConstant(actx) || pCallArg->getObjectKind() == OK_BitField) {
        // Can't use a const type in an out or inout parameter.
        badArgIdx = std::min(badArgIdx, iArg);
      }
    }
    iArg++;
  }
  DXASSERT(isVariadic || iterArg == end,
           "otherwise the argument list wasn't fully processed");
  // Default template and component type for return value
  if (pIntrinsic->pArgs[0].qwUsage &&
      pIntrinsic->pArgs[0].uTemplateId != INTRIN_TEMPLATE_FROM_TYPE &&
      pIntrinsic->pArgs[0].uTemplateId != INTRIN_TEMPLATE_FROM_FUNCTION &&
      pIntrinsic->pArgs[0].uComponentTypeId !=
          INTRIN_COMPTYPE_FROM_NODEOUTPUT) {
    CAB(pIntrinsic->pArgs[0].uTemplateId < MaxIntrinsicArgs, 0);
    if (AR_TOBJ_UNKNOWN == Template[pIntrinsic->pArgs[0].uTemplateId]) {
      Template[pIntrinsic->pArgs[0].uTemplateId] =
          g_LegalIntrinsicTemplates[pIntrinsic->pArgs[0].uLegalTemplates][0];
      if (pIntrinsic->pArgs[0].uComponentTypeId !=
          INTRIN_COMPTYPE_FROM_TYPE_ELT0) {
        DXASSERT_NOMSG(pIntrinsic->pArgs[0].uComponentTypeId <
                       MaxIntrinsicArgs);
        if (AR_BASIC_UNKNOWN ==
            ComponentType[pIntrinsic->pArgs[0].uComponentTypeId]) {
          // half return type should map to float for min precision
          if (pIntrinsic->pArgs[0].uLegalComponentTypes ==
                  LEGAL_INTRINSIC_COMPTYPES::LICOMPTYPE_FLOAT16 &&
              getSema()->getLangOpts().UseMinPrecision) {
            ComponentType[pIntrinsic->pArgs[0].uComponentTypeId] =
                ArBasicKind::AR_BASIC_FLOAT32;
          } else {
            ComponentType[pIntrinsic->pArgs[0].uComponentTypeId] =
                g_LegalIntrinsicCompTypes[pIntrinsic->pArgs[0]
                                              .uLegalComponentTypes][0];
          }
        }
      }
    }
  }
  // Make sure all template, component type, and texture type selections are
  // valid.
  for (size_t i = 0; i < Args.size() + 1; i++) {
    const HLSL_INTRINSIC_ARGUMENT *pArgument = &pIntrinsic->pArgs[i];
    // If vararg is reached, we can break out of this loop.
    if (pIntrinsic->pArgs[i].uTemplateId == INTRIN_TEMPLATE_VARARGS)
      break;
    // Check template.
    if (pArgument->uTemplateId == INTRIN_TEMPLATE_FROM_TYPE ||
        pArgument->uTemplateId == INTRIN_TEMPLATE_FROM_FUNCTION) {
      continue; // Already verified that this is available.
    }
    if (pArgument->uLegalComponentTypes == LICOMPTYPE_USER_DEFINED_TYPE) {
      continue;
    }
    const ArTypeObjectKind *pTT =
        g_LegalIntrinsicTemplates[pArgument->uLegalTemplates];
    if (AR_TOBJ_UNKNOWN != Template[i]) {
      if ((AR_TOBJ_SCALAR == Template[i]) &&
          (AR_TOBJ_VECTOR == *pTT || AR_TOBJ_MATRIX == *pTT)) {
        // Scalar splats to the first legal tuple shape.
        Template[i] = *pTT;
      } else {
        while (AR_TOBJ_UNKNOWN != *pTT) {
          if (Template[i] == *pTT)
            break;
          pTT++;
        }
      }
      if (AR_TOBJ_UNKNOWN == *pTT) {
        Template[i] = g_LegalIntrinsicTemplates[pArgument->uLegalTemplates][0];
        badArgIdx = std::min(badArgIdx, i);
      }
    } else if (pTT) {
      Template[i] = *pTT;
    }
    // Check component type.
    const ArBasicKind *pCT =
        g_LegalIntrinsicCompTypes[pArgument->uLegalComponentTypes];
    if (AR_BASIC_UNKNOWN != ComponentType[i]) {
      while (AR_BASIC_UNKNOWN != *pCT && AR_BASIC_NOCAST != *pCT) {
        if (ComponentType[i] == *pCT)
          break;
        pCT++;
      }
      // has to be a strict match
      if (*pCT == AR_BASIC_NOCAST) {
        badArgIdx = std::min(badArgIdx, i);
        // the match has failed, but the types are useful for errors. Present
        // the cannonical overload for error
        ComponentType[i] =
            g_LegalIntrinsicCompTypes[pArgument->uLegalComponentTypes][0];
      }
      // If it is an object, see if it can be cast to the first thing in the
      // list, otherwise move on to next intrinsic.
      if (AR_TOBJ_OBJECT == Template[i] && AR_BASIC_UNKNOWN == *pCT) {
        if (!CombineObjectTypes(
                g_LegalIntrinsicCompTypes[pArgument->uLegalComponentTypes][0],
                ComponentType[i], nullptr)) {
          badArgIdx = std::min(badArgIdx, i);
        }
      }
      if (AR_BASIC_UNKNOWN == *pCT) {
        ComponentType[i] =
            g_LegalIntrinsicCompTypes[pArgument->uLegalComponentTypes][0];
      }
    } else if (pCT) {
      ComponentType[i] = *pCT;
    }
  }
  argTypes.resize(1 + Args.size()); // +1 for return type
  // Default to a void return type.
  argTypes[0] = m_context->VoidTy;
  // Default specials sizes.
  for (UINT i = 0; i < IA_SPECIAL_SLOTS; i++) {
    if (UnusedSize == uSpecialSize[i]) {
      uSpecialSize[i] = 1;
    }
  }
  // Populate argTypes.
  for (size_t i = 0; i <= Args.size(); i++) {
    const HLSL_INTRINSIC_ARGUMENT *pArgument = &pIntrinsic->pArgs[i];
    // If vararg is reached, we can break out of this loop.
    if (pArgument->uTemplateId == INTRIN_TEMPLATE_VARARGS)
      break;
    if (!pArgument->qwUsage)
      continue;
    QualType pNewType;
    unsigned int quals = 0; // qualifications for this argument
    // If we have no type, set it to our input type (templatized)
    if (pArgument->uTemplateId == INTRIN_TEMPLATE_FROM_TYPE) {
      // Use the templated input type, but resize it if the
      // intrinsic's rows/cols isn't 0
      if (pArgument->uRows && pArgument->uCols) {
        UINT uRows, uCols = 0;
        // if type is overriden, use new type size, for
        // now it only supports scalars
        if (pArgument->uRows >= IA_SPECIAL_BASE) {
          UINT uSpecialId = pArgument->uRows - IA_SPECIAL_BASE;
          CAB(uSpecialId < IA_SPECIAL_SLOTS, i);
          uRows = uSpecialSize[uSpecialId];
        } else if (pArgument->uRows > 0) {
          uRows = pArgument->uRows;
        }
        if (pArgument->uCols >= IA_SPECIAL_BASE) {
          UINT uSpecialId = pArgument->uCols - IA_SPECIAL_BASE;
          CAB(uSpecialId < IA_SPECIAL_SLOTS, i);
          uCols = uSpecialSize[uSpecialId];
        } else if (pArgument->uCols > 0) {
          uCols = pArgument->uCols;
        }
        // 1x1 numeric outputs are always scalar.. since these
        // are most flexible
        if ((1 == uCols) && (1 == uRows)) {
          pNewType = objectElement;
          if (pNewType.isNull()) {
            badArgIdx = std::min(badArgIdx, i);
          }
        } else {
          // non-scalars unsupported right now since nothing
          // uses it, would have to create either a type
          // list for sub-structures or just resize the
          // given type
          // VH(E_NOTIMPL);
          badArgIdx = std::min(badArgIdx, i);
        }
      } else {
        DXASSERT_NOMSG(!pArgument->uRows && !pArgument->uCols);
        if (objectElement.isNull()) {
          badArgIdx = std::min(badArgIdx, i);
        }
        pNewType = objectElement;
      }
    } else if (pArgument->uTemplateId == INTRIN_TEMPLATE_FROM_FUNCTION) {
      if (functionTemplateTypeArg.isNull()) {
        if (i == 0) {
          // [RW]ByteAddressBuffer.Load, default to uint
          pNewType = m_context->UnsignedIntTy;
          if (builtinOp != hlsl::IntrinsicOp::MOP_Load)
            badArgIdx = std::min(badArgIdx, i);
        } else {
          // [RW]ByteAddressBuffer.Store, default to argument type
          pNewType = Args[i - 1]->getType().getNonReferenceType();
          if (const BuiltinType *BuiltinTy = pNewType->getAs<BuiltinType>()) {
            // For backcompat, ensure that Store(0, 42 or 42.0) matches a
            // uint/float overload rather than a uint64_t/double one.
            if (BuiltinTy->getKind() == BuiltinType::LitInt) {
              pNewType = m_context->UnsignedIntTy;
            } else if (BuiltinTy->getKind() == BuiltinType::LitFloat) {
              pNewType = m_context->FloatTy;
            }
          }
        }
      } else {
        pNewType = functionTemplateTypeArg;
      }
    } else if (pArgument->uLegalComponentTypes ==
               LICOMPTYPE_USER_DEFINED_TYPE) {
      if (objectElement.isNull()) {
        badArgIdx = std::min(badArgIdx, i);
      }
      pNewType = objectElement;
    } else if (i != 0 && Template[pArgument->uTemplateId] == AR_TOBJ_OBJECT) {
      // For object parameters, just use the argument type
      // Return type is assigned below
      pNewType = Args[i - 1]->getType().getNonReferenceType();
    } else if (pArgument->uLegalComponentTypes ==
               LICOMPTYPE_NODE_RECORD_OR_UAV) {
      pNewType = Args[i - 1]->getType().getNonReferenceType();
    } else if (pArgument->uLegalComponentTypes ==
               LICOMPTYPE_ANY_NODE_OUTPUT_RECORD) {
      pNewType = Args[i - 1]->getType().getNonReferenceType();
    } else {
      ArBasicKind pEltType;
      // ComponentType, if the Id is special then it gets the
      // component type from the first component of the type, if
      // we need more (for the second component, e.g.), then we
      // can use more specials, etc.
      if (pArgument->uComponentTypeId == INTRIN_COMPTYPE_FROM_TYPE_ELT0) {
        if (objectElement.isNull()) {
          badArgIdx = std::min(badArgIdx, i);
          return false;
        }
        pEltType = GetTypeElementKind(objectElement);
        if (!IsValidBasicKind(pEltType)) {
          // This can happen with Texture2D<Struct> or other invalid
          // declarations
          badArgIdx = std::min(badArgIdx, i);
          return false;
        }
      } else if (pArgument->uComponentTypeId ==
                 INTRIN_COMPTYPE_FROM_NODEOUTPUT) {
        // Node-output records: the type is a specialization of the
        // corresponding record template rather than a simple aggregate.
        ClassTemplateDecl *templateDecl = nullptr;
        if (pArgument->uLegalComponentTypes ==
            LICOMPTYPE_GROUP_NODE_OUTPUT_RECORDS)
          templateDecl = m_GroupNodeOutputRecordsTemplateDecl;
        else if (pArgument->uLegalComponentTypes ==
                 LICOMPTYPE_THREAD_NODE_OUTPUT_RECORDS)
          templateDecl = m_ThreadNodeOutputRecordsTemplateDecl;
        else {
          assert(false && "unexpected comp type");
        }
        CXXRecordDecl *recordDecl = templateDecl->getTemplatedDecl();
        if (!recordDecl->isCompleteDefinition()) {
          CompleteType(recordDecl);
        }
        pNewType = GetOrCreateNodeOutputRecordSpecialization(
            *m_context, m_sema, templateDecl, objectElement);
        argTypes[i] = QualType(pNewType.getTypePtr(), quals);
        continue;
      } else {
        pEltType = ComponentType[pArgument->uComponentTypeId];
        DXASSERT_VALIDBASICKIND(pEltType);
      }
      UINT uRows, uCols;
      // Rows
      if (pArgument->uRows >= IA_SPECIAL_BASE) {
        UINT uSpecialId = pArgument->uRows - IA_SPECIAL_BASE;
        CAB(uSpecialId < IA_SPECIAL_SLOTS, i);
        uRows = uSpecialSize[uSpecialId];
      } else {
        uRows = pArgument->uRows;
      }
      // Cols
      if (pArgument->uCols >= IA_SPECIAL_BASE) {
        UINT uSpecialId = pArgument->uCols - IA_SPECIAL_BASE;
        CAB(uSpecialId < IA_SPECIAL_SLOTS, i);
        uCols = uSpecialSize[uSpecialId];
      } else {
        uCols = pArgument->uCols;
      }
      // Verify that the final results are in bounds.
      CAB(uCols > 0 && uCols <= MaxVectorSize && uRows > 0 &&
              uRows <= MaxVectorSize,
          i);
      // Const
      UINT64 qwQual =
          pArgument->qwUsage &
          (AR_QUAL_ROWMAJOR | AR_QUAL_COLMAJOR | AR_QUAL_GROUPSHARED);
      // Return values and pure 'in' parameters are const.
      if ((0 == i) || !(pArgument->qwUsage & AR_QUAL_OUT))
        qwQual |= AR_QUAL_CONST;
      DXASSERT_VALIDBASICKIND(pEltType);
      pNewType = NewSimpleAggregateType(Template[pArgument->uTemplateId],
                                        pEltType, qwQual, uRows, uCols);
      // If array type, wrap in the argument's array type.
      if (i > 0 && Template[pArgument->uTemplateId] == AR_TOBJ_ARRAY) {
        QualType arrayElt = Args[i - 1]->getType();
        SmallVector<UINT, 4> sizes;
        // Collect the argument's array dimensions (0 = incomplete bound).
        while (arrayElt->isArrayType()) {
          UINT size = 0;
          if (arrayElt->isConstantArrayType()) {
            const ConstantArrayType *arrayType =
                (const ConstantArrayType *)arrayElt->getAsArrayTypeUnsafe();
            size = arrayType->getSize().getLimitedValue();
          }
          arrayElt = QualType(
              arrayElt->getAsArrayTypeUnsafe()->getArrayElementTypeNoTypeQual(),
              0);
          sizes.push_back(size);
        }
        // Wrap element in matching array dimensions:
        while (sizes.size()) {
          uint64_t size = sizes.pop_back_val();
          if (size) {
            pNewType = m_context->getConstantArrayType(
                pNewType, llvm::APInt(32, size, false),
                ArrayType::ArraySizeModifier::Normal, 0);
          } else {
            pNewType = m_context->getIncompleteArrayType(
                pNewType, ArrayType::ArraySizeModifier::Normal, 0);
          }
        }
        if (qwQual & AR_QUAL_CONST)
          pNewType = QualType(pNewType.getTypePtr(), Qualifiers::Const);
        if (qwQual & AR_QUAL_GROUPSHARED)
          pNewType =
              m_context->getAddrSpaceQualType(pNewType, DXIL::kTGSMAddrSpace);
        pNewType = m_context->getLValueReferenceType(pNewType);
      }
    }
    DXASSERT(!pNewType.isNull(), "otherwise there's a branch in this function "
                                 "that fails to assign this");
    argTypes[i] = QualType(pNewType.getTypePtr(), quals);
  }
  // For variadic functions, we need to add the additional arguments here.
  if (isVariadic) {
    for (; iArg <= Args.size(); ++iArg) {
      argTypes[iArg] = Args[iArg - 1]->getType().getNonReferenceType();
    }
  } else {
    DXASSERT(iArg == pIntrinsic->uNumArgs,
             "In the absence of varargs, a successful match would indicate we "
             "have as many arguments and types as the intrinsic template");
  }
  // For object return types that need to match arguments, we need to slot in
  // the full type here Can't do it sooner because when return is encountered
  // above, the other arg types haven't been set
  if (pIntrinsic->pArgs[0].uTemplateId < MaxIntrinsicArgs) {
    if (Template[pIntrinsic->pArgs[0].uTemplateId] == AR_TOBJ_OBJECT)
      argTypes[0] = argTypes[pIntrinsic->pArgs[0].uComponentTypeId];
  }
  return badArgIdx == MaxIntrinsicArgs;
#undef CAB
}
// Resolves the basic object kind for a method's declaration context.
// functionDeclContext may be a template specialization (e.g.
// AppendBuffer<MY_STRUCT>) or a plain class (e.g. RWByteAddressBuffer);
// anything else (such as the translation unit) yields AR_BASIC_UNKNOWN, so
// callers need not filter contexts themselves.
HLSLExternalSource::FindStructBasicTypeResult
HLSLExternalSource::FindStructBasicType(DeclContext *functionDeclContext) {
  DXASSERT_NOMSG(functionDeclContext != nullptr);
  const CXXRecordDecl *record =
      GetRecordDeclForBuiltInOrStruct(functionDeclContext);
  if (record == nullptr)
    return HLSLExternalSource::FindStructBasicTypeResult(AR_BASIC_UNKNOWN, 0);
  const int basicKindIndex = FindObjectBasicKindIndex(record);
  if (basicKindIndex == -1)
    return HLSLExternalSource::FindStructBasicTypeResult(AR_BASIC_UNKNOWN, 0);
  return HLSLExternalSource::FindStructBasicTypeResult(
      g_ArBasicKindsAsTypes[basicKindIndex], basicKindIndex);
}
// Looks up the intrinsic-method table for the object type that owns
// functionDeclContext. On success fills *name, *intrinsics and
// *intrinsicCount; when the context is not a recognized object they are left
// as nullptr/0.
void HLSLExternalSource::FindIntrinsicTable(DeclContext *functionDeclContext,
                                            const char **name,
                                            const HLSL_INTRINSIC **intrinsics,
                                            size_t *intrinsicCount) {
  DXASSERT_NOMSG(functionDeclContext != nullptr);
  DXASSERT_NOMSG(name != nullptr);
  DXASSERT_NOMSG(intrinsics != nullptr);
  DXASSERT_NOMSG(intrinsicCount != nullptr);
  // Default every output to "not found".
  *name = nullptr;
  *intrinsics = nullptr;
  *intrinsicCount = 0;
  HLSLExternalSource::FindStructBasicTypeResult lookup =
      FindStructBasicType(functionDeclContext);
  if (!lookup.Found())
    return;
  GetIntrinsicMethods(lookup.Kind, intrinsics, intrinsicCount);
  *name = g_ArBasicTypeNames[lookup.Kind];
}
// True for the arithmetic operators and their compound-assignment forms.
static bool BinaryOperatorKindIsArithmetic(BinaryOperatorKind Opc) {
  switch (Opc) {
  case BinaryOperatorKind::BO_Add:
  case BinaryOperatorKind::BO_AddAssign:
  case BinaryOperatorKind::BO_Sub:
  case BinaryOperatorKind::BO_SubAssign:
  case BinaryOperatorKind::BO_Rem:
  case BinaryOperatorKind::BO_RemAssign:
  case BinaryOperatorKind::BO_Div:
  case BinaryOperatorKind::BO_DivAssign:
  case BinaryOperatorKind::BO_Mul:
  case BinaryOperatorKind::BO_MulAssign:
    return true;
  default:
    return false;
  }
}
// True for every compound-assignment form, arithmetic or bitwise.
static bool BinaryOperatorKindIsCompoundAssignment(BinaryOperatorKind Opc) {
  switch (Opc) {
  // Arithmetic-and-assignment operators.
  case BinaryOperatorKind::BO_AddAssign:
  case BinaryOperatorKind::BO_SubAssign:
  case BinaryOperatorKind::BO_RemAssign:
  case BinaryOperatorKind::BO_DivAssign:
  case BinaryOperatorKind::BO_MulAssign:
  // Bitwise-and-assignment operators.
  case BinaryOperatorKind::BO_ShlAssign:
  case BinaryOperatorKind::BO_ShrAssign:
  case BinaryOperatorKind::BO_AndAssign:
  case BinaryOperatorKind::BO_OrAssign:
  case BinaryOperatorKind::BO_XorAssign:
    return true;
  default:
    return false;
  }
}
// True for the compound assignments that are also meaningful on bool
// operands (the logical-bitwise ones: &=, |=, ^=).
static bool
BinaryOperatorKindIsCompoundAssignmentForBool(BinaryOperatorKind Opc) {
  switch (Opc) {
  case BinaryOperatorKind::BO_AndAssign:
  case BinaryOperatorKind::BO_OrAssign:
  case BinaryOperatorKind::BO_XorAssign:
    return true;
  default:
    return false;
  }
}
// True for the bitwise operators (shifts, and, or, xor) and their
// compound-assignment forms.
static bool BinaryOperatorKindIsBitwise(BinaryOperatorKind Opc) {
  switch (Opc) {
  case BinaryOperatorKind::BO_Shl:
  case BinaryOperatorKind::BO_ShlAssign:
  case BinaryOperatorKind::BO_Shr:
  case BinaryOperatorKind::BO_ShrAssign:
  case BinaryOperatorKind::BO_And:
  case BinaryOperatorKind::BO_AndAssign:
  case BinaryOperatorKind::BO_Or:
  case BinaryOperatorKind::BO_OrAssign:
  case BinaryOperatorKind::BO_Xor:
  case BinaryOperatorKind::BO_XorAssign:
    return true;
  default:
    return false;
  }
}
// True for the shift operators and their compound-assignment forms.
static bool BinaryOperatorKindIsBitwiseShift(BinaryOperatorKind Opc) {
  switch (Opc) {
  case BinaryOperatorKind::BO_Shl:
  case BinaryOperatorKind::BO_ShlAssign:
  case BinaryOperatorKind::BO_Shr:
  case BinaryOperatorKind::BO_ShrAssign:
    return true;
  default:
    return false;
  }
}
// True for equality comparisons (== and !=).
static bool BinaryOperatorKindIsEqualComparison(BinaryOperatorKind Opc) {
  switch (Opc) {
  case BinaryOperatorKind::BO_EQ:
  case BinaryOperatorKind::BO_NE:
    return true;
  default:
    return false;
  }
}
// True for relational comparisons (<, >, <=, >=).
static bool BinaryOperatorKindIsOrderComparison(BinaryOperatorKind Opc) {
  switch (Opc) {
  case BinaryOperatorKind::BO_LT:
  case BinaryOperatorKind::BO_GT:
  case BinaryOperatorKind::BO_LE:
  case BinaryOperatorKind::BO_GE:
    return true;
  default:
    return false;
  }
}
// True for any comparison, relational or equality.
static bool BinaryOperatorKindIsComparison(BinaryOperatorKind Opc) {
  if (BinaryOperatorKindIsOrderComparison(Opc))
    return true;
  return BinaryOperatorKindIsEqualComparison(Opc);
}
// True for the short-circuiting logical operators (&& and ||).
static bool BinaryOperatorKindIsLogical(BinaryOperatorKind Opc) {
  switch (Opc) {
  case BinaryOperatorKind::BO_LAnd:
  case BinaryOperatorKind::BO_LOr:
    return true;
  default:
    return false;
  }
}
// Numeric operands are required by arithmetic, ordering, and logical
// operators.
static bool BinaryOperatorKindRequiresNumeric(BinaryOperatorKind Opc) {
  if (BinaryOperatorKindIsArithmetic(Opc))
    return true;
  return BinaryOperatorKindIsOrderComparison(Opc) ||
         BinaryOperatorKindIsLogical(Opc);
}
// Integral operands are required exactly by the bitwise operators.
static bool BinaryOperatorKindRequiresIntegrals(BinaryOperatorKind Opc) {
  return BinaryOperatorKindIsBitwise(Opc);
}
// Bitwise and arithmetic operators treat bool operands as numeric (they
// are promoted to int before use).
static bool BinaryOperatorKindRequiresBoolAsNumeric(BinaryOperatorKind Opc) {
  if (BinaryOperatorKindIsArithmetic(Opc))
    return true;
  return BinaryOperatorKindIsBitwise(Opc);
}
// Only bitwise-not (~) requires an integral operand among the unary ops.
static bool UnaryOperatorKindRequiresIntegrals(UnaryOperatorKind Opc) {
  return Opc == UnaryOperatorKind::UO_Not;
}
// True for the unary operators that require a numeric operand: logical not,
// unary plus/minus, and the increment/decrement forms.
static bool UnaryOperatorKindRequiresNumerics(UnaryOperatorKind Opc) {
  switch (Opc) {
  case UnaryOperatorKind::UO_LNot:
  case UnaryOperatorKind::UO_Plus:
  case UnaryOperatorKind::UO_Minus:
  // The omission in fxc caused objects and structs to accept these.
  case UnaryOperatorKind::UO_PreDec:
  case UnaryOperatorKind::UO_PreInc:
  case UnaryOperatorKind::UO_PostDec:
  case UnaryOperatorKind::UO_PostInc:
    return true;
  default:
    return false;
  }
}
// Increment/decrement write back to their operand and so require a
// modifiable lvalue.
static bool UnaryOperatorKindRequiresModifiableValue(UnaryOperatorKind Opc) {
  switch (Opc) {
  case UnaryOperatorKind::UO_PreDec:
  case UnaryOperatorKind::UO_PreInc:
  case UnaryOperatorKind::UO_PostDec:
  case UnaryOperatorKind::UO_PostInc:
    return true;
  default:
    return false;
  }
}
// Bitwise-not and unary plus/minus treat a bool operand as numeric.
static bool UnaryOperatorKindRequiresBoolAsNumeric(UnaryOperatorKind Opc) {
  switch (Opc) {
  case UnaryOperatorKind::UO_Not:
  case UnaryOperatorKind::UO_Plus:
  case UnaryOperatorKind::UO_Minus:
    return true;
  default:
    return false;
  }
}
// Increment/decrement are not allowed on bool operands.
static bool UnaryOperatorKindDisallowsBool(UnaryOperatorKind Opc) {
  switch (Opc) {
  case UnaryOperatorKind::UO_PreDec:
  case UnaryOperatorKind::UO_PreInc:
  case UnaryOperatorKind::UO_PostDec:
  case UnaryOperatorKind::UO_PostInc:
    return true;
  default:
    return false;
  }
}
// True for either increment form (pre or post).
static bool IsIncrementOp(UnaryOperatorKind Opc) {
  switch (Opc) {
  case UnaryOperatorKind::UO_PreInc:
  case UnaryOperatorKind::UO_PostInc:
    return true;
  default:
    return false;
  }
}
/// <summary>
/// Checks whether the specified AR_TOBJ* value is a primitive or aggregate of
/// primitive elements (as opposed to a built-in object like a sampler or
/// texture, or a void type).
/// </summary>
static bool IsObjectKindPrimitiveAggregate(ArTypeObjectKind value) {
  switch (value) {
  case AR_TOBJ_BASIC:
  case AR_TOBJ_MATRIX:
  case AR_TOBJ_VECTOR:
    return true;
  default:
    return false;
  }
}
// Integral kinds are any-int kinds plus bool.
static bool IsBasicKindIntegral(ArBasicKind value) {
  return IS_BASIC_AINT(value) || IS_BASIC_BOOL(value);
}
// True for min-precision signed integer kinds (e.g., min12int/min16int).
static bool IsBasicKindIntMinPrecision(ArBasicKind kind) {
  return IS_BASIC_SINT(kind) && IS_BASIC_MIN_PRECISION(kind);
}
// True when the kind's property flags include BPROP_NUMERIC.
static bool IsBasicKindNumeric(ArBasicKind value) {
  return (GetBasicKindProps(value) & BPROP_NUMERIC) != 0;
}
// If E is bool-typed (scalar, vector, or matrix of bool), wraps it in an
// implicit cast to the equivalently-shaped 32-bit int type; otherwise E is
// returned unchanged.
ExprResult HLSLExternalSource::PromoteToIntIfBool(ExprResult &E) {
  // An invalid expression is pass-through at this point.
  if (E.isInvalid()) {
    return E;
  }
  QualType qt = E.get()->getType();
  ArBasicKind elementKind = this->GetTypeElementKind(qt);
  if (elementKind != AR_BASIC_BOOL) {
    return E;
  }
  // Construct a scalar/vector/matrix type with the same shape as E.
  ArTypeObjectKind objectKind = this->GetTypeObjectKind(qt);
  QualType targetType;
  UINT colCount, rowCount;
  GetRowsAndColsForAny(qt, rowCount, colCount);
  targetType =
      NewSimpleAggregateType(objectKind, AR_BASIC_INT32, 0, rowCount, colCount)
          ->getCanonicalTypeInternal();
  // Materialize the load first so the cast below applies to an rvalue.
  if (E.get()->isLValue()) {
    E = m_sema->DefaultLvalueConversion(E.get()).get();
  }
  switch (objectKind) {
  case AR_TOBJ_SCALAR:
    return ImplicitCastExpr::Create(*m_context, targetType,
                                    CastKind::CK_IntegralCast, E.get(), nullptr,
                                    ExprValueKind::VK_RValue);
  case AR_TOBJ_ARRAY:
  case AR_TOBJ_VECTOR:
  case AR_TOBJ_MATRIX:
    // Aggregate shapes use the HLSL component-wise integral cast kind.
    return ImplicitCastExpr::Create(*m_context, targetType,
                                    CastKind::CK_HLSLCC_IntegralCast, E.get(),
                                    nullptr, ExprValueKind::VK_RValue);
  default:
    DXASSERT(false, "unsupported objectKind for PromoteToIntIfBool");
  }
  return E;
}
// Populates pTypeInfo with the shape kind, element kind/type, and row/column
// counts for 'type'.
void HLSLExternalSource::CollectInfo(QualType type, ArTypeInfo *pTypeInfo) {
  DXASSERT_NOMSG(pTypeInfo != nullptr);
  DXASSERT_NOMSG(!type.isNull());
  memset(pTypeInfo, 0, sizeof(*pTypeInfo));
  // TODO: Get* functions used here add up to a bunch of redundant code.
  // Try to inline that here, making it cheaper to use this function
  // when retrieving multiple properties.
  // NOTE(review): ObjKind is assigned the *element* kind here and EltKind
  // then copies ObjKind — confirm ArTypeInfo::ObjKind is intentionally the
  // basic (element) kind rather than the AR_TOBJ_* shape.
  pTypeInfo->ObjKind = GetTypeElementKind(type);
  pTypeInfo->ShapeKind = GetTypeObjectKind(type);
  GetRowsAndColsForAny(type, pTypeInfo->uRows, pTypeInfo->uCols);
  pTypeInfo->EltKind = pTypeInfo->ObjKind;
  pTypeInfo->EltTy =
      GetTypeElementType(type)->getCanonicalTypeUnqualified()->getTypePtr();
  pTypeInfo->uTotalElts = pTypeInfo->uRows * pTypeInfo->uCols;
}
// Highest possible score (i.e., worst possible score).
static const UINT64 SCORE_MAX = 0xFFFFFFFFFFFFFFFF;
// Leave the first two score bits to handle higher-level
// variations like target type.
#define SCORE_MIN_SHIFT 2
// Space scores seven bits apart so that up to 128 parameters can vary per
// condition without one score set spilling into the next.
#define SCORE_PARAM_SHIFT 7
// Returns the flattened element count of 'anyType': scalars, objects, and
// strings count as one; structs sum their fields; arrays, matrices, and
// vectors report their element count. Void (and null) yields zero.
unsigned HLSLExternalSource::GetNumElements(QualType anyType) {
  if (anyType.isNull())
    return 0;
  anyType = GetStructuralForm(anyType);
  const ArTypeObjectKind shape = GetTypeObjectKind(anyType);
  switch (shape) {
  case AR_TOBJ_BASIC:
  case AR_TOBJ_OBJECT:
  case AR_TOBJ_STRING:
    return 1;
  case AR_TOBJ_COMPOUND: {
    // TODO: consider caching this value for perf
    unsigned total = 0;
    const RecordType *recordType = anyType->getAs<RecordType>();
    for (const FieldDecl *field : recordType->getDecl()->fields())
      total += GetNumElements(field->getType());
    return total;
  }
  case AR_TOBJ_ARRAY:
  case AR_TOBJ_MATRIX:
  case AR_TOBJ_VECTOR:
    return GetElementCount(anyType);
  default:
    DXASSERT(shape == AR_TOBJ_VOID,
             "otherwise the type cannot be classified or is not supported");
    return 0;
  }
}
// Like GetNumElements, but for structs it also counts the fields inherited
// from base classes, and arrays multiply their length by the flattened size
// of the element type.
unsigned HLSLExternalSource::GetNumBasicElements(QualType anyType) {
  if (anyType.isNull())
    return 0;
  anyType = GetStructuralForm(anyType);
  const ArTypeObjectKind shape = GetTypeObjectKind(anyType);
  switch (shape) {
  case AR_TOBJ_BASIC:
  case AR_TOBJ_OBJECT:
  case AR_TOBJ_STRING:
    return 1;
  case AR_TOBJ_COMPOUND: {
    // TODO: consider caching this value for perf
    unsigned total = 0;
    const RecordType *recordType = anyType->getAs<RecordType>();
    RecordDecl *RD = recordType->getDecl();
    // Count base-class contributions first so they precede own fields.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &base : CXXRD->bases()) {
        const CXXRecordDecl *baseDecl =
            cast<CXXRecordDecl>(base.getType()->castAs<RecordType>()->getDecl());
        if (baseDecl->field_empty())
          continue;
        total += GetNumBasicElements(QualType(baseDecl->getTypeForDecl(), 0));
      }
    }
    for (const FieldDecl *field : RD->fields())
      total += GetNumBasicElements(field->getType());
    return total;
  }
  case AR_TOBJ_ARRAY: {
    const unsigned length = GetElementCount(anyType);
    const unsigned perElement = GetNumBasicElements(
        QualType(anyType->getArrayElementTypeNoTypeQual(), 0));
    return length * perElement;
  }
  case AR_TOBJ_MATRIX:
  case AR_TOBJ_VECTOR:
    return GetElementCount(anyType);
  default:
    DXASSERT(shape == AR_TOBJ_VOID,
             "otherwise the type cannot be classified or is not supported");
    return 0;
  }
}
// Returns how many flattened elements need to be checked to validate a
// conversion from rightType (source) to leftType (destination). For
// non-array types this is simply the destination size; for array-to-array
// conversions only enough elements to cover every dst/src alignment are
// compared.
unsigned HLSLExternalSource::GetNumConvertCheckElts(QualType leftType,
                                                    unsigned leftSize,
                                                    QualType rightType,
                                                    unsigned rightSize) {
  // We can convert from a larger type to a smaller
  // but not a smaller type to a larger so default
  // to just comparing the destination size.
  unsigned uElts = leftSize;
  leftType = GetStructuralForm(leftType);
  rightType = GetStructuralForm(rightType);
  if (leftType->isArrayType() && rightType->isArrayType()) {
    //
    // If we're comparing arrays we don't
    // need to compare every element of
    // the arrays since all elements
    // will have the same type.
    // We only need to compare enough
    // elements that we've tried every
    // possible mix of dst and src elements.
    //
    // TODO: handle multidimensional arrays and arrays of arrays
    QualType pDstElt = leftType->getAsArrayTypeUnsafe()->getElementType();
    unsigned uDstEltSize = GetNumElements(pDstElt);
    QualType pSrcElt = rightType->getAsArrayTypeUnsafe()->getElementType();
    unsigned uSrcEltSize = GetNumElements(pSrcElt);
    if (uDstEltSize == uSrcEltSize) {
      uElts = uDstEltSize;
    } else if (uDstEltSize > uSrcEltSize) {
      // If one size is not an even multiple of the other we need to let the
      // full compare run in order to try all alignments.
      if (uSrcEltSize && (uDstEltSize % uSrcEltSize) == 0) {
        uElts = uDstEltSize;
      }
    } else if (uDstEltSize && (uSrcEltSize % uDstEltSize) == 0) {
      uElts = uSrcEltSize;
    }
  }
  return uElts;
}
// Returns the type of the flattened element at position 'index' within
// 'type', or an empty QualType when the index is out of range.
QualType HLSLExternalSource::GetNthElementType(QualType type, unsigned index) {
  if (type.isNull()) {
    return type;
  }
  ArTypeObjectKind kind = GetTypeObjectKind(type);
  switch (kind) {
  case AR_TOBJ_BASIC:
  case AR_TOBJ_OBJECT:
  case AR_TOBJ_STRING:
    // Non-aggregates have exactly one element: index 0.
    return (index == 0) ? type : QualType();
  case AR_TOBJ_COMPOUND: {
    // TODO: consider caching this value for perf
    // Walk fields, descending into the field that covers 'index'.
    const RecordType *recordType = type->getAs<RecordType>();
    RecordDecl::field_iterator fi = recordType->getDecl()->field_begin();
    RecordDecl::field_iterator fend = recordType->getDecl()->field_end();
    while (fi != fend) {
      if (!fi->getType().isNull()) {
        unsigned subElements = GetNumElements(fi->getType());
        if (index < subElements) {
          return GetNthElementType(fi->getType(), index);
        } else {
          index -= subElements;
        }
      }
      ++fi;
    }
    return QualType();
  }
  case AR_TOBJ_ARRAY: {
    unsigned arraySize;
    QualType elementType;
    unsigned elementCount;
    elementType =
        type.getNonReferenceType()->getAsArrayTypeUnsafe()->getElementType();
    // NOTE(review): elementCount is GetElementCount of the *element* type,
    // not the array length — confirm this flattening matches GetNumElements
    // for arrays of structs/nested arrays.
    elementCount = GetElementCount(elementType);
    if (index < elementCount) {
      return GetNthElementType(elementType, index);
    }
    arraySize = GetArraySize(type);
    if (index >= arraySize * elementCount) {
      return QualType();
    }
    return GetNthElementType(elementType, index % elementCount);
  }
  case AR_TOBJ_MATRIX:
  case AR_TOBJ_VECTOR:
    return (index < GetElementCount(type)) ? GetMatrixOrVectorElementType(type)
                                           : QualType();
  default:
    DXASSERT(kind == AR_TOBJ_VOID,
             "otherwise the type cannot be classified or is not supported");
    return QualType();
  }
}
// Returns true when converting rightKind to leftKind is a promotion: a
// widening within the same numeric class (e.g., half -> float, int16 ->
// int32). For backwards compatibility, signedness is ignored between
// same-width fixed integer types. Exact matches are not promotions.
bool HLSLExternalSource::IsPromotion(ArBasicKind leftKind,
                                     ArBasicKind rightKind) {
  // Eliminate exact matches first, then check for promotions.
  if (leftKind == rightKind) {
    return false;
  }
  switch (rightKind) {
  case AR_BASIC_FLOAT16:
    switch (leftKind) {
    case AR_BASIC_FLOAT32:
    case AR_BASIC_FLOAT32_PARTIAL_PRECISION:
    case AR_BASIC_FLOAT64:
      return true;
    default:
      return false; // No other type is a promotion.
    }
    break;
  case AR_BASIC_FLOAT32_PARTIAL_PRECISION:
    switch (leftKind) {
    case AR_BASIC_FLOAT32:
    case AR_BASIC_FLOAT64:
      return true;
    default:
      return false; // No other type is a promotion.
    }
    break;
  case AR_BASIC_FLOAT32:
    switch (leftKind) {
    case AR_BASIC_FLOAT64:
      return true;
    default:
      return false; // No other type is a promotion.
    }
    break;
  case AR_BASIC_MIN10FLOAT:
    switch (leftKind) {
    case AR_BASIC_MIN16FLOAT:
    case AR_BASIC_FLOAT16:
    case AR_BASIC_FLOAT32:
    case AR_BASIC_FLOAT32_PARTIAL_PRECISION:
    case AR_BASIC_FLOAT64:
      return true;
    default:
      return false; // No other type is a promotion.
    }
    break;
  case AR_BASIC_MIN16FLOAT:
    switch (leftKind) {
    case AR_BASIC_FLOAT16:
    case AR_BASIC_FLOAT32:
    case AR_BASIC_FLOAT32_PARTIAL_PRECISION:
    case AR_BASIC_FLOAT64:
      return true;
    default:
      return false; // No other type is a promotion.
    }
    break;
  case AR_BASIC_INT8:
  case AR_BASIC_UINT8:
    // For backwards compat we consider signed/unsigned the same.
    switch (leftKind) {
    case AR_BASIC_INT16:
    case AR_BASIC_INT32:
    case AR_BASIC_INT64:
    case AR_BASIC_UINT16:
    case AR_BASIC_UINT32:
    case AR_BASIC_UINT64:
      return true;
    default:
      return false; // No other type is a promotion.
    }
    break;
  case AR_BASIC_INT16:
  case AR_BASIC_UINT16:
    // For backwards compat we consider signed/unsigned the same.
    switch (leftKind) {
    case AR_BASIC_INT32:
    case AR_BASIC_INT64:
    case AR_BASIC_UINT32:
    case AR_BASIC_UINT64:
      return true;
    default:
      return false; // No other type is a promotion.
    }
    break;
  case AR_BASIC_INT32:
  case AR_BASIC_UINT32:
    // For backwards compat we consider signed/unsigned the same.
    switch (leftKind) {
    case AR_BASIC_INT64:
    case AR_BASIC_UINT64:
      return true;
    default:
      return false; // No other type is a promotion.
    }
    break;
  case AR_BASIC_MIN12INT:
    switch (leftKind) {
    case AR_BASIC_MIN16INT:
    case AR_BASIC_INT32:
    case AR_BASIC_INT64:
      return true;
    default:
      return false; // No other type is a promotion.
    }
    break;
  case AR_BASIC_MIN16INT:
    switch (leftKind) {
    case AR_BASIC_INT32:
    case AR_BASIC_INT64:
      return true;
    default:
      return false; // No other type is a promotion.
    }
    break;
  case AR_BASIC_MIN16UINT:
    switch (leftKind) {
    case AR_BASIC_UINT32:
    case AR_BASIC_UINT64:
      return true;
    default:
      return false; // No other type is a promotion.
    }
    break;
  }
  // Source kinds not listed above never promote.
  return false;
}
// Returns true when converting rightKind to leftKind requires a cast (a
// representation change). Exact matches, literal-to-concrete conversions,
// and same-width signed/unsigned pairs return false (not a cast).
bool HLSLExternalSource::IsCast(ArBasicKind leftKind, ArBasicKind rightKind) {
  // Eliminate exact matches first, then check for casts.
  if (leftKind == rightKind) {
    return false;
  }
  //
  // All minimum-bits types are only considered matches of themselves
  // and thus are not in this table.
  //
  switch (leftKind) {
  case AR_BASIC_LITERAL_INT:
    switch (rightKind) {
    case AR_BASIC_INT8:
    case AR_BASIC_INT16:
    case AR_BASIC_INT32:
    case AR_BASIC_INT64:
    case AR_BASIC_UINT8:
    case AR_BASIC_UINT16:
    case AR_BASIC_UINT32:
    case AR_BASIC_UINT64:
      return false;
    default:
      break; // No other valid cast types
    }
    break;
  case AR_BASIC_INT8:
    switch (rightKind) {
    // For backwards compat we consider signed/unsigned the same.
    case AR_BASIC_LITERAL_INT:
    case AR_BASIC_UINT8:
      return false;
    default:
      break; // No other valid cast types
    }
    break;
  case AR_BASIC_INT16:
    switch (rightKind) {
    // For backwards compat we consider signed/unsigned the same.
    case AR_BASIC_LITERAL_INT:
    case AR_BASIC_UINT16:
      return false;
    default:
      break; // No other valid cast types
    }
    break;
  case AR_BASIC_INT32:
    switch (rightKind) {
    // For backwards compat we consider signed/unsigned the same.
    case AR_BASIC_LITERAL_INT:
    case AR_BASIC_UINT32:
      return false;
    default:
      break; // No other valid cast types.
    }
    break;
  case AR_BASIC_INT64:
    switch (rightKind) {
    // For backwards compat we consider signed/unsigned the same.
    case AR_BASIC_LITERAL_INT:
    case AR_BASIC_UINT64:
      return false;
    default:
      break; // No other valid cast types.
    }
    break;
  case AR_BASIC_UINT8:
    switch (rightKind) {
    // For backwards compat we consider signed/unsigned the same.
    case AR_BASIC_LITERAL_INT:
    case AR_BASIC_INT8:
      return false;
    default:
      break; // No other valid cast types.
    }
    break;
  case AR_BASIC_UINT16:
    switch (rightKind) {
    // For backwards compat we consider signed/unsigned the same.
    case AR_BASIC_LITERAL_INT:
    case AR_BASIC_INT16:
      return false;
    default:
      break; // No other valid cast types.
    }
    break;
  case AR_BASIC_UINT32:
    switch (rightKind) {
    // For backwards compat we consider signed/unsigned the same.
    case AR_BASIC_LITERAL_INT:
    case AR_BASIC_INT32:
      return false;
    default:
      break; // No other valid cast types.
    }
    break;
  case AR_BASIC_UINT64:
    switch (rightKind) {
    // For backwards compat we consider signed/unsigned the same.
    case AR_BASIC_LITERAL_INT:
    case AR_BASIC_INT64:
      return false;
    default:
      break; // No other valid cast types.
    }
    break;
  case AR_BASIC_LITERAL_FLOAT:
    switch (rightKind) {
    case AR_BASIC_LITERAL_FLOAT:
    case AR_BASIC_FLOAT16:
    case AR_BASIC_FLOAT32:
    case AR_BASIC_FLOAT32_PARTIAL_PRECISION:
    case AR_BASIC_FLOAT64:
      return false;
    default:
      break; // No other valid cast types.
    }
    break;
  case AR_BASIC_FLOAT16:
    switch (rightKind) {
    case AR_BASIC_LITERAL_FLOAT:
      return false;
    default:
      break; // No other valid cast types.
    }
    break;
  case AR_BASIC_FLOAT32_PARTIAL_PRECISION:
    switch (rightKind) {
    case AR_BASIC_LITERAL_FLOAT:
      return false;
    default:
      break; // No other valid cast types.
    }
    break;
  case AR_BASIC_FLOAT32:
    switch (rightKind) {
    case AR_BASIC_LITERAL_FLOAT:
      return false;
    default:
      break; // No other valid cast types.
    }
    break;
  case AR_BASIC_FLOAT64:
    switch (rightKind) {
    case AR_BASIC_LITERAL_FLOAT:
      return false;
    default:
      break; // No other valid cast types.
    }
    break;
  default:
    break; // No other relevant targets.
  }
  // Any pair not excluded above is a cast.
  return true;
}
// Returns true unless the two kinds match exactly or the conversion is a
// literal-to-concrete (or concrete-to-literal) conversion within the same
// numeric family — those are not counted as casts.
bool HLSLExternalSource::IsIntCast(ArBasicKind leftKind,
                                   ArBasicKind rightKind) {
  // Eliminate exact matches first, then check for casts.
  if (leftKind == rightKind) {
    return false;
  }
  //
  // All minimum-bits types are only considered matches of themselves
  // and thus are not in this table.
  //
  switch (leftKind) {
  case AR_BASIC_LITERAL_INT:
    switch (rightKind) {
    case AR_BASIC_INT8:
    case AR_BASIC_INT16:
    case AR_BASIC_INT32:
    case AR_BASIC_INT64:
    case AR_BASIC_UINT8:
    case AR_BASIC_UINT16:
    case AR_BASIC_UINT32:
    case AR_BASIC_UINT64:
      return false;
    default:
      break; // No other valid conversions
    }
    break;
  case AR_BASIC_INT8:
  case AR_BASIC_INT16:
  case AR_BASIC_INT32:
  case AR_BASIC_INT64:
  case AR_BASIC_UINT8:
  case AR_BASIC_UINT16:
  case AR_BASIC_UINT32:
  case AR_BASIC_UINT64:
    switch (rightKind) {
    case AR_BASIC_LITERAL_INT:
      return false;
    default:
      break; // No other valid conversions
    }
    break;
  case AR_BASIC_LITERAL_FLOAT:
    switch (rightKind) {
    case AR_BASIC_LITERAL_FLOAT:
    case AR_BASIC_FLOAT16:
    case AR_BASIC_FLOAT32:
    case AR_BASIC_FLOAT32_PARTIAL_PRECISION:
    case AR_BASIC_FLOAT64:
      return false;
    default:
      break; // No other valid conversions
    }
    break;
  case AR_BASIC_FLOAT16:
  case AR_BASIC_FLOAT32:
  case AR_BASIC_FLOAT32_PARTIAL_PRECISION:
  case AR_BASIC_FLOAT64:
    switch (rightKind) {
    case AR_BASIC_LITERAL_FLOAT:
      return false;
    default:
      break; // No other valid conversions
    }
    break;
  default:
    // No other relevant targets
    break;
  }
  // Any pair not excluded above counts as a cast.
  return true;
}
// Computes a conversion cost from pRType (source) to pLType (destination);
// 0 means the canonical types already match, higher values are worse. Each
// element pair contributes promotion/cast/int-cast flags, and the flags are
// packed into disjoint 7-bit fields of the score (size mismatch lowest,
// right-cast highest) so that a worse category always dominates.
UINT64 HLSLExternalSource::ScoreCast(QualType pLType, QualType pRType) {
  if (pLType.getCanonicalType() == pRType.getCanonicalType()) {
    return 0;
  }
  UINT64 uScore = 0;
  UINT uLSize = GetNumElements(pLType);
  UINT uRSize = GetNumElements(pRType);
  UINT uCompareSize;
  bool bLCast = false;
  bool bRCast = false;
  bool bLIntCast = false;
  bool bRIntCast = false;
  bool bLPromo = false;
  bool bRPromo = false;
  // Never compare more elements than the source provides.
  uCompareSize = GetNumConvertCheckElts(pLType, uLSize, pRType, uRSize);
  if (uCompareSize > uRSize) {
    uCompareSize = uRSize;
  }
  for (UINT i = 0; i < uCompareSize; i++) {
    ArBasicKind LeftElementKind, RightElementKind;
    ArBasicKind CombinedKind = AR_BASIC_BOOL;
    QualType leftSub = GetNthElementType(pLType, i);
    QualType rightSub = GetNthElementType(pRType, i);
    ArTypeObjectKind leftKind = GetTypeObjectKind(leftSub);
    ArTypeObjectKind rightKind = GetTypeObjectKind(rightSub);
    LeftElementKind = GetTypeElementKind(leftSub);
    RightElementKind = GetTypeElementKind(rightSub);
    // CollectInfo is called with AR_TINFO_ALLOW_OBJECTS, and the resulting
    // information needed is the ShapeKind, EltKind and ObjKind.
    if (!leftSub.isNull() && !rightSub.isNull() &&
        leftKind != AR_TOBJ_INVALID && rightKind != AR_TOBJ_INVALID) {
      bool bCombine;
      if (leftKind == AR_TOBJ_OBJECT || rightKind == AR_TOBJ_OBJECT) {
        DXASSERT(rightKind == AR_TOBJ_OBJECT,
                 "otherwise prior check is incorrect");
        ArBasicKind LeftObjKind =
            LeftElementKind; // actually LeftElementKind would have been the
                             // element
        ArBasicKind RightObjKind = RightElementKind;
        LeftElementKind = LeftObjKind;
        RightElementKind = RightObjKind;
        if (leftKind != rightKind) {
          bCombine = false;
        } else if (!(bCombine = CombineObjectTypes(LeftObjKind, RightObjKind,
                                                   &CombinedKind))) {
          // Object combination is not symmetric; try the reverse order too.
          bCombine =
              CombineObjectTypes(RightObjKind, LeftObjKind, &CombinedKind);
        }
      } else {
        bCombine =
            CombineBasicTypes(LeftElementKind, RightElementKind, &CombinedKind);
      }
      // Classify what each side needs to reach the combined kind.
      if (bCombine && IsPromotion(LeftElementKind, CombinedKind)) {
        bLPromo = true;
      } else if (!bCombine || IsCast(LeftElementKind, CombinedKind)) {
        bLCast = true;
      } else if (IsIntCast(LeftElementKind, CombinedKind)) {
        bLIntCast = true;
      }
      if (bCombine && IsPromotion(CombinedKind, RightElementKind)) {
        bRPromo = true;
      } else if (!bCombine || IsCast(CombinedKind, RightElementKind)) {
        bRCast = true;
      } else if (IsIntCast(CombinedKind, RightElementKind)) {
        bRIntCast = true;
      }
    } else {
      // Unclassifiable elements force the worst per-element category.
      bLCast = true;
      bRCast = true;
    }
  }
// Adds one unit in the (shift)-th 7-bit score field when cond holds.
#define SCORE_COND(shift, cond) \
  { \
    if (cond) \
      uScore += 1ULL << (SCORE_MIN_SHIFT + SCORE_PARAM_SHIFT * shift); \
  }
  SCORE_COND(0, uRSize < uLSize);
  SCORE_COND(1, bLPromo);
  SCORE_COND(2, bRPromo);
  SCORE_COND(3, bLIntCast);
  SCORE_COND(4, bRIntCast);
  SCORE_COND(5, bLCast);
  SCORE_COND(6, bRCast);
  SCORE_COND(7, uLSize < uRSize);
#undef SCORE_COND
  // Make sure our scores fit in a UINT64.
  static_assert(SCORE_MIN_SHIFT + SCORE_PARAM_SHIFT * 8 <= 64);
  return uScore;
}
// Scores an implicit conversion sequence: 0 for an uninitialized (no-op)
// sequence, SCORE_MAX for anything other than a standard conversion, and
// otherwise the cast score from the source type to the final converted type.
UINT64 HLSLExternalSource::ScoreImplicitConversionSequence(
    const ImplicitConversionSequence *ics) {
  DXASSERT(ics, "otherwise conversion has not been initialized");
  if (!ics->isInitialized())
    return 0;
  return ics->isStandard()
             ? ScoreCast(ics->Standard.getToType(2) /* final type */,
                         ics->Standard.getFromType())
             : SCORE_MAX;
}
// Sums the conversion scores (in and out) for every argument of a viable
// candidate; SCORE_MAX short-circuits as "not usable".
// Ignore target version mismatches.
// in/out considerations have been taken care of by viability.
// 'this' considerations don't matter without inheritance, other
// than lookup and viability.
UINT64 HLSLExternalSource::ScoreFunction(OverloadCandidateSet::iterator &Cand) {
  UINT64 total = 0;
  for (unsigned i = 0; i < Cand->NumConversions; ++i) {
    const UINT64 inScore =
        ScoreImplicitConversionSequence(&Cand->Conversions[i]);
    if (inScore == SCORE_MAX)
      return SCORE_MAX;
    const UINT64 outScore =
        ScoreImplicitConversionSequence(&Cand->OutConversions[i]);
    if (outScore == SCORE_MAX)
      return SCORE_MAX;
    total += inScore + outScore;
  }
  return total;
}
// Picks the viable candidate with the lowest conversion score. Returns
// OR_Ambiguous when several candidates tie for best, OR_No_Viable_Function
// when nothing scores below SCORE_MAX.
OverloadingResult HLSLExternalSource::GetBestViableFunction(
    SourceLocation Loc, OverloadCandidateSet &set,
    OverloadCandidateSet::iterator &Best) {
  Best = set.end();
  // Fast path: a single viable candidate wins outright.
  if (set.size() == 1 && set.begin()->Viable) {
    Best = set.begin();
    return OR_Success;
  }
  UINT64 bestScore = SCORE_MAX;
  unsigned bestCount = 0;
  for (OverloadCandidateSet::iterator it = set.begin(); it != set.end(); ++it) {
    if (!it->Viable)
      continue;
    const UINT64 score = ScoreFunction(it);
    if (score == SCORE_MAX)
      continue;
    if (score < bestScore) {
      Best = it;
      bestScore = score;
      bestCount = 1;
    } else if (score == bestScore) {
      ++bestCount;
    }
  }
  if (Best == set.end())
    return OR_No_Viable_Function;
  if (bestCount > 1) {
    Best = set.end();
    return OR_Ambiguous;
  }
  // No need to check for deleted functions to yield OR_Deleted.
  return OR_Success;
}
/// <summary>
/// Initializes the specified <paramref name="initSequence" /> describing how
/// <paramref name="Entity" /> is initialized with <paramref name="Args" />.
/// </summary>
/// <param name="Entity">Entity being initialized; a variable, return result,
/// etc.</param> <param name="Kind">Kind of initialization: copying,
/// list-initializing, constructing, etc.</param> <param name="Args">Arguments
/// to the initialization.</param> <param name="TopLevelOfInitList">Whether this
/// is the top-level of an initialization list.</param> <param
/// name="initSequence">Initialization sequence description to
/// initialize.</param>
void HLSLExternalSource::InitializeInitSequenceForHLSL(
    const InitializedEntity &Entity, const InitializationKind &Kind,
    MultiExprArg Args, bool TopLevelOfInitList,
    InitializationSequence *initSequence) {
  DXASSERT_NOMSG(initSequence != nullptr);
  // In HLSL there are no default initializers, eg float4x4 m();
  // Except for RayQuery constructor (also handle InitializationKind::IK_Value)
  if (Kind.getKind() == InitializationKind::IK_Default ||
      Kind.getKind() == InitializationKind::IK_Value) {
    QualType destBaseType = m_context->getBaseElementType(Entity.getType());
    ArTypeObjectKind destBaseShape = GetTypeObjectKind(destBaseType);
    if (destBaseShape == AR_TOBJ_OBJECT) {
      const CXXRecordDecl *typeRecordDecl = destBaseType->getAsCXXRecordDecl();
      int index = FindObjectBasicKindIndex(
          GetRecordDeclForBuiltInOrStruct(typeRecordDecl));
      DXASSERT(index != -1,
               "otherwise can't find type we already determined was an object");
      if (g_ArBasicKindsAsTypes[index] == AR_OBJECT_RAY_QUERY) {
        // RayQuery is the one object type with a real default constructor.
        CXXConstructorDecl *Constructor = *typeRecordDecl->ctor_begin();
        initSequence->AddConstructorInitializationStep(
            Constructor, AccessSpecifier::AS_public, destBaseType, false, false,
            false);
        return;
      }
    }
    // Value initializers occur for temporaries with empty parens or braces.
    if (Kind.getKind() == InitializationKind::IK_Value) {
      m_sema->Diag(Kind.getLocation(), diag::err_hlsl_type_empty_init)
          << Entity.getType();
      SilenceSequenceDiagnostics(initSequence);
    }
    return;
  }
  // If we have a DirectList, we should have a single InitListExprClass
  // argument.
  DXASSERT(
      Kind.getKind() != InitializationKind::IK_DirectList ||
          (Args.size() == 1 &&
           Args.front()->getStmtClass() == Stmt::InitListExprClass),
      "otherwise caller is passing in incorrect initialization configuration");
  bool isCast = Kind.isCStyleCast();
  QualType destType = Entity.getType();
  ArTypeObjectKind destShape = GetTypeObjectKind(destType);
  // Direct initialization occurs for explicit constructor arguments.
  // E.g.: http://en.cppreference.com/w/cpp/language/direct_initialization
  if (Kind.getKind() == InitializationKind::IK_Direct &&
      destShape == AR_TOBJ_COMPOUND && !Kind.isCStyleOrFunctionalCast()) {
    m_sema->Diag(Kind.getLocation(),
                 diag::err_hlsl_require_numeric_base_for_ctor);
    SilenceSequenceDiagnostics(initSequence);
    return;
  }
  // Flatten for constructor-style or list-style initialization; otherwise
  // fall through to a single-argument conversion below.
  bool flatten = (Kind.getKind() == InitializationKind::IK_Direct && !isCast) ||
                 Kind.getKind() == InitializationKind::IK_DirectList ||
                 (Args.size() == 1 &&
                  Args.front()->getStmtClass() == Stmt::InitListExprClass);
  if (flatten) {
    // TODO: InitializationSequence::Perform in SemaInit should take the arity
    // of incomplete array types to adjust the value - we do calculate this as
    // part of type analysis. Until this is done, s_arr_i_f arr_struct_none[] =
    // { }; succeeds when it should instead fail.
    FlattenedTypeIterator::ComparisonResult comparisonResult =
        FlattenedTypeIterator::CompareTypesForInit(
            *this, destType, Args, Kind.getLocation(), Kind.getLocation());
    if (comparisonResult.IsConvertibleAndEqualLength() ||
        (isCast && comparisonResult.IsConvertibleAndLeftLonger())) {
      initSequence->AddListInitializationStep(destType);
    } else {
      SourceLocation diagLocation;
      if (Args.size() > 0) {
        diagLocation = Args.front()->getLocStart();
      } else {
        diagLocation = Entity.getDiagLoc();
      }
      if (comparisonResult.IsEqualLength()) {
        m_sema->Diag(diagLocation, diag::err_hlsl_type_mismatch);
      } else {
        m_sema->Diag(diagLocation, diag::err_incorrect_num_initializers)
            << (comparisonResult.RightCount < comparisonResult.LeftCount)
            << IsSubobjectType(destType) << comparisonResult.LeftCount
            << comparisonResult.RightCount;
      }
      SilenceSequenceDiagnostics(initSequence);
    }
  } else {
    DXASSERT(
        Args.size() == 1,
        "otherwise this was mis-parsed or should be a list initialization");
    Expr *firstArg = Args.front();
    if (IsExpressionBinaryComma(firstArg)) {
      m_sema->Diag(firstArg->getExprLoc(), diag::warn_hlsl_comma_in_init);
    }
    ExprResult expr = ExprResult(firstArg);
    Sema::CheckedConversionKind cck =
        Kind.isExplicitCast()
            ? Sema::CheckedConversionKind::CCK_CStyleCast
            : Sema::CheckedConversionKind::CCK_ImplicitConversion;
    unsigned int msg = 0;
    CastKind castKind;
    CXXCastPath basePath;
    SourceRange range = Kind.getRange();
    ImplicitConversionSequence ics;
    ics.setStandard();
    bool castWorked = TryStaticCastForHLSL(
        expr, destType, cck, range, msg, castKind, basePath,
        ListInitializationFalse, SuppressWarningsFalse, SuppressErrorsTrue,
        &ics.Standard);
    if (castWorked) {
      // Identical types that need no load can use a plain C assignment step.
      if (destType.getCanonicalType() ==
              firstArg->getType().getCanonicalType() &&
          (ics.Standard).First != ICK_Lvalue_To_Rvalue) {
        initSequence->AddCAssignmentStep(destType);
      } else {
        initSequence->AddConversionSequenceStep(
            ics, destType.getNonReferenceType(), TopLevelOfInitList);
      }
    } else {
      initSequence->SetFailed(InitializationSequence::FK_ConversionFailed);
    }
  }
}
// Returns true when sourceType can convert to targetType without growing:
// element kinds must match, shapes must match (or vector-to-scalar
// truncation), and the target's total element count must not exceed the
// source's. Identical structs and derived-to-base conversions also qualify.
bool HLSLExternalSource::IsConversionToLessOrEqualElements(
    const QualType &sourceType, const QualType &targetType,
    bool explicitConversion) {
  DXASSERT_NOMSG(!sourceType.isNull());
  DXASSERT_NOMSG(!targetType.isNull());
  ArTypeInfo sourceTypeInfo;
  ArTypeInfo targetTypeInfo;
  GetConversionForm(sourceType, explicitConversion, &sourceTypeInfo);
  GetConversionForm(targetType, explicitConversion, &targetTypeInfo);
  if (sourceTypeInfo.EltKind != targetTypeInfo.EltKind) {
    return false;
  }
  bool isVecMatTrunc = sourceTypeInfo.ShapeKind == AR_TOBJ_VECTOR &&
                       targetTypeInfo.ShapeKind == AR_TOBJ_BASIC;
  if (sourceTypeInfo.ShapeKind != targetTypeInfo.ShapeKind && !isVecMatTrunc) {
    return false;
  }
  if (sourceTypeInfo.ShapeKind == AR_TOBJ_OBJECT &&
      sourceTypeInfo.ObjKind == targetTypeInfo.ObjKind) {
    return true;
  }
  // The same struct type is trivially convertible.
  if (sourceTypeInfo.ShapeKind == AR_TOBJ_COMPOUND &&
      sourceType.getCanonicalType().getUnqualifiedType() ==
          targetType.getCanonicalType().getUnqualifiedType()) {
    return true;
  }
  // A derived-to-base conversion never grows the value.
  // NOTE(review): the two compound checks differ (conversion-form shape vs.
  // raw GetTypeObjectKind) — confirm both conditions are intentional.
  if (sourceTypeInfo.ShapeKind == AR_TOBJ_COMPOUND ||
      GetTypeObjectKind(sourceType) == AR_TOBJ_COMPOUND) {
    const RecordType *targetRT = targetType->getAs<RecordType>();
    const RecordType *sourceRT = sourceType->getAs<RecordType>();
    if (targetRT && sourceRT) {
      RecordDecl *targetRD = targetRT->getDecl();
      RecordDecl *sourceRD = sourceRT->getDecl();
      const CXXRecordDecl *targetCXXRD = dyn_cast<CXXRecordDecl>(targetRD);
      const CXXRecordDecl *sourceCXXRD = dyn_cast<CXXRecordDecl>(sourceRD);
      if (targetCXXRD && sourceCXXRD) {
        if (sourceCXXRD->isDerivedFrom(targetCXXRD))
          return true;
      }
    }
  }
  if (sourceTypeInfo.ShapeKind != AR_TOBJ_SCALAR &&
      sourceTypeInfo.ShapeKind != AR_TOBJ_VECTOR &&
      sourceTypeInfo.ShapeKind != AR_TOBJ_MATRIX) {
    return false;
  }
  return targetTypeInfo.uTotalElts <= sourceTypeInfo.uTotalElts;
}
bool HLSLExternalSource::IsConversionToLessOrEqualElements(
const ExprResult &sourceExpr, const QualType &targetType,
bool explicitConversion) {
if (sourceExpr.isInvalid() || targetType.isNull()) {
return false;
}
return IsConversionToLessOrEqualElements(sourceExpr.get()->getType(),
targetType, explicitConversion);
}
// Determines whether 'type' is composed entirely of numeric primitives
// (scalars, vectors, matrices, and arrays/structs thereof). On success,
// *count receives the total number of scalar elements; for a struct whose
// last field is an incomplete array, *count is the minimum element count of
// the structure (the incomplete tail is not added).
bool HLSLExternalSource::IsTypeNumeric(QualType type, UINT *count) {
  DXASSERT_NOMSG(!type.isNull());
  DXASSERT_NOMSG(count != nullptr);

  *count = 0;
  UINT subCount = 0;
  ArTypeObjectKind shapeKind = GetTypeObjectKind(type);
  switch (shapeKind) {
  case AR_TOBJ_ARRAY:
    // An array is numeric iff its element type is; scale by the array size.
    if (IsTypeNumeric(m_context->getAsArrayType(type)->getElementType(),
                      &subCount)) {
      *count = subCount * GetArraySize(type);
      return true;
    }
    return false;
  case AR_TOBJ_COMPOUND: {
    UINT maxCount = 0;
    { // Determine maximum count to prevent infinite loop on incomplete array
      FlattenedTypeIterator itCount(SourceLocation(), type, *this);
      maxCount = itCount.countRemaining();
      if (!maxCount) {
        return false; // empty struct.
      }
    }
    // Walk each flattened field; every field must itself be numeric.
    FlattenedTypeIterator it(SourceLocation(), type, *this);
    while (it.hasCurrentElement()) {
      bool isFieldNumeric = IsTypeNumeric(it.getCurrentElement(), &subCount);
      if (!isFieldNumeric) {
        return false;
      }
      if (*count >= maxCount) {
        // this element is an incomplete array at the end; iterator will not
        // advance past this element. don't add to *count either, so *count will
        // represent minimum size of the structure.
        break;
      }
      *count += (subCount * it.getCurrentElementSize());
      it.advanceCurrentElement(it.getCurrentElementSize());
    }
    return true;
  }
  default:
    DXASSERT(false, "unreachable");
    return false;
  case AR_TOBJ_BASIC:
  case AR_TOBJ_MATRIX:
  case AR_TOBJ_VECTOR:
    // Leaf shapes: numeric iff the element kind is numeric.
    *count = GetElementCount(type);
    return IsBasicKindNumeric(GetTypeElementKind(type));
  case AR_TOBJ_OBJECT:
  case AR_TOBJ_DEPENDENT:
  case AR_TOBJ_STRING:
    return false;
  }
}
// Possible outcomes when parsing a matrix swizzle member name such as
// "_m00_m11" or "_11_22" (see TryParseMatrixMemberAccess).
enum MatrixMemberAccessError {
  MatrixMemberAccessError_None,       // No errors found.
  MatrixMemberAccessError_BadFormat,  // Formatting error (non-digit).
  MatrixMemberAccessError_MixingRefs, // Mix of zero-based and one-based
                                      // references.
  MatrixMemberAccessError_Empty,      // No members specified.
  MatrixMemberAccessError_ZeroInOneBased,  // A zero was used in a one-based
                                           // reference.
  MatrixMemberAccessError_FourInZeroBased, // A four was used in a zero-based
                                           // reference.
  MatrixMemberAccessError_TooManyPositions, // Too many positions (more than
                                            // four) were specified.
};
// Consumes a single decimal digit from memberText, storing its numeric value
// in *value and advancing the pointer. On a non-digit character the pointer
// is left unchanged and a bad-format error is reported.
static MatrixMemberAccessError TryConsumeMatrixDigit(const char *&memberText,
                                                     uint32_t *value) {
  DXASSERT_NOMSG(memberText != nullptr);
  DXASSERT_NOMSG(value != nullptr);

  const char ch = *memberText;
  if (ch < '0' || ch > '9')
    return MatrixMemberAccessError_BadFormat;

  *value = static_cast<uint32_t>(ch - '0');
  ++memberText;
  return MatrixMemberAccessError_None;
}
// Parses a matrix member-access (swizzle) string such as "_m00_m11"
// (zero-based) or "_11_22" (one-based) into up to four (row, col) positions.
// Mixing zero-based and one-based references is an error, as is using a
// digit outside the base's range ('4' in zero-based, '0' in one-based).
// Stored positions are always zero-based. On any error *value is left with
// IsValid == 0.
static MatrixMemberAccessError
TryParseMatrixMemberAccess(const char *memberText,
                           MatrixMemberAccessPositions *value) {
  DXASSERT_NOMSG(memberText != nullptr);
  DXASSERT_NOMSG(value != nullptr);
  MatrixMemberAccessPositions result;
  bool zeroBasedDecided = false; // becomes true once the first reference
                                 // fixes the indexing style
  bool zeroBased = false;

  // Set the output value to invalid to allow early exits when errors are found.
  value->IsValid = 0;

  // Assume this is true until proven otherwise.
  result.IsValid = 1;
  result.Count = 0;

  while (*memberText) {
    // Check for a leading underscore.
    if (*memberText != '_') {
      return MatrixMemberAccessError_BadFormat;
    }
    ++memberText;

    // Check whether we have an 'm' or a digit.
    if (*memberText == 'm') {
      if (zeroBasedDecided && !zeroBased) {
        return MatrixMemberAccessError_MixingRefs;
      }
      zeroBased = true;
      zeroBasedDecided = true;
      ++memberText;
    } else if (!('0' <= *memberText && *memberText <= '9')) {
      return MatrixMemberAccessError_BadFormat;
    } else {
      if (zeroBasedDecided && zeroBased) {
        return MatrixMemberAccessError_MixingRefs;
      }
      zeroBased = false;
      zeroBasedDecided = true;
    }

    // Consume two digits for the position.
    uint32_t rowPosition;
    uint32_t colPosition;
    MatrixMemberAccessError digitError;
    if (MatrixMemberAccessError_None !=
        (digitError = TryConsumeMatrixDigit(memberText, &rowPosition))) {
      return digitError;
    }
    if (MatrixMemberAccessError_None !=
        (digitError = TryConsumeMatrixDigit(memberText, &colPosition))) {
      return digitError;
    }

    // Look for specific common errors (developer likely mixed up reference
    // style).
    if (zeroBased) {
      if (rowPosition == 4 || colPosition == 4) {
        return MatrixMemberAccessError_FourInZeroBased;
      }
    } else {
      if (rowPosition == 0 || colPosition == 0) {
        return MatrixMemberAccessError_ZeroInOneBased;
      }
      // SetPosition will use zero-based indices.
      --rowPosition;
      --colPosition;
    }

    if (result.Count == 4) {
      return MatrixMemberAccessError_TooManyPositions;
    }

    result.SetPosition(result.Count, rowPosition, colPosition);
    result.Count++;
  }

  if (result.Count == 0) {
    return MatrixMemberAccessError_Empty;
  }

  *value = result;
  return MatrixMemberAccessError_None;
}
// Resolves a matrix member (swizzle) access such as mat._m00_m11 into an
// ExtMatrixElementExpr. Parse failures are diagnosed and yield ExprError();
// a plain out-of-bounds position is diagnosed but still produces an
// expression of the correct arity so downstream processing can continue.
ExprResult HLSLExternalSource::LookupMatrixMemberExprForHLSL(
    Expr &BaseExpr, DeclarationName MemberName, bool IsArrow,
    SourceLocation OpLoc, SourceLocation MemberLoc) {
  QualType BaseType = BaseExpr.getType();
  DXASSERT(!BaseType.isNull(),
           "otherwise caller should have stopped analysis much earlier");
  DXASSERT(GetTypeObjectKind(BaseType) == AR_TOBJ_MATRIX,
           "Should only be called on known matrix types");

  QualType elementType;
  UINT rowCount, colCount;
  GetRowsAndCols(BaseType, rowCount, colCount);
  elementType = GetMatrixOrVectorElementType(BaseType);

  IdentifierInfo *member = MemberName.getAsIdentifierInfo();
  const char *memberText = member->getNameStart();
  MatrixMemberAccessPositions positions;
  MatrixMemberAccessError memberAccessError;
  unsigned msg = 0; // diagnostic ID; 0 means no error

  memberAccessError = TryParseMatrixMemberAccess(memberText, &positions);
  switch (memberAccessError) {
  case MatrixMemberAccessError_BadFormat:
    msg = diag::err_hlsl_matrix_member_bad_format;
    break;
  case MatrixMemberAccessError_Empty:
    msg = diag::err_hlsl_matrix_member_empty;
    break;
  case MatrixMemberAccessError_FourInZeroBased:
    msg = diag::err_hlsl_matrix_member_four_in_zero_based;
    break;
  case MatrixMemberAccessError_MixingRefs:
    msg = diag::err_hlsl_matrix_member_mixing_refs;
    break;
  case MatrixMemberAccessError_None:
    msg = 0;
    DXASSERT(positions.IsValid, "otherwise an error should have been returned");
    // Check the position with the type now.
    for (unsigned int i = 0; i < positions.Count; i++) {
      uint32_t rowPos, colPos;
      positions.GetPosition(i, &rowPos, &colPos);
      if (rowPos >= rowCount || colPos >= colCount) {
        msg = diag::err_hlsl_matrix_member_out_of_bounds;
        break;
      }
    }
    break;
  case MatrixMemberAccessError_TooManyPositions:
    msg = diag::err_hlsl_matrix_member_too_many_positions;
    break;
  case MatrixMemberAccessError_ZeroInOneBased:
    msg = diag::err_hlsl_matrix_member_zero_in_one_based;
    break;
  default:
    llvm_unreachable("Unknown MatrixMemberAccessError value");
  }

  if (msg != 0) {
    m_sema->Diag(MemberLoc, msg) << memberText;

    // It's possible that it's a simple out-of-bounds condition. In this case,
    // generate the member access expression with the correct arity and continue
    // processing.
    if (!positions.IsValid) {
      return ExprError();
    }
  }

  DXASSERT(positions.IsValid, "otherwise an error should have been returned");

  // Consume elements
  QualType resultType;
  if (positions.Count == 1)
    resultType = elementType;
  else
    resultType =
        NewSimpleAggregateType(AR_TOBJ_UNKNOWN, GetTypeElementKind(elementType),
                               0, OneRow, positions.Count);

  // Add qualifiers from BaseType.
  resultType =
      m_context->getQualifiedType(resultType, BaseType.getQualifiers());

  // A swizzle with repeated components yields an r-value; otherwise the
  // value kind follows the base expression (l-value through an arrow).
  ExprValueKind VK = positions.ContainsDuplicateElements()
                         ? VK_RValue
                         : (IsArrow ? VK_LValue : BaseExpr.getValueKind());

  ExtMatrixElementExpr *matrixExpr = new (m_context) ExtMatrixElementExpr(
      resultType, VK, &BaseExpr, *member, MemberLoc, positions);
  return matrixExpr;
}
// Possible outcomes when parsing a vector swizzle member name such as
// "xyz" or "rgb" (see TryParseVectorMemberAccess).
enum VectorMemberAccessError {
  VectorMemberAccessError_None,      // No errors found.
  VectorMemberAccessError_BadFormat, // Formatting error (not in 'rgba' or
                                     // 'xyzw').
  VectorMemberAccessError_MixingStyles, // Mix of rgba and xyzw swizzle styles.
  VectorMemberAccessError_Empty,        // No members specified.
  VectorMemberAccessError_TooManyPositions, // Too many positions (more than
                                            // four) were specified.
};
// Consumes a single swizzle character ('xyzw' or 'rgba') from memberText,
// storing the component index (0-3) in *value and whether the character came
// from the rgba set in rgbaStyle. On an unrecognized character the pointer
// is left unchanged, rgbaStyle is false, and a bad-format error is reported.
static VectorMemberAccessError TryConsumeVectorDigit(const char *&memberText,
                                                     uint32_t *value,
                                                     bool &rgbaStyle) {
  DXASSERT_NOMSG(memberText != nullptr);
  DXASSERT_NOMSG(value != nullptr);

  const char ch = *memberText;
  uint32_t component;
  if (ch == 'x' || ch == 'r')
    component = 0;
  else if (ch == 'y' || ch == 'g')
    component = 1;
  else if (ch == 'z' || ch == 'b')
    component = 2;
  else if (ch == 'w' || ch == 'a')
    component = 3;
  else {
    rgbaStyle = false;
    return VectorMemberAccessError_BadFormat;
  }

  rgbaStyle = (ch == 'r' || ch == 'g' || ch == 'b' || ch == 'a');
  *value = component;
  ++memberText;
  return VectorMemberAccessError_None;
}
// Parses a vector swizzle string such as "xyz" or "rgb" into up to four
// component positions. The 'xyzw' and 'rgba' styles may not be mixed. On
// any error *value is left with IsValid == 0.
static VectorMemberAccessError
TryParseVectorMemberAccess(const char *memberText,
                           VectorMemberAccessPositions *value) {
  DXASSERT_NOMSG(memberText != nullptr);
  DXASSERT_NOMSG(value != nullptr);
  VectorMemberAccessPositions result;
  bool rgbaStyleDecided = false; // becomes true once the first character
                                 // fixes the swizzle style
  bool rgbaStyle = false;

  // Set the output value to invalid to allow early exits when errors are found.
  value->IsValid = 0;

  // Assume this is true until proven otherwise.
  result.IsValid = 1;
  result.Count = 0;

  while (*memberText) {
    // Consume one character for the swizzle.
    uint32_t colPosition;
    VectorMemberAccessError digitError;
    bool rgbaStyleTmp = false;
    if (VectorMemberAccessError_None !=
        (digitError =
             TryConsumeVectorDigit(memberText, &colPosition, rgbaStyleTmp))) {
      return digitError;
    }

    if (rgbaStyleDecided && rgbaStyleTmp != rgbaStyle) {
      return VectorMemberAccessError_MixingStyles;
    } else {
      rgbaStyleDecided = true;
      rgbaStyle = rgbaStyleTmp;
    }

    if (result.Count == 4) {
      return VectorMemberAccessError_TooManyPositions;
    }

    result.SetPosition(result.Count, colPosition);
    result.Count++;
  }

  if (result.Count == 0) {
    return VectorMemberAccessError_Empty;
  }

  *value = result;
  return VectorMemberAccessError_None;
}
// Determines whether the expression ultimately refers to a declaration
// carrying both the HLSL 'indices' and 'out' attributes, looking through
// any chain of array subscripts and implicit casts.
bool IsExprAccessingOutIndicesArray(Expr *BaseExpr) {
  Expr *E = BaseExpr;
  for (;;) {
    if (ArraySubscriptExpr *Sub = dyn_cast<ArraySubscriptExpr>(E)) {
      E = Sub->getBase();
      continue;
    }
    if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(E)) {
      E = Cast->getSubExpr();
      continue;
    }
    if (DeclRefExpr *Ref = dyn_cast<DeclRefExpr>(E)) {
      ValueDecl *Decl = Ref->getDecl();
      return Decl->getAttr<HLSLIndicesAttr>() != nullptr &&
             Decl->getAttr<HLSLOutAttr>() != nullptr;
    }
    // Any other expression kind cannot name an out-indices array.
    return false;
  }
}
// Resolves a vector member (swizzle) access such as vec.xyz into an
// HLSLVectorElementExpr. Parse failures are diagnosed and yield ExprError();
// a plain out-of-bounds position is diagnosed but still produces an
// expression of the correct arity so downstream processing can continue.
// Partial swizzles of mesh-shader out-indices arrays are rejected for the
// DXIL path (allowed for SPIR-V).
ExprResult HLSLExternalSource::LookupVectorMemberExprForHLSL(
    Expr &BaseExpr, DeclarationName MemberName, bool IsArrow,
    SourceLocation OpLoc, SourceLocation MemberLoc) {
  QualType BaseType = BaseExpr.getType();
  DXASSERT(!BaseType.isNull(),
           "otherwise caller should have stopped analysis much earlier");
  DXASSERT(GetTypeObjectKind(BaseType) == AR_TOBJ_VECTOR,
           "Should only be called on known vector types");

  QualType elementType;
  UINT colCount = GetHLSLVecSize(BaseType);
  elementType = GetMatrixOrVectorElementType(BaseType);

  IdentifierInfo *member = MemberName.getAsIdentifierInfo();
  const char *memberText = member->getNameStart();
  VectorMemberAccessPositions positions;
  VectorMemberAccessError memberAccessError;
  unsigned msg = 0; // diagnostic ID; 0 means no error

  memberAccessError = TryParseVectorMemberAccess(memberText, &positions);
  switch (memberAccessError) {
  case VectorMemberAccessError_BadFormat:
    msg = diag::err_hlsl_vector_member_bad_format;
    break;
  case VectorMemberAccessError_Empty:
    msg = diag::err_hlsl_vector_member_empty;
    break;
  case VectorMemberAccessError_MixingStyles:
    msg = diag::err_ext_vector_component_name_mixedsets;
    break;
  case VectorMemberAccessError_None:
    msg = 0;
    DXASSERT(positions.IsValid, "otherwise an error should have been returned");
    // Check the position with the type now.
    for (unsigned int i = 0; i < positions.Count; i++) {
      uint32_t colPos;
      positions.GetPosition(i, &colPos);
      if (colPos >= colCount) {
        msg = diag::err_hlsl_vector_member_out_of_bounds;
        break;
      }
    }
    break;
  case VectorMemberAccessError_TooManyPositions:
    msg = diag::err_hlsl_vector_member_too_many_positions;
    break;
  default:
    llvm_unreachable("Unknown VectorMemberAccessError value");
  }

  if (msg != 0) {
    m_sema->Diag(MemberLoc, msg) << memberText;

    // It's possible that it's a simple out-of-bounds condition. In this case,
    // generate the member access expression with the correct arity and continue
    // processing.
    if (!positions.IsValid) {
      return ExprError();
    }
  }

  DXASSERT(positions.IsValid, "otherwise an error should have been returned");

  // Disallow component access for out indices for DXIL path. We still allow
  // this in SPIR-V path.
  if (!m_sema->getLangOpts().SPIRV &&
      IsExprAccessingOutIndicesArray(&BaseExpr) && positions.Count < colCount) {
    m_sema->Diag(MemberLoc, diag::err_hlsl_out_indices_array_incorrect_access);
    return ExprError();
  }

  // Consume elements
  QualType resultType;
  if (positions.Count == 1)
    resultType = elementType;
  else
    resultType =
        NewSimpleAggregateType(AR_TOBJ_UNKNOWN, GetTypeElementKind(elementType),
                               0, OneRow, positions.Count);

  // Add qualifiers from BaseType.
  resultType =
      m_context->getQualifiedType(resultType, BaseType.getQualifiers());

  // A swizzle with repeated components yields an r-value; otherwise the
  // value kind follows the base expression (l-value through an arrow).
  ExprValueKind VK = positions.ContainsDuplicateElements()
                         ? VK_RValue
                         : (IsArrow ? VK_LValue : BaseExpr.getValueKind());

  HLSLVectorElementExpr *vectorExpr = new (m_context) HLSLVectorElementExpr(
      resultType, VK, &BaseExpr, *member, MemberLoc, positions);
  return vectorExpr;
}
// Resolves a member access on an array value. The only supported member is
// the deprecated 'Length' property on constant-sized arrays; any other
// member is diagnosed as an invalid member reference.
ExprResult HLSLExternalSource::LookupArrayMemberExprForHLSL(
    Expr &BaseExpr, DeclarationName MemberName, bool IsArrow,
    SourceLocation OpLoc, SourceLocation MemberLoc) {

  QualType BaseType = BaseExpr.getType();
  DXASSERT(!BaseType.isNull(),
           "otherwise caller should have stopped analysis much earlier");
  DXASSERT(GetTypeObjectKind(BaseType) == AR_TOBJ_ARRAY,
           "Should only be called on known array types");

  IdentifierInfo *member = MemberName.getAsIdentifierInfo();
  const char *memberText = member->getNameStart();

  // The only property available on arrays is Length; it is deprecated and
  // available only on HLSL version <=2018
  // NOTE(review): the comment above says <=2018 but the check below rejects
  // any version above 2016 — confirm the intended cutoff.
  if (member->getLength() == 6 && 0 == strcmp(memberText, "Length")) {
    if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(BaseType)) {
      // check version support
      hlsl::LangStd hlslVer = getSema()->getLangOpts().HLSLVersion;
      if (hlslVer > hlsl::LangStd::v2016) {
        m_sema->Diag(MemberLoc, diag::err_hlsl_unsupported_for_version_lower)
            << "Length"
            << "2016";
        return ExprError();
      }
      if (hlslVer == hlsl::LangStd::v2016) {
        // Still allowed in 2016, but flagged as deprecated.
        m_sema->Diag(MemberLoc, diag::warn_deprecated) << "Length";
      }
      UnaryExprOrTypeTraitExpr *arrayLenExpr = new (m_context)
          UnaryExprOrTypeTraitExpr(UETT_ArrayLength, &BaseExpr,
                                   m_context->getSizeType(), MemberLoc,
                                   BaseExpr.getSourceRange().getEnd());

      return arrayLenExpr;
    }
  }
  m_sema->Diag(MemberLoc, diag::err_typecheck_member_reference_struct_union)
      << BaseType << BaseExpr.getSourceRange() << MemberLoc;

  return ExprError();
}
// Prepares an expression for member access:
// - HLSL objects with implicit member access are flat-cast to their
//   resource result type (const-qualified when the access is read-only).
// - Primitive scalars are splat to a one-element vector so vector-style
//   member access applies; a bitfield base is first loaded as an r-value.
// All other expressions are returned unchanged.
ExprResult HLSLExternalSource::MaybeConvertMemberAccess(clang::Expr *E) {
  DXASSERT_NOMSG(E != nullptr);

  if (IsHLSLObjectWithImplicitMemberAccess(E->getType())) {
    QualType targetType = hlsl::GetHLSLResourceResultType(E->getType());
    if (IsHLSLObjectWithImplicitROMemberAccess(E->getType()))
      targetType = m_context->getConstType(targetType);
    return ImplicitCastExpr::Create(*m_context, targetType,
                                    CastKind::CK_FlatConversion, E, nullptr,
                                    E->getValueKind());
  }
  ArBasicKind basic = GetTypeElementKind(E->getType());
  if (!IS_BASIC_PRIMITIVE(basic)) {
    return E;
  }
  ArTypeObjectKind kind = GetTypeObjectKind(E->getType());
  if (kind != AR_TOBJ_SCALAR) {
    return E;
  }
  QualType targetType = NewSimpleAggregateType(AR_TOBJ_VECTOR, basic, 0, 1, 1);
  if (E->getObjectKind() ==
      OK_BitField) // if E is a bitfield, then generate an R value.
    E = ImplicitCastExpr::Create(*m_context, E->getType(),
                                 CastKind::CK_LValueToRValue, E, nullptr,
                                 VK_RValue);
  return ImplicitCastExpr::Create(*m_context, targetType,
                                  CastKind::CK_HLSLVectorSplat, E, nullptr,
                                  E->getValueKind());
}
// Maps an implicit conversion kind (plus the source/target element kinds,
// which disambiguate direction) to the equivalent scalar CastKind. Returns
// CK_Invalid for conversions with no direct cast-kind equivalent.
//
// TODO: Shouldn't we have more specific ICK enums so we don't have to
// re-evaluate based on from/to kinds in order to determine CastKind?
// There's a FIXME note in PerformImplicitConversion that calls out exactly
// this problem.
static clang::CastKind
ImplicitConversionKindToCastKind(clang::ImplicitConversionKind ICK,
                                 ArBasicKind FromKind, ArBasicKind ToKind) {
  switch (ICK) {
  case ICK_Integral_Promotion:
  case ICK_Integral_Conversion:
    return CK_IntegralCast;
  case ICK_Floating_Promotion:
  case ICK_Floating_Conversion:
    return CK_FloatingCast;
  case ICK_Floating_Integral:
    // Direction depends on which side is the floating-point type.
    if (IS_BASIC_FLOAT(FromKind) && IS_BASIC_AINT(ToKind))
      return CK_FloatingToIntegral;
    if ((IS_BASIC_AINT(FromKind) || IS_BASIC_BOOL(FromKind)) &&
        IS_BASIC_FLOAT(ToKind))
      return CK_IntegralToFloating;
    return CK_Invalid;
  case ICK_Boolean_Conversion:
    if (IS_BASIC_FLOAT(FromKind) && IS_BASIC_BOOL(ToKind))
      return CK_FloatingToBoolean;
    if (IS_BASIC_AINT(FromKind) && IS_BASIC_BOOL(ToKind))
      return CK_IntegralToBoolean;
    return CK_Invalid;
  default:
    // Only covers implicit conversions with cast kind equivalents.
    return CK_Invalid;
  }
}
// Maps a standard scalar cast kind to its HLSL per-component (HLSLCC)
// equivalent, used when a conversion applies element-wise across a vector
// or matrix. Returns CK_Invalid for cast kinds with no HLSLCC counterpart.
static clang::CastKind ConvertToComponentCastKind(clang::CastKind CK) {
  switch (CK) {
  case CK_IntegralCast:
    return CK_HLSLCC_IntegralCast;
  case CK_FloatingCast:
    return CK_HLSLCC_FloatingCast;
  case CK_FloatingToIntegral:
    return CK_HLSLCC_FloatingToIntegral;
  case CK_IntegralToFloating:
    return CK_HLSLCC_IntegralToFloating;
  case CK_FloatingToBoolean:
    return CK_HLSLCC_FloatingToBoolean;
  case CK_IntegralToBoolean:
    return CK_HLSLCC_IntegralToBoolean;
  default:
    // Only HLSLCC castkinds are relevant. Ignore the rest.
    return CK_Invalid;
  }
  // Note: the previous trailing 'return CK_Invalid;' after the switch was
  // unreachable (every case, including default, returns) and was removed.
}
// Inserts an implicit cast collapsing a vector or matrix expression to a
// scalar of element kind EltKind. Expressions of any other shape are
// returned unchanged.
clang::Expr *HLSLExternalSource::HLSLImpCastToScalar(clang::Sema *self,
                                                     clang::Expr *From,
                                                     ArTypeObjectKind FromShape,
                                                     ArBasicKind EltKind) {
  clang::CastKind CK;
  if (FromShape == AR_TOBJ_MATRIX)
    CK = CK_HLSLMatrixToScalarCast;
  else if (FromShape == AR_TOBJ_VECTOR)
    CK = CK_HLSLVectorToScalarCast;
  else
    return From; // Not a vector or matrix - nothing to collapse.

  QualType scalarType = NewSimpleAggregateType(AR_TOBJ_BASIC, EltKind, 0, 1, 1);
  return self->ImpCastExprToType(From, scalarType, CK, From->getValueKind())
      .get();
}
// Builds the AST cast expression(s) that realize a previously computed HLSL
// standard conversion sequence from 'From' to 'targetType'. SCS.Second
// selects the shape-level step (flat conversion, derived-to-base, splat,
// scalar selection, truncation, vector<->matrix conversion) and
// SCS.ComponentConversion the element-type step. Returns the converted
// expression, or ExprError() if a derived-to-base path cannot be formed.
clang::ExprResult HLSLExternalSource::PerformHLSLConversion(
    clang::Expr *From, clang::QualType targetType,
    const clang::StandardConversionSequence &SCS,
    clang::Sema::CheckedConversionKind CCK) {
  QualType sourceType = From->getType();
  sourceType = GetStructuralForm(sourceType);
  targetType = GetStructuralForm(targetType);
  ArTypeInfo SourceInfo, TargetInfo;
  CollectInfo(sourceType, &SourceInfo);
  CollectInfo(targetType, &TargetInfo);
  clang::CastKind CK = CK_Invalid;
  QualType intermediateTarget;

  // TODO: construct vector/matrix and component cast expressions
  switch (SCS.Second) {
  case ICK_Flat_Conversion: {
    // TODO: determine how to handle individual component conversions:
    // - have an array of conversions for ComponentConversion in SCS?
    //   convert that to an array of casts under a special kind of flat
    //   flat conversion node? What do component conversion casts cast
    //   from? We don't have a From expression for individiual components.
    From = m_sema
               ->ImpCastExprToType(From, targetType.getUnqualifiedType(),
                                   CK_FlatConversion, From->getValueKind(),
                                   /*BasePath=*/0, CCK)
               .get();
    break;
  }
  case ICK_HLSL_Derived_To_Base: {
    CXXCastPath BasePath;
    if (m_sema->CheckDerivedToBaseConversion(
            sourceType, targetType.getNonReferenceType(), From->getLocStart(),
            From->getSourceRange(), &BasePath, /*IgnoreAccess=*/true))
      return ExprError();
    From = m_sema
               ->ImpCastExprToType(From, targetType.getUnqualifiedType(),
                                   CK_HLSLDerivedToBase, From->getValueKind(),
                                   &BasePath, CCK)
               .get();
    break;
  }
  case ICK_HLSLVector_Splat: {
    // 1. optionally convert from vec1 or mat1x1 to scalar
    From = HLSLImpCastToScalar(m_sema, From, SourceInfo.ShapeKind,
                               SourceInfo.EltKind);
    // 2. optionally convert component type
    if (ICK_Identity != SCS.ComponentConversion) {
      CK = ImplicitConversionKindToCastKind(
          SCS.ComponentConversion, SourceInfo.EltKind, TargetInfo.EltKind);
      if (CK_Invalid != CK) {
        From = m_sema
                   ->ImpCastExprToType(
                       From,
                       NewSimpleAggregateType(AR_TOBJ_BASIC, TargetInfo.EltKind,
                                              0, 1, 1),
                       CK, From->getValueKind(), /*BasePath=*/0, CCK)
                   .get();
      }
    }
    // 3. splat scalar to final vector or matrix
    CK = CK_Invalid;
    if (AR_TOBJ_VECTOR == TargetInfo.ShapeKind)
      CK = CK_HLSLVectorSplat;
    else if (AR_TOBJ_MATRIX == TargetInfo.ShapeKind)
      CK = CK_HLSLMatrixSplat;
    if (CK_Invalid != CK) {
      From =
          m_sema
              ->ImpCastExprToType(From,
                                  NewSimpleAggregateType(
                                      TargetInfo.ShapeKind, TargetInfo.EltKind,
                                      0, TargetInfo.uRows, TargetInfo.uCols),
                                  CK, From->getValueKind(), /*BasePath=*/0, CCK)
              .get();
    }
    break;
  }
  case ICK_HLSLVector_Scalar: {
    // 1. select vector or matrix component
    From = HLSLImpCastToScalar(m_sema, From, SourceInfo.ShapeKind,
                               SourceInfo.EltKind);
    // 2. optionally convert component type
    if (ICK_Identity != SCS.ComponentConversion) {
      CK = ImplicitConversionKindToCastKind(
          SCS.ComponentConversion, SourceInfo.EltKind, TargetInfo.EltKind);
      if (CK_Invalid != CK) {
        From = m_sema
                   ->ImpCastExprToType(
                       From,
                       NewSimpleAggregateType(AR_TOBJ_BASIC, TargetInfo.EltKind,
                                              0, 1, 1),
                       CK, From->getValueKind(), /*BasePath=*/0, CCK)
                   .get();
      }
    }
    break;
  }

  // The following two (three if we re-introduce ICK_HLSLComponent_Conversion)
  // steps can be done with case fall-through, since this is the order in which
  // we want to do the conversion operations.
  case ICK_HLSLVector_Truncation: {
    // 1. dimension truncation
    // vector truncation or matrix truncation?
    if (SourceInfo.ShapeKind == AR_TOBJ_VECTOR) {
      From = m_sema
                 ->ImpCastExprToType(
                     From,
                     NewSimpleAggregateType(AR_TOBJ_VECTOR, SourceInfo.EltKind,
                                            0, 1, TargetInfo.uTotalElts),
                     CK_HLSLVectorTruncationCast, From->getValueKind(),
                     /*BasePath=*/0, CCK)
                 .get();
    } else if (SourceInfo.ShapeKind == AR_TOBJ_MATRIX) {
      if (TargetInfo.ShapeKind == AR_TOBJ_VECTOR && 1 == SourceInfo.uCols) {
        // Handle the column to vector case
        From =
            m_sema
                ->ImpCastExprToType(
                    From,
                    NewSimpleAggregateType(AR_TOBJ_MATRIX, SourceInfo.EltKind,
                                           0, TargetInfo.uCols, 1),
                    CK_HLSLMatrixTruncationCast, From->getValueKind(),
                    /*BasePath=*/0, CCK)
                .get();
      } else {
        From =
            m_sema
                ->ImpCastExprToType(From,
                                    NewSimpleAggregateType(
                                        AR_TOBJ_MATRIX, SourceInfo.EltKind, 0,
                                        TargetInfo.uRows, TargetInfo.uCols),
                                    CK_HLSLMatrixTruncationCast,
                                    From->getValueKind(), /*BasePath=*/0, CCK)
                .get();
      }
    } else {
      DXASSERT(
          false,
          "PerformHLSLConversion: Invalid source type for truncation cast");
    }
  }
    LLVM_FALLTHROUGH;

  case ICK_HLSLVector_Conversion: {
    // 2. Do ShapeKind conversion if necessary
    if (SourceInfo.ShapeKind != TargetInfo.ShapeKind) {
      switch (TargetInfo.ShapeKind) {
      case AR_TOBJ_VECTOR:
        DXASSERT(AR_TOBJ_MATRIX == SourceInfo.ShapeKind,
                 "otherwise, invalid casting sequence");
        From =
            m_sema
                ->ImpCastExprToType(From,
                                    NewSimpleAggregateType(
                                        AR_TOBJ_VECTOR, SourceInfo.EltKind, 0,
                                        TargetInfo.uRows, TargetInfo.uCols),
                                    CK_HLSLMatrixToVectorCast,
                                    From->getValueKind(), /*BasePath=*/0, CCK)
                .get();
        break;
      case AR_TOBJ_MATRIX:
        DXASSERT(AR_TOBJ_VECTOR == SourceInfo.ShapeKind,
                 "otherwise, invalid casting sequence");
        From =
            m_sema
                ->ImpCastExprToType(From,
                                    NewSimpleAggregateType(
                                        AR_TOBJ_MATRIX, SourceInfo.EltKind, 0,
                                        TargetInfo.uRows, TargetInfo.uCols),
                                    CK_HLSLVectorToMatrixCast,
                                    From->getValueKind(), /*BasePath=*/0, CCK)
                .get();
        break;
      case AR_TOBJ_BASIC:
        // Truncation may be followed by cast to scalar
        From = HLSLImpCastToScalar(m_sema, From, SourceInfo.ShapeKind,
                                   SourceInfo.EltKind);
        break;
      default:
        DXASSERT(false, "otherwise, invalid casting sequence");
        break;
      }
    }
    // 3. Do component type conversion
    if (ICK_Identity != SCS.ComponentConversion) {
      CK = ImplicitConversionKindToCastKind(
          SCS.ComponentConversion, SourceInfo.EltKind, TargetInfo.EltKind);
      // Non-scalar targets use the per-component (HLSLCC) cast kinds.
      if (TargetInfo.ShapeKind != AR_TOBJ_BASIC)
        CK = ConvertToComponentCastKind(CK);
      if (CK_Invalid != CK) {
        From =
            m_sema
                ->ImpCastExprToType(From, targetType, CK, From->getValueKind(),
                                    /*BasePath=*/0, CCK)
                .get();
      }
    }
    break;
  }
  case ICK_Identity:
    // Nothing to do.
    break;
  default:
    DXASSERT(false,
             "PerformHLSLConversion: Invalid SCS.Second conversion kind");
  }
  return From;
}
// Collects type info for 'type' and normalizes its shape for conversion
// analysis: entirely-numeric aggregates become vectors under explicit
// conversion, and 1x1 vectors/matrices become scalars.
void HLSLExternalSource::GetConversionForm(QualType type,
                                           bool explicitConversion,
                                           ArTypeInfo *pTypeInfo) {
  CollectInfo(type, pTypeInfo);

  // The fxc implementation reported pTypeInfo->ShapeKind separately in an
  // output argument, but that value is only used for pointer conversions.
  switch (pTypeInfo->ShapeKind) {
  case AR_TOBJ_COMPOUND:
  case AR_TOBJ_ARRAY: {
    // When explicitly converting types complex aggregates can be treated
    // as vectors if they are entirely numeric (IsTypeNumeric also fills in
    // the total element count).
    const bool treatAsVector =
        explicitConversion && IsTypeNumeric(type, &pTypeInfo->uTotalElts);
    pTypeInfo->ShapeKind = treatAsVector ? AR_TOBJ_VECTOR : AR_TOBJ_COMPOUND;
    DXASSERT_NOMSG(pTypeInfo->uRows == 1);
    pTypeInfo->uCols = pTypeInfo->uTotalElts;
    break;
  }
  case AR_TOBJ_VECTOR:
  case AR_TOBJ_MATRIX:
    // Convert 1x1 types to scalars.
    if (pTypeInfo->uRows == 1 && pTypeInfo->uCols == 1)
      pTypeInfo->ShapeKind = AR_TOBJ_BASIC;
    break;
  default:
    // Only convertable shapekinds are relevant.
    break;
  }
}
// Checks whether the 'void' conversion rules apply to this source/target
// pair. Returns true ("applicable") when at least one side is void, with
// *allowed reporting whether the conversion is permitted:
//  - explicit: casting anything to void is allowed; void to non-void is not.
//  - implicit: only void to void is allowed.
// When neither side is void, returns false and leaves *allowed true.
static bool HandleVoidConversion(QualType source, QualType target,
                                 bool explicitConversion, bool *allowed) {
  DXASSERT_NOMSG(allowed != nullptr);
  const bool sourceIsVoid = source->isVoidType();
  const bool targetIsVoid = target->isVoidType();

  *allowed = true;
  if (!sourceIsVoid && !targetIsVoid)
    return false; // Neither side is void; these rules don't apply.

  if (explicitConversion)
    *allowed = targetIsVoid; // (void)x is fine; (T)void is not.
  else
    *allowed = sourceIsVoid && targetIsVoid; // only void -> void implicitly.
  return true;
}
// Determines the shape-level (dimension) conversion from SourceInfo to
// TargetInfo, setting 'Second' to the implicit-conversion step that performs
// it (identity, splat, truncation, scalar selection, or vector<->matrix
// conversion). Returns false when the shapes cannot be converted. Sets
// TYPE_CONVERSION_ELT_TRUNCATION in Remarks when elements are dropped.
static bool ConvertDimensions(ArTypeInfo TargetInfo, ArTypeInfo SourceInfo,
                              ImplicitConversionKind &Second,
                              TYPE_CONVERSION_REMARKS &Remarks) {
  // The rules for aggregate conversions are:
  // 1. A scalar can be replicated to any layout.
  // 2. Any type may be truncated to anything else with one component.
  // 3. A vector may be truncated to a smaller vector.
  // 4. A matrix may be truncated to a smaller matrix.
  // 5. The result of a vector and a matrix is:
  //    a. If the matrix has one row it's a vector-sized
  //       piece of the row.
  //    b. If the matrix has one column it's a vector-sized
  //       piece of the column.
  //    c. Otherwise the number of elements in the vector
  //       and matrix must match and the result is the vector.
  // 6. The result of a matrix and a vector is similar to #5.
  switch (TargetInfo.ShapeKind) {
  case AR_TOBJ_BASIC:
    switch (SourceInfo.ShapeKind) {
    case AR_TOBJ_BASIC:
      Second = ICK_Identity;
      break;
    case AR_TOBJ_VECTOR:
      if (1 < SourceInfo.uCols)
        Second = ICK_HLSLVector_Truncation;
      else
        Second = ICK_HLSLVector_Scalar;
      break;
    case AR_TOBJ_MATRIX:
      if (1 < SourceInfo.uRows * SourceInfo.uCols)
        Second = ICK_HLSLVector_Truncation;
      else
        Second = ICK_HLSLVector_Scalar;
      break;
    default:
      return false;
    }
    break;
  case AR_TOBJ_VECTOR:
    switch (SourceInfo.ShapeKind) {
    case AR_TOBJ_BASIC:
      // Conversions between scalars and aggregates are always supported.
      Second = ICK_HLSLVector_Splat;
      break;
    case AR_TOBJ_VECTOR:
      if (TargetInfo.uCols > SourceInfo.uCols) {
        // Only a single-element vector may widen (as a splat).
        if (SourceInfo.uCols == 1) {
          Second = ICK_HLSLVector_Splat;
        } else {
          return false;
        }
      } else if (TargetInfo.uCols < SourceInfo.uCols) {
        Second = ICK_HLSLVector_Truncation;
      } else {
        Second = ICK_Identity;
      }
      break;
    case AR_TOBJ_MATRIX: {
      UINT SourceComponents = SourceInfo.uRows * SourceInfo.uCols;
      if (1 == SourceComponents && TargetInfo.uCols != 1) {
        // splat: matrix<[..], 1, 1> -> vector<[..], O>
        Second = ICK_HLSLVector_Splat;
      } else if (1 == SourceInfo.uRows || 1 == SourceInfo.uCols) {
        // cases for: matrix<[..], M, N> -> vector<[..], O>, where N == 1 or M
        // == 1
        if (TargetInfo.uCols > SourceComponents) // illegal: O > N*M
          return false;
        else if (TargetInfo.uCols < SourceComponents) // truncation: O < N*M
          Second = ICK_HLSLVector_Truncation;
        else // equalivalent: O == N*M
          Second = ICK_HLSLVector_Conversion;
      } else if (TargetInfo.uCols == 1 && SourceComponents > 1) {
        Second = ICK_HLSLVector_Truncation;
      } else if (TargetInfo.uCols != SourceComponents) {
        // illegal: matrix<[..], M, N> -> vector<[..], O> where N != 1 and M !=
        // 1 and O != N*M
        return false;
      } else {
        // legal: matrix<[..], M, N> -> vector<[..], O> where N != 1 and M != 1
        // and O == N*M
        Second = ICK_HLSLVector_Conversion;
      }
      break;
    }
    default:
      return false;
    }
    break;
  case AR_TOBJ_MATRIX: {
    UINT TargetComponents = TargetInfo.uRows * TargetInfo.uCols;
    switch (SourceInfo.ShapeKind) {
    case AR_TOBJ_BASIC:
      // Conversions between scalars and aggregates are always supported.
      Second = ICK_HLSLVector_Splat;
      break;
    case AR_TOBJ_VECTOR: {
      // We can only convert vector to matrix in following cases:
      //  - splat from vector<...,1>
      //  - same number of components
      //  - one target component (truncate to scalar)
      //  - matrix has one row or one column, and fewer components (truncation)
      // Other cases disallowed even if implicitly convertable in two steps
      // (truncation+conversion).
      if (1 == SourceInfo.uCols && TargetComponents != 1) {
        // splat: vector<[..], 1> -> matrix<[..], M, N>
        Second = ICK_HLSLVector_Splat;
      } else if (TargetComponents == SourceInfo.uCols) {
        // legal: vector<[..], O> -> matrix<[..], M, N> where N != 1 and M != 1
        // and O == N*M
        Second = ICK_HLSLVector_Conversion;
      } else if (1 == TargetComponents) {
        // truncate to scalar: matrix<[..], 1, 1>
        Second = ICK_HLSLVector_Truncation;
      } else if ((1 == TargetInfo.uRows || 1 == TargetInfo.uCols) &&
                 TargetComponents < SourceInfo.uCols) {
        Second = ICK_HLSLVector_Truncation;
      } else {
        // illegal: change in components without going to or from scalar
        // equivalent
        return false;
      }
      break;
    }
    case AR_TOBJ_MATRIX: {
      UINT SourceComponents = SourceInfo.uRows * SourceInfo.uCols;
      if (1 == SourceComponents && TargetComponents != 1) {
        // splat: matrix<[..], 1, 1> -> matrix<[..], M, N>
        Second = ICK_HLSLVector_Splat;
      } else if (TargetComponents == 1) {
        Second = ICK_HLSLVector_Truncation;
      } else if (TargetInfo.uRows > SourceInfo.uRows ||
                 TargetInfo.uCols > SourceInfo.uCols) {
        // Growing either dimension (other than from 1x1) is illegal.
        return false;
      } else if (TargetInfo.uRows < SourceInfo.uRows ||
                 TargetInfo.uCols < SourceInfo.uCols) {
        Second = ICK_HLSLVector_Truncation;
      } else {
        Second = ICK_Identity;
      }
      break;
    }
    default:
      return false;
    }
    break;
  }
  case AR_TOBJ_STRING:
    if (SourceInfo.ShapeKind == AR_TOBJ_STRING) {
      Second = ICK_Identity;
      break;
    } else {
      return false;
    }
  default:
    return false;
  }

  if (TargetInfo.uTotalElts < SourceInfo.uTotalElts) {
    Remarks |= TYPE_CONVERSION_ELT_TRUNCATION;
  }
  return true;
}
// Determines the per-component (element type) conversion from SourceInfo to
// TargetInfo, setting ComponentConversion to the appropriate implicit
// conversion kind. Returns false for unsupported element conversions
// (unknown element kinds, enum->enum, anything involving enum class, or
// conversion to an enum). Notes precision loss in Remarks when the target
// has fewer bits than the source.
static bool ConvertComponent(ArTypeInfo TargetInfo, ArTypeInfo SourceInfo,
                             ImplicitConversionKind &ComponentConversion,
                             TYPE_CONVERSION_REMARKS &Remarks) {
  // Conversion to/from unknown types not supported.
  if (TargetInfo.EltKind == AR_BASIC_UNKNOWN ||
      SourceInfo.EltKind == AR_BASIC_UNKNOWN) {
    return false;
  }

  bool precisionLoss = false;
  if (GET_BASIC_BITS(TargetInfo.EltKind) != 0 &&
      GET_BASIC_BITS(TargetInfo.EltKind) < GET_BASIC_BITS(SourceInfo.EltKind)) {
    precisionLoss = true;
    Remarks |= TYPE_CONVERSION_PRECISION_LOSS;
  }

  // enum -> enum not allowed
  if ((SourceInfo.EltKind == AR_BASIC_ENUM &&
       TargetInfo.EltKind == AR_BASIC_ENUM) ||
      SourceInfo.EltKind == AR_BASIC_ENUM_CLASS ||
      TargetInfo.EltKind == AR_BASIC_ENUM_CLASS) {
    return false;
  }
  if (SourceInfo.EltKind != TargetInfo.EltKind) {
    if (IS_BASIC_BOOL(TargetInfo.EltKind)) {
      ComponentConversion = ICK_Boolean_Conversion;
    } else if (IS_BASIC_ENUM(TargetInfo.EltKind)) {
      // conversion to enum type not allowed
      return false;
    } else if (IS_BASIC_ENUM(SourceInfo.EltKind)) {
      // enum -> int/float
      ComponentConversion = ICK_Integral_Conversion;
    } else if (TargetInfo.EltKind == AR_OBJECT_STRING) {
      // Only a string literal may decay to a string.
      if (SourceInfo.EltKind == AR_OBJECT_STRING_LITERAL) {
        ComponentConversion = ICK_Array_To_Pointer;
      } else {
        return false;
      }
    } else {
      bool targetIsInt = IS_BASIC_AINT(TargetInfo.EltKind);
      if (IS_BASIC_AINT(SourceInfo.EltKind)) {
        if (targetIsInt) {
          ComponentConversion =
              precisionLoss ? ICK_Integral_Conversion : ICK_Integral_Promotion;
        } else {
          ComponentConversion = ICK_Floating_Integral;
        }
      } else if (IS_BASIC_FLOAT(SourceInfo.EltKind)) {
        if (targetIsInt) {
          ComponentConversion = ICK_Floating_Integral;
        } else {
          ComponentConversion =
              precisionLoss ? ICK_Floating_Conversion : ICK_Floating_Promotion;
        }
      } else if (IS_BASIC_BOOL(SourceInfo.EltKind)) {
        // bool -> int uses an integral conversion; bool -> float is handled
        // as a floating/integral conversion.
        if (targetIsInt)
          ComponentConversion = ICK_Integral_Conversion;
        else
          ComponentConversion = ICK_Floating_Integral;
      }
    }
  } else if (TargetInfo.EltTy != SourceInfo.EltTy) {
    // Types are identical in HLSL, but not identical in clang,
    // such as unsigned long vs. unsigned int.
    // Add conversion based on the type.
    if (IS_BASIC_AINT(TargetInfo.EltKind))
      ComponentConversion = ICK_Integral_Conversion;
    else if (IS_BASIC_FLOAT(TargetInfo.EltKind))
      ComponentConversion = ICK_Floating_Conversion;
    else {
      DXASSERT(false, "unhandled case for conversion that's identical in HLSL, "
                      "but not in clang");
      return false;
    }
  }

  return true;
}
// Determines whether sourceExpr can be converted to the target type, either
// implicitly or (when explicitConversion is set) via an explicit cast.
// On success, optionally fills in *standard with the conversion sequence and
// *remarks with notes (truncation, precision loss, etc.).
// Fix: removed a redundant nested `if (sourceRT && targetRT)` check that
// re-tested pointers already proven non-null by the enclosing condition.
bool HLSLExternalSource::CanConvert(SourceLocation loc, Expr *sourceExpr,
                                    QualType target, bool explicitConversion,
                                    TYPE_CONVERSION_REMARKS *remarks,
                                    StandardConversionSequence *standard) {
  UINT uTSize, uSSize;
  bool SourceIsAggregate,
      TargetIsAggregate; // Early declarations due to gotos below
  DXASSERT_NOMSG(sourceExpr != nullptr);
  DXASSERT_NOMSG(!target.isNull());
  // Implements the semantics of ArType::CanConvertTo.
  TYPE_CONVERSION_FLAGS Flags =
      explicitConversion ? TYPE_CONVERSION_EXPLICIT : TYPE_CONVERSION_DEFAULT;
  TYPE_CONVERSION_REMARKS Remarks = TYPE_CONVERSION_NONE;
  QualType source = sourceExpr->getType();
  // Cannot cast function type.
  if (source->isFunctionType())
    return false;
  // Convert to an r-value to begin with, with an exception for strings
  // since they are not first-class values and we want to preserve them as
  // literals.
  bool needsLValueToRValue =
      sourceExpr->isLValue() && !target->isLValueReferenceType() &&
      sourceExpr->getStmtClass() != Expr::StringLiteralClass;
  bool targetRef = target->isReferenceType();
  bool TargetIsAnonymous = false;
  // Initialize the output standard sequence if available.
  if (standard != nullptr) {
    // Set up a no-op conversion, other than lvalue to rvalue - HLSL does not
    // support references.
    standard->setAsIdentityConversion();
    if (needsLValueToRValue) {
      standard->First = ICK_Lvalue_To_Rvalue;
    }
    standard->setFromType(source);
    standard->setAllToTypes(target);
  }
  source = GetStructuralForm(source);
  target = GetStructuralForm(target);
  // Temporary conversion kind tracking which will be used/fixed up at the end
  ImplicitConversionKind Second = ICK_Identity;
  ImplicitConversionKind ComponentConversion = ICK_Identity;
  // Identical types require no conversion.
  if (source == target) {
    Remarks = TYPE_CONVERSION_IDENTICAL;
    goto lSuccess;
  }
  // Trivial cases for void.
  bool allowed;
  if (HandleVoidConversion(source, target, explicitConversion, &allowed)) {
    if (allowed) {
      Remarks = target->isVoidType() ? TYPE_CONVERSION_TO_VOID : Remarks;
      goto lSuccess;
    } else {
      return false;
    }
  }
  ArTypeInfo TargetInfo, SourceInfo;
  CollectInfo(target, &TargetInfo);
  CollectInfo(source, &SourceInfo);
  uTSize = TargetInfo.uTotalElts;
  uSSize = SourceInfo.uTotalElts;
  // TODO: TYPE_CONVERSION_BY_REFERENCE does not seem possible here
  // are we missing cases?
  if ((Flags & TYPE_CONVERSION_BY_REFERENCE) != 0 && uTSize != uSSize) {
    return false;
  }
  // Cast cbuffer to its result value.
  if ((SourceInfo.EltKind == AR_OBJECT_CONSTANT_BUFFER ||
       SourceInfo.EltKind == AR_OBJECT_TEXTURE_BUFFER) &&
      TargetInfo.ShapeKind == AR_TOBJ_COMPOUND) {
    if (standard)
      standard->Second = ICK_Flat_Conversion;
    return hlsl::GetHLSLResourceResultType(source) == target;
  }
  // Structure cast.
  SourceIsAggregate = SourceInfo.ShapeKind == AR_TOBJ_COMPOUND ||
                      SourceInfo.ShapeKind == AR_TOBJ_ARRAY;
  TargetIsAggregate = TargetInfo.ShapeKind == AR_TOBJ_COMPOUND ||
                      TargetInfo.ShapeKind == AR_TOBJ_ARRAY;
  if (SourceIsAggregate || TargetIsAggregate) {
    // For implicit conversions, FXC treats arrays the same as structures
    // and rejects conversions between them and numeric types
    if (!explicitConversion && SourceIsAggregate != TargetIsAggregate) {
      return false;
    }
    // Structure to structure cases
    const RecordType *targetRT = dyn_cast<RecordType>(target);
    const RecordType *sourceRT = dyn_cast<RecordType>(source);
    if (targetRT && sourceRT) {
      RecordDecl *targetRD = targetRT->getDecl();
      RecordDecl *sourceRD = sourceRT->getDecl();
      // Identical records convert trivially.
      if (targetRD == sourceRD) {
        Second = ICK_Flat_Conversion;
        goto lSuccess;
      }
      // Derived-to-base is always allowed.
      const CXXRecordDecl *targetCXXRD = dyn_cast<CXXRecordDecl>(targetRD);
      const CXXRecordDecl *sourceCXXRD = dyn_cast<CXXRecordDecl>(sourceRD);
      if (targetCXXRD && sourceCXXRD &&
          sourceCXXRD->isDerivedFrom(targetCXXRD)) {
        Second = ICK_HLSL_Derived_To_Base;
        goto lSuccess;
      }
      // There is no way to cast to anonymous structures. So we allow legacy
      // HLSL implicit casts to matching anonymous structure types.
      TargetIsAnonymous = !targetRD->hasNameForLinkage();
    }
    // Handle explicit splats from single element numerical types (scalars,
    // vector1s and matrix1x1s) to aggregate types.
    if (explicitConversion) {
      const BuiltinType *sourceSingleElementBuiltinType =
          source->getAs<BuiltinType>();
      if (sourceSingleElementBuiltinType == nullptr &&
          hlsl::IsHLSLVecMatType(source) &&
          hlsl::GetElementCount(source) == 1) {
        sourceSingleElementBuiltinType =
            hlsl::GetElementTypeOrType(source)->getAs<BuiltinType>();
      }
      // We can only splat to target types that do not contain object/resource
      // types
      if (sourceSingleElementBuiltinType != nullptr &&
          hlsl::IsHLSLNumericOrAggregateOfNumericType(target)) {
        BuiltinType::Kind kind = sourceSingleElementBuiltinType->getKind();
        switch (kind) {
        case BuiltinType::Kind::UInt:
        case BuiltinType::Kind::Int:
        case BuiltinType::Kind::Float:
        case BuiltinType::Kind::LitFloat:
        case BuiltinType::Kind::LitInt:
          Second = ICK_Flat_Conversion;
          goto lSuccess;
        default:
          // Only flat conversion kinds are relevant.
          break;
        }
      }
    } else if (m_sema->getLangOpts().HLSLVersion >= hlsl::LangStd::v2021 &&
               (SourceInfo.ShapeKind == AR_TOBJ_COMPOUND ||
                TargetInfo.ShapeKind == AR_TOBJ_COMPOUND) &&
               !TargetIsAnonymous) {
      // Not explicit, either are struct/class, not derived-to-base,
      // target is named (so explicit cast is possible),
      // and using strict UDT rules: disallow this implicit cast.
      return false;
    }
    FlattenedTypeIterator::ComparisonResult result =
        FlattenedTypeIterator::CompareTypes(*this, loc, loc, target, source);
    if (!result.CanConvertElements) {
      return false;
    }
    // Only allow scalar to compound or array with explicit cast
    if (result.IsConvertibleAndLeftLonger()) {
      if (!explicitConversion || SourceInfo.ShapeKind != AR_TOBJ_SCALAR) {
        return false;
      }
    }
    // Assignment is valid if elements are exactly the same in type and size; if
    // an explicit conversion is being done, we accept converted elements and a
    // longer right-hand sequence.
    if (!explicitConversion &&
        (!result.AreElementsEqual || result.IsRightLonger())) {
      return false;
    }
    Second = ICK_Flat_Conversion;
    goto lSuccess;
  }
  // Cast from Resource to Object types.
  if (SourceInfo.EltKind == AR_OBJECT_HEAP_RESOURCE ||
      SourceInfo.EltKind == AR_OBJECT_HEAP_SAMPLER) {
    // TODO: skip things like PointStream.
    if (TargetInfo.ShapeKind == AR_TOBJ_OBJECT) {
      Second = ICK_Flat_Conversion;
      goto lSuccess;
    }
  }
  // Convert scalar/vector/matrix dimensions
  if (!ConvertDimensions(TargetInfo, SourceInfo, Second, Remarks))
    return false;
  // Convert component type
  if (!ConvertComponent(TargetInfo, SourceInfo, ComponentConversion, Remarks))
    return false;
lSuccess:
  if (standard) {
    if (sourceExpr->isLValue()) {
      if (needsLValueToRValue) {
        // We don't need LValueToRValue cast before casting a derived object
        // to its base.
        if (Second == ICK_HLSL_Derived_To_Base) {
          standard->First = ICK_Identity;
        } else {
          standard->First = ICK_Lvalue_To_Rvalue;
        }
      } else {
        switch (Second) {
        case ICK_NoReturn_Adjustment:
        case ICK_Vector_Conversion:
        case ICK_Vector_Splat:
          DXASSERT(false,
                   "We shouldn't be producing these implicit conversion kinds");
          break;
        case ICK_Flat_Conversion:
        case ICK_HLSLVector_Splat:
          standard->First = ICK_Lvalue_To_Rvalue;
          break;
        default:
          // Only flat and splat conversions handled.
          break;
        }
        switch (ComponentConversion) {
        case ICK_Integral_Promotion:
        case ICK_Integral_Conversion:
        case ICK_Floating_Promotion:
        case ICK_Floating_Conversion:
        case ICK_Floating_Integral:
        case ICK_Boolean_Conversion:
          standard->First = ICK_Lvalue_To_Rvalue;
          break;
        case ICK_Array_To_Pointer:
          standard->First = ICK_Array_To_Pointer;
          break;
        default:
          // Only potential assignments above covered.
          break;
        }
      }
    }
    // Finally fix up the cases for scalar->scalar component conversion, and
    // identity vector/matrix component conversion
    if (ICK_Identity != ComponentConversion) {
      if (Second == ICK_Identity) {
        if (TargetInfo.ShapeKind == AR_TOBJ_BASIC) {
          // Scalar to scalar type conversion, use normal mechanism (Second)
          Second = ComponentConversion;
          ComponentConversion = ICK_Identity;
        } else if (TargetInfo.ShapeKind != AR_TOBJ_STRING) {
          // vector or matrix dimensions are not being changed, but component
          // type is being converted, so change Second to signal the conversion
          Second = ICK_HLSLVector_Conversion;
        }
      }
    }
    standard->Second = Second;
    standard->ComponentConversion = ComponentConversion;
    // For conversion which change to RValue but targeting reference type
    // Hold the conversion to codeGen
    if (targetRef && standard->First == ICK_Lvalue_To_Rvalue) {
      standard->First = ICK_Identity;
      standard->Second = ICK_Identity;
    }
  }
  AssignOpt(Remarks, remarks);
  return true;
}
// Verifies that an operand's element/object kinds satisfy an operator's
// numeric or integral requirements, diagnosing at 'loc' and returning false
// when they do not. Dependent kinds are accepted unconditionally since they
// are resolved during template instantiation.
bool HLSLExternalSource::ValidateTypeRequirements(SourceLocation loc,
                                                  ArBasicKind elementKind,
                                                  ArTypeObjectKind objectKind,
                                                  bool requiresIntegrals,
                                                  bool requiresNumerics) {
  // Defer checking of dependent types.
  if (objectKind == AR_TOBJ_DEPENDENT || elementKind == AR_BASIC_DEPENDENT)
    return true;

  // No numeric constraints to enforce.
  if (!requiresIntegrals && !requiresNumerics)
    return true;

  // Numeric and integral operators only apply to primitive aggregates
  // (scalars, vectors, matrices).
  if (!IsObjectKindPrimitiveAggregate(objectKind)) {
    m_sema->Diag(loc, diag::err_hlsl_requires_non_aggregate);
    return false;
  }

  if (requiresIntegrals) {
    if (IsBasicKindIntegral(elementKind))
      return true;
    m_sema->Diag(loc, diag::err_hlsl_requires_int_or_uint);
    return false;
  }

  // Only requiresNumerics remains at this point.
  if (IsBasicKindNumeric(elementKind))
    return true;
  m_sema->Diag(loc, diag::err_hlsl_requires_numeric);
  return false;
}
// Rejects operand types that can never participate in arithmetic/logical
// operators: built-in object types (resources, samplers, ...) and compound
// user-defined types. Emits a diagnostic for each violation and returns
// whether the operand is acceptable.
bool HLSLExternalSource::ValidatePrimitiveTypeForOperand(
    SourceLocation loc, QualType type, ArTypeObjectKind kind) {
  bool valid = true;
  if (IsBuiltInObjectType(type)) {
    valid = false;
    m_sema->Diag(loc, diag::err_hlsl_unsupported_builtin_op) << type;
  }
  if (kind == AR_TOBJ_COMPOUND) {
    valid = false;
    m_sema->Diag(loc, diag::err_hlsl_unsupported_struct_op) << type;
  }
  return valid;
}
// Chooses a common shape for two operand types by attempting dimension
// conversion in both directions, preferring the side with more total
// elements (left on ties). Returns E_FAIL when neither direction is a legal
// dimension conversion.
HRESULT HLSLExternalSource::CombineDimensions(
    QualType leftType, QualType rightType, QualType *resultType,
    ImplicitConversionKind &convKind, TYPE_CONVERSION_REMARKS &Remarks) {
  ArTypeInfo leftInfo, rightInfo;
  CollectInfo(leftType, &leftInfo);
  CollectInfo(rightType, &rightInfo);

  // Determine preference once, then try preferred-as-target first and fall
  // back to the other direction.
  const bool preferLeft = leftInfo.uTotalElts >= rightInfo.uTotalElts;
  ArTypeInfo &firstInfo = preferLeft ? leftInfo : rightInfo;
  ArTypeInfo &secondInfo = preferLeft ? rightInfo : leftInfo;
  QualType firstType = preferLeft ? leftType : rightType;
  QualType secondType = preferLeft ? rightType : leftType;

  if (ConvertDimensions(firstInfo, secondInfo, convKind, Remarks))
    *resultType = firstType;
  else if (ConvertDimensions(secondInfo, firstInfo, convKind, Remarks))
    *resultType = secondType;
  else
    return E_FAIL;
  return S_OK;
}
/// <summary>Validates and adjusts operands for the specified binary
/// operator.</summary> <param name="OpLoc">Source location for
/// operator.</param> <param name="Opc">Kind of binary operator.</param> <param
/// name="LHS">Left-hand-side expression, possibly updated by this
/// function.</param> <param name="RHS">Right-hand-side expression, possibly
/// updated by this function.</param> <param name="ResultTy">Result type for
/// operator expression.</param> <param name="CompLHSTy">Type of LHS after
/// promotions for computation.</param> <param name="CompResultTy">Type of
/// computation result.</param>
void HLSLExternalSource::CheckBinOpForHLSL(SourceLocation OpLoc,
                                           BinaryOperatorKind Opc,
                                           ExprResult &LHS, ExprResult &RHS,
                                           QualType &ResultTy,
                                           QualType &CompLHSTy,
                                           QualType &CompResultTy) {
  // At the start, none of the output types should be valid.
  DXASSERT_NOMSG(ResultTy.isNull());
  DXASSERT_NOMSG(CompLHSTy.isNull());
  DXASSERT_NOMSG(CompResultTy.isNull());
  // Resolve delayed typo corrections before inspecting operand types.
  LHS = m_sema->CorrectDelayedTyposInExpr(LHS);
  RHS = m_sema->CorrectDelayedTyposInExpr(RHS);
  // If either expression is invalid to begin with, propagate that.
  if (LHS.isInvalid() || RHS.isInvalid()) {
    return;
  }
  // If there is a dependent type we will use that as the result type
  if (LHS.get()->getType()->isDependentType() ||
      RHS.get()->getType()->isDependentType()) {
    if (LHS.get()->getType()->isDependentType())
      ResultTy = LHS.get()->getType();
    else
      ResultTy = RHS.get()->getType();
    if (BinaryOperatorKindIsCompoundAssignment(Opc))
      CompResultTy = ResultTy;
    return;
  }
  // TODO: re-review the Check** in Clang and add equivalent diagnostics if/as
  // needed, possibly after conversions
  // Handle Assign and Comma operators and return
  switch (Opc) {
  case BO_AddAssign:
  case BO_AndAssign:
  case BO_DivAssign:
  case BO_MulAssign:
  case BO_RemAssign:
  case BO_ShlAssign:
  case BO_ShrAssign:
  case BO_SubAssign:
  case BO_OrAssign:
  case BO_XorAssign: {
    // Compound assignment requires a modifiable lvalue on the left; the rest
    // of the operator handling continues below the switch.
    extern bool CheckForModifiableLvalue(Expr * E, SourceLocation Loc,
                                         Sema & S);
    if (CheckForModifiableLvalue(LHS.get(), OpLoc, *m_sema)) {
      return;
    }
  } break;
  case BO_Assign: {
    // Plain assignment is fully handled here: check the lvalue, validate the
    // RHS -> LHS conversion, perform it, and return.
    extern bool CheckForModifiableLvalue(Expr * E, SourceLocation Loc,
                                         Sema & S);
    if (CheckForModifiableLvalue(LHS.get(), OpLoc, *m_sema)) {
      return;
    }
    bool complained = false;
    ResultTy = LHS.get()->getType();
    if (m_sema->DiagnoseAssignmentResult(
            Sema::AssignConvertType::Compatible, OpLoc, ResultTy,
            RHS.get()->getType(), RHS.get(),
            Sema::AssignmentAction::AA_Assigning, &complained)) {
      return;
    }
    StandardConversionSequence standard;
    if (!ValidateCast(OpLoc, RHS.get(), ResultTy, ExplicitConversionFalse,
                      SuppressWarningsFalse, SuppressErrorsFalse, &standard)) {
      return;
    }
    if (RHS.get()->isLValue()) {
      standard.First = ICK_Lvalue_To_Rvalue;
    }
    RHS = m_sema->PerformImplicitConversion(RHS.get(), ResultTy, standard,
                                            Sema::AA_Converting,
                                            Sema::CCK_ImplicitConversion);
    return;
  } break;
  case BO_Comma:
    // C performs conversions, C++ doesn't but still checks for type
    // completeness. There are also diagnostics for improper comma use. In the
    // HLSL case these cases don't apply or simply aren't surfaced.
    ResultTy = RHS.get()->getType();
    return;
  default:
    // Only assign and comma operations handled.
    break;
  }
  // Leave this diagnostic for last to emulate fxc behavior.
  bool isCompoundAssignment = BinaryOperatorKindIsCompoundAssignment(Opc);
  bool unsupportedBoolLvalue =
      isCompoundAssignment &&
      !BinaryOperatorKindIsCompoundAssignmentForBool(Opc) &&
      GetTypeElementKind(LHS.get()->getType()) == AR_BASIC_BOOL;
  // Turn operand inputs into r-values.
  // Note: the original (possibly lvalue) LHS type is remembered because it is
  // the final result type of a compound assignment.
  QualType LHSTypeAsPossibleLValue = LHS.get()->getType();
  if (!isCompoundAssignment) {
    LHS = m_sema->DefaultLvalueConversion(LHS.get());
  }
  RHS = m_sema->DefaultLvalueConversion(RHS.get());
  if (LHS.isInvalid() || RHS.isInvalid()) {
    return;
  }
  // Gather type info
  QualType leftType = GetStructuralForm(LHS.get()->getType());
  QualType rightType = GetStructuralForm(RHS.get()->getType());
  ArBasicKind leftElementKind = GetTypeElementKind(leftType);
  ArBasicKind rightElementKind = GetTypeElementKind(rightType);
  ArTypeObjectKind leftObjectKind = GetTypeObjectKind(leftType);
  ArTypeObjectKind rightObjectKind = GetTypeObjectKind(rightType);
  // Validate type requirements
  {
    bool requiresNumerics = BinaryOperatorKindRequiresNumeric(Opc);
    bool requiresIntegrals = BinaryOperatorKindRequiresIntegrals(Opc);
    if (!ValidateTypeRequirements(OpLoc, leftElementKind, leftObjectKind,
                                  requiresIntegrals, requiresNumerics)) {
      return;
    }
    if (!ValidateTypeRequirements(OpLoc, rightElementKind, rightObjectKind,
                                  requiresIntegrals, requiresNumerics)) {
      return;
    }
  }
  if (unsupportedBoolLvalue) {
    m_sema->Diag(OpLoc, diag::err_hlsl_unsupported_bool_lvalue_op);
    return;
  }
  // We don't support binary operators on built-in object types other than
  // assignment or commas.
  {
    DXASSERT(Opc != BO_Assign,
             "otherwise this wasn't handled as an early exit");
    DXASSERT(Opc != BO_Comma, "otherwise this wasn't handled as an early exit");
    bool isValid;
    isValid = ValidatePrimitiveTypeForOperand(OpLoc, leftType, leftObjectKind);
    if (leftType != rightType &&
        !ValidatePrimitiveTypeForOperand(OpLoc, rightType, rightObjectKind)) {
      isValid = false;
    }
    if (!isValid) {
      return;
    }
  }
  // We don't support equality comparisons on arrays.
  if ((Opc == BO_EQ || Opc == BO_NE) &&
      (leftObjectKind == AR_TOBJ_ARRAY || rightObjectKind == AR_TOBJ_ARRAY)) {
    m_sema->Diag(OpLoc, diag::err_hlsl_unsupported_array_equality_op);
    return;
  }
  // Combine element types for computation.
  ArBasicKind resultElementKind = leftElementKind;
  {
    if (BinaryOperatorKindIsLogical(Opc)) {
      if (m_sema->getLangOpts().HLSLVersion >= hlsl::LangStd::v2021) {
        // Only allow scalar types for logical operators &&, ||
        // In 2021+ the vector forms must use and()/or(); the diagnostic
        // below builds a fix-it suggesting that rewrite.
        if (leftObjectKind != ArTypeObjectKind::AR_TOBJ_BASIC ||
            rightObjectKind != ArTypeObjectKind::AR_TOBJ_BASIC) {
          SmallVector<char, 256> Buff;
          llvm::raw_svector_ostream OS(Buff);
          PrintingPolicy PP(m_sema->getLangOpts());
          if (Opc == BinaryOperatorKind::BO_LAnd) {
            OS << "and(";
          } else if (Opc == BinaryOperatorKind::BO_LOr) {
            OS << "or(";
          }
          LHS.get()->printPretty(OS, nullptr, PP);
          OS << ", ";
          RHS.get()->printPretty(OS, nullptr, PP);
          OS << ")";
          SourceRange FullRange =
              SourceRange(LHS.get()->getLocStart(), RHS.get()->getLocEnd());
          m_sema->Diag(OpLoc, diag::err_hlsl_logical_binop_scalar)
              << (Opc == BinaryOperatorKind::BO_LOr)
              << FixItHint::CreateReplacement(FullRange, OS.str());
          return;
        }
      }
      resultElementKind = AR_BASIC_BOOL;
    } else if (!BinaryOperatorKindIsBitwiseShift(Opc) &&
               leftElementKind != rightElementKind) {
      if (!CombineBasicTypes(leftElementKind, rightElementKind,
                             &resultElementKind)) {
        m_sema->Diag(OpLoc, diag::err_hlsl_type_mismatch);
        return;
      }
    } else if (BinaryOperatorKindIsBitwiseShift(Opc) &&
               (resultElementKind == AR_BASIC_LITERAL_INT ||
                resultElementKind == AR_BASIC_LITERAL_FLOAT) &&
               rightElementKind != AR_BASIC_LITERAL_INT &&
               rightElementKind != AR_BASIC_LITERAL_FLOAT) {
      // For case like 1<<x.
      m_sema->Diag(OpLoc, diag::warn_hlsl_ambiguous_literal_shift);
      if (rightElementKind == AR_BASIC_UINT32)
        resultElementKind = AR_BASIC_UINT32;
      else
        resultElementKind = AR_BASIC_INT32;
    } else if (resultElementKind == AR_BASIC_BOOL &&
               BinaryOperatorKindRequiresBoolAsNumeric(Opc)) {
      resultElementKind = AR_BASIC_INT32;
    }
    // The following combines the selected/combined element kind above with
    // the dimensions that are legal to implicitly cast. This means that
    // element kind may be taken from one side and the dimensions from the
    // other.
    if (!isCompoundAssignment) {
      // Legal dimension combinations are identical, splat, and truncation.
      // ResultTy will be set to whichever type can be converted to, if legal,
      // with preference for leftType if both are possible.
      if (FAILED(CombineDimensions(LHS.get()->getType(), RHS.get()->getType(),
                                   &ResultTy))) {
        // Just choose leftType, and allow ValidateCast to catch this later
        ResultTy = LHS.get()->getType();
      }
    } else {
      ResultTy = LHS.get()->getType();
    }
    // Here, element kind is combined with dimensions for computation type, if
    // different.
    if (resultElementKind != GetTypeElementKind(ResultTy)) {
      UINT rowCount, colCount;
      GetRowsAndColsForAny(ResultTy, rowCount, colCount);
      ResultTy =
          NewSimpleAggregateType(GetTypeObjectKind(ResultTy), resultElementKind,
                                 0, rowCount, colCount);
    }
  }
  // Tracks a failed RHS -> computation-type cast; this is tolerated only for
  // compound assignment, where the authoritative check is the RHS -> LHS
  // cast performed below.
  bool bFailedFirstRHSCast = false;
  // Perform necessary conversion sequences for LHS and RHS
  if (RHS.get()->getType() != ResultTy) {
    StandardConversionSequence standard;
    // Suppress type narrowing or truncation warnings for RHS on bitwise shift,
    // since we only care about the LHS type.
    bool bSuppressWarnings = BinaryOperatorKindIsBitwiseShift(Opc);
    // Suppress errors on compound assignment, since we will validate the cast
    // to the final type later.
    bool bSuppressErrors = isCompoundAssignment;
    // Suppress errors if either operand has a dependent type.
    if (RHS.get()->getType()->isDependentType() || ResultTy->isDependentType())
      bSuppressErrors = true;
    // If compound assignment, suppress errors until later, but report warning
    // (vector truncation/type narrowing) here.
    if (ValidateCast(SourceLocation(), RHS.get(), ResultTy,
                     ExplicitConversionFalse, bSuppressWarnings,
                     bSuppressErrors, &standard)) {
      if (standard.First != ICK_Identity || !standard.isIdentityConversion())
        RHS = m_sema->PerformImplicitConversion(RHS.get(), ResultTy, standard,
                                                Sema::AA_Casting,
                                                Sema::CCK_ImplicitConversion);
    } else if (!isCompoundAssignment) {
      // If compound assignment, validate cast from RHS directly to LHS later,
      // otherwise, fail here.
      ResultTy = QualType();
      return;
    } else {
      bFailedFirstRHSCast = true;
    }
  }
  if (isCompoundAssignment) {
    CompResultTy = ResultTy;
    CompLHSTy = CompResultTy;
    // For a compound operation, C/C++ promotes both types, performs the
    // arithmetic, then converts to the result type and then assigns.
    //
    // So int + float promotes the int to float, does a floating-point addition,
    // then the result becomes and int and is assigned.
    ResultTy = LHSTypeAsPossibleLValue;
    // Validate remainder of cast from computation type to final result type
    StandardConversionSequence standard;
    if (!ValidateCast(SourceLocation(), RHS.get(), ResultTy,
                      ExplicitConversionFalse, SuppressWarningsFalse,
                      SuppressErrorsFalse, &standard)) {
      ResultTy = QualType();
      return;
    }
    DXASSERT_LOCALVAR(bFailedFirstRHSCast, !bFailedFirstRHSCast,
                      "otherwise, hit compound assign case that failed RHS -> "
                      "CompResultTy cast, but succeeded RHS -> LHS cast.");
  } else if (LHS.get()->getType() != ResultTy) {
    StandardConversionSequence standard;
    if (ValidateCast(SourceLocation(), LHS.get(), ResultTy,
                     ExplicitConversionFalse, SuppressWarningsFalse,
                     SuppressErrorsFalse, &standard)) {
      if (standard.First != ICK_Identity || !standard.isIdentityConversion())
        LHS = m_sema->PerformImplicitConversion(LHS.get(), ResultTy, standard,
                                                Sema::AA_Casting,
                                                Sema::CCK_ImplicitConversion);
    } else {
      ResultTy = QualType();
      return;
    }
  }
  if (BinaryOperatorKindIsComparison(Opc) || BinaryOperatorKindIsLogical(Opc)) {
    DXASSERT(!isCompoundAssignment,
             "otherwise binary lookup tables are inconsistent");
    // Return bool vector for vector types.
    if (IsVectorType(m_sema, ResultTy)) {
      UINT rowCount, colCount;
      GetRowsAndColsForAny(ResultTy, rowCount, colCount);
      ResultTy =
          LookupVectorType(HLSLScalarType::HLSLScalarType_bool, colCount);
    } else if (IsMatrixType(m_sema, ResultTy)) {
      UINT rowCount, colCount;
      GetRowsAndColsForAny(ResultTy, rowCount, colCount);
      ResultTy = LookupMatrixType(HLSLScalarType::HLSLScalarType_bool, rowCount,
                                  colCount);
    } else
      ResultTy = m_context->BoolTy.withConst();
  }
  // Run diagnostics. Some are emulating checks that occur in IR emission in
  // fxc.
  if (Opc == BO_Div || Opc == BO_DivAssign || Opc == BO_Rem ||
      Opc == BO_RemAssign) {
    if (IsBasicKindIntMinPrecision(resultElementKind)) {
      m_sema->Diag(OpLoc, diag::err_hlsl_unsupported_div_minint);
      return;
    }
  }
  if (Opc == BO_Rem || Opc == BO_RemAssign) {
    if (resultElementKind == AR_BASIC_FLOAT64) {
      m_sema->Diag(OpLoc, diag::err_hlsl_unsupported_mod_double);
      return;
    }
  }
}
/// <summary>Validates and adjusts operands for the specified unary
/// operator.</summary> <param name="OpLoc">Source location for
/// operator.</param> <param name="Opc">Kind of operator.</param> <param
/// name="InputExpr">Input expression to the operator.</param> <param
/// name="VK">Value kind for resulting expression.</param> <param
/// name="OK">Object kind for resulting expression.</param> <returns>The result
/// type for the expression.</returns>
QualType HLSLExternalSource::CheckUnaryOpForHLSL(SourceLocation OpLoc,
                                                 UnaryOperatorKind Opc,
                                                 ExprResult &InputExpr,
                                                 ExprValueKind &VK,
                                                 ExprObjectKind &OK) {
  InputExpr = m_sema->CorrectDelayedTyposInExpr(InputExpr);
  if (InputExpr.isInvalid())
    return QualType();
  // Reject unsupported operators * and &
  switch (Opc) {
  case UO_AddrOf:
  case UO_Deref:
    m_sema->Diag(OpLoc, diag::err_hlsl_unsupported_operator);
    return QualType();
  default:
    // Only * and & covered.
    break;
  }
  Expr *expr = InputExpr.get();
  if (expr->isTypeDependent())
    return m_context->DependentTy;
  ArBasicKind elementKind = GetTypeElementKind(expr->getType());
  if (UnaryOperatorKindRequiresModifiableValue(Opc)) {
    // ++/-- on enums is rejected; other modifiable-value operators require a
    // modifiable lvalue.
    if (elementKind == AR_BASIC_ENUM) {
      bool isInc = IsIncrementOp(Opc);
      m_sema->Diag(OpLoc, diag::err_increment_decrement_enum)
          << isInc << expr->getType();
      return QualType();
    }
    extern bool CheckForModifiableLvalue(Expr * E, SourceLocation Loc,
                                         Sema & S);
    if (CheckForModifiableLvalue(expr, OpLoc, *m_sema))
      return QualType();
  } else {
    InputExpr = m_sema->DefaultLvalueConversion(InputExpr.get()).get();
    if (InputExpr.isInvalid())
      return QualType();
  }
  if (UnaryOperatorKindDisallowsBool(Opc) && IS_BASIC_BOOL(elementKind)) {
    m_sema->Diag(OpLoc, diag::err_hlsl_unsupported_bool_lvalue_op);
    return QualType();
  }
  if (UnaryOperatorKindRequiresBoolAsNumeric(Opc)) {
    // Operators like ~ treat bool as int; refresh cached kind after promotion.
    InputExpr = PromoteToIntIfBool(InputExpr);
    expr = InputExpr.get();
    elementKind = GetTypeElementKind(expr->getType());
  }
  ArTypeObjectKind objectKind = GetTypeObjectKind(expr->getType());
  bool requiresIntegrals = UnaryOperatorKindRequiresIntegrals(Opc);
  bool requiresNumerics = UnaryOperatorKindRequiresNumerics(Opc);
  if (!ValidateTypeRequirements(OpLoc, elementKind, objectKind,
                                requiresIntegrals, requiresNumerics)) {
    return QualType();
  }
  if (Opc == UnaryOperatorKind::UO_Minus) {
    // Fix: test the operand's element kind, not the operator kind, so the
    // warning actually fires when negating an unsigned value.
    if (IS_BASIC_UINT(elementKind)) {
      m_sema->Diag(OpLoc, diag::warn_hlsl_unary_negate_unsigned);
    }
  }
  // By default, the result type is the operand type.
  // Logical not however should cast to a bool.
  QualType resultType = expr->getType();
  if (Opc == UnaryOperatorKind::UO_LNot) {
    UINT rowCount, colCount;
    GetRowsAndColsForAny(expr->getType(), rowCount, colCount);
    resultType = NewSimpleAggregateType(objectKind, AR_BASIC_BOOL,
                                        AR_QUAL_CONST, rowCount, colCount);
    StandardConversionSequence standard;
    if (!CanConvert(OpLoc, expr, resultType, false, nullptr, &standard)) {
      m_sema->Diag(OpLoc, diag::err_hlsl_requires_bool_for_not);
      return QualType();
    }
    // Cast argument.
    ExprResult result = m_sema->PerformImplicitConversion(
        InputExpr.get(), resultType, standard, Sema::AA_Casting,
        Sema::CCK_ImplicitConversion);
    if (result.isUsable()) {
      InputExpr = result.get();
    }
  }
  // Prefix increments/decrements yield an lvalue of the operand type;
  // everything else yields an rvalue with qualifiers stripped.
  bool isPrefix = Opc == UO_PreInc || Opc == UO_PreDec;
  if (isPrefix) {
    VK = VK_LValue;
    return resultType;
  } else {
    VK = VK_RValue;
    return resultType.getUnqualifiedType();
  }
}
// Type-checks the HLSL conditional operator (cond ? lhs : rhs), converting
// the condition to bool and both branches to a combined result type.
// Returns the result type, or a null QualType on error.
// Fix: the "both operands are objects" test compared leftObjectKind against
// AR_TOBJ_OBJECT twice (copy-paste defect); the second comparison must check
// rightObjectKind, per the identical-type check that follows.
clang::QualType
HLSLExternalSource::CheckVectorConditional(ExprResult &Cond, ExprResult &LHS,
                                           ExprResult &RHS,
                                           SourceLocation QuestionLoc) {
  Cond = m_sema->CorrectDelayedTyposInExpr(Cond);
  LHS = m_sema->CorrectDelayedTyposInExpr(LHS);
  RHS = m_sema->CorrectDelayedTyposInExpr(RHS);
  // If either expression is invalid to begin with, propagate that.
  if (Cond.isInvalid() || LHS.isInvalid() || RHS.isInvalid()) {
    return QualType();
  }
  // Gather type info
  QualType condType = GetStructuralForm(Cond.get()->getType());
  QualType leftType = GetStructuralForm(LHS.get()->getType());
  QualType rightType = GetStructuralForm(RHS.get()->getType());
  // If any type is dependent, we will use that as the type to return.
  if (leftType->isDependentType())
    return leftType;
  if (rightType->isDependentType())
    return rightType;
  if (condType->isDependentType())
    return condType;
  ArBasicKind condElementKind = GetTypeElementKind(condType);
  ArBasicKind leftElementKind = GetTypeElementKind(leftType);
  ArBasicKind rightElementKind = GetTypeElementKind(rightType);
  ArTypeObjectKind condObjectKind = GetTypeObjectKind(condType);
  ArTypeObjectKind leftObjectKind = GetTypeObjectKind(leftType);
  ArTypeObjectKind rightObjectKind = GetTypeObjectKind(rightType);
  QualType ResultTy = leftType;
  if (m_sema->getLangOpts().HLSLVersion >= hlsl::LangStd::v2021) {
    // Only allow scalar conditions in 2021+; suggest select() via fix-it for
    // vector/matrix conditions.
    if (condObjectKind == AR_TOBJ_VECTOR || condObjectKind == AR_TOBJ_MATRIX) {
      SmallVector<char, 256> Buff;
      llvm::raw_svector_ostream OS(Buff);
      PrintingPolicy PP(m_sema->getLangOpts());
      OS << "select(";
      Cond.get()->printPretty(OS, nullptr, PP);
      OS << ", ";
      LHS.get()->printPretty(OS, nullptr, PP);
      OS << ", ";
      RHS.get()->printPretty(OS, nullptr, PP);
      OS << ")";
      SourceRange FullRange =
          SourceRange(Cond.get()->getLocStart(), RHS.get()->getLocEnd());
      m_sema->Diag(QuestionLoc, diag::err_hlsl_ternary_scalar)
          << FixItHint::CreateReplacement(FullRange, OS.str());
      return QualType();
    }
  }
  bool condIsSimple = condObjectKind == AR_TOBJ_BASIC ||
                      condObjectKind == AR_TOBJ_VECTOR ||
                      condObjectKind == AR_TOBJ_MATRIX;
  if (!condIsSimple) {
    m_sema->Diag(QuestionLoc, diag::err_hlsl_conditional_cond_typecheck);
    return QualType();
  }
  UINT rowCountCond, colCountCond;
  GetRowsAndColsForAny(condType, rowCountCond, colCountCond);
  bool leftIsSimple = leftObjectKind == AR_TOBJ_BASIC ||
                      leftObjectKind == AR_TOBJ_VECTOR ||
                      leftObjectKind == AR_TOBJ_MATRIX;
  bool rightIsSimple = rightObjectKind == AR_TOBJ_BASIC ||
                       rightObjectKind == AR_TOBJ_VECTOR ||
                       rightObjectKind == AR_TOBJ_MATRIX;
  if (!leftIsSimple || !rightIsSimple) {
    // Identical object types on both branches are allowed as-is.
    if (leftObjectKind == AR_TOBJ_OBJECT && rightObjectKind == AR_TOBJ_OBJECT) {
      if (leftType == rightType) {
        return leftType;
      }
    }
    // NOTE: Limiting this operator to working only on basic numeric types.
    // This is due to extremely limited (and even broken) support for any other
    // case. In the future we may decide to support more cases.
    m_sema->Diag(QuestionLoc, diag::err_hlsl_conditional_result_typecheck);
    return QualType();
  }
  // Types should be only scalar, vector, or matrix after this point.
  ArBasicKind resultElementKind = leftElementKind;
  // Combine LHS and RHS element types for computation.
  if (leftElementKind != rightElementKind) {
    if (!CombineBasicTypes(leftElementKind, rightElementKind,
                           &resultElementKind)) {
      m_sema->Diag(QuestionLoc,
                   diag::err_hlsl_conditional_result_comptype_mismatch);
      return QualType();
    }
  }
  // Restore left/right type to original to avoid stripping attributed type or
  // typedef type
  leftType = LHS.get()->getType();
  rightType = RHS.get()->getType();
  // Combine LHS and RHS dimensions
  if (FAILED(CombineDimensions(leftType, rightType, &ResultTy))) {
    m_sema->Diag(QuestionLoc, diag::err_hlsl_conditional_result_dimensions);
    return QualType();
  }
  UINT rowCount, colCount;
  GetRowsAndColsForAny(ResultTy, rowCount, colCount);
  // If result is scalar, use condition dimensions.
  // Otherwise, condition must either match or is scalar, then use result
  // dimensions
  if (rowCount * colCount == 1) {
    rowCount = rowCountCond;
    colCount = colCountCond;
  } else if (rowCountCond * colCountCond != 1 &&
             (rowCountCond != rowCount || colCountCond != colCount)) {
    m_sema->Diag(QuestionLoc, diag::err_hlsl_conditional_dimensions);
    return QualType();
  }
  // Here, element kind is combined with dimensions for primitive types.
  if (IS_BASIC_PRIMITIVE(resultElementKind)) {
    ResultTy = NewSimpleAggregateType(AR_TOBJ_INVALID, resultElementKind, 0,
                                      rowCount, colCount)
                   ->getCanonicalTypeInternal();
  } else {
    DXASSERT(rowCount == 1 && colCount == 1,
             "otherwise, attempting to construct vector or matrix with "
             "non-primitive component type");
    ResultTy = ResultTy.getUnqualifiedType();
  }
  // Cast condition to RValue
  if (Cond.get()->isLValue())
    Cond.set(CreateLValueToRValueCast(Cond.get()));
  // Convert condition component type to bool, using result component dimensions
  QualType boolType;
  // If short-circuiting, condition must be scalar.
  if (m_sema->getLangOpts().HLSLVersion >= hlsl::LangStd::v2021)
    boolType = NewSimpleAggregateType(AR_TOBJ_INVALID, AR_BASIC_BOOL, 0, 1, 1)
                   ->getCanonicalTypeInternal();
  else
    boolType = NewSimpleAggregateType(AR_TOBJ_INVALID, AR_BASIC_BOOL, 0,
                                      rowCount, colCount)
                   ->getCanonicalTypeInternal();
  if (condElementKind != AR_BASIC_BOOL || condType != boolType) {
    StandardConversionSequence standard;
    if (ValidateCast(SourceLocation(), Cond.get(), boolType,
                     ExplicitConversionFalse, SuppressWarningsFalse,
                     SuppressErrorsFalse, &standard)) {
      if (standard.First != ICK_Identity || !standard.isIdentityConversion())
        Cond = m_sema->PerformImplicitConversion(Cond.get(), boolType, standard,
                                                 Sema::AA_Casting,
                                                 Sema::CCK_ImplicitConversion);
    } else {
      return QualType();
    }
  }
  // TODO: Is this correct? Does fxc support lvalue return here?
  // Cast LHS/RHS to RValue
  if (LHS.get()->isLValue())
    LHS.set(CreateLValueToRValueCast(LHS.get()));
  if (RHS.get()->isLValue())
    RHS.set(CreateLValueToRValueCast(RHS.get()));
  if (leftType != ResultTy) {
    StandardConversionSequence standard;
    if (ValidateCast(SourceLocation(), LHS.get(), ResultTy,
                     ExplicitConversionFalse, SuppressWarningsFalse,
                     SuppressErrorsFalse, &standard)) {
      if (standard.First != ICK_Identity || !standard.isIdentityConversion())
        LHS = m_sema->PerformImplicitConversion(LHS.get(), ResultTy, standard,
                                                Sema::AA_Casting,
                                                Sema::CCK_ImplicitConversion);
    } else {
      return QualType();
    }
  }
  if (rightType != ResultTy) {
    StandardConversionSequence standard;
    if (ValidateCast(SourceLocation(), RHS.get(), ResultTy,
                     ExplicitConversionFalse, SuppressWarningsFalse,
                     SuppressErrorsFalse, &standard)) {
      if (standard.First != ICK_Identity || !standard.isIdentityConversion())
        RHS = m_sema->PerformImplicitConversion(RHS.get(), ResultTy, standard,
                                                Sema::AA_Casting,
                                                Sema::CCK_ImplicitConversion);
    } else {
      return QualType();
    }
  }
  return ResultTy;
}
// Applies a parsed type-specifier sign (e.g. 'unsigned') to the given type.
// Other than primitive int types, only shorthand vectors and matrices are
// allowed to become unsigned; other types pass through unchanged.
clang::QualType
HLSLExternalSource::ApplyTypeSpecSignToParsedType(clang::QualType &type,
                                                  clang::TypeSpecifierSign TSS,
                                                  clang::SourceLocation Loc) {
  if (TSS == TypeSpecifierSign::TSS_unspecified) {
    return type;
  }
  // 'signed' is rejected earlier in parsing; only 'unsigned' reaches here.
  DXASSERT(TSS != TypeSpecifierSign::TSS_signed,
           "else signed keyword is supported in HLSL");
  ArTypeObjectKind objKind = GetTypeObjectKind(type);
  if (objKind != AR_TOBJ_VECTOR && objKind != AR_TOBJ_MATRIX &&
      objKind != AR_TOBJ_BASIC && objKind != AR_TOBJ_ARRAY) {
    return type;
  }
  // Check if the element type is unsigned-able and, if so, build the
  // corresponding unsigned type.
  ArBasicKind elementKind = GetTypeElementKind(type);
  // Only int flavors have a signed/unsigned distinction.
  if (!IS_BASIC_UNSIGNABLE(elementKind)) {
    return type;
  } else {
    // Map the element kind to its unsigned scalar counterpart.
    HLSLScalarType scalarType = ScalarTypeForBasic(elementKind);
    HLSLScalarType newScalarType = MakeUnsigned(scalarType);
    // Look up the equivalent shorthand vector/matrix typedef with the new
    // element type.
    if (objKind == AR_TOBJ_VECTOR) {
      UINT colCount = GetHLSLVecSize(type);
      TypedefDecl *qts = LookupVectorShorthandType(newScalarType, colCount);
      return m_context->getTypeDeclType(qts);
    } else if (objKind == AR_TOBJ_MATRIX) {
      UINT rowCount, colCount;
      GetRowsAndCols(type, rowCount, colCount);
      TypedefDecl *qts =
          LookupMatrixShorthandType(newScalarType, rowCount, colCount);
      return m_context->getTypeDeclType(qts);
    } else {
      // NOTE(review): for AR_TOBJ_ARRAY this returns only the unsigned scalar
      // element type, dropping the array shape — presumably recombined by the
      // caller; confirm.
      DXASSERT_NOMSG(objKind == AR_TOBJ_BASIC || objKind == AR_TOBJ_ARRAY);
      return m_scalarTypes[newScalarType];
    }
  }
}
// Performs HLSL-specific "template argument deduction" for a method call on a
// built-in object: instead of real C++ deduction, the call is matched against
// the intrinsic tables and a concrete specialization is synthesized.
// Returns TDK_Success with `Specialization` set when an intrinsic matched,
// TDK_NonDeducedMismatch when no intrinsic signature fits the arguments, and
// TDK_Invalid for unsupported shapes (standalone functions, user templates,
// illegal explicit template arguments).
Sema::TemplateDeductionResult
HLSLExternalSource::DeduceTemplateArgumentsForHLSL(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    FunctionDecl *&Specialization, TemplateDeductionInfo &Info) {
  DXASSERT_NOMSG(FunctionTemplate != nullptr);
  // Get information about the function we have.
  CXXMethodDecl *functionMethod =
      dyn_cast<CXXMethodDecl>(FunctionTemplate->getTemplatedDecl());
  if (!functionMethod) {
    // standalone function.
    return Sema::TemplateDeductionResult::TDK_Invalid;
  }
  CXXRecordDecl *functionParentRecord = functionMethod->getParent();
  DXASSERT(functionParentRecord != nullptr, "otherwise function is orphaned");
  QualType objectElement = GetFirstElementTypeFromDecl(functionParentRecord);
  // Preserve full object type for special cases in method matching
  QualType objectType = m_context->getTagDeclType(functionParentRecord);
  // Capture a single explicit type template argument (e.g. Load<MyStruct>)
  // for argument matching below; anything else is validated later.
  QualType functionTemplateTypeArg{};
  if (ExplicitTemplateArgs != nullptr && ExplicitTemplateArgs->size() == 1) {
    const TemplateArgument &firstTemplateArg =
        (*ExplicitTemplateArgs)[0].getArgument();
    if (firstTemplateArg.getKind() == TemplateArgument::ArgKind::Type)
      functionTemplateTypeArg = firstTemplateArg.getAsType();
  }
  // Handle subscript overloads.
  if (FunctionTemplate->getDeclName() ==
      m_context->DeclarationNames.getCXXOperatorName(OO_Subscript)) {
    DeclContext *functionTemplateContext = FunctionTemplate->getDeclContext();
    FindStructBasicTypeResult findResult =
        FindStructBasicType(functionTemplateContext);
    if (!findResult.Found()) {
      // This might be a nested type. Do a lookup on the parent.
      CXXRecordDecl *parentRecordType =
          dyn_cast_or_null<CXXRecordDecl>(functionTemplateContext);
      if (parentRecordType == nullptr ||
          parentRecordType->getDeclContext() == nullptr) {
        return Sema::TemplateDeductionResult::TDK_Invalid;
      }
      findResult = FindStructBasicType(parentRecordType->getDeclContext());
      if (!findResult.Found()) {
        return Sema::TemplateDeductionResult::TDK_Invalid;
      }
      DXASSERT(parentRecordType->getDeclContext()->getDeclKind() ==
                       Decl::Kind::CXXRecord ||
                   parentRecordType->getDeclContext()->getDeclKind() ==
                       Decl::Kind::ClassTemplateSpecialization,
               "otherwise FindStructBasicType should have failed - no other "
               "types are allowed");
      objectElement = GetFirstElementTypeFromDecl(
          cast<CXXRecordDecl>(parentRecordType->getDeclContext()));
    }
    Specialization =
        AddSubscriptSpecialization(FunctionTemplate, objectElement, findResult);
    DXASSERT_NOMSG(Specialization->getPrimaryTemplate()->getCanonicalDecl() ==
                   FunctionTemplate->getCanonicalDecl());
    return Sema::TemplateDeductionResult::TDK_Success;
  }
  // Reject overload lookups that aren't identifier-based.
  if (!FunctionTemplate->getDeclName().isIdentifier()) {
    return Sema::TemplateDeductionResult::TDK_NonDeducedMismatch;
  }
  // Find the table of intrinsics based on the object type.
  const HLSL_INTRINSIC *intrinsics = nullptr;
  size_t intrinsicCount = 0;
  const char *objectName = nullptr;
  FindIntrinsicTable(FunctionTemplate->getDeclContext(), &objectName,
                     &intrinsics, &intrinsicCount);
  // user-defined template object.
  if (objectName == nullptr && intrinsics == nullptr) {
    return Sema::TemplateDeductionResult::TDK_Invalid;
  }
  DXASSERT(objectName != nullptr &&
               (intrinsics != nullptr || m_intrinsicTables.size() > 0),
           "otherwise FindIntrinsicTable failed to lookup a valid object, "
           "or the parser let a user-defined template object through");
  // Look for an intrinsic for which we can match arguments.
  std::vector<QualType> argTypes;
  StringRef nameIdentifier = FunctionTemplate->getName();
  IntrinsicDefIter cursor = FindIntrinsicByNameAndArgCount(
      intrinsics, intrinsicCount, objectName, nameIdentifier, Args.size());
  IntrinsicDefIter end = IntrinsicDefIter::CreateEnd(
      intrinsics, intrinsicCount,
      IntrinsicTableDefIter::CreateEnd(m_intrinsicTables));
  while (cursor != end) {
    size_t badArgIdx;
    if (!MatchArguments(cursor, objectType, objectElement,
                        functionTemplateTypeArg, Args, &argTypes, badArgIdx)) {
      ++cursor;
      continue;
    }
    LPCSTR tableName = cursor.GetTableName();
    // Currently only intrinsic we allow for explicit template arguments are
    // for Load/Store for ByteAddressBuffer/RWByteAddressBuffer
    // Check Explicit template arguments
    UINT intrinsicOp = (*cursor)->Op;
    LPCSTR intrinsicName = (*cursor)->pArgs[0].pName;
    bool Is2018 = getSema()->getLangOpts().HLSLVersion >= hlsl::LangStd::v2018;
    // NOTE(review): pointer (not content) comparison — assumes objectName is
    // interned from g_ArBasicTypeNames by FindIntrinsicTable; confirm.
    bool IsBAB =
        objectName == g_ArBasicTypeNames[AR_OBJECT_BYTEADDRESS_BUFFER] ||
        objectName == g_ArBasicTypeNames[AR_OBJECT_RWBYTEADDRESS_BUFFER];
    bool IsBABLoad = false;
    bool IsBABStore = false;
    if (IsBuiltinTable(tableName) && IsBAB) {
      IsBABLoad = intrinsicOp == (UINT)IntrinsicOp::MOP_Load;
      IsBABStore = intrinsicOp == (UINT)IntrinsicOp::MOP_Store;
    }
    if (ExplicitTemplateArgs && ExplicitTemplateArgs->size() > 0) {
      // Explicit template args are only legal on BAB Load/Store in HLSL
      // 2018+, with a single numeric (or aggregate-of-numeric) type.
      bool isLegalTemplate = false;
      SourceLocation Loc = ExplicitTemplateArgs->getLAngleLoc();
      auto TemplateDiag = diag::err_hlsl_intrinsic_template_arg_unsupported;
      if (ExplicitTemplateArgs->size() >= 1 && (IsBABLoad || IsBABStore)) {
        TemplateDiag = diag::err_hlsl_intrinsic_template_arg_requires_2018;
        Loc = (*ExplicitTemplateArgs)[0].getLocation();
        if (Is2018) {
          TemplateDiag = diag::err_hlsl_intrinsic_template_arg_numeric;
          if (ExplicitTemplateArgs->size() == 1 &&
              !functionTemplateTypeArg.isNull() &&
              hlsl::IsHLSLNumericOrAggregateOfNumericType(
                  functionTemplateTypeArg)) {
            isLegalTemplate = true;
          }
        }
      }
      if (!isLegalTemplate) {
        getSema()->Diag(Loc, TemplateDiag) << intrinsicName;
        return Sema::TemplateDeductionResult::TDK_Invalid;
      }
    } else if (IsBABStore) {
      // Prior to HLSL 2018, Store operation only stored scalar uint.
      if (!Is2018) {
        if (GetNumElements(argTypes[2]) != 1) {
          getSema()->Diag(Args[1]->getLocStart(),
                          diag::err_ovl_no_viable_member_function_in_call)
              << intrinsicName;
          return Sema::TemplateDeductionResult::TDK_Invalid;
        }
        argTypes[2] = getSema()->getASTContext().getIntTypeForBitwidth(
            32, /*signed*/ false);
      }
    }
    // Synthesize (or reuse) the concrete method specialization.
    Specialization = AddHLSLIntrinsicMethod(
        tableName, cursor.GetLoweringStrategy(), *cursor, FunctionTemplate,
        Args, argTypes.data(), argTypes.size());
    DXASSERT_NOMSG(Specialization->getPrimaryTemplate()->getCanonicalDecl() ==
                   FunctionTemplate->getCanonicalDecl());
    // Diagnose intrinsics used on resources with unsupported element types
    // (non-fatal; the specialization is still produced).
    const HLSL_INTRINSIC *pIntrinsic = *cursor;
    if (!IsValidObjectElement(tableName,
                              static_cast<IntrinsicOp>(pIntrinsic->Op),
                              objectElement)) {
      UINT numEles = GetNumElements(objectElement);
      std::string typeName(
          g_ArBasicTypeNames[GetTypeElementKind(objectElement)]);
      if (numEles > 1)
        typeName += std::to_string(numEles);
      m_sema->Diag(Args[0]->getExprLoc(),
                   diag::err_hlsl_invalid_resource_type_on_intrinsic)
          << nameIdentifier << typeName;
    }
    return Sema::TemplateDeductionResult::TDK_Success;
  }
  return Sema::TemplateDeductionResult::TDK_NonDeducedMismatch;
}
// Emits the generic "unsupported type nesting" error for `type` at `loc`.
void HLSLExternalSource::ReportUnsupportedTypeNesting(SourceLocation loc,
                                                      QualType type) {
  const unsigned DiagID = diag::err_hlsl_unsupported_type_nesting;
  m_sema->Diag(loc, DiagID) << type;
}
// Attempts an HLSL static cast of `SrcExpr` to `DestType`.
// Returns true on success; on failure, diagnostics have already been emitted
// by ValidateCast (unless suppressed) and `msg` is cleared so the caller does
// not emit a second message.
bool HLSLExternalSource::TryStaticCastForHLSL(
    ExprResult &SrcExpr, QualType DestType, Sema::CheckedConversionKind CCK,
    const SourceRange &OpRange, unsigned &msg, CastKind &Kind,
    CXXCastPath &BasePath, bool ListInitialization, bool SuppressWarnings,
    bool SuppressErrors, StandardConversionSequence *standard) {
  DXASSERT(!SrcExpr.isInvalid(),
           "caller should check for invalid expressions and placeholder types");
  // C-style and functional casts count as explicit conversions, which also
  // suppress conversion warnings.
  const bool isExplicit =
      CCK == Sema::CCK_CStyleCast || CCK == Sema::CCK_FunctionalCast;
  const SourceLocation castLoc = OpRange.getBegin();
  if (!ValidateCast(castLoc, SrcExpr.get(), DestType, isExplicit,
                    isExplicit || SuppressWarnings, SuppressErrors, standard)) {
    // ValidateCast includes its own error messages.
    msg = 0;
    return false;
  }
  // TODO: LValue to RValue cast was all that CanConvert (ValidateCast) did
  // anyway, so do this here until we figure out why this is needed.
  if (standard != nullptr && standard->First == ICK_Lvalue_To_Rvalue)
    SrcExpr.set(CreateLValueToRValueCast(SrcExpr.get()));
  return true;
}
/// <summary>
/// Checks if a subscript index argument can be initialized from the given
/// expression.
/// </summary>
/// <param name="SrcExpr">Source expression used as argument.</param>
/// <param name="DestType">Parameter type to initialize.</param>
/// <remarks>
/// Rules for subscript index initialization follow regular implicit casting
/// rules, with the exception that no changes in arity are allowed (i.e., int2
/// can become uint2, but uint or uint3 cannot).
/// </remarks>
ImplicitConversionSequence
HLSLExternalSource::TrySubscriptIndexInitialization(clang::Expr *SrcExpr,
                                                    clang::QualType DestType) {
  DXASSERT_NOMSG(SrcExpr != nullptr);
  DXASSERT_NOMSG(!DestType.isNull());
  ImplicitConversionSequence ics;
  ics.setStandard();
  // Arity must match exactly; only then try the regular static cast.
  const bool arityMatches =
      GetElementCount(SrcExpr->getType()) == GetElementCount(DestType);
  bool converted = false;
  if (arityMatches) {
    unsigned int diagMsg = 0;
    CastKind castKind;
    CXXCastPath castPath;
    ExprResult source(SrcExpr);
    converted = TryStaticCastForHLSL(
        source, DestType, Sema::CCK_ImplicitConversion, NoRange, diagMsg,
        castKind, castPath, ListInitializationFalse, SuppressWarningsFalse,
        SuppressErrorsTrue, &ics.Standard);
  }
  if (!converted) {
    ics.setBad(BadConversionSequence::FailureKind::no_conversion,
               SrcExpr->getType(), DestType);
  }
  return ics;
}
// True iff `value` lies within the closed interval [minValue, maxValue].
template <typename T>
static bool IsValueInRange(T value, T minValue, T maxValue) {
  return value >= minValue && value <= maxValue;
}
#define D3DX_16F_MAX 6.550400e+004 // max value
#define D3DX_16F_MIN 6.1035156e-5f // min positive value
// Retrieves the representable finite range [*minValue, *maxValue] for the
// given floating-point basic kind. For unknown kinds, asserts and returns a
// degenerate [0, 0] range.
static void GetFloatLimits(ArBasicKind basicKind, double *minValue,
                           double *maxValue) {
  DXASSERT_NOMSG(minValue != nullptr);
  DXASSERT_NOMSG(maxValue != nullptr);
  switch (basicKind) {
  case AR_BASIC_MIN10FLOAT:
  case AR_BASIC_MIN16FLOAT:
  case AR_BASIC_FLOAT16:
    // BUGFIX: the lower bound of the representable range is the negated
    // maximum magnitude. The previous code used -(D3DX_16F_MIN) (the smallest
    // positive value), which made every ordinary negative constant appear out
    // of range and defeated narrowing-warning suppression for them.
    *minValue = -(D3DX_16F_MAX);
    *maxValue = D3DX_16F_MAX;
    return;
  case AR_BASIC_FLOAT32_PARTIAL_PRECISION:
  case AR_BASIC_FLOAT32:
    // Likewise: FLT_MIN is the smallest positive normalized value, not the
    // most negative representable value.
    *minValue = -(FLT_MAX);
    *maxValue = FLT_MAX;
    return;
  case AR_BASIC_FLOAT64:
    *minValue = -(DBL_MAX);
    *maxValue = DBL_MAX;
    return;
  default:
    // No other float types.
    break;
  }
  DXASSERT(false, "unreachable");
  *minValue = 0;
  *maxValue = 0;
  return;
}
// Retrieves the inclusive upper bound of the given unsigned/bool basic kind.
// Asserts and yields 0 for kinds without an unsigned range.
static void GetUnsignedLimit(ArBasicKind basicKind, uint64_t *maxValue) {
  DXASSERT_NOMSG(maxValue != nullptr);
  uint64_t limit;
  switch (basicKind) {
  case AR_BASIC_BOOL:
    limit = 1;
    break;
  case AR_BASIC_UINT8:
    limit = UINT8_MAX;
    break;
  case AR_BASIC_MIN16UINT:
  case AR_BASIC_UINT16:
    limit = UINT16_MAX;
    break;
  case AR_BASIC_UINT32:
    limit = UINT32_MAX;
    break;
  case AR_BASIC_UINT64:
    limit = UINT64_MAX;
    break;
  case AR_BASIC_UINT8_4PACKED:
  case AR_BASIC_INT8_4PACKED:
    // Packed kinds are carried in a full 32-bit value.
    limit = UINT32_MAX;
    break;
  default:
    // No other unsigned int types.
    DXASSERT(false, "unreachable");
    limit = 0;
    break;
  }
  *maxValue = limit;
}
// Retrieves the inclusive [*minValue, *maxValue] range of the given signed
// integer basic kind. Asserts and yields [0, 0] for non-signed kinds.
static void GetSignedLimits(ArBasicKind basicKind, int64_t *minValue,
                            int64_t *maxValue) {
  DXASSERT_NOMSG(minValue != nullptr);
  DXASSERT_NOMSG(maxValue != nullptr);
  int64_t lo;
  int64_t hi;
  switch (basicKind) {
  case AR_BASIC_INT8:
    lo = INT8_MIN;
    hi = INT8_MAX;
    break;
  case AR_BASIC_MIN12INT:
  case AR_BASIC_MIN16INT:
  case AR_BASIC_INT16:
    // min-precision ints promise at least 16 bits; validate against int16.
    lo = INT16_MIN;
    hi = INT16_MAX;
    break;
  case AR_BASIC_INT32:
    lo = INT32_MIN;
    hi = INT32_MAX;
    break;
  case AR_BASIC_INT64:
    lo = INT64_MIN;
    hi = INT64_MAX;
    break;
  default:
    // No other signed int types.
    DXASSERT(false, "unreachable");
    lo = 0;
    hi = 0;
    break;
  }
  *minValue = lo;
  *maxValue = hi;
}
// Returns true if the constant `value` is representable within the range of
// the basic kind `basicKind`. Non-finite floats and values of the wrong
// APValue kind yield false.
static bool IsValueInBasicRange(ArBasicKind basicKind, const APValue &value) {
  if (IS_BASIC_FLOAT(basicKind)) {
    double val;
    if (value.isInt()) {
      val = value.getInt().getLimitedValue();
    } else if (value.isFloat()) {
      // Bind a single reference instead of the original's four getFloat()
      // calls, which materialized two needless APFloat copies.
      const llvm::APFloat &floatValue = value.getFloat();
      if (!floatValue.isFinite()) {
        return false;
      }
      if (&floatValue.getSemantics() == &llvm::APFloat::IEEEsingle) {
        val = floatValue.convertToFloat();
      } else {
        val = floatValue.convertToDouble();
      }
    } else {
      return false;
    }
    double minValue, maxValue;
    GetFloatLimits(basicKind, &minValue, &maxValue);
    return IsValueInRange(val, minValue, maxValue);
  } else if (IS_BASIC_SINT(basicKind)) {
    if (!value.isInt()) {
      return false;
    }
    int64_t val = value.getInt().getSExtValue();
    int64_t minValue, maxValue;
    GetSignedLimits(basicKind, &minValue, &maxValue);
    return IsValueInRange(val, minValue, maxValue);
  } else if (IS_BASIC_UINT(basicKind) || IS_BASIC_BOOL(basicKind)) {
    if (!value.isInt()) {
      return false;
    }
    uint64_t val = value.getInt().getLimitedValue();
    uint64_t maxValue;
    GetUnsignedLimit(basicKind, &maxValue);
    return IsValueInRange(val, (uint64_t)0, maxValue);
  } else {
    // Not a numeric basic kind we know how to range-check.
    return false;
  }
}
// Returns true when `sourceExpr` folds to a constant that fits in the range
// of `targetKind`, meaning a narrowing conversion cannot actually lose the
// value and the warning can be suppressed.
static bool IsPrecisionLossIrrelevant(ASTContext &Ctx, const Expr *sourceExpr,
                                      QualType targetType,
                                      ArBasicKind targetKind) {
  DXASSERT_NOMSG(!targetType.isNull());
  DXASSERT_NOMSG(sourceExpr != nullptr);
  Expr::EvalResult evalResult;
  if (!sourceExpr->EvaluateAsRValue(evalResult, Ctx))
    return false;
  // Only trust folds that produced no diagnostics.
  const bool cleanFold =
      evalResult.Diag == nullptr || evalResult.Diag->empty();
  return cleanFold && IsValueInBasicRange(targetKind, evalResult.Val);
}
// Validates that `sourceExpr` can be converted to `target`.
// Returns true when the conversion is possible, optionally filling `standard`
// with the conversion steps; on success it may also emit narrowing/truncation
// warnings. Returns false when it is not, emitting an error unless
// `suppressErrors` is set.
bool HLSLExternalSource::ValidateCast(SourceLocation OpLoc, Expr *sourceExpr,
                                      QualType target, bool explicitConversion,
                                      bool suppressWarnings,
                                      bool suppressErrors,
                                      StandardConversionSequence *standard) {
  DXASSERT_NOMSG(sourceExpr != nullptr);
  if (OpLoc.isInvalid())
    OpLoc = sourceExpr->getExprLoc();
  QualType source = sourceExpr->getType();
  TYPE_CONVERSION_REMARKS remarks = TYPE_CONVERSION_NONE;
  if (!CanConvert(OpLoc, sourceExpr, target, explicitConversion, &remarks,
                  standard)) {
    //
    // Check whether the lack of explicit-ness matters.
    //
    // Setting explicitForDiagnostics to true in that case will avoid the
    // message saying anything about the implicit nature of the cast, when
    // adding the explicit cast won't make a difference.
    //
    bool explicitForDiagnostics = explicitConversion;
    if (explicitConversion == false) {
      if (!CanConvert(OpLoc, sourceExpr, target, true, &remarks, nullptr)) {
        // Can't convert either way - implicit/explicit doesn't matter.
        explicitForDiagnostics = true;
      }
    }
    if (!suppressErrors) {
      // Mention in the diagnostic when the source is an out/inout parameter,
      // since the conversion then happens on copy-out rather than copy-in.
      bool IsOutputParameter = false;
      if (clang::DeclRefExpr *OutFrom =
              dyn_cast<clang::DeclRefExpr>(sourceExpr)) {
        if (ParmVarDecl *Param = dyn_cast<ParmVarDecl>(OutFrom->getDecl())) {
          IsOutputParameter = Param->isModifierOut();
        }
      }
      m_sema->Diag(OpLoc, diag::err_hlsl_cannot_convert)
          << explicitForDiagnostics << IsOutputParameter << source << target;
    }
    return false;
  }
  // Conversion succeeded; warn about lossy implicit conversions unless the
  // constant provably fits in the target range.
  if (!suppressWarnings) {
    if (!explicitConversion) {
      if ((remarks & TYPE_CONVERSION_PRECISION_LOSS) != 0) {
        // This is a much more restricted version of the analysis does
        // StandardConversionSequence::getNarrowingKind
        if (!IsPrecisionLossIrrelevant(*m_context, sourceExpr, target,
                                       GetTypeElementKind(target))) {
          m_sema->Diag(OpLoc, diag::warn_hlsl_narrowing) << source << target;
        }
      }
      if ((remarks & TYPE_CONVERSION_ELT_TRUNCATION) != 0) {
        m_sema->Diag(OpLoc, diag::warn_hlsl_implicit_vector_truncation);
      }
    }
  }
  return true;
}
////////////////////////////////////////////////////////////////////////////////
// Functions exported from this translation unit. //
/// <summary>Performs HLSL-specific processing for unary operators.</summary>
QualType hlsl::CheckUnaryOpForHLSL(Sema &self, SourceLocation OpLoc,
                                   UnaryOperatorKind Opc, ExprResult &InputExpr,
                                   ExprValueKind &VK, ExprObjectKind &OK) {
  // Without an external sema source there is no HLSL processing to do.
  ExternalSemaSource *source = self.getExternalSource();
  if (source == nullptr)
    return QualType();
  auto *hlslSource = reinterpret_cast<HLSLExternalSource *>(source);
  return hlslSource->CheckUnaryOpForHLSL(OpLoc, Opc, InputExpr, VK, OK);
}
/// <summary>Performs HLSL-specific processing for binary operators.</summary>
void hlsl::CheckBinOpForHLSL(Sema &self, SourceLocation OpLoc,
                             BinaryOperatorKind Opc, ExprResult &LHS,
                             ExprResult &RHS, QualType &ResultTy,
                             QualType &CompLHSTy, QualType &CompResultTy) {
  // Without an external sema source there is no HLSL processing to do.
  ExternalSemaSource *source = self.getExternalSource();
  if (source == nullptr)
    return;
  auto *hlslSource = reinterpret_cast<HLSLExternalSource *>(source);
  hlslSource->CheckBinOpForHLSL(OpLoc, Opc, LHS, RHS, ResultTy, CompLHSTy,
                                CompResultTy);
}
/// <summary>Performs HLSL-specific processing of template
/// declarations.</summary>
bool hlsl::CheckTemplateArgumentListForHLSL(
    Sema &self, TemplateDecl *Template, SourceLocation TemplateLoc,
    TemplateArgumentListInfo &TemplateArgList) {
  DXASSERT_NOMSG(Template != nullptr);
  // Without an external sema source there is no HLSL checking to perform.
  ExternalSemaSource *source = self.getExternalSource();
  if (source == nullptr)
    return false;
  auto *hlslSource = reinterpret_cast<HLSLExternalSource *>(source);
  return hlslSource->CheckTemplateArgumentListForHLSL(Template, TemplateLoc,
                                                      TemplateArgList);
}
/// <summary>Deduces template arguments on a function call in an HLSL
/// program.</summary>
Sema::TemplateDeductionResult hlsl::DeduceTemplateArgumentsForHLSL(
    Sema *self, FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    FunctionDecl *&Specialization, TemplateDeductionInfo &Info) {
  // Delegate to the HLSL external source attached to this Sema instance.
  HLSLExternalSource *hlslSource = HLSLExternalSource::FromSema(self);
  return hlslSource->DeduceTemplateArgumentsForHLSL(
      FunctionTemplate, ExplicitTemplateArgs, Args, Specialization, Info);
}
// Walks the implicit casts wrapping a control-flow condition and reports an
// error if the condition was truncated from a vector/matrix to scalar.
void hlsl::DiagnoseControlFlowConditionForHLSL(Sema *self, Expr *condExpr,
                                               StringRef StmtName) {
  for (ImplicitCastExpr *implicitCast = dyn_cast<ImplicitCastExpr>(condExpr);
       implicitCast != nullptr;
       implicitCast = dyn_cast<ImplicitCastExpr>(condExpr)) {
    const CastKind kind = implicitCast->getCastKind();
    if (kind == CastKind::CK_HLSLMatrixTruncationCast ||
        kind == CastKind::CK_HLSLVectorTruncationCast) {
      self->Diag(condExpr->getLocStart(),
                 diag::err_hlsl_control_flow_cond_not_scalar)
          << StmtName;
      return;
    }
    condExpr = implicitCast->getSubExpr();
  }
}
static bool ShaderModelsMatch(const StringRef &left, const StringRef &right) {
// TODO: handle shorthand cases.
return left.size() == 0 || right.size() == 0 || left.equals(right);
}
// Validates a packoffset component offset (the ".y" in packoffset(c0.y))
// against the declaration's type, diagnosing values that cannot fit in the
// remaining components of a register row.
void hlsl::DiagnosePackingOffset(clang::Sema *self, SourceLocation loc,
                                 clang::QualType type, int componentOffset) {
  DXASSERT_NOMSG(0 <= componentOffset && componentOffset <= 3);
  if (componentOffset == 0)
    return;
  HLSLExternalSource *src = HLSLExternalSource::FromSema(self);
  ArBasicKind elementKind = src->GetTypeElementKind(type);
  ArTypeObjectKind shapeKind = src->GetTypeObjectKind(type);
  // Only perform some simple validation for now.
  // Scalar/vector values must fit in the components left after the offset.
  if (IsObjectKindPrimitiveAggregate(shapeKind) &&
      IsBasicKindNumeric(elementKind)) {
    int elementCount = GetElementCount(type);
    if (elementCount + componentOffset > 4)
      self->Diag(loc, diag::err_hlsl_register_or_offset_bind_not_valid);
  }
  // Matrices, arrays and structs cannot be bound at a component offset.
  if (hlsl::IsMatrixType(self, type) || type->isArrayType() ||
      type->isStructureType())
    self->Diag(loc, diag::err_hlsl_register_or_offset_bind_not_valid);
}
// Validates the register type letter (e.g. the 't' in register(t0)) used in a
// binding semantic against the kind of the bound declaration, emitting an
// error — or, for legacy/stream types fxc tolerated, a warning — when they do
// not correspond.
void hlsl::DiagnoseRegisterType(clang::Sema *self, clang::SourceLocation loc,
                                clang::QualType type, char registerType) {
  // Register type can be zero if only a register space was provided.
  if (registerType == 0)
    return;
  // Normalize to lowercase so 'T0' and 't0' are treated alike.
  if (registerType >= 'A' && registerType <= 'Z')
    registerType = registerType + ('a' - 'A');
  HLSLExternalSource *source = HLSLExternalSource::FromSema(self);
  ArBasicKind element = source->GetTypeElementKind(type);
  StringRef expected("none");
  bool isValid = true;
  bool isWarning = false;
  switch (element) {
  // Numeric values live in constant registers: b (cbuffer), c (legacy
  // constant), or i (legacy integer) registers.
  case AR_BASIC_BOOL:
  case AR_BASIC_LITERAL_FLOAT:
  case AR_BASIC_FLOAT16:
  case AR_BASIC_FLOAT32_PARTIAL_PRECISION:
  case AR_BASIC_FLOAT32:
  case AR_BASIC_FLOAT64:
  case AR_BASIC_LITERAL_INT:
  case AR_BASIC_INT8:
  case AR_BASIC_UINT8:
  case AR_BASIC_INT16:
  case AR_BASIC_UINT16:
  case AR_BASIC_INT32:
  case AR_BASIC_UINT32:
  case AR_BASIC_INT64:
  case AR_BASIC_UINT64:
  case AR_BASIC_MIN10FLOAT:
  case AR_BASIC_MIN16FLOAT:
  case AR_BASIC_MIN12INT:
  case AR_BASIC_MIN16INT:
  case AR_BASIC_MIN16UINT:
    expected = "'b', 'c', or 'i'";
    isValid = registerType == 'b' || registerType == 'c' || registerType == 'i';
    break;
  // Read-only textures bind to t; s is also accepted for legacy reasons.
  case AR_OBJECT_TEXTURE1D:
  case AR_OBJECT_TEXTURE1D_ARRAY:
  case AR_OBJECT_TEXTURE2D:
  case AR_OBJECT_TEXTURE2D_ARRAY:
  case AR_OBJECT_TEXTURE3D:
  case AR_OBJECT_TEXTURECUBE:
  case AR_OBJECT_TEXTURECUBE_ARRAY:
  case AR_OBJECT_TEXTURE2DMS:
  case AR_OBJECT_TEXTURE2DMS_ARRAY:
    expected = "'t' or 's'";
    isValid = registerType == 't' || registerType == 's';
    break;
  // Samplers bind to s; t is also accepted for legacy reasons.
  case AR_OBJECT_SAMPLER:
  case AR_OBJECT_SAMPLER1D:
  case AR_OBJECT_SAMPLER2D:
  case AR_OBJECT_SAMPLER3D:
  case AR_OBJECT_SAMPLERCUBE:
  case AR_OBJECT_SAMPLERCOMPARISON:
    expected = "'s' or 't'";
    isValid = registerType == 's' || registerType == 't';
    break;
  case AR_OBJECT_BUFFER:
    expected = "'t'";
    isValid = registerType == 't';
    break;
  // Geometry-shader streams take no register; fxc only warned, so match it.
  case AR_OBJECT_POINTSTREAM:
  case AR_OBJECT_LINESTREAM:
  case AR_OBJECT_TRIANGLESTREAM:
    isValid = false;
    isWarning = true;
    break;
  // Tessellation patches take no register; warning only as well.
  case AR_OBJECT_INPUTPATCH:
  case AR_OBJECT_OUTPUTPATCH:
    isValid = false;
    isWarning = true;
    break;
  // UAV-capable resources bind to u registers.
  case AR_OBJECT_RWTEXTURE1D:
  case AR_OBJECT_RWTEXTURE1D_ARRAY:
  case AR_OBJECT_RWTEXTURE2D:
  case AR_OBJECT_RWTEXTURE2D_ARRAY:
  case AR_OBJECT_RWTEXTURE3D:
  case AR_OBJECT_RWBUFFER:
    expected = "'u'";
    isValid = registerType == 'u';
    break;
  case AR_OBJECT_BYTEADDRESS_BUFFER:
  case AR_OBJECT_STRUCTURED_BUFFER:
    expected = "'t'";
    isValid = registerType == 't';
    break;
  case AR_OBJECT_CONSUME_STRUCTURED_BUFFER:
  case AR_OBJECT_RWBYTEADDRESS_BUFFER:
  case AR_OBJECT_RWSTRUCTURED_BUFFER:
  case AR_OBJECT_RWSTRUCTURED_BUFFER_ALLOC:
  case AR_OBJECT_RWSTRUCTURED_BUFFER_CONSUME:
  case AR_OBJECT_APPEND_STRUCTURED_BUFFER:
    expected = "'u'";
    isValid = registerType == 'u';
    break;
  case AR_OBJECT_CONSTANT_BUFFER:
    expected = "'b'";
    isValid = registerType == 'b';
    break;
  case AR_OBJECT_TEXTURE_BUFFER:
    expected = "'t'";
    isValid = registerType == 't';
    break;
  // Rasterizer-ordered views and feedback textures are UAVs as well.
  case AR_OBJECT_ROVBUFFER:
  case AR_OBJECT_ROVBYTEADDRESS_BUFFER:
  case AR_OBJECT_ROVSTRUCTURED_BUFFER:
  case AR_OBJECT_ROVTEXTURE1D:
  case AR_OBJECT_ROVTEXTURE1D_ARRAY:
  case AR_OBJECT_ROVTEXTURE2D:
  case AR_OBJECT_ROVTEXTURE2D_ARRAY:
  case AR_OBJECT_ROVTEXTURE3D:
  case AR_OBJECT_FEEDBACKTEXTURE2D:
  case AR_OBJECT_FEEDBACKTEXTURE2D_ARRAY:
    expected = "'u'";
    isValid = registerType == 'u';
    break;
  case AR_OBJECT_LEGACY_EFFECT: // Used for all unsupported but ignored legacy
                                // effect types
    isWarning = true;
    break;         // So we don't care what you tried to bind it to
  default: // Other types have no associated registers.
    break;
  }
  // fxc is inconsistent as to when it reports an error and when it ignores
  // invalid bind semantics, so emit a warning instead.
  if (!isValid) {
    unsigned DiagID = isWarning ? diag::warn_hlsl_incorrect_bind_semantic
                                : diag::err_hlsl_incorrect_bind_semantic;
    self->Diag(loc, DiagID) << expected;
  }
}
// FIXME: DiagnoseSVForLaunchType is wrong in multiple ways:
// - It doesn't handle system values inside structs
// - It doesn't account for the fact that semantics are case-insensitive
// - It doesn't account for optional index at the end of semantic name
// - It permits any `SV_*` for Broadcasting launch, not just the legal ones
// - It doesn't prevent multiple system values with the same semantic
// - It doesn't check that the type is valid for the system value
// Produce diagnostics for any system values attached to `FD` function
// that are invalid for the `LaunchTy` launch type
static void DiagnoseSVForLaunchType(const FunctionDecl *FD,
                                    DXIL::NodeLaunchType LaunchTy,
                                    DiagnosticsEngine &Diags) {
  // Validate Compute Shader system value inputs per launch mode
  for (ParmVarDecl *param : FD->parameters()) {
    // Semantics are attached as "unusual annotations" on each parameter.
    for (const hlsl::UnusualAnnotation *it : param->getUnusualAnnotations()) {
      if (it->getKind() == hlsl::UnusualAnnotation::UA_SemanticDecl) {
        const hlsl::SemanticDecl *sd = cast<hlsl::SemanticDecl>(it);
        // if the node launch type is Thread, then there are no system values
        // allowed
        if (LaunchTy == DXIL::NodeLaunchType::Thread) {
          if (sd->SemanticName.startswith("SV_")) {
            // emit diagnostic
            unsigned DiagID = Diags.getCustomDiagID(
                DiagnosticsEngine::Error,
                "Invalid system value semantic '%0' for launchtype '%1'");
            Diags.Report(param->getLocation(), DiagID)
                << sd->SemanticName << "Thread";
          }
        }
        // if the node launch type is Coalescing, then only
        // SV_GroupIndex and SV_GroupThreadID are allowed
        else if (LaunchTy == DXIL::NodeLaunchType::Coalescing) {
          if (!(sd->SemanticName.equals("SV_GroupIndex") ||
                sd->SemanticName.equals("SV_GroupThreadID"))) {
            // emit diagnostic
            unsigned DiagID = Diags.getCustomDiagID(
                DiagnosticsEngine::Error,
                "Invalid system value semantic '%0' for launchtype '%1'");
            Diags.Report(param->getLocation(), DiagID)
                << sd->SemanticName << "Coalescing";
          }
        }
        // Broadcasting nodes allow all node shader system value semantics
        else if (LaunchTy == DXIL::NodeLaunchType::Broadcasting) {
          continue;
        }
      }
    }
  }
}
/////////////////////////////////////////////////////////////////////////////
// Check HLSL intrinsic calls without call-graph context.
// Verifies that the node input record type a FinishedCrossGroupSharing()
// method is invoked on carries the [NodeTrackRWInputSharing] attribute the
// intrinsic requires. Returns true when a diagnostic was emitted.
static bool CheckFinishedCrossGroupSharingCall(Sema &S, CXXMethodDecl *MD,
                                               SourceLocation Loc) {
  const CXXRecordDecl *NodeRecDecl = MD->getParent();
  // Node I/O records are templateTypes
  const ClassTemplateSpecializationDecl *templateDecl =
      cast<ClassTemplateSpecializationDecl>(NodeRecDecl);
  auto &TemplateArgs = templateDecl->getTemplateArgs();
  DXASSERT(TemplateArgs.size() == 1,
           "Input record types need to have one template argument");
  // The single template argument is the user's record struct; the attribute
  // must be present on that struct's declaration.
  auto &Rec = TemplateArgs.get(0);
  clang::QualType RecType = Rec.getAsType();
  RecordDecl *RD = RecType->getAs<RecordType>()->getDecl();
  if (!RD->hasAttr<HLSLNodeTrackRWInputSharingAttr>()) {
    S.Diags.Report(Loc, diag::err_hlsl_wg_nodetrackrwinputsharing_missing);
    return true;
  }
  return false;
}
static bool CheckBarrierCall(Sema &S, FunctionDecl *FD, CallExpr *CE) {
DXASSERT(FD->getNumParams() == 2, "otherwise, unknown Barrier overload");
// Emit error when MemoryTypeFlags are known to be invalid.
QualType Param0Ty = FD->getParamDecl(0)->getType();
if (Param0Ty ==
HLSLExternalSource::FromSema(&S)->GetBasicKindType(AR_BASIC_UINT32)) {
uint32_t MemoryTypeFlags = 0;
Expr *MemoryTypeFlagsExpr = CE->getArg(0);
llvm::APSInt MemoryTypeFlagsVal;
if (MemoryTypeFlagsExpr->isIntegerConstantExpr(MemoryTypeFlagsVal,
S.Context)) {
MemoryTypeFlags = MemoryTypeFlagsVal.getLimitedValue();
if ((uint32_t)MemoryTypeFlags &
~(uint32_t)DXIL::MemoryTypeFlag::ValidMask) {
S.Diags.Report(MemoryTypeFlagsExpr->getExprLoc(),
diag::err_hlsl_barrier_invalid_memory_flags)
<< (uint32_t)MemoryTypeFlags
<< (uint32_t)DXIL::MemoryTypeFlag::ValidMask;
return true;
}
}
}
// Emit error when SemanticFlags are known to be invalid.
uint32_t SemanticFlags = 0;
Expr *SemanticFlagsExpr = CE->getArg(1);
llvm::APSInt SemanticFlagsVal;
if (SemanticFlagsExpr->isIntegerConstantExpr(SemanticFlagsVal, S.Context)) {
SemanticFlags = SemanticFlagsVal.getLimitedValue();
if ((uint32_t)SemanticFlags &
~(uint32_t)DXIL::BarrierSemanticFlag::ValidMask) {
S.Diags.Report(SemanticFlagsExpr->getExprLoc(),
diag::err_hlsl_barrier_invalid_semantic_flags)
<< (uint32_t)SemanticFlags
<< (uint32_t)DXIL::BarrierSemanticFlag::ValidMask;
return true;
}
}
return false;
}
// Check HLSL call constraints, not fatal to creating the AST.
void Sema::CheckHLSLFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                                 const FunctionProtoType *Proto) {
  // Only calls resolved to built-in HLSL intrinsics need additional checks.
  HLSLIntrinsicAttr *IntrinsicAttr = FDecl->getAttr<HLSLIntrinsicAttr>();
  if (IntrinsicAttr == nullptr || !IsBuiltinTable(IntrinsicAttr->getGroup()))
    return;
  const auto opCode = (hlsl::IntrinsicOp)IntrinsicAttr->getOpcode();
  if (opCode == hlsl::IntrinsicOp::MOP_FinishedCrossGroupSharing) {
    CheckFinishedCrossGroupSharingCall(*this, cast<CXXMethodDecl>(FDecl),
                                       TheCall->getLocStart());
  } else if (opCode == hlsl::IntrinsicOp::IOP_Barrier) {
    CheckBarrierCall(*this, FDecl, TheCall);
  }
}
/////////////////////////////////////////////////////////////////////////////
// Check HLSL intrinsic calls reachable from entry/export functions.
// Warns when the entry's [numthreads] cannot be partitioned into the 2x2
// quads derivative operations require: either a 2D group with even X and Y,
// or a 1D group (Y == 1, Z == 1) whose X is a multiple of 4.
static void DiagnoseNumThreadsForDerivativeOp(const HLSLNumThreadsAttr *Attr,
                                              SourceLocation LocDeriv,
                                              FunctionDecl *FD,
                                              const FunctionDecl *EntryDecl,
                                              DiagnosticsEngine &Diags) {
  bool validNumThreads;
  if (Attr->getY() == 1) {
    // 1D mode: x must be a multiple of 4 and y and z must be 1.
    validNumThreads = (Attr->getX() % 4) == 0 && Attr->getZ() == 1;
  } else {
    // 2D mode: x and y must both be multiples of 2.
    validNumThreads = (Attr->getX() % 2) == 0 && (Attr->getY() % 2) == 0;
  }
  if (validNumThreads)
    return;
  Diags.Report(LocDeriv, diag::warn_hlsl_derivatives_wrong_numthreads)
      << FD->getNameAsString() << EntryDecl->getNameAsString();
  Diags.Report(EntryDecl->getLocation(), diag::note_hlsl_entry_defined_here);
}
// Warns when a derivative operation in `FD` (used at `Loc`) is reachable from
// an entry point whose shader kind, shader model, or node launch type cannot
// support quad-based derivatives; also validates the entry's [numthreads].
// Warnings only; compilation proceeds.
static void DiagnoseDerivativeOp(Sema &S, FunctionDecl *FD, SourceLocation Loc,
                                 const hlsl::ShaderModel *SM,
                                 DXIL::ShaderKind EntrySK,
                                 DXIL::NodeLaunchType NodeLaunchTy,
                                 const FunctionDecl *EntryDecl,
                                 DiagnosticsEngine &Diags) {
  switch (EntrySK) {
  default: {
    // Non compute-like kinds: the shader model decides per-kind.
    if (!SM->AllowDerivatives(EntrySK)) {
      Diags.Report(Loc, diag::warn_hlsl_derivatives_in_wrong_shader_kind)
          << FD->getNameAsString() << EntryDecl->getNameAsString();
      Diags.Report(EntryDecl->getLocation(),
                   diag::note_hlsl_entry_defined_here);
    }
  } break;
  case DXIL::ShaderKind::Compute:
  case DXIL::ShaderKind::Amplification:
  case DXIL::ShaderKind::Mesh: {
    // Compute-like kinds gained derivative support in shader model 6.6.
    if (!SM->IsSM66Plus()) {
      Diags.Report(Loc, diag::warn_hlsl_derivatives_in_wrong_shader_model)
          << FD->getNameAsString() << EntryDecl->getNameAsString();
      Diags.Report(EntryDecl->getLocation(),
                   diag::note_hlsl_entry_defined_here);
    }
  } break;
  case DXIL::ShaderKind::Node: {
    // Node shaders only support derivatives for Broadcasting launch.
    if (NodeLaunchTy != DXIL::NodeLaunchType::Broadcasting) {
      Diags.Report(Loc, diag::warn_hlsl_derivatives_in_wrong_shader_kind)
          << FD->getNameAsString() << EntryDecl->getNameAsString();
      Diags.Report(EntryDecl->getLocation(),
                   diag::note_hlsl_entry_defined_here);
    }
  } break;
  }
  // Thread-group shape must also be quad-compatible when present.
  if (const HLSLNumThreadsAttr *Attr =
          EntryDecl->getAttr<HLSLNumThreadsAttr>()) {
    DiagnoseNumThreadsForDerivativeOp(Attr, Loc, FD, EntryDecl, Diags);
  }
}
static void DiagnoseCalculateLOD(Sema &S, FunctionDecl *FD, SourceLocation Loc,
                                 const hlsl::ShaderModel *SM,
                                 DXIL::ShaderKind EntrySK,
                                 DXIL::NodeLaunchType NodeLaunchTy,
                                 const FunctionDecl *EntryDecl,
                                 DiagnosticsEngine &Diags,
                                 bool locallyVisited) {
  // Only the SamplerComparisonState overload has extra constraints.
  QualType SamplerCmpTy = HLSLExternalSource::FromSema(&S)->GetBasicKindType(
      AR_OBJECT_SAMPLERCOMPARISON);
  if (FD->getParamDecl(0)->getType() != SamplerCmpTy)
    return;
  // That overload requires shader model 6.8+. When the call was already
  // visited from another entry, skip the repeat warning.
  if (!locallyVisited && !SM->IsSM68Plus()) {
    Diags.Report(Loc, diag::warn_hlsl_intrinsic_overload_in_wrong_shader_model)
        << FD->getNameAsString() + " with SamplerComparisonState"
        << "6.8";
    return;
  }
  // CalculateLevelOfDetail* implicitly computes derivatives, so apply the
  // derivative-op stage/model checks as well.
  DiagnoseDerivativeOp(S, FD, Loc, SM, EntrySK, NodeLaunchTy, EntryDecl, Diags);
}
static uint32_t
DiagnoseMemoryFlags(SourceLocation ArgLoc, uint32_t MemoryTypeFlags,
                    bool hasVisibleGroup, DXIL::ShaderKind EntrySK,
                    const FunctionDecl *EntryDecl, DiagnosticsEngine &Diags) {
  // Validates the Barrier() memory-type flags against the current context.
  // AllMemory is silently narrowed to what the context supports; any other
  // out-of-context combination is diagnosed. Returns the filtered flags.
  const bool isAllMemory =
      MemoryTypeFlags == (uint32_t)DXIL::MemoryTypeFlag::AllMemory;
  uint32_t FilteredFlags = MemoryTypeFlags;
  // Group memory flags require a visible group.
  if (!hasVisibleGroup &&
      (MemoryTypeFlags & (uint32_t)DXIL::MemoryTypeFlag::GroupFlags)) {
    if (isAllMemory) {
      FilteredFlags &= ~(uint32_t)DXIL::MemoryTypeFlag::GroupFlags;
    } else {
      Diags.Report(ArgLoc, diag::warn_hlsl_barrier_group_memory_requires_group);
      Diags.Report(EntryDecl->getLocation(),
                   diag::note_hlsl_entry_defined_here);
    }
  }
  // Node memory flags require a node (or library) shader.
  const bool nodeContext = EntrySK == DXIL::ShaderKind::Node ||
                           EntrySK == DXIL::ShaderKind::Library;
  if (!nodeContext &&
      (MemoryTypeFlags & (uint32_t)DXIL::MemoryTypeFlag::NodeFlags)) {
    if (isAllMemory) {
      FilteredFlags &= ~(uint32_t)DXIL::MemoryTypeFlag::NodeFlags;
    } else {
      Diags.Report(ArgLoc, diag::warn_hlsl_barrier_node_memory_requires_node);
      Diags.Report(EntryDecl->getLocation(),
                   diag::note_hlsl_entry_defined_here);
    }
  }
  return FilteredFlags;
}
static void DiagnoseSemanticFlags(SourceLocation ArgLoc, uint32_t SemanticFlags,
                                  bool hasVisibleGroup,
                                  bool memAtLeastGroupScope,
                                  bool memAtLeastDeviceScope,
                                  const FunctionDecl *EntryDecl,
                                  DiagnosticsEngine &Diags) {
  // Validates Barrier() semantic flags against the context and the scopes
  // implied by the memory argument. Each violation gets its own warning plus
  // a note pointing at the entry function.
  auto reportWithNote = [&](unsigned DiagID) {
    Diags.Report(ArgLoc, DiagID);
    Diags.Report(EntryDecl->getLocation(), diag::note_hlsl_entry_defined_here);
  };
  // Group semantic flags require a visible group.
  if (!hasVisibleGroup &&
      (SemanticFlags & (uint32_t)DXIL::BarrierSemanticFlag::GroupFlags))
    reportWithNote(diag::warn_hlsl_barrier_group_semantic_requires_group);
  // DeviceScope requires memory with at least device scope.
  if (!memAtLeastDeviceScope &&
      (SemanticFlags & (uint32_t)DXIL::BarrierSemanticFlag::DeviceScope))
    reportWithNote(diag::warn_hlsl_barrier_no_mem_with_required_device_scope);
  // GroupScope requires memory with at least group scope.
  if (!memAtLeastGroupScope &&
      (SemanticFlags & (uint32_t)DXIL::BarrierSemanticFlag::GroupScope))
    reportWithNote(diag::warn_hlsl_barrier_no_mem_with_required_group_scope);
}
// Validates a reachable Barrier() intrinsic call against the entry point's
// shader kind / node launch type and the shader model. Dispatches on the
// first parameter's type to distinguish the three Barrier overloads, then
// checks the SemanticFlags argument common to all of them.
static void DiagnoseReachableBarrier(Sema &S, CallExpr *CE,
                                     const hlsl::ShaderModel *SM,
                                     DXIL::ShaderKind EntrySK,
                                     DXIL::NodeLaunchType NodeLaunchTy,
                                     const FunctionDecl *EntryDecl,
                                     DiagnosticsEngine &Diags) {
  FunctionDecl *FD = CE->getDirectCallee();
  DXASSERT(FD->getNumParams() == 2, "otherwise, unknown Barrier overload");
  // First, check shader model constraint: Barrier requires SM 6.8+.
  if (!SM->IsSM68Plus()) {
    Diags.Report(CE->getExprLoc(),
                 diag::warn_hlsl_intrinsic_in_wrong_shader_model)
        << FD->getNameAsString() << EntryDecl->getNameAsString() << "6.8";
    Diags.Report(EntryDecl->getLocation(), diag::note_hlsl_entry_defined_here);
    return;
  }
  // Does shader have visible group?
  // Allow exported library functions as well.
  bool hasVisibleGroup = ShaderModel::HasVisibleGroup(EntrySK, NodeLaunchTy);
  QualType Param0Ty = FD->getParamDecl(0)->getType();
  // Used when checking scope flags
  // Default to true to avoid over-strict diagnostics
  bool memAtLeastGroupScope = true;
  bool memAtLeastDeviceScope = true;
  if (Param0Ty ==
      HLSLExternalSource::FromSema(&S)->GetBasicKindType(AR_BASIC_UINT32)) {
    // overload: Barrier(uint MemoryTypeFlags, uint SemanticFlags)
    // The flags are only validated when they are a compile-time constant.
    uint32_t MemoryTypeFlags = 0;
    Expr *MemoryTypeFlagsExpr = CE->getArg(0);
    llvm::APSInt MemoryTypeFlagsVal;
    if (MemoryTypeFlagsExpr->isIntegerConstantExpr(MemoryTypeFlagsVal,
                                                   S.Context)) {
      MemoryTypeFlags = MemoryTypeFlagsVal.getLimitedValue();
      // DiagnoseMemoryFlags may narrow AllMemory for this context.
      MemoryTypeFlags = DiagnoseMemoryFlags(MemoryTypeFlagsExpr->getExprLoc(),
                                            MemoryTypeFlags, hasVisibleGroup,
                                            EntrySK, EntryDecl, Diags);
      // Consider group scope if any group flags remain.
      memAtLeastGroupScope = 0 != MemoryTypeFlags;
      // Consider it device scope if UavMemory or any NodeFlags remain.
      memAtLeastDeviceScope =
          0 != (MemoryTypeFlags & ((uint32_t)DXIL::MemoryTypeFlag::UavMemory |
                                   (uint32_t)DXIL::MemoryTypeFlag::NodeFlags));
    }
  } else {
    DXIL::NodeIOKind IOKind = GetNodeIOType(Param0Ty);
    if (IOKind == DXIL::NodeIOKind::Invalid) {
      // overload: Barrier(<UAV Resource>, uint SemanticFlags)
      // UAV objects have at least device scope.
      DXASSERT(IsHLSLResourceType(Param0Ty),
               "otherwise, missed a case for Barrier");
      // mem scope flags already set to true.
    } else {
      // Must be a record object
      // overload: Barrier(<node record object>, uint SemanticFlags)
      // Only record objects specify a record granularity
      DXASSERT((uint32_t)IOKind &
                   (uint32_t)DXIL::NodeIOFlags::RecordGranularityMask,
               "otherwise, missed a Node object case for Barrier");
      DXIL::NodeIOFlags RecordGranularity = (DXIL::NodeIOFlags)(
          (uint32_t)IOKind &
          (uint32_t)DXIL::NodeIOFlags::RecordGranularityMask);
      // Narrow the assumed memory scope based on the record granularity:
      // thread records are below group scope; thread and group records are
      // below device scope.
      switch (RecordGranularity) {
      case DXIL::NodeIOFlags::ThreadRecord:
        memAtLeastGroupScope = false;
        LLVM_FALLTHROUGH;
      case DXIL::NodeIOFlags::GroupRecord:
        memAtLeastDeviceScope = false;
        break;
      default:
        break;
      }
    }
  }
  // All barrier overloads have SemanticFlags as second parameter; it is only
  // validated when it is a compile-time constant.
  uint32_t SemanticFlags = 0;
  Expr *SemanticFlagsExpr = CE->getArg(1);
  llvm::APSInt SemanticFlagsVal;
  if (SemanticFlagsExpr->isIntegerConstantExpr(SemanticFlagsVal, S.Context)) {
    SemanticFlags = SemanticFlagsVal.getLimitedValue();
    DiagnoseSemanticFlags(SemanticFlagsExpr->getExprLoc(), SemanticFlags,
                          hasVisibleGroup, memAtLeastGroupScope,
                          memAtLeastDeviceScope, EntryDecl, Diags);
  }
}
// Check HLSL member call constraints for used functions.
// locallyVisited is true if this call has been visited already from any other
// entry function. Used to avoid duplicate diagnostics when not dependent on
// entry function (or export function) properties.
void Sema::DiagnoseReachableHLSLCall(CallExpr *CE, const hlsl::ShaderModel *SM,
                                     DXIL::ShaderKind EntrySK,
                                     DXIL::NodeLaunchType NodeLaunchTy,
                                     const FunctionDecl *EntryDecl,
                                     bool locallyVisited) {
  FunctionDecl *FD = CE->getDirectCallee();
  if (!FD)
    return;
  // Only calls to built-in HLSL intrinsics are checked here.
  HLSLIntrinsicAttr *IntrinsicAttr = FD->getAttr<HLSLIntrinsicAttr>();
  if (!IntrinsicAttr || !IsBuiltinTable(IntrinsicAttr->getGroup()))
    return;
  SourceLocation Loc = CE->getExprLoc();
  switch ((IntrinsicOp)IntrinsicAttr->getOpcode()) {
  case hlsl::IntrinsicOp::MOP_CalculateLevelOfDetail:
  case hlsl::IntrinsicOp::MOP_CalculateLevelOfDetailUnclamped:
    DiagnoseCalculateLOD(*this, FD, Loc, SM, EntrySK, NodeLaunchTy, EntryDecl,
                         Diags, locallyVisited);
    break;
  case hlsl::IntrinsicOp::IOP_Barrier:
    DiagnoseReachableBarrier(*this, CE, SM, EntrySK, NodeLaunchTy, EntryDecl,
                             Diags);
    break;
  default:
    break;
  }
}
/////////////////////////////////////////////////////////////////////////////
// Validates that a node-record template argument contains only plain data
// (scalars, vectors, matrices, arrays, and nested structs of those).
// Returns true if a diagnostic was emitted; clears Empty once any
// data-bearing element is seen.
bool hlsl::DiagnoseNodeStructArgument(Sema *self, TemplateArgumentLoc ArgLoc,
                                      QualType ArgTy, bool &Empty,
                                      const FieldDecl *FD) {
  DXASSERT_NOMSG(!ArgTy.isNull());
  HLSLExternalSource *source = HLSLExternalSource::FromSema(self);
  switch (source->GetTypeObjectKind(ArgTy)) {
  case AR_TOBJ_ARRAY:
  case AR_TOBJ_BASIC:
  case AR_TOBJ_MATRIX:
  case AR_TOBJ_VECTOR:
    // Plain data is valid in a node record.
    Empty = false;
    return false;
  case AR_TOBJ_OBJECT:
    // HLSL objects (resources, samplers, ...) may not appear in node records.
    Empty = false;
    self->Diag(ArgLoc.getLocation(), diag::err_hlsl_node_record_object)
        << ArgTy << ArgLoc.getSourceRange();
    if (FD)
      self->Diag(FD->getLocation(), diag::note_field_declared_here)
          << FD->getType() << FD->getSourceRange();
    return true;
  case AR_TOBJ_DEPENDENT:
    llvm_unreachable("obj dependent should go dependent type path, not reach "
                     "here");
    return true;
  case AR_TOBJ_COMPOUND: {
    // Recurse into every field of the struct; report all offending fields.
    bool ErrorFound = false;
    const RecordDecl *RD = ArgTy->getAs<RecordType>()->getDecl();
    for (const FieldDecl *Field : RD->fields())
      ErrorFound |= DiagnoseNodeStructArgument(self, ArgLoc, Field->getType(),
                                               Empty, Field);
    return ErrorFound;
  }
  default:
    DXASSERT(false, "unreachable");
    return false;
  }
}
// This function diagnoses whether or not all entry-point attributes
// should exist on this shader stage.
void DiagnoseEntryAttrAllowedOnStage(clang::Sema *self,
                                     FunctionDecl *entryPointDecl,
                                     DXIL::ShaderKind shaderKind) {
  if (!entryPointDecl->hasAttrs())
    return;
  for (Attr *pAttr : entryPointDecl->getAttrs()) {
    switch (pAttr->getKind()) {
    case clang::attr::HLSLWaveSize: {
      // WaveSize is only meaningful on compute and node shaders.
      switch (shaderKind) {
      case DXIL::ShaderKind::Compute:
      case DXIL::ShaderKind::Node:
        break;
      default:
        self->Diag(pAttr->getRange().getBegin(),
                   diag::err_hlsl_attribute_unsupported_stage)
            << "WaveSize"
            << "compute or node";
        break;
      }
      break;
    }
    // Node-specific attributes are only allowed on node shaders.
    case clang::attr::HLSLNodeLaunch:
    case clang::attr::HLSLNodeIsProgramEntry:
    case clang::attr::HLSLNodeId:
    case clang::attr::HLSLNodeLocalRootArgumentsTableIndex:
    case clang::attr::HLSLNodeShareInputOf:
    case clang::attr::HLSLNodeDispatchGrid:
    case clang::attr::HLSLNodeMaxDispatchGrid:
    case clang::attr::HLSLNodeMaxRecursionDepth: {
      if (shaderKind != DXIL::ShaderKind::Node) {
        self->Diag(pAttr->getRange().getBegin(),
                   diag::err_hlsl_attribute_unsupported_stage)
            << pAttr->getSpelling() << "node";
      }
      break;
    }
    default:
      // Other attributes have no stage restriction checked here. The explicit
      // default also silences -Wswitch for the many unhandled attr::Kind
      // enumerators.
      break;
    }
  }
}
// Builds a "fully qualified" name for FD by prefixing the names of its
// enclosing namespaces, records, and functions, e.g. "ns::Cls::fn".
// Returns an empty string for null or unnamed declarations.
//
// Fix: the previous implementation looped on `while (!FD->isGlobal())` but
// only reassigned FD when the parent was itself a FunctionDecl, so a method
// of a record (or any unknown scope) never advanced and spun forever.
// We now walk the DeclContext chain directly, which always terminates at the
// translation unit.
std::string getFQFunctionName(FunctionDecl *FD) {
  std::string name = "";
  if (!FD) {
    return name;
  }
  if (FD->getName().empty()) {
    // Anonymous functions are not supported.
    return name;
  }
  name = FD->getName();
  // A global function needs no qualification.
  if (FD->isGlobal())
    return name;
  for (DeclContext *parent = FD->getParent();
       parent && !parent->isTranslationUnit(); parent = parent->getParent()) {
    if (NamespaceDecl *ns = dyn_cast<NamespaceDecl>(parent)) {
      // function declaration is in a namespace
      name = ns->getName().str() + "::" + name;
    } else if (RecordDecl *record = dyn_cast<RecordDecl>(parent)) {
      // function declaration is in a record or class
      name = record->getName().str() + "::" + name;
    } else if (FunctionDecl *parentFunc = dyn_cast<FunctionDecl>(parent)) {
      // function declaration is in a nested function
      name = parentFunc->getName().str() + "::" + name;
      // Match the original termination condition: stop once the enclosing
      // function is itself global.
      if (parentFunc->isGlobal())
        break;
    } else {
      // function declaration is in an unknown scope
      name = "unknown::" + name;
    }
  }
  return name;
}
// Validates the payload access qualifier annotations (read/write with stage
// lists) on one payload field: at most one read() and one write() may appear,
// and every producing stage must have a plausible consumer and vice versa
// across the raytracing pipeline (caller -> anyhit -> closesthit/miss ->
// caller).
void hlsl::DiagnosePayloadAccessQualifierAnnotations(
    Sema &S, Declarator &D, const QualType &T,
    const std::vector<hlsl::UnusualAnnotation *> &annotations) {
  auto &&iter = annotations.begin();
  auto &&end = annotations.end();
  // Collect the single read and single write annotation, erroring on
  // duplicates (first duplicate aborts all further validation).
  hlsl::PayloadAccessAnnotation *readAnnotation = nullptr;
  hlsl::PayloadAccessAnnotation *writeAnnotation = nullptr;
  for (; iter != end; ++iter) {
    switch ((*iter)->getKind()) {
    case hlsl::UnusualAnnotation::UA_PayloadAccessQualifier: {
      hlsl::PayloadAccessAnnotation *annotation =
          cast<hlsl::PayloadAccessAnnotation>(*iter);
      if (annotation->qualifier == DXIL::PayloadAccessQualifier::Read) {
        if (!readAnnotation)
          readAnnotation = annotation;
        else {
          S.Diag(annotation->Loc,
                 diag::err_hlsl_payload_access_qualifier_multiple_defined)
              << "read";
          return;
        }
      } else if (annotation->qualifier == DXIL::PayloadAccessQualifier::Write) {
        if (!writeAnnotation)
          writeAnnotation = annotation;
        else {
          S.Diag(annotation->Loc,
                 diag::err_hlsl_payload_access_qualifier_multiple_defined)
              << "write";
          return;
        }
      }
      break;
    }
    default:
      // Ignore all other annotations here.
      break;
    }
  }
  // Records which shader stages are named in a qualifier's stage list.
  struct PayloadAccessQualifierInformation {
    bool anyhit = false;
    bool closesthit = false;
    bool miss = false;
    bool caller = false;
  } readQualContains, writeQualContains;
  auto collectInformationAboutShaderStages =
      [&](hlsl::PayloadAccessAnnotation *annotation,
          PayloadAccessQualifierInformation &info) {
        for (auto shaderType : annotation->ShaderStages) {
          if (shaderType == DXIL::PayloadAccessShaderStage::Anyhit)
            info.anyhit = true;
          else if (shaderType == DXIL::PayloadAccessShaderStage::Closesthit)
            info.closesthit = true;
          else if (shaderType == DXIL::PayloadAccessShaderStage::Miss)
            info.miss = true;
          else if (shaderType == DXIL::PayloadAccessShaderStage::Caller)
            info.caller = true;
        }
        return true;
      };
  if (readAnnotation) {
    if (!collectInformationAboutShaderStages(readAnnotation, readQualContains))
      return;
  }
  if (writeAnnotation) {
    if (!collectInformationAboutShaderStages(writeAnnotation,
                                             writeQualContains))
      return;
  }
  // Validate the write qualifier if present: every written value needs a
  // stage that can consume it.
  if (writeAnnotation) {
    // Note: keep the following two checks separated to diagnose both
    // stages (closesthit and miss)
    // If closesthit/miss writes a value the caller must consume it.
    if (writeQualContains.miss) {
      if (!readAnnotation || !readQualContains.caller) {
        S.Diag(writeAnnotation->Loc,
               diag::err_hlsl_payload_access_qualifier_invalid_combination)
            << D.getIdentifier() << "write"
            << "miss"
            << "consumer";
      }
    }
    if (writeQualContains.closesthit) {
      if (!readAnnotation || !readQualContains.caller) {
        S.Diag(writeAnnotation->Loc,
               diag::err_hlsl_payload_access_qualifier_invalid_combination)
            << D.getIdentifier() << "write"
            << "closesthit"
            << "consumer";
      }
    }
    // If anyhit writes, we need at least one consumer
    if (writeQualContains.anyhit && !readAnnotation) {
      S.Diag(writeAnnotation->Loc,
             diag::err_hlsl_payload_access_qualifier_invalid_combination)
          << D.getIdentifier() << "write"
          << "anyhit"
          << "consumer";
    }
    // If the caller writes, we need at least one consumer
    if (writeQualContains.caller && !readAnnotation) {
      S.Diag(writeAnnotation->Loc,
             diag::err_hlsl_payload_access_qualifier_invalid_combination)
          << D.getIdentifier() << "write"
          << "caller"
          << "consumer";
    }
  }
  // Validate the read qualifier if present: every read needs a stage that
  // can have produced the value.
  if (readAnnotation) {
    // Note: keep the following two checks separated to diagnose both
    // stages (closesthit and miss)
    // If closeshit/miss consume a value we need a producer.
    // Valid producers are the caller and anyhit.
    if (readQualContains.miss) {
      if (!writeAnnotation ||
          !(writeQualContains.anyhit || writeQualContains.caller)) {
        S.Diag(readAnnotation->Loc,
               diag::err_hlsl_payload_access_qualifier_invalid_combination)
            << D.getIdentifier() << "read"
            << "miss"
            << "producer";
      }
    }
    // If closeshit/miss consume a value we need a producer.
    // Valid producers are the caller and anyhit.
    if (readQualContains.closesthit) {
      if (!writeAnnotation ||
          !(writeQualContains.anyhit || writeQualContains.caller)) {
        S.Diag(readAnnotation->Loc,
               diag::err_hlsl_payload_access_qualifier_invalid_combination)
            << D.getIdentifier() << "read"
            << "closesthit"
            << "producer";
      }
    }
    // If anyhit consumes the value we need a producer.
    // Valid producers are the caller and another anyhit.
    if (readQualContains.anyhit) {
      if (!writeAnnotation ||
          !(writeQualContains.anyhit || writeQualContains.caller)) {
        S.Diag(readAnnotation->Loc,
               diag::err_hlsl_payload_access_qualifier_invalid_combination)
            << D.getIdentifier() << "read"
            << "anyhit"
            << "producer";
      }
    }
    // If the caller consumes the value we need a valid producer.
    if (readQualContains.caller && !writeAnnotation) {
      S.Diag(readAnnotation->Loc,
             diag::err_hlsl_payload_access_qualifier_invalid_combination)
          << D.getIdentifier() << "read"
          << "caller"
          << "producer";
    }
  }
}
// Performs common validation of the unusual annotations (packoffset, register
// assignment, semantics, payload qualifiers) attached to one declaration,
// diagnosing conflicting packoffset and register assignments.
void hlsl::DiagnoseUnusualAnnotationsForHLSL(
    Sema &S, std::vector<hlsl::UnusualAnnotation *> &annotations) {
  bool packoffsetOverriddenReported = false;
  auto &&iter = annotations.begin();
  auto &&end = annotations.end();
  for (; iter != end; ++iter) {
    switch ((*iter)->getKind()) {
    case hlsl::UnusualAnnotation::UA_ConstantPacking: {
      hlsl::ConstantPacking *constantPacking =
          cast<hlsl::ConstantPacking>(*iter);
      // Check whether this will conflict with other packoffsets. If so, only
      // issue a warning; last one wins. Only the first conflicting pair is
      // reported per declaration.
      if (!packoffsetOverriddenReported) {
        auto newIter = iter;
        ++newIter;
        while (newIter != end) {
          hlsl::ConstantPacking *other =
              dyn_cast_or_null<hlsl::ConstantPacking>(*newIter);
          if (other != nullptr &&
              (other->Subcomponent != constantPacking->Subcomponent ||
               other->ComponentOffset != constantPacking->ComponentOffset)) {
            S.Diag(constantPacking->Loc, diag::warn_hlsl_packoffset_overridden);
            packoffsetOverriddenReported = true;
            break;
          }
          ++newIter;
        }
      }
      break;
    }
    case hlsl::UnusualAnnotation::UA_RegisterAssignment: {
      hlsl::RegisterAssignment *registerAssignment =
          cast<hlsl::RegisterAssignment>(*iter);
      // Check whether this will conflict with other register assignments of the
      // same type (each conflicting later assignment is reported).
      auto newIter = iter;
      ++newIter;
      while (newIter != end) {
        hlsl::RegisterAssignment *other =
            dyn_cast_or_null<hlsl::RegisterAssignment>(*newIter);
        // Same register bank and profile, but different number.
        if (other != nullptr &&
            ShaderModelsMatch(other->ShaderProfile,
                              registerAssignment->ShaderProfile) &&
            other->RegisterType == registerAssignment->RegisterType &&
            (other->RegisterNumber != registerAssignment->RegisterNumber ||
             other->RegisterOffset != registerAssignment->RegisterOffset)) {
          // Obvious conflict - report it up front.
          S.Diag(registerAssignment->Loc,
                 diag::err_hlsl_register_semantics_conflicting);
        }
        ++newIter;
      }
      break;
    }
    case hlsl::UnusualAnnotation::UA_SemanticDecl: {
      // hlsl::SemanticDecl* semanticDecl = cast<hlsl::SemanticDecl>(*iter);
      // No common validation to be performed.
      break;
    }
    case hlsl::UnusualAnnotation::UA_PayloadAccessQualifier: {
      // Validation happens separately in
      // DiagnosePayloadAccessQualifierAnnotations.
      break;
    }
    }
  }
}
clang::OverloadingResult
hlsl::GetBestViableFunction(clang::Sema &S, clang::SourceLocation Loc,
                            clang::OverloadCandidateSet &set,
                            clang::OverloadCandidateSet::iterator &Best) {
  // Delegate overload resolution to the HLSL external source, which applies
  // HLSL-specific conversion ranking.
  HLSLExternalSource *hlslSource = HLSLExternalSource::FromSema(&S);
  return hlslSource->GetBestViableFunction(Loc, set, Best);
}
void hlsl::InitializeInitSequenceForHLSL(Sema *self,
                                         const InitializedEntity &Entity,
                                         const InitializationKind &Kind,
                                         MultiExprArg Args,
                                         bool TopLevelOfInitList,
                                         InitializationSequence *initSequence) {
  // Delegate to the HLSL external source, which populates *initSequence.
  HLSLExternalSource *hlslSource = HLSLExternalSource::FromSema(self);
  hlslSource->InitializeInitSequenceForHLSL(Entity, Kind, Args,
                                            TopLevelOfInitList, initSequence);
}
// Returns the total number of flattened basic elements contributed by all
// initializers in InitList, recursing into nested initializer lists.
static unsigned CaculateInitListSize(HLSLExternalSource *hlslSource,
                                     const clang::InitListExpr *InitList) {
  unsigned totalSize = 0;
  const unsigned numInits = InitList->getNumInits();
  for (unsigned i = 0; i != numInits; ++i) {
    const clang::Expr *eltInit = InitList->getInit(i);
    if (const InitListExpr *nestedList = dyn_cast<InitListExpr>(eltInit))
      totalSize += CaculateInitListSize(hlslSource, nestedList);
    else
      totalSize += hlslSource->GetNumBasicElements(eltInit->getType());
  }
  return totalSize;
}
// Deduces the array dimension implied by an initializer list: the flattened
// element count of the list divided by the flattened element count of EltTy.
// Returns 0 when no valid dimension can be deduced (empty list, element type
// with no basic elements, or a count that does not divide evenly).
unsigned
hlsl::CaculateInitListArraySizeForHLSL(clang::Sema *sema,
                                       const clang::InitListExpr *InitList,
                                       const clang::QualType EltTy) {
  HLSLExternalSource *hlslSource = HLSLExternalSource::FromSema(sema);
  unsigned totalSize = CaculateInitListSize(hlslSource, InitList);
  unsigned eltSize = hlslSource->GetNumBasicElements(EltTy);
  // Guard eltSize == 0 explicitly: the previous code computed
  // `totalSize % eltSize` first, which is undefined behavior when the
  // element type flattens to zero basic elements.
  if (eltSize > 0 && totalSize > 0 && (totalSize % eltSize) == 0) {
    return totalSize / eltSize;
  }
  return 0;
}
// NRVO unsafe for a variety of cases in HLSL
// - vectors/matrix with bool component types
// - attributes not captured to QualType, such as precise and globallycoherent
bool hlsl::ShouldSkipNRVO(clang::Sema &sema, clang::QualType returnType,
                          clang::VarDecl *VD, clang::FunctionDecl *FD) {
  // exclude vectors/matrix (not treated as record type)
  // NRVO breaks on bool component type due to diff between
  // i32 memory and i1 register representation
  if (hlsl::IsHLSLVecMatType(returnType))
    return true;
  // Strip array dimensions so resource/node arrays are also excluded.
  QualType ArrayEltTy = returnType;
  while (const clang::ArrayType *AT =
             sema.getASTContext().getAsArrayType(ArrayEltTy)) {
    ArrayEltTy = AT->getElementType();
  }
  // exclude resource for globallycoherent.
  if (hlsl::IsHLSLResourceType(ArrayEltTy) || hlsl::IsHLSLNodeType(ArrayEltTy))
    return true;
  // exclude precise.
  if (VD->hasAttr<HLSLPreciseAttr>()) {
    return true;
  }
  if (FD) {
    // propagate precise to the VD.
    if (FD->hasAttr<HLSLPreciseAttr>()) {
      VD->addAttr(FD->getAttr<HLSLPreciseAttr>());
      return true;
    }
    // Don't do NRVO if this is an entry function or a patch constant function.
    // With NRVO, writing to the return variable directly writes to the output
    // argument instead of to an alloca which gets copied to the output arg in
    // one spot. This causes many extra dx.storeOutput's to be emitted.
    //
    // Check if this is an entry function the easy way if we're a library.
    // (Use hasAttr: the previous code bound the attribute to an unused
    // variable, triggering -Wunused-variable.)
    if (FD->hasAttr<HLSLShaderAttr>()) {
      return true;
    }
    // Check if it's an entry function the hard way
    if (!FD->getDeclContext()->isNamespace() && FD->isGlobal()) {
      // Check if this is an entry function by comparing name
      // TODO: Remove this once we put HLSLShaderAttr on all entry functions.
      if (FD->getIdentifier() &&
          FD->getName() == sema.getLangOpts().HLSLEntryFunction) {
        return true;
      }
      // See if it's the patch constant function
      if (sema.getLangOpts().HLSLProfile.size() &&
          (sema.getLangOpts().HLSLProfile[0] == 'h' /*For 'hs'*/ ||
           sema.getLangOpts().HLSLProfile[0] == 'l' /*For 'lib'*/)) {
        if (hlsl::IsPatchConstantFunctionDecl(FD))
          return true;
      }
    }
  }
  return false;
}
bool hlsl::IsConversionToLessOrEqualElements(
    clang::Sema *self, const clang::ExprResult &sourceExpr,
    const clang::QualType &targetType, bool explicitConversion) {
  // Forward to the HLSL external source, which owns element-count
  // conversion logic.
  HLSLExternalSource *source = HLSLExternalSource::FromSema(self);
  return source->IsConversionToLessOrEqualElements(sourceExpr, targetType,
                                                   explicitConversion);
}
ExprResult hlsl::LookupMatrixMemberExprForHLSL(Sema *self, Expr &BaseExpr,
                                               DeclarationName MemberName,
                                               bool IsArrow,
                                               SourceLocation OpLoc,
                                               SourceLocation MemberLoc) {
  // Forward matrix member lookup to the HLSL external source.
  HLSLExternalSource *source = HLSLExternalSource::FromSema(self);
  return source->LookupMatrixMemberExprForHLSL(BaseExpr, MemberName, IsArrow,
                                               OpLoc, MemberLoc);
}
ExprResult hlsl::LookupVectorMemberExprForHLSL(Sema *self, Expr &BaseExpr,
                                               DeclarationName MemberName,
                                               bool IsArrow,
                                               SourceLocation OpLoc,
                                               SourceLocation MemberLoc) {
  // Forward vector member (swizzle) lookup to the HLSL external source.
  HLSLExternalSource *source = HLSLExternalSource::FromSema(self);
  return source->LookupVectorMemberExprForHLSL(BaseExpr, MemberName, IsArrow,
                                               OpLoc, MemberLoc);
}
ExprResult hlsl::LookupArrayMemberExprForHLSL(Sema *self, Expr &BaseExpr,
                                              DeclarationName MemberName,
                                              bool IsArrow,
                                              SourceLocation OpLoc,
                                              SourceLocation MemberLoc) {
  // Forward array member lookup to the HLSL external source.
  HLSLExternalSource *source = HLSLExternalSource::FromSema(self);
  return source->LookupArrayMemberExprForHLSL(BaseExpr, MemberName, IsArrow,
                                              OpLoc, MemberLoc);
}
// Dispatches member lookup on matrix/vector/array base expressions to the
// HLSL external source. Returns true (with `result` set) when the lookup was
// handled here; false means the caller should perform normal member lookup.
bool hlsl::LookupRecordMemberExprForHLSL(Sema *self, Expr &BaseExpr,
                                         DeclarationName MemberName,
                                         bool IsArrow, SourceLocation OpLoc,
                                         SourceLocation MemberLoc,
                                         ExprResult &result) {
  HLSLExternalSource *source = HLSLExternalSource::FromSema(self);
  switch (source->GetTypeObjectKind(BaseExpr.getType())) {
  case AR_TOBJ_MATRIX:
    result = source->LookupMatrixMemberExprForHLSL(BaseExpr, MemberName,
                                                   IsArrow, OpLoc, MemberLoc);
    return true;
  case AR_TOBJ_VECTOR:
    result = source->LookupVectorMemberExprForHLSL(BaseExpr, MemberName,
                                                   IsArrow, OpLoc, MemberLoc);
    return true;
  case AR_TOBJ_ARRAY:
    result = source->LookupArrayMemberExprForHLSL(BaseExpr, MemberName, IsArrow,
                                                  OpLoc, MemberLoc);
    return true;
  default:
    // Other base kinds are not handled here. (The unreachable trailing
    // `return false;` that followed the switch has been removed: every
    // switch path returns.)
    return false;
  }
}
clang::ExprResult hlsl::MaybeConvertMemberAccess(clang::Sema *self,
                                                 clang::Expr *E) {
  // Forward to the HLSL external source for any needed member-access
  // conversion.
  HLSLExternalSource *source = HLSLExternalSource::FromSema(self);
  return source->MaybeConvertMemberAccess(E);
}
// Thin forwarder to HLSLExternalSource::TryStaticCastForHLSL.
// NOTE(review): SuppressDiagnostics is intentionally passed twice —
// presumably the callee takes separate suppress-warnings and suppress-errors
// flags that are driven by the single flag here; confirm against the
// HLSLExternalSource declaration.
bool hlsl::TryStaticCastForHLSL(
    Sema *self, ExprResult &SrcExpr, QualType DestType,
    Sema::CheckedConversionKind CCK, const SourceRange &OpRange, unsigned &msg,
    CastKind &Kind, CXXCastPath &BasePath, bool ListInitialization,
    bool SuppressDiagnostics, StandardConversionSequence *standard) {
  return HLSLExternalSource::FromSema(self)->TryStaticCastForHLSL(
      SrcExpr, DestType, CCK, OpRange, msg, Kind, BasePath, ListInitialization,
      SuppressDiagnostics, SuppressDiagnostics, standard);
}
clang::ExprResult
hlsl::PerformHLSLConversion(clang::Sema *self, clang::Expr *From,
                            clang::QualType targetType,
                            const clang::StandardConversionSequence &SCS,
                            clang::Sema::CheckedConversionKind CCK) {
  // Forward the conversion request to the HLSL external source.
  HLSLExternalSource *source = HLSLExternalSource::FromSema(self);
  return source->PerformHLSLConversion(From, targetType, SCS, CCK);
}
clang::ImplicitConversionSequence
hlsl::TrySubscriptIndexInitialization(clang::Sema *self, clang::Expr *SrcExpr,
                                      clang::QualType DestType) {
  // Forward subscript-index conversion checking to the HLSL external source.
  HLSLExternalSource *source = HLSLExternalSource::FromSema(self);
  return source->TrySubscriptIndexInitialization(SrcExpr, DestType);
}
/// <summary>Performs HLSL-specific initialization on the specified
/// context.</summary>
void hlsl::InitializeASTContextForHLSL(ASTContext &context) {
  HLSLExternalSource *hlslSource = new HLSLExternalSource();
  // Ref-counted ownership: if initialization fails, the source is released
  // when externalSource goes out of scope instead of being installed.
  IntrusiveRefCntPtr<ExternalASTSource> externalSource(hlslSource);
  if (!hlslSource->Initialize(context))
    return;
  context.setExternalSource(externalSource);
}
////////////////////////////////////////////////////////////////////////////////
// FlattenedTypeIterator implementation //
/// <summary>Constructs a FlattenedTypeIterator for the specified
/// type.</summary>
FlattenedTypeIterator::FlattenedTypeIterator(SourceLocation loc, QualType type,
                                             HLSLExternalSource &source)
    : m_source(source), m_draining(false), m_springLoaded(false),
      m_incompleteCount(0), m_typeDepth(0), m_loc(loc) {
  // Seed the tracker stack with the root type, then advance until the top of
  // the stack is a reportable leaf (or the stack drains to empty).
  if (pushTrackerForType(type, nullptr)) {
    while (!m_typeTrackers.empty() && !considerLeaf())
      consumeLeaf();
  }
}
/// <summary>Constructs a FlattenedTypeIterator for the specified
/// expressions.</summary>
FlattenedTypeIterator::FlattenedTypeIterator(SourceLocation loc,
                                             MultiExprArg args,
                                             HLSLExternalSource &source)
    : m_source(source), m_draining(false), m_springLoaded(false),
      m_incompleteCount(0), m_typeDepth(0), m_loc(loc) {
  // Seed the tracker stack with the expression range; an empty argument list
  // produces an iterator with no current element.
  if (!args.empty()) {
    MultiExprArg::iterator ii = args.begin();
    MultiExprArg::iterator ie = args.end();
    DXASSERT(ii != ie, "otherwise empty() returned an incorrect value");
    m_typeTrackers.push_back(
        FlattenedTypeIterator::FlattenedTypeTracker(ii, ie));
    // If no reportable leaf can be reached, leave the iterator empty.
    if (!considerLeaf()) {
      m_typeTrackers.clear();
    }
  }
}
/// <summary>Gets the current element in the flattened type hierarchy.</summary>
QualType FlattenedTypeIterator::getCurrentElement() const {
  const FlattenedTypeTracker &top = m_typeTrackers.back();
  return top.Type;
}
/// <summary>Get the number of repeated current elements.</summary>
unsigned int FlattenedTypeIterator::getCurrentElementSize() const {
const FlattenedTypeTracker &back = m_typeTrackers.back();
return (back.IterKind == FK_IncompleteArray) ? 1 : back.Count;
}
/// <summary>Checks whether the iterator has a current element type to
/// report.</summary>
bool FlattenedTypeIterator::hasCurrentElement() const {
  return !m_typeTrackers.empty();
}
/// <summary>Consumes count elements on this iterator.</summary>
void FlattenedTypeIterator::advanceCurrentElement(unsigned int count) {
  DXASSERT(
      !m_typeTrackers.empty(),
      "otherwise caller should not be trying to advance to another element");
  DXASSERT(m_typeTrackers.back().IterKind == FK_IncompleteArray ||
               count <= m_typeTrackers.back().Count,
           "caller should never exceed currently pending element count");
  FlattenedTypeTracker &tracker = m_typeTrackers.back();
  if (tracker.IterKind == FK_IncompleteArray) {
    // Incomplete arrays grow with consumption: Count records how many
    // elements have been consumed so far, and m_springLoaded marks that
    // more elements can still be produced on demand.
    tracker.Count += count;
    m_springLoaded = true;
  } else {
    tracker.Count -= count;
    m_springLoaded = false;
    // When the current run is exhausted, advance to the next leaf.
    if (m_typeTrackers.back().Count == 0) {
      advanceLeafTracker();
    }
  }
}
unsigned int FlattenedTypeIterator::countRemaining() {
m_draining = true; // when draining the iterator, incomplete arrays stop
// functioning as an infinite array
size_t result = 0;
while (hasCurrentElement() && !m_springLoaded) {
size_t pending = getCurrentElementSize();
result += pending;
advanceCurrentElement(pending);
}
return result;
}
// Moves past the current leaf, consuming trackers until either the iterator
// is exhausted or a new reportable leaf sits at the top of the stack.
void FlattenedTypeIterator::advanceLeafTracker() {
  DXASSERT(
      !m_typeTrackers.empty(),
      "otherwise caller should not be trying to advance to another element");
  do {
    consumeLeaf();
  } while (!m_typeTrackers.empty() && !considerLeaf());
}
// Examines the tracker at the top of the stack, decomposing it (recursively)
// until a reportable leaf is found. Returns true when the top of the stack
// is a reportable leaf; false when the stack is empty or the nesting limit
// was exceeded.
bool FlattenedTypeIterator::considerLeaf() {
  if (m_typeTrackers.empty()) {
    return false;
  }
  // Bound recursion depth to guard against pathologically nested types;
  // exceeding it reports an error and abandons iteration entirely.
  m_typeDepth++;
  if (m_typeDepth > MaxTypeDepth) {
    m_source.ReportUnsupportedTypeNesting(m_loc, m_firstType);
    m_typeTrackers.clear();
    m_typeDepth--;
    return false;
  }
  bool result = false;
  FlattenedTypeTracker &tracker = m_typeTrackers.back();
  tracker.IsConsidered = true;
  switch (tracker.IterKind) {
  case FlattenedIterKind::FK_Expressions:
    if (pushTrackerForExpression(tracker.CurrentExpr)) {
      result = considerLeaf();
    }
    break;
  case FlattenedIterKind::FK_Fields:
    if (pushTrackerForType(tracker.CurrentField->getType(), nullptr)) {
      result = considerLeaf();
    }
    break;
  case FlattenedIterKind::FK_Bases:
    if (pushTrackerForType(tracker.CurrentBase->getType(), nullptr)) {
      result = considerLeaf();
    }
    break;
  case FlattenedIterKind::FK_IncompleteArray:
    // Incomplete arrays are "spring loaded" (keep producing their element
    // type) and then share the simple-type handling below.
    m_springLoaded = true;
    LLVM_FALLTHROUGH;
  default:
  case FlattenedIterKind::FK_Simple: {
    // Basic types, objects, and strings are reportable leaves as-is;
    // everything else is decomposed by pushing a new tracker.
    ArTypeObjectKind objectKind = m_source.GetTypeObjectKind(tracker.Type);
    if (objectKind != ArTypeObjectKind::AR_TOBJ_BASIC &&
        objectKind != ArTypeObjectKind::AR_TOBJ_OBJECT &&
        objectKind != ArTypeObjectKind::AR_TOBJ_STRING) {
      if (pushTrackerForType(tracker.Type, tracker.CurrentExpr)) {
        result = considerLeaf();
      }
    } else {
      result = true;
    }
  }
  }
  m_typeDepth--;
  return result;
}
// Consumes the current leaf, popping exhausted trackers off the stack until
// a tracker with remaining work (or an unconsidered leaf) is found, or the
// stack is empty.
void FlattenedTypeIterator::consumeLeaf() {
  bool topConsumed = true; // Tracks whether we're processing the topmost item
                           // which we should consume.
  for (;;) {
    if (m_typeTrackers.empty()) {
      return;
    }
    FlattenedTypeTracker &tracker = m_typeTrackers.back();
    // Reach a leaf which is not considered before.
    // Stop here.
    if (!tracker.IsConsidered) {
      break;
    }
    switch (tracker.IterKind) {
    case FlattenedIterKind::FK_Expressions:
      // Advance to the next expression; pop when the range is exhausted.
      ++tracker.CurrentExpr;
      if (tracker.CurrentExpr == tracker.EndExpr) {
        m_typeTrackers.pop_back();
        topConsumed = false;
      } else {
        return;
      }
      break;
    case FlattenedIterKind::FK_Fields:
      // Advance to the next field; pop when the range is exhausted.
      ++tracker.CurrentField;
      if (tracker.CurrentField == tracker.EndField) {
        m_typeTrackers.pop_back();
        topConsumed = false;
      } else {
        return;
      }
      break;
    case FlattenedIterKind::FK_Bases:
      // Advance to the next base class; pop when the range is exhausted.
      ++tracker.CurrentBase;
      if (tracker.CurrentBase == tracker.EndBase) {
        m_typeTrackers.pop_back();
        topConsumed = false;
      } else {
        return;
      }
      break;
    case FlattenedIterKind::FK_IncompleteArray:
      // An incomplete array never runs out unless draining; when draining,
      // record how many elements it produced and pop it.
      if (m_draining) {
        DXASSERT(m_typeTrackers.size() == 1,
                 "m_typeTrackers.size() == 1, otherwise incomplete array isn't "
                 "topmost");
        m_incompleteCount = tracker.Count;
        m_typeTrackers.pop_back();
      }
      return;
    default:
    case FlattenedIterKind::FK_Simple: {
      m_springLoaded = false;
      // Only decrement the count when this tracker was reached by popping a
      // child; the topmost leaf's count was already accounted for.
      if (!topConsumed) {
        DXASSERT(tracker.Count > 0,
                 "tracker.Count > 0 - otherwise we shouldn't be on stack");
        --tracker.Count;
      } else {
        topConsumed = false;
      }
      if (tracker.Count == 0) {
        m_typeTrackers.pop_back();
      } else {
        return;
      }
    }
    }
  }
}
// Pushes a tracker for one expression: initializer lists are flattened
// element-wise; everything else is tracked by its type. Returns false when
// there is nothing to track (empty list, void/function type).
bool FlattenedTypeIterator::pushTrackerForExpression(
    MultiExprArg::iterator expression) {
  Expr *e = *expression;
  if (e->getStmtClass() == Stmt::StmtClass::InitListExprClass) {
    InitListExpr *initExpr = dyn_cast<InitListExpr>(e);
    const unsigned numInits = initExpr->getNumInits();
    if (numInits == 0) {
      return false;
    }
    MultiExprArg inits(initExpr->getInits(), numInits);
    DXASSERT(inits.begin() != inits.end(),
             "otherwise getNumInits() returned an incorrect value");
    m_typeTrackers.push_back(FlattenedTypeIterator::FlattenedTypeTracker(
        inits.begin(), inits.end()));
    return true;
  }
  return pushTrackerForType(e->getType(), expression);
}
// TODO: improve this to provide a 'peek' at intermediate types,
// which should help compare struct foo[1000] to avoid 1000 steps + per-field
// steps
// Pushes a tracker that enumerates the flattened leaves of 'type'.
// Returns false when the type contributes no elements (void, function types,
// empty aggregates). 'expression' is remembered on leaf trackers so a
// conversion can later be applied to the originating initializer, if any.
bool FlattenedTypeIterator::pushTrackerForType(
    QualType type, MultiExprArg::iterator expression) {
  if (type->isVoidType()) {
    return false;
  }
  if (type->isFunctionType()) {
    return false;
  }
  // Remember the first type seen; m_firstType is left untouched afterwards.
  if (m_firstType.isNull()) {
    m_firstType = type;
  }
  ArTypeObjectKind objectKind = m_source.GetTypeObjectKind(type);
  QualType elementType;
  unsigned int elementCount;
  const RecordType *recordType;
  RecordDecl::field_iterator fi, fe;
  switch (objectKind) {
  case ArTypeObjectKind::AR_TOBJ_ARRAY:
    // TODO: handle multi-dimensional arrays
    elementType = type->getAsArrayTypeUnsafe()
                      ->getElementType(); // handle arrays of arrays
    elementCount = GetArraySize(type);
    if (elementCount == 0) {
      // Incomplete (unbounded) arrays get a dedicated tracker with no count;
      // a complete zero-sized array contributes nothing.
      if (type->isIncompleteArrayType()) {
        m_typeTrackers.push_back(
            FlattenedTypeIterator::FlattenedTypeTracker(elementType));
        return true;
      }
      return false;
    }
    m_typeTrackers.push_back(FlattenedTypeIterator::FlattenedTypeTracker(
        elementType, elementCount, nullptr));
    return true;
  case ArTypeObjectKind::AR_TOBJ_BASIC:
    // A scalar is a single leaf; keep the originating expression for
    // later conversion.
    m_typeTrackers.push_back(
        FlattenedTypeIterator::FlattenedTypeTracker(type, 1, expression));
    return true;
  case ArTypeObjectKind::AR_TOBJ_COMPOUND: {
    recordType = type->getAs<RecordType>();
    DXASSERT(recordType, "compound type is expected to be a RecordType");
    fi = recordType->getDecl()->field_begin();
    fe = recordType->getDecl()->field_end();
    bool bAddTracker = false;
    // Skip empty struct.
    if (fi != fe) {
      m_typeTrackers.push_back(
          FlattenedTypeIterator::FlattenedTypeTracker(type, fi, fe));
      type = (*fi)->getType();
      bAddTracker = true;
    }
    if (CXXRecordDecl *cxxRecordDecl =
            dyn_cast<CXXRecordDecl>(recordType->getDecl())) {
      // We'll error elsewhere if the record has no definition,
      // just don't attempt to use it.
      if (cxxRecordDecl->hasDefinition()) {
        CXXRecordDecl::base_class_iterator bi, be;
        bi = cxxRecordDecl->bases_begin();
        be = cxxRecordDecl->bases_end();
        if (bi != be) {
          // Add type tracker for base.
          // Add base after child to make sure base considered first.
          m_typeTrackers.push_back(
              FlattenedTypeIterator::FlattenedTypeTracker(type, bi, be));
          bAddTracker = true;
        }
      }
    }
    return bAddTracker;
  }
  case ArTypeObjectKind::AR_TOBJ_MATRIX:
    // A matrix flattens to rows*cols copies of its element type.
    m_typeTrackers.push_back(FlattenedTypeIterator::FlattenedTypeTracker(
        m_source.GetMatrixOrVectorElementType(type), GetElementCount(type),
        nullptr));
    return true;
  case ArTypeObjectKind::AR_TOBJ_VECTOR:
    // A vector flattens to its component count copies of its element type.
    m_typeTrackers.push_back(FlattenedTypeIterator::FlattenedTypeTracker(
        m_source.GetMatrixOrVectorElementType(type), GetHLSLVecSize(type),
        nullptr));
    return true;
  case ArTypeObjectKind::AR_TOBJ_OBJECT: {
    if (m_source.IsSubobjectType(type)) {
      // subobjects are initialized with initialization lists
      recordType = type->getAs<RecordType>();
      fi = recordType->getDecl()->field_begin();
      fe = recordType->getDecl()->field_end();
      m_typeTrackers.push_back(
          FlattenedTypeIterator::FlattenedTypeTracker(type, fi, fe));
      return true;
    } else {
      // Object have no sub-types.
      m_typeTrackers.push_back(FlattenedTypeIterator::FlattenedTypeTracker(
          type.getCanonicalType(), 1, expression));
      return true;
    }
  }
  case ArTypeObjectKind::AR_TOBJ_STRING: {
    // Strings have no sub-types.
    m_typeTrackers.push_back(FlattenedTypeIterator::FlattenedTypeTracker(
        type.getCanonicalType(), 1, expression));
    return true;
  }
  default:
    DXASSERT(false, "unreachable");
    return false;
  }
}
// Walks two flattened element sequences in lockstep, checking whether each
// right-hand element can convert (and is identical) to the corresponding
// left-hand element. Counts the total elements consumed on each side; the
// trailing countRemaining() calls make the counts reflect full sequence
// lengths even when the walk stops early.
FlattenedTypeIterator::ComparisonResult FlattenedTypeIterator::CompareIterators(
    HLSLExternalSource &source, SourceLocation loc,
    FlattenedTypeIterator &leftIter, FlattenedTypeIterator &rightIter) {
  FlattenedTypeIterator::ComparisonResult result;
  result.LeftCount = 0;
  result.RightCount = 0;
  result.AreElementsEqual = true;   // Until proven otherwise.
  result.CanConvertElements = true; // Until proven otherwise.
  while (leftIter.hasCurrentElement() && rightIter.hasCurrentElement()) {
    Expr *actualExpr = rightIter.getExprOrNull();
    bool hasExpr = actualExpr != nullptr;
    // When there is no concrete initializer expression, a scratch expression
    // of the current element type stands in for the conversion check.
    StmtExpr scratchExpr(nullptr, rightIter.getCurrentElement(), NoLoc, NoLoc);
    StandardConversionSequence standard;
    ExprResult convertedExpr;
    if (!source.CanConvert(loc, hasExpr ? actualExpr : &scratchExpr,
                           leftIter.getCurrentElement(),
                           ExplicitConversionFalse, nullptr, &standard)) {
      result.AreElementsEqual = false;
      result.CanConvertElements = false;
      break;
    } else if (hasExpr && (standard.First != ICK_Identity ||
                           !standard.isIdentityConversion())) {
      // A non-identity conversion is needed: materialize it on the actual
      // initializer expression.
      convertedExpr = source.getSema()->PerformImplicitConversion(
          actualExpr, leftIter.getCurrentElement(), standard, Sema::AA_Casting,
          Sema::CCK_ImplicitConversion);
    }
    if (rightIter.getCurrentElement()->getCanonicalTypeUnqualified() !=
        leftIter.getCurrentElement()->getCanonicalTypeUnqualified()) {
      result.AreElementsEqual = false;
    }
    unsigned int advance = std::min(leftIter.getCurrentElementSize(),
                                    rightIter.getCurrentElementSize());
    DXASSERT(advance > 0, "otherwise one iterator should report empty");
    // If we need to apply conversions to the expressions, then advance a single
    // element.
    if (hasExpr && convertedExpr.isUsable()) {
      rightIter.replaceExpr(convertedExpr.get());
      advance = 1;
    }
    // If both elements are unbound arrays, break out or we'll never finish
    if (leftIter.getCurrentElementKind() == FK_IncompleteArray &&
        rightIter.getCurrentElementKind() == FK_IncompleteArray)
      break;
    leftIter.advanceCurrentElement(advance);
    rightIter.advanceCurrentElement(advance);
    result.LeftCount += advance;
    result.RightCount += advance;
  }
  result.LeftCount += leftIter.countRemaining();
  result.RightCount += rightIter.countRemaining();
  return result;
}
// Compares two types by flattening each into its leaf elements and walking
// them in lockstep. Diagnostics from the comparison are reported at leftLoc.
FlattenedTypeIterator::ComparisonResult FlattenedTypeIterator::CompareTypes(
    HLSLExternalSource &source, SourceLocation leftLoc, SourceLocation rightLoc,
    QualType left, QualType right) {
  FlattenedTypeIterator lhs(leftLoc, left, source);
  FlattenedTypeIterator rhs(rightLoc, right, source);
  return CompareIterators(source, leftLoc, lhs, rhs);
}
// Compares a target type against a list of initializer expressions by
// flattening both sides and walking their elements in lockstep.
FlattenedTypeIterator::ComparisonResult
FlattenedTypeIterator::CompareTypesForInit(HLSLExternalSource &source,
                                           QualType left, MultiExprArg args,
                                           SourceLocation leftLoc,
                                           SourceLocation rightLoc) {
  FlattenedTypeIterator targetIter(leftLoc, left, source);
  FlattenedTypeIterator initIter(rightLoc, args, source);
  return CompareIterators(source, leftLoc, targetIter, initIter);
}
////////////////////////////////////////////////////////////////////////////////
// Attribute processing support. //
// Reads attribute argument 'index' as a non-negative integer constant.
// Accepts: an integer constant expression, a reference to a constant
// variable (via its initializer), or a float constant (truncated toward
// zero). Warns on negative or non-integral values, errors on non-constant
// expressions, and returns 0 when the argument is missing or invalid.
static int ValidateAttributeIntArg(Sema &S, const AttributeList &Attr,
                                   unsigned index = 0) {
  int64_t value = 0;
  if (Attr.getNumArgs() > index) {
    Expr *E = nullptr;
    if (!Attr.isArgExpr(index)) {
      // For case arg is constant variable.
      IdentifierLoc *loc = Attr.getArgAsIdent(index);
      VarDecl *decl = dyn_cast_or_null<VarDecl>(
          S.LookupSingleName(S.getCurScope(), loc->Ident, loc->Loc,
                             Sema::LookupNameKind::LookupOrdinaryName));
      if (!decl) {
        S.Diag(Attr.getLoc(), diag::warn_hlsl_attribute_expects_uint_literal)
            << Attr.getName();
        return value;
      }
      Expr *init = decl->getInit();
      if (!init) {
        S.Diag(Attr.getLoc(), diag::warn_hlsl_attribute_expects_uint_literal)
            << Attr.getName();
        return value;
      }
      E = init;
    } else
      E = Attr.getArgAsExpr(index);
    clang::APValue ArgNum;
    bool displayError = false;
    if (E->isTypeDependent() || E->isValueDependent() ||
        !E->isCXX11ConstantExpr(S.Context, &ArgNum)) {
      displayError = true;
    } else {
      if (ArgNum.isInt()) {
        value = ArgNum.getInt().getSExtValue();
        // Negative or non-integral-typed constants get a warning but the
        // value is still returned.
        if (!(E->getType()->isIntegralOrEnumerationType()) || value < 0) {
          S.Diag(Attr.getLoc(), diag::warn_hlsl_attribute_expects_uint_literal)
              << Attr.getName();
        }
      } else if (ArgNum.isFloat()) {
        llvm::APSInt floatInt;
        bool isPrecise;
        // Floats are truncated toward zero; an inexact conversion (opInexact)
        // is tolerated only when convertToInteger reports opOK.
        if (ArgNum.getFloat().convertToInteger(
                floatInt, llvm::APFloat::rmTowardZero, &isPrecise) ==
            llvm::APFloat::opStatus::opOK) {
          value = floatInt.getSExtValue();
          if (value < 0) {
            S.Diag(Attr.getLoc(),
                   diag::warn_hlsl_attribute_expects_uint_literal)
                << Attr.getName();
          }
        } else {
          S.Diag(Attr.getLoc(), diag::warn_hlsl_attribute_expects_uint_literal)
              << Attr.getName();
        }
      } else {
        displayError = true;
      }
    }
    if (displayError) {
      S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
          << Attr.getName() << AANT_ArgumentIntegerConstant
          << E->getSourceRange();
    }
  }
  return (int)value;
}
// TODO: support float arg directly.
// Reads attribute argument 'index' as a float (or int promoted to float)
// literal and returns its raw IEEE bit pattern packed into an int
// (64-bit literals are bit-cast from double, then truncated by
// getLimitedValue). Returns 0 and diagnoses when the argument is neither a
// float nor an int literal.
static int ValidateAttributeFloatArg(Sema &S, const AttributeList &Attr,
                                     unsigned index = 0) {
  int value = 0;
  if (Attr.getNumArgs() > index) {
    Expr *E = Attr.getArgAsExpr(index);
    if (FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E)) {
      llvm::APFloat flV = FL->getValue();
      if (flV.getSizeInBits(flV.getSemantics()) == 64) {
        llvm::APInt intV = llvm::APInt::floatToBits(flV.convertToDouble());
        value = intV.getLimitedValue();
      } else {
        llvm::APInt intV = llvm::APInt::floatToBits(flV.convertToFloat());
        value = intV.getLimitedValue();
      }
    } else if (IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) {
      // Integer literals are converted to float first, then bit-cast.
      llvm::APInt intV =
          llvm::APInt::floatToBits((float)IL->getValue().getLimitedValue());
      value = intV.getLimitedValue();
    } else {
      S.Diag(E->getLocStart(), diag::err_hlsl_attribute_expects_float_literal)
          << Attr.getName();
    }
  }
  return value;
}
// Reads attribute argument 'index' as a string literal and converts it to an
// enum value via ConvertStrToEnumType (optionally lower-casing first).
// Returns defaultValue when the argument is absent or not a valid string, and
// warns (warn_attribute_type_not_supported) when the string names no known
// enumerator.
template <typename AttrType, typename EnumType,
          bool (*ConvertStrToEnumType)(StringRef, EnumType &)>
static EnumType ValidateAttributeEnumArg(Sema &S, const AttributeList &Attr,
                                         EnumType defaultValue,
                                         unsigned index = 0,
                                         bool isCaseSensitive = true) {
  EnumType value(defaultValue);
  StringRef Str = "";
  SourceLocation ArgLoc;
  if (Attr.getNumArgs() > index) {
    // Fix: validate the requested argument index, not a hard-coded argument 0
    // (the guard above already checks getNumArgs() against 'index').
    if (!S.checkStringLiteralArgumentAttr(Attr, index, Str, &ArgLoc))
      return value;
    std::string str = isCaseSensitive ? Str.str() : Str.lower();
    if (!ConvertStrToEnumType(str, value)) {
      S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
          << Attr.getName() << Str << ArgLoc;
    }
    return value;
  }
  return value;
}
// Strips parentheses and benign implicit casts (array-to-pointer decay,
// no-op, lvalue-to-rvalue) from a statement, returning the first statement
// that is neither.
static Stmt *IgnoreParensAndDecay(Stmt *S) {
  while (true) {
    if (S->getStmtClass() == Stmt::ParenExprClass) {
      S = cast<ParenExpr>(S)->getSubExpr();
      continue;
    }
    if (S->getStmtClass() == Stmt::ImplicitCastExprClass) {
      ImplicitCastExpr *castExpr = cast<ImplicitCastExpr>(S);
      auto kind = castExpr->getCastKind();
      if (kind == CK_ArrayToPointerDecay || kind == CK_NoOp ||
          kind == CK_LValueToRValue) {
        S = castExpr->getSubExpr();
        continue;
      }
    }
    return S;
  }
}
// Validates that a clip-plane array subscript uses a compile-time constant
// index; returns the subscripted base expression on success, or nullptr
// (after diagnosing) on failure.
static Expr *ValidateClipPlaneArraySubscriptExpr(Sema &S,
                                                 ArraySubscriptExpr *E) {
  DXASSERT_NOMSG(E != nullptr);
  Expr *index = dyn_cast<Expr>(E->getIdx()->IgnoreParens());
  bool isConstantIndex = index != nullptr && !index->isTypeDependent() &&
                         !index->isValueDependent() &&
                         index->isCXX11ConstantExpr(S.Context);
  if (!isConstantIndex) {
    SourceLocation loc =
        (index == nullptr) ? E->getLocStart() : index->getLocStart();
    S.Diag(loc,
           diag::err_hlsl_unsupported_clipplane_argument_subscript_expression);
    return nullptr;
  }
  return E->getBase();
}
// A clip-plane expression may reference fields and variables, except
// static const variables, which are rejected.
static bool IsValidClipPlaneDecl(Decl *D) {
  switch (D->getKind()) {
  case Decl::Var: {
    VarDecl *varDecl = cast<VarDecl>(D);
    bool isStaticConst =
        varDecl->getStorageClass() == StorageClass::SC_Static &&
        varDecl->getType().isConstQualified();
    return !isStaticConst;
  }
  case Decl::Field:
    return true;
  default:
    return false;
  }
}
// Validates a clip-plane attribute argument. Walks the expression down
// through subscripts and member accesses (a linear path), checking each step
// with IsValidClipPlaneDecl, then requires the overall type to be a float32
// vector. Returns E on success, or nullptr after diagnosing.
static Expr *ValidateClipPlaneExpr(Sema &S, Expr *E) {
  Stmt *cursor = E;
  // clip plane expressions are a linear path, so no need to traverse the tree
  // here.
  while (cursor != nullptr) {
    bool supported = true;
    cursor = IgnoreParensAndDecay(cursor);
    switch (cursor->getStmtClass()) {
    case Stmt::ArraySubscriptExprClass:
      cursor = ValidateClipPlaneArraySubscriptExpr(
          S, cast<ArraySubscriptExpr>(cursor));
      if (cursor == nullptr) {
        // nullptr indicates failure, and the error message has already been
        // printed out
        return nullptr;
      }
      break;
    case Stmt::DeclRefExprClass: {
      DeclRefExpr *declRef = cast<DeclRefExpr>(cursor);
      Decl *decl = declRef->getDecl();
      supported = IsValidClipPlaneDecl(decl);
      // A valid decl-ref terminates the walk (cursor set to nullptr);
      // an invalid one keeps cursor for the diagnostic below.
      cursor = supported ? nullptr : cursor;
    } break;
    case Stmt::MemberExprClass: {
      MemberExpr *member = cast<MemberExpr>(cursor);
      supported = IsValidClipPlaneDecl(member->getMemberDecl());
      // Continue walking into the member's base object expression.
      cursor = supported ? member->getBase() : cursor;
    } break;
    default:
      supported = false;
      break;
    }
    if (!supported) {
      DXASSERT(
          cursor != nullptr,
          "otherwise it was cleared when the supported flag was set to false");
      S.Diag(cursor->getLocStart(),
             diag::err_hlsl_unsupported_clipplane_argument_expression);
      return nullptr;
    }
  }
  // Validate that the type is a float4.
  QualType expressionType = E->getType();
  HLSLExternalSource *hlslSource = HLSLExternalSource::FromSema(&S);
  if (hlslSource->GetTypeElementKind(expressionType) !=
          ArBasicKind::AR_BASIC_FLOAT32 ||
      hlslSource->GetTypeObjectKind(expressionType) !=
          ArTypeObjectKind::AR_TOBJ_VECTOR) {
    S.Diag(E->getLocStart(), diag::err_hlsl_unsupported_clipplane_argument_type)
        << expressionType;
    return nullptr;
  }
  return E;
}
// Builds an HLSLClipPlanesAttr from up to six clip-plane expressions.
// Missing or invalid arguments become null slots (invalid ones are
// diagnosed by ValidateClipPlaneExpr).
static Attr *HandleClipPlanes(Sema &S, const AttributeList &A) {
  Expr *clipExprs[6];
  for (unsigned int i = 0; i < _countof(clipExprs); ++i) {
    clipExprs[i] = (i < A.getNumArgs())
                       ? ValidateClipPlaneExpr(S, A.getArgAsExpr(i))
                       : nullptr;
  }
  return ::new (S.Context)
      HLSLClipPlanesAttr(A.getRange(), S.Context, clipExprs[0], clipExprs[1],
                         clipExprs[2], clipExprs[3], clipExprs[4], clipExprs[5],
                         A.getAttributeSpellingListIndex());
}
// Builds an HLSLUnrollAttr. An [unroll] with no argument requests a full
// unroll, encoded as 0.
static Attr *HandleUnrollAttribute(Sema &S, const AttributeList &Attr) {
  int unrollCount =
      (Attr.getNumArgs() == 0) ? 0 : ValidateAttributeIntArg(S, Attr);
  return ::new (S.Context)
      HLSLUnrollAttr(Attr.getRange(), S.Context, unrollCount,
                     Attr.getAttributeSpellingListIndex());
}
// Warns when a loop-only attribute is attached to a statement that is not a
// for/while/do loop.
static void ValidateAttributeOnLoop(Sema &S, Stmt *St,
                                    const AttributeList &Attr) {
  switch (St->getStmtClass()) {
  case Stmt::ForStmtClass:
  case Stmt::WhileStmtClass:
  case Stmt::DoStmtClass:
    break;
  default:
    S.Diag(Attr.getLoc(),
           diag::warn_hlsl_unsupported_statement_for_loop_attribute)
        << Attr.getName();
    break;
  }
}
// Warns when a switch-only attribute is attached to a non-switch statement.
static void ValidateAttributeOnSwitch(Sema &S, Stmt *St,
                                      const AttributeList &Attr) {
  if (St->getStmtClass() == Stmt::SwitchStmtClass)
    return;
  S.Diag(Attr.getLoc(),
         diag::warn_hlsl_unsupported_statement_for_switch_attribute)
      << Attr.getName();
}
// Warns when an attribute valid only on switch/if is attached to any other
// statement kind.
static void ValidateAttributeOnSwitchOrIf(Sema &S, Stmt *St,
                                          const AttributeList &Attr) {
  Stmt::StmtClass kind = St->getStmtClass();
  bool isSwitchOrIf =
      kind == Stmt::SwitchStmtClass || kind == Stmt::IfStmtClass;
  if (!isSwitchOrIf) {
    S.Diag(Attr.getLoc(),
           diag::warn_hlsl_unsupported_statement_for_if_switch_attribute)
        << Attr.getName();
  }
}
// Reads attribute argument 'index' as a string literal. When 'values' is
// non-null it is a comma-separated list of allowed strings; the argument must
// match one of them exactly, otherwise a diagnostic is emitted and an empty
// StringRef is returned. A missing or non-string argument also yields an
// empty StringRef.
static StringRef ValidateAttributeStringArg(Sema &S, const AttributeList &A,
                                            const char *values,
                                            unsigned index = 0) {
  // values is an optional comma-separated list of potential values.
  if (A.getNumArgs() <= index)
    return StringRef();
  Expr *E = A.getArgAsExpr(index);
  if (E->isTypeDependent() || E->isValueDependent() ||
      E->getStmtClass() != Stmt::StringLiteralClass) {
    S.Diag(E->getLocStart(), diag::err_hlsl_attribute_expects_string_literal)
        << A.getName();
    return StringRef();
  }
  StringRef result = cast<StringLiteral>(E)->getString();
  // Return result with no additional validation.
  if (values == nullptr) {
    return result;
  }
  // Exact-match the argument against each comma-separated candidate.
  StringRef remaining(values);
  while (!remaining.empty()) {
    std::pair<StringRef, StringRef> parts = remaining.split(',');
    DXASSERT_NOMSG(!parts.first.empty() ||
                   parts.second.empty()); // no leading commas in values
    if (result == parts.first) {
      return result;
    }
    remaining = parts.second;
  }
  // No match found.
  S.Diag(E->getLocStart(),
         diag::err_hlsl_attribute_expects_string_literal_from_list)
      << A.getName() << values;
  return StringRef();
}
// Returns true when the declaration is a function or function template;
// otherwise diagnoses and returns false.
static bool ValidateAttributeTargetIsFunction(Sema &S, Decl *D,
                                              const AttributeList &A) {
  if (!D->isFunctionOrFunctionTemplate()) {
    S.Diag(A.getLoc(), diag::err_hlsl_attribute_valid_on_function_only);
    return false;
  }
  return true;
}
// Validates a [shader("...")] attribute: the argument must be a string
// literal naming a valid shader stage, and must not conflict with an
// existing shader attribute on the same declaration. Returns nullptr when no
// new attribute should be created (invalid stage, or duplicate/conflicting
// attribute).
HLSLShaderAttr *ValidateShaderAttributes(Sema &S, Decl *D,
                                         const AttributeList &A) {
  Expr *ArgExpr = A.getArgAsExpr(0);
  StringLiteral *Literal = dyn_cast<StringLiteral>(ArgExpr->IgnoreParenCasts());
  // Fix: Literal may be null when the argument is not a string literal;
  // previously this dereferenced null. Treat a non-string argument the same
  // as an invalid stage name.
  DXIL::ShaderKind Stage =
      Literal ? ShaderModel::KindFromFullName(Literal->getString())
              : DXIL::ShaderKind::Invalid;
  if (Stage == DXIL::ShaderKind::Invalid) {
    S.Diag(A.getLoc(), diag::err_hlsl_attribute_expects_string_literal_from_list)
        << "'shader'"
        << "compute,vertex,pixel,hull,domain,geometry,raygeneration,"
           "intersection,anyhit,closesthit,miss,callable,mesh,"
           "amplification,node";
    return nullptr; // don't create the attribute
  }
  HLSLShaderAttr *Existing = D->getAttr<HLSLShaderAttr>();
  if (Existing) {
    DXIL::ShaderKind ExistingStage =
        ShaderModel::KindFromFullName(Existing->getStage());
    if (Stage == ExistingStage)
      return nullptr; // identical stage: don't create, but no error.
    S.Diag(A.getLoc(), diag::err_hlsl_conflicting_shader_attribute)
        << ShaderModel::FullNameFromKind(Stage)
        << ShaderModel::FullNameFromKind(ExistingStage);
    S.Diag(Existing->getLocation(), diag::note_conflicting_attribute);
    return nullptr;
  }
  return ::new (S.Context)
      HLSLShaderAttr(A.getRange(), S.Context, Literal->getString(),
                     A.getAttributeSpellingListIndex());
}
// Validates a [MaxRecords(...)] attribute against any MaxRecords /
// MaxRecordsSharedWith attribute already on the declaration. A duplicate
// MaxRecords with the same literal count is tolerated; anything else
// conflicting is diagnosed and nullptr is returned.
HLSLMaxRecordsAttr *ValidateMaxRecordsAttributes(Sema &S, Decl *D,
                                                 const AttributeList &A) {
  HLSLMaxRecordsAttr *ExistingMRA = D->getAttr<HLSLMaxRecordsAttr>();
  HLSLMaxRecordsSharedWithAttr *ExistingMRSWA =
      D->getAttr<HLSLMaxRecordsSharedWithAttr>();
  if (ExistingMRA || ExistingMRSWA) {
    Expr *ArgExpr = A.getArgAsExpr(0);
    IntegerLiteral *LiteralInt =
        dyn_cast<IntegerLiteral>(ArgExpr->IgnoreParenCasts());
    // Fix: LiteralInt is null when the argument is not an integer literal;
    // previously that case dereferenced null. Without a literal we cannot
    // prove the counts match, so report the conflict.
    if (ExistingMRSWA || !LiteralInt ||
        ExistingMRA->getMaxCount() != LiteralInt->getValue()) {
      clang::SourceLocation Loc = ExistingMRA ? ExistingMRA->getLocation()
                                              : ExistingMRSWA->getLocation();
      S.Diag(A.getLoc(), diag::err_hlsl_maxrecord_attrs_on_same_arg);
      S.Diag(Loc, diag::note_conflicting_attribute);
      return nullptr;
    }
  }
  return ::new (S.Context)
      HLSLMaxRecordsAttr(A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
                         A.getAttributeSpellingListIndex());
}
// This function validates the wave size attribute in a stand-alone way,
// by directly determining whether the attribute is well formed or
// allowed. It performs validation outside of the context
// of other attributes that could exist on this decl, and immediately
// upon detecting the attribute on the decl.
// The attribute is always constructed and returned; invalid argument
// combinations and conflicts with an existing WaveSize attribute are
// diagnosed but do not suppress attribute creation.
HLSLWaveSizeAttr *ValidateWaveSizeAttributes(Sema &S, Decl *D,
                                             const AttributeList &A) {
  // validate that the wavesize argument is a power of 2 between 4 and 128
  // inclusive
  HLSLWaveSizeAttr *pAttr = ::new (S.Context) HLSLWaveSizeAttr(
      A.getRange(), S.Context, ValidateAttributeIntArg(S, A, 0),
      ValidateAttributeIntArg(S, A, 1), ValidateAttributeIntArg(S, A, 2),
      A.getAttributeSpellingListIndex());
  pAttr->setSpelledArgsCount(A.getNumArgs());
  hlsl::DxilWaveSize waveSize(pAttr->getMin(), pAttr->getMax(),
                              pAttr->getPreferred());
  DxilWaveSize::ValidationResult validationResult = waveSize.Validate();
  // WaveSize validation succeeds when not defined, but since we have an
  // attribute, this means min was zero, which is invalid for min.
  if (validationResult == DxilWaveSize::ValidationResult::Success &&
      !waveSize.IsDefined())
    validationResult = DxilWaveSize::ValidationResult::InvalidMin;
  // It is invalid to explicitly specify degenerate cases.
  if (A.getNumArgs() > 1 && waveSize.Max == 0)
    validationResult = DxilWaveSize::ValidationResult::InvalidMax;
  else if (A.getNumArgs() > 2 && waveSize.Preferred == 0)
    validationResult = DxilWaveSize::ValidationResult::InvalidPreferred;
  // Map each validation failure to its diagnostic.
  switch (validationResult) {
  case DxilWaveSize::ValidationResult::Success:
    break;
  case DxilWaveSize::ValidationResult::InvalidMin:
  case DxilWaveSize::ValidationResult::InvalidMax:
  case DxilWaveSize::ValidationResult::InvalidPreferred:
  case DxilWaveSize::ValidationResult::NoRangeOrMin:
    S.Diag(A.getLoc(), diag::err_hlsl_wavesize_size)
        << DXIL::kMinWaveSize << DXIL::kMaxWaveSize;
    break;
  case DxilWaveSize::ValidationResult::MaxEqualsMin:
    S.Diag(A.getLoc(), diag::warn_hlsl_wavesize_min_eq_max)
        << (unsigned)waveSize.Min << (unsigned)waveSize.Max;
    break;
  case DxilWaveSize::ValidationResult::MaxLessThanMin:
    S.Diag(A.getLoc(), diag::err_hlsl_wavesize_min_geq_max)
        << (unsigned)waveSize.Min << (unsigned)waveSize.Max;
    break;
  case DxilWaveSize::ValidationResult::PreferredOutOfRange:
    S.Diag(A.getLoc(), diag::err_hlsl_wavesize_pref_size_out_of_range)
        << (unsigned)waveSize.Preferred << (unsigned)waveSize.Min
        << (unsigned)waveSize.Max;
    break;
  case DxilWaveSize::ValidationResult::MaxOrPreferredWhenUndefined:
  case DxilWaveSize::ValidationResult::PreferredWhenNoRange:
    llvm_unreachable("Should have hit InvalidMax or InvalidPreferred instead.");
    break;
  default:
    llvm_unreachable("Unknown ValidationResult");
  }
  // make sure there is not already an existing conflicting
  // wavesize attribute on the decl
  HLSLWaveSizeAttr *waveSizeAttr = D->getAttr<HLSLWaveSizeAttr>();
  if (waveSizeAttr) {
    if (waveSizeAttr->getMin() != pAttr->getMin() ||
        waveSizeAttr->getMax() != pAttr->getMax() ||
        waveSizeAttr->getPreferred() != pAttr->getPreferred()) {
      S.Diag(A.getLoc(), diag::err_hlsl_conflicting_shader_attribute)
          << pAttr->getSpelling() << waveSizeAttr->getSpelling();
      S.Diag(waveSizeAttr->getLocation(), diag::note_conflicting_attribute);
    }
  }
  return pAttr;
}
// Validates a [MaxRecordsSharedWith(param)] attribute on a parameter:
// the argument must be an identifier, the parameter must not already carry a
// MaxRecords / differing MaxRecordsSharedWith attribute, and the referenced
// parameter must not be the annotated parameter itself. Returns nullptr
// (after diagnosing) on any failure.
HLSLMaxRecordsSharedWithAttr *
ValidateMaxRecordsSharedWithAttributes(Sema &S, Decl *D,
                                       const AttributeList &A) {
  if (!A.isArgIdent(0)) {
    S.Diag(A.getLoc(), diag::err_attribute_argument_n_type)
        << A.getName() << 1 << AANT_ArgumentIdentifier;
    return nullptr;
  }
  IdentifierInfo *II = A.getArgAsIdent(0)->Ident;
  StringRef sharedName = II->getName();
  HLSLMaxRecordsAttr *ExistingMRA = D->getAttr<HLSLMaxRecordsAttr>();
  HLSLMaxRecordsSharedWithAttr *ExistingMRSWA =
      D->getAttr<HLSLMaxRecordsSharedWithAttr>();
  ParmVarDecl *pPVD = cast<ParmVarDecl>(D);
  StringRef ArgName = pPVD->getName();
  // check that this is the only MaxRecords* attribute for this parameter
  if (ExistingMRA || ExistingMRSWA) {
    // only emit a diagnostic if the argument to the attribute differs from the
    // current attribute when an extra MRSWA attribute is attached to this
    // parameter
    if (ExistingMRA ||
        sharedName !=
            ExistingMRSWA->getName()
                ->getName()) { // won't null deref, because short-circuit
      clang::SourceLocation Loc = ExistingMRA ? ExistingMRA->getLocation()
                                              : ExistingMRSWA->getLocation();
      S.Diag(A.getLoc(), diag::err_hlsl_maxrecord_attrs_on_same_arg);
      S.Diag(Loc, diag::note_conflicting_attribute);
      return nullptr;
    }
  }
  // check that the parameter that MaxRecordsSharedWith is targeting isn't
  // applied to that exact parameter
  if (sharedName == ArgName) {
    S.Diag(A.getLoc(), diag::err_hlsl_maxrecordssharedwith_references_itself);
    return nullptr;
  }
  return ::new (S.Context) HLSLMaxRecordsSharedWithAttr(
      A.getRange(), S.Context, II, A.getAttributeSpellingListIndex());
}
// Diagnoses HLSL attributes whose validity depends on the declaration's
// type. Currently checks 'globallycoherent': it must appear on a UAV
// resource or an RWDispatchNodeInputRecord (arrays thereof are unwrapped
// first); anything else gets an error plus an explanatory note.
void Sema::DiagnoseHLSLDeclAttr(const Decl *D, const Attr *A) {
  HLSLExternalSource *ExtSource = HLSLExternalSource::FromSema(this);
  if (const HLSLGloballyCoherentAttr *HLSLGCAttr =
          dyn_cast<HLSLGloballyCoherentAttr>(A)) {
    const ValueDecl *TD = cast<ValueDecl>(D);
    // Dependent types can't be checked yet (e.g. inside templates).
    if (TD->getType()->isDependentType())
      return;
    QualType DeclType = TD->getType();
    // For functions, the attribute applies to the return type.
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TD))
      DeclType = FD->getReturnType();
    // Unwrap array dimensions to reach the element type.
    while (DeclType->isArrayType())
      DeclType = QualType(DeclType->getArrayElementTypeNoTypeQual(), 0);
    if (ExtSource->GetTypeObjectKind(DeclType) != AR_TOBJ_OBJECT ||
        (hlsl::GetResourceClassForType(getASTContext(), DeclType) !=
             hlsl::DXIL::ResourceClass::UAV &&
         GetNodeIOType(DeclType) !=
             DXIL::NodeIOKind::RWDispatchNodeInputRecord)) {
      Diag(A->getLocation(), diag::err_hlsl_varmodifierna_decltype)
          << A << DeclType->getCanonicalTypeUnqualified() << A->getRange();
      Diag(A->getLocation(), diag::note_hlsl_globallycoherent_applies_to)
          << A << A->getRange();
    }
    return;
  }
}
// Warns when an implicit conversion adds or loses the 'globallycoherent'
// qualifier between a source expression and a target type. Only applies to
// HLSL resource types (excluding dynamic-resource sources) and
// RWDispatchNodeInputRecord; array types are compared by element type.
void Sema::DiagnoseGloballyCoherentMismatch(const Expr *SrcExpr,
                                            QualType TargetType,
                                            SourceLocation Loc) {
  QualType SrcTy = SrcExpr->getType();
  QualType DstTy = TargetType;
  if (SrcTy->isArrayType() && DstTy->isArrayType()) {
    SrcTy = QualType(SrcTy->getBaseElementTypeUnsafe(), 0);
    DstTy = QualType(DstTy->getBaseElementTypeUnsafe(), 0);
  }
  if ((hlsl::IsHLSLResourceType(DstTy) &&
       !hlsl::IsHLSLDynamicResourceType(SrcTy)) ||
      GetNodeIOType(DstTy) == DXIL::NodeIOKind::RWDispatchNodeInputRecord) {
    bool SrcGL = hlsl::HasHLSLGloballyCoherent(SrcTy);
    bool DstGL = hlsl::HasHLSLGloballyCoherent(DstTy);
    if (SrcGL != DstGL)
      Diag(Loc, diag::warn_hlsl_impcast_glc_mismatch)
          << SrcExpr->getType() << TargetType << /*loses|adds*/ DstGL;
  }
}
// Validates the X/Y/Z components of a [NodeDispatchGrid] or
// [NodeMaxDispatchGrid] attribute: each component must be in
// [1, 2^16 - 1], and their product must not exceed 2^24 - 1.
void ValidateDispatchGridValues(DiagnosticsEngine &Diags,
                                const AttributeList &A, Attr *declAttr) {
  static const unsigned MaxComponentValue = 65535;  // 2^16 - 1
  static const unsigned MaxProductValue = 16777215; // 2^24 - 1
  unsigned components[3] = {1, 1, 1};
  if (HLSLNodeDispatchGridAttr *pA =
          dyn_cast<HLSLNodeDispatchGridAttr>(declAttr)) {
    components[0] = pA->getX();
    components[1] = pA->getY();
    components[2] = pA->getZ();
  } else if (HLSLNodeMaxDispatchGridAttr *pA =
                 dyn_cast<HLSLNodeMaxDispatchGridAttr>(declAttr)) {
    components[0] = pA->getX();
    components[1] = pA->getY();
    components[2] = pA->getZ();
  } else {
    llvm_unreachable("ValidateDispatchGridValues() called for wrong attribute");
  }
  static const char *const ComponentNames[3] = {"X", "Y", "Z"};
  uint64_t product = 1;
  for (unsigned i = 0; i < 3; ++i) {
    if (components[i] < 1 || components[i] > MaxComponentValue) {
      Diags.Report(A.getArgAsExpr(i)->getExprLoc(),
                   diag::err_hlsl_dispatchgrid_component)
          << A.getName() << ComponentNames[i] << A.getRange();
      // If a component is out of range, we reset it to 0 to avoid also
      // generating a secondary error if the product would be out of range.
      components[i] = 0;
    }
    product *= (uint64_t)components[i];
  }
  if (product > MaxProductValue)
    Diags.Report(A.getLoc(), diag::err_hlsl_dispatchgrid_product)
        << A.getName() << A.getRange();
}
void hlsl::HandleDeclAttributeForHLSL(Sema &S, Decl *D, const AttributeList &A,
bool &Handled) {
DXASSERT_NOMSG(D != nullptr);
DXASSERT_NOMSG(!A.isInvalid());
Attr *declAttr = nullptr;
Handled = true;
switch (A.getKind()) {
case AttributeList::AT_HLSLIn:
declAttr = ::new (S.Context)
HLSLInAttr(A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLOut:
declAttr = ::new (S.Context)
HLSLOutAttr(A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLInOut:
declAttr = ::new (S.Context) HLSLInOutAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLMaybeUnused:
declAttr = ::new (S.Context) HLSLMaybeUnusedAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLNoInterpolation:
declAttr = ::new (S.Context) HLSLNoInterpolationAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLLinear:
case AttributeList::AT_HLSLCenter:
declAttr = ::new (S.Context) HLSLLinearAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLNoPerspective:
declAttr = ::new (S.Context) HLSLNoPerspectiveAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLSample:
declAttr = ::new (S.Context) HLSLSampleAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLCentroid:
declAttr = ::new (S.Context) HLSLCentroidAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLPrecise:
declAttr = ::new (S.Context) HLSLPreciseAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLShared:
declAttr = ::new (S.Context) HLSLSharedAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLGroupShared:
declAttr = ::new (S.Context) HLSLGroupSharedAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
VD->setType(
S.Context.getAddrSpaceQualType(VD->getType(), DXIL::kTGSMAddrSpace));
}
break;
case AttributeList::AT_HLSLUniform:
declAttr = ::new (S.Context) HLSLUniformAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLUnorm:
declAttr = ::new (S.Context) HLSLUnormAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLSnorm:
declAttr = ::new (S.Context) HLSLSnormAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLPoint:
declAttr = ::new (S.Context) HLSLPointAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLLine:
declAttr = ::new (S.Context) HLSLLineAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLLineAdj:
declAttr = ::new (S.Context) HLSLLineAdjAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLTriangle:
declAttr = ::new (S.Context) HLSLTriangleAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLTriangleAdj:
declAttr = ::new (S.Context) HLSLTriangleAdjAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLGloballyCoherent:
declAttr = ::new (S.Context) HLSLGloballyCoherentAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLIndices:
declAttr = ::new (S.Context) HLSLIndicesAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLVertices:
declAttr = ::new (S.Context) HLSLVerticesAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLPrimitives:
declAttr = ::new (S.Context) HLSLPrimitivesAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLPayload:
declAttr = ::new (S.Context) HLSLPayloadAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLRayPayload:
declAttr = ::new (S.Context) HLSLRayPayloadAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLMaxRecords:
declAttr = ValidateMaxRecordsAttributes(S, D, A);
if (!declAttr) {
return;
}
break;
case AttributeList::AT_HLSLMaxRecordsSharedWith: {
declAttr = ValidateMaxRecordsSharedWithAttributes(S, D, A);
if (!declAttr) {
return;
}
break;
}
case AttributeList::AT_HLSLNodeArraySize: {
declAttr = ::new (S.Context) HLSLNodeArraySizeAttr(
A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getAttributeSpellingListIndex());
break;
}
case AttributeList::AT_HLSLAllowSparseNodes:
declAttr = ::new (S.Context) HLSLAllowSparseNodesAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLUnboundedSparseNodes:
declAttr = ::new (S.Context) HLSLUnboundedSparseNodesAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLNodeId:
declAttr = ::new (S.Context) HLSLNodeIdAttr(
A.getRange(), S.Context, ValidateAttributeStringArg(S, A, nullptr, 0),
ValidateAttributeIntArg(S, A, 1), A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLNodeTrackRWInputSharing:
declAttr = ::new (S.Context) HLSLNodeTrackRWInputSharingAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
// SPIRV Change Starts
case AttributeList::AT_VKDecorateIdExt: {
if (A.getNumArgs() == 0 || !A.getArg(0).is<clang::Expr *>()) {
Handled = false;
break;
}
unsigned decoration = 0;
if (IntegerLiteral *decorationAsLiteral =
dyn_cast<IntegerLiteral>(A.getArg(0).get<clang::Expr *>())) {
decoration = decorationAsLiteral->getValue().getZExtValue();
} else {
Handled = false;
break;
}
llvm::SmallVector<Expr *, 2> args;
for (unsigned i = 1; i < A.getNumArgs(); ++i) {
if (!A.getArg(i).is<clang::Expr *>()) {
Handled = false;
break;
}
args.push_back(A.getArg(i).get<clang::Expr *>());
}
if (!Handled)
break;
declAttr = ::new (S.Context)
VKDecorateIdExtAttr(A.getRange(), S.Context, decoration, args.data(),
args.size(), A.getAttributeSpellingListIndex());
} break;
// SPIRV Change Ends
default:
Handled = false;
break;
}
if (declAttr != nullptr) {
S.DiagnoseHLSLDeclAttr(D, declAttr);
DXASSERT_NOMSG(Handled);
D->addAttr(declAttr);
return;
}
Handled = true;
switch (A.getKind()) {
// These apply to statements, not declarations. The warning messages clarify
// this properly.
case AttributeList::AT_HLSLUnroll:
case AttributeList::AT_HLSLAllowUAVCondition:
case AttributeList::AT_HLSLLoop:
case AttributeList::AT_HLSLFastOpt:
S.Diag(A.getLoc(), diag::warn_hlsl_unsupported_statement_for_loop_attribute)
<< A.getName();
return;
case AttributeList::AT_HLSLBranch:
case AttributeList::AT_HLSLFlatten:
S.Diag(A.getLoc(),
diag::warn_hlsl_unsupported_statement_for_if_switch_attribute)
<< A.getName();
return;
case AttributeList::AT_HLSLForceCase:
case AttributeList::AT_HLSLCall:
S.Diag(A.getLoc(),
diag::warn_hlsl_unsupported_statement_for_switch_attribute)
<< A.getName();
return;
// These are the cases that actually apply to declarations.
case AttributeList::AT_HLSLClipPlanes:
declAttr = HandleClipPlanes(S, A);
break;
case AttributeList::AT_HLSLDomain:
declAttr = ::new (S.Context)
HLSLDomainAttr(A.getRange(), S.Context,
ValidateAttributeStringArg(S, A, "tri,quad,isoline"),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLEarlyDepthStencil:
declAttr = ::new (S.Context) HLSLEarlyDepthStencilAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLInstance:
declAttr = ::new (S.Context)
HLSLInstanceAttr(A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLMaxTessFactor:
declAttr = ::new (S.Context) HLSLMaxTessFactorAttr(
A.getRange(), S.Context, ValidateAttributeFloatArg(S, A),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLNumThreads: {
int X = ValidateAttributeIntArg(S, A, 0);
int Y = ValidateAttributeIntArg(S, A, 1);
int Z = ValidateAttributeIntArg(S, A, 2);
int N = X * Y * Z;
if (N > 0 && N <= 1024) {
auto numThreads = ::new (S.Context) HLSLNumThreadsAttr(
A.getRange(), S.Context, X, Y, Z, A.getAttributeSpellingListIndex());
declAttr = numThreads;
} else {
// If the number of threads is invalid, diagnose and drop the attribute.
S.Diags.Report(A.getLoc(), diag::warn_hlsl_numthreads_group_size)
<< N << X << Y << Z << A.getRange();
return;
}
break;
}
case AttributeList::AT_HLSLRootSignature:
declAttr = ::new (S.Context) HLSLRootSignatureAttr(
A.getRange(), S.Context,
ValidateAttributeStringArg(S, A, /*validate strings*/ nullptr),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLOutputControlPoints: {
// Hull shader output must be between 1 and 32 control points.
int outputControlPoints = ValidateAttributeIntArg(S, A);
if (outputControlPoints < 1 || outputControlPoints > 32) {
S.Diags.Report(A.getLoc(), diag::err_hlsl_controlpoints_size)
<< outputControlPoints << A.getRange();
return;
}
declAttr = ::new (S.Context) HLSLOutputControlPointsAttr(
A.getRange(), S.Context, outputControlPoints,
A.getAttributeSpellingListIndex());
break;
}
case AttributeList::AT_HLSLOutputTopology:
declAttr = ::new (S.Context) HLSLOutputTopologyAttr(
A.getRange(), S.Context,
ValidateAttributeStringArg(
S, A, "point,line,triangle,triangle_cw,triangle_ccw"),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLPartitioning:
declAttr = ::new (S.Context) HLSLPartitioningAttr(
A.getRange(), S.Context,
ValidateAttributeStringArg(
S, A, "integer,fractional_even,fractional_odd,pow2"),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLPatchConstantFunc:
declAttr = ::new (S.Context) HLSLPatchConstantFuncAttr(
A.getRange(), S.Context, ValidateAttributeStringArg(S, A, nullptr),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLShader:
declAttr = ValidateShaderAttributes(S, D, A);
if (!declAttr) {
Handled = true;
return;
}
break;
case AttributeList::AT_HLSLMaxVertexCount:
declAttr = ::new (S.Context) HLSLMaxVertexCountAttr(
A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLExperimental:
declAttr = ::new (S.Context) HLSLExperimentalAttr(
A.getRange(), S.Context, ValidateAttributeStringArg(S, A, nullptr, 0),
ValidateAttributeStringArg(S, A, nullptr, 1),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_NoInline:
declAttr = ::new (S.Context) NoInlineAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLExport:
declAttr = ::new (S.Context) HLSLExportAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLWaveSensitive:
declAttr = ::new (S.Context) HLSLWaveSensitiveAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLWaveSize:
declAttr = ValidateWaveSizeAttributes(S, D, A);
break;
case AttributeList::AT_HLSLWaveOpsIncludeHelperLanes:
declAttr = ::new (S.Context) HLSLWaveOpsIncludeHelperLanesAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLNodeLaunch:
declAttr = ::new (S.Context) HLSLNodeLaunchAttr(
A.getRange(), S.Context,
ValidateAttributeStringArg(S, A, "broadcasting,coalescing,thread"),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLNodeIsProgramEntry:
declAttr = ::new (S.Context) HLSLNodeIsProgramEntryAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLNodeTrackRWInputSharing:
declAttr = ::new (S.Context) HLSLNodeTrackRWInputSharingAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLNodeLocalRootArgumentsTableIndex:
declAttr = ::new (S.Context) HLSLNodeLocalRootArgumentsTableIndexAttr(
A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLNodeShareInputOf:
declAttr = ::new (S.Context) HLSLNodeShareInputOfAttr(
A.getRange(), S.Context, ValidateAttributeStringArg(S, A, nullptr, 0),
ValidateAttributeIntArg(S, A, 1), A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_HLSLNodeDispatchGrid:
declAttr = ::new (S.Context) HLSLNodeDispatchGridAttr(
A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
ValidateAttributeIntArg(S, A, 1), ValidateAttributeIntArg(S, A, 2),
A.getAttributeSpellingListIndex());
ValidateDispatchGridValues(S.Diags, A, declAttr);
break;
case AttributeList::AT_HLSLNodeMaxDispatchGrid:
declAttr = ::new (S.Context) HLSLNodeMaxDispatchGridAttr(
A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
ValidateAttributeIntArg(S, A, 1), ValidateAttributeIntArg(S, A, 2),
A.getAttributeSpellingListIndex());
ValidateDispatchGridValues(S.Diags, A, declAttr);
break;
case AttributeList::AT_HLSLNodeMaxRecursionDepth:
declAttr = ::new (S.Context) HLSLNodeMaxRecursionDepthAttr(
A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getAttributeSpellingListIndex());
if (cast<HLSLNodeMaxRecursionDepthAttr>(declAttr)->getCount() > 32)
S.Diags.Report(declAttr->getLocation(),
diag::err_hlsl_maxrecursiondepth_exceeded)
<< declAttr->getRange();
break;
default:
Handled = false;
break; // SPIRV Change: was return;
}
if (declAttr != nullptr) {
DXASSERT_NOMSG(Handled);
D->addAttr(declAttr);
// The attribute has been set but will have no effect. Validation will emit
// a diagnostic and prevent code generation.
ValidateAttributeTargetIsFunction(S, D, A);
return; // SPIRV Change
}
// SPIRV Change Starts
Handled = true;
switch (A.getKind()) {
case AttributeList::AT_VKBuiltIn:
declAttr = ::new (S.Context)
VKBuiltInAttr(A.getRange(), S.Context,
ValidateAttributeStringArg(
S, A,
"PointSize,HelperInvocation,BaseVertex,BaseInstance,"
"DrawIndex,DeviceIndex,ViewportMaskNV"),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKExtBuiltinInput:
declAttr = ::new (S.Context) VKExtBuiltinInputAttr(
A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKExtBuiltinOutput:
declAttr = ::new (S.Context) VKExtBuiltinOutputAttr(
A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKLocation:
declAttr = ::new (S.Context)
VKLocationAttr(A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKIndex:
declAttr = ::new (S.Context)
VKIndexAttr(A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKBinding:
declAttr = ::new (S.Context) VKBindingAttr(
A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getNumArgs() < 2 ? INT_MIN : ValidateAttributeIntArg(S, A, 1),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKCounterBinding:
declAttr = ::new (S.Context) VKCounterBindingAttr(
A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKPushConstant:
declAttr = ::new (S.Context) VKPushConstantAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKOffset:
declAttr = ::new (S.Context)
VKOffsetAttr(A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKCombinedImageSampler:
declAttr = ::new (S.Context) VKCombinedImageSamplerAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKImageFormat: {
VKImageFormatAttr::ImageFormatType Kind = ValidateAttributeEnumArg<
VKImageFormatAttr, VKImageFormatAttr::ImageFormatType,
VKImageFormatAttr::ConvertStrToImageFormatType>(
S, A, VKImageFormatAttr::ImageFormatType::unknown);
declAttr = ::new (S.Context) VKImageFormatAttr(
A.getRange(), S.Context, Kind, A.getAttributeSpellingListIndex());
break;
}
case AttributeList::AT_VKInputAttachmentIndex:
declAttr = ::new (S.Context) VKInputAttachmentIndexAttr(
A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKConstantId:
declAttr = ::new (S.Context)
VKConstantIdAttr(A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKPostDepthCoverage:
declAttr = ::new (S.Context) VKPostDepthCoverageAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKEarlyAndLateTests:
declAttr = ::new (S.Context) VKEarlyAndLateTestsAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKDepthUnchanged:
declAttr = ::new (S.Context) VKDepthUnchangedAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKStencilRefUnchangedFront:
declAttr = ::new (S.Context) VKStencilRefUnchangedFrontAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKStencilRefGreaterEqualFront:
declAttr = ::new (S.Context) VKStencilRefGreaterEqualFrontAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKStencilRefLessEqualFront:
declAttr = ::new (S.Context) VKStencilRefLessEqualFrontAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKStencilRefUnchangedBack:
declAttr = ::new (S.Context) VKStencilRefUnchangedBackAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKStencilRefGreaterEqualBack:
declAttr = ::new (S.Context) VKStencilRefGreaterEqualBackAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKStencilRefLessEqualBack:
declAttr = ::new (S.Context) VKStencilRefLessEqualBackAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKShaderRecordNV:
declAttr = ::new (S.Context) VKShaderRecordNVAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKShaderRecordEXT:
declAttr = ::new (S.Context) VKShaderRecordEXTAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKCapabilityExt:
declAttr = ::new (S.Context) VKCapabilityExtAttr(
A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKExtensionExt:
declAttr = ::new (S.Context) VKExtensionExtAttr(
A.getRange(), S.Context, ValidateAttributeStringArg(S, A, nullptr),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKSpvExecutionMode:
declAttr = ::new (S.Context) VKSpvExecutionModeAttr(
A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKInstructionExt:
declAttr = ::new (S.Context) VKInstructionExtAttr(
A.getRange(), S.Context, ValidateAttributeIntArg(S, A),
ValidateAttributeStringArg(S, A, nullptr, 1),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKLiteralExt:
declAttr = ::new (S.Context) VKLiteralExtAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKReferenceExt:
declAttr = ::new (S.Context) VKReferenceExtAttr(
A.getRange(), S.Context, A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKDecorateExt: {
unsigned decoration = unsigned(ValidateAttributeIntArg(S, A));
llvm::SmallVector<unsigned, 2> args;
for (unsigned i = 1; i < A.getNumArgs(); ++i) {
args.push_back(unsigned(ValidateAttributeIntArg(S, A, i)));
}
// Note that `llvm::SmallVector<unsigned, 2> args` will be destroyed at
// the end of this function. However, VKDecorateExtAttr() constructor
// allocate a new integer array internally for args. It does not create
// a dangling pointer.
declAttr = ::new (S.Context)
VKDecorateExtAttr(A.getRange(), S.Context, decoration, args.data(),
args.size(), A.getAttributeSpellingListIndex());
} break;
case AttributeList::AT_VKDecorateStringExt: {
unsigned decoration = unsigned(ValidateAttributeIntArg(S, A));
llvm::SmallVector<std::string, 2> args;
for (unsigned i = 1; i < A.getNumArgs(); ++i) {
args.push_back(ValidateAttributeStringArg(S, A, nullptr, i));
}
// Note that `llvm::SmallVector<std::string, 2> args` will be destroyed
// at the end of this function. However, VKDecorateExtAttr() constructor
// allocate a new integer array internally for args. It does not create
// a dangling pointer.
declAttr = ::new (S.Context) VKDecorateStringExtAttr(
A.getRange(), S.Context, decoration, args.data(), args.size(),
A.getAttributeSpellingListIndex());
} break;
case AttributeList::AT_VKStorageClassExt:
declAttr = ::new (S.Context) VKStorageClassExtAttr(
A.getRange(), S.Context, unsigned(ValidateAttributeIntArg(S, A)),
A.getAttributeSpellingListIndex());
break;
case AttributeList::AT_VKTypeDefExt:
declAttr = ::new (S.Context) VKTypeDefExtAttr(
A.getRange(), S.Context, unsigned(ValidateAttributeIntArg(S, A)),
unsigned(ValidateAttributeIntArg(S, A, 1)),
A.getAttributeSpellingListIndex());
break;
default:
Handled = false;
return;
}
if (declAttr != nullptr) {
DXASSERT_NOMSG(Handled);
D->addAttr(declAttr);
}
// SPIRV Change Ends
}
/// <summary>Processes an attribute for a statement.</summary>
/// <param name="S">Sema with context.</param>
/// <param name="St">Statement annotated.</param>
/// <param name="A">Single parsed attribute to process.</param>
/// <param name="Range">Range of all attribute lists (useful for FixIts to
/// suggest inclusions).</param> <param name="Handled">After execution, whether
/// this was recognized and handled.</param> <returns>An attribute instance if
/// processed, nullptr if not recognized or an error was found.</returns>
Attr *hlsl::ProcessStmtAttributeForHLSL(Sema &S, Stmt *St,
                                        const AttributeList &A,
                                        SourceRange Range, bool &Handled) {
  // | Construct        | Allowed Attributes                         |
  // +------------------+--------------------------------------------+
  // | for, while, do   | loop, fastopt, unroll, allow_uav_condition |
  // | if               | branch, flatten                            |
  // | switch           | branch, flatten, forcecase, call           |
  Handled = true;
  // Hoist the arguments shared by every attribute constructor below.
  const SourceRange AttrRange = A.getRange();
  const unsigned Spelling = A.getAttributeSpellingListIndex();
  // SPIRV Change Starts
  // `vk`-scoped attributes are accepted here irrespective of the statement
  // kind; anything else in that scope is reported as unhandled.
  if (A.hasScope() && A.getScopeName()->getName().equals("vk")) {
    switch (A.getKind()) {
    case AttributeList::AT_VKCapabilityExt:
      return ::new (S.Context) VKCapabilityExtAttr(
          AttrRange, S.Context, ValidateAttributeIntArg(S, A), Spelling);
    case AttributeList::AT_VKExtensionExt:
      return ::new (S.Context) VKExtensionExtAttr(
          AttrRange, S.Context, ValidateAttributeStringArg(S, A, nullptr),
          Spelling);
    default:
      Handled = false;
      return nullptr;
    }
  }
  // SPIRV Change Ends
  // Each case validates that the attribute sits on an allowed statement kind
  // (diagnosing if not) and then materializes the semantic attribute.
  Attr *stmtAttr = nullptr;
  switch (A.getKind()) {
  case AttributeList::AT_HLSLUnroll:
    ValidateAttributeOnLoop(S, St, A);
    stmtAttr = HandleUnrollAttribute(S, A);
    break;
  case AttributeList::AT_HLSLAllowUAVCondition:
    ValidateAttributeOnLoop(S, St, A);
    stmtAttr = ::new (S.Context)
        HLSLAllowUAVConditionAttr(AttrRange, S.Context, Spelling);
    break;
  case AttributeList::AT_HLSLLoop:
    ValidateAttributeOnLoop(S, St, A);
    stmtAttr = ::new (S.Context) HLSLLoopAttr(AttrRange, S.Context, Spelling);
    break;
  case AttributeList::AT_HLSLFastOpt:
    ValidateAttributeOnLoop(S, St, A);
    stmtAttr =
        ::new (S.Context) HLSLFastOptAttr(AttrRange, S.Context, Spelling);
    break;
  case AttributeList::AT_HLSLBranch:
    ValidateAttributeOnSwitchOrIf(S, St, A);
    stmtAttr = ::new (S.Context) HLSLBranchAttr(AttrRange, S.Context, Spelling);
    break;
  case AttributeList::AT_HLSLFlatten:
    ValidateAttributeOnSwitchOrIf(S, St, A);
    stmtAttr =
        ::new (S.Context) HLSLFlattenAttr(AttrRange, S.Context, Spelling);
    break;
  case AttributeList::AT_HLSLForceCase:
    ValidateAttributeOnSwitch(S, St, A);
    stmtAttr =
        ::new (S.Context) HLSLForceCaseAttr(AttrRange, S.Context, Spelling);
    break;
  case AttributeList::AT_HLSLCall:
    ValidateAttributeOnSwitch(S, St, A);
    stmtAttr = ::new (S.Context) HLSLCallAttr(AttrRange, S.Context, Spelling);
    break;
  default:
    Handled = false;
    break;
  }
  return stmtAttr;
}
////////////////////////////////////////////////////////////////////////////////
// Implementation of Sema members. //
Decl *Sema::ActOnStartHLSLBuffer(
    Scope *bufferScope, bool cbuffer, SourceLocation KwLoc,
    IdentifierInfo *Ident, SourceLocation IdentLoc,
    std::vector<hlsl::UnusualAnnotation *> &BufferAttributes,
    SourceLocation LBrace) {
  // For anonymous namespace, take the location of the left brace.
  DeclContext *lexicalParent = getCurLexicalContext();
  clang::HLSLBufferDecl *result = HLSLBufferDecl::Create(
      Context, lexicalParent, cbuffer, /*isConstantBufferView*/ false, KwLoc,
      Ident, IdentLoc, BufferAttributes, LBrace);
  // Keep track of the currently active buffer.
  HLSLBuffers.push_back(result);
  // Validate unusual annotations and emit diagnostics.
  DiagnoseUnusualAnnotationsForHLSL(*this, BufferAttributes);
  // cbuffers bind to 'b' registers, tbuffers to 't' registers.
  const char expectedRegisterType = cbuffer ? 'b' : 't';
  for (hlsl::UnusualAnnotation *annotation : BufferAttributes) {
    switch (annotation->getKind()) {
    case hlsl::UnusualAnnotation::UA_ConstantPacking: {
      // packoffset is not supported on the buffer declaration itself.
      hlsl::ConstantPacking *constantPacking =
          cast<hlsl::ConstantPacking>(annotation);
      Diag(constantPacking->Loc, diag::err_hlsl_unsupported_buffer_packoffset);
      break;
    }
    case hlsl::UnusualAnnotation::UA_RegisterAssignment: {
      hlsl::RegisterAssignment *registerAssignment =
          cast<hlsl::RegisterAssignment>(annotation);
      // A space-only assignment carries no register letter to validate.
      if (registerAssignment->isSpaceOnly())
        continue;
      const char registerType = registerAssignment->RegisterType;
      if (registerType != expectedRegisterType &&
          registerType != toupper(expectedRegisterType)) {
        Diag(registerAssignment->Loc, diag::err_hlsl_incorrect_bind_semantic)
            << (cbuffer ? "'b'" : "'t'");
      } else if (!registerAssignment->ShaderProfile.empty()) {
        Diag(registerAssignment->Loc,
             diag::err_hlsl_unsupported_buffer_slot_target_specific);
      }
      break;
    }
    case hlsl::UnusualAnnotation::UA_SemanticDecl: {
      // Ignore semantic declarations.
      break;
    }
    case hlsl::UnusualAnnotation::UA_PayloadAccessQualifier: {
      // Payload access qualifiers are meaningless on buffers.
      hlsl::PayloadAccessAnnotation *annot =
          cast<hlsl::PayloadAccessAnnotation>(annotation);
      Diag(annot->Loc, diag::err_hlsl_unsupported_payload_access_qualifier);
      break;
    }
    }
  }
  PushOnScopeChains(result, bufferScope);
  PushDeclContext(bufferScope, result);
  ActOnDocumentableDecl(result);
  return result;
}
void Sema::ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace) {
  DXASSERT_NOMSG(Dcl != nullptr);
  DXASSERT(Dcl == HLSLBuffers.back(), "otherwise push/pop is incorrect");
  auto *BufDecl = cast<HLSLBufferDecl>(Dcl);
  BufDecl->setRBraceLoc(RBrace);
  HLSLBuffers.pop_back();

  // Collect the packoffset (if any) of every variable in the buffer.
  // An Offset of UINT_MAX marks a variable without a packoffset annotation.
  llvm::SmallVector<std::pair<VarDecl *, unsigned>, 4> PackOffsetVec;
  bool HasPackOffset = false;
  bool HasNonPackOffset = false;
  for (auto *Field : BufDecl->decls()) {
    VarDecl *Var = dyn_cast<VarDecl>(Field);
    if (!Var)
      continue;
    unsigned Offset = UINT_MAX;
    for (const hlsl::UnusualAnnotation *it : Var->getUnusualAnnotations()) {
      if (it->getKind() == hlsl::UnusualAnnotation::UA_ConstantPacking) {
        const hlsl::ConstantPacking *packOffset =
            cast<hlsl::ConstantPacking>(it);
        // Subcomponent counts registers (4 components each); add the
        // component offset within the register.
        unsigned CBufferOffset = packOffset->Subcomponent << 2;
        CBufferOffset += packOffset->ComponentOffset;
        // Change to bits (32 bits per component).
        Offset = CBufferOffset << 5;
        HasPackOffset = true;
      }
    }
    PackOffsetVec.emplace_back(Var, Offset);
    if (Offset == UINT_MAX) {
      HasNonPackOffset = true;
    }
  }

  if (HasPackOffset && HasNonPackOffset) {
    // Mixing annotated and unannotated members is only a warning.
    Diag(BufDecl->getLocation(), diag::warn_hlsl_packoffset_mix);
  } else if (HasPackOffset) {
    // Make sure no overlap in packoffset.
    llvm::SmallDenseMap<VarDecl *, std::pair<unsigned, unsigned>>
        PackOffsetRanges;
    for (auto &Pair : PackOffsetVec) {
      VarDecl *Var = Pair.first;
      unsigned Size = Context.getTypeSize(Var->getType());
      unsigned Begin = Pair.second;
      unsigned End = Begin + Size;
      for (auto &Range : PackOffsetRanges) {
        VarDecl *OtherVar = Range.first;
        unsigned OtherBegin = Range.second.first;
        unsigned OtherEnd = Range.second.second;
        // Standard half-open interval intersection. The previous pair of
        // strict-inequality checks both failed when Begin == OtherBegin, so
        // two variables packed at the identical offset escaped diagnosis.
        if (Begin < OtherEnd && OtherBegin < End) {
          Diag(Var->getLocation(), diag::err_hlsl_packoffset_overlap)
              << Var << OtherVar;
          break;
        }
      }
      PackOffsetRanges[Var] = std::make_pair(Begin, End);
    }
  }
  PopDeclContext();
}
Decl *Sema::getActiveHLSLBuffer() const {
  // The innermost open buffer, if any, sits at the back of the stack.
  if (HLSLBuffers.empty())
    return nullptr;
  return HLSLBuffers.back();
}
bool Sema::IsOnHLSLBufferView() {
  // cbuffers push a real decl; only buffer views push a nullptr entry.
  if (HLSLBuffers.empty())
    return false;
  return getActiveHLSLBuffer() == nullptr;
}
HLSLBufferDecl::HLSLBufferDecl(
    DeclContext *DC, bool cbuffer, bool cbufferView, SourceLocation KwLoc,
    IdentifierInfo *Id, SourceLocation IdLoc,
    std::vector<hlsl::UnusualAnnotation *> &BufferAttributes,
    SourceLocation LBrace)
    : NamedDecl(Decl::HLSLBuffer, DC, IdLoc, DeclarationName(Id)),
      DeclContext(Decl::HLSLBuffer), LBraceLoc(LBrace), KwLoc(KwLoc),
      IsCBuffer(cbuffer), IsConstantBufferView(cbufferView) {
  // Nothing further to do unless annotations were supplied; copy them into
  // ASTContext-owned storage so they outlive the parser's vector.
  if (BufferAttributes.empty())
    return;
  setUnusualAnnotations(UnusualAnnotation::CopyToASTContextArray(
      getASTContext(), BufferAttributes.data(), BufferAttributes.size()));
}
HLSLBufferDecl *
HLSLBufferDecl::Create(ASTContext &C, DeclContext *lexicalParent, bool cbuffer,
                       bool constantbuffer, SourceLocation KwLoc,
                       IdentifierInfo *Id, SourceLocation IdLoc,
                       std::vector<hlsl::UnusualAnnotation *> &BufferAttributes,
                       SourceLocation LBrace) {
  // The semantic parent is always the translation unit; record the lexical
  // parent separately when the buffer was written inside another context.
  DeclContext *semanticParent = C.getTranslationUnitDecl();
  HLSLBufferDecl *result =
      ::new (C) HLSLBufferDecl(semanticParent, cbuffer, constantbuffer, KwLoc,
                               Id, IdLoc, BufferAttributes, LBrace);
  if (lexicalParent != semanticParent)
    result->setLexicalDeclContext(lexicalParent);
  return result;
}
const char *HLSLBufferDecl::getDeclKindName() const {
  // Views use the template-style spelling; plain buffers use the keyword.
  if (isConstantBufferView())
    return isCBuffer() ? "ConstantBuffer" : "TextureBuffer";
  return isCBuffer() ? "cbuffer" : "tbuffer";
}
void Sema::TransferUnusualAttributes(Declarator &D, NamedDecl *NewDecl) {
  assert(NewDecl != nullptr);
  // Unusual annotations are an HLSL-only concept; there is also nothing to
  // transfer when the declarator carries none.
  if (!getLangOpts().HLSL || D.UnusualAnnotations.empty())
    return;
  // Move the annotations into ASTContext-owned storage on the new decl and
  // empty the declarator so they are not transferred twice.
  NewDecl->setUnusualAnnotations(UnusualAnnotation::CopyToASTContextArray(
      getASTContext(), D.UnusualAnnotations.data(),
      D.UnusualAnnotations.size()));
  D.UnusualAnnotations.clear();
}
/// Checks whether a usage attribute is compatible with those seen so far and
/// maintains history.
static bool IsUsageAttributeCompatible(AttributeList::Kind kind, bool &usageIn,
                                       bool &usageOut) {
  // Reject any usage that re-states a direction already seen; otherwise
  // record the new direction(s) and accept.
  if (kind == AttributeList::AT_HLSLIn) {
    if (usageIn)
      return false;
    usageIn = true;
    return true;
  }
  if (kind == AttributeList::AT_HLSLOut) {
    if (usageOut)
      return false;
    usageOut = true;
    return true;
  }
  // 'inout' conflicts with any previously seen usage attribute.
  assert(kind == AttributeList::AT_HLSLInOut);
  if (usageOut || usageIn)
    return false;
  usageIn = usageOut = true;
  return true;
}
// Diagnose valid/invalid modifiers for HLSL.
bool Sema::DiagnoseHLSLDecl(Declarator &D, DeclContext *DC, Expr *BitWidth,
TypeSourceInfo *TInfo, bool isParameter) {
assert(getLangOpts().HLSL &&
"otherwise this is called without checking language first");
// If we have a template declaration but haven't enabled templates, error.
if (DC->isDependentContext() &&
getLangOpts().HLSLVersion < hlsl::LangStd::v2021)
return false;
DeclSpec::SCS storage = D.getDeclSpec().getStorageClassSpec();
assert(!DC->isClosure() && "otherwise parser accepted closure syntax instead "
"of failing with a syntax error");
bool result = true;
bool isTypedef = storage == DeclSpec::SCS_typedef;
bool isFunction = D.isFunctionDeclarator() && !DC->isRecord();
bool isLocalVar = DC->isFunctionOrMethod() && !isFunction && !isTypedef;
bool isGlobal = !isParameter && !isTypedef && !isFunction &&
(DC->isTranslationUnit() || DC->isNamespace() ||
DC->getDeclKind() == Decl::HLSLBuffer);
bool isMethod = DC->isRecord() && D.isFunctionDeclarator() && !isTypedef;
bool isField = DC->isRecord() && !D.isFunctionDeclarator() && !isTypedef;
bool isConst = D.getDeclSpec().getTypeQualifiers() & DeclSpec::TQ::TQ_const;
bool isVolatile =
D.getDeclSpec().getTypeQualifiers() & DeclSpec::TQ::TQ_volatile;
bool isStatic = storage == DeclSpec::SCS::SCS_static;
bool isExtern = storage == DeclSpec::SCS::SCS_extern;
bool hasSignSpec =
D.getDeclSpec().getTypeSpecSign() != DeclSpec::TSS::TSS_unspecified;
// Function declarations are not allowed in parameter declaration
// TODO : Remove this check once we support function declarations/pointers in
// HLSL
if (isParameter && isFunction) {
Diag(D.getLocStart(), diag::err_hlsl_func_in_func_decl);
D.setInvalidType();
return false;
}
assert((1 == (isLocalVar ? 1 : 0) + (isGlobal ? 1 : 0) + (isField ? 1 : 0) +
(isTypedef ? 1 : 0) + (isFunction ? 1 : 0) +
(isMethod ? 1 : 0) + (isParameter ? 1 : 0)) &&
"exactly one type of declarator is being processed");
// qt/pType captures either the type being modified, or the return type in the
// case of a function (or method).
QualType qt = TInfo->getType();
const Type *pType = qt.getTypePtrOrNull();
HLSLExternalSource *hlslSource = HLSLExternalSource::FromSema(this);
if (!isFunction)
hlslSource->WarnMinPrecision(qt, D.getLocStart());
// Early checks - these are not simple attribution errors, but constructs that
// are fundamentally unsupported,
// and so we avoid errors that might indicate they can be repaired.
if (DC->isRecord()) {
unsigned int nestedDiagId = 0;
if (isTypedef) {
nestedDiagId = diag::err_hlsl_unsupported_nested_typedef;
}
if (isField && pType && pType->isIncompleteArrayType()) {
nestedDiagId = diag::err_hlsl_unsupported_incomplete_array;
}
if (nestedDiagId) {
Diag(D.getLocStart(), nestedDiagId);
D.setInvalidType();
return false;
}
}
// String and subobject declarations are supported only as top level global
// variables. Const and static modifiers are implied - add them if missing.
if ((hlsl::IsStringType(qt) || hlslSource->IsSubobjectType(qt)) &&
!D.isInvalidType()) {
// string are supported only as top level global variables
if (!DC->isTranslationUnit()) {
Diag(D.getLocStart(), diag::err_hlsl_object_not_global)
<< (int)hlsl::IsStringType(qt);
result = false;
}
if (isExtern) {
Diag(D.getLocStart(), diag::err_hlsl_object_extern_not_supported)
<< (int)hlsl::IsStringType(qt);
result = false;
}
const char *PrevSpec = nullptr;
unsigned DiagID = 0;
if (!isStatic) {
D.getMutableDeclSpec().SetStorageClassSpec(
*this, DeclSpec::SCS_static, D.getLocStart(), PrevSpec, DiagID,
Context.getPrintingPolicy());
isStatic = true;
}
if (!isConst) {
D.getMutableDeclSpec().SetTypeQual(DeclSpec::TQ_const, D.getLocStart(),
PrevSpec, DiagID, getLangOpts());
isConst = true;
}
}
const char *declarationType = (isLocalVar) ? "local variable"
: (isTypedef) ? "typedef"
: (isFunction) ? "function"
: (isMethod) ? "method"
: (isGlobal) ? "global variable"
: (isParameter) ? "parameter"
: (isField) ? "field"
: "<unknown>";
if (pType && D.isFunctionDeclarator()) {
const FunctionProtoType *pFP = pType->getAs<FunctionProtoType>();
if (pFP) {
qt = pFP->getReturnType();
hlslSource->WarnMinPrecision(qt, D.getLocStart());
pType = qt.getTypePtrOrNull();
// prohibit string as a return type
if (hlsl::IsStringType(qt)) {
static const unsigned selectReturnValueIdx = 2;
Diag(D.getLocStart(), diag::err_hlsl_unsupported_string_decl)
<< selectReturnValueIdx;
D.setInvalidType();
}
}
}
// Check for deprecated effect object type here, warn, and invalidate decl
bool bDeprecatedEffectObject = false;
bool bIsObject = false;
if (hlsl::IsObjectType(this, qt, &bDeprecatedEffectObject)) {
bIsObject = true;
if (bDeprecatedEffectObject) {
Diag(D.getLocStart(), diag::warn_hlsl_effect_object);
D.setInvalidType();
return false;
}
} else if (qt->isArrayType()) {
QualType eltQt(qt->getArrayElementTypeNoTypeQual(), 0);
while (eltQt->isArrayType())
eltQt = QualType(eltQt->getArrayElementTypeNoTypeQual(), 0);
if (hlsl::IsObjectType(this, eltQt, &bDeprecatedEffectObject)) {
bIsObject = true;
}
}
if (isExtern) {
if (!(isFunction || isGlobal)) {
Diag(D.getLocStart(), diag::err_hlsl_varmodifierna)
<< "'extern'" << declarationType;
result = false;
}
}
if (isStatic) {
if (!(isLocalVar || isGlobal || isFunction || isMethod || isField)) {
Diag(D.getLocStart(), diag::err_hlsl_varmodifierna)
<< "'static'" << declarationType;
result = false;
}
}
if (isVolatile) {
if (!(isLocalVar || isTypedef)) {
Diag(D.getLocStart(), diag::err_hlsl_varmodifierna)
<< "'volatile'" << declarationType;
result = false;
}
}
if (isConst) {
if (isField && !isStatic) {
Diag(D.getLocStart(), diag::err_hlsl_varmodifierna)
<< "'const'" << declarationType;
result = false;
}
}
ArBasicKind basicKind = hlslSource->GetTypeElementKind(qt);
if (hasSignSpec) {
ArTypeObjectKind objKind = hlslSource->GetTypeObjectKind(qt);
// vectors or matrices can only have unsigned integer types.
if (objKind == AR_TOBJ_MATRIX || objKind == AR_TOBJ_VECTOR ||
objKind == AR_TOBJ_BASIC || objKind == AR_TOBJ_ARRAY) {
if (!IS_BASIC_UNSIGNABLE(basicKind)) {
Diag(D.getLocStart(), diag::err_sema_invalid_sign_spec)
<< g_ArBasicTypeNames[basicKind];
result = false;
}
} else {
Diag(D.getLocStart(), diag::err_sema_invalid_sign_spec)
<< g_ArBasicTypeNames[basicKind];
result = false;
}
}
// Validate attributes
clang::AttributeList *pUniform = nullptr, *pUsage = nullptr,
*pNoInterpolation = nullptr, *pLinear = nullptr,
*pNoPerspective = nullptr, *pSample = nullptr,
*pCentroid = nullptr, *pCenter = nullptr,
*pAnyLinear = nullptr, // first linear attribute found
*pTopology = nullptr, *pMeshModifier = nullptr,
*pDispatchGrid = nullptr, *pMaxDispatchGrid = nullptr;
bool usageIn = false;
bool usageOut = false;
for (clang::AttributeList *pAttr = D.getDeclSpec().getAttributes().getList();
pAttr != NULL; pAttr = pAttr->getNext()) {
if (pAttr->isInvalid() || pAttr->isUsedAsTypeAttr())
continue;
switch (pAttr->getKind()) {
case AttributeList::AT_HLSLPrecise: // precise is applicable everywhere.
break;
case AttributeList::AT_HLSLShared:
if (!isGlobal) {
Diag(pAttr->getLoc(), diag::err_hlsl_varmodifierna)
<< pAttr->getName() << declarationType << pAttr->getRange();
result = false;
}
if (isStatic) {
Diag(pAttr->getLoc(), diag::err_hlsl_varmodifiersna)
<< "'static'" << pAttr->getName() << declarationType
<< pAttr->getRange();
result = false;
}
break;
case AttributeList::AT_HLSLGroupShared:
if (!isGlobal) {
Diag(pAttr->getLoc(), diag::err_hlsl_varmodifierna)
<< pAttr->getName() << declarationType << pAttr->getRange();
result = false;
}
if (isExtern) {
Diag(pAttr->getLoc(), diag::err_hlsl_varmodifiersna)
<< "'extern'" << pAttr->getName() << declarationType
<< pAttr->getRange();
result = false;
}
break;
case AttributeList::AT_HLSLGloballyCoherent: // Handled elsewhere
break;
case AttributeList::AT_HLSLUniform:
if (!(isGlobal || isParameter)) {
Diag(pAttr->getLoc(), diag::err_hlsl_varmodifierna)
<< pAttr->getName() << declarationType << pAttr->getRange();
result = false;
}
if (isStatic) {
Diag(pAttr->getLoc(), diag::err_hlsl_varmodifiersna)
<< "'static'" << pAttr->getName() << declarationType
<< pAttr->getRange();
result = false;
}
pUniform = pAttr;
break;
case AttributeList::AT_HLSLIn:
case AttributeList::AT_HLSLOut:
case AttributeList::AT_HLSLInOut:
if (!isParameter) {
Diag(pAttr->getLoc(), diag::err_hlsl_usage_not_on_parameter)
<< pAttr->getName() << pAttr->getRange();
result = false;
}
if (!IsUsageAttributeCompatible(pAttr->getKind(), usageIn, usageOut)) {
Diag(pAttr->getLoc(), diag::err_hlsl_duplicate_parameter_usages)
<< pAttr->getName() << pAttr->getRange();
result = false;
}
pUsage = pAttr;
break;
case AttributeList::AT_HLSLNoInterpolation:
if (!(isParameter || isField || isFunction)) {
Diag(pAttr->getLoc(), diag::err_hlsl_varmodifierna)
<< pAttr->getName() << declarationType << pAttr->getRange();
result = false;
}
if (pNoInterpolation) {
Diag(pAttr->getLoc(), diag::warn_hlsl_duplicate_specifier)
<< pAttr->getName() << pAttr->getRange();
}
pNoInterpolation = pAttr;
break;
case AttributeList::AT_HLSLLinear:
case AttributeList::AT_HLSLCenter:
case AttributeList::AT_HLSLNoPerspective:
case AttributeList::AT_HLSLSample:
case AttributeList::AT_HLSLCentroid:
if (!(isParameter || isField || isFunction)) {
Diag(pAttr->getLoc(), diag::err_hlsl_varmodifierna)
<< pAttr->getName() << declarationType << pAttr->getRange();
result = false;
}
if (nullptr == pAnyLinear)
pAnyLinear = pAttr;
switch (pAttr->getKind()) {
case AttributeList::AT_HLSLLinear:
if (pLinear) {
Diag(pAttr->getLoc(), diag::warn_hlsl_duplicate_specifier)
<< pAttr->getName() << pAttr->getRange();
}
pLinear = pAttr;
break;
case AttributeList::AT_HLSLCenter:
if (pCenter) {
Diag(pAttr->getLoc(), diag::warn_hlsl_duplicate_specifier)
<< pAttr->getName() << pAttr->getRange();
}
pCenter = pAttr;
break;
case AttributeList::AT_HLSLNoPerspective:
if (pNoPerspective) {
Diag(pAttr->getLoc(), diag::warn_hlsl_duplicate_specifier)
<< pAttr->getName() << pAttr->getRange();
}
pNoPerspective = pAttr;
break;
case AttributeList::AT_HLSLSample:
if (pSample) {
Diag(pAttr->getLoc(), diag::warn_hlsl_duplicate_specifier)
<< pAttr->getName() << pAttr->getRange();
}
pSample = pAttr;
break;
case AttributeList::AT_HLSLCentroid:
if (pCentroid) {
Diag(pAttr->getLoc(), diag::warn_hlsl_duplicate_specifier)
<< pAttr->getName() << pAttr->getRange();
}
pCentroid = pAttr;
break;
default:
// Only relevant to the four attribs included in this block.
break;
}
break;
case AttributeList::AT_HLSLPoint:
case AttributeList::AT_HLSLLine:
case AttributeList::AT_HLSLLineAdj:
case AttributeList::AT_HLSLTriangle:
case AttributeList::AT_HLSLTriangleAdj:
if (!(isParameter)) {
Diag(pAttr->getLoc(), diag::err_hlsl_varmodifierna)
<< pAttr->getName() << declarationType << pAttr->getRange();
result = false;
}
if (pTopology) {
if (pTopology->getKind() == pAttr->getKind()) {
Diag(pAttr->getLoc(), diag::warn_hlsl_duplicate_specifier)
<< pAttr->getName() << pAttr->getRange();
} else {
Diag(pAttr->getLoc(), diag::err_hlsl_varmodifiersna)
<< pAttr->getName() << pTopology->getName() << declarationType
<< pAttr->getRange();
result = false;
}
}
pTopology = pAttr;
break;
case AttributeList::AT_HLSLExport:
if (!isFunction) {
Diag(pAttr->getLoc(), diag::err_hlsl_varmodifierna)
<< pAttr->getName() << declarationType << pAttr->getRange();
result = false;
}
if (isStatic) {
Diag(pAttr->getLoc(), diag::err_hlsl_varmodifiersna)
<< "'static'" << pAttr->getName() << declarationType
<< pAttr->getRange();
result = false;
}
break;
case AttributeList::AT_HLSLIndices:
case AttributeList::AT_HLSLVertices:
case AttributeList::AT_HLSLPrimitives:
case AttributeList::AT_HLSLPayload:
if (!(isParameter)) {
Diag(pAttr->getLoc(), diag::err_hlsl_varmodifierna)
<< pAttr->getName() << declarationType << pAttr->getRange();
result = false;
}
if (pMeshModifier) {
if (pMeshModifier->getKind() == pAttr->getKind()) {
Diag(pAttr->getLoc(), diag::warn_hlsl_duplicate_specifier)
<< pAttr->getName() << pAttr->getRange();
} else {
Diag(pAttr->getLoc(), diag::err_hlsl_varmodifiersna)
<< pAttr->getName() << pMeshModifier->getName() << declarationType
<< pAttr->getRange();
result = false;
}
}
pMeshModifier = pAttr;
break;
case AttributeList::AT_HLSLNodeDispatchGrid:
if (pDispatchGrid) {
// TODO: it would be nice to diffentiate between an exact duplicate and
// conflicting values
Diag(pAttr->getLoc(), diag::warn_duplicate_attribute_exact)
<< pAttr->getName() << pAttr->getRange();
result = false;
} else {
// Note: the NodeDispatchGrid values are validated later in
// HandleDeclAttributeForHLSL()
pDispatchGrid = pAttr;
}
break;
case AttributeList::AT_HLSLNodeMaxDispatchGrid:
if (pMaxDispatchGrid) {
// TODO: it would be nice to diffentiate between an exact duplicate and
// conflicting values
Diag(pAttr->getLoc(), diag::warn_duplicate_attribute_exact)
<< pAttr->getName() << pAttr->getRange();
result = false;
} else {
// Note: the NodeMaxDispatchGrid values are validated later in
// HandleDeclAttributeForHLSL()
pMaxDispatchGrid = pAttr;
}
break;
default:
break;
}
}
if (pNoInterpolation && pAnyLinear) {
Diag(pNoInterpolation->getLoc(), diag::err_hlsl_varmodifiersna)
<< pNoInterpolation->getName() << pAnyLinear->getName()
<< declarationType << pNoInterpolation->getRange();
result = false;
}
if (pSample && pCentroid) {
Diag(pCentroid->getLoc(), diag::warn_hlsl_specifier_overridden)
<< pCentroid->getName() << pSample->getName() << pCentroid->getRange();
}
if (pCenter && pCentroid) {
Diag(pCenter->getLoc(), diag::warn_hlsl_specifier_overridden)
<< pCenter->getName() << pCentroid->getName() << pCenter->getRange();
}
if (pSample && pCenter) {
Diag(pCenter->getLoc(), diag::warn_hlsl_specifier_overridden)
<< pCenter->getName() << pSample->getName() << pCenter->getRange();
}
clang::AttributeList *pNonUniformAttr =
pAnyLinear ? pAnyLinear
: (pNoInterpolation ? pNoInterpolation : pTopology);
if (pUniform && pNonUniformAttr) {
Diag(pUniform->getLoc(), diag::err_hlsl_varmodifiersna)
<< pNonUniformAttr->getName() << pUniform->getName() << declarationType
<< pUniform->getRange();
result = false;
}
if (pAnyLinear && pTopology) {
Diag(pAnyLinear->getLoc(), diag::err_hlsl_varmodifiersna)
<< pTopology->getName() << pAnyLinear->getName() << declarationType
<< pAnyLinear->getRange();
result = false;
}
if (pNoInterpolation && pTopology) {
Diag(pNoInterpolation->getLoc(), diag::err_hlsl_varmodifiersna)
<< pTopology->getName() << pNoInterpolation->getName()
<< declarationType << pNoInterpolation->getRange();
result = false;
}
if (pUniform && pUsage) {
if (pUsage->getKind() != AttributeList::Kind::AT_HLSLIn) {
Diag(pUniform->getLoc(), diag::err_hlsl_varmodifiersna)
<< pUsage->getName() << pUniform->getName() << declarationType
<< pUniform->getRange();
result = false;
}
}
if (pMeshModifier) {
if (pMeshModifier->getKind() == AttributeList::Kind::AT_HLSLPayload) {
if (!usageIn) {
Diag(D.getLocStart(), diag::err_hlsl_missing_in_attr)
<< pMeshModifier->getName();
result = false;
}
} else {
if (!usageOut) {
Diag(D.getLocStart(), diag::err_hlsl_missing_out_attr)
<< pMeshModifier->getName();
result = false;
}
}
}
// Validate that stream-ouput objects are marked as inout
if (isParameter && !(usageIn && usageOut) &&
(basicKind == ArBasicKind::AR_OBJECT_LINESTREAM ||
basicKind == ArBasicKind::AR_OBJECT_POINTSTREAM ||
basicKind == ArBasicKind::AR_OBJECT_TRIANGLESTREAM)) {
Diag(D.getLocStart(), diag::err_hlsl_missing_inout_attr);
result = false;
}
// SPIRV change starts
#ifdef ENABLE_SPIRV_CODEGEN
// Validate that Vulkan specific feature is only used when targeting SPIR-V
if (!getLangOpts().SPIRV) {
if (basicKind == ArBasicKind::AR_OBJECT_VK_SUBPASS_INPUT ||
basicKind == ArBasicKind::AR_OBJECT_VK_SUBPASS_INPUT_MS ||
basicKind == ArBasicKind::AR_OBJECT_VK_SPIRV_TYPE ||
basicKind == ArBasicKind::AR_OBJECT_VK_SPIRV_OPAQUE_TYPE ||
basicKind == ArBasicKind::AR_OBJECT_VK_SPV_INTRINSIC_TYPE ||
basicKind == ArBasicKind::AR_OBJECT_VK_SPV_INTRINSIC_RESULT_ID) {
Diag(D.getLocStart(), diag::err_hlsl_vulkan_specific_feature)
<< g_ArBasicTypeNames[basicKind];
result = false;
}
}
#endif // ENABLE_SPIRV_CODEGEN
// SPIRV change ends
// Disallow bitfields where not enabled explicitly or by HV
if (BitWidth) {
if (getLangOpts().HLSLVersion < hlsl::LangStd::v2021) {
Diag(BitWidth->getExprLoc(), diag::err_hlsl_bitfields);
result = false;
} else if (!D.UnusualAnnotations.empty()) {
Diag(BitWidth->getExprLoc(), diag::err_hlsl_bitfields_with_annotation);
result = false;
}
}
// Validate unusual annotations.
hlsl::DiagnoseUnusualAnnotationsForHLSL(*this, D.UnusualAnnotations);
if (isField)
hlsl::DiagnosePayloadAccessQualifierAnnotations(*this, D, qt,
D.UnusualAnnotations);
auto &&unusualIter = D.UnusualAnnotations.begin();
auto &&unusualEnd = D.UnusualAnnotations.end();
for (; unusualIter != unusualEnd; ++unusualIter) {
switch ((*unusualIter)->getKind()) {
case hlsl::UnusualAnnotation::UA_ConstantPacking: {
hlsl::ConstantPacking *constantPacking =
cast<hlsl::ConstantPacking>(*unusualIter);
if (!isGlobal || HLSLBuffers.size() == 0) {
Diag(constantPacking->Loc, diag::err_hlsl_packoffset_requires_cbuffer);
continue;
}
if (constantPacking->ComponentOffset > 0) {
// Validate that this will fit.
if (!qt.isNull()) {
hlsl::DiagnosePackingOffset(this, constantPacking->Loc, qt,
constantPacking->ComponentOffset);
}
}
break;
}
case hlsl::UnusualAnnotation::UA_RegisterAssignment: {
hlsl::RegisterAssignment *registerAssignment =
cast<hlsl::RegisterAssignment>(*unusualIter);
if (registerAssignment->IsValid) {
if (!qt.isNull()) {
hlsl::DiagnoseRegisterType(this, registerAssignment->Loc, qt,
registerAssignment->RegisterType);
}
}
break;
}
case hlsl::UnusualAnnotation::UA_SemanticDecl: {
hlsl::SemanticDecl *semanticDecl = cast<hlsl::SemanticDecl>(*unusualIter);
if (isTypedef || isLocalVar) {
Diag(semanticDecl->Loc, diag::err_hlsl_varmodifierna)
<< "semantic" << declarationType;
}
break;
}
case hlsl::UnusualAnnotation::UA_PayloadAccessQualifier: {
hlsl::PayloadAccessAnnotation *annotation =
cast<hlsl::PayloadAccessAnnotation>(*unusualIter);
if (!isField) {
Diag(annotation->Loc,
diag::err_hlsl_unsupported_payload_access_qualifier);
}
break;
}
}
}
if (!result) {
D.setInvalidType();
}
return result;
}
// Strip typedef sugar, returning the first non-typedef type in the chain of
// underlying types. Stops early if a TypedefType has no declaration.
static QualType getUnderlyingType(QualType Type) {
  const TypedefType *TD = dyn_cast<TypedefType>(Type);
  while (TD) {
    const TypedefNameDecl *Decl = TD->getDecl();
    if (!Decl)
      break;
    Type = Decl->getUnderlyingType();
    TD = dyn_cast<TypedefType>(Type);
  }
  return Type;
}
/// <summary>Return HLSL AttributedType objects if they exist on type.</summary>
/// <param name="self">Sema with context.</param>
/// <param name="type">QualType to inspect.</param>
/// <param name="ppMatrixOrientation">Set pointer to column_major/row_major
/// AttributedType if supplied.</param> <param name="ppNorm">Set pointer to
/// snorm/unorm AttributedType if supplied.</param>
void hlsl::GetHLSLAttributedTypes(
    clang::Sema *self, clang::QualType type,
    const clang::AttributedType **ppMatrixOrientation,
    const clang::AttributedType **ppNorm, const clang::AttributedType **ppGLC) {
  // Initialize all requested outputs to nullptr so callers can test for
  // "not found" without pre-initializing.
  AssignOpt<const clang::AttributedType *>(nullptr, ppMatrixOrientation);
  AssignOpt<const clang::AttributedType *>(nullptr, ppNorm);
  AssignOpt<const clang::AttributedType *>(nullptr, ppGLC);
  // Note: we clear output pointers once set so we can stop searching
  QualType Desugared = getUnderlyingType(type);
  const AttributedType *AT = dyn_cast<AttributedType>(Desugared);
  // Phase 1: walk the AttributedType chain on the outer type itself,
  // recording the first occurrence of each attribute kind of interest.
  while (AT && (ppMatrixOrientation || ppNorm || ppGLC)) {
    AttributedType::Kind Kind = AT->getAttrKind();
    if (Kind == AttributedType::attr_hlsl_row_major ||
        Kind == AttributedType::attr_hlsl_column_major) {
      if (ppMatrixOrientation) {
        *ppMatrixOrientation = AT;
        ppMatrixOrientation = nullptr;
      }
    } else if (Kind == AttributedType::attr_hlsl_unorm ||
               Kind == AttributedType::attr_hlsl_snorm) {
      if (ppNorm) {
        *ppNorm = AT;
        ppNorm = nullptr;
      }
    } else if (Kind == AttributedType::attr_hlsl_globallycoherent) {
      if (ppGLC) {
        *ppGLC = AT;
        ppGLC = nullptr;
      }
    }
    // Step past this attribute's sugar and continue down the chain.
    Desugared = getUnderlyingType(AT->getEquivalentType());
    AT = dyn_cast<AttributedType>(Desugared);
  }
  // Unwrap component type on vector or matrix and check snorm/unorm
  // Phase 2: snorm/unorm may be attached to the element type of a
  // vector/matrix (e.g. vector<unorm float, 4>) rather than the outer type.
  Desugared = getUnderlyingType(hlsl::GetOriginalElementType(self, Desugared));
  AT = dyn_cast<AttributedType>(Desugared);
  while (AT && ppNorm) {
    AttributedType::Kind Kind = AT->getAttrKind();
    if (Kind == AttributedType::attr_hlsl_unorm ||
        Kind == AttributedType::attr_hlsl_snorm) {
      *ppNorm = AT;
      ppNorm = nullptr;
    }
    Desugared = getUnderlyingType(AT->getEquivalentType());
    AT = dyn_cast<AttributedType>(Desugared);
  }
}
/// <summary>Returns true if QualType is an HLSL Matrix type.</summary>
/// <param name="self">Sema with context.</param>
/// <param name="type">QualType to check.</param>
bool hlsl::IsMatrixType(clang::Sema *self, clang::QualType type) {
  // Ask the HLSL external source to classify the type.
  HLSLExternalSource *source = HLSLExternalSource::FromSema(self);
  return source->GetTypeObjectKind(type) == AR_TOBJ_MATRIX;
}
/// <summary>Returns true if QualType is an HLSL Vector type.</summary>
/// <param name="self">Sema with context.</param>
/// <param name="type">QualType to check.</param>
bool hlsl::IsVectorType(clang::Sema *self, clang::QualType type) {
  // Ask the HLSL external source to classify the type.
  HLSLExternalSource *source = HLSLExternalSource::FromSema(self);
  return source->GetTypeObjectKind(type) == AR_TOBJ_VECTOR;
}
/// <summary>Get element type for an HLSL Matrix or Vector, preserving
/// AttributedType.</summary> <param name="self">Sema with context.</param>
/// <param name="type">Matrix or Vector type.</param>
clang::QualType
hlsl::GetOriginalMatrixOrVectorElementType(clang::QualType type) {
  // TODO: Determine if this is really the best way to get the matrix/vector
  // specialization without losing the AttributedType on the template parameter
  const Type *Ty = type.getTypePtrOrNull();
  if (!Ty)
    return QualType();
  // A non-dependent template specialization type is always "sugar",
  // typically for a RecordType. If it still carries its written arguments,
  // prefer those so attribute sugar on the element type is preserved.
  if (const TemplateSpecializationType *TST =
          Ty->getAs<TemplateSpecializationType>()) {
    if (TST->getNumArgs() > 0)
      return TST->getArg(0).getAsType();
    // No written arguments; desugar to the underlying record type.
    Ty = TST->desugar().getTypePtr();
  }
  if (!Ty)
    return QualType();
  // Fall back to the instantiated specialization's argument list.
  if (const auto *TagTy = Ty->getAs<TagType>()) {
    if (const auto *SpecDecl =
            dyn_cast_or_null<ClassTemplateSpecializationDecl>(
                TagTy->getDecl()))
      return SpecDecl->getTemplateArgs()[0].getAsType();
  }
  return QualType();
}
/// <summary>Get element type, preserving AttributedType, if vector or matrix,
/// otherwise return the type unmodified.</summary> <param name="self">Sema with
/// context.</param> <param name="type">Input type.</param>
clang::QualType hlsl::GetOriginalElementType(clang::Sema *self,
                                             clang::QualType type) {
  // Only vectors and matrices are unwrapped; everything else passes through.
  switch (HLSLExternalSource::FromSema(self)->GetTypeObjectKind(type)) {
  case AR_TOBJ_MATRIX:
  case AR_TOBJ_VECTOR:
    return GetOriginalMatrixOrVectorElementType(type);
  default:
    return type;
  }
}
// Prints an HLSL attribute using HLSL source syntax rather than clang's
// generic pretty-print form. Inline modifiers (in/out, interpolation,
// storage) are written with a trailing space; bracketed attributes (e.g.
// [numthreads(...)], [loop]) are written on their own line at the given
// indentation. Unknown kinds fall back to Attr::printPretty.
void hlsl::CustomPrintHLSLAttr(const clang::Attr *A, llvm::raw_ostream &Out,
                               const clang::PrintingPolicy &Policy,
                               unsigned int Indentation) {
  switch (A->getKind()) {
  // Parameter modifiers
  case clang::attr::HLSLIn:
    Out << "in ";
    break;
  case clang::attr::HLSLInOut:
    Out << "inout ";
    break;
  case clang::attr::HLSLOut:
    Out << "out ";
    break;
  // Interpolation modifiers
  case clang::attr::HLSLLinear:
    Out << "linear ";
    break;
  case clang::attr::HLSLCenter:
    Out << "center ";
    break;
  case clang::attr::HLSLCentroid:
    Out << "centroid ";
    break;
  case clang::attr::HLSLNoInterpolation:
    Out << "nointerpolation ";
    break;
  case clang::attr::HLSLNoPerspective:
    Out << "noperspective ";
    break;
  case clang::attr::HLSLSample:
    Out << "sample ";
    break;
  // Function attributes
  case clang::attr::HLSLClipPlanes: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLClipPlanesAttr *ACast = static_cast<HLSLClipPlanesAttr *>(noconst);
    // clipplanes requires at least one plane; later planes are optional.
    if (!ACast->getClipPlane1())
      break;
    Indent(Indentation, Out);
    Out << "[clipplanes(";
    ACast->getClipPlane1()->printPretty(Out, 0, Policy);
    PrintClipPlaneIfPresent(ACast->getClipPlane2(), Out, Policy);
    PrintClipPlaneIfPresent(ACast->getClipPlane3(), Out, Policy);
    PrintClipPlaneIfPresent(ACast->getClipPlane4(), Out, Policy);
    PrintClipPlaneIfPresent(ACast->getClipPlane5(), Out, Policy);
    PrintClipPlaneIfPresent(ACast->getClipPlane6(), Out, Policy);
    Out << ")]\n";
    break;
  }
  case clang::attr::HLSLDomain: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLDomainAttr *ACast = static_cast<HLSLDomainAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[domain(\"" << ACast->getDomainType() << "\")]\n";
    break;
  }
  case clang::attr::HLSLEarlyDepthStencil:
    Indent(Indentation, Out);
    Out << "[earlydepthstencil]\n";
    break;
  case clang::attr::HLSLInstance: // TODO - test
  {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLInstanceAttr *ACast = static_cast<HLSLInstanceAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[instance(" << ACast->getCount() << ")]\n";
    break;
  }
  case clang::attr::HLSLMaxTessFactor: // TODO - test
  {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLMaxTessFactorAttr *ACast =
        static_cast<HLSLMaxTessFactorAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[maxtessfactor(" << ACast->getFactor() << ")]\n";
    break;
  }
  case clang::attr::HLSLNumThreads: // TODO - test
  {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLNumThreadsAttr *ACast = static_cast<HLSLNumThreadsAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[numthreads(" << ACast->getX() << ", " << ACast->getY() << ", "
        << ACast->getZ() << ")]\n";
    break;
  }
  case clang::attr::HLSLRootSignature: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLRootSignatureAttr *ACast =
        static_cast<HLSLRootSignatureAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[RootSignature(\"" << ACast->getSignatureName() << "\")]\n";
    break;
  }
  case clang::attr::HLSLOutputControlPoints: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLOutputControlPointsAttr *ACast =
        static_cast<HLSLOutputControlPointsAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[outputcontrolpoints(" << ACast->getCount() << ")]\n";
    break;
  }
  case clang::attr::HLSLOutputTopology: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLOutputTopologyAttr *ACast =
        static_cast<HLSLOutputTopologyAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[outputtopology(\"" << ACast->getTopology() << "\")]\n";
    break;
  }
  case clang::attr::HLSLPartitioning: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLPartitioningAttr *ACast = static_cast<HLSLPartitioningAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[partitioning(\"" << ACast->getScheme() << "\")]\n";
    break;
  }
  case clang::attr::HLSLPatchConstantFunc: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLPatchConstantFuncAttr *ACast =
        static_cast<HLSLPatchConstantFuncAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[patchconstantfunc(\"" << ACast->getFunctionName() << "\")]\n";
    break;
  }
  case clang::attr::HLSLShader: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLShaderAttr *ACast = static_cast<HLSLShaderAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[shader(\"" << ACast->getStage() << "\")]\n";
    break;
  }
  case clang::attr::HLSLExperimental: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLExperimentalAttr *ACast = static_cast<HLSLExperimentalAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[experimental(\"" << ACast->getName() << "\", \""
        << ACast->getValue() << "\")]\n";
    break;
  }
  case clang::attr::HLSLMaxVertexCount: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLMaxVertexCountAttr *ACast =
        static_cast<HLSLMaxVertexCountAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[maxvertexcount(" << ACast->getCount() << ")]\n";
    break;
  }
  case clang::attr::NoInline:
    Indent(Indentation, Out);
    Out << "[noinline]\n";
    break;
  case clang::attr::HLSLExport:
    Indent(Indentation, Out);
    Out << "export\n";
    break;
  // Statement attributes
  case clang::attr::HLSLAllowUAVCondition:
    Indent(Indentation, Out);
    Out << "[allow_uav_condition]\n";
    break;
  case clang::attr::HLSLBranch:
    Indent(Indentation, Out);
    Out << "[branch]\n";
    break;
  case clang::attr::HLSLCall:
    Indent(Indentation, Out);
    Out << "[call]\n";
    break;
  case clang::attr::HLSLFastOpt:
    Indent(Indentation, Out);
    Out << "[fastopt]\n";
    break;
  case clang::attr::HLSLFlatten:
    Indent(Indentation, Out);
    Out << "[flatten]\n";
    break;
  case clang::attr::HLSLForceCase:
    Indent(Indentation, Out);
    Out << "[forcecase]\n";
    break;
  case clang::attr::HLSLLoop:
    Indent(Indentation, Out);
    Out << "[loop]\n";
    break;
  case clang::attr::HLSLUnroll: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLUnrollAttr *ACast = static_cast<HLSLUnrollAttr *>(noconst);
    Indent(Indentation, Out);
    // A count of 0 means an unbounded [unroll] with no argument.
    if (ACast->getCount() == 0)
      Out << "[unroll]\n";
    else
      Out << "[unroll(" << ACast->getCount() << ")]\n";
    break;
  }
  case clang::attr::HLSLWaveSize: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLWaveSizeAttr *ACast = static_cast<HLSLWaveSizeAttr *>(noconst);
    Indent(Indentation, Out);
    // Max and preferred are optional; 0 means "not specified".
    Out << "[wavesize(" << ACast->getMin();
    if (ACast->getMax() > 0) {
      Out << ", " << ACast->getMax();
      if (ACast->getPreferred() > 0)
        Out << ", " << ACast->getPreferred();
    }
    Out << ")]\n";
    break;
  }
  // Variable modifiers
  case clang::attr::HLSLGroupShared:
    Out << "groupshared ";
    break;
  case clang::attr::HLSLPrecise:
    Out << "precise ";
    break;
  case clang::attr::HLSLSemantic: // TODO: Consider removing HLSLSemantic
                                  // attribute
    break;
  case clang::attr::HLSLShared:
    Out << "shared ";
    break;
  case clang::attr::HLSLUniform:
    Out << "uniform ";
    break;
  // snorm/unorm are printed in TypePrinter::printAttributedBefore,
  // so nothing is emitted here.
  case clang::attr::HLSLSnorm:
  case clang::attr::HLSLUnorm:
    break;
  case clang::attr::HLSLPoint:
    Out << "point ";
    break;
  case clang::attr::HLSLLine:
    Out << "line ";
    break;
  case clang::attr::HLSLLineAdj:
    Out << "lineadj ";
    break;
  case clang::attr::HLSLTriangle:
    Out << "triangle ";
    break;
  case clang::attr::HLSLTriangleAdj:
    Out << "triangleadj ";
    break;
  case clang::attr::HLSLGloballyCoherent:
    Out << "globallycoherent ";
    break;
  case clang::attr::HLSLIndices:
    Out << "indices ";
    break;
  case clang::attr::HLSLVertices:
    Out << "vertices ";
    break;
  case clang::attr::HLSLPrimitives:
    Out << "primitives ";
    break;
  case clang::attr::HLSLPayload:
    Out << "payload ";
    break;
  case clang::attr::HLSLNodeLaunch: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLNodeLaunchAttr *ACast = static_cast<HLSLNodeLaunchAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[NodeLaunch(\"" << ACast->getLaunchType() << "\")]\n";
    break;
  }
  case clang::attr::HLSLNodeIsProgramEntry:
    Indent(Indentation, Out);
    Out << "[NodeIsProgramEntry]\n";
    break;
  case clang::attr::HLSLNodeId: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLNodeIdAttr *ACast = static_cast<HLSLNodeIdAttr *>(noconst);
    Indent(Indentation, Out);
    // The array-index argument is only printed when present (> 0).
    if (ACast->getArrayIndex() > 0)
      Out << "[NodeId(\"" << ACast->getName() << "\"," << ACast->getArrayIndex()
          << ")]\n";
    else
      Out << "[NodeId(\"" << ACast->getName() << "\")]\n";
    break;
  }
  case clang::attr::HLSLNodeLocalRootArgumentsTableIndex: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLNodeLocalRootArgumentsTableIndexAttr *ACast =
        static_cast<HLSLNodeLocalRootArgumentsTableIndexAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[NodeLocalRootTableIndex(" << ACast->getIndex() << ")]\n";
    break;
  }
  case clang::attr::HLSLNodeShareInputOf: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLNodeShareInputOfAttr *ACast =
        static_cast<HLSLNodeShareInputOfAttr *>(noconst);
    Indent(Indentation, Out);
    // The array-index argument is only printed when present (> 0).
    if (ACast->getArrayIndex() > 0)
      Out << "[NodeShareInputOf(\"" << ACast->getName() << "\","
          << ACast->getArrayIndex() << ")]\n";
    else
      Out << "[NodeShareInputOf(\"" << ACast->getName() << "\")]\n";
    break;
  }
  case clang::attr::HLSLNodeTrackRWInputSharing: {
    Indent(Indentation, Out);
    Out << "[NodeTrackRWInputSharing]\n";
    break;
  }
  case clang::attr::HLSLNodeDispatchGrid: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLNodeDispatchGridAttr *ACast =
        static_cast<HLSLNodeDispatchGridAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[NodeDispatchGrid(" << ACast->getX() << ", " << ACast->getY()
        << ", " << ACast->getZ() << ")]\n";
    break;
  }
  case clang::attr::HLSLNodeMaxDispatchGrid: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLNodeMaxDispatchGridAttr *ACast =
        static_cast<HLSLNodeMaxDispatchGridAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[NodeMaxDispatchGrid(" << ACast->getX() << ", " << ACast->getY()
        << ", " << ACast->getZ() << ")]\n";
    break;
  }
  case clang::attr::HLSLNodeMaxRecursionDepth: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLNodeMaxRecursionDepthAttr *ACast =
        static_cast<HLSLNodeMaxRecursionDepthAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[NodeMaxRecursionDepth(" << ACast->getCount() << ")]\n";
    break;
  }
  case clang::attr::HLSLMaxRecords: {
    Attr *noconst = const_cast<Attr *>(A);
    auto *ACast = static_cast<HLSLMaxRecordsAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[MaxRecords(" << ACast->getMaxCount() << ")]\n";
    break;
  }
  case clang::attr::HLSLNodeArraySize: {
    Attr *noconst = const_cast<Attr *>(A);
    auto *ACast = static_cast<HLSLNodeArraySizeAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[NodeArraySize(" << ACast->getCount() << ")]\n";
    break;
  }
  case clang::attr::HLSLMaxRecordsSharedWith: {
    Attr *noconst = const_cast<Attr *>(A);
    HLSLMaxRecordsSharedWithAttr *ACast =
        static_cast<HLSLMaxRecordsSharedWithAttr *>(noconst);
    Indent(Indentation, Out);
    Out << "[MaxRecordsSharedWith(\"" << ACast->getName() << "\")]\n";
    break;
  }
  case clang::attr::HLSLAllowSparseNodes: {
    Indent(Indentation, Out);
    Out << "[AllowSparseNodes]\n";
    break;
  }
  case clang::attr::HLSLUnboundedSparseNodes: {
    Indent(Indentation, Out);
    Out << "[UnboundedSparseNodes]\n";
    break;
  }
  default:
    // Fall back to clang's generic attribute printing for any kind not
    // handled above.
    A->printPretty(Out, Policy);
    break;
  }
}
// Returns true for attribute kinds that are HLSL- or Vulkan-specific and are
// therefore printed by CustomPrintHLSLAttr rather than Attr::printPretty.
bool hlsl::IsHLSLAttr(clang::attr::Kind AttrKind) {
  switch (AttrKind) {
  case clang::attr::HLSLAllowUAVCondition:
  case clang::attr::HLSLBranch:
  case clang::attr::HLSLCall:
  case clang::attr::HLSLCentroid:
  case clang::attr::HLSLClipPlanes:
  case clang::attr::HLSLDomain:
  case clang::attr::HLSLEarlyDepthStencil:
  case clang::attr::HLSLFastOpt:
  case clang::attr::HLSLFlatten:
  case clang::attr::HLSLForceCase:
  case clang::attr::HLSLGroupShared:
  case clang::attr::HLSLIn:
  case clang::attr::HLSLInOut:
  case clang::attr::HLSLInstance:
  case clang::attr::HLSLLinear:
  case clang::attr::HLSLCenter:
  case clang::attr::HLSLLoop:
  case clang::attr::HLSLMaxTessFactor:
  case clang::attr::HLSLNoInterpolation:
  case clang::attr::HLSLNoPerspective:
  case clang::attr::HLSLNumThreads:
  case clang::attr::HLSLRootSignature:
  case clang::attr::HLSLOut:
  case clang::attr::HLSLOutputControlPoints:
  case clang::attr::HLSLOutputTopology:
  case clang::attr::HLSLPartitioning:
  case clang::attr::HLSLPatchConstantFunc:
  case clang::attr::HLSLMaxVertexCount:
  case clang::attr::HLSLPrecise:
  case clang::attr::HLSLSample:
  case clang::attr::HLSLSemantic:
  case clang::attr::HLSLShader:
  case clang::attr::HLSLShared:
  case clang::attr::HLSLSnorm:
  case clang::attr::HLSLUniform:
  case clang::attr::HLSLUnorm:
  case clang::attr::HLSLUnroll:
  case clang::attr::HLSLPoint:
  case clang::attr::HLSLLine:
  case clang::attr::HLSLLineAdj:
  case clang::attr::HLSLTriangle:
  case clang::attr::HLSLTriangleAdj:
  case clang::attr::HLSLGloballyCoherent:
  case clang::attr::HLSLIndices:
  case clang::attr::HLSLVertices:
  case clang::attr::HLSLPrimitives:
  case clang::attr::HLSLPayload:
  case clang::attr::NoInline:
  case clang::attr::HLSLExport:
  case clang::attr::HLSLWaveSensitive:
  case clang::attr::HLSLWaveSize:
  case clang::attr::HLSLMaxRecordsSharedWith:
  case clang::attr::HLSLMaxRecords:
  case clang::attr::HLSLNodeArraySize:
  case clang::attr::HLSLAllowSparseNodes:
  case clang::attr::HLSLUnboundedSparseNodes:
  case clang::attr::HLSLNodeDispatchGrid:
  case clang::attr::HLSLNodeMaxDispatchGrid:
  case clang::attr::HLSLNodeMaxRecursionDepth:
  case clang::attr::HLSLNodeId:
  case clang::attr::HLSLNodeIsProgramEntry:
  case clang::attr::HLSLNodeLaunch:
  case clang::attr::HLSLNodeLocalRootArgumentsTableIndex:
  case clang::attr::HLSLNodeShareInputOf:
  case clang::attr::HLSLNodeTrackRWInputSharing:
  case clang::attr::VKBinding:
  case clang::attr::VKBuiltIn:
  case clang::attr::VKConstantId:
  case clang::attr::VKCounterBinding:
  case clang::attr::VKIndex:
  case clang::attr::VKInputAttachmentIndex:
  case clang::attr::VKLocation:
  case clang::attr::VKOffset:
  case clang::attr::VKPushConstant:
  case clang::attr::VKShaderRecordNV:
  case clang::attr::VKShaderRecordEXT:
    return true;
  default:
    // Only HLSL/VK Attributes return true. Only used for printPretty(), which
    // doesn't support them.
    break;
  }
  return false;
}
// Helper for clipplanes printing: emits ", <expr>" when the optional clip
// plane expression is non-null, otherwise emits nothing.
void hlsl::PrintClipPlaneIfPresent(clang::Expr *ClipPlane,
                                   llvm::raw_ostream &Out,
                                   const clang::PrintingPolicy &Policy) {
  if (!ClipPlane)
    return;
  Out << ", ";
  ClipPlane->printPretty(Out, 0, Policy);
}
// Returns true if the type is an HLSL object type. When requested, also
// reports whether it is a deprecated legacy-effect object (only meaningful
// when the function returns true; otherwise set to false).
bool hlsl::IsObjectType(clang::Sema *self, clang::QualType type,
                        bool *isDeprecatedEffectObject) {
  HLSLExternalSource *pExternalSource = HLSLExternalSource::FromSema(self);
  bool isObject = pExternalSource != nullptr &&
                  pExternalSource->GetTypeObjectKind(type) == AR_TOBJ_OBJECT;
  if (isDeprecatedEffectObject)
    *isDeprecatedEffectObject =
        isObject &&
        pExternalSource->GetTypeElementKind(type) == AR_OBJECT_LEGACY_EFFECT;
  return isObject;
}
// Thin wrapper forwarding conversion checks to the HLSL external source.
bool hlsl::CanConvert(clang::Sema *self, clang::SourceLocation loc,
                      clang::Expr *sourceExpr, clang::QualType target,
                      bool explicitConversion,
                      clang::StandardConversionSequence *standard) {
  HLSLExternalSource *source = HLSLExternalSource::FromSema(self);
  return source->CanConvert(loc, sourceExpr, target, explicitConversion,
                            nullptr, standard);
}
// Writes `Indentation` two-space indent units to the stream.
void hlsl::Indent(unsigned int Indentation, llvm::raw_ostream &Out) {
  unsigned remaining = Indentation;
  while (remaining != 0) {
    Out << "  ";
    --remaining;
  }
}
// Registers an intrinsic table with the HLSL external Sema source.
// `self` must actually be an HLSLExternalSource instance.
void hlsl::RegisterIntrinsicTable(clang::ExternalSemaSource *self,
                                  IDxcIntrinsicTable *table) {
  DXASSERT_NOMSG(self != nullptr);
  DXASSERT_NOMSG(table != nullptr);
  static_cast<HLSLExternalSource *>(self)->RegisterIntrinsicTable(table);
}
// Thin wrapper forwarding vector conditional (?:) checking to the HLSL
// external source.
clang::QualType
hlsl::CheckVectorConditional(clang::Sema *self, clang::ExprResult &Cond,
                             clang::ExprResult &LHS, clang::ExprResult &RHS,
                             clang::SourceLocation QuestionLoc) {
  HLSLExternalSource *source = HLSLExternalSource::FromSema(self);
  return source->CheckVectorConditional(Cond, LHS, RHS, QuestionLoc);
}
// Returns true if the type is numeric; the element count reported by the
// external source is discarded.
bool IsTypeNumeric(clang::Sema *self, clang::QualType &type) {
  UINT elementCount;
  return HLSLExternalSource::FromSema(self)->IsTypeNumeric(type,
                                                           &elementCount);
}
// Diagnoses constant out-of-bounds subscripts on HLSL vectors and matrix
// rows (e.g. float4 v; v[4];). `expr` must be an operator[] call.
// Non-constant indices are not checked here.
void Sema::CheckHLSLArrayAccess(const Expr *expr) {
  DXASSERT_NOMSG(isa<CXXOperatorCallExpr>(expr));
  const CXXOperatorCallExpr *OperatorCallExpr = cast<CXXOperatorCallExpr>(expr);
  DXASSERT_NOMSG(OperatorCallExpr->getOperator() ==
                 OverloadedOperatorKind::OO_Subscript);
  const Expr *RHS = OperatorCallExpr->getArg(1); // first subscript expression
  llvm::APSInt index;
  // Only constant-foldable indices can be range-checked at compile time.
  if (RHS->EvaluateAsInt(index, Context)) {
    int64_t intIndex = index.getLimitedValue();
    const QualType LHSQualType = OperatorCallExpr->getArg(0)->getType();
    if (IsVectorType(this, LHSQualType)) {
      uint32_t vectorSize = GetHLSLVecSize(LHSQualType);
      // If the expression is a double subscript on a matrix (e.g.
      // x[0][1]) we also have to check the first subscript operator by
      // recursively calling this function for the first CXXOperatorCallExpr
      if (isa<CXXOperatorCallExpr>(OperatorCallExpr->getArg(0))) {
        const CXXOperatorCallExpr *object =
            cast<CXXOperatorCallExpr>(OperatorCallExpr->getArg(0));
        if (object->getOperator() == OverloadedOperatorKind::OO_Subscript) {
          CheckHLSLArrayAccess(object);
        }
      }
      if (intIndex < 0 || (uint32_t)intIndex >= vectorSize) {
        Diag(RHS->getExprLoc(),
             diag::err_hlsl_vector_element_index_out_of_bounds)
            << (int)intIndex;
      }
    } else if (IsMatrixType(this, LHSQualType)) {
      // A single subscript on a matrix selects a row; check against the
      // row count only.
      uint32_t rowCount, colCount;
      GetHLSLMatRowColCount(LHSQualType, rowCount, colCount);
      if (intIndex < 0 || (uint32_t)intIndex >= rowCount) {
        Diag(RHS->getExprLoc(), diag::err_hlsl_matrix_row_index_out_of_bounds)
            << (int)intIndex;
      }
    }
  }
}
// Applies a signed/unsigned type specifier (TSS) to a parsed HLSL type by
// delegating to the external Sema source, which owns the signedness rules.
clang::QualType ApplyTypeSpecSignToParsedType(clang::Sema *self,
                                              clang::QualType &type,
                                              clang::TypeSpecifierSign TSS,
                                              clang::SourceLocation Loc) {
  HLSLExternalSource *Source = HLSLExternalSource::FromSema(self);
  return Source->ApplyTypeSpecSignToParsedType(type, TSS, Loc);
}
// Builds the default specialization of `Decl` (e.g. Texture2D for
// Texture2D<float4>) when every template parameter has a default; returns a
// null QualType when at least one argument is required.
QualType Sema::getHLSLDefaultSpecialization(TemplateDecl *Decl) {
  if (Decl->getTemplateParameters()->getMinRequiredArguments() != 0)
    return QualType();
  SourceLocation EndLoc = Decl->getSourceRange().getEnd();
  // Specialize with an empty <> argument list anchored at the decl's end.
  TemplateArgumentListInfo EmptyArgs;
  EmptyArgs.setLAngleLoc(EndLoc);
  EmptyArgs.setRAngleLoc(EndLoc);
  return CheckTemplateIdType(TemplateName(Decl), EndLoc, EmptyArgs);
}
// Returns true when `E` (ignoring casts) ultimately refers to a declaration
// carrying the 'nointerpolation' attribute, walking through member access,
// vector swizzles and array subscripts.
static bool isRelatedDeclMarkedNointerpolation(Expr *E) {
  if (!E)
    return false;
  E = E->IgnoreCasts();
  // Direct reference: check the referenced declaration itself.
  if (auto *Ref = dyn_cast<DeclRefExpr>(E))
    return Ref->getDecl()->hasAttr<HLSLNoInterpolationAttr>();
  // Member access: the member itself, or anything along its base chain.
  if (auto *Member = dyn_cast<MemberExpr>(E)) {
    if (Member->getMemberDecl()->hasAttr<HLSLNoInterpolationAttr>())
      return true;
    return isRelatedDeclMarkedNointerpolation(Member->getBase());
  }
  // Swizzles and subscripts: recurse into the underlying expression.
  if (auto *Vec = dyn_cast<HLSLVectorElementExpr>(E))
    return isRelatedDeclMarkedNointerpolation(Vec->getBase());
  if (auto *Subscript = dyn_cast<ArraySubscriptExpr>(E))
    return isRelatedDeclMarkedNointerpolation(Subscript->getBase());
  return false;
}
// GetAttributeAtVertex requires its first argument to (transitively)
// reference a declaration marked 'nointerpolation'. Returns true (error)
// and diagnoses when that requirement is not met.
static bool CheckIntrinsicGetAttributeAtVertex(Sema *S, FunctionDecl *FDecl,
                                               CallExpr *TheCall) {
  assert(TheCall->getNumArgs() > 0);
  Expr *FirstArg = TheCall->getArg(0)->IgnoreCasts();
  if (isRelatedDeclMarkedNointerpolation(FirstArg))
    return false;
  S->Diag(FirstArg->getExprLoc(), diag::err_hlsl_parameter_requires_attribute)
      << 0 << FDecl->getName() << "nointerpolation";
  return true;
}
// Performs per-intrinsic call validation. Currently only
// GetAttributeAtVertex has extra constraints. Returns true on error.
bool Sema::CheckHLSLIntrinsicCall(FunctionDecl *FDecl, CallExpr *TheCall) {
  auto attr = FDecl->getAttr<HLSLIntrinsicAttr>();
  if (hlsl::IntrinsicOp(attr->getOpcode()) !=
      hlsl::IntrinsicOp::IOP_GetAttributeAtVertex)
    return false;
  // See #hlsl-specs/issues/181. Feature is broken. For SPIR-V we want
  // to limit the scope, and fail gracefully in some cases.
  if (!getLangOpts().SPIRV)
    return false;
  // This should never happen for SPIR-V. But on the DXIL side, extensions
  // can add new intrinsics, so opcodes can collide with existing ones. See
  // the ExtensionTest.EvalAttributeCollision test.
  assert(FDecl->getName() == "GetAttributeAtVertex");
  return CheckIntrinsicGetAttributeAtVertex(this, FDecl, TheCall);
}
// Validates an HLSL call expression: intrinsic-specific checks first, then
// (SPIR-V only) enforces that arguments bound to 'nointerpolation'
// parameters are themselves marked 'nointerpolation'. Returns true on
// error.
bool Sema::CheckHLSLFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) {
  if (hlsl::IsIntrinsicOp(FDecl) && CheckHLSLIntrinsicCall(FDecl, TheCall))
    return true;
  // See #hlsl-specs/issues/181. Feature is broken. For SPIR-V we want
  // to limit the scope, and fail gracefully in some cases.
  if (!getLangOpts().SPIRV)
    return false;
  bool AnyError = false;
  for (unsigned ArgIdx = 0; ArgIdx < FDecl->getNumParams(); ++ArgIdx) {
    assert(ArgIdx < TheCall->getNumArgs());
    if (!FDecl->getParamDecl(ArgIdx)->hasAttr<HLSLNoInterpolationAttr>())
      continue;
    Expr *Arg = TheCall->getArg(ArgIdx);
    if (isRelatedDeclMarkedNointerpolation(Arg))
      continue;
    Diag(Arg->getExprLoc(), diag::err_hlsl_parameter_requires_attribute)
        << ArgIdx << FDecl->getName() << "nointerpolation";
    AnyError = true;
  }
  return AnyError;
}
namespace hlsl {
// Returns true when the node input record kind `IOType` is legal for the
// given node launch type. Each input kind is valid for exactly one launch
// type; unknown kinds are never compatible.
static bool nodeInputIsCompatible(DXIL::NodeIOKind IOType,
                                  DXIL::NodeLaunchType launchType) {
  DXIL::NodeLaunchType Required;
  switch (IOType) {
  case DXIL::NodeIOKind::DispatchNodeInputRecord:
  case DXIL::NodeIOKind::RWDispatchNodeInputRecord:
    Required = DXIL::NodeLaunchType::Broadcasting;
    break;
  case DXIL::NodeIOKind::GroupNodeInputRecords:
  case DXIL::NodeIOKind::RWGroupNodeInputRecords:
  case DXIL::NodeIOKind::EmptyInput:
    Required = DXIL::NodeLaunchType::Coalescing;
    break;
  case DXIL::NodeIOKind::ThreadNodeInputRecord:
  case DXIL::NodeIOKind::RWThreadNodeInputRecord:
    Required = DXIL::NodeLaunchType::Thread;
    break;
  default:
    return false;
  }
  return launchType == Required;
}
// Diagnose an input node record to make sure it has exactly one
// SV_DispatchGrid semantic. Recursively walks all fields on the record and
// all of its base classes/structs.
//
// Params:
//   InputRecordDecl - the record (or base/nested record) being scanned
//   NodeRecordLoc   - location of the node record, used in notes
//   DispatchGridLoc - in/out: location of the first SV_DispatchGrid seen,
//                     referenced when reporting a duplicate
//   Found           - in/out: whether SV_DispatchGrid was already found
void DiagnoseDispatchGridSemantics(Sema &S, RecordDecl *InputRecordDecl,
                                   SourceLocation NodeRecordLoc,
                                   SourceLocation &DispatchGridLoc,
                                   bool &Found) {
  if (auto *CXXInputRecordDecl = dyn_cast<CXXRecordDecl>(InputRecordDecl)) {
    // Walk up the inheritance chain and check all fields on base classes
    for (auto &B : CXXInputRecordDecl->bases()) {
      const RecordType *BaseStructType = B.getType()->getAsStructureType();
      if (nullptr != BaseStructType) {
        CXXRecordDecl *BaseTypeDecl =
            dyn_cast<CXXRecordDecl>(BaseStructType->getDecl());
        if (nullptr != BaseTypeDecl) {
          DiagnoseDispatchGridSemantics(S, BaseTypeDecl, NodeRecordLoc,
                                        DispatchGridLoc, Found);
        }
      }
    }
  }
  // Iterate over fields of the current struct
  for (FieldDecl *FD : InputRecordDecl->fields()) {
    // Check if any of the fields have SV_DispatchGrid annotation
    for (const hlsl::UnusualAnnotation *it : FD->getUnusualAnnotations()) {
      if (it->getKind() == hlsl::UnusualAnnotation::UA_SemanticDecl) {
        const hlsl::SemanticDecl *sd = cast<hlsl::SemanticDecl>(it);
        if (sd->SemanticName.equals("SV_DispatchGrid")) {
          if (!Found) {
            Found = true;
            // Validate the field's type: SV_DispatchGrid must be a scalar,
            // vector, or array of at most 3 uint/uint16 elements.
            QualType Ty = FD->getType();
            QualType ElTy = Ty;
            unsigned NumElt = 1;
            if (hlsl::IsVectorType(&S, Ty)) {
              NumElt = hlsl::GetElementCount(Ty);
              ElTy = hlsl::GetHLSLVecElementType(Ty);
            } else if (const ArrayType *AT = Ty->getAsArrayTypeUnsafe()) {
              if (auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
                NumElt = CAT->getSize().getZExtValue();
                ElTy = AT->getElementType();
              }
            }
            ElTy = ElTy.getDesugaredType(S.getASTContext());
            if (NumElt > 3 || (ElTy != S.getASTContext().UnsignedIntTy &&
                               ElTy != S.getASTContext().UnsignedShortTy)) {
              S.Diags.Report(
                  it->Loc,
                  diag::err_hlsl_incompatible_dispatchgrid_semantic_type)
                  << Ty;
              S.Diags.Report(NodeRecordLoc, diag::note_defined_here)
                  << "NodeInput/Output record";
            }
            // Remember where the first occurrence was for duplicate notes.
            DispatchGridLoc = it->Loc;
          } else {
            // There should be just one SV_DispatchGrid in per record struct
            S.Diags.Report(
                it->Loc,
                diag::err_hlsl_dispatchgrid_semantic_already_specified);
            S.Diags.Report(DispatchGridLoc, diag::note_defined_here)
                << "other SV_DispatchGrid";
          }
          break;
        }
      }
    }
    // Check nested structs
    const RecordType *FieldTypeAsStruct = FD->getType()->getAsStructureType();
    if (nullptr != FieldTypeAsStruct) {
      CXXRecordDecl *FieldTypeDecl =
          dyn_cast<CXXRecordDecl>(FieldTypeAsStruct->getDecl());
      if (nullptr != FieldTypeDecl) {
        DiagnoseDispatchGridSemantics(S, FieldTypeDecl, NodeRecordLoc,
                                      DispatchGridLoc, Found);
      }
    }
  }
}
// Convenience overload: runs the SV_DispatchGrid diagnosis with a
// locally-scoped location for the first semantic occurrence.
void DiagnoseDispatchGridSemantics(Sema &S, RecordDecl *NodeRecordStruct,
                                   SourceLocation NodeRecordLoc, bool &Found) {
  SourceLocation FirstDispatchGridLoc;
  DiagnoseDispatchGridSemantics(S, NodeRecordStruct, NodeRecordLoc,
                                FirstDispatchGridLoc, Found);
}
// Amplification shader entry points must declare [numthreads(...)].
void DiagnoseAmplificationEntry(Sema &S, FunctionDecl *FD,
                                llvm::StringRef StageName) {
  if (FD->getAttr<HLSLNumThreadsAttr>() == nullptr)
    S.Diags.Report(FD->getLocation(), diag::err_hlsl_missing_attr)
        << StageName << "numthreads";
}
// Warn when a vertex entry's return value carries a POSITION/POSITION0
// semantic, which is usually a misuse hint for the vertex stage.
void DiagnoseVertexEntry(Sema &S, FunctionDecl *FD, llvm::StringRef StageName) {
  for (auto *Annotation : FD->getUnusualAnnotations()) {
    auto *Semantic = dyn_cast<hlsl::SemanticDecl>(Annotation);
    if (!Semantic)
      continue;
    const bool IsPosition =
        Semantic->SemanticName.equals_lower("POSITION") ||
        Semantic->SemanticName.equals_lower("POSITION0");
    if (IsPosition)
      S.Diags.Report(FD->getLocation(),
                     diag::warn_hlsl_semantic_attribute_position_misuse_hint)
          << Semantic->SemanticName;
  }
}
// Mesh shader entry points require both [numthreads] and [outputtopology].
void DiagnoseMeshEntry(Sema &S, FunctionDecl *FD, llvm::StringRef StageName) {
  if (FD->getAttr<HLSLNumThreadsAttr>() == nullptr)
    S.Diags.Report(FD->getLocation(), diag::err_hlsl_missing_attr)
        << StageName << "numthreads";
  if (FD->getAttr<HLSLOutputTopologyAttr>() == nullptr)
    S.Diags.Report(FD->getLocation(), diag::err_hlsl_missing_attr)
        << StageName << "outputtopology";
}
// Domain shader entry points take an OutputPatch parameter whose control
// point count must be greater than zero; emit err_hlsl_outputpatch_size
// otherwise.
void DiagnoseDomainEntry(Sema &S, FunctionDecl *FD, llvm::StringRef StageName) {
  for (const auto *param : FD->params()) {
    if (!hlsl::IsHLSLOutputPatchType(param->getType()))
      continue;
    // BUGFIX: query the OutputPatch control point count here. The previous
    // code asked for the InputPatch count on an OutputPatch-typed
    // parameter, so a zero-sized OutputPatch could escape this diagnostic.
    if (hlsl::GetHLSLOutputPatchCount(param->getType()) > 0)
      continue;
    S.Diags.Report(param->getLocation(), diag::err_hlsl_outputpatch_size);
  }
}
// Hull shader entry points require the patchconstantfunc, outputtopology
// and outputcontrolpoints attributes, and any InputPatch parameter must
// declare a control point count greater than zero.
void DiagnoseHullEntry(Sema &S, FunctionDecl *FD, llvm::StringRef StageName) {
  if (FD->getAttr<HLSLPatchConstantFuncAttr>() == nullptr)
    S.Diags.Report(FD->getLocation(), diag::err_hlsl_missing_attr)
        << StageName << "patchconstantfunc";
  if (FD->getAttr<HLSLOutputTopologyAttr>() == nullptr)
    S.Diags.Report(FD->getLocation(), diag::err_hlsl_missing_attr)
        << StageName << "outputtopology";
  if (FD->getAttr<HLSLOutputControlPointsAttr>() == nullptr)
    S.Diags.Report(FD->getLocation(), diag::err_hlsl_missing_attr)
        << StageName << "outputcontrolpoints";
  for (const auto *Param : FD->params()) {
    if (hlsl::IsHLSLInputPatchType(Param->getType()) &&
        !(hlsl::GetHLSLInputPatchCount(Param->getType()) > 0))
      S.Diags.Report(Param->getLocation(), diag::err_hlsl_inputpatch_size);
  }
}
// Geometry shader entry points must declare [maxvertexcount(...)].
void DiagnoseGeometryEntry(Sema &S, FunctionDecl *FD,
                           llvm::StringRef StageName) {
  if (FD->getAttr<HLSLMaxVertexCountAttr>() == nullptr)
    S.Diags.Report(FD->getLocation(), diag::err_hlsl_missing_attr)
        << StageName << "maxvertexcount";
}
// Validate a compute entry point: [numthreads] is required, [wavesize]
// needs shader model 6.6+, and the wavesize range form (more than one
// spelled argument) needs 6.8+. Shader-model-dependent diagnostics only
// run for the active entry, since an inactive entry may be compiled later
// under a different target.
void DiagnoseComputeEntry(Sema &S, FunctionDecl *FD, llvm::StringRef StageName,
                          bool isActiveEntry) {
  if (!isActiveEntry)
    return;
  if (FD->getAttr<HLSLNumThreadsAttr>() == nullptr)
    S.Diags.Report(FD->getLocation(), diag::err_hlsl_missing_attr)
        << StageName << "numthreads";
  auto WaveSizeAttr = FD->getAttr<HLSLWaveSizeAttr>();
  if (!WaveSizeAttr)
    return;
  std::string Profile = S.getLangOpts().HLSLProfile;
  const ShaderModel *SM = hlsl::ShaderModel::GetByName(Profile.c_str());
  if (!SM->IsSM66Plus()) {
    S.Diags.Report(WaveSizeAttr->getRange().getBegin(),
                   diag::err_hlsl_attribute_in_wrong_shader_model)
        << "wavesize"
        << "6.6";
  }
  if (!SM->IsSM68Plus() && WaveSizeAttr->getSpelledArgsCount() > 1)
    S.Diags.Report(WaveSizeAttr->getRange().getBegin(),
                   diag::err_hlsl_wavesize_insufficient_shader_model)
        << "wavesize" << 1;
}
// Validates a node (work graph) shader entry point:
//  - launch-type-specific attribute requirements (numthreads, dispatch grid)
//  - wavesize shader-model availability (active entry only)
//  - compatibility between node input records and the launch type
//  - per-parameter attribute constraints on node inputs and outputs
void DiagnoseNodeEntry(Sema &S, FunctionDecl *FD, llvm::StringRef StageName,
                       bool isActiveEntry) {
  SourceLocation NodeLoc = SourceLocation();
  SourceLocation NodeLaunchLoc = SourceLocation();
  DXIL::NodeLaunchType NodeLaunchTy = DXIL::NodeLaunchType::Invalid;
  unsigned InputCount = 0;
  auto pAttr = FD->getAttr<HLSLShaderAttr>();
  DXIL::ShaderKind shaderKind = ShaderModel::KindFromFullName(StageName);
  if (shaderKind == DXIL::ShaderKind::Node) {
    NodeLoc = pAttr->getLocation();
  }
  // Nothing to do unless this entry is actually a node shader.
  if (NodeLoc.isInvalid()) {
    return;
  }
  // save NodeLaunch type for use later
  if (auto NodeLaunchAttr = FD->getAttr<HLSLNodeLaunchAttr>()) {
    NodeLaunchTy =
        ShaderModel::NodeLaunchTypeFromName(NodeLaunchAttr->getLaunchType());
    NodeLaunchLoc = NodeLaunchAttr->getLocation();
  } else {
    // Broadcasting is the default launch type when none is specified.
    NodeLaunchTy = DXIL::NodeLaunchType::Broadcasting;
    NodeLaunchLoc = SourceLocation();
  }
  // Check that if a Thread launch node has the NumThreads attribute the
  // thread group size is (1,1,1)
  if (NodeLaunchTy == DXIL::NodeLaunchType::Thread) {
    if (auto NumThreads = FD->getAttr<HLSLNumThreadsAttr>()) {
      if (NumThreads->getX() != 1 || NumThreads->getY() != 1 ||
          NumThreads->getZ() != 1) {
        S.Diags.Report(NumThreads->getLocation(),
                       diag::err_hlsl_wg_thread_launch_group_size)
            << NumThreads->getRange();
        // Only output the note if the source location is valid
        if (NodeLaunchLoc.isValid())
          S.Diags.Report(NodeLaunchLoc, diag::note_defined_here)
              << "Launch type";
      }
    }
  } else if (!FD->hasAttr<HLSLNumThreadsAttr>()) {
    // All other launch types require the NumThreads attribute.
    S.Diags.Report(FD->getLocation(), diag::err_hlsl_missing_node_attr)
        << FD->getName() << ShaderModel::GetNodeLaunchTypeName(NodeLaunchTy)
        << "numthreads";
  }
  // Shader-model-dependent checks run only for the active entry; an
  // inactive entry may be valid under a different target profile.
  if (isActiveEntry) {
    if (auto WaveSizeAttr = FD->getAttr<HLSLWaveSizeAttr>()) {
      std::string profile = S.getLangOpts().HLSLProfile;
      const ShaderModel *SM = hlsl::ShaderModel::GetByName(profile.c_str());
      if (!SM->IsSM66Plus()) {
        S.Diags.Report(WaveSizeAttr->getRange().getBegin(),
                       diag::err_hlsl_attribute_in_wrong_shader_model)
            << "wavesize"
            << "6.6";
      }
    }
  }
  auto *NodeDG = FD->getAttr<HLSLNodeDispatchGridAttr>();
  auto *NodeMDG = FD->getAttr<HLSLNodeMaxDispatchGridAttr>();
  if (NodeLaunchTy != DXIL::NodeLaunchType::Broadcasting) {
    // NodeDispatchGrid is only valid for Broadcasting nodes
    if (NodeDG) {
      S.Diags.Report(NodeDG->getLocation(), diag::err_hlsl_launch_type_attr)
          << NodeDG->getSpelling()
          << ShaderModel::GetNodeLaunchTypeName(
                 DXIL::NodeLaunchType::Broadcasting)
          << NodeDG->getRange();
      // Only output the note if the source location is valid
      if (NodeLaunchLoc.isValid())
        S.Diags.Report(NodeLaunchLoc, diag::note_defined_here) << "Launch type";
    }
    // NodeMaxDispatchGrid is only valid for Broadcasting nodes
    if (NodeMDG) {
      S.Diags.Report(NodeMDG->getLocation(), diag::err_hlsl_launch_type_attr)
          << NodeMDG->getSpelling()
          << ShaderModel::GetNodeLaunchTypeName(
                 DXIL::NodeLaunchType::Broadcasting)
          << NodeMDG->getRange();
      // Only output the note if the source location is valid
      if (NodeLaunchLoc.isValid())
        S.Diags.Report(NodeLaunchLoc, diag::note_defined_here) << "Launch type";
    }
  } else {
    // A Broadcasting node must have one of NodeDispatchGrid or
    // NodeMaxDispatchGrid
    if (!NodeMDG && !NodeDG)
      S.Diags.Report(FD->getLocation(),
                     diag::err_hlsl_missing_dispatchgrid_attr)
          << FD->getName();
    // NodeDispatchGrid and NodeMaxDispatchGrid may not be used together
    if (NodeMDG && NodeDG) {
      S.Diags.Report(NodeMDG->getLocation(),
                     diag::err_hlsl_incompatible_node_attr)
          << FD->getName() << NodeMDG->getSpelling() << NodeDG->getSpelling()
          << NodeMDG->getRange();
      S.Diags.Report(NodeDG->getLocation(), diag::note_defined_here)
          << NodeDG->getSpelling();
    }
    // Diagnose dispatch grid semantics.
    bool Found = false;
    for (ParmVarDecl *PD : FD->params()) {
      QualType ParamType = PD->getType().getCanonicalType();
      // Find parameter that is the node input record
      if (hlsl::IsHLSLNodeInputType(ParamType)) {
        // Node records are template types
        if (RecordDecl *NodeStructDecl =
                hlsl::GetRecordDeclFromNodeObjectType(ParamType)) {
          // Diagnose any SV_DispatchGrid semantics used in record.
          DiagnoseDispatchGridSemantics(S, NodeStructDecl, PD->getLocation(),
                                        Found);
        }
      }
    }
    // Node with NodeMaxDispatchGrid must have SV_DispatchGrid semantic.
    if (NodeMDG && !Found) {
      S.Diags.Report(FD->getLocation(),
                     diag::err_hlsl_missing_dispatchgrid_semantic)
          << FD->getName();
    }
  }
  // Diagnose node output records (SV_DispatchGrid placement/type checks).
  for (ParmVarDecl *PD : FD->params()) {
    QualType ParamType = PD->getType().getCanonicalType();
    // Find parameter that is the node input record
    if (hlsl::IsHLSLNodeOutputType(ParamType)) {
      // Node records are template types
      if (RecordDecl *NodeStructDecl =
              hlsl::GetRecordDeclFromNodeObjectType(ParamType)) {
        // Diagnose any SV_DispatchGrid semantics used in record.
        bool OutputFound = false;
        DiagnoseDispatchGridSemantics(S, NodeStructDecl, PD->getLocation(),
                                      OutputFound);
      }
    }
  }
  if (!FD->getReturnType()->isVoidType())
    S.Diag(FD->getLocation(), diag::err_shader_must_return_void) << StageName;
  // Check parameter constraints
  for (unsigned Idx = 0; Idx < FD->getNumParams(); ++Idx) {
    ParmVarDecl *Param = FD->getParamDecl(Idx);
    clang::QualType ParamTy = Param->getType();
    auto *MaxRecordsAttr = Param->getAttr<HLSLMaxRecordsAttr>();
    auto *MaxRecordsSharedWithAttr =
        Param->getAttr<HLSLMaxRecordsSharedWithAttr>();
    auto *AllowSparseNodesAttr = Param->getAttr<HLSLAllowSparseNodesAttr>();
    auto *NodeArraySizeAttr = Param->getAttr<HLSLNodeArraySizeAttr>();
    auto *UnboundedSparseNodesAttr =
        Param->getAttr<HLSLUnboundedSparseNodesAttr>();
    // Check any node input is compatible with the node launch type
    if (hlsl::IsHLSLNodeInputType(ParamTy)) {
      InputCount++;
      if (NodeLaunchTy != DXIL::NodeLaunchType::Invalid &&
          !nodeInputIsCompatible(GetNodeIOType(Param->getType()),
                                 NodeLaunchTy)) {
        const RecordType *RT = Param->getType()->getAs<RecordType>();
        S.Diags.Report(Param->getLocation(), diag::err_hlsl_wg_input_kind)
            << RT->getDecl()->getName()
            << ShaderModel::GetNodeLaunchTypeName(NodeLaunchTy)
            << (static_cast<unsigned>(NodeLaunchTy) - 1)
            << Param->getSourceRange();
        if (NodeLaunchLoc.isValid())
          S.Diags.Report(NodeLaunchLoc, diag::note_defined_here)
              << "Launch type";
      }
      // At most one node input record is allowed per entry.
      if (InputCount > 1)
        S.Diags.Report(Param->getLocation(),
                       diag::err_hlsl_too_many_node_inputs)
            << FD->getName() << Param->getSourceRange();
      if (MaxRecordsAttr && NodeLaunchTy != DXIL::NodeLaunchType::Coalescing) {
        S.Diags.Report(MaxRecordsAttr->getLocation(),
                       diag::err_hlsl_maxrecord_on_wrong_launch)
            << MaxRecordsAttr->getRange();
      }
    } else if (hlsl::IsHLSLNodeOutputType(ParamTy)) {
      // If node output is not an array, diagnose array only attributes
      if (((uint32_t)GetNodeIOType(ParamTy) &
           (uint32_t)DXIL::NodeIOFlags::NodeArray) == 0) {
        Attr *ArrayAttrs[] = {NodeArraySizeAttr, UnboundedSparseNodesAttr};
        for (auto *A : ArrayAttrs) {
          if (A) {
            S.Diags.Report(A->getLocation(),
                           diag::err_hlsl_wg_attr_only_on_output_array)
                << A << A->getRange();
          }
        }
      }
    } else {
      // Neither input nor output: MaxRecords is not applicable here.
      if (MaxRecordsAttr) {
        S.Diags.Report(MaxRecordsAttr->getLocation(),
                       diag::err_hlsl_wg_attr_only_on_output_or_input_record)
            << MaxRecordsAttr << MaxRecordsAttr->getRange();
      }
    }
    // These attributes are only meaningful on node output parameters.
    if (!hlsl::IsHLSLNodeOutputType(ParamTy)) {
      Attr *OutputOnly[] = {MaxRecordsSharedWithAttr, AllowSparseNodesAttr,
                            NodeArraySizeAttr, UnboundedSparseNodesAttr};
      for (auto *A : OutputOnly) {
        if (A) {
          S.Diags.Report(A->getLocation(),
                         diag::err_hlsl_wg_attr_only_on_output)
              << A << A->getRange();
        }
      }
    }
    // NodeArraySize(N) for finite N conflicts with UnboundedSparseNodes.
    if (UnboundedSparseNodesAttr && NodeArraySizeAttr &&
        NodeArraySizeAttr->getCount() != -1) {
      S.Diags.Report(NodeArraySizeAttr->getLocation(),
                     diag::err_hlsl_wg_nodearraysize_conflict_unbounded)
          << NodeArraySizeAttr->getCount() << NodeArraySizeAttr->getRange();
      S.Diags.Report(UnboundedSparseNodesAttr->getLocation(),
                     diag::note_conflicting_attribute)
          << UnboundedSparseNodesAttr->getRange();
    }
    // arrays of NodeOutput or EmptyNodeOutput are not supported as node
    // parameters
    if (ParamTy->isArrayType()) {
      const ArrayType *AT = dyn_cast<ArrayType>(ParamTy);
      DXIL::NodeIOKind Kind = GetNodeIOType(AT->getElementType());
      if (Kind != DXIL::NodeIOKind::Invalid) {
        Param->setInvalidDecl();
        S.Diags.Report(Param->getLocation(), diag::err_hlsl_array_disallowed)
            << ParamTy << /*entry parameter*/ 0;
        if (Kind == DXIL::NodeIOKind::NodeOutput ||
            Kind == DXIL::NodeIOKind::EmptyOutput)
          S.Diags.Report(Param->getLocation(), diag::note_hlsl_node_array)
              << HLSLNodeObjectAttr::ConvertRecordTypeToStr(Kind);
      }
    }
    // MaxRecordsSharedWith must name another parameter that is an output
    // record.
    HLSLMaxRecordsSharedWithAttr *ExistingMRSWA =
        Param->getAttr<HLSLMaxRecordsSharedWithAttr>();
    if (ExistingMRSWA) {
      StringRef sharedName = ExistingMRSWA->getName()->getName();
      bool Found = false;
      for (const ParmVarDecl *ParamDecl : FD->params()) {
        // validation that MRSW doesn't reference its own parameter is
        // already done at
        // SemaHLSL.cpp:ValidateMaxRecordsSharedWithAttributes so we don't
        // need to check that we are on the same argument.
        if (ParamDecl->getName() == sharedName) {
          // now we need to check that this parameter has an output record type.
          hlsl::NodeFlags nodeFlags;
          if (GetHLSLNodeIORecordType(ParamDecl, nodeFlags)) {
            hlsl::NodeIOProperties node(nodeFlags);
            if (node.Flags.IsOutputNode()) {
              Found = true;
              break;
            }
          }
        }
      }
      if (!Found) {
        S.Diag(ExistingMRSWA->getLocation(),
               diag::err_hlsl_maxrecordssharedwith_references_invalid_arg);
      }
    }
    // Make sure NodeTrackRWInputSharing attribute cannot be applied to
    // Input Records that are not RWDispatchNodeInputRecord
    if (hlsl::IsHLSLNodeInputType(ParamTy)) {
      hlsl::NodeFlags nodeFlags;
      if (GetHLSLNodeIORecordType(Param, nodeFlags)) {
        hlsl::NodeIOProperties node(nodeFlags);
        // determine if the NodeTrackRWInputSharing is an attribute on the
        // template type
        clang::RecordDecl *RD = hlsl::GetRecordDeclFromNodeObjectType(ParamTy);
        if (RD) {
          // Emit a diagnostic if the record is not RWDispatchNode and
          // if it has the NodeTrackRWInputSharing attribute
          if (RD->hasAttr<HLSLNodeTrackRWInputSharingAttr>() &&
              node.Flags.GetNodeIOKind() !=
                  DXIL::NodeIOKind::RWDispatchNodeInputRecord) {
            S.Diags.Report(Param->getLocation(),
                           diag::err_hlsl_wg_nodetrackrwinputsharing_invalid);
          }
        }
      }
    }
  }
  DiagnoseSVForLaunchType(FD, NodeLaunchTy, S.Diags);
  return;
}
// If this is the entry FD for a non-library compilation, try adding the
// target-profile shader attribute to the FD and carry on with validation.
void TryAddShaderAttrFromTargetProfile(Sema &S, FunctionDecl *FD,
                                       bool &isActiveEntry) {
  // When isActiveEntry is true and this function is an entry point, this entry
  // point is used in compilation. This is an important distinction when
  // diagnosing certain types of errors based on the compilation parameters. For
  // example, if isActiveEntry is false, diagnostics dependent on the shader
  // model should not be performed. That way we won't raise an error about a
  // feature used by the inactive entry that's not available in the current
  // shader model. Since that entry point is not used, it may still be valid in
  // another compilation where a different shader model is specified.
  isActiveEntry = false;
  const std::string &EntryPointName = S.getLangOpts().HLSLEntryFunction;
  // if there's no defined entry point, just return
  if (EntryPointName.empty())
    return;
  // if this FD isn't the entry point, then we shouldn't add
  // a shader attribute to this decl, so just return
  if (!FD->getIdentifier() || EntryPointName != FD->getIdentifier()->getName())
    return;
  // At this point, we've found the active entry; note that and try to add
  // the shader attr. (The previous code assigned isActiveEntry = true twice;
  // once is sufficient.)
  isActiveEntry = true;
  std::string profile = S.getLangOpts().HLSLProfile;
  const ShaderModel *SM = hlsl::ShaderModel::GetByName(profile.c_str());
  const llvm::StringRef fullName = ShaderModel::FullNameFromKind(SM->GetKind());
  // don't add the attribute for an invalid profile, like library
  if (fullName.empty()) {
    llvm_unreachable("invalid shader kind");
  }
  HLSLShaderAttr *currentShaderAttr = FD->getAttr<HLSLShaderAttr>();
  // Don't add the attribute if it already exists as an attribute on the decl;
  // emit an error when the existing attribute conflicts with the profile.
  if (currentShaderAttr) {
    llvm::StringRef currentFullName = currentShaderAttr->getStage();
    if (currentFullName != fullName) {
      S.Diag(currentShaderAttr->getLocation(),
             diag::err_hlsl_profile_conflicts_with_shader_attribute)
          << fullName << profile << currentFullName << EntryPointName;
    }
    // Don't add another attr if one exists, to prevent
    // more unrelated errors down the line.
    return;
  }
  HLSLShaderAttr *pShaderAttr =
      HLSLShaderAttr::CreateImplicit(S.Context, fullName);
  FD->addAttr(pShaderAttr);
}
// The compiler should emit a warning when an entry-point-only attribute
// is detected without the presence of a shader attribute,
// to prevent reliance on deprecated behavior
// (where the compiler would infer a specific shader kind based on
// a present entry-point-only attribute).
void WarnOnEntryAttrWithoutShaderAttr(Sema &S, FunctionDecl *FD) {
  if (!FD->hasAttrs())
    return;
  for (Attr *A : FD->getAttrs()) {
    switch (A->getKind()) {
    // Entry-Function-only attributes
    case clang::attr::HLSLClipPlanes:
    case clang::attr::HLSLDomain:
    case clang::attr::HLSLEarlyDepthStencil:
    case clang::attr::HLSLInstance:
    case clang::attr::HLSLMaxTessFactor:
    case clang::attr::HLSLNumThreads:
    case clang::attr::HLSLRootSignature:
    case clang::attr::HLSLOutputControlPoints:
    case clang::attr::HLSLOutputTopology:
    case clang::attr::HLSLPartitioning:
    case clang::attr::HLSLPatchConstantFunc:
    case clang::attr::HLSLMaxVertexCount:
    case clang::attr::HLSLWaveSize:
    case clang::attr::HLSLNodeLaunch:
    case clang::attr::HLSLNodeIsProgramEntry:
    case clang::attr::HLSLNodeId:
    case clang::attr::HLSLNodeLocalRootArgumentsTableIndex:
    case clang::attr::HLSLNodeShareInputOf:
    case clang::attr::HLSLNodeDispatchGrid:
    case clang::attr::HLSLNodeMaxDispatchGrid:
    case clang::attr::HLSLNodeMaxRecursionDepth:
      S.Diag(A->getLocation(),
             diag::warn_hlsl_entry_attribute_without_shader_attribute)
          << A->getSpelling();
      break;
    // BUGFIX: add an explicit default so the switch over attr::Kind does not
    // emit -Wswitch warnings for every attribute kind we intentionally
    // ignore here.
    default:
      break;
    }
  }
}
// The DiagnoseEntry function does two things:
// 1. Determines whether this function is the current entry point for a
//    non-library compilation, adding an implicit shader attribute if so.
// 2. For an entry point function, now identified by the shader attribute,
//    diagnoses entry point constraints:
//    a. whether all entry point attributes on the decl are allowed on this
//       entry point type (ShaderKind) at all;
//    b. the full entry point decl for required attributes, constraints on
//       or between attributes and parameters, and more.
void DiagnoseEntry(Sema &S, FunctionDecl *FD) {
  bool isActiveEntry = false;
  if (S.getLangOpts().IsHLSLLibrary) {
    // TODO: Analyze -exports option to determine which entries
    // are active for lib target. For now, assume all entries are active.
    isActiveEntry = true;
  } else {
    TryAddShaderAttrFromTargetProfile(S, FD, isActiveEntry);
  }
  HLSLShaderAttr *shaderAttr = FD->getAttr<HLSLShaderAttr>();
  if (!shaderAttr) {
    // Not an entry point; in a library, still warn about entry-only
    // attributes used without a shader attribute.
    if (S.getLangOpts().IsHLSLLibrary)
      WarnOnEntryAttrWithoutShaderAttr(S, FD);
    return;
  }
  llvm::StringRef StageName = shaderAttr->getStage();
  DXIL::ShaderKind Stage = ShaderModel::KindFromFullName(StageName);
  DiagnoseEntryAttrAllowedOnStage(&S, FD, Stage);
  // Dispatch to the stage-specific entry diagnostics.
  switch (Stage) {
  case DXIL::ShaderKind::Pixel:
  case DXIL::ShaderKind::Library:
  case DXIL::ShaderKind::Invalid:
    return;
  case DXIL::ShaderKind::Vertex:
    return DiagnoseVertexEntry(S, FD, StageName);
  case DXIL::ShaderKind::Amplification:
    return DiagnoseAmplificationEntry(S, FD, StageName);
  case DXIL::ShaderKind::Mesh:
    return DiagnoseMeshEntry(S, FD, StageName);
  case DXIL::ShaderKind::Domain:
    return DiagnoseDomainEntry(S, FD, StageName);
  case DXIL::ShaderKind::Hull:
    return DiagnoseHullEntry(S, FD, StageName);
  case DXIL::ShaderKind::Geometry:
    return DiagnoseGeometryEntry(S, FD, StageName);
  case DXIL::ShaderKind::Callable:
    return DiagnoseCallableEntry(S, FD, StageName);
  case DXIL::ShaderKind::Miss:
  case DXIL::ShaderKind::AnyHit:
    return DiagnoseMissOrAnyHitEntry(S, FD, StageName, Stage);
  case DXIL::ShaderKind::RayGeneration:
  case DXIL::ShaderKind::Intersection:
    return DiagnoseRayGenerationOrIntersectionEntry(S, FD, StageName);
  case DXIL::ShaderKind::ClosestHit:
    return DiagnoseClosestHitEntry(S, FD, StageName);
  case DXIL::ShaderKind::Compute:
    return DiagnoseComputeEntry(S, FD, StageName, isActiveEntry);
  case DXIL::ShaderKind::Node:
    // A compute shader may also be a node, so we check it here.
    return DiagnoseNodeEntry(S, FD, StageName, isActiveEntry);
  }
}
} // namespace hlsl
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.