
This is a major change to how we represent nested name qualifications in the AST.

* The nested name specifier itself and how it's stored is changed. The prefixes for types are handled within the type hierarchy, which makes canonicalization for them very cheap: no memory allocation is required, and translating a type into nested-name-specifier form becomes a no-op. An identifier is stored as a `DependentNameType`. The nested name specifier gains a lightweight handle class, to be used instead of passing around pointers, similar to what is implemented for `TemplateName`. There is still one free bit available, and this handle can be used within a `PointerUnion` and `PointerIntPair`, which should keep bit-packing aficionados happy (see the sketch at the end of this description).
* The `ElaboratedType` node is removed; all type nodes to which it could previously apply can now store the elaborated keyword and name qualifier, tail-allocating when present.
* `TagType`s can now point to the exact declaration found when producing them, as opposed to the previous situation of there being only one `TagType` per entity. This increases the amount of type sugar retained, and has several applications, for example in tracking module ownership and in other tools that care about source file origins, such as IWYU. These `TagType`s are lazily allocated, in order to limit the increase in AST size.

This patch offers a significant performance benefit: it greatly improves compilation time for [stdexec](https://github.com/NVIDIA/stdexec). As one data point, for `test_on2.cpp` in that project, which is the slowest-compiling test, this patch improves `-c` compilation time by about 7.2%, with the `-fsyntax-only` improvement being around 12%. It also shows great results on compile-time-tracker.

This patch further enables other optimizations in the future, and will reduce the performance impact of template specialization resugaring when that lands. It also contains some miscellaneous drive-by fixes.

About the review: yes, the patch is huge, sorry about that. Part of the reason is that I started with the nested name specifier part, before the `ElaboratedType` part, but that had a huge performance downside, as `ElaboratedType` is a big performance hog, and I didn't have the steam to go back and restructure the patch after the fact. There are also a lot of internal API changes, and it made sense to remove `ElaboratedType` in one go, rather than removing it from one type at a time, as that would present much more churn to users. Also, giving the nested name specifier a different API avoids missing changes related to how prefixes work now, which could otherwise make existing code compile but not work.

How to review: the important changes are all in `clang/include/clang/AST` and `clang/lib/AST`, with further important changes in `clang/lib/Sema/TreeTransform.h`. The rest, and the bulk of the changes, are mostly consequences of the API changes.

PS: `TagType::getDecl` is renamed to `getOriginalDecl` in this patch, just to make rebasing easier. I plan to rename it back after this lands.

Fixes #136624
Fixes https://github.com/llvm/llvm-project/issues/43179
Fixes https://github.com/llvm/llvm-project/issues/68670
Fixes https://github.com/llvm/llvm-project/issues/92757
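As a rough illustration only (the names `SpecifierHandle` and `SpecifierStorage` below are hypothetical, not the actual Clang API), this is the general shape of such a pointer-sized value handle: it tags the low bits of an aligned pointer and deliberately leaves one bit free, so clients can still pack it into an `llvm::PointerIntPair` or `PointerUnion`.

```cpp
// Hypothetical sketch of a "lightweight handle" for nested name specifiers,
// in the spirit described above (this is NOT the actual Clang class).
#include <cassert>
#include <cstdint>

struct alignas(4) SpecifierStorage {}; // uniqued payload, owned by the context

class SpecifierHandle {
  // Layout: [ payload pointer | tag bit 0 ]. Bit 1 is deliberately left
  // unused so clients can wrap the handle in a PointerIntPair themselves.
  uintptr_t Value = 0;

public:
  SpecifierHandle() = default;
  SpecifierHandle(SpecifierStorage *S, bool IsTypeForm)
      : Value(reinterpret_cast<uintptr_t>(S) | uintptr_t(IsTypeForm)) {
    assert((reinterpret_cast<uintptr_t>(S) & 0x3) == 0 &&
           "payload must be at least 4-byte aligned");
  }

  bool isTypeForm() const { return Value & 0x1; }
  SpecifierStorage *getStorage() const {
    return reinterpret_cast<SpecifierStorage *>(Value & ~uintptr_t(0x3));
  }
  explicit operator bool() const { return Value != 0; }

  // Value semantics: cheap to copy, compare, and pass by value,
  // much like TemplateName.
  friend bool operator==(SpecifierHandle A, SpecifierHandle B) {
    return A.Value == B.Value;
  }
  friend bool operator!=(SpecifierHandle A, SpecifierHandle B) {
    return !(A == B);
  }
};
```

The real class also has to distinguish the namespace, type, identifier (stored as a `DependentNameType`), and global-scope forms described above, with type prefixes kept in the type hierarchy itself, but the bit-packing idea is the same.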
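The file below, `clang/lib/CodeGen/ABIInfo.cpp`, is reproduced in full; note the `RT->getOriginalDecl()->getDefinitionOrSelf()` call in `isHomogeneousAggregate()`, one of the call sites using the `getOriginalDecl` name mentioned in the PS above.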
//===- ABIInfo.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfo.h"
#include "ABIInfoImpl.h"

using namespace clang;
using namespace clang::CodeGen;

// Pin the vtable to this file.
ABIInfo::~ABIInfo() = default;

CGCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); }

ASTContext &ABIInfo::getContext() const { return CGT.getContext(); }

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const { return CGT.getTarget(); }

const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
  return CGT.getCodeGenOpts();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }

bool ABIInfo::isOHOSFamily() const {
  return getTarget().getTriple().isOHOSFamily();
}

RValue ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                            QualType Ty, AggValueSlot Slot) const {
  return RValue::getIgnored();
}

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

bool ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
  // For compatibility with GCC, ignore empty bitfields in C++ mode.
  return getContext().getLangOpts().CPlusPlus;
}

bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
                                     uint64_t &Members) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getZExtSize();
    if (NElements == 0)
      return false;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
      return false;
    Members *= NElements;
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
    if (RD->hasFlexibleArrayMember())
      return false;

    Members = 0;

    // If this is a C++ record, check the properties of the record such as
    // bases and ABI specific restrictions
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD))
        return false;

      for (const auto &I : CXXRD->bases()) {
        // Ignore empty records.
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;

        uint64_t FldMembers;
        if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
          return false;

        Members += FldMembers;
      }
    }

    for (const auto *FD : RD->fields()) {
      // Ignore (non-zero arrays of) empty records.
      QualType FT = FD->getType();
      while (const ConstantArrayType *AT =
                 getContext().getAsConstantArrayType(FT)) {
        if (AT->isZeroSize())
          return false;
        FT = AT->getElementType();
      }
      if (isEmptyRecord(getContext(), FT, true))
        continue;

      if (isZeroLengthBitfieldPermittedInHomogeneousAggregate() &&
          FD->isZeroLengthBitField())
        continue;

      uint64_t FldMembers;
      if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
        return false;

      Members = (RD->isUnion() ?
                 std::max(Members, FldMembers) : Members + FldMembers);
    }

    if (!Base)
      return false;

    // Ensure there is no padding.
    if (getContext().getTypeSize(Base) * Members !=
        getContext().getTypeSize(Ty))
      return false;
  } else {
    Members = 1;
    if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
      Members = 2;
      Ty = CT->getElementType();
    }

    // Most ABIs only support float, double, and some vector type widths.
    if (!isHomogeneousAggregateBaseType(Ty))
      return false;

    // The base type must be the same for all members. Types that
    // agree in both total size and mode (float vs. vector) are
    // treated as being equivalent here.
    const Type *TyPtr = Ty.getTypePtr();
    if (!Base) {
      Base = TyPtr;
      // If it's a non-power-of-2 vector, its size is already a power-of-2,
      // so make sure to widen it explicitly.
      if (const VectorType *VT = Base->getAs<VectorType>()) {
        QualType EltTy = VT->getElementType();
        unsigned NumElements =
            getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
        Base = getContext()
                   .getVectorType(EltTy, NumElements, VT->getVectorKind())
                   .getTypePtr();
      }
    }

    if (Base->isVectorType() != TyPtr->isVectorType() ||
        getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
      return false;
  }
  return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
}

bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
  if (getContext().isPromotableIntegerType(Ty))
    return true;

  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
      return true;

  return false;
}

ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, unsigned AddrSpace,
                                            bool ByVal, bool Realign,
                                            llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
                                 AddrSpace, ByVal, Realign, Padding);
}

ABIArgInfo ABIInfo::getNaturalAlignIndirectInReg(QualType Ty,
                                                 bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByVal*/ false, Realign);
}

void ABIInfo::appendAttributeMangling(TargetAttr *Attr,
                                      raw_ostream &Out) const {
  if (Attr->isDefaultVersion())
    return;
  appendAttributeMangling(Attr->getFeaturesStr(), Out);
}

void ABIInfo::appendAttributeMangling(TargetVersionAttr *Attr,
                                      raw_ostream &Out) const {
  appendAttributeMangling(Attr->getNamesStr(), Out);
}

void ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
                                      raw_ostream &Out) const {
  appendAttributeMangling(Attr->getFeatureStr(Index), Out);
  Out << '.' << Attr->getMangledIndex(Index);
}

void ABIInfo::appendAttributeMangling(StringRef AttrStr,
                                      raw_ostream &Out) const {
  if (AttrStr == "default") {
    Out << ".default";
    return;
  }

  Out << '.';
  const TargetInfo &TI = CGT.getTarget();
  ParsedTargetAttr Info = TI.parseTargetAttr(AttrStr);

  llvm::sort(Info.Features, [&TI](StringRef LHS, StringRef RHS) {
    // Multiversioning doesn't allow "no-${feature}", so we can
    // only have "+" prefixes here.
    assert(LHS.starts_with("+") && RHS.starts_with("+") &&
           "Features should always have a prefix.");
    return TI.getFMVPriority({LHS.substr(1)})
        .ugt(TI.getFMVPriority({RHS.substr(1)}));
  });

  bool IsFirst = true;
  if (!Info.CPU.empty()) {
    IsFirst = false;
    Out << "arch_" << Info.CPU;
  }

  for (StringRef Feat : Info.Features) {
    if (!IsFirst)
      Out << '_';
    IsFirst = false;
    Out << Feat.substr(1);
  }
}

llvm::FixedVectorType *
ABIInfo::getOptimalVectorMemoryType(llvm::FixedVectorType *T,
                                    const LangOptions &Opt) const {
  if (T->getNumElements() == 3 && !Opt.PreserveVec3Type)
    return llvm::FixedVectorType::get(T->getElementType(), 4);
  return T;
}

// Pin the vtable to this file.
SwiftABIInfo::~SwiftABIInfo() = default;

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}Indirectly.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee. But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
bool SwiftABIInfo::occupiesMoreThan(ArrayRef<llvm::Type *> scalarTypes,
                                    unsigned maxAllRegisters) const {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = CGT.getTarget().getPointerWidth(LangAS::Default);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}

bool SwiftABIInfo::shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
                                        bool AsReturnValue) const {
  return occupiesMoreThan(ComponentTys, /*total=*/4);
}

bool SwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                                     unsigned NumElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
  return (VectorSize.getQuantity() > 8 && VectorSize.getQuantity() <= 16);
}
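For context on how the homogeneous-aggregate hooks above are typically consumed, here is a hedged sketch of a target `ABIInfo` subclass. `ExampleTargetABIInfo`, the four-member limit, and `classifyExampleArgument` are illustrative assumptions, not code from any real backend, and the snippet assumes the usual Clang CodeGen headers are available.

```cpp
// Illustrative only: a target overrides the two virtual homogeneous-aggregate
// predicates, then queries isHomogeneousAggregate() during argument
// classification.
namespace {
class ExampleTargetABIInfo : public ABIInfo {
public:
  using ABIInfo::ABIInfo;

  // Treat float/double (and vector types) as acceptable base elements.
  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    if (const auto *BT = Ty->getAs<BuiltinType>())
      return BT->getKind() == BuiltinType::Float ||
             BT->getKind() == BuiltinType::Double;
    return Ty->isVectorType();
  }

  // Cap the aggregate at four elements, a common HFA-style limit.
  bool isHomogeneousAggregateSmallEnough(const Type * /*Base*/,
                                         uint64_t Members) const override {
    return Members <= 4;
  }

  // Hypothetical helper: pass homogeneous aggregates directly (in registers),
  // everything else indirectly at natural alignment.
  ABIArgInfo classifyExampleArgument(QualType Ty) const {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members))
      return ABIArgInfo::getDirect();
    return getNaturalAlignIndirect(Ty, /*AddrSpace=*/0, /*ByVal=*/true);
  }
};
} // namespace
```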