Rename "Expand" to "CustomExpand"
This commit is contained in:
parent
d05704bce4
commit
9cdf588d22
@ -263,13 +263,12 @@ public:
|
||||
// greater atomic guarantees than a normal load.
|
||||
CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
|
||||
MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
|
||||
XChg, // Expand a store too large to be atomic into a xchg, then re-process
|
||||
// it.
|
||||
BitTestIntrinsic, // Use a target-specific intrinsic for special bit
|
||||
// operations; used by X86.
|
||||
CmpArithIntrinsic, // Use a target-specific intrinsic for special compare
|
||||
// operations; used by X86.
|
||||
Expand, // Generic expansion in terms of other atomic operations.
|
||||
CustomExpand, // Custom target-specific expansion using TLI hooks.
|
||||
|
||||
// Rewrite to a non-atomic form for use in a known non-preemptible
|
||||
// environment.
|
||||
@ -2391,8 +2390,8 @@ public:
|
||||
}
|
||||
|
||||
/// Returns how the given (atomic) store should be expanded by the IR-level
|
||||
/// AtomicExpand pass into. For instance AtomicExpansionKind::Expand will try
|
||||
/// to use an atomicrmw xchg.
|
||||
/// AtomicExpand pass into. For instance AtomicExpansionKind::CustomExpand
|
||||
/// will try to use an atomicrmw xchg.
|
||||
virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const {
|
||||
return AtomicExpansionKind::None;
|
||||
}
|
||||
|
@ -537,7 +537,7 @@ bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
|
||||
case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
|
||||
LI->setAtomic(AtomicOrdering::NotAtomic);
|
||||
return true;
|
||||
case TargetLoweringBase::AtomicExpansionKind::Expand:
|
||||
case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
|
||||
TLI->emitExpandAtomicLoad(LI);
|
||||
return true;
|
||||
default:
|
||||
@ -549,10 +549,10 @@ bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {
|
||||
switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
|
||||
case TargetLoweringBase::AtomicExpansionKind::None:
|
||||
return false;
|
||||
case TargetLoweringBase::AtomicExpansionKind::Expand:
|
||||
case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
|
||||
TLI->emitExpandAtomicStore(SI);
|
||||
return true;
|
||||
case TargetLoweringBase::AtomicExpansionKind::XChg:
|
||||
case TargetLoweringBase::AtomicExpansionKind::Expand:
|
||||
expandAtomicStoreToXChg(SI);
|
||||
return true;
|
||||
case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
|
||||
@ -747,7 +747,7 @@ bool AtomicExpandImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
|
||||
}
|
||||
case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
|
||||
return lowerAtomicRMWInst(AI);
|
||||
case TargetLoweringBase::AtomicExpansionKind::Expand:
|
||||
case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
|
||||
TLI->emitExpandAtomicRMW(AI);
|
||||
return true;
|
||||
default:
|
||||
@ -1701,7 +1701,7 @@ bool AtomicExpandImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
|
||||
return true;
|
||||
case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
|
||||
return lowerAtomicCmpXchgInst(CI);
|
||||
case TargetLoweringBase::AtomicExpansionKind::Expand: {
|
||||
case TargetLoweringBase::AtomicExpansionKind::CustomExpand: {
|
||||
TLI->emitExpandAtomicCmpXchg(CI);
|
||||
return true;
|
||||
}
|
||||
|
@ -28410,10 +28410,10 @@ AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
|
||||
if (isOpSuitableForRCPC3(SI))
|
||||
return AtomicExpansionKind::None;
|
||||
if (isOpSuitableForLSE128(SI))
|
||||
return AtomicExpansionKind::XChg;
|
||||
return AtomicExpansionKind::Expand;
|
||||
if (isOpSuitableForLDPSTP(SI))
|
||||
return AtomicExpansionKind::None;
|
||||
return AtomicExpansionKind::XChg;
|
||||
return AtomicExpansionKind::Expand;
|
||||
}
|
||||
|
||||
// Loads and stores less than 128-bits are already atomic; ones above that
|
||||
|
@ -17823,7 +17823,7 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
|
||||
if (AS == AMDGPUAS::FLAT_ADDRESS &&
|
||||
DL.getTypeSizeInBits(RMW->getType()) == 64 &&
|
||||
flatInstrMayAccessPrivate(RMW))
|
||||
return AtomicExpansionKind::Expand;
|
||||
return AtomicExpansionKind::CustomExpand;
|
||||
|
||||
auto ReportUnsafeHWInst = [=](TargetLowering::AtomicExpansionKind Kind) {
|
||||
OptimizationRemarkEmitter ORE(RMW->getFunction());
|
||||
@ -17898,7 +17898,7 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
|
||||
// does. InstCombine transforms these with 0 to or, so undo that.
|
||||
if (Constant *ConstVal = dyn_cast<Constant>(RMW->getValOperand());
|
||||
ConstVal && ConstVal->isNullValue())
|
||||
return AtomicExpansionKind::Expand;
|
||||
return AtomicExpansionKind::CustomExpand;
|
||||
}
|
||||
|
||||
// If the allocation could be in remote, fine-grained memory, the rmw
|
||||
@ -18027,9 +18027,9 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
|
||||
// fadd.
|
||||
if (Subtarget->hasLDSFPAtomicAddF32()) {
|
||||
if (RMW->use_empty() && Subtarget->hasAtomicFaddNoRtnInsts())
|
||||
return AtomicExpansionKind::Expand;
|
||||
return AtomicExpansionKind::CustomExpand;
|
||||
if (!RMW->use_empty() && Subtarget->hasAtomicFaddRtnInsts())
|
||||
return AtomicExpansionKind::Expand;
|
||||
return AtomicExpansionKind::CustomExpand;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -18109,7 +18109,7 @@ SITargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CmpX) const {
|
||||
|
||||
// If a 64-bit flat atomic may alias private, we need to avoid using the
|
||||
// atomic in the private case.
|
||||
return DL.getTypeSizeInBits(ValTy) == 64 ? AtomicExpansionKind::Expand
|
||||
return DL.getTypeSizeInBits(ValTy) == 64 ? AtomicExpansionKind::CustomExpand
|
||||
: AtomicExpansionKind::None;
|
||||
}
|
||||
|
||||
|
@ -21236,7 +21236,7 @@ ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
|
||||
has64BitAtomicStore = Subtarget->hasV6Ops();
|
||||
|
||||
unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
|
||||
return Size == 64 && has64BitAtomicStore ? AtomicExpansionKind::XChg
|
||||
return Size == 64 && has64BitAtomicStore ? AtomicExpansionKind::Expand
|
||||
: AtomicExpansionKind::None;
|
||||
}
|
||||
|
||||
|
@ -3938,7 +3938,7 @@ TargetLowering::AtomicExpansionKind
|
||||
HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
|
||||
// Do not expand loads and stores that don't exceed 64 bits.
|
||||
return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
|
||||
? AtomicExpansionKind::XChg
|
||||
? AtomicExpansionKind::Expand
|
||||
: AtomicExpansionKind::None;
|
||||
}
|
||||
|
||||
|
@ -7893,7 +7893,7 @@ LoongArchTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
|
||||
if (Size < 32 && (AI->getOperation() == AtomicRMWInst::And ||
|
||||
AI->getOperation() == AtomicRMWInst::Or ||
|
||||
AI->getOperation() == AtomicRMWInst::Xor))
|
||||
return AtomicExpansionKind::Expand;
|
||||
return AtomicExpansionKind::CustomExpand;
|
||||
if (AI->getOperation() == AtomicRMWInst::Nand || Size < 32)
|
||||
return AtomicExpansionKind::CmpXChg;
|
||||
}
|
||||
|
@ -31723,7 +31723,7 @@ X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
|
||||
return AtomicExpansionKind::None;
|
||||
}
|
||||
|
||||
return needsCmpXchgNb(MemType) ? AtomicExpansionKind::XChg
|
||||
return needsCmpXchgNb(MemType) ? AtomicExpansionKind::Expand
|
||||
: AtomicExpansionKind::None;
|
||||
}
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user