diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 8c5f03ce526b..c8f31221c35c 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -262,14 +262,13 @@ public:
     LLOnly,  // Expand the (load) instruction into just a load-linked, which has
              // greater atomic guarantees than a normal load.
     CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
-    MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
-    XChg, // Expand a store too large to be atomic into a xchg, then re-process
-          // it.
+    MaskedIntrinsic,  // Use a target-specific intrinsic for the LL/SC loop.
     BitTestIntrinsic, // Use a target-specific intrinsic for special bit
                       // operations; used by X86.
     CmpArithIntrinsic, // Use a target-specific intrinsic for special compare
                        // operations; used by X86.
     Expand, // Generic expansion in terms of other atomic operations.
+    CustomExpand, // Custom target-specific expansion using TLI hooks.
 
     // Rewrite to a non-atomic form for use in a known non-preemptible
     // environment.
@@ -2391,8 +2390,9 @@ public:
   }
 
   /// Returns how the given (atomic) store should be expanded by the IR-level
-  /// AtomicExpand pass into. For instance AtomicExpansionKind::Expand will try
-  /// to use an atomicrmw xchg.
+  /// AtomicExpand pass into. For instance AtomicExpansionKind::Expand will try
+  /// to use an atomicrmw xchg, while AtomicExpansionKind::CustomExpand calls
+  /// the target's emitExpandAtomicStore hook.
   virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const {
     return AtomicExpansionKind::None;
   }
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index b617b53385b5..601185d0d3cb 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -537,7 +537,7 @@ bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
   case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
     LI->setAtomic(AtomicOrdering::NotAtomic);
     return true;
-  case TargetLoweringBase::AtomicExpansionKind::Expand:
+  case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
     TLI->emitExpandAtomicLoad(LI);
     return true;
   default:
@@ -549,10 +549,10 @@
   switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
   case TargetLoweringBase::AtomicExpansionKind::None:
     return false;
-  case TargetLoweringBase::AtomicExpansionKind::Expand:
+  case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
     TLI->emitExpandAtomicStore(SI);
     return true;
-  case TargetLoweringBase::AtomicExpansionKind::XChg:
+  case TargetLoweringBase::AtomicExpansionKind::Expand:
     expandAtomicStoreToXChg(SI);
     return true;
   case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
@@ -747,7 +747,7 @@ bool AtomicExpandImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
   }
   case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
     return lowerAtomicRMWInst(AI);
-  case TargetLoweringBase::AtomicExpansionKind::Expand:
+  case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
     TLI->emitExpandAtomicRMW(AI);
     return true;
   default:
@@ -1701,7 +1701,7 @@ bool AtomicExpandImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
     return true;
   case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
     return lowerAtomicCmpXchgInst(CI);
-  case TargetLoweringBase::AtomicExpansionKind::Expand: {
+  case TargetLoweringBase::AtomicExpansionKind::CustomExpand: {
     TLI->emitExpandAtomicCmpXchg(CI);
     return true;
   }
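For context on what the renamed kinds select between, here is a minimal sketch of a backend override under the new names. The `MyTargetLowering` class is hypothetical and not part of this patch; the size threshold mirrors Hexagon's rule in the target changes below.

```cpp
// Hypothetical override showing the post-rename contract:
//   Expand       -> generic expansion (for stores, an atomicrmw xchg rewrite)
//   CustomExpand -> target-supplied IR via the TLI emitExpandAtomic* hooks
TargetLowering::AtomicExpansionKind
MyTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
  if (Size > 64)
    return AtomicExpansionKind::Expand; // would have been XChg before this patch
  return AtomicExpansionKind::None;     // already atomic; leave the store alone
}
```

The target diffs that follow are mechanical applications of this rename: store hooks that returned XChg now return Expand, and hooks that returned Expand for the TLI-driven path now return CustomExpand.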
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7075cd64e23a..f6b214078f58 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -28410,10 +28410,10 @@ AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
   if (isOpSuitableForRCPC3(SI))
     return AtomicExpansionKind::None;
   if (isOpSuitableForLSE128(SI))
-    return AtomicExpansionKind::XChg;
+    return AtomicExpansionKind::Expand;
   if (isOpSuitableForLDPSTP(SI))
     return AtomicExpansionKind::None;
-  return AtomicExpansionKind::XChg;
+  return AtomicExpansionKind::Expand;
 }
 
 // Loads and stores less than 128-bits are already atomic; ones above that
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 66c1dfc71c2f..080e30d42866 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -17823,7 +17823,7 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
   if (AS == AMDGPUAS::FLAT_ADDRESS &&
       DL.getTypeSizeInBits(RMW->getType()) == 64 &&
       flatInstrMayAccessPrivate(RMW))
-    return AtomicExpansionKind::Expand;
+    return AtomicExpansionKind::CustomExpand;
 
   auto ReportUnsafeHWInst = [=](TargetLowering::AtomicExpansionKind Kind) {
     OptimizationRemarkEmitter ORE(RMW->getFunction());
@@ -17898,7 +17898,7 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
       // does. InstCombine transforms these with 0 to or, so undo that.
      if (Constant *ConstVal = dyn_cast<Constant>(RMW->getValOperand());
          ConstVal && ConstVal->isNullValue())
-        return AtomicExpansionKind::Expand;
+        return AtomicExpansionKind::CustomExpand;
     }
 
     // If the allocation could be in remote, fine-grained memory, the rmw
@@ -18027,9 +18027,9 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
         // fadd.
         if (Subtarget->hasLDSFPAtomicAddF32()) {
           if (RMW->use_empty() && Subtarget->hasAtomicFaddNoRtnInsts())
-            return AtomicExpansionKind::Expand;
+            return AtomicExpansionKind::CustomExpand;
           if (!RMW->use_empty() && Subtarget->hasAtomicFaddRtnInsts())
-            return AtomicExpansionKind::Expand;
+            return AtomicExpansionKind::CustomExpand;
         }
       }
     }
@@ -18109,7 +18109,7 @@ SITargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CmpX) const {
 
   // If a 64-bit flat atomic may alias private, we need to avoid using the
   // atomic in the private case.
-  return DL.getTypeSizeInBits(ValTy) == 64 ? AtomicExpansionKind::Expand
+  return DL.getTypeSizeInBits(ValTy) == 64 ? AtomicExpansionKind::CustomExpand
                                            : AtomicExpansionKind::None;
 }
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index dd5dba402173..830156359e9e 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -21236,7 +21236,7 @@ ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
     has64BitAtomicStore = Subtarget->hasV6Ops();
 
   unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
-  return Size == 64 && has64BitAtomicStore ? AtomicExpansionKind::XChg
+  return Size == 64 && has64BitAtomicStore ? AtomicExpansionKind::Expand
                                            : AtomicExpansionKind::None;
 }
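The `Expand` path these store hooks now return is the old `XChg` behavior. A simplified sketch of what `expandAtomicStoreToXChg` does, based on the removed enum comment ("expand a store too large to be atomic into a xchg, then re-process it"); the real pass code also normalizes the ordering and immediately re-processes the new atomicrmw:

```cpp
// Simplified sketch, not the verbatim pass code: replace the atomic store
// with an atomicrmw xchg whose result is dead, then let AtomicExpand
// re-process the new instruction through the rmw expansion path.
static void storeToXChgSketch(StoreInst *SI) {
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI = Builder.CreateAtomicRMW(
      AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
      SI->getAlign(), SI->getOrdering());
  (void)AI; // Only the store side effect matters; the loaded value is unused.
  SI->eraseFromParent();
}
```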
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index e44626868454..c54b67ccd884 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3938,7 +3938,7 @@
 TargetLowering::AtomicExpansionKind
 HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
   // Do not expand loads and stores that don't exceed 64 bits.
   return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
-             ? AtomicExpansionKind::XChg
+             ? AtomicExpansionKind::Expand
              : AtomicExpansionKind::None;
 }
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 5b2d185594f4..e3929492f8c4 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -7893,7 +7893,7 @@ LoongArchTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
     if (Size < 32 && (AI->getOperation() == AtomicRMWInst::And ||
                       AI->getOperation() == AtomicRMWInst::Or ||
                       AI->getOperation() == AtomicRMWInst::Xor))
-      return AtomicExpansionKind::Expand;
+      return AtomicExpansionKind::CustomExpand;
     if (AI->getOperation() == AtomicRMWInst::Nand || Size < 32)
       return AtomicExpansionKind::CmpXChg;
   }
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 653b032039aa..19131fbd4102 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -31723,7 +31723,7 @@ X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
     return AtomicExpansionKind::None;
   }
 
-  return needsCmpXchgNb(MemType) ? AtomicExpansionKind::XChg
+  return needsCmpXchgNb(MemType) ? AtomicExpansionKind::Expand
                                  : AtomicExpansionKind::None;
 }
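On the `CustomExpand` side, the contract is that a target both requests the kind and implements the matching TLI hook that AtomicExpand dispatches to (`emitExpandAtomicLoad`, `emitExpandAtomicStore`, `emitExpandAtomicRMW`, `emitExpandAtomicCmpXchg` above). A hedged sketch with a hypothetical target and an elided hook body:

```cpp
// Hypothetical backend: request custom expansion for nand, then supply the
// replacement IR in the hook that AtomicExpand invokes for CustomExpand.
TargetLowering::AtomicExpansionKind
MyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  if (AI->getOperation() == AtomicRMWInst::Nand)
    return AtomicExpansionKind::CustomExpand; // handled by emitExpandAtomicRMW
  return AtomicExpansionKind::None;
}

void MyTargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {
  // Build target-specific IR that implements the operation, rewrite AI's
  // uses, then erase AI. (Elided; this only illustrates the hook's contract.)
}
```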