AtomicExpand: Refactor atomic instruction handling (#102914)

Move the per-instruction processing into a helper function, and avoid
redundantly re-checking the kind of atomic instruction. Counting the
assert, the old loop body effectively performed the same check three
times.
Matt Arsenault, 2024-08-13 19:51:53 +04:00 (committed by GitHub)
parent 55323ca6c8
commit 2d7a2c1212

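As the commit message above describes, the per-instruction work moves out of the loop in run() and into processAtomicInstr(), which classifies the instruction once, handles it, and reports whether anything changed. The sketch below is a minimal standalone illustration of that shape, not the pass's actual code: Inst, Load, Store, processOne, and run are invented stand-ins, and plain dynamic_cast takes the place of LLVM's dyn_cast.

#include <iostream>
#include <memory>
#include <vector>

// Hypothetical stand-ins for the IR classes; every name here is invented
// for the illustration and is not an LLVM API.
struct Inst {
  virtual ~Inst() = default;
  bool Oversized = false; // pretend "size not supported by the target"
};
struct Load : Inst {};
struct Store : Inst {};

// After the refactor, the per-instruction work lives in one helper that
// classifies the instruction once, handles it, and reports whether it
// changed anything. Unhandled kinds simply return false instead of being
// re-checked by an assert and a second if/else chain.
static bool processOne(Inst *I) {
  if (auto *L = dynamic_cast<Load *>(I)) {
    if (L->Oversized) {
      L->Oversized = false; // stand-in for "expand to a libcall"
      return true;
    }
    return false;
  }
  if (auto *S = dynamic_cast<Store *>(I)) {
    if (S->Oversized) {
      S->Oversized = false;
      return true;
    }
    return false;
  }
  return false; // not a kind this pass cares about
}

// The driver loop just accumulates the per-instruction results.
static bool run(std::vector<std::unique_ptr<Inst>> &Insts) {
  bool MadeChange = false;
  for (auto &I : Insts)
    if (processOne(I.get()))
      MadeChange = true;
  return MadeChange;
}

int main() {
  std::vector<std::unique_ptr<Inst>> Insts;
  Insts.push_back(std::make_unique<Load>());
  Insts.back()->Oversized = true;
  Insts.push_back(std::make_unique<Store>());
  std::cout << (run(Insts) ? "changed\n" : "no change\n");
}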

@@ -119,6 +119,8 @@ private:
llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
CreateCmpXchgInstFun CreateCmpXchg);
bool processAtomicInstr(Instruction *I);
public:
bool run(Function &F, const TargetMachine *TM);
};
@@ -203,71 +205,60 @@ static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}
bool AtomicExpandImpl::run(Function &F, const TargetMachine *TM) {
const auto *Subtarget = TM->getSubtargetImpl(F);
if (!Subtarget->enableAtomicExpand())
return false;
TLI = Subtarget->getTargetLowering();
DL = &F.getDataLayout();
SmallVector<Instruction *, 1> AtomicInsts;
// Changing control-flow while iterating through it is a bad idea, so gather a
// list of all atomic instructions before we start.
for (Instruction &I : instructions(F))
if (I.isAtomic() && !isa<FenceInst>(&I))
AtomicInsts.push_back(&I);
bool AtomicExpandImpl::processAtomicInstr(Instruction *I) {
auto *LI = dyn_cast<LoadInst>(I);
auto *SI = dyn_cast<StoreInst>(I);
auto *RMWI = dyn_cast<AtomicRMWInst>(I);
auto *CASI = dyn_cast<AtomicCmpXchgInst>(I);
bool MadeChange = false;
for (auto *I : AtomicInsts) {
auto LI = dyn_cast<LoadInst>(I);
auto SI = dyn_cast<StoreInst>(I);
auto RMWI = dyn_cast<AtomicRMWInst>(I);
auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction");
// If the Size/Alignment is not supported, replace with a libcall.
if (LI) {
if (!LI->isAtomic())
return false;
if (!atomicSizeSupported(TLI, LI)) {
expandAtomicLoadToLibcall(LI);
return true;
}
if (TLI->shouldCastAtomicLoadInIR(LI) ==
TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
I = LI = convertAtomicLoadToIntegerType(LI);
MadeChange = true;
continue;
}
} else if (SI) {
if (!SI->isAtomic())
return false;
if (!atomicSizeSupported(TLI, SI)) {
expandAtomicStoreToLibcall(SI);
return true;
}
if (TLI->shouldCastAtomicStoreInIR(SI) ==
TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
I = SI = convertAtomicStoreToIntegerType(SI);
MadeChange = true;
continue;
}
} else if (RMWI) {
if (!atomicSizeSupported(TLI, RMWI)) {
expandAtomicRMWToLibcall(RMWI);
return true;
}
if (TLI->shouldCastAtomicRMWIInIR(RMWI) ==
TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
I = RMWI = convertAtomicXchgToIntegerType(RMWI);
MadeChange = true;
continue;
}
} else if (CASI) {
if (!atomicSizeSupported(TLI, CASI)) {
expandAtomicCASToLibcall(CASI);
MadeChange = true;
continue;
}
return true;
}
if (LI && TLI->shouldCastAtomicLoadInIR(LI) ==
TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
I = LI = convertAtomicLoadToIntegerType(LI);
MadeChange = true;
} else if (SI &&
TLI->shouldCastAtomicStoreInIR(SI) ==
TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
I = SI = convertAtomicStoreToIntegerType(SI);
MadeChange = true;
} else if (RMWI &&
TLI->shouldCastAtomicRMWIInIR(RMWI) ==
TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
I = RMWI = convertAtomicXchgToIntegerType(RMWI);
MadeChange = true;
} else if (CASI) {
// TODO: when we're ready to make the change at the IR level, we can
// extend convertCmpXchgToInteger for floating point too.
if (CASI->getCompareOperand()->getType()->isPointerTy()) {
@@ -276,7 +267,8 @@ bool AtomicExpandImpl::run(Function &F, const TargetMachine *TM) {
I = CASI = convertCmpXchgToIntegerType(CASI);
MadeChange = true;
}
}
} else
return false;
if (TLI->shouldInsertFencesForAtomic(I)) {
auto FenceOrdering = AtomicOrdering::Monotonic;
@@ -340,12 +332,38 @@ bool AtomicExpandImpl::run(Function &F, const TargetMachine *TM) {
if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
MadeChange = true;
} else {
MadeChange |= tryExpandAtomicRMW(RMWI);
}
} else if (CASI)
MadeChange |= tryExpandAtomicCmpXchg(CASI);
return MadeChange;
}
bool AtomicExpandImpl::run(Function &F, const TargetMachine *TM) {
const auto *Subtarget = TM->getSubtargetImpl(F);
if (!Subtarget->enableAtomicExpand())
return false;
TLI = Subtarget->getTargetLowering();
DL = &F.getDataLayout();
bool MadeChange = false;
SmallVector<Instruction *, 1> AtomicInsts;
// Changing control-flow while iterating through it is a bad idea, so gather a
// list of all atomic instructions before we start.
for (Instruction &I : instructions(F))
if (I.isAtomic() && !isa<FenceInst>(&I))
AtomicInsts.push_back(&I);
for (auto *I : AtomicInsts) {
if (processAtomicInstr(I))
MadeChange = true;
}
return MadeChange;
}
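
The comment kept in run() ("Changing control-flow while iterating through it is a bad idea...") is why the pass still works in two phases even after the refactor: expanding an atomic can insert, replace, or erase instructions, which is awkward to do while a loop is still visiting them. Below is a small self-contained sketch of that collect-then-mutate idiom, assuming invented names (Block, runPass) and standard containers rather than LLVM IR.

#include <list>
#include <string>
#include <vector>

// Hypothetical stand-in for a function body: a list of "instructions".
// All names are invented for this illustration.
using Block = std::list<std::string>;

static bool runPass(Block &B) {
  // Phase 1: record what needs rewriting without touching the block.
  std::vector<Block::iterator> Worklist;
  for (auto It = B.begin(); It != B.end(); ++It)
    if (*It == "atomicrmw")
      Worklist.push_back(It);

  // Phase 2: rewrite each recorded entry. Splicing in replacement
  // "instructions" and erasing the original is easy here because no
  // loop is iterating over the block at the same time.
  bool MadeChange = false;
  for (auto It : Worklist) {
    B.insert(It, "loop-head"); // expanded sequence takes its place
    B.insert(It, "cmpxchg");
    B.erase(It);               // drop the original entry
    MadeChange = true;
  }
  return MadeChange;
}

int main() {
  Block B = {"load", "atomicrmw", "store"};
  bool Changed = runPass(B); // B becomes: load, loop-head, cmpxchg, store
  return Changed ? 0 : 1;
}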