AMDGPU: Expand flat atomics that may access private memory (#109407)
If the runtime flat address resolves to a scratch address, 64-bit atomics do not work correctly. Insert a runtime address-space check (which is quite likely to be uniform) and select between the non-atomic and real atomic cases. Consider noalias.addrspace metadata and avoid this expansion when possible (we also need to consult it to avoid infinitely re-expanding after the predication code is added).
This commit is contained in: parent d6a0602cbb, commit 1d0370872f
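In IR terms, the expansion guards the flat atomic with an is.private check: the private path performs an ordinary (non-atomic) load/op/store, and the surviving atomic is tagged with !noalias.addrspace so it is not queued for expansion again. A minimal sketch for a hypothetical 64-bit add (block and value names follow the test updates below; %p and %val are illustrative, and the exact shape varies by operation and subtarget):

  %is.private = call i1 @llvm.amdgcn.is.private(ptr %p)
  br i1 %is.private, label %atomicrmw.private, label %atomicrmw.global

  atomicrmw.private:                          ; scratch: a plain load/store suffices
    %p.priv = addrspacecast ptr %p to ptr addrspace(5)
    %loaded.private = load i64, ptr addrspace(5) %p.priv, align 8
    %new = add i64 %loaded.private, %val
    store i64 %new, ptr addrspace(5) %p.priv, align 8
    br label %atomicrmw.phi

  atomicrmw.global:                           ; real atomic; private now excluded
    %loaded.global = atomicrmw add ptr %p, i64 %val seq_cst, !noalias.addrspace !0
    br label %atomicrmw.phi

  atomicrmw.phi:
    %res = phi i64 [ %loaded.private, %atomicrmw.private ], [ %loaded.global, %atomicrmw.global ]

  !0 = !{i32 5, i32 6}                        ; address space 5 (private) excluded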
@@ -39,6 +39,7 @@
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
 #include "llvm/IR/IntrinsicsR600.h"
+#include "llvm/IR/MDBuilder.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/KnownBits.h"
 #include "llvm/Support/ModRef.h"
@@ -16310,12 +16311,45 @@ atomicSupportedIfLegalIntType(const AtomicRMWInst *RMW) {
             : TargetLowering::AtomicExpansionKind::CmpXChg;
}

+/// Return if a flat address space atomicrmw can access private memory.
+static bool flatInstrMayAccessPrivate(const Instruction *I) {
+  const MDNode *NoaliasAddrSpaceMD =
+      I->getMetadata(LLVMContext::MD_noalias_addrspace);
+  if (!NoaliasAddrSpaceMD)
+    return true;
+
+  for (unsigned I = 0, E = NoaliasAddrSpaceMD->getNumOperands() / 2; I != E;
+       ++I) {
+    auto *Low = mdconst::extract<ConstantInt>(
+        NoaliasAddrSpaceMD->getOperand(2 * I + 0));
+    auto *High = mdconst::extract<ConstantInt>(
+        NoaliasAddrSpaceMD->getOperand(2 * I + 1));
+
+    if (Low->getValue().uge(AMDGPUAS::PRIVATE_ADDRESS) &&
+        High->getValue().ult(AMDGPUAS::PRIVATE_ADDRESS))
+      return true;
+  }
+
+  return false;
+}
+
TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
  unsigned AS = RMW->getPointerAddressSpace();
  if (AS == AMDGPUAS::PRIVATE_ADDRESS)
    return AtomicExpansionKind::NotAtomic;

+  // 64-bit flat atomics that dynamically reside in private memory will silently
+  // be dropped.
+  //
+  // Note that we will emit a new copy of the original atomic in the expansion,
+  // which will be incrementally relegalized.
+  const DataLayout &DL = RMW->getFunction()->getDataLayout();
+  if (AS == AMDGPUAS::FLAT_ADDRESS &&
+      DL.getTypeSizeInBits(RMW->getType()) == 64 &&
+      flatInstrMayAccessPrivate(RMW))
+    return AtomicExpansionKind::Expand;
+
  auto ReportUnsafeHWInst = [=](TargetLowering::AtomicExpansionKind Kind) {
    OptimizationRemarkEmitter ORE(RMW->getFunction());
    ORE.emit([=]() {
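For reference, !noalias.addrspace carries pairs of [low, high) address-space ranges that the access is known not to touch; the tests in this commit attach !{i32 5, i32 6}, which excludes exactly the private address space (5). A small illustrative use (pointer and operand names are hypothetical):

  %result = atomicrmw add ptr %ptr, i64 1 seq_cst, !noalias.addrspace !0
  !0 = !{i32 5, i32 6}   ; does not access address spaces in [5, 6)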
@@ -16716,20 +16750,34 @@ void SITargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {

  if (Op == AtomicRMWInst::Sub || Op == AtomicRMWInst::Or ||
      Op == AtomicRMWInst::Xor) {
-    // atomicrmw or %ptr, 0 -> atomicrmw add %ptr, 0
-    assert(cast<Constant>(AI->getValOperand())->isNullValue() &&
-           "this cannot be replaced with add");
-    AI->setOperation(AtomicRMWInst::Add);
-    return;
+    if (auto *ConstVal = dyn_cast<Constant>(AI->getValOperand());
+        ConstVal && ConstVal->isNullValue()) {
+      // atomicrmw or %ptr, 0 -> atomicrmw add %ptr, 0
+      AI->setOperation(AtomicRMWInst::Add);
+
+      // TODO: Turn the below private handling into a no-op for idempotent
+      // cases.
+    }
  }

-  assert(Subtarget->hasAtomicFaddInsts() &&
-         "target should have atomic fadd instructions");
-  assert(AI->getType()->isFloatTy() &&
-         AI->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS &&
-         "generic atomicrmw expansion only supports FP32 operand in flat "
-         "address space");
-  assert(Op == AtomicRMWInst::FAdd && "only fadd is supported for now");
+  // The non-flat expansions should only perform the de-canonicalization of
+  // identity values.
  if (AI->getPointerAddressSpace() != AMDGPUAS::FLAT_ADDRESS)
    return;

+  // FullFlatEmulation is true if we need to issue the private, shared, and
+  // global cases.
+  //
+  // If this is false, we are only dealing with the flat-targeting-private case,
+  // where we only insert a check for private and still use the flat instruction
+  // for global and shared.
+
+  // TODO: Avoid the private check for the fadd case depending on
+  // noalias.addrspace.
+
+  bool FullFlatEmulation = Op == AtomicRMWInst::FAdd &&
+                           Subtarget->hasAtomicFaddInsts() &&
+                           AI->getType()->isFloatTy();
+
  // Given: atomicrmw fadd ptr %addr, float %val ordering
  //
@@ -16769,6 +16817,10 @@ void SITargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {
  //
  // atomicrmw.end:
  //    [...]
  //
+  //
+  // For 64-bit atomics which may reside in private memory, we perform a simpler
+  // version that only inserts the private check, and uses the flat operation.
+
  IRBuilder<> Builder(AI);
  LLVMContext &Ctx = Builder.getContext();
@@ -16780,9 +16832,15 @@ void SITargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {
  Function *F = BB->getParent();
  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
-  BasicBlock *SharedBB = BasicBlock::Create(Ctx, "atomicrmw.shared", F, ExitBB);
-  BasicBlock *CheckPrivateBB =
-      BasicBlock::Create(Ctx, "atomicrmw.check.private", F, ExitBB);
+  BasicBlock *SharedBB = nullptr;
+
+  BasicBlock *CheckPrivateBB = BB;
+  if (FullFlatEmulation) {
+    SharedBB = BasicBlock::Create(Ctx, "atomicrmw.shared", F, ExitBB);
+    CheckPrivateBB =
+        BasicBlock::Create(Ctx, "atomicrmw.check.private", F, ExitBB);
+  }
+
  BasicBlock *PrivateBB =
      BasicBlock::Create(Ctx, "atomicrmw.private", F, ExitBB);
  BasicBlock *GlobalBB = BasicBlock::Create(Ctx, "atomicrmw.global", F, ExitBB);
@@ -16795,23 +16853,26 @@ void SITargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {

  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
-  CallInst *IsShared = Builder.CreateIntrinsic(Intrinsic::amdgcn_is_shared, {},
-                                               {Addr}, nullptr, "is.shared");
-  Builder.CreateCondBr(IsShared, SharedBB, CheckPrivateBB);

-  Builder.SetInsertPoint(SharedBB);
-  Value *CastToLocal = Builder.CreateAddrSpaceCast(
-      Addr, PointerType::get(Ctx, AMDGPUAS::LOCAL_ADDRESS));
+  Value *LoadedShared = nullptr;
+  if (FullFlatEmulation) {
+    CallInst *IsShared = Builder.CreateIntrinsic(
+        Intrinsic::amdgcn_is_shared, {}, {Addr}, nullptr, "is.shared");
+    Builder.CreateCondBr(IsShared, SharedBB, CheckPrivateBB);
+    Builder.SetInsertPoint(SharedBB);
+    Value *CastToLocal = Builder.CreateAddrSpaceCast(
+        Addr, PointerType::get(Ctx, AMDGPUAS::LOCAL_ADDRESS));

-  Instruction *Clone = AI->clone();
-  Clone->insertInto(SharedBB, SharedBB->end());
-  Clone->getOperandUse(AtomicRMWInst::getPointerOperandIndex())
-      .set(CastToLocal);
-  Instruction *LoadedShared = Clone;
+    Instruction *Clone = AI->clone();
+    Clone->insertInto(SharedBB, SharedBB->end());
+    Clone->getOperandUse(AtomicRMWInst::getPointerOperandIndex())
+        .set(CastToLocal);
+    LoadedShared = Clone;

-  Builder.CreateBr(PhiBB);
+    Builder.CreateBr(PhiBB);
+    Builder.SetInsertPoint(CheckPrivateBB);
+  }

-  Builder.SetInsertPoint(CheckPrivateBB);
  CallInst *IsPrivate = Builder.CreateIntrinsic(
      Intrinsic::amdgcn_is_private, {}, {Addr}, nullptr, "is.private");
  Builder.CreateCondBr(IsPrivate, PrivateBB, GlobalBB);
@@ -16828,15 +16889,32 @@ void SITargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {
  Builder.CreateBr(PhiBB);

  Builder.SetInsertPoint(GlobalBB);
-  Value *CastToGlobal = Builder.CreateAddrSpaceCast(
-      Addr, PointerType::get(Ctx, AMDGPUAS::GLOBAL_ADDRESS));
-  Value *LoadedGlobal = AI;

-  AI->getOperandUse(AtomicRMWInst::getPointerOperandIndex()).set(CastToGlobal);
+  // Continue using a flat instruction if we only emitted the check for private.
+  Instruction *LoadedGlobal = AI;
+  if (FullFlatEmulation) {
+    Value *CastToGlobal = Builder.CreateAddrSpaceCast(
+        Addr, PointerType::get(Ctx, AMDGPUAS::GLOBAL_ADDRESS));
+    AI->getOperandUse(AtomicRMWInst::getPointerOperandIndex())
+        .set(CastToGlobal);
+  }

  AI->removeFromParent();
  AI->insertInto(GlobalBB, GlobalBB->end());

+  // The new atomicrmw may go through another round of legalization later.
+  if (!FullFlatEmulation) {
+    // We inserted the runtime check already, make sure we do not try to
+    // re-expand this.
+    // TODO: Should union with any existing metadata.
+    MDBuilder MDB(F->getContext());
+    MDNode *RangeNotPrivate =
+        MDB.createRange(APInt(32, AMDGPUAS::PRIVATE_ADDRESS),
+                        APInt(32, AMDGPUAS::PRIVATE_ADDRESS + 1));
+    LoadedGlobal->setMetadata(LLVMContext::MD_noalias_addrspace,
+                              RangeNotPrivate);
+  }
+
  Builder.CreateBr(PhiBB);

  Builder.SetInsertPoint(PhiBB);
@@ -16844,7 +16922,8 @@ void SITargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {
  if (ReturnValueIsUsed) {
    PHINode *Loaded = Builder.CreatePHI(ValTy, 3);
    AI->replaceAllUsesWith(Loaded);
-    Loaded->addIncoming(LoadedShared, SharedBB);
+    if (FullFlatEmulation)
+      Loaded->addIncoming(LoadedShared, SharedBB);
    Loaded->addIncoming(LoadedPrivate, PrivateBB);
    Loaded->addIncoming(LoadedGlobal, GlobalBB);
    Loaded->takeName(AI);
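For the FullFlatEmulation case (fp32 fadd on targets with native flat fadd instructions), the expansion instead emits the full shared/private/global dispatch. A condensed sketch of the resulting control flow, with block names taken from the GFX90A check lines further down (operand details elided):

  %is.shared = call i1 @llvm.amdgcn.is.shared(ptr %addr)
  br i1 %is.shared, label %atomicrmw.shared, label %atomicrmw.check.private
  atomicrmw.shared:          ; atomicrmw on the ptr addrspace(3) cast
  atomicrmw.check.private:
    %is.private = call i1 @llvm.amdgcn.is.private(ptr %addr)
    br i1 %is.private, label %atomicrmw.private, label %atomicrmw.global
  atomicrmw.private:         ; non-atomic load/fadd/store on the ptr addrspace(5) cast
  atomicrmw.global:          ; atomicrmw on the ptr addrspace(1) cast
  atomicrmw.phi:             ; three-way phi of the loaded values, then branch to atomicrmw.end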
@@ -1332,7 +1332,7 @@ define double @flat_agent_atomic_fmax_ret_f64__amdgpu_no_fine_grained_memory(ptr
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %result = atomicrmw fmax ptr %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ %result = atomicrmw fmax ptr %ptr, double %val syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %result
}

@@ -1482,7 +1482,7 @@ define void @flat_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_memory(ptr
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %unused = atomicrmw fmax ptr %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ %unused = atomicrmw fmax ptr %ptr, double %val syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@@ -2215,3 +2215,4 @@ define void @buffer_fat_ptr_agent_atomic_fmax_noret_f64__amdgpu_no_fine_grained_
}

!0 = !{}
+!1 = !{i32 5, i32 6}
@@ -1332,7 +1332,7 @@ define double @flat_agent_atomic_fmin_ret_f64__amdgpu_no_fine_grained_memory(ptr
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %result = atomicrmw fmin ptr %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ %result = atomicrmw fmin ptr %ptr, double %val syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %result
}

@@ -1482,7 +1482,7 @@ define void @flat_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_memory(ptr
; GFX7-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX7-NEXT: buffer_wbinvl1
; GFX7-NEXT: s_setpc_b64 s[30:31]
- %unused = atomicrmw fmin ptr %ptr, double %val syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ %unused = atomicrmw fmin ptr %ptr, double %val syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@@ -2215,3 +2215,4 @@ define void @buffer_fat_ptr_agent_atomic_fmin_noret_f64__amdgpu_no_fine_grained_
}

!0 = !{}
+!1 = !{i32 5, i32 6}
@@ -1645,7 +1645,7 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64(ptr %out, ptr %ptr) #1 {
; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX11-NEXT: s_endpgm
- %result = atomicrmw udec_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
store i64 %result, ptr %out, align 4
ret void
}

@@ -1747,7 +1747,7 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset(ptr %out, ptr %ptr) #1
; GFX11-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
- %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
store i64 %result, ptr %out, align 4
ret void
}

@@ -1820,7 +1820,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64(ptr %ptr) #1 {
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
- %result = atomicrmw udec_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
ret void
}

@@ -1899,7 +1899,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset(ptr %ptr) #1 {
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
- %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
ret void
}

@@ -1978,7 +1978,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_system(ptr %ptr) #1
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
- %result = atomicrmw udec_wrap ptr %gep, i64 42 seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr %gep, i64 42 seq_cst, align 8, !noalias.addrspace !0
ret void
}

@@ -2106,7 +2106,7 @@ define amdgpu_kernel void @flat_atomic_dec_ret_i64_offset_addr64(ptr %out, ptr %
%gep.tid = getelementptr i64, ptr %ptr, i32 %id
%out.gep = getelementptr i64, ptr %out, i32 %id
%gep = getelementptr i64, ptr %gep.tid, i32 5
- %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
store i64 %result, ptr %out.gep, align 4
ret void
}

@@ -2205,7 +2205,7 @@ define amdgpu_kernel void @flat_atomic_dec_noret_i64_offset_addr64(ptr %ptr) #1
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, ptr %ptr, i32 %id
%gep = getelementptr i64, ptr %gep.tid, i32 5
- %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
ret void
}

@@ -3312,7 +3312,7 @@ define amdgpu_kernel void @atomic_dec_shl_base_lds_0_i64(ptr addrspace(1) %out,
%tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #2
%idx.0 = add nsw i32 %tid.x, 2
%arrayidx0 = getelementptr inbounds [512 x i64], ptr addrspace(3) @lds1, i32 0, i32 %idx.0
- %result = atomicrmw udec_wrap ptr addrspace(3) %arrayidx0, i64 9 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw udec_wrap ptr addrspace(3) %arrayidx0, i64 9 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
store i32 %idx.0, ptr addrspace(1) %add_use, align 4
store i64 %result, ptr addrspace(1) %out, align 4
ret void

@@ -3321,5 +3321,8 @@ define amdgpu_kernel void @atomic_dec_shl_base_lds_0_i64(ptr addrspace(1) %out,
attributes #0 = { nounwind speculatable willreturn memory(none) }
attributes #1 = { nounwind }
attributes #2 = { nounwind memory(none) }

+!0 = !{i32 5, i32 6}

;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GCN: {{.*}}
@@ -2754,7 +2754,7 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64(ptr %out, ptr %ptr) #1 {
; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX11-NEXT: s_endpgm
- %result = atomicrmw uinc_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
store i64 %result, ptr %out, align 4
ret void
}

@@ -2856,7 +2856,7 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset(ptr %out, ptr %ptr) #1
; GFX11-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
store i64 %result, ptr %out, align 4
ret void
}

@@ -2958,7 +2958,7 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_system(ptr %out, ptr %
; GFX11-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr %gep, i64 42 seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr %gep, i64 42 seq_cst, align 8, !noalias.addrspace !0
store i64 %result, ptr %out, align 4
ret void
}

@@ -3031,7 +3031,7 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64(ptr %ptr) #1 {
; GFX11-NEXT: buffer_gl1_inv
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
- %result = atomicrmw uinc_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr %ptr, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
ret void
}

@@ -3110,7 +3110,7 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset(ptr %ptr) #1 {
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
ret void
}

@@ -3189,7 +3189,7 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_system(ptr %ptr) #1
; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_endpgm
%gep = getelementptr i64, ptr %ptr, i32 4
- %result = atomicrmw uinc_wrap ptr %gep, i64 42 seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr %gep, i64 42 seq_cst, align 8, !noalias.addrspace !0
ret void
}

@@ -3317,7 +3317,7 @@ define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_addr64(ptr %out, ptr %
%gep.tid = getelementptr i64, ptr %ptr, i32 %id
%out.gep = getelementptr i64, ptr %out, i32 %id
%gep = getelementptr i64, ptr %gep.tid, i32 5
- %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
store i64 %result, ptr %out.gep, align 4
ret void
}

@@ -3416,7 +3416,7 @@ define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_addr64(ptr %ptr) #1
%id = call i32 @llvm.amdgcn.workitem.id.x()
%gep.tid = getelementptr i64, ptr %ptr, i32 %id
%gep = getelementptr i64, ptr %gep.tid, i32 5
- %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8
+ %result = atomicrmw uinc_wrap ptr %gep, i64 42 syncscope("agent") seq_cst, align 8, !noalias.addrspace !0
ret void
}

@@ -3524,5 +3524,8 @@ define amdgpu_kernel void @nocse_lds_atomic_inc_ret_i32(ptr addrspace(1) %out0,
attributes #0 = { nounwind speculatable willreturn memory(none) }
attributes #1 = { nounwind }
attributes #2 = { nounwind memory(none) }

+!0 = !{i32 5, i32 6}

;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GCN: {{.*}}
@@ -15,7 +15,7 @@ define amdgpu_ps void @flat_atomic_fadd_f64_no_rtn_atomicrmw(ptr %ptr, double %d
; GFX90A_GFX940-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
; GFX90A_GFX940-NEXT: FLAT_ATOMIC_ADD_F64 [[REG_SEQUENCE]], [[REG_SEQUENCE1]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("wavefront") monotonic (s64) on %ir.ptr)
; GFX90A_GFX940-NEXT: S_ENDPGM 0
- %ret = atomicrmw fadd ptr %ptr, double %data syncscope("wavefront") monotonic, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double %data syncscope("wavefront") monotonic, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@@ -38,8 +38,9 @@ define amdgpu_ps double @flat_atomic_fadd_f64_rtn_atomicrmw(ptr %ptr, double %da
; GFX90A_GFX940-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
; GFX90A_GFX940-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; GFX90A_GFX940-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
- %ret = atomicrmw fadd ptr %ptr, double %data syncscope("wavefront") monotonic, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double %data syncscope("wavefront") monotonic, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %ret
}

!0 = !{}
+!1 = !{i32 5, i32 6}
@@ -1371,7 +1371,7 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat(ptr %ptr) #1 {
; GFX940-NEXT: buffer_inv sc0 sc1
; GFX940-NEXT: s_endpgm
main_body:
- %ret = atomicrmw fadd ptr %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double 4.0 seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@@ -1400,7 +1400,7 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat_agent(ptr %ptr) #1 {
; GFX940-NEXT: buffer_inv sc1
; GFX940-NEXT: s_endpgm
main_body:
- %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@@ -1431,7 +1431,7 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat_system(ptr %ptr) #1 {
; GFX940-NEXT: buffer_inv sc0 sc1
; GFX940-NEXT: s_endpgm
main_body:
- %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("one-as") seq_cst, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("one-as") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@@ -1458,7 +1458,7 @@ define double @flat_atomic_fadd_f64_rtn_pat(ptr %ptr) #1 {
; GFX940-NEXT: buffer_inv sc0 sc1
; GFX940-NEXT: s_setpc_b64 s[30:31]
main_body:
- %ret = atomicrmw fadd ptr %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double 4.0 seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %ret
}

@@ -1483,7 +1483,7 @@ define double @flat_atomic_fadd_f64_rtn_pat_agent(ptr %ptr) #1 {
; GFX940-NEXT: buffer_inv sc1
; GFX940-NEXT: s_setpc_b64 s[30:31]
main_body:
- %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %ret
}

@@ -1512,7 +1512,7 @@ define double @flat_atomic_fadd_f64_rtn_pat_system(ptr %ptr) #1 {
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: s_setpc_b64 s[30:31]
main_body:
- %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("one-as") seq_cst, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("one-as") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %ret
}

@@ -1541,7 +1541,7 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat_agent_safe(ptr %ptr) {
; GFX940-NEXT: buffer_inv sc1
; GFX940-NEXT: s_endpgm
main_body:
- %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@@ -1713,3 +1713,4 @@ attributes #1 = { nounwind }
attributes #2 = { nounwind "denormal-fp-math"="preserve-sign,preserve-sign" }

!0 = !{}
+!1 = !{i32 5, i32 6}
@@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
- ; RUN: llc -mtriple=amdgcn -mcpu=gfx90a -verify-machineinstrs -stop-after=finalize-isel < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
- ; RUN: llc -mtriple=amdgcn -mcpu=gfx940 -verify-machineinstrs -stop-after=finalize-isel < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx90a -enable-new-pm -stop-after=finalize-isel < %s | FileCheck -check-prefix=GFX90A_GFX940 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx940 -enable-new-pm -stop-after=finalize-isel < %s | FileCheck -check-prefix=GFX90A_GFX940 %s

define amdgpu_ps void @flat_atomic_fadd_f64_no_rtn_intrinsic(ptr %ptr, double %data) {
; GFX90A_GFX940-LABEL: name: flat_atomic_fadd_f64_no_rtn_intrinsic

@@ -73,7 +71,7 @@ define amdgpu_ps void @flat_atomic_fadd_f64_no_rtn_atomicrmw(ptr %ptr, double %d
; GFX90A_GFX940-NEXT: [[COPY5:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]]
; GFX90A_GFX940-NEXT: FLAT_ATOMIC_ADD_F64 killed [[COPY4]], killed [[COPY5]], 0, 0, implicit $exec, implicit $flat_scr :: (load store syncscope("wavefront") monotonic (s64) on %ir.ptr)
; GFX90A_GFX940-NEXT: S_ENDPGM 0
- %ret = atomicrmw fadd ptr %ptr, double %data syncscope("wavefront") monotonic, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double %data syncscope("wavefront") monotonic, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@@ -123,7 +121,7 @@ define amdgpu_ps double @flat_atomic_fadd_f64_rtn_atomicrmw(ptr %ptr, double %da
; GFX90A_GFX940-NEXT: $sgpr0 = COPY [[COPY6]]
; GFX90A_GFX940-NEXT: $sgpr1 = COPY [[COPY7]]
; GFX90A_GFX940-NEXT: SI_RETURN_TO_EPILOG $sgpr0, $sgpr1
- %ret = atomicrmw fadd ptr %ptr, double %data syncscope("wavefront") monotonic, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double %data syncscope("wavefront") monotonic, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %ret
}
[Six file diffs suppressed because they are too large]
@@ -1278,7 +1278,7 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat(ptr %ptr) #1 {
; GFX940-NEXT: buffer_inv sc0 sc1
; GFX940-NEXT: s_endpgm
main_body:
- %ret = atomicrmw fadd ptr %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double 4.0 seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@@ -1307,7 +1307,7 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat_agent(ptr %ptr) #1 {
; GFX940-NEXT: buffer_inv sc1
; GFX940-NEXT: s_endpgm
main_body:
- %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@@ -1338,7 +1338,7 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat_system(ptr %ptr) #1 {
; GFX940-NEXT: buffer_inv sc0 sc1
; GFX940-NEXT: s_endpgm
main_body:
- %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("one-as") seq_cst, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("one-as") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@@ -1365,7 +1365,7 @@ define double @flat_atomic_fadd_f64_rtn_pat(ptr %ptr) #1 {
; GFX940-NEXT: buffer_inv sc0 sc1
; GFX940-NEXT: s_setpc_b64 s[30:31]
main_body:
- %ret = atomicrmw fadd ptr %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double 4.0 seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %ret
}

@@ -1390,7 +1390,7 @@ define double @flat_atomic_fadd_f64_rtn_pat_agent(ptr %ptr) #1 {
; GFX940-NEXT: buffer_inv sc1
; GFX940-NEXT: s_setpc_b64 s[30:31]
main_body:
- %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %ret
}

@@ -1419,7 +1419,7 @@ define double @flat_atomic_fadd_f64_rtn_pat_system(ptr %ptr) #1 {
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: s_setpc_b64 s[30:31]
main_body:
- %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("one-as") seq_cst, !amdgpu.no.fine.grained.memory !0
+ %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("one-as") seq_cst, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %ret
}

@@ -1459,7 +1459,7 @@ define amdgpu_kernel void @flat_atomic_fadd_f64_noret_pat_agent_safe(ptr %ptr) {
; GFX940-NEXT: buffer_inv sc1
; GFX940-NEXT: s_endpgm
main_body:
- %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst
+ %ret = atomicrmw fadd ptr %ptr, double 4.0 syncscope("agent") seq_cst, !noalias.addrspace !1
ret void
}

@@ -1666,3 +1666,4 @@ attributes #3 = { "denormal-fp-math"="ieee,ieee" }
attributes #4 = { "denormal-fp-math"="preserve-sign,preserve-sign" }

!0 = !{}
+!1 = !{i32 5, i32 6}
@@ -599,7 +599,7 @@ define float @test_atomicrmw_fadd_f32_flat_unsafe(ptr %ptr, float %value) #3 {
; GFX90A-NEXT: br i1 [[IS_SHARED]], label [[ATOMICRMW_SHARED:%.*]], label [[ATOMICRMW_CHECK_PRIVATE:%.*]]
; GFX90A: atomicrmw.shared:
; GFX90A-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(3)
- ; GFX90A-NEXT: [[TMP2:%.*]] = atomicrmw fadd ptr addrspace(3) [[TMP1]], float [[VALUE:%.*]] syncscope("wavefront") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+ ; GFX90A-NEXT: [[TMP2:%.*]] = atomicrmw fadd ptr addrspace(3) [[TMP1]], float [[VALUE:%.*]] syncscope("wavefront") monotonic, align 4, !noalias.addrspace [[META1:![0-9]+]], !amdgpu.no.fine.grained.memory [[META0]]
; GFX90A-NEXT: br label [[ATOMICRMW_PHI:%.*]]
; GFX90A: atomicrmw.check.private:
; GFX90A-NEXT: [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR]])

@@ -612,7 +612,7 @@ define float @test_atomicrmw_fadd_f32_flat_unsafe(ptr %ptr, float %value) #3 {
; GFX90A-NEXT: br label [[ATOMICRMW_PHI]]
; GFX90A: atomicrmw.global:
; GFX90A-NEXT: [[TMP4:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(1)
- ; GFX90A-NEXT: [[TMP5:%.*]] = atomicrmw fadd ptr addrspace(1) [[TMP4]], float [[VALUE]] syncscope("wavefront") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+ ; GFX90A-NEXT: [[TMP5:%.*]] = atomicrmw fadd ptr addrspace(1) [[TMP4]], float [[VALUE]] syncscope("wavefront") monotonic, align 4, !noalias.addrspace [[META1]], !amdgpu.no.fine.grained.memory [[META0]]
; GFX90A-NEXT: br label [[ATOMICRMW_PHI]]
; GFX90A: atomicrmw.phi:
; GFX90A-NEXT: [[RES:%.*]] = phi float [ [[TMP2]], [[ATOMICRMW_SHARED]] ], [ [[LOADED_PRIVATE]], [[ATOMICRMW_PRIVATE]] ], [ [[TMP5]], [[ATOMICRMW_GLOBAL]] ]

@@ -621,19 +621,19 @@ define float @test_atomicrmw_fadd_f32_flat_unsafe(ptr %ptr, float %value) #3 {
; GFX90A-NEXT: ret float [[RES]]
;
; GFX940-LABEL: @test_atomicrmw_fadd_f32_flat_unsafe(
- ; GFX940-NEXT: [[RES:%.*]] = atomicrmw fadd ptr [[PTR:%.*]], float [[VALUE:%.*]] syncscope("wavefront") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+ ; GFX940-NEXT: [[RES:%.*]] = atomicrmw fadd ptr [[PTR:%.*]], float [[VALUE:%.*]] syncscope("wavefront") monotonic, align 4, !noalias.addrspace [[META1:![0-9]+]], !amdgpu.no.fine.grained.memory [[META0]]
; GFX940-NEXT: ret float [[RES]]
;
; GFX11-LABEL: @test_atomicrmw_fadd_f32_flat_unsafe(
- ; GFX11-NEXT: [[RES:%.*]] = atomicrmw fadd ptr [[PTR:%.*]], float [[VALUE:%.*]] syncscope("wavefront") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META0]]
+ ; GFX11-NEXT: [[RES:%.*]] = atomicrmw fadd ptr [[PTR:%.*]], float [[VALUE:%.*]] syncscope("wavefront") monotonic, align 4, !noalias.addrspace [[META1:![0-9]+]], !amdgpu.no.fine.grained.memory [[META0]]
; GFX11-NEXT: ret float [[RES]]
;
- %res = atomicrmw fadd ptr %ptr, float %value syncscope("wavefront") monotonic, !amdgpu.no.fine.grained.memory !0
+ %res = atomicrmw fadd ptr %ptr, float %value syncscope("wavefront") monotonic, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret float %res
}

- define double @test_atomicrmw_fadd_f64_flat_unsafe(ptr %ptr, double %value) #3 {
- ; CI-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe(
+ define double @test_atomicrmw_fadd_f64_flat_unsafe__noprivate(ptr %ptr, double %value) #3 {
+ ; CI-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe__noprivate(
; CI-NEXT: [[TMP1:%.*]] = load double, ptr [[PTR:%.*]], align 8
; CI-NEXT: br label [[ATOMICRMW_START:%.*]]
; CI: atomicrmw.start:

@@ -649,7 +649,7 @@ define double @test_atomicrmw_fadd_f64_flat_unsafe(ptr %ptr, double %value) #3 {
; CI: atomicrmw.end:
; CI-NEXT: ret double [[TMP5]]
;
- ; GFX9-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe(
+ ; GFX9-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe__noprivate(
; GFX9-NEXT: [[TMP1:%.*]] = load double, ptr [[PTR:%.*]], align 8
; GFX9-NEXT: br label [[ATOMICRMW_START:%.*]]
; GFX9: atomicrmw.start:

@@ -665,7 +665,7 @@ define double @test_atomicrmw_fadd_f64_flat_unsafe(ptr %ptr, double %value) #3 {
; GFX9: atomicrmw.end:
; GFX9-NEXT: ret double [[TMP5]]
;
- ; GFX908-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe(
+ ; GFX908-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe__noprivate(
; GFX908-NEXT: [[TMP1:%.*]] = load double, ptr [[PTR:%.*]], align 8
; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
; GFX908: atomicrmw.start:

@@ -681,15 +681,15 @@ define double @test_atomicrmw_fadd_f64_flat_unsafe(ptr %ptr, double %value) #3 {
; GFX908: atomicrmw.end:
; GFX908-NEXT: ret double [[TMP5]]
;
- ; GFX90A-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe(
- ; GFX90A-NEXT: [[RES:%.*]] = atomicrmw fadd ptr [[PTR:%.*]], double [[VALUE:%.*]] syncscope("wavefront") monotonic, align 8, !amdgpu.no.fine.grained.memory [[META0]]
+ ; GFX90A-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe__noprivate(
+ ; GFX90A-NEXT: [[RES:%.*]] = atomicrmw fadd ptr [[PTR:%.*]], double [[VALUE:%.*]] syncscope("wavefront") monotonic, align 8, !noalias.addrspace [[META1]], !amdgpu.no.fine.grained.memory [[META0]]
; GFX90A-NEXT: ret double [[RES]]
;
- ; GFX940-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe(
- ; GFX940-NEXT: [[RES:%.*]] = atomicrmw fadd ptr [[PTR:%.*]], double [[VALUE:%.*]] syncscope("wavefront") monotonic, align 8, !amdgpu.no.fine.grained.memory [[META0]]
+ ; GFX940-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe__noprivate(
+ ; GFX940-NEXT: [[RES:%.*]] = atomicrmw fadd ptr [[PTR:%.*]], double [[VALUE:%.*]] syncscope("wavefront") monotonic, align 8, !noalias.addrspace [[META1]], !amdgpu.no.fine.grained.memory [[META0]]
; GFX940-NEXT: ret double [[RES]]
;
- ; GFX11-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe(
+ ; GFX11-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe__noprivate(
; GFX11-NEXT: [[TMP1:%.*]] = load double, ptr [[PTR:%.*]], align 8
; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
; GFX11: atomicrmw.start:

@@ -704,6 +704,167 @@ define double @test_atomicrmw_fadd_f64_flat_unsafe(ptr %ptr, double %value) #3 {
; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
; GFX11: atomicrmw.end:
; GFX11-NEXT: ret double [[TMP5]]
;
%res = atomicrmw fadd ptr %ptr, double %value syncscope("wavefront") monotonic, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret double %res
}

define double @test_atomicrmw_fadd_f64_flat_unsafe(ptr %ptr, double %value) #3 {
; CI-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe(
; CI-NEXT: [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR:%.*]])
; CI-NEXT: br i1 [[IS_PRIVATE]], label [[ATOMICRMW_PRIVATE:%.*]], label [[ATOMICRMW_GLOBAL:%.*]]
; CI: atomicrmw.private:
; CI-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
; CI-NEXT: [[LOADED_PRIVATE:%.*]] = load double, ptr addrspace(5) [[TMP1]], align 8
; CI-NEXT: [[NEW:%.*]] = fadd double [[LOADED_PRIVATE]], [[VALUE:%.*]]
; CI-NEXT: store double [[NEW]], ptr addrspace(5) [[TMP1]], align 8
; CI-NEXT: br label [[ATOMICRMW_PHI:%.*]]
; CI: atomicrmw.global:
; CI-NEXT: [[TMP2:%.*]] = load double, ptr [[PTR]], align 8
; CI-NEXT: br label [[ATOMICRMW_START:%.*]]
; CI: atomicrmw.start:
; CI-NEXT: [[LOADED:%.*]] = phi double [ [[TMP2]], [[ATOMICRMW_GLOBAL]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
; CI-NEXT: [[NEW2:%.*]] = fadd double [[LOADED]], [[VALUE]]
; CI-NEXT: [[TMP3:%.*]] = bitcast double [[NEW2]] to i64
; CI-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
; CI-NEXT: [[TMP5:%.*]] = cmpxchg ptr [[PTR]], i64 [[TMP4]], i64 [[TMP3]] syncscope("wavefront") monotonic monotonic, align 8
; CI-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
; CI-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
; CI-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
; CI-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END1:%.*]], label [[ATOMICRMW_START]]
; CI: atomicrmw.end1:
; CI-NEXT: br label [[ATOMICRMW_PHI]]
; CI: atomicrmw.phi:
; CI-NEXT: [[RES:%.*]] = phi double [ [[LOADED_PRIVATE]], [[ATOMICRMW_PRIVATE]] ], [ [[TMP6]], [[ATOMICRMW_END1]] ]
; CI-NEXT: br label [[ATOMICRMW_END:%.*]]
; CI: atomicrmw.end:
; CI-NEXT: ret double [[RES]]
;
; GFX9-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe(
; GFX9-NEXT: [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR:%.*]])
; GFX9-NEXT: br i1 [[IS_PRIVATE]], label [[ATOMICRMW_PRIVATE:%.*]], label [[ATOMICRMW_GLOBAL:%.*]]
; GFX9: atomicrmw.private:
; GFX9-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
; GFX9-NEXT: [[LOADED_PRIVATE:%.*]] = load double, ptr addrspace(5) [[TMP1]], align 8
; GFX9-NEXT: [[NEW:%.*]] = fadd double [[LOADED_PRIVATE]], [[VALUE:%.*]]
; GFX9-NEXT: store double [[NEW]], ptr addrspace(5) [[TMP1]], align 8
; GFX9-NEXT: br label [[ATOMICRMW_PHI:%.*]]
; GFX9: atomicrmw.global:
; GFX9-NEXT: [[TMP2:%.*]] = load double, ptr [[PTR]], align 8
; GFX9-NEXT: br label [[ATOMICRMW_START:%.*]]
; GFX9: atomicrmw.start:
; GFX9-NEXT: [[LOADED:%.*]] = phi double [ [[TMP2]], [[ATOMICRMW_GLOBAL]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
; GFX9-NEXT: [[NEW2:%.*]] = fadd double [[LOADED]], [[VALUE]]
; GFX9-NEXT: [[TMP3:%.*]] = bitcast double [[NEW2]] to i64
; GFX9-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
; GFX9-NEXT: [[TMP5:%.*]] = cmpxchg ptr [[PTR]], i64 [[TMP4]], i64 [[TMP3]] syncscope("wavefront") monotonic monotonic, align 8
; GFX9-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
; GFX9-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
; GFX9-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
; GFX9-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END1:%.*]], label [[ATOMICRMW_START]]
; GFX9: atomicrmw.end1:
; GFX9-NEXT: br label [[ATOMICRMW_PHI]]
; GFX9: atomicrmw.phi:
; GFX9-NEXT: [[RES:%.*]] = phi double [ [[LOADED_PRIVATE]], [[ATOMICRMW_PRIVATE]] ], [ [[TMP6]], [[ATOMICRMW_END1]] ]
; GFX9-NEXT: br label [[ATOMICRMW_END:%.*]]
; GFX9: atomicrmw.end:
; GFX9-NEXT: ret double [[RES]]
;
; GFX908-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe(
; GFX908-NEXT: [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR:%.*]])
; GFX908-NEXT: br i1 [[IS_PRIVATE]], label [[ATOMICRMW_PRIVATE:%.*]], label [[ATOMICRMW_GLOBAL:%.*]]
; GFX908: atomicrmw.private:
; GFX908-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
; GFX908-NEXT: [[LOADED_PRIVATE:%.*]] = load double, ptr addrspace(5) [[TMP1]], align 8
; GFX908-NEXT: [[NEW:%.*]] = fadd double [[LOADED_PRIVATE]], [[VALUE:%.*]]
; GFX908-NEXT: store double [[NEW]], ptr addrspace(5) [[TMP1]], align 8
; GFX908-NEXT: br label [[ATOMICRMW_PHI:%.*]]
; GFX908: atomicrmw.global:
; GFX908-NEXT: [[TMP2:%.*]] = load double, ptr [[PTR]], align 8
; GFX908-NEXT: br label [[ATOMICRMW_START:%.*]]
; GFX908: atomicrmw.start:
; GFX908-NEXT: [[LOADED:%.*]] = phi double [ [[TMP2]], [[ATOMICRMW_GLOBAL]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
; GFX908-NEXT: [[NEW2:%.*]] = fadd double [[LOADED]], [[VALUE]]
; GFX908-NEXT: [[TMP3:%.*]] = bitcast double [[NEW2]] to i64
; GFX908-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
; GFX908-NEXT: [[TMP5:%.*]] = cmpxchg ptr [[PTR]], i64 [[TMP4]], i64 [[TMP3]] syncscope("wavefront") monotonic monotonic, align 8
; GFX908-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
; GFX908-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
; GFX908-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
; GFX908-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END1:%.*]], label [[ATOMICRMW_START]]
; GFX908: atomicrmw.end1:
; GFX908-NEXT: br label [[ATOMICRMW_PHI]]
; GFX908: atomicrmw.phi:
; GFX908-NEXT: [[RES:%.*]] = phi double [ [[LOADED_PRIVATE]], [[ATOMICRMW_PRIVATE]] ], [ [[TMP6]], [[ATOMICRMW_END1]] ]
; GFX908-NEXT: br label [[ATOMICRMW_END:%.*]]
; GFX908: atomicrmw.end:
; GFX908-NEXT: ret double [[RES]]
;
; GFX90A-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe(
; GFX90A-NEXT: [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR:%.*]])
; GFX90A-NEXT: br i1 [[IS_PRIVATE]], label [[ATOMICRMW_PRIVATE:%.*]], label [[ATOMICRMW_GLOBAL:%.*]]
; GFX90A: atomicrmw.private:
; GFX90A-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
; GFX90A-NEXT: [[LOADED_PRIVATE:%.*]] = load double, ptr addrspace(5) [[TMP1]], align 8
; GFX90A-NEXT: [[NEW:%.*]] = fadd double [[LOADED_PRIVATE]], [[VALUE:%.*]]
; GFX90A-NEXT: store double [[NEW]], ptr addrspace(5) [[TMP1]], align 8
; GFX90A-NEXT: br label [[ATOMICRMW_PHI:%.*]]
; GFX90A: atomicrmw.global:
; GFX90A-NEXT: [[TMP2:%.*]] = atomicrmw fadd ptr [[PTR]], double [[VALUE]] syncscope("wavefront") monotonic, align 8, !noalias.addrspace [[META1]], !amdgpu.no.fine.grained.memory [[META0]]
; GFX90A-NEXT: br label [[ATOMICRMW_PHI]]
; GFX90A: atomicrmw.phi:
; GFX90A-NEXT: [[RES:%.*]] = phi double [ [[LOADED_PRIVATE]], [[ATOMICRMW_PRIVATE]] ], [ [[TMP2]], [[ATOMICRMW_GLOBAL]] ]
; GFX90A-NEXT: br label [[ATOMICRMW_END:%.*]]
; GFX90A: atomicrmw.end:
; GFX90A-NEXT: ret double [[RES]]
;
; GFX940-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe(
; GFX940-NEXT: [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR:%.*]])
; GFX940-NEXT: br i1 [[IS_PRIVATE]], label [[ATOMICRMW_PRIVATE:%.*]], label [[ATOMICRMW_GLOBAL:%.*]]
; GFX940: atomicrmw.private:
; GFX940-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
; GFX940-NEXT: [[LOADED_PRIVATE:%.*]] = load double, ptr addrspace(5) [[TMP1]], align 8
; GFX940-NEXT: [[NEW:%.*]] = fadd double [[LOADED_PRIVATE]], [[VALUE:%.*]]
; GFX940-NEXT: store double [[NEW]], ptr addrspace(5) [[TMP1]], align 8
; GFX940-NEXT: br label [[ATOMICRMW_PHI:%.*]]
; GFX940: atomicrmw.global:
; GFX940-NEXT: [[TMP2:%.*]] = atomicrmw fadd ptr [[PTR]], double [[VALUE]] syncscope("wavefront") monotonic, align 8, !noalias.addrspace [[META1]], !amdgpu.no.fine.grained.memory [[META0]]
; GFX940-NEXT: br label [[ATOMICRMW_PHI]]
; GFX940: atomicrmw.phi:
; GFX940-NEXT: [[RES:%.*]] = phi double [ [[LOADED_PRIVATE]], [[ATOMICRMW_PRIVATE]] ], [ [[TMP2]], [[ATOMICRMW_GLOBAL]] ]
; GFX940-NEXT: br label [[ATOMICRMW_END:%.*]]
; GFX940: atomicrmw.end:
; GFX940-NEXT: ret double [[RES]]
;
; GFX11-LABEL: @test_atomicrmw_fadd_f64_flat_unsafe(
; GFX11-NEXT: [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR:%.*]])
; GFX11-NEXT: br i1 [[IS_PRIVATE]], label [[ATOMICRMW_PRIVATE:%.*]], label [[ATOMICRMW_GLOBAL:%.*]]
; GFX11: atomicrmw.private:
; GFX11-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
; GFX11-NEXT: [[LOADED_PRIVATE:%.*]] = load double, ptr addrspace(5) [[TMP1]], align 8
; GFX11-NEXT: [[NEW:%.*]] = fadd double [[LOADED_PRIVATE]], [[VALUE:%.*]]
; GFX11-NEXT: store double [[NEW]], ptr addrspace(5) [[TMP1]], align 8
; GFX11-NEXT: br label [[ATOMICRMW_PHI:%.*]]
; GFX11: atomicrmw.global:
; GFX11-NEXT: [[TMP2:%.*]] = load double, ptr [[PTR]], align 8
; GFX11-NEXT: br label [[ATOMICRMW_START:%.*]]
; GFX11: atomicrmw.start:
; GFX11-NEXT: [[LOADED:%.*]] = phi double [ [[TMP2]], [[ATOMICRMW_GLOBAL]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
; GFX11-NEXT: [[NEW2:%.*]] = fadd double [[LOADED]], [[VALUE]]
; GFX11-NEXT: [[TMP3:%.*]] = bitcast double [[NEW2]] to i64
; GFX11-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
; GFX11-NEXT: [[TMP5:%.*]] = cmpxchg ptr [[PTR]], i64 [[TMP4]], i64 [[TMP3]] syncscope("wavefront") monotonic monotonic, align 8
; GFX11-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
; GFX11-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
; GFX11-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
; GFX11-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END1:%.*]], label [[ATOMICRMW_START]]
; GFX11: atomicrmw.end1:
; GFX11-NEXT: br label [[ATOMICRMW_PHI]]
; GFX11: atomicrmw.phi:
; GFX11-NEXT: [[RES:%.*]] = phi double [ [[LOADED_PRIVATE]], [[ATOMICRMW_PRIVATE]] ], [ [[TMP6]], [[ATOMICRMW_END1]] ]
; GFX11-NEXT: br label [[ATOMICRMW_END:%.*]]
; GFX11: atomicrmw.end:
; GFX11-NEXT: ret double [[RES]]
;
%res = atomicrmw fadd ptr %ptr, double %value syncscope("wavefront") monotonic, !amdgpu.no.fine.grained.memory !0
ret double %res
@ -1067,6 +1228,41 @@ define half @test_atomicrmw_fadd_f16_local(ptr addrspace(3) %ptr, half %value) {
|
||||
|
||||
define double @test_atomicrmw_fadd_f64_flat(ptr %ptr, double %value) {
|
||||
; ALL-LABEL: @test_atomicrmw_fadd_f64_flat(
|
||||
; ALL-NEXT: [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR:%.*]])
|
||||
; ALL-NEXT: br i1 [[IS_PRIVATE]], label [[ATOMICRMW_PRIVATE:%.*]], label [[ATOMICRMW_GLOBAL:%.*]]
|
||||
; ALL: atomicrmw.private:
|
||||
; ALL-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
|
||||
; ALL-NEXT: [[LOADED_PRIVATE:%.*]] = load double, ptr addrspace(5) [[TMP1]], align 8
|
||||
; ALL-NEXT: [[NEW:%.*]] = fadd double [[LOADED_PRIVATE]], [[VALUE:%.*]]
|
||||
; ALL-NEXT: store double [[NEW]], ptr addrspace(5) [[TMP1]], align 8
|
||||
; ALL-NEXT: br label [[ATOMICRMW_PHI:%.*]]
|
||||
; ALL: atomicrmw.global:
|
||||
; ALL-NEXT: [[TMP2:%.*]] = load double, ptr [[PTR]], align 8
|
||||
; ALL-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; ALL: atomicrmw.start:
|
||||
; ALL-NEXT: [[LOADED:%.*]] = phi double [ [[TMP2]], [[ATOMICRMW_GLOBAL]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
|
||||
; ALL-NEXT: [[NEW2:%.*]] = fadd double [[LOADED]], [[VALUE]]
|
||||
; ALL-NEXT: [[TMP3:%.*]] = bitcast double [[NEW2]] to i64
|
||||
; ALL-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
|
||||
; ALL-NEXT: [[TMP5:%.*]] = cmpxchg ptr [[PTR]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst, align 8
|
||||
; ALL-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
|
||||
; ALL-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
|
||||
; ALL-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
|
||||
; ALL-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END1:%.*]], label [[ATOMICRMW_START]]
|
||||
; ALL: atomicrmw.end1:
|
||||
; ALL-NEXT: br label [[ATOMICRMW_PHI]]
|
||||
; ALL: atomicrmw.phi:
|
||||
; ALL-NEXT: [[RES:%.*]] = phi double [ [[LOADED_PRIVATE]], [[ATOMICRMW_PRIVATE]] ], [ [[TMP6]], [[ATOMICRMW_END1]] ]
|
||||
; ALL-NEXT: br label [[ATOMICRMW_END:%.*]]
|
||||
; ALL: atomicrmw.end:
|
||||
; ALL-NEXT: ret double [[RES]]
|
||||
;
|
||||
%res = atomicrmw fadd ptr %ptr, double %value seq_cst
|
||||
ret double %res
|
||||
}
|
||||
|
||||
define double @test_atomicrmw_fadd_f64_flat__noprivate(ptr %ptr, double %value) {
|
||||
; ALL-LABEL: @test_atomicrmw_fadd_f64_flat__noprivate(
|
||||
; ALL-NEXT: [[TMP1:%.*]] = load double, ptr [[PTR:%.*]], align 8
|
||||
; ALL-NEXT: br label [[ATOMICRMW_START:%.*]]
|
||||
; ALL: atomicrmw.start:
|
||||
@ -1082,7 +1278,7 @@ define double @test_atomicrmw_fadd_f64_flat(ptr %ptr, double %value) {
|
||||
; ALL: atomicrmw.end:
|
||||
; ALL-NEXT: ret double [[TMP5]]
|
||||
;
|
||||
%res = atomicrmw fadd ptr %ptr, double %value seq_cst
|
||||
%res = atomicrmw fadd ptr %ptr, double %value seq_cst, !noalias.addrspace !1
|
||||
ret double %res
|
||||
}
|
||||
|
||||
@ -2619,18 +2815,31 @@ define float @test_atomicrmw_fadd_f32_flat_system_ret__amdgpu_ignore_denormal_mo
|
||||
|
||||
define void @test_atomicrmw_fadd_f64_dyndenorm_flat_system_noret__amdgpu_ignore_denormal_mode(ptr %ptr, double %value) #5 {
|
||||
; ALL-LABEL: @test_atomicrmw_fadd_f64_dyndenorm_flat_system_noret__amdgpu_ignore_denormal_mode(
|
||||
; ALL-NEXT: [[TMP1:%.*]] = load double, ptr [[PTR:%.*]], align 8
|
||||
; ALL-NEXT: [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR:%.*]])
|
||||
; ALL-NEXT: br i1 [[IS_PRIVATE]], label [[ATOMICRMW_PRIVATE:%.*]], label [[ATOMICRMW_GLOBAL:%.*]]
|
||||
; ALL: atomicrmw.private:
|
||||
; ALL-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
|
||||
; ALL-NEXT: [[LOADED_PRIVATE:%.*]] = load double, ptr addrspace(5) [[TMP1]], align 8
|
||||
; ALL-NEXT: [[NEW:%.*]] = fadd double [[LOADED_PRIVATE]], [[VALUE:%.*]]
|
||||
; ALL-NEXT: store double [[NEW]], ptr addrspace(5) [[TMP1]], align 8
|
||||
; ALL-NEXT: br label [[ATOMICRMW_PHI:%.*]]
; ALL: atomicrmw.global:
; ALL-NEXT: [[TMP2:%.*]] = load double, ptr [[PTR]], align 8
; ALL-NEXT: br label [[ATOMICRMW_START:%.*]]
; ALL: atomicrmw.start:
; ALL-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP5:%.*]], [[ATOMICRMW_START]] ]
; ALL-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
; ALL-NEXT: [[TMP2:%.*]] = bitcast double [[NEW]] to i64
; ALL-NEXT: [[TMP3:%.*]] = bitcast double [[LOADED]] to i64
; ALL-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i64 [[TMP3]], i64 [[TMP2]] monotonic monotonic, align 8
; ALL-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
; ALL-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP4]], 0
; ALL-NEXT: [[TMP5]] = bitcast i64 [[NEWLOADED]] to double
; ALL-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
; ALL-NEXT: [[LOADED:%.*]] = phi double [ [[TMP2]], [[ATOMICRMW_GLOBAL]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
; ALL-NEXT: [[NEW2:%.*]] = fadd double [[LOADED]], [[VALUE]]
; ALL-NEXT: [[TMP3:%.*]] = bitcast double [[NEW2]] to i64
; ALL-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
; ALL-NEXT: [[TMP5:%.*]] = cmpxchg ptr [[PTR]], i64 [[TMP4]], i64 [[TMP3]] monotonic monotonic, align 8
; ALL-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
; ALL-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
; ALL-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
; ALL-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END1:%.*]], label [[ATOMICRMW_START]]
; ALL: atomicrmw.end1:
; ALL-NEXT: br label [[ATOMICRMW_PHI]]
; ALL: atomicrmw.phi:
; ALL-NEXT: br label [[ATOMICRMW_END:%.*]]
; ALL: atomicrmw.end:
; ALL-NEXT: ret void
;
@ -2640,20 +2849,34 @@ define void @test_atomicrmw_fadd_f64_dyndenorm_flat_system_noret__amdgpu_ignore_

define double @test_atomicrmw_fadd_f64_dyndenorm_flat_system_ret__amdgpu_ignore_denormal_mode(ptr %ptr, double %value) #5 {
; ALL-LABEL: @test_atomicrmw_fadd_f64_dyndenorm_flat_system_ret__amdgpu_ignore_denormal_mode(
; ALL-NEXT: [[TMP1:%.*]] = load double, ptr [[PTR:%.*]], align 8
; ALL-NEXT: [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR:%.*]])
; ALL-NEXT: br i1 [[IS_PRIVATE]], label [[ATOMICRMW_PRIVATE:%.*]], label [[ATOMICRMW_GLOBAL:%.*]]
; ALL: atomicrmw.private:
; ALL-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
; ALL-NEXT: [[LOADED_PRIVATE:%.*]] = load double, ptr addrspace(5) [[TMP1]], align 8
; ALL-NEXT: [[NEW:%.*]] = fadd double [[LOADED_PRIVATE]], [[VALUE:%.*]]
; ALL-NEXT: store double [[NEW]], ptr addrspace(5) [[TMP1]], align 8
; ALL-NEXT: br label [[ATOMICRMW_PHI:%.*]]
; ALL: atomicrmw.global:
; ALL-NEXT: [[TMP2:%.*]] = load double, ptr [[PTR]], align 8
; ALL-NEXT: br label [[ATOMICRMW_START:%.*]]
; ALL: atomicrmw.start:
; ALL-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP5:%.*]], [[ATOMICRMW_START]] ]
; ALL-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
; ALL-NEXT: [[TMP2:%.*]] = bitcast double [[NEW]] to i64
; ALL-NEXT: [[TMP3:%.*]] = bitcast double [[LOADED]] to i64
; ALL-NEXT: [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i64 [[TMP3]], i64 [[TMP2]] monotonic monotonic, align 8
; ALL-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP4]], 1
; ALL-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP4]], 0
; ALL-NEXT: [[TMP5]] = bitcast i64 [[NEWLOADED]] to double
; ALL-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
; ALL-NEXT: [[LOADED:%.*]] = phi double [ [[TMP2]], [[ATOMICRMW_GLOBAL]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
; ALL-NEXT: [[NEW2:%.*]] = fadd double [[LOADED]], [[VALUE]]
; ALL-NEXT: [[TMP3:%.*]] = bitcast double [[NEW2]] to i64
; ALL-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
; ALL-NEXT: [[TMP5:%.*]] = cmpxchg ptr [[PTR]], i64 [[TMP4]], i64 [[TMP3]] monotonic monotonic, align 8
; ALL-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
; ALL-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
; ALL-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
; ALL-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END1:%.*]], label [[ATOMICRMW_START]]
; ALL: atomicrmw.end1:
; ALL-NEXT: br label [[ATOMICRMW_PHI]]
; ALL: atomicrmw.phi:
; ALL-NEXT: [[RET:%.*]] = phi double [ [[LOADED_PRIVATE]], [[ATOMICRMW_PRIVATE]] ], [ [[TMP6]], [[ATOMICRMW_END1]] ]
; ALL-NEXT: br label [[ATOMICRMW_END:%.*]]
; ALL: atomicrmw.end:
; ALL-NEXT: ret double [[TMP5]]
; ALL-NEXT: ret double [[RET]]
;
%ret = atomicrmw fadd ptr %ptr, double %value monotonic, !amdgpu.ignore.denormal.mode !0
ret double %ret
@ -4495,3 +4718,4 @@ attributes #4 = { "denormal-fp-math-f32"="dynamic,dynamic" }
attributes #5 = { "denormal-fp-math"="dynamic,dynamic" }

!0 = !{}
!1 = !{i32 5, i32 6}

@ -188,6 +188,41 @@ define half @test_atomicrmw_fmax_f16_local(ptr addrspace(3) %ptr, half %value) {

define double @test_atomicrmw_fmax_f64_flat(ptr %ptr, double %value) {
; GCN-LABEL: @test_atomicrmw_fmax_f64_flat(
; GCN-NEXT: [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR:%.*]])
; GCN-NEXT: br i1 [[IS_PRIVATE]], label [[ATOMICRMW_PRIVATE:%.*]], label [[ATOMICRMW_GLOBAL:%.*]]
; GCN: atomicrmw.private:
; GCN-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
; GCN-NEXT: [[LOADED_PRIVATE:%.*]] = load double, ptr addrspace(5) [[TMP1]], align 8
; GCN-NEXT: [[TMP2:%.*]] = call double @llvm.maxnum.f64(double [[LOADED_PRIVATE]], double [[VALUE:%.*]])
; GCN-NEXT: store double [[TMP2]], ptr addrspace(5) [[TMP1]], align 8
; GCN-NEXT: br label [[ATOMICRMW_PHI:%.*]]
; GCN: atomicrmw.global:
; GCN-NEXT: [[TMP3:%.*]] = load double, ptr [[PTR]], align 8
; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
; GCN: atomicrmw.start:
; GCN-NEXT: [[LOADED:%.*]] = phi double [ [[TMP3]], [[ATOMICRMW_GLOBAL]] ], [ [[TMP8:%.*]], [[ATOMICRMW_START]] ]
; GCN-NEXT: [[TMP4:%.*]] = call double @llvm.maxnum.f64(double [[LOADED]], double [[VALUE]])
; GCN-NEXT: [[TMP5:%.*]] = bitcast double [[TMP4]] to i64
; GCN-NEXT: [[TMP6:%.*]] = bitcast double [[LOADED]] to i64
; GCN-NEXT: [[TMP7:%.*]] = cmpxchg ptr [[PTR]], i64 [[TMP6]], i64 [[TMP5]] seq_cst seq_cst, align 8
; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP7]], 1
; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP7]], 0
; GCN-NEXT: [[TMP8]] = bitcast i64 [[NEWLOADED]] to double
; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END1:%.*]], label [[ATOMICRMW_START]]
; GCN: atomicrmw.end1:
; GCN-NEXT: br label [[ATOMICRMW_PHI]]
; GCN: atomicrmw.phi:
; GCN-NEXT: [[RES:%.*]] = phi double [ [[LOADED_PRIVATE]], [[ATOMICRMW_PRIVATE]] ], [ [[TMP8]], [[ATOMICRMW_END1]] ]
; GCN-NEXT: br label [[ATOMICRMW_END:%.*]]
; GCN: atomicrmw.end:
; GCN-NEXT: ret double [[RES]]
;
%res = atomicrmw fmax ptr %ptr, double %value seq_cst
ret double %res
}

define double @test_atomicrmw_fmax_f64_flat__noprivate(ptr %ptr, double %value) {
; GCN-LABEL: @test_atomicrmw_fmax_f64_flat__noprivate(
; GCN-NEXT: [[TMP1:%.*]] = load double, ptr [[PTR:%.*]], align 8
; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
; GCN: atomicrmw.start:
@ -203,7 +238,7 @@ define double @test_atomicrmw_fmax_f64_flat(ptr %ptr, double %value) {
; GCN: atomicrmw.end:
; GCN-NEXT: ret double [[TMP6]]
;
%res = atomicrmw fmax ptr %ptr, double %value seq_cst
%res = atomicrmw fmax ptr %ptr, double %value seq_cst, !noalias.addrspace !0
ret double %res
}

@ -257,6 +292,9 @@ define double @test_atomicrmw_fmax_f64_global_strictfp(ptr addrspace(1) %ptr, do
%res = atomicrmw fmax ptr addrspace(1) %ptr, double %value seq_cst
ret double %res
}

!0 = !{i32 5, i32 6}

;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX7: {{.*}}
; GFX9: {{.*}}
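A note on the !0 node used by the __noprivate variants: !noalias.addrspace holds pairs of integers encoding [low, high) ranges of address spaces the access is known not to touch, so !0 = !{i32 5, i32 6} asserts the pointer never resolves to address space 5 (private/scratch). A minimal sketch, with an illustrative function name, of annotating a flat atomic so the target can keep the hardware instruction rather than emit the is.private predication shown in the checks above:

define double @fadd_flat_known_not_private(ptr %ptr, double %value) {
  ; The range [5, 6) excludes exactly the private address space, so no
  ; expansion into private and global paths is required for this atomicrmw.
  %res = atomicrmw fadd ptr %ptr, double %value seq_cst, align 8, !noalias.addrspace !0
  ret double %res
}

!0 = !{i32 5, i32 6}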
@ -188,6 +188,41 @@ define half @test_atomicrmw_fmin_f16_local(ptr addrspace(3) %ptr, half %value) {

define double @test_atomicrmw_fmin_f64_flat(ptr %ptr, double %value) {
; GCN-LABEL: @test_atomicrmw_fmin_f64_flat(
; GCN-NEXT: [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR:%.*]])
; GCN-NEXT: br i1 [[IS_PRIVATE]], label [[ATOMICRMW_PRIVATE:%.*]], label [[ATOMICRMW_GLOBAL:%.*]]
; GCN: atomicrmw.private:
; GCN-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
; GCN-NEXT: [[LOADED_PRIVATE:%.*]] = load double, ptr addrspace(5) [[TMP1]], align 8
; GCN-NEXT: [[TMP2:%.*]] = call double @llvm.minnum.f64(double [[LOADED_PRIVATE]], double [[VALUE:%.*]])
; GCN-NEXT: store double [[TMP2]], ptr addrspace(5) [[TMP1]], align 8
; GCN-NEXT: br label [[ATOMICRMW_PHI:%.*]]
; GCN: atomicrmw.global:
; GCN-NEXT: [[TMP3:%.*]] = load double, ptr [[PTR]], align 8
; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
; GCN: atomicrmw.start:
; GCN-NEXT: [[LOADED:%.*]] = phi double [ [[TMP3]], [[ATOMICRMW_GLOBAL]] ], [ [[TMP8:%.*]], [[ATOMICRMW_START]] ]
; GCN-NEXT: [[TMP4:%.*]] = call double @llvm.minnum.f64(double [[LOADED]], double [[VALUE]])
; GCN-NEXT: [[TMP5:%.*]] = bitcast double [[TMP4]] to i64
; GCN-NEXT: [[TMP6:%.*]] = bitcast double [[LOADED]] to i64
; GCN-NEXT: [[TMP7:%.*]] = cmpxchg ptr [[PTR]], i64 [[TMP6]], i64 [[TMP5]] seq_cst seq_cst, align 8
; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP7]], 1
; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP7]], 0
; GCN-NEXT: [[TMP8]] = bitcast i64 [[NEWLOADED]] to double
; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END1:%.*]], label [[ATOMICRMW_START]]
; GCN: atomicrmw.end1:
; GCN-NEXT: br label [[ATOMICRMW_PHI]]
; GCN: atomicrmw.phi:
; GCN-NEXT: [[RES:%.*]] = phi double [ [[LOADED_PRIVATE]], [[ATOMICRMW_PRIVATE]] ], [ [[TMP8]], [[ATOMICRMW_END1]] ]
; GCN-NEXT: br label [[ATOMICRMW_END:%.*]]
; GCN: atomicrmw.end:
; GCN-NEXT: ret double [[RES]]
;
%res = atomicrmw fmin ptr %ptr, double %value seq_cst
ret double %res
}

define double @test_atomicrmw_fmin_f64_flat__noprivate(ptr %ptr, double %value) {
; GCN-LABEL: @test_atomicrmw_fmin_f64_flat__noprivate(
; GCN-NEXT: [[TMP1:%.*]] = load double, ptr [[PTR:%.*]], align 8
; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
; GCN: atomicrmw.start:
@ -203,7 +238,7 @@ define double @test_atomicrmw_fmin_f64_flat(ptr %ptr, double %value) {
; GCN: atomicrmw.end:
; GCN-NEXT: ret double [[TMP6]]
;
%res = atomicrmw fmin ptr %ptr, double %value seq_cst
%res = atomicrmw fmin ptr %ptr, double %value seq_cst, !noalias.addrspace !0
ret double %res
}

@ -257,6 +292,9 @@ define double @test_atomicrmw_fmin_f64_global_strictfp(ptr addrspace(1) %ptr, do
%res = atomicrmw fmin ptr addrspace(1) %ptr, double %value seq_cst
ret double %res
}

!0 = !{i32 5, i32 6}

;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX7: {{.*}}
; GFX9: {{.*}}
@ -198,8 +198,8 @@ define half @test_atomicrmw_fsub_f16_local(ptr addrspace(3) %ptr, half %value) {
ret half %res
}

define double @test_atomicrmw_fsub_f64_flat(ptr %ptr, double %value) {
; GCN-LABEL: @test_atomicrmw_fsub_f64_flat(
define double @test_atomicrmw_fsub_f64_flat__noprivate(ptr %ptr, double %value) {
; GCN-LABEL: @test_atomicrmw_fsub_f64_flat__noprivate(
; GCN-NEXT: [[TMP1:%.*]] = load double, ptr [[PTR:%.*]], align 8
; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
; GCN: atomicrmw.start:
@ -214,6 +214,41 @@ define double @test_atomicrmw_fsub_f64_flat(ptr %ptr, double %value) {
; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
; GCN: atomicrmw.end:
; GCN-NEXT: ret double [[TMP5]]
;
%res = atomicrmw fsub ptr %ptr, double %value seq_cst, !noalias.addrspace !0
ret double %res
}

define double @test_atomicrmw_fsub_f64_flat(ptr %ptr, double %value) {
; GCN-LABEL: @test_atomicrmw_fsub_f64_flat(
; GCN-NEXT: [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR:%.*]])
; GCN-NEXT: br i1 [[IS_PRIVATE]], label [[ATOMICRMW_PRIVATE:%.*]], label [[ATOMICRMW_GLOBAL:%.*]]
; GCN: atomicrmw.private:
; GCN-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
; GCN-NEXT: [[LOADED_PRIVATE:%.*]] = load double, ptr addrspace(5) [[TMP1]], align 8
; GCN-NEXT: [[NEW:%.*]] = fsub double [[LOADED_PRIVATE]], [[VALUE:%.*]]
; GCN-NEXT: store double [[NEW]], ptr addrspace(5) [[TMP1]], align 8
; GCN-NEXT: br label [[ATOMICRMW_PHI:%.*]]
; GCN: atomicrmw.global:
; GCN-NEXT: [[TMP2:%.*]] = load double, ptr [[PTR]], align 8
; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
; GCN: atomicrmw.start:
; GCN-NEXT: [[LOADED:%.*]] = phi double [ [[TMP2]], [[ATOMICRMW_GLOBAL]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
; GCN-NEXT: [[NEW2:%.*]] = fsub double [[LOADED]], [[VALUE]]
; GCN-NEXT: [[TMP3:%.*]] = bitcast double [[NEW2]] to i64
; GCN-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
; GCN-NEXT: [[TMP5:%.*]] = cmpxchg ptr [[PTR]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst, align 8
; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
; GCN-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END1:%.*]], label [[ATOMICRMW_START]]
; GCN: atomicrmw.end1:
; GCN-NEXT: br label [[ATOMICRMW_PHI]]
; GCN: atomicrmw.phi:
; GCN-NEXT: [[RES:%.*]] = phi double [ [[LOADED_PRIVATE]], [[ATOMICRMW_PRIVATE]] ], [ [[TMP6]], [[ATOMICRMW_END1]] ]
; GCN-NEXT: br label [[ATOMICRMW_END:%.*]]
; GCN: atomicrmw.end:
; GCN-NEXT: ret double [[RES]]
;
%res = atomicrmw fsub ptr %ptr, double %value seq_cst
ret double %res
@ -625,3 +660,5 @@ define bfloat @test_atomicrmw_fadd_bf16_flat_system_align4(ptr %ptr, bfloat %val
%res = atomicrmw fadd ptr %ptr, bfloat %value monotonic, align 4
ret bfloat %res
}

!0 = !{i32 5, i32 6}
File diff suppressed because it is too large
@ -35,8 +35,22 @@ define i32 @test_atomicrmw_or_0_global_one_as(ptr addrspace(1) %ptr) {
define i32 @test_atomicrmw_or_0_flat_system(ptr %ptr) {
; CHECK-LABEL: define i32 @test_atomicrmw_or_0_flat_system(
; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[RES:%.*]] = atomicrmw add ptr [[PTR]], i32 0 seq_cst, align 4
; CHECK-NEXT: ret i32 [[RES]]
; CHECK-NEXT: [[IS_PRIVATE:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[PTR]])
; CHECK-NEXT: br i1 [[IS_PRIVATE]], label [[ATOMICRMW_PRIVATE:%.*]], label [[ATOMICRMW_GLOBAL:%.*]]
; CHECK: atomicrmw.private:
; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr [[PTR]] to ptr addrspace(5)
; CHECK-NEXT: [[LOADED_PRIVATE:%.*]] = load i32, ptr addrspace(5) [[TMP1]], align 4
; CHECK-NEXT: [[NEW:%.*]] = or i32 [[LOADED_PRIVATE]], 0
; CHECK-NEXT: store i32 [[NEW]], ptr addrspace(5) [[TMP1]], align 4
; CHECK-NEXT: br label [[ATOMICRMW_PHI:%.*]]
; CHECK: atomicrmw.global:
; CHECK-NEXT: [[RES:%.*]] = atomicrmw add ptr [[PTR]], i32 0 seq_cst, align 4, !noalias.addrspace [[META1:![0-9]+]]
; CHECK-NEXT: br label [[ATOMICRMW_PHI]]
; CHECK: atomicrmw.phi:
; CHECK-NEXT: [[RES1:%.*]] = phi i32 [ [[LOADED_PRIVATE]], [[ATOMICRMW_PRIVATE]] ], [ [[RES]], [[ATOMICRMW_GLOBAL]] ]
; CHECK-NEXT: br label [[ATOMICRMW_END:%.*]]
; CHECK: atomicrmw.end:
; CHECK-NEXT: ret i32 [[RES1]]
;
%res = atomicrmw or ptr %ptr, i32 0 seq_cst
ret i32 %res
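The check lines above also show the idempotent rewrite interacting with the predication: on the atomicrmw.global path, atomicrmw or %ptr, i32 0 is re-emitted as the equivalent atomicrmw add %ptr, i32 0 carrying !noalias.addrspace ([[META1]]), recording that this path is only reached when the pointer is not private, so the cloned atomic is not predicated a second time. A sketch of the cloned instruction, with !1 as an assumed stand-in for the autogenerated [[META1]] node:

  ; Clone emitted in atomicrmw.global; the metadata marks it as never
  ; addressing address space 5.
  %res = atomicrmw add ptr %ptr, i32 0 seq_cst, align 4, !noalias.addrspace !1

  !1 = !{i32 5, i32 6}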
@ -24,7 +24,7 @@ entry:
%i = add nsw i32 %a, -1
%i.2 = sext i32 %i to i64
%i.3 = getelementptr inbounds double, ptr %b, i64 %i.2
%i.4 = atomicrmw fadd ptr %i.3, double %c syncscope("agent") seq_cst, align 8, !amdgpu.no.fine.grained.memory !0
%i.4 = atomicrmw fadd ptr %i.3, double %c syncscope("agent") seq_cst, align 8, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@ -59,7 +59,7 @@ entry:
%i.2 = sext i32 %i to i64
%i.3 = getelementptr inbounds double, ptr addrspace(1) %b, i64 %i.2
%i.4 = addrspacecast ptr addrspace(1) %i.3 to ptr
%0 = atomicrmw fadd ptr %i.4, double %c syncscope("agent") seq_cst, align 8, !amdgpu.no.fine.grained.memory !0
%0 = atomicrmw fadd ptr %i.4, double %c syncscope("agent") seq_cst, align 8, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@ -107,9 +107,9 @@ bb1: ; preds = %entry
%i.7 = ptrtoint ptr addrspace(1) %i.3 to i64
%i.8 = add nsw i64 %i.7, 1
%i.9 = inttoptr i64 %i.8 to ptr addrspace(1)
%0 = atomicrmw fadd ptr %d, double %c syncscope("agent") seq_cst, align 8, !amdgpu.no.fine.grained.memory !0
%0 = atomicrmw fadd ptr %d, double %c syncscope("agent") seq_cst, align 8, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
%i.11 = addrspacecast ptr addrspace(1) %i.9 to ptr
%1 = atomicrmw fadd ptr %i.11, double %c syncscope("agent") seq_cst, align 8, !amdgpu.no.fine.grained.memory !0
%1 = atomicrmw fadd ptr %i.11, double %c syncscope("agent") seq_cst, align 8, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
ret void
}

@ -175,3 +175,4 @@ attributes #0 = { nocallback nofree nounwind willreturn memory(argmem: readwrite
attributes #1 = { mustprogress nounwind willreturn memory(argmem: readwrite) "target-cpu"="gfx90a" }

!0 = !{}
!1 = !{i32 5, i32 6}