
Start considering !amdgpu.no.remote.memory.access and !amdgpu.no.fine.grained.host.memory metadata when deciding whether to expand integer atomic operations. This does not yet attempt to accurately handle fadd/fmin/fmax, which are trickier and require migrating the old "amdgpu-unsafe-fp-atomics" attribute.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 < %s | FileCheck -enable-var-scope --check-prefix=GCN %s
define void @shl_base_atomicrmw_global_ptr(ptr addrspace(1) %out, ptr addrspace(1) %extra.use, ptr addrspace(1) %ptr) #0 {
|
|
; GCN-LABEL: shl_base_atomicrmw_global_ptr:
|
|
; GCN: ; %bb.0:
|
|
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GCN-NEXT: v_lshlrev_b64 v[0:1], 2, v[4:5]
|
|
; GCN-NEXT: v_mov_b32_e32 v6, 3
|
|
; GCN-NEXT: global_atomic_and v[0:1], v6, off offset:512
|
|
; GCN-NEXT: s_waitcnt vmcnt(0)
|
|
; GCN-NEXT: buffer_wbinvl1_vol
|
|
; GCN-NEXT: v_add_co_u32_e32 v0, vcc, 0x80, v4
|
|
; GCN-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v5, vcc
|
|
; GCN-NEXT: global_store_dwordx2 v[2:3], v[0:1], off
|
|
; GCN-NEXT: s_waitcnt vmcnt(0)
|
|
; GCN-NEXT: s_setpc_b64 s[30:31]
|
|
%arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 32
|
|
%cast = ptrtoint ptr addrspace(1) %arrayidx0 to i64
|
|
%shl = shl i64 %cast, 2
|
|
%castback = inttoptr i64 %shl to ptr addrspace(1)
|
|
%val = atomicrmw and ptr addrspace(1) %castback, i32 3 syncscope("agent") seq_cst, !amdgpu.no.fine.grained.memory !0
|
|
store volatile i64 %cast, ptr addrspace(1) %extra.use, align 4
|
|
ret void
|
|
}
|
|
|
|
define void @shl_base_global_ptr_global_atomic_fadd(ptr addrspace(1) %out, ptr addrspace(1) %extra.use, ptr addrspace(1) %ptr) #0 {
|
|
; GCN-LABEL: shl_base_global_ptr_global_atomic_fadd:
|
|
; GCN: ; %bb.0:
|
|
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GCN-NEXT: v_lshlrev_b64 v[0:1], 2, v[4:5]
|
|
; GCN-NEXT: v_mov_b32_e32 v6, 0x42c80000
|
|
; GCN-NEXT: global_atomic_add_f32 v[0:1], v6, off offset:512
|
|
; GCN-NEXT: v_add_co_u32_e32 v0, vcc, 0x80, v4
|
|
; GCN-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v5, vcc
|
|
; GCN-NEXT: global_store_dwordx2 v[2:3], v[0:1], off
|
|
; GCN-NEXT: s_waitcnt vmcnt(0)
|
|
; GCN-NEXT: s_setpc_b64 s[30:31]
|
|
%arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(1) %ptr, i64 0, i64 32
|
|
%cast = ptrtoint ptr addrspace(1) %arrayidx0 to i64
|
|
%shl = shl i64 %cast, 2
|
|
%castback = inttoptr i64 %shl to ptr addrspace(1)
|
|
%unused = atomicrmw fadd ptr addrspace(1) %castback, float 100.0 syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0
|
|
store volatile i64 %cast, ptr addrspace(1) %extra.use, align 4
|
|
ret void
|
|
}
|
|
|
|
attributes #0 = { nounwind }
|
|
attributes #1 = { argmemonly nounwind willreturn }
|
|
|
|
!0 = !{}
|