; Commit context: System scope atomics need to use cmpxchg loops if we know
; nothing about the allocation the address is from.
; aea5980e26e6a87dab9f8acb10eb3a59dd143cb1 started this; this expands the set
; to cover the remaining integer operations. Don't expand xchg and add; those
; theoretically should work over PCIe. This is a pre-commit which will
; introduce performance regressions. Subsequent changes will add handling of
; new atomicrmw metadata, which will avoid the expansion.
; Note this still isn't conservative enough; we do need to expand some device
; scope atomics if the memory is in fine-grained remote memory.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -amdgpu-atomic-optimizer-strategy=None < %s | FileCheck -enable-var-scope -check-prefixes=SI %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga -amdgpu-atomic-optimizer-strategy=None < %s | FileCheck -enable-var-scope -check-prefixes=VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -amdgpu-atomic-optimizer-strategy=None < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s

; ---------------------------------------------------------------------
; atomicrmw xchg
; ---------------------------------------------------------------------
define void @global_atomic_xchg_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_xchg_i32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_i32_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_atomic_swap v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_i32_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_swap v[0:1], v2, off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; System-scope seq_cst xchg selects to the native swap instruction on all
  ; three targets (no cmpxchg-loop expansion); per the file's policy note,
  ; xchg is expected to work over PCIe.
  %tmp0 = atomicrmw xchg ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
define void @global_atomic_xchg_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_xchg_i32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_i32_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_swap v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_i32_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_swap v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; Same as the base noret test, but the constant 16-byte GEP offset is folded
  ; into the SI/GFX9 instruction encodings; VI computes the address with a
  ; 64-bit add instead.
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw xchg ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
define i32 @global_atomic_xchg_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_xchg_i32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_i32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_atomic_swap v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_i32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_swap v0, v[0:1], v2, off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; Returned-value form: the atomic carries the glc modifier so the old value
  ; is read back into the destination register.
  %result = atomicrmw xchg ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
define i32 @global_atomic_xchg_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_xchg_i32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_i32_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_swap v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_i32_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; Returned-value form with a folded (SI/GFX9) or add-computed (VI) 16-byte
  ; offset; glc reads the old value back.
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw xchg ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
define amdgpu_gfx void @global_atomic_xchg_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_xchg_i32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v1, s6, 0
; SI-NEXT: v_writelane_b32 v1, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_swap v0, off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v1, 1
; SI-NEXT: v_readlane_b32 s6, v1, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_i32_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: flat_atomic_swap v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_i32_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_swap v0, v1, s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; Uniform (inreg/SGPR) pointer and data under the amdgpu_gfx calling
  ; convention. On SI this forces s6/s7 to be spilled via writelane/readlane
  ; around the buffer atomic; GFX9 uses the SGPR-addressed global_atomic form.
  %tmp0 = atomicrmw xchg ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
define amdgpu_gfx void @global_atomic_xchg_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_xchg_i32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v1, s6, 0
; SI-NEXT: v_writelane_b32 v1, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_swap v0, off, s[4:7], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v1, 1
; SI-NEXT: v_readlane_b32 s6, v1, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_i32_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: flat_atomic_swap v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_i32_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_swap v0, v1, s[4:5] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; Uniform-operand variant with a 16-byte offset: SI/GFX9 fold it into the
  ; instruction; VI performs a scalar 64-bit add (s_add_u32/s_addc_u32) first.
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw xchg ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
define amdgpu_gfx i32 @global_atomic_xchg_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_xchg_i32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v1, s6, 0
; SI-NEXT: v_writelane_b32 v1, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_swap v0, off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v1, 1
; SI-NEXT: v_readlane_b32 s6, v1, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_i32_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: flat_atomic_swap v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_i32_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_swap v0, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; Uniform-operand returned-value form; the glc modifier reads the old value
  ; into v0, which is also the return register.
  %result = atomicrmw xchg ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
define amdgpu_gfx i32 @global_atomic_xchg_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_xchg_i32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v1, s6, 0
; SI-NEXT: v_writelane_b32 v1, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_swap v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v1, 1
; SI-NEXT: v_readlane_b32 s6, v1, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_i32_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: flat_atomic_swap v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_i32_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_swap v0, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; Uniform-operand returned-value form with folded (SI/GFX9) or
  ; scalar-add-computed (VI) 16-byte offset.
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw xchg ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
define void @global_atomic_xchg_i32_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_xchg_i32_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_i32_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_swap v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_swap v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; Carries the !amdgpu.no.remote.memory metadata; codegen here matches the
  ; unannotated offset test since xchg is never expanded to a cmpxchg loop.
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw xchg ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
define i32 @global_atomic_xchg_i32_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_xchg_i32_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_swap v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; Returned-value form of the !amdgpu.no.remote.memory test; codegen matches
  ; the unannotated variant (xchg is never cmpxchg-expanded).
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %result = atomicrmw xchg ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %result
}
; ---------------------------------------------------------------------
; atomicrmw xchg f32
; ---------------------------------------------------------------------
define void @global_atomic_xchg_f32_noret(ptr addrspace(1) %ptr, float %in) {
; SI-LABEL: global_atomic_xchg_f32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_f32_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_atomic_swap v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_f32_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_swap v[0:1], v2, off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; f32 xchg is bitwise and selects to the same swap instruction as the i32
  ; form; no cmpxchg-loop expansion.
  %tmp0 = atomicrmw xchg ptr addrspace(1) %ptr, float %in seq_cst
  ret void
}
define void @global_atomic_xchg_f32_noret_offset(ptr addrspace(1) %out, float %in) {
; SI-LABEL: global_atomic_xchg_f32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_f32_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_swap v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_f32_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_swap v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; f32 variant of the offset test; SI/GFX9 fold the 16-byte offset, VI adds
  ; it to the address explicitly.
  %gep = getelementptr float, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw xchg ptr addrspace(1) %gep, float %in seq_cst
  ret void
}
define float @global_atomic_xchg_f32_ret(ptr addrspace(1) %ptr, float %in) {
; SI-LABEL: global_atomic_xchg_f32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_f32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_atomic_swap v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_f32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_swap v0, v[0:1], v2, off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; f32 returned-value form; glc reads the old value back. Identical selection
  ; to the i32 ret test.
  %result = atomicrmw xchg ptr addrspace(1) %ptr, float %in seq_cst
  ret float %result
}
define float @global_atomic_xchg_f32_ret_offset(ptr addrspace(1) %out, float %in) {
; SI-LABEL: global_atomic_xchg_f32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_f32_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_swap v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_f32_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; f32 returned-value form with folded (SI/GFX9) or add-computed (VI)
  ; 16-byte offset.
  %gep = getelementptr float, ptr addrspace(1) %out, i32 4
  %result = atomicrmw xchg ptr addrspace(1) %gep, float %in seq_cst
  ret float %result
}
define amdgpu_gfx void @global_atomic_xchg_f32_noret_scalar(ptr addrspace(1) inreg %ptr, float inreg %in) {
; SI-LABEL: global_atomic_xchg_f32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v1, s6, 0
; SI-NEXT: v_writelane_b32 v1, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_swap v0, off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v1, 1
; SI-NEXT: v_readlane_b32 s6, v1, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_f32_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: flat_atomic_swap v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_f32_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_swap v0, v1, s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; f32 uniform-operand (inreg) variant; same SI SGPR spill/restore dance and
  ; SGPR-addressed GFX9 form as the i32 scalar test.
  %tmp0 = atomicrmw xchg ptr addrspace(1) %ptr, float %in seq_cst
  ret void
}
define amdgpu_gfx void @global_atomic_xchg_f32_noret_offset_scalar(ptr addrspace(1) inreg %out, float inreg %in) {
; SI-LABEL: global_atomic_xchg_f32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v1, s6, 0
; SI-NEXT: v_writelane_b32 v1, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_swap v0, off, s[4:7], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v1, 1
; SI-NEXT: v_readlane_b32 s6, v1, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_f32_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: flat_atomic_swap v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_f32_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_swap v0, v1, s[4:5] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; f32 uniform-operand variant with 16-byte offset: folded on SI/GFX9,
  ; scalar 64-bit add on VI.
  %gep = getelementptr float, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw xchg ptr addrspace(1) %gep, float %in seq_cst
  ret void
}
define amdgpu_gfx float @global_atomic_xchg_f32_ret_scalar(ptr addrspace(1) inreg %ptr, float inreg %in) {
; SI-LABEL: global_atomic_xchg_f32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v1, s6, 0
; SI-NEXT: v_writelane_b32 v1, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_swap v0, off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v1, 1
; SI-NEXT: v_readlane_b32 s6, v1, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_f32_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: flat_atomic_swap v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_f32_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_swap v0, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; f32 uniform-operand returned-value form; glc reads the old value into the
  ; v0 return register.
  %result = atomicrmw xchg ptr addrspace(1) %ptr, float %in seq_cst
  ret float %result
}
define amdgpu_gfx float @global_atomic_xchg_f32_ret_offset_scalar(ptr addrspace(1) inreg %out, float inreg %in) {
; SI-LABEL: global_atomic_xchg_f32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v1, s6, 0
; SI-NEXT: v_writelane_b32 v1, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_swap v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v1, 1
; SI-NEXT: v_readlane_b32 s6, v1, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_f32_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: flat_atomic_swap v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_f32_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_swap v0, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; f32 uniform-operand returned-value form with folded (SI/GFX9) or
  ; scalar-add-computed (VI) 16-byte offset.
  %gep = getelementptr float, ptr addrspace(1) %out, i32 4
  %result = atomicrmw xchg ptr addrspace(1) %gep, float %in seq_cst
  ret float %result
}
define void @global_atomic_xchg_f32_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, float %in) {
; SI-LABEL: global_atomic_xchg_f32_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_f32_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_swap v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_f32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_swap v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; Consistency fix: use a float element type for the GEP like the rest of the
  ; f32 section (was `getelementptr i32`); float and i32 are both 4 bytes, so
  ; the byte offset (16) and the generated code are unchanged.
  ; Carries !amdgpu.no.remote.memory; codegen matches the unannotated test
  ; since xchg is never expanded to a cmpxchg loop.
  %gep = getelementptr float, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw xchg ptr addrspace(1) %gep, float %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
define float @global_atomic_xchg_f32_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, float %in) {
; Same as the noret variant but the old value is used, so the swap carries
; glc and the returned data is copied into v0.
; SI-LABEL: global_atomic_xchg_f32_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xchg_f32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_swap v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xchg_f32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_swap v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %result = atomicrmw xchg ptr addrspace(1) %gep, float %in seq_cst, !amdgpu.no.remote.memory !0
  ret float %result
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw add
|
|
; ---------------------------------------------------------------------
|
|
|
|
define void @global_atomic_add_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
; add is not expanded to a cmpxchg loop even at system scope; a native
; buffer/flat/global atomic add is selected on all three targets.
; SI-LABEL: global_atomic_add_i32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_add v2, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_add_i32_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_atomic_add v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_add_i32_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_add v[0:1], v2, off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw add ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
|
|
|
|
define void @global_atomic_add_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
; Same as global_atomic_add_i32_noret with a constant +16 byte offset;
; SI/GFX9 fold it into the instruction offset, VI materializes the add.
; SI-LABEL: global_atomic_add_i32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_add v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_add_i32_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_add v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_add_i32_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_add v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw add ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
|
|
|
|
define i32 @global_atomic_add_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
; Value-returning add: native atomic add with glc to fetch the old value.
; SI-LABEL: global_atomic_add_i32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_add v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_add_i32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_atomic_add v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_add_i32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_add v0, v[0:1], v2, off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw add ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
define i32 @global_atomic_add_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
; Value-returning add at a +16 byte offset; still a single native atomic.
; SI-LABEL: global_atomic_add_i32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_add v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_add_i32_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_add v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_add_i32_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_add v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw add ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
define amdgpu_gfx void @global_atomic_add_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; Uniform (inreg) operands under the amdgpu_gfx calling convention; SI has
; no SGPR-addressed global atomic form for this, so it spills a VGPR lane
; pair (writelane/readlane) to free s[6:7] for the buffer descriptor.
; SI-LABEL: global_atomic_add_i32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v1, s6, 0
; SI-NEXT: v_writelane_b32 v1, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_add v0, off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v1, 1
; SI-NEXT: v_readlane_b32 s6, v1, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_add_i32_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: flat_atomic_add v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_add_i32_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_add v0, v1, s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw add ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
|
|
|
|
define amdgpu_gfx void @global_atomic_add_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; Uniform-operand add at +16 bytes; VI computes the offset address in
; scalar registers, SI/GFX9 fold it into the instruction offset field.
; SI-LABEL: global_atomic_add_i32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v1, s6, 0
; SI-NEXT: v_writelane_b32 v1, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_add v0, off, s[4:7], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v1, 1
; SI-NEXT: v_readlane_b32 s6, v1, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_add_i32_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: flat_atomic_add v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_add_i32_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_add v0, v1, s[4:5] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw add ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
|
|
|
|
define amdgpu_gfx i32 @global_atomic_add_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; Uniform-operand add returning the old value (glc set); SI again needs
; the writelane/readlane SGPR save dance around the buffer descriptor.
; SI-LABEL: global_atomic_add_i32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v1, s6, 0
; SI-NEXT: v_writelane_b32 v1, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_add v0, off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v1, 1
; SI-NEXT: v_readlane_b32 s6, v1, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_add_i32_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: flat_atomic_add v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_add_i32_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_add v0, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw add ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
define amdgpu_gfx i32 @global_atomic_add_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; Uniform-operand, value-returning add at +16 bytes.
; SI-LABEL: global_atomic_add_i32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v1, s6, 0
; SI-NEXT: v_writelane_b32 v1, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_add v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v1, 1
; SI-NEXT: v_readlane_b32 s6, v1, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_add_i32_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: flat_atomic_add v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_add_i32_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_add v0, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw add ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
define void @global_atomic_add_i32_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; With !amdgpu.no.remote.memory the codegen matches the plain variant:
; a single native atomic add (add is never loop-expanded here).
; SI-LABEL: global_atomic_add_i32_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_add v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_add_i32_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_add v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_add_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_add v[0:1], v2, off offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw add ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
|
|
|
|
define i32 @global_atomic_add_i32_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; Value-returning counterpart of the no.remote.memory add test above.
; SI-LABEL: global_atomic_add_i32_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_add v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_add_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_add v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_add_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_add v0, v[0:1], v2, off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %result = atomicrmw add ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %result
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw sub
|
|
; ---------------------------------------------------------------------
|
|
|
|
define void @global_atomic_sub_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
; System-scope sub with no allocation/metadata info: expanded to a
; load + cmpxchg retry loop (%atomicrmw.start) instead of a native
; atomic sub, since remote (e.g. PCIe) memory cannot be assumed to
; support it.
; SI-LABEL: global_atomic_sub_i32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB30_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_sub_i32_e32 v3, vcc, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB30_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_i32_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB30_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_sub_u32_e32 v3, vcc, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB30_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i32_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB30_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_sub_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB30_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw sub ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
|
|
|
|
define void @global_atomic_sub_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
; Same cmpxchg-loop expansion as global_atomic_sub_i32_noret, with the
; +16 byte offset folded into the load/cmpswap (SI/GFX9) or into the
; address (VI).
; SI-LABEL: global_atomic_sub_i32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB31_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_sub_i32_e32 v3, vcc, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB31_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_i32_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB31_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_sub_u32_e32 v3, vcc, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB31_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i32_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB31_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_sub_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB31_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw sub ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
|
|
|
|
define i32 @global_atomic_sub_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
; Value-returning sub: same cmpxchg-loop expansion; the loop's last
; observed value (the cmpswap result) is the atomicrmw's return value.
; SI-LABEL: global_atomic_sub_i32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB32_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_sub_i32_e32 v4, vcc, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB32_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_i32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB32_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: v_sub_u32_e32 v3, vcc, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB32_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: v_mov_b32_e32 v0, v3
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB32_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_sub_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB32_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw sub ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
define i32 @global_atomic_sub_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
; Value-returning sub at +16 bytes; cmpxchg-loop expansion as above.
; SI-LABEL: global_atomic_sub_i32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB33_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_sub_i32_e32 v4, vcc, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB33_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_i32_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB33_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_sub_u32_e32 v0, vcc, v1, v2
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB33_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i32_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB33_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_sub_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB33_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw sub ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; seq_cst atomicrmw sub, no-return form, with uniform (inreg/SGPR) pointer and
; operand. With no aliasing metadata on the atomicrmw, all three targets expand
; it to a load + cmpswap retry loop (%atomicrmw.start / %atomicrmw.end blocks).
; NOTE(review): the CHECK lines below are autogenerated by
; utils/update_llc_test_checks.py -- regenerate rather than hand-edit.
define amdgpu_gfx void @global_atomic_sub_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_sub_i32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB34_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_subrev_i32_e32 v0, vcc, s34, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB34_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_i32_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB34_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_subrev_u32_e32 v2, vcc, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB34_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i32_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB34_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_subrev_u32_e32 v0, s6, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB34_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw sub ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
|
|
|
|
; Same as global_atomic_sub_i32_noret_scalar, but through a +16-byte GEP so the
; immediate offset folding is also checked (SI/GFX9 fold offset:16 into the
; memory instruction; VI materializes the address with s_add_u32/s_addc_u32).
; NOTE(review): CHECK lines are autogenerated -- regenerate, do not hand-edit.
define amdgpu_gfx void @global_atomic_sub_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_sub_i32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB35_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_subrev_i32_e32 v0, vcc, s34, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB35_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_i32_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB35_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_subrev_u32_e32 v2, vcc, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB35_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i32_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB35_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_subrev_u32_e32 v0, s6, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB35_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw sub ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
|
|
|
|
; Value-returning variant of the scalar sub test: the cmpswap loop must also
; shuffle the loaded value into the return register (v0) after %atomicrmw.end.
; NOTE(review): CHECK lines are autogenerated -- regenerate, do not hand-edit.
define amdgpu_gfx i32 @global_atomic_sub_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_sub_i32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB36_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_subrev_i32_e32 v1, vcc, s34, v2
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB36_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_i32_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v2, s5
; VI-NEXT: .LBB36_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_subrev_u32_e32 v3, vcc, s6, v4
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB36_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i32_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB36_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_subrev_u32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB36_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw sub ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; Value-returning scalar sub through a +16-byte GEP: checks the cmpswap retry
; loop together with offset folding (SI/GFX9 use offset:16; VI materializes the
; offset address with s_add_u32/s_addc_u32).
; NOTE(review): CHECK lines are autogenerated -- regenerate, do not hand-edit.
define amdgpu_gfx i32 @global_atomic_sub_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_sub_i32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB37_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_subrev_i32_e32 v1, vcc, s34, v2
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB37_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_i32_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v1, s34
; VI-NEXT: v_mov_b32_e32 v2, s35
; VI-NEXT: flat_load_dword v0, v[1:2]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB37_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_subrev_u32_e32 v3, vcc, s6, v4
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB37_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i32_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB37_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_subrev_u32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB37_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw sub ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; atomicrmw sub with a constant 0 operand: no cmpswap loop is generated --
; the checks show it selected as a hardware atomic add (buffer_atomic_add /
; flat_atomic_add / global_atomic_add) on all three targets.
; NOTE(review): CHECK lines are autogenerated -- regenerate, do not hand-edit.
define i32 @global_atomic_sub_0_i32_ret(ptr addrspace(1) %ptr) {
; SI-LABEL: global_atomic_sub_0_i32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_add v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_0_i32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, 0
; VI-NEXT: flat_atomic_add v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_0_i32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_add v0, v[0:1], v2, off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw sub ptr addrspace(1) %ptr, i32 0 seq_cst
  ret i32 %result
}
|
|
|
|
; Same sub operation but the atomicrmw carries !amdgpu.no.remote.memory
; metadata. As generated here it is still expanded to a cmpswap loop;
; NOTE(review): per the change description this is the pre-commit state --
; later handling of the metadata is expected to avoid this expansion, at
; which point these checks get regenerated.
define void @global_atomic_sub_i32_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_sub_i32_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB39_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_sub_i32_e32 v3, vcc, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB39_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_i32_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB39_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_sub_u32_e32 v3, vcc, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB39_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB39_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_sub_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB39_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw sub ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
|
|
|
|
; Value-returning counterpart of the !amdgpu.no.remote.memory sub test; as
; generated here the metadata does not yet change codegen and the operation
; is still expanded to a cmpswap loop returning the old value in v0.
; NOTE(review): CHECK lines are autogenerated -- regenerate, do not hand-edit.
define i32 @global_atomic_sub_i32_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_sub_i32_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB40_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_sub_i32_e32 v4, vcc, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB40_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB40_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_sub_u32_e32 v0, vcc, v1, v2
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB40_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB40_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_sub_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB40_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %result = atomicrmw sub ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %result
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw and
|
|
; ---------------------------------------------------------------------
|
|
|
|
; First test of the "atomicrmw and" section: seq_cst and with a divergent (VGPR)
; pointer, no-return form. With no metadata the operation is expanded to a
; load + cmpswap retry loop on all three targets.
; NOTE(review): CHECK lines are autogenerated -- regenerate, do not hand-edit.
define void @global_atomic_and_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_and_i32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB41_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB41_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i32_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB41_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_and_b32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB41_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i32_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB41_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB41_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw and ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
|
|
|
|
; No-return atomic and through a +16-byte GEP: checks the cmpswap expansion
; plus offset folding (SI/GFX9 fold offset:16; VI adds 16 to the pointer with
; v_add_u32/v_addc_u32 first).
; NOTE(review): CHECK lines are autogenerated -- regenerate, do not hand-edit.
define void @global_atomic_and_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_and_i32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB42_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB42_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i32_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB42_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_and_b32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB42_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i32_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB42_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB42_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw and ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
|
|
|
|
; Value-returning atomic and with a divergent pointer: the cmpswap expansion
; must carry the previous memory value out of the loop and move it into the
; return register (v0) after %atomicrmw.end.
; NOTE(review): CHECK lines are autogenerated -- regenerate, do not hand-edit.
define i32 @global_atomic_and_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_and_i32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB43_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB43_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB43_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: v_and_b32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB43_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: v_mov_b32_e32 v0, v3
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB43_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_and_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB43_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw and ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; Same as global_atomic_and_i32_ret but through a constant-offset GEP;
; verifies the offset folds into the load/cmpswap addressing modes.
define i32 @global_atomic_and_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_and_i32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB44_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB44_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i32_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB44_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_and_b32_e32 v0, v1, v2
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB44_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i32_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB44_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_and_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB44_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw and ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
; Uniform (SGPR) pointer and operand, result unused: still expands to a
; cmpxchg loop; on gfx6 the VGPR used for spilling forces a lane spill/reload.
define amdgpu_gfx void @global_atomic_and_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_and_i32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB45_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s34, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB45_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i32_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB45_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_and_b32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB45_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i32_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB45_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v0, s6, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB45_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw and ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
; Uniform pointer + constant offset, result unused: cmpxchg-loop expansion
; with the 16-byte offset folded (or pre-added on targets without offsets).
define amdgpu_gfx void @global_atomic_and_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_and_i32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB46_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s34, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB46_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i32_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB46_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_and_b32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB46_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i32_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB46_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v0, s6, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB46_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw and ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
; Uniform pointer/operand with the atomic's old value returned in v0;
; expands to a cmpxchg loop as in the divergent-pointer cases.
define amdgpu_gfx i32 @global_atomic_and_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_and_i32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB47_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v1, s34, v2
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB47_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i32_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v2, s5
; VI-NEXT: .LBB47_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_and_b32_e32 v3, s6, v4
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB47_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i32_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB47_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_and_b32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB47_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw and ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
; Uniform pointer + constant offset, old value returned; cmpxchg-loop
; expansion with the offset folded where the target supports it.
define amdgpu_gfx i32 @global_atomic_and_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_and_i32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB48_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v1, s34, v2
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB48_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i32_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v1, s34
; VI-NEXT: v_mov_b32_e32 v2, s35
; VI-NEXT: flat_load_dword v0, v[1:2]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB48_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_and_b32_e32 v3, s6, v4
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB48_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i32_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB48_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_and_b32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB48_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw and ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
; With !amdgpu.no.remote.memory the expansion is still emitted in this
; pre-commit state; later metadata handling will restore the native atomic.
define void @global_atomic_and_i32_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_and_i32_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB49_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB49_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i32_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB49_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_and_b32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB49_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB49_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB49_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw and ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
; Returned-value variant of the !amdgpu.no.remote.memory case; currently
; identical to the unannotated expansion (pre-commit for metadata handling).
define i32 @global_atomic_and_i32_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_and_i32_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB50_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB50_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB50_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_and_b32_e32 v0, v1, v2
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB50_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB50_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_and_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB50_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %result = atomicrmw and ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %result
}
; ---------------------------------------------------------------------
; atomicrmw nand
; ---------------------------------------------------------------------
; 'nand' has no native instruction on any target, so it is always expanded
; to an and+not inside a cmpxchg loop.
define void @global_atomic_nand_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_nand_i32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB51_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v3, v4, v2
; SI-NEXT: v_not_b32_e32 v3, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB51_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i32_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB51_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_and_b32_e32 v3, v4, v2
; VI-NEXT: v_not_b32_e32 v3, v3
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB51_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i32_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB51_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v3, v4, v2
; GFX9-NEXT: v_not_b32_e32 v3, v3
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB51_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw nand ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
; 'nand' expansion through a constant-offset GEP; checks offset folding in
; the load and cmpswap of the generated loop.
define void @global_atomic_nand_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_nand_i32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB52_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v3, v4, v2
; SI-NEXT: v_not_b32_e32 v3, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB52_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i32_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB52_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_and_b32_e32 v3, v4, v2
; VI-NEXT: v_not_b32_e32 v3, v3
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB52_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i32_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB52_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v3, v4, v2
; GFX9-NEXT: v_not_b32_e32 v3, v3
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB52_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw nand ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
; Returning variant: same cmpxchg-loop expansion as the noret case, but the
; loaded/compared old value is kept live and copied to v0 as the result.
define i32 @global_atomic_nand_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_nand_i32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB53_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v3, v5, v2
; SI-NEXT: v_not_b32_e32 v4, v3
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB53_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB53_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: v_and_b32_e32 v3, v4, v2
; VI-NEXT: v_not_b32_e32 v3, v3
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB53_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: v_mov_b32_e32 v0, v3
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB53_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_and_b32_e32 v3, v4, v2
; GFX9-NEXT: v_not_b32_e32 v3, v3
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB53_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw nand ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; Returning variant with a constant 16-byte offset. On SI/GFX9 the offset folds
; into the memory instruction's immediate; on VI (flat addressing) it is
; materialized with a 64-bit v_add/v_addc before the cmpxchg loop.
define i32 @global_atomic_nand_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_nand_i32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB54_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v3, v5, v2
; SI-NEXT: v_not_b32_e32 v4, v3
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB54_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i32_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB54_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_and_b32_e32 v0, v1, v2
; VI-NEXT: v_not_b32_e32 v0, v0
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB54_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i32_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB54_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_and_b32_e32 v3, v4, v2
; GFX9-NEXT: v_not_b32_e32 v3, v3
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB54_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw nand ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; Uniform (SGPR) pointer and operand under the amdgpu_gfx calling convention.
; Still expands to a cmpxchg loop; on SI the s6/s7 inputs are spilled to lanes
; of a VGPR (v_writelane/v_readlane) because s[4:7] is repurposed as the
; buffer resource descriptor.
define amdgpu_gfx void @global_atomic_nand_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_nand_i32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB55_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s34, v1
; SI-NEXT: v_not_b32_e32 v0, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB55_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i32_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB55_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_and_b32_e32 v2, s6, v3
; VI-NEXT: v_not_b32_e32 v2, v2
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB55_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i32_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB55_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v0, s6, v1
; GFX9-NEXT: v_not_b32_e32 v0, v0
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB55_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw nand ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
|
|
|
|
; Uniform-pointer variant with a 16-byte offset. SI/GFX9 fold the offset into
; the instruction; VI adds it with scalar s_add_u32/s_addc_u32 up front.
define amdgpu_gfx void @global_atomic_nand_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_nand_i32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB56_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s34, v1
; SI-NEXT: v_not_b32_e32 v0, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB56_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i32_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB56_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_and_b32_e32 v2, s6, v3
; VI-NEXT: v_not_b32_e32 v2, v2
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB56_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i32_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB56_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v0, s6, v1
; GFX9-NEXT: v_not_b32_e32 v0, v0
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB56_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw nand ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
|
|
|
|
; Uniform-pointer, value-returning variant: cmpxchg loop with the old value
; left in v0 on exit.
define amdgpu_gfx i32 @global_atomic_nand_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_nand_i32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB57_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s34, v2
; SI-NEXT: v_not_b32_e32 v1, v0
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB57_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i32_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v2, s5
; VI-NEXT: .LBB57_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_and_b32_e32 v0, s6, v4
; VI-NEXT: v_not_b32_e32 v3, v0
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB57_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i32_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB57_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_and_b32_e32 v0, s6, v3
; GFX9-NEXT: v_not_b32_e32 v2, v0
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB57_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw nand ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; Uniform-pointer, value-returning variant with a 16-byte offset; combines the
; scalar-offset addressing of the noret_offset_scalar test with the returned
; old value in v0.
define amdgpu_gfx i32 @global_atomic_nand_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_nand_i32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB58_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s34, v2
; SI-NEXT: v_not_b32_e32 v1, v0
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB58_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i32_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v1, s34
; VI-NEXT: v_mov_b32_e32 v2, s35
; VI-NEXT: flat_load_dword v0, v[1:2]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB58_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_and_b32_e32 v0, s6, v4
; VI-NEXT: v_not_b32_e32 v3, v0
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB58_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i32_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB58_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_and_b32_e32 v0, s6, v3
; GFX9-NEXT: v_not_b32_e32 v2, v0
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB58_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw nand ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; Same as global_atomic_nand_i32_noret_offset but tagged !amdgpu.no.remote.memory.
; NOTE(review): the checks currently match the plain cmpxchg-loop expansion, i.e.
; the metadata does not yet change lowering here — presumably a later change will
; use it to avoid the expansion; confirm against the metadata-handling patch.
define void @global_atomic_nand_i32_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_nand_i32_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB59_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v3, v4, v2
; SI-NEXT: v_not_b32_e32 v3, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB59_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i32_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB59_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_and_b32_e32 v3, v4, v2
; VI-NEXT: v_not_b32_e32 v3, v3
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB59_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB59_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v3, v4, v2
; GFX9-NEXT: v_not_b32_e32 v3, v3
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB59_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw nand ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
|
|
|
|
; Value-returning counterpart of the !amdgpu.no.remote.memory test above; the
; checks match the same cmpxchg-loop expansion as the untagged ret_offset test.
define i32 @global_atomic_nand_i32_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_nand_i32_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB60_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v3, v5, v2
; SI-NEXT: v_not_b32_e32 v4, v3
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB60_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB60_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_and_b32_e32 v0, v1, v2
; VI-NEXT: v_not_b32_e32 v0, v0
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB60_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB60_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_and_b32_e32 v3, v4, v2
; GFX9-NEXT: v_not_b32_e32 v3, v3
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB60_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %result = atomicrmw nand ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %result
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw or
|
|
; ---------------------------------------------------------------------
|
|
|
|
; System-scope atomicrmw or with no allocation metadata is also expanded to a
; load + cmpxchg retry loop on all subtargets (checks match v_or_b32 feeding
; the cmpswap), rather than using the hardware or-atomic directly.
define void @global_atomic_or_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_or_i32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB61_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB61_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i32_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB61_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB61_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i32_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB61_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB61_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw or ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
; Same as the noret case but through a constant-offset GEP; the 16-byte offset
; folds into the addressing mode of the load and the cmpxchg.
define void @global_atomic_or_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_or_i32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB62_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB62_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i32_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB62_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB62_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i32_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB62_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB62_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw or ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
; Value-returning variant: the cmpxchg loop's final loaded value is the
; atomicrmw result, copied to v0 on exit.
define i32 @global_atomic_or_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_or_i32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB63_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_or_b32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB63_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB63_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: v_or_b32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB63_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: v_mov_b32_e32 v0, v3
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB63_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_or_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB63_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw or ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
; Value-returning variant with a constant-offset GEP; VI has no instruction
; offsets for flat, so the 16-byte add is materialized before the loop.
define i32 @global_atomic_or_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_or_i32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB64_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_or_b32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB64_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i32_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB64_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_or_b32_e32 v0, v1, v2
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB64_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i32_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB64_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_or_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB64_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw or ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
; Uniform (inreg) operands, amdgpu_gfx calling convention; SI must spill a VGPR
; to hold the SGPR pair via writelane/readlane around the loop.
define amdgpu_gfx void @global_atomic_or_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_or_i32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB65_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, s34, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB65_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i32_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB65_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB65_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i32_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB65_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_e32 v0, s6, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB65_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw or ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
; Uniform operands plus a constant offset; VI folds the +16 into a scalar add
; of the base before the loop.
define amdgpu_gfx void @global_atomic_or_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_or_i32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB66_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, s34, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB66_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i32_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB66_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB66_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i32_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB66_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_e32 v0, s6, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB66_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw or ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
; Uniform operands, value returned in v0; same cmpxchg-loop expansion with the
; SGPR 'in' operand folded directly into the VALU or.
define amdgpu_gfx i32 @global_atomic_or_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_or_i32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB67_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_or_b32_e32 v1, s34, v2
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB67_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i32_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v2, s5
; VI-NEXT: .LBB67_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_or_b32_e32 v3, s6, v4
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB67_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i32_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB67_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_or_b32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB67_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw or ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
; Uniform operands, value returned, constant offset; combines the scalar base
; add (VI) / immediate offset (SI, GFX9) with the ret_scalar expansion.
define amdgpu_gfx i32 @global_atomic_or_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_or_i32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB68_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_or_b32_e32 v1, s34, v2
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB68_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i32_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v1, s34
; VI-NEXT: v_mov_b32_e32 v2, s35
; VI-NEXT: flat_load_dword v0, v[1:2]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB68_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_or_b32_e32 v3, s6, v4
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB68_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i32_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB68_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_or_b32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB68_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw or ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
; 'or %ptr, 0' is a no-op update: no cmpxchg loop is needed, and it is emitted
; here as a single returning atomic add of 0.
define i32 @global_atomic_or_0_i32_ret(ptr addrspace(1) %ptr) {
; SI-LABEL: global_atomic_or_0_i32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_add v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_0_i32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, 0
; VI-NEXT: flat_atomic_add v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_0_i32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_add v0, v[0:1], v2, off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw or ptr addrspace(1) %ptr, i32 0 seq_cst
  ret i32 %result
}
; With !amdgpu.no.remote.memory the 'or' is currently still expanded to a
; cmpxchg loop, matching the unannotated case (pre-commit baseline; later
; metadata handling is expected to avoid this expansion).
define void @global_atomic_or_i32_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_or_i32_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB70_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB70_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i32_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB70_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB70_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB70_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB70_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw or ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
define i32 @global_atomic_or_i32_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; Returning `or` at a 16-byte offset with !amdgpu.no.remote.memory: still
; expanded to a load + cmpswap retry loop on all three targets (see CHECK
; lines); the metadata is not yet used to avoid the expansion.
; SI-LABEL: global_atomic_or_i32_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB71_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_or_b32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB71_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB71_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_or_b32_e32 v0, v1, v2
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB71_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB71_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_or_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB71_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %result = atomicrmw or ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %result
}
|
|
|
|
; ---------------------------------------------------------------------
; atomicrmw xor
; ---------------------------------------------------------------------
|
|
|
|
define void @global_atomic_xor_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
; Non-returning system-scope seq_cst xor with no allocation info: expanded to
; a load + cmpswap retry loop on SI/VI/GFX9 (see CHECK lines below).
; SI-LABEL: global_atomic_xor_i32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB72_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_xor_b32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB72_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i32_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB72_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_xor_b32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB72_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i32_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB72_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_xor_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB72_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw xor ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
|
|
|
|
define void @global_atomic_xor_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
; Same as global_atomic_xor_i32_noret but through a constant GEP, checking the
; 16-byte offset folds into the load/cmpswap addressing (SI/GFX9 immediate
; offset; VI materializes the address with a 64-bit add).
; SI-LABEL: global_atomic_xor_i32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB73_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_xor_b32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB73_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i32_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB73_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_xor_b32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB73_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i32_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB73_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_xor_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB73_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw xor ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
|
|
|
|
define i32 @global_atomic_xor_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
; Returning variant: same cmpswap retry loop; the loaded old value is moved
; into v0 after the loop as the result.
; SI-LABEL: global_atomic_xor_i32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB74_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_xor_b32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB74_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB74_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: v_xor_b32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB74_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: v_mov_b32_e32 v0, v3
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB74_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_xor_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB74_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
define i32 @global_atomic_xor_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
; Returning variant through a constant 16-byte GEP; cmpswap retry loop with
; the offset folded into addressing on SI/GFX9.
; SI-LABEL: global_atomic_xor_i32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB75_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_xor_b32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB75_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i32_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB75_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_xor_b32_e32 v0, v1, v2
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB75_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i32_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB75_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_xor_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB75_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw xor ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
define amdgpu_gfx void @global_atomic_xor_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; Uniform (inreg) operands under the amdgpu_gfx calling convention: still a
; cmpswap retry loop; SI additionally spills a VGPR lane to preserve s6/s7.
; SI-LABEL: global_atomic_xor_i32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB76_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_xor_b32_e32 v0, s34, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB76_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i32_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB76_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_xor_b32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB76_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i32_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB76_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_xor_b32_e32 v0, s6, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB76_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw xor ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
|
|
|
|
define amdgpu_gfx void @global_atomic_xor_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; Uniform-operand variant with a constant 16-byte GEP; cmpswap retry loop.
; VI computes the offset address with scalar adds; SI/GFX9 fold it into the
; memory-instruction offset field.
; SI-LABEL: global_atomic_xor_i32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB77_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_xor_b32_e32 v0, s34, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB77_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i32_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB77_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_xor_b32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB77_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i32_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB77_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_xor_b32_e32 v0, s6, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB77_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw xor ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
|
|
|
|
define amdgpu_gfx i32 @global_atomic_xor_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; Returning uniform-operand variant: cmpswap retry loop with the old value
; kept in v0 as the result; SI spills s6/s7 to VGPR lanes around the loop.
; SI-LABEL: global_atomic_xor_i32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB78_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_xor_b32_e32 v1, s34, v2
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB78_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i32_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v2, s5
; VI-NEXT: .LBB78_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_xor_b32_e32 v3, s6, v4
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB78_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i32_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB78_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_xor_b32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB78_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw xor ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
define amdgpu_gfx i32 @global_atomic_xor_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; Returning uniform-operand variant with a constant 16-byte GEP; cmpswap
; retry loop, offset folded on SI/GFX9, scalar address add on VI.
; SI-LABEL: global_atomic_xor_i32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB79_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_xor_b32_e32 v1, s34, v2
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB79_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i32_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v1, s34
; VI-NEXT: v_mov_b32_e32 v2, s35
; VI-NEXT: flat_load_dword v0, v[1:2]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB79_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_xor_b32_e32 v3, s6, v4
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB79_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i32_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB79_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_xor_b32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB79_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw xor ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
define i32 @global_atomic_xor_0_i32_ret(ptr addrspace(1) %ptr) {
; xor with 0 is a no-op on the stored value, so it lowers to a single
; returning atomic add of 0 (no cmpswap expansion) — see CHECK lines.
; SI-LABEL: global_atomic_xor_0_i32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_add v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_0_i32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, 0
; VI-NEXT: flat_atomic_add v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_0_i32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_add v0, v[0:1], v2, off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw xor ptr addrspace(1) %ptr, i32 0 seq_cst
  ret i32 %result
}
|
|
|
|
; seq_cst xor, no use of the result, with !amdgpu.no.remote.memory metadata:
; currently still expanded to a load + cmpswap retry loop on all targets.
define void @global_atomic_xor_i32_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_xor_i32_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB81_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_xor_b32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB81_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i32_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB81_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_xor_b32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB81_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB81_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_xor_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB81_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw xor ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
|
|
|
|
; Same as the noret variant above but the old value is used, so the loop
; forwards the cmpswap result (v0/v3) to the return register.
define i32 @global_atomic_xor_i32_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_xor_i32_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB82_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_xor_b32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB82_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB82_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_xor_b32_e32 v0, v1, v2
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB82_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB82_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_xor_b32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB82_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %result = atomicrmw xor ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %result
}
|
|
|
|
; ---------------------------------------------------------------------
; atomicrmw max
; ---------------------------------------------------------------------
|
|
|
|
; seq_cst (system scope) signed max: expanded to a load + cmpswap retry loop
; using v_max_i32 on every target, rather than a native atomic smax.
define void @global_atomic_max_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_max_i32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB83_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_max_i32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB83_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i32_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB83_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_max_i32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB83_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i32_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB83_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_i32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB83_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw max ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
|
|
|
|
; Same CAS-loop expansion with an immediate offset (gep by 4 x i32 = 16 bytes)
; folded into the memory operands (offset:16).
define void @global_atomic_max_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_max_i32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB84_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_max_i32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB84_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i32_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB84_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_max_i32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB84_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i32_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB84_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_i32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB84_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
|
|
|
|
; Value-returning variant of the max CAS-loop expansion: the loaded/old value
; is carried through the loop and copied to v0 at the end.
define i32 @global_atomic_max_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_max_i32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB85_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_max_i32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB85_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB85_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: v_max_i32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB85_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: v_mov_b32_e32 v0, v3
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB85_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_max_i32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB85_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw max ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; Value-returning max CAS loop with a 16-byte immediate offset.
define i32 @global_atomic_max_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_max_i32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB86_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_max_i32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB86_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i32_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB86_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_max_i32_e32 v0, v1, v2
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB86_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i32_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB86_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_max_i32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB86_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; Uniform (SGPR inreg) pointer/value variant. On SI the amdgpu_gfx ABI forces
; a VGPR lane spill of s6/s7 around the loop; VI/GFX9 use the SGPR pair directly.
define amdgpu_gfx void @global_atomic_max_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_max_i32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB87_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_max_i32_e32 v0, s34, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB87_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i32_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB87_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_max_i32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB87_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i32_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB87_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_i32_e32 v0, s6, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB87_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw max ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
|
|
|
|
; Scalar-pointer variant with a 16-byte offset. VI has no flat offset, so the
; address is materialized with a scalar add; SI/GFX9 fold offset:16.
define amdgpu_gfx void @global_atomic_max_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_max_i32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB88_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_max_i32_e32 v0, s34, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB88_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i32_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB88_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_max_i32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB88_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i32_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB88_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_i32_e32 v0, s6, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB88_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
|
|
|
|
; Value-returning scalar-pointer variant: the CAS loop's old value ends up in
; v0. SI again lane-spills s6/s7 (into v3 here) per the amdgpu_gfx ABI.
define amdgpu_gfx i32 @global_atomic_max_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_max_i32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB89_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_max_i32_e32 v1, s34, v2
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB89_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i32_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v2, s5
; VI-NEXT: .LBB89_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_max_i32_e32 v3, s6, v4
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB89_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i32_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB89_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_max_i32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB89_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw max ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; Checks that a seq_cst atomicrmw max (scalar/inreg operands, constant gep
; offset) with no memory metadata is expanded to a load + cmpxchg loop
; (buffer/flat/global_atomic_cmpswap) rather than a native max instruction.
define amdgpu_gfx i32 @global_atomic_max_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_max_i32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB90_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_max_i32_e32 v1, s34, v2
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB90_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i32_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v1, s34
; VI-NEXT: v_mov_b32_e32 v2, s35
; VI-NEXT: flat_load_dword v0, v[1:2]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB90_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_max_i32_e32 v3, s6, v4
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB90_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i32_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB90_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_max_i32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB90_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; Kernel variant with a dynamic index plus constant offset; result unused, so
; the cmpxchg-loop expansion of atomicrmw max keeps only the loop carry.
define amdgpu_kernel void @atomic_max_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i32 %index) {
; SI-LABEL: atomic_max_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_ashr_i32 s5, s3, 31
; SI-NEXT: s_mov_b32 s4, s3
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_add_u32 s4, s0, s4
; SI-NEXT: s_addc_u32 s5, s1, s5
; SI-NEXT: s_load_dword s3, s[4:5], 0x4
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: .LBB91_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_max_i32_e32 v0, s2, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB91_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_ashr_i32 s5, s3, 31
; VI-NEXT: s_mov_b32 s4, s3
; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; VI-NEXT: s_add_u32 s4, s0, s4
; VI-NEXT: s_addc_u32 s5, s1, s5
; VI-NEXT: s_load_dword s3, s[4:5], 0x10
; VI-NEXT: s_add_u32 s4, s4, 16
; VI-NEXT: s_addc_u32 s5, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: .LBB91_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_max_i32_e32 v2, s2, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; VI-NEXT: s_cbranch_execnz .LBB91_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s5, s3, 31
; GFX9-NEXT: s_mov_b32 s4, s3
; GFX9-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
; GFX9-NEXT: s_load_dword s3, s[0:1], 0x10
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: .LBB91_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_max_i32_e32 v0, s2, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB91_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
entry:
  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
  %tmp0 = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
|
|
|
|
; Same as atomic_max_i32_addr64_offset but the loaded-back old value is stored
; to %out2, so the cmpxchg loop's result register must survive to the store.
define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i32 %index) {
; SI-LABEL: atomic_max_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_ashr_i32 s5, s9, 31
; SI-NEXT: s_mov_b32 s4, s9
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_add_u32 s4, s0, s4
; SI-NEXT: s_addc_u32 s5, s1, s5
; SI-NEXT: s_load_dword s6, s[4:5], 0x4
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s6
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: .LBB92_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_max_i32_e32 v0, s8, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB92_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_ashr_i32 s5, s7, 31
; VI-NEXT: s_mov_b32 s4, s7
; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; VI-NEXT: s_add_u32 s4, s0, s4
; VI-NEXT: s_addc_u32 s5, s1, s5
; VI-NEXT: s_load_dword s7, s[4:5], 0x10
; VI-NEXT: s_add_u32 s4, s4, 16
; VI-NEXT: s_addc_u32 s5, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s7
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: .LBB92_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: v_max_i32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; VI-NEXT: s_cbranch_execnz .LBB92_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[0:1]
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s5, s7, 31
; GFX9-NEXT: s_mov_b32 s4, s7
; GFX9-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
; GFX9-NEXT: s_load_dword s7, s[0:1], 0x10
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s7
; GFX9-NEXT: .LBB92_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_max_i32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB92_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_store_dword v1, v0, s[2:3]
; GFX9-NEXT: s_endpgm
entry:
  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
  %tmp0 = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst
  store i32 %tmp0, ptr addrspace(1) %out2
  ret void
}
|
|
|
|
; Dynamic-index kernel, no extra constant offset, result unused; checks the
; cmpxchg-loop expansion of atomicrmw max at offset 0.
define amdgpu_kernel void @atomic_max_i32_addr64(ptr addrspace(1) %out, i32 %in, i32 %index) {
; SI-LABEL: atomic_max_i32_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_ashr_i32 s5, s3, 31
; SI-NEXT: s_mov_b32 s4, s3
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_add_u32 s4, s0, s4
; SI-NEXT: s_addc_u32 s5, s1, s5
; SI-NEXT: s_load_dword s3, s[4:5], 0x0
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: .LBB93_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_max_i32_e32 v0, s2, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB93_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_ashr_i32 s5, s3, 31
; VI-NEXT: s_mov_b32 s4, s3
; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; VI-NEXT: s_add_u32 s4, s0, s4
; VI-NEXT: s_addc_u32 s5, s1, s5
; VI-NEXT: s_load_dword s3, s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: .LBB93_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_max_i32_e32 v2, s2, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; VI-NEXT: s_cbranch_execnz .LBB93_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s5, s3, 31
; GFX9-NEXT: s_mov_b32 s4, s3
; GFX9-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
; GFX9-NEXT: s_load_dword s3, s[0:1], 0x0
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: .LBB93_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_max_i32_e32 v0, s2, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB93_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
entry:
  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
  %tmp0 = atomicrmw max ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
|
|
|
|
; As atomic_max_i32_addr64 but the old value is stored to %out2 afterwards;
; verifies the expansion's loop result feeds the trailing store.
define amdgpu_kernel void @atomic_max_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i32 %index) {
; SI-LABEL: atomic_max_i32_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_ashr_i32 s5, s9, 31
; SI-NEXT: s_mov_b32 s4, s9
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_add_u32 s4, s0, s4
; SI-NEXT: s_addc_u32 s5, s1, s5
; SI-NEXT: s_load_dword s6, s[4:5], 0x0
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s6
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: .LBB94_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_max_i32_e32 v0, s8, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB94_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_ashr_i32 s5, s7, 31
; VI-NEXT: s_mov_b32 s4, s7
; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; VI-NEXT: s_add_u32 s4, s0, s4
; VI-NEXT: s_addc_u32 s5, s1, s5
; VI-NEXT: s_load_dword s7, s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s7
; VI-NEXT: .LBB94_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: v_max_i32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; VI-NEXT: s_cbranch_execnz .LBB94_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[0:1]
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s5, s7, 31
; GFX9-NEXT: s_mov_b32 s4, s7
; GFX9-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
; GFX9-NEXT: s_load_dword s7, s[0:1], 0x0
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s7
; GFX9-NEXT: .LBB94_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_max_i32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB94_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_store_dword v1, v0, s[2:3]
; GFX9-NEXT: s_endpgm
entry:
  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
  %tmp0 = atomicrmw max ptr addrspace(1) %ptr, i32 %in seq_cst
  store i32 %tmp0, ptr addrspace(1) %out2
  ret void
}
|
|
|
|
; Carries !amdgpu.no.remote.memory metadata. NOTE(review): per the pre-commit
; note at the top of this change, the checks below still expect the cmpxchg
; expansion; later metadata handling is expected to avoid it — baseline only.
define void @global_atomic_max_i32_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_max_i32_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB95_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_max_i32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB95_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i32_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB95_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_max_i32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB95_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB95_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_i32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB95_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
|
|
|
|
; Value-returning counterpart of the preceding test; also carries
; !amdgpu.no.remote.memory and currently still expects the cmpxchg expansion.
define i32 @global_atomic_max_i32_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_max_i32_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB96_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_max_i32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB96_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB96_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_max_i32_e32 v0, v1, v2
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB96_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB96_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_max_i32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB96_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %result = atomicrmw max ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %result
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw umax
|
|
; ---------------------------------------------------------------------
|
|
|
|
; First umax test: unsigned max (v_max_u32) is likewise expanded to a load +
; cmpxchg loop when nothing is known about the allocation.
define void @global_atomic_umax_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_umax_i32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB97_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_max_u32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB97_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i32_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB97_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_max_u32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB97_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i32_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB97_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB97_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw umax ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
|
|
|
|
; umax noret variant with a constant gep offset (folds into offset:16 where
; the addressing mode allows; VI materializes the add).
define void @global_atomic_umax_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_umax_i32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB98_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_max_u32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB98_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i32_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB98_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_max_u32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB98_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i32_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB98_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB98_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
|
|
|
|
; System-scope seq_cst umax returning the old value: expanded to a load +
; cmpxchg loop on all three targets (checked below as a cmpswap loop).
define i32 @global_atomic_umax_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_umax_i32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB99_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_max_u32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB99_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB99_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: v_max_u32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB99_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: v_mov_b32_e32 v0, v3
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB99_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_max_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB99_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw umax ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}

; Same as global_atomic_umax_i32_ret but through a constant-offset GEP, so the
; cmpswap in the expansion loop folds the offset into the addressing mode.
define i32 @global_atomic_umax_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_umax_i32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB100_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_max_u32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB100_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i32_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB100_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_max_u32_e32 v0, v1, v2
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB100_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i32_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB100_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_max_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB100_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}

; Uniform (inreg) pointer and operand, result unused: still a cmpswap loop;
; SI additionally spills/readlanes SGPRs around the loop under amdgpu_gfx CC.
define amdgpu_gfx void @global_atomic_umax_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_umax_i32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB101_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_max_u32_e32 v0, s34, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB101_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i32_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB101_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_max_u32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB101_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i32_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB101_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_u32_e32 v0, s6, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB101_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw umax ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}

; Uniform-pointer noret variant with a constant-offset GEP; VI materializes
; the offset with scalar adds, SI/GFX9 fold it into the addressing mode.
define amdgpu_gfx void @global_atomic_umax_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_umax_i32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB102_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_max_u32_e32 v0, s34, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB102_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i32_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB102_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_max_u32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB102_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i32_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB102_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_u32_e32 v0, s6, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB102_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}

; Uniform-pointer variant that also uses the atomic's result, so the cmpswap
; loop's old-value register is copied to v0 as the return value.
define amdgpu_gfx i32 @global_atomic_umax_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_umax_i32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB103_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_max_u32_e32 v1, s34, v2
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB103_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i32_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v2, s5
; VI-NEXT: .LBB103_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_max_u32_e32 v3, s6, v4
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB103_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i32_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB103_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_max_u32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB103_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw umax ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}

; Uniform-pointer, value-returning variant with a constant-offset GEP.
define amdgpu_gfx i32 @global_atomic_umax_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_umax_i32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB104_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_max_u32_e32 v1, s34, v2
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB104_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i32_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v1, s34
; VI-NEXT: v_mov_b32_e32 v2, s35
; VI-NEXT: flat_load_dword v0, v[1:2]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB104_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_max_u32_e32 v3, s6, v4
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB104_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i32_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB104_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_max_u32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB104_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}

; Kernel entry point: dynamic 64-bit index plus constant offset; the cmpswap
; loop operates on scalar-computed addresses loaded from kernel arguments.
define amdgpu_kernel void @atomic_umax_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i32 %index) {
; SI-LABEL: atomic_umax_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_ashr_i32 s5, s3, 31
; SI-NEXT: s_mov_b32 s4, s3
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_add_u32 s4, s0, s4
; SI-NEXT: s_addc_u32 s5, s1, s5
; SI-NEXT: s_load_dword s3, s[4:5], 0x4
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: .LBB105_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_max_u32_e32 v0, s2, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB105_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_ashr_i32 s5, s3, 31
; VI-NEXT: s_mov_b32 s4, s3
; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; VI-NEXT: s_add_u32 s4, s0, s4
; VI-NEXT: s_addc_u32 s5, s1, s5
; VI-NEXT: s_load_dword s3, s[4:5], 0x10
; VI-NEXT: s_add_u32 s4, s4, 16
; VI-NEXT: s_addc_u32 s5, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: .LBB105_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_max_u32_e32 v2, s2, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; VI-NEXT: s_cbranch_execnz .LBB105_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s5, s3, 31
; GFX9-NEXT: s_mov_b32 s4, s3
; GFX9-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
; GFX9-NEXT: s_load_dword s3, s[0:1], 0x10
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: .LBB105_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_max_u32_e32 v0, s2, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB105_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
entry:
  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}

; Kernel variant that stores the atomic's old value to a second output
; pointer after the cmpswap loop completes.
define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i32 %index) {
; SI-LABEL: atomic_umax_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_ashr_i32 s5, s9, 31
; SI-NEXT: s_mov_b32 s4, s9
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_add_u32 s4, s0, s4
; SI-NEXT: s_addc_u32 s5, s1, s5
; SI-NEXT: s_load_dword s6, s[4:5], 0x4
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s6
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: .LBB106_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_max_u32_e32 v0, s8, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB106_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_ashr_i32 s5, s7, 31
; VI-NEXT: s_mov_b32 s4, s7
; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; VI-NEXT: s_add_u32 s4, s0, s4
; VI-NEXT: s_addc_u32 s5, s1, s5
; VI-NEXT: s_load_dword s7, s[4:5], 0x10
; VI-NEXT: s_add_u32 s4, s4, 16
; VI-NEXT: s_addc_u32 s5, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s7
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: .LBB106_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: v_max_u32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; VI-NEXT: s_cbranch_execnz .LBB106_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[0:1]
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s5, s7, 31
; GFX9-NEXT: s_mov_b32 s4, s7
; GFX9-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
; GFX9-NEXT: s_load_dword s7, s[0:1], 0x10
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s7
; GFX9-NEXT: .LBB106_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_max_u32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB106_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_store_dword v1, v0, s[2:3]
; GFX9-NEXT: s_endpgm
entry:
  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst
  store i32 %tmp0, ptr addrspace(1) %out2
  ret void
}

; Kernel variant: the pointer is formed from a sign-extended 32-bit %index,
; the system-scope umax is expanded to a load + cmpswap retry loop on all
; three targets, and the original value is stored to %out2.
define amdgpu_kernel void @atomic_umax_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i32 %index) {
; SI-LABEL: atomic_umax_i32_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_ashr_i32 s5, s9, 31
; SI-NEXT: s_mov_b32 s4, s9
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_add_u32 s4, s0, s4
; SI-NEXT: s_addc_u32 s5, s1, s5
; SI-NEXT: s_load_dword s6, s[4:5], 0x0
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s6
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: .LBB107_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_max_u32_e32 v0, s8, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB107_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i32_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_ashr_i32 s5, s7, 31
; VI-NEXT: s_mov_b32 s4, s7
; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; VI-NEXT: s_add_u32 s4, s0, s4
; VI-NEXT: s_addc_u32 s5, s1, s5
; VI-NEXT: s_load_dword s7, s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s7
; VI-NEXT: .LBB107_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: v_max_u32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; VI-NEXT: s_cbranch_execnz .LBB107_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[0:1]
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i32_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s5, s7, 31
; GFX9-NEXT: s_mov_b32 s4, s7
; GFX9-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
; GFX9-NEXT: s_load_dword s7, s[0:1], 0x0
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s7
; GFX9-NEXT: .LBB107_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_max_u32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB107_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_store_dword v1, v0, s[2:3]
; GFX9-NEXT: s_endpgm
entry:
  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
  %tmp0 = atomicrmw umax ptr addrspace(1) %ptr, i32 %in seq_cst
  store i32 %tmp0, ptr addrspace(1) %out2
  ret void
}
|
|
|
|
; Carries !amdgpu.no.remote.memory, but the checks below still show the
; cmpswap expansion: this pre-commit does not yet use the metadata to keep
; the native umax instruction (handled by a follow-up change).
define void @global_atomic_umax_i32_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_umax_i32_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB108_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_max_u32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB108_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i32_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB108_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_max_u32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB108_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB108_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB108_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
|
|
|
|
; Value-returning variant of the test above: same metadata, same cmpswap
; loop expansion in this pre-commit, with the loaded old value returned.
define i32 @global_atomic_umax_i32_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_umax_i32_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB109_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_max_u32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB109_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB109_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_max_u32_e32 v0, v1, v2
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB109_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB109_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_max_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB109_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %result = atomicrmw umax ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %result
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw umin
|
|
; ---------------------------------------------------------------------
|
|
|
|
; Baseline umin test: system-scope seq_cst umin with no alloc metadata is
; expanded to a load + v_min_u32 + cmpswap retry loop on all targets.
define void @global_atomic_umin_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_umin_i32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB110_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_min_u32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB110_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i32_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB110_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_min_u32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB110_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i32_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB110_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_min_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB110_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw umin ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
|
|
|
|
; Same expansion as global_atomic_umin_i32_noret, but through a constant
; GEP so the immediate offset (offset:16) folds into the memory operands.
define void @global_atomic_umin_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_umin_i32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB111_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_min_u32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB111_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i32_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB111_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_min_u32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB111_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i32_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB111_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_min_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB111_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw umin ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
|
|
|
|
; Value-returning umin: the cmpswap loop's loaded old value is the result
; of the atomicrmw and is moved into v0 for return.
define i32 @global_atomic_umin_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_umin_i32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB112_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_min_u32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB112_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB112_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: v_min_u32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB112_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: v_mov_b32_e32 v0, v3
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB112_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_min_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB112_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw umin ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; Value-returning umin with a constant GEP; combines the offset:16 folding
; of the noret_offset test with the result plumbing of the ret test.
define i32 @global_atomic_umin_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_umin_i32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB113_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_min_u32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB113_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i32_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB113_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_min_u32_e32 v0, v1, v2
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB113_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i32_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB113_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_min_u32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB113_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw umin ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; amdgpu_gfx calling convention with inreg (SGPR) operands; on SI the s6/s7
; SGPR pair is spilled to a VGPR via v_writelane/v_readlane around the loop.
define amdgpu_gfx void @global_atomic_umin_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_umin_i32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB114_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_min_u32_e32 v0, s34, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB114_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i32_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB114_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_min_u32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB114_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i32_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB114_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_min_u32_e32 v0, s6, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB114_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw umin ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
|
|
|
|
; Scalar-operand variant with a constant GEP: SI/GFX9 fold the 16-byte
; offset into the memory instructions, VI materializes it with s_add/s_addc.
define amdgpu_gfx void @global_atomic_umin_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_umin_i32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB115_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_min_u32_e32 v0, s34, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB115_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i32_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB115_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_min_u32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB115_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i32_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB115_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_min_u32_e32 v0, s6, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB115_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw umin ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
|
|
|
|
define amdgpu_gfx i32 @global_atomic_umin_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
|
|
; SI-LABEL: global_atomic_umin_i32_ret_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v3, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v3, s7, 1
|
|
; SI-NEXT: s_mov_b32 s34, s6
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
|
|
; SI-NEXT: s_mov_b64 s[36:37], 0
|
|
; SI-NEXT: .LBB116_1: ; %atomicrmw.start
|
|
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_mov_b32_e32 v2, v0
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_min_u32_e32 v1, s34, v2
|
|
; SI-NEXT: v_mov_b32_e32 v0, v1
|
|
; SI-NEXT: v_mov_b32_e32 v1, v2
|
|
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
|
|
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
|
|
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
|
|
; SI-NEXT: s_cbranch_execnz .LBB116_1
|
|
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
|
|
; SI-NEXT: v_readlane_b32 s7, v3, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v3, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_umin_i32_ret_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v0, s4
|
|
; VI-NEXT: v_mov_b32_e32 v1, s5
|
|
; VI-NEXT: flat_load_dword v0, v[0:1]
|
|
; VI-NEXT: v_mov_b32_e32 v1, s4
|
|
; VI-NEXT: s_mov_b64 s[34:35], 0
|
|
; VI-NEXT: v_mov_b32_e32 v2, s5
|
|
; VI-NEXT: .LBB116_1: ; %atomicrmw.start
|
|
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v4, v0
|
|
; VI-NEXT: v_min_u32_e32 v3, s6, v4
|
|
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
|
|
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
|
|
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
|
|
; VI-NEXT: s_cbranch_execnz .LBB116_1
|
|
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_umin_i32_ret_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, 0
|
|
; GFX9-NEXT: global_load_dword v0, v1, s[4:5]
|
|
; GFX9-NEXT: s_mov_b64 s[34:35], 0
|
|
; GFX9-NEXT: .LBB116_1: ; %atomicrmw.start
|
|
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v3, v0
|
|
; GFX9-NEXT: v_min_u32_e32 v2, s6, v3
|
|
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
|
|
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
|
|
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
|
|
; GFX9-NEXT: s_cbranch_execnz .LBB116_1
|
|
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%result = atomicrmw umin ptr addrspace(1) %ptr, i32 %in seq_cst
|
|
ret i32 %result
|
|
}
|
|
|
|
define amdgpu_gfx i32 @global_atomic_umin_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
|
|
; SI-LABEL: global_atomic_umin_i32_ret_offset_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v3, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v3, s7, 1
|
|
; SI-NEXT: s_mov_b32 s34, s6
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0 offset:16
|
|
; SI-NEXT: s_mov_b64 s[36:37], 0
|
|
; SI-NEXT: .LBB117_1: ; %atomicrmw.start
|
|
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_mov_b32_e32 v2, v0
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_min_u32_e32 v1, s34, v2
|
|
; SI-NEXT: v_mov_b32_e32 v0, v1
|
|
; SI-NEXT: v_mov_b32_e32 v1, v2
|
|
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 offset:16 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
|
|
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
|
|
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
|
|
; SI-NEXT: s_cbranch_execnz .LBB117_1
|
|
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
|
|
; SI-NEXT: v_readlane_b32 s7, v3, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v3, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_umin_i32_ret_offset_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: s_add_u32 s34, s4, 16
|
|
; VI-NEXT: s_addc_u32 s35, s5, 0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s34
|
|
; VI-NEXT: v_mov_b32_e32 v2, s35
|
|
; VI-NEXT: flat_load_dword v0, v[1:2]
|
|
; VI-NEXT: s_mov_b64 s[34:35], 0
|
|
; VI-NEXT: .LBB117_1: ; %atomicrmw.start
|
|
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v4, v0
|
|
; VI-NEXT: v_min_u32_e32 v3, s6, v4
|
|
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
|
|
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
|
|
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
|
|
; VI-NEXT: s_cbranch_execnz .LBB117_1
|
|
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_umin_i32_ret_offset_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, 0
|
|
; GFX9-NEXT: global_load_dword v0, v1, s[4:5] offset:16
|
|
; GFX9-NEXT: s_mov_b64 s[34:35], 0
|
|
; GFX9-NEXT: .LBB117_1: ; %atomicrmw.start
|
|
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v3, v0
|
|
; GFX9-NEXT: v_min_u32_e32 v2, s6, v3
|
|
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
|
|
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
|
|
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
|
|
; GFX9-NEXT: s_cbranch_execnz .LBB117_1
|
|
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr i32, ptr addrspace(1) %out, i32 4
|
|
%result = atomicrmw umin ptr addrspace(1) %gep, i32 %in seq_cst
|
|
ret i32 %result
|
|
}
|
|
|
|
define void @global_atomic_umin_i32_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
|
|
; SI-LABEL: global_atomic_umin_i32_noret_offset__amdgpu_no_remote_memory:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
|
|
; SI-NEXT: s_mov_b64 s[8:9], 0
|
|
; SI-NEXT: .LBB118_1: ; %atomicrmw.start
|
|
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_min_u32_e32 v3, v4, v2
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_mov_b32_e32 v6, v4
|
|
; SI-NEXT: v_mov_b32_e32 v5, v3
|
|
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
|
|
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
|
|
; SI-NEXT: v_mov_b32_e32 v4, v5
|
|
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
|
|
; SI-NEXT: s_cbranch_execnz .LBB118_1
|
|
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_umin_i32_noret_offset__amdgpu_no_remote_memory:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_load_dword v4, v[0:1]
|
|
; VI-NEXT: s_mov_b64 s[4:5], 0
|
|
; VI-NEXT: .LBB118_1: ; %atomicrmw.start
|
|
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_min_u32_e32 v3, v4, v2
|
|
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
|
|
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
|
|
; VI-NEXT: v_mov_b32_e32 v4, v3
|
|
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
|
|
; VI-NEXT: s_cbranch_execnz .LBB118_1
|
|
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_umin_i32_noret_offset__amdgpu_no_remote_memory:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
|
|
; GFX9-NEXT: s_mov_b64 s[4:5], 0
|
|
; GFX9-NEXT: .LBB118_1: ; %atomicrmw.start
|
|
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_min_u32_e32 v3, v4, v2
|
|
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
|
|
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
|
|
; GFX9-NEXT: v_mov_b32_e32 v4, v3
|
|
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
|
|
; GFX9-NEXT: s_cbranch_execnz .LBB118_1
|
|
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
|
|
%tmp0 = atomicrmw umin ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
|
|
ret void
|
|
}
|
|
|
|
define i32 @global_atomic_umin_i32_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
|
|
; SI-LABEL: global_atomic_umin_i32_ret_offset__amdgpu_no_remote_memory:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
|
|
; SI-NEXT: s_mov_b64 s[8:9], 0
|
|
; SI-NEXT: .LBB119_1: ; %atomicrmw.start
|
|
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_mov_b32_e32 v5, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_min_u32_e32 v4, v5, v2
|
|
; SI-NEXT: v_mov_b32_e32 v3, v4
|
|
; SI-NEXT: v_mov_b32_e32 v4, v5
|
|
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
|
|
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
|
|
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
|
|
; SI-NEXT: s_cbranch_execnz .LBB119_1
|
|
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
|
|
; SI-NEXT: v_mov_b32_e32 v0, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_umin_i32_ret_offset__amdgpu_no_remote_memory:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
|
|
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_load_dword v0, v[3:4]
|
|
; VI-NEXT: s_mov_b64 s[4:5], 0
|
|
; VI-NEXT: .LBB119_1: ; %atomicrmw.start
|
|
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v1, v0
|
|
; VI-NEXT: v_min_u32_e32 v0, v1, v2
|
|
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
|
|
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
|
|
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
|
|
; VI-NEXT: s_cbranch_execnz .LBB119_1
|
|
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_umin_i32_ret_offset__amdgpu_no_remote_memory:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
|
|
; GFX9-NEXT: s_mov_b64 s[4:5], 0
|
|
; GFX9-NEXT: .LBB119_1: ; %atomicrmw.start
|
|
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v4, v3
|
|
; GFX9-NEXT: v_min_u32_e32 v3, v4, v2
|
|
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
|
|
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
|
|
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
|
|
; GFX9-NEXT: s_cbranch_execnz .LBB119_1
|
|
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, v3
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
|
|
%result = atomicrmw umin ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
|
|
ret i32 %result
|
|
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw min
|
|
; ---------------------------------------------------------------------
|
|
|
|
define void @global_atomic_min_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
|
|
; SI-LABEL: global_atomic_min_i32_noret:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64
|
|
; SI-NEXT: s_mov_b64 s[8:9], 0
|
|
; SI-NEXT: .LBB120_1: ; %atomicrmw.start
|
|
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_min_i32_e32 v3, v4, v2
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_mov_b32_e32 v6, v4
|
|
; SI-NEXT: v_mov_b32_e32 v5, v3
|
|
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
|
|
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
|
|
; SI-NEXT: v_mov_b32_e32 v4, v5
|
|
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
|
|
; SI-NEXT: s_cbranch_execnz .LBB120_1
|
|
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_min_i32_noret:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: flat_load_dword v4, v[0:1]
|
|
; VI-NEXT: s_mov_b64 s[4:5], 0
|
|
; VI-NEXT: .LBB120_1: ; %atomicrmw.start
|
|
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_min_i32_e32 v3, v4, v2
|
|
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
|
|
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
|
|
; VI-NEXT: v_mov_b32_e32 v4, v3
|
|
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
|
|
; VI-NEXT: s_cbranch_execnz .LBB120_1
|
|
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_min_i32_noret:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_load_dword v4, v[0:1], off
|
|
; GFX9-NEXT: s_mov_b64 s[4:5], 0
|
|
; GFX9-NEXT: .LBB120_1: ; %atomicrmw.start
|
|
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_min_i32_e32 v3, v4, v2
|
|
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
|
|
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
|
|
; GFX9-NEXT: v_mov_b32_e32 v4, v3
|
|
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
|
|
; GFX9-NEXT: s_cbranch_execnz .LBB120_1
|
|
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%tmp0 = atomicrmw min ptr addrspace(1) %ptr, i32 %in seq_cst
|
|
ret void
|
|
}
|
|
|
|
define void @global_atomic_min_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
|
|
; SI-LABEL: global_atomic_min_i32_noret_offset:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
|
|
; SI-NEXT: s_mov_b64 s[8:9], 0
|
|
; SI-NEXT: .LBB121_1: ; %atomicrmw.start
|
|
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_min_i32_e32 v3, v4, v2
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_mov_b32_e32 v6, v4
|
|
; SI-NEXT: v_mov_b32_e32 v5, v3
|
|
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
|
|
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
|
|
; SI-NEXT: v_mov_b32_e32 v4, v5
|
|
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
|
|
; SI-NEXT: s_cbranch_execnz .LBB121_1
|
|
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_min_i32_noret_offset:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_load_dword v4, v[0:1]
|
|
; VI-NEXT: s_mov_b64 s[4:5], 0
|
|
; VI-NEXT: .LBB121_1: ; %atomicrmw.start
|
|
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_min_i32_e32 v3, v4, v2
|
|
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
|
|
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
|
|
; VI-NEXT: v_mov_b32_e32 v4, v3
|
|
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
|
|
; VI-NEXT: s_cbranch_execnz .LBB121_1
|
|
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_min_i32_noret_offset:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
|
|
; GFX9-NEXT: s_mov_b64 s[4:5], 0
|
|
; GFX9-NEXT: .LBB121_1: ; %atomicrmw.start
|
|
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_min_i32_e32 v3, v4, v2
|
|
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
|
|
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
|
|
; GFX9-NEXT: v_mov_b32_e32 v4, v3
|
|
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
|
|
; GFX9-NEXT: s_cbranch_execnz .LBB121_1
|
|
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr i32, ptr addrspace(1) %out, i32 4
|
|
%tmp0 = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst
|
|
ret void
|
|
}
|
|
|
|
define i32 @global_atomic_min_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
|
|
; SI-LABEL: global_atomic_min_i32_ret:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
|
|
; SI-NEXT: s_mov_b64 s[8:9], 0
|
|
; SI-NEXT: .LBB122_1: ; %atomicrmw.start
|
|
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_mov_b32_e32 v5, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_min_i32_e32 v4, v5, v2
|
|
; SI-NEXT: v_mov_b32_e32 v3, v4
|
|
; SI-NEXT: v_mov_b32_e32 v4, v5
|
|
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
|
|
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
|
|
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
|
|
; SI-NEXT: s_cbranch_execnz .LBB122_1
|
|
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
|
|
; SI-NEXT: v_mov_b32_e32 v0, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_min_i32_ret:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: flat_load_dword v3, v[0:1]
|
|
; VI-NEXT: s_mov_b64 s[4:5], 0
|
|
; VI-NEXT: .LBB122_1: ; %atomicrmw.start
|
|
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v4, v3
|
|
; VI-NEXT: v_min_i32_e32 v3, v4, v2
|
|
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
|
|
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
|
|
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
|
|
; VI-NEXT: s_cbranch_execnz .LBB122_1
|
|
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
|
|
; VI-NEXT: v_mov_b32_e32 v0, v3
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_min_i32_ret:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_load_dword v3, v[0:1], off
|
|
; GFX9-NEXT: s_mov_b64 s[4:5], 0
|
|
; GFX9-NEXT: .LBB122_1: ; %atomicrmw.start
|
|
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v4, v3
|
|
; GFX9-NEXT: v_min_i32_e32 v3, v4, v2
|
|
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
|
|
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
|
|
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
|
|
; GFX9-NEXT: s_cbranch_execnz .LBB122_1
|
|
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, v3
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%result = atomicrmw min ptr addrspace(1) %ptr, i32 %in seq_cst
|
|
ret i32 %result
|
|
}
|
|
|
|
define i32 @global_atomic_min_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
|
|
; SI-LABEL: global_atomic_min_i32_ret_offset:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
|
|
; SI-NEXT: s_mov_b64 s[8:9], 0
|
|
; SI-NEXT: .LBB123_1: ; %atomicrmw.start
|
|
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_mov_b32_e32 v5, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_min_i32_e32 v4, v5, v2
|
|
; SI-NEXT: v_mov_b32_e32 v3, v4
|
|
; SI-NEXT: v_mov_b32_e32 v4, v5
|
|
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
|
|
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
|
|
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
|
|
; SI-NEXT: s_cbranch_execnz .LBB123_1
|
|
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
|
|
; SI-NEXT: v_mov_b32_e32 v0, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_min_i32_ret_offset:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
|
|
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_load_dword v0, v[3:4]
|
|
; VI-NEXT: s_mov_b64 s[4:5], 0
|
|
; VI-NEXT: .LBB123_1: ; %atomicrmw.start
|
|
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v1, v0
|
|
; VI-NEXT: v_min_i32_e32 v0, v1, v2
|
|
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
|
|
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
|
|
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
|
|
; VI-NEXT: s_cbranch_execnz .LBB123_1
|
|
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_min_i32_ret_offset:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
|
|
; GFX9-NEXT: s_mov_b64 s[4:5], 0
|
|
; GFX9-NEXT: .LBB123_1: ; %atomicrmw.start
|
|
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v4, v3
|
|
; GFX9-NEXT: v_min_i32_e32 v3, v4, v2
|
|
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
|
|
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
|
|
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
|
|
; GFX9-NEXT: s_cbranch_execnz .LBB123_1
|
|
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, v3
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr i32, ptr addrspace(1) %out, i32 4
|
|
%result = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst
|
|
ret i32 %result
|
|
}
|
|
|
|
define amdgpu_gfx void @global_atomic_min_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
|
|
; SI-LABEL: global_atomic_min_i32_noret_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v4, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v4, s7, 1
|
|
; SI-NEXT: s_mov_b32 s34, s6
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0
|
|
; SI-NEXT: s_mov_b64 s[36:37], 0
|
|
; SI-NEXT: .LBB124_1: ; %atomicrmw.start
|
|
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_min_i32_e32 v0, s34, v1
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_mov_b32_e32 v3, v1
|
|
; SI-NEXT: v_mov_b32_e32 v2, v0
|
|
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
|
|
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
|
|
; SI-NEXT: v_mov_b32_e32 v1, v2
|
|
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
|
|
; SI-NEXT: s_cbranch_execnz .LBB124_1
|
|
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
|
|
; SI-NEXT: v_readlane_b32 s7, v4, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v4, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_min_i32_noret_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v0, s4
|
|
; VI-NEXT: v_mov_b32_e32 v1, s5
|
|
; VI-NEXT: flat_load_dword v3, v[0:1]
|
|
; VI-NEXT: s_mov_b64 s[34:35], 0
|
|
; VI-NEXT: .LBB124_1: ; %atomicrmw.start
|
|
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_min_i32_e32 v2, s6, v3
|
|
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
|
|
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
|
|
; VI-NEXT: v_mov_b32_e32 v3, v2
|
|
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
|
|
; VI-NEXT: s_cbranch_execnz .LBB124_1
|
|
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_min_i32_noret_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_load_dword v1, v2, s[4:5]
|
|
; GFX9-NEXT: s_mov_b64 s[34:35], 0
|
|
; GFX9-NEXT: .LBB124_1: ; %atomicrmw.start
|
|
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_min_i32_e32 v0, s6, v1
|
|
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
|
|
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, v0
|
|
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
|
|
; GFX9-NEXT: s_cbranch_execnz .LBB124_1
|
|
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%tmp0 = atomicrmw min ptr addrspace(1) %ptr, i32 %in seq_cst
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_gfx void @global_atomic_min_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
|
|
; SI-LABEL: global_atomic_min_i32_noret_offset_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v4, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v4, s7, 1
|
|
; SI-NEXT: s_mov_b32 s34, s6
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0 offset:16
|
|
; SI-NEXT: s_mov_b64 s[36:37], 0
|
|
; SI-NEXT: .LBB125_1: ; %atomicrmw.start
|
|
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_min_i32_e32 v0, s34, v1
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_mov_b32_e32 v3, v1
|
|
; SI-NEXT: v_mov_b32_e32 v2, v0
|
|
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
|
|
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
|
|
; SI-NEXT: v_mov_b32_e32 v1, v2
|
|
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
|
|
; SI-NEXT: s_cbranch_execnz .LBB125_1
|
|
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
|
|
; SI-NEXT: v_readlane_b32 s7, v4, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v4, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_min_i32_noret_offset_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: s_add_u32 s34, s4, 16
|
|
; VI-NEXT: s_addc_u32 s35, s5, 0
|
|
; VI-NEXT: v_mov_b32_e32 v0, s34
|
|
; VI-NEXT: v_mov_b32_e32 v1, s35
|
|
; VI-NEXT: flat_load_dword v3, v[0:1]
|
|
; VI-NEXT: s_mov_b64 s[34:35], 0
|
|
; VI-NEXT: .LBB125_1: ; %atomicrmw.start
|
|
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_min_i32_e32 v2, s6, v3
|
|
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
|
|
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
|
|
; VI-NEXT: v_mov_b32_e32 v3, v2
|
|
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
|
|
; VI-NEXT: s_cbranch_execnz .LBB125_1
|
|
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_min_i32_noret_offset_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_load_dword v1, v2, s[4:5] offset:16
|
|
; GFX9-NEXT: s_mov_b64 s[34:35], 0
|
|
; GFX9-NEXT: .LBB125_1: ; %atomicrmw.start
|
|
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_min_i32_e32 v0, s6, v1
|
|
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
|
|
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, v0
|
|
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
|
|
; GFX9-NEXT: s_cbranch_execnz .LBB125_1
|
|
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr i32, ptr addrspace(1) %out, i32 4
|
|
%tmp0 = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst
|
|
ret void
|
|
}
|
|
|
|
; Returned-value variant with a uniform (SGPR, inreg) pointer and operand.
; The system-scope seq_cst `atomicrmw min` is expanded to a load + cmpxchg
; loop on all three targets (buffer_/flat_/global_atomic_cmpswap), since
; nothing is known about the underlying allocation. Checks are autogenerated.
define amdgpu_gfx i32 @global_atomic_min_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_min_i32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB126_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_min_i32_e32 v1, s34, v2
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB126_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i32_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v2, s5
; VI-NEXT: .LBB126_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_min_i32_e32 v3, s6, v4
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB126_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i32_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB126_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_min_i32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB126_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw min ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; Same as the scalar ret variant, but through a constant +16-byte offset GEP;
; checks verify the offset is folded into the cmpswap addressing mode
; (offset:16 on SI/GFX9, materialized address add on VI). Autogenerated checks.
define amdgpu_gfx i32 @global_atomic_min_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_min_i32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB127_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_min_i32_e32 v1, s34, v2
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB127_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i32_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v1, s34
; VI-NEXT: v_mov_b32_e32 v2, s35
; VI-NEXT: flat_load_dword v0, v[1:2]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB127_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_min_i32_e32 v3, s6, v4
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB127_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i32_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB127_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_min_i32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB127_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; Kernel entry point: dynamic 64-bit index plus constant offset into the
; global pointer; the seq_cst min still expands to a cmpxchg loop and the
; initial value is loaded with a scalar load (s_load_dword). Autogenerated checks.
define amdgpu_kernel void @atomic_min_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i32 %index) {
; SI-LABEL: atomic_min_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_ashr_i32 s5, s3, 31
; SI-NEXT: s_mov_b32 s4, s3
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_add_u32 s4, s0, s4
; SI-NEXT: s_addc_u32 s5, s1, s5
; SI-NEXT: s_load_dword s3, s[4:5], 0x4
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: .LBB128_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_min_i32_e32 v0, s2, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB128_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_ashr_i32 s5, s3, 31
; VI-NEXT: s_mov_b32 s4, s3
; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; VI-NEXT: s_add_u32 s4, s0, s4
; VI-NEXT: s_addc_u32 s5, s1, s5
; VI-NEXT: s_load_dword s3, s[4:5], 0x10
; VI-NEXT: s_add_u32 s4, s4, 16
; VI-NEXT: s_addc_u32 s5, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: .LBB128_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_min_i32_e32 v2, s2, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; VI-NEXT: s_cbranch_execnz .LBB128_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s5, s3, 31
; GFX9-NEXT: s_mov_b32 s4, s3
; GFX9-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
; GFX9-NEXT: s_load_dword s3, s[0:1], 0x10
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: .LBB128_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_min_i32_e32 v0, s2, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB128_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
entry:
  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
  %tmp0 = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
|
|
|
|
; Kernel variant of the addr64+offset test that also stores the atomic's old
; value to a second output pointer, so the cmpxchg loop's live-out is checked.
; Autogenerated checks.
define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i32 %index) {
; SI-LABEL: atomic_min_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_ashr_i32 s5, s9, 31
; SI-NEXT: s_mov_b32 s4, s9
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_add_u32 s4, s0, s4
; SI-NEXT: s_addc_u32 s5, s1, s5
; SI-NEXT: s_load_dword s6, s[4:5], 0x4
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s6
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: .LBB129_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_min_i32_e32 v0, s8, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB129_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_ashr_i32 s5, s7, 31
; VI-NEXT: s_mov_b32 s4, s7
; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; VI-NEXT: s_add_u32 s4, s0, s4
; VI-NEXT: s_addc_u32 s5, s1, s5
; VI-NEXT: s_load_dword s7, s[4:5], 0x10
; VI-NEXT: s_add_u32 s4, s4, 16
; VI-NEXT: s_addc_u32 s5, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s7
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: .LBB129_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: v_min_i32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; VI-NEXT: s_cbranch_execnz .LBB129_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[0:1]
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s5, s7, 31
; GFX9-NEXT: s_mov_b32 s4, s7
; GFX9-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
; GFX9-NEXT: s_load_dword s7, s[0:1], 0x10
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s7
; GFX9-NEXT: .LBB129_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_min_i32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB129_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_store_dword v1, v0, s[2:3]
; GFX9-NEXT: s_endpgm
entry:
  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 4
  %tmp0 = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst
  store i32 %tmp0, ptr addrspace(1) %out2
  ret void
}
|
|
|
|
; Simplest kernel case: no index, no offset — baseline cmpxchg-loop expansion
; of a system-scope seq_cst `atomicrmw min` on a global pointer. Autogenerated checks.
define amdgpu_kernel void @atomic_min_i32(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_min_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s2, s[0:1], 0x0
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s2
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: .LBB130_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_min_i32_e32 v0, s6, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; SI-NEXT: s_cbranch_execnz .LBB130_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s3, s[6:7], 0x0
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: .LBB130_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_min_i32_e32 v2, s2, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; VI-NEXT: s_cbranch_execnz .LBB130_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-NEXT: s_load_dword s6, s[4:5], 0x2c
; GFX9-NEXT: s_mov_b64 s[2:3], 0
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_load_dword s4, s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: .LBB130_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_min_i32_e32 v0, s6, v1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB130_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
entry:
  %tmp0 = atomicrmw min ptr addrspace(1) %out, i32 %in seq_cst
  ret void
}
|
|
|
|
; Kernel with dynamic index (no extra offset) whose atomic result is stored
; to %out2; exercises the cmpxchg loop plus the post-loop store of the old
; value. Autogenerated checks.
define amdgpu_kernel void @atomic_min_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i32 %index) {
; SI-LABEL: atomic_min_i32_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_ashr_i32 s5, s9, 31
; SI-NEXT: s_mov_b32 s4, s9
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_add_u32 s4, s0, s4
; SI-NEXT: s_addc_u32 s5, s1, s5
; SI-NEXT: s_load_dword s6, s[4:5], 0x0
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s6
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: .LBB131_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_min_i32_e32 v0, s8, v1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB131_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_store_dword v2, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_ashr_i32 s5, s7, 31
; VI-NEXT: s_mov_b32 s4, s7
; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; VI-NEXT: s_add_u32 s4, s0, s4
; VI-NEXT: s_addc_u32 s5, s1, s5
; VI-NEXT: s_load_dword s7, s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s7
; VI-NEXT: .LBB131_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: v_min_i32_e32 v2, s6, v3
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; VI-NEXT: s_cbranch_execnz .LBB131_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[0:1]
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s5, s7, 31
; GFX9-NEXT: s_mov_b32 s4, s7
; GFX9-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; GFX9-NEXT: s_add_u32 s0, s0, s4
; GFX9-NEXT: s_addc_u32 s1, s1, s5
; GFX9-NEXT: s_load_dword s7, s[0:1], 0x0
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s7
; GFX9-NEXT: .LBB131_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_min_i32_e32 v2, s6, v3
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB131_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_store_dword v1, v0, s[2:3]
; GFX9-NEXT: s_endpgm
entry:
  %ptr = getelementptr i32, ptr addrspace(1) %out, i32 %index
  %tmp0 = atomicrmw min ptr addrspace(1) %ptr, i32 %in seq_cst
  store i32 %tmp0, ptr addrspace(1) %out2
  ret void
}
|
|
|
|
; No-return variant carrying !amdgpu.no.remote.memory. Currently this still
; expands to the same cmpxchg loop as the unannotated test.
; NOTE(review): per the pre-commit intent, later metadata handling is expected
; to let this select the native atomic instead — confirm when regenerating.
define void @global_atomic_min_i32_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_min_i32_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB132_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_min_i32_e32 v3, v4, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB132_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i32_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB132_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_min_i32_e32 v3, v4, v2
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB132_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB132_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_min_i32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB132_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
|
|
|
|
; Returned-value variant carrying !amdgpu.no.remote.memory; currently still
; expanded to a cmpxchg loop identically to the unannotated test.
; NOTE(review): expected to change once the metadata is consumed — confirm
; when regenerating checks.
define i32 @global_atomic_min_i32_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_min_i32_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB133_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_min_i32_e32 v4, v5, v2
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB133_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB133_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_min_i32_e32 v0, v1, v2
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB133_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB133_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_min_i32_e32 v3, v4, v2
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB133_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %result = atomicrmw min ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %result
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw uinc_wrap
|
|
; ---------------------------------------------------------------------
|
|
|
|
; uinc_wrap has no direct hardware atomic here and no !amdgpu.no.remote.memory
; metadata, so it is expanded to a load + cmpxchg loop on all subtargets.
define void @global_atomic_uinc_wrap_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_uinc_wrap_i32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB134_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v3, vcc, 1, v4
; SI-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
; SI-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB134_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i32_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB134_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 1, v4
; VI-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
; VI-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB134_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i32_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB134_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v3, 1, v4
; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB134_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
; Same cmpxchg-loop expansion as the noret case, with a constant 16-byte
; offset folded into the addressing mode (or added on VI, which lacks offsets).
define void @global_atomic_uinc_wrap_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_uinc_wrap_i32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB135_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v3, vcc, 1, v4
; SI-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
; SI-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB135_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i32_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB135_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 1, v4
; VI-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
; VI-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB135_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i32_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB135_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v3, 1, v4
; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB135_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
; Value-returning variant: the cmpxchg loop's final loaded value is moved
; into v0 as the atomicrmw result.
define i32 @global_atomic_uinc_wrap_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_uinc_wrap_i32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB136_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v3, vcc, 1, v5
; SI-NEXT: v_cmp_lt_u32_e32 vcc, v5, v2
; SI-NEXT: v_cndmask_b32_e32 v4, 0, v3, vcc
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB136_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB136_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: v_add_u32_e32 v3, vcc, 1, v4
; VI-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
; VI-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB136_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: v_mov_b32_e32 v0, v3
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB136_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_add_u32_e32 v3, 1, v4
; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB136_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
; Value-returning variant with a constant 16-byte offset; expanded to a
; cmpxchg loop as in the other uinc_wrap cases.
define i32 @global_atomic_uinc_wrap_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_uinc_wrap_i32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB137_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v3, vcc, 1, v5
; SI-NEXT: v_cmp_lt_u32_e32 vcc, v5, v2
; SI-NEXT: v_cndmask_b32_e32 v4, 0, v3, vcc
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB137_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i32_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB137_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 1, v1
; VI-NEXT: v_cmp_lt_u32_e32 vcc, v1, v2
; VI-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB137_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i32_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB137_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_add_u32_e32 v3, 1, v4
; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB137_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
; amdgpu_gfx variant with SGPR (inreg) operands; SI additionally spills/
; reloads a VGPR lane to preserve s6/s7 around the cmpxchg loop.
define amdgpu_gfx void @global_atomic_uinc_wrap_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_uinc_wrap_i32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB138_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v1
; SI-NEXT: v_cmp_gt_u32_e32 vcc, s34, v1
; SI-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB138_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i32_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB138_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v2, vcc, 1, v3
; VI-NEXT: v_cmp_gt_u32_e32 vcc, s6, v3
; VI-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB138_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i32_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB138_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v0, 1, v1
; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, s6, v1
; GFX9-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB138_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
  ret void
}
; amdgpu_gfx + SGPR operands + constant 16-byte offset; the offset is folded
; into the buffer/global addressing (SI/GFX9) or added scalar-side (VI).
define amdgpu_gfx void @global_atomic_uinc_wrap_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_uinc_wrap_i32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v4, s6, 0
; SI-NEXT: v_writelane_b32 v4, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB139_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v1
; SI-NEXT: v_cmp_gt_u32_e32 vcc, s34, v1
; SI-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB139_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v4, 1
; SI-NEXT: v_readlane_b32 s6, v4, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i32_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB139_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v2, vcc, 1, v3
; VI-NEXT: v_cmp_gt_u32_e32 vcc, s6, v3
; VI-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB139_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i32_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB139_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v0, 1, v1
; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, s6, v1
; GFX9-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB139_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 %in seq_cst
  ret void
}
; amdgpu_gfx value-returning variant with SGPR operands; the cmpxchg loop's
; result is left in v0.
define amdgpu_gfx i32 @global_atomic_uinc_wrap_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_uinc_wrap_i32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB140_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v2
; SI-NEXT: v_cmp_gt_u32_e32 vcc, s34, v2
; SI-NEXT: v_cndmask_b32_e32 v1, 0, v0, vcc
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB140_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i32_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v2, s5
; VI-NEXT: .LBB140_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 1, v4
; VI-NEXT: v_cmp_gt_u32_e32 vcc, s6, v4
; VI-NEXT: v_cndmask_b32_e32 v3, 0, v0, vcc
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB140_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i32_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB140_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_add_u32_e32 v0, 1, v3
; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, s6, v3
; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v0, vcc
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB140_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
; amdgpu_gfx value-returning variant with SGPR operands and a constant
; 16-byte offset; same cmpxchg-loop expansion, result returned in v0.
define amdgpu_gfx i32 @global_atomic_uinc_wrap_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_uinc_wrap_i32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v3, s6, 0
; SI-NEXT: v_writelane_b32 v3, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB141_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v2
; SI-NEXT: v_cmp_gt_u32_e32 vcc, s34, v2
; SI-NEXT: v_cndmask_b32_e32 v1, 0, v0, vcc
; SI-NEXT: v_mov_b32_e32 v0, v1
; SI-NEXT: v_mov_b32_e32 v1, v2
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v2
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB141_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v3, 1
; SI-NEXT: v_readlane_b32 s6, v3, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i32_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v1, s34
; VI-NEXT: v_mov_b32_e32 v2, s35
; VI-NEXT: flat_load_dword v0, v[1:2]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB141_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 1, v4
; VI-NEXT: v_cmp_gt_u32_e32 vcc, s6, v4
; VI-NEXT: v_cndmask_b32_e32 v3, 0, v0, vcc
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB141_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i32_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB141_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, v0
; GFX9-NEXT: v_add_u32_e32 v0, 1, v3
; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, s6, v3
; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v0, vcc
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB141_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
; uinc_wrap (no return value used) on a global pointer annotated with
; !amdgpu.no.remote.memory. NOTE(review): the metadata is not consumed yet,
; so codegen still expands this to a load + cmpswap retry loop
; (%atomicrmw.start/%atomicrmw.end) identical to the unannotated form; a
; follow-up change is expected to use the metadata to emit the native atomic.
define void @global_atomic_uinc_wrap_i32_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_uinc_wrap_i32_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v4, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB142_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v3, vcc, 1, v4
; SI-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
; SI-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB142_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i32_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB142_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 1, v4
; VI-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
; VI-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB142_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB142_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v3, 1, v4
; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB142_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
; Metadata-annotated op; currently still expanded — see header note.
%tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
ret void
}
|
|
|
|
; Same as the noret variant above, but the atomic's old value is returned
; (the cmpswap loop's loaded value is copied to v0 at %atomicrmw.end).
; NOTE(review): !amdgpu.no.remote.memory is present but not yet honored; the
; op still expands to a cmpxchg retry loop.
define i32 @global_atomic_uinc_wrap_i32_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_uinc_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dword v3, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB143_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v3, vcc, 1, v5
; SI-NEXT: v_cmp_lt_u32_e32 vcc, v5, v2
; SI-NEXT: v_cndmask_b32_e32 v4, 0, v3, vcc
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB143_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB143_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 1, v1
; VI-NEXT: v_cmp_lt_u32_e32 vcc, v1, v2
; VI-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB143_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB143_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_add_u32_e32 v3, 1, v4
; GFX9-NEXT: v_cmp_lt_u32_e32 vcc, v4, v2
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB143_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
; Metadata-annotated op; currently still expanded — see header note.
%result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
ret i32 %result
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw udec_wrap
|
|
; ---------------------------------------------------------------------
|
|
|
|
; System-scope udec_wrap with no allocation metadata: must be conservatively
; expanded to a load + cmpswap retry loop on all three targets, since the
; address could be in fine-grained remote (e.g. PCIe peer) memory where the
; native atomic is not guaranteed to work.
define void @global_atomic_udec_wrap_i32_noret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_udec_wrap_i32_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s8, s10
; SI-NEXT: s_mov_b32 s9, s10
; SI-NEXT: buffer_load_dword v4, v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b64 s[6:7], 0
; SI-NEXT: .LBB144_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v4
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; SI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; SI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[8:11], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; SI-NEXT: s_cbranch_execnz .LBB144_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[6:7]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i32_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[6:7], 0
; VI-NEXT: .LBB144_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, -1, v4
; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; VI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; VI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; VI-NEXT: s_cbranch_execnz .LBB144_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[6:7]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i32_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[6:7], 0
; GFX9-NEXT: .LBB144_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; GFX9-NEXT: v_add_u32_e32 v3, -1, v4
; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX9-NEXT: s_cbranch_execnz .LBB144_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%tmp0 = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
ret void
}
|
|
|
|
; Same as global_atomic_udec_wrap_i32_noret, but through a constant GEP so the
; +16 byte offset folds into the addressing mode (addr64 offset:16 /
; offset:16) rather than needing a separate address add on SI/GFX9; VI's flat
; instructions have no offset field, so the add stays explicit.
define void @global_atomic_udec_wrap_i32_noret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_udec_wrap_i32_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s8, s10
; SI-NEXT: s_mov_b32 s9, s10
; SI-NEXT: buffer_load_dword v4, v[0:1], s[8:11], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[6:7], 0
; SI-NEXT: .LBB145_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v4
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; SI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; SI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[8:11], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; SI-NEXT: s_cbranch_execnz .LBB145_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[6:7]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i32_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[6:7], 0
; VI-NEXT: .LBB145_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, -1, v4
; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; VI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; VI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; VI-NEXT: s_cbranch_execnz .LBB145_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[6:7]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i32_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[6:7], 0
; GFX9-NEXT: .LBB145_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; GFX9-NEXT: v_add_u32_e32 v3, -1, v4
; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX9-NEXT: s_cbranch_execnz .LBB145_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i32 4
%tmp0 = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 %in seq_cst
ret void
}
|
|
|
|
; Value-returning udec_wrap at system scope: expanded to a cmpswap retry loop;
; the loop's loaded value (the atomic's old value) is moved into v0 after
; %atomicrmw.end to satisfy the i32 return.
define i32 @global_atomic_udec_wrap_i32_ret(ptr addrspace(1) %ptr, i32 %in) {
; SI-LABEL: global_atomic_udec_wrap_i32_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s8, s10
; SI-NEXT: s_mov_b32 s9, s10
; SI-NEXT: buffer_load_dword v3, v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b64 s[6:7], 0
; SI-NEXT: .LBB146_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v5
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v5, v2
; SI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; SI-NEXT: v_cndmask_b32_e32 v4, v3, v2, vcc
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[8:11], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; SI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; SI-NEXT: s_cbranch_execnz .LBB146_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[6:7]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i32_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[6:7], 0
; VI-NEXT: .LBB146_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: v_add_u32_e32 v3, vcc, -1, v4
; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; VI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; VI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; VI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; VI-NEXT: s_cbranch_execnz .LBB146_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[6:7]
; VI-NEXT: v_mov_b32_e32 v0, v3
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i32_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off
; GFX9-NEXT: s_mov_b64 s[6:7], 0
; GFX9-NEXT: .LBB146_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; GFX9-NEXT: v_add_u32_e32 v3, -1, v4
; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX9-NEXT: s_cbranch_execnz .LBB146_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
ret i32 %result
}
|
|
|
|
; Value-returning udec_wrap through a constant GEP: cmpswap retry loop with
; the +16 offset folded into the SI/GFX9 addressing modes (explicit address
; add on VI, whose flat ops take no immediate offset).
define i32 @global_atomic_udec_wrap_i32_ret_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_udec_wrap_i32_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s8, s10
; SI-NEXT: s_mov_b32 s9, s10
; SI-NEXT: buffer_load_dword v3, v[0:1], s[8:11], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[6:7], 0
; SI-NEXT: .LBB147_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v5
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v5, v2
; SI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; SI-NEXT: v_cndmask_b32_e32 v4, v3, v2, vcc
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[8:11], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; SI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; SI-NEXT: s_cbranch_execnz .LBB147_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[6:7]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i32_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[6:7], 0
; VI-NEXT: .LBB147_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, -1, v1
; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v1, v2
; VI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; VI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; VI-NEXT: s_cbranch_execnz .LBB147_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[6:7]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i32_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[6:7], 0
; GFX9-NEXT: .LBB147_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; GFX9-NEXT: v_add_u32_e32 v3, -1, v4
; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX9-NEXT: s_cbranch_execnz .LBB147_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i32 4
%result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 %in seq_cst
ret i32 %result
}
|
|
|
|
; udec_wrap with uniform (inreg/SGPR) pointer and operand under the
; amdgpu_gfx calling convention: still expands to a cmpswap retry loop. On SI
; the callee-saved s6/s7 pair is preserved via v_writelane/v_readlane into a
; VGPR (v5) that is itself spilled/reloaded around the function.
define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_udec_wrap_i32_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v5, s6, 0
; SI-NEXT: v_writelane_b32 v5, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[38:39], 0
; SI-NEXT: v_mov_b32_e32 v2, s34
; SI-NEXT: .LBB148_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; SI-NEXT: v_cmp_lt_u32_e64 s[36:37], s34, v1
; SI-NEXT: s_or_b64 vcc, vcc, s[36:37]
; SI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v4, v1
; SI-NEXT: v_mov_b32_e32 v3, v0
; SI-NEXT: buffer_atomic_cmpswap v[3:4], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v1
; SI-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_andn2_b64 exec, exec, s[38:39]
; SI-NEXT: s_cbranch_execnz .LBB148_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[38:39]
; SI-NEXT: v_readlane_b32 s7, v5, 1
; SI-NEXT: v_readlane_b32 s6, v5, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i32_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[36:37], 0
; VI-NEXT: v_mov_b32_e32 v4, s6
; VI-NEXT: .LBB148_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v2, vcc, -1, v3
; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; VI-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3
; VI-NEXT: s_or_b64 vcc, vcc, s[34:35]
; VI-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; VI-NEXT: s_cbranch_execnz .LBB148_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[36:37]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i32_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5]
; GFX9-NEXT: s_mov_b64 s[36:37], 0
; GFX9-NEXT: v_mov_b32_e32 v3, s6
; GFX9-NEXT: .LBB148_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX9-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v1
; GFX9-NEXT: v_add_u32_e32 v0, -1, v1
; GFX9-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[36:37]
; GFX9-NEXT: s_cbranch_execnz .LBB148_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[36:37]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%tmp0 = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
ret void
}
|
|
|
|
; Scalar-operand variant with a constant GEP: same cmpswap retry expansion as
; global_atomic_udec_wrap_i32_noret_scalar, with the +16 offset folded into
; the SI buffer / GFX9 global addressing modes; VI materializes the offset
; address with a scalar add (s_add_u32/s_addc_u32) up front.
define amdgpu_gfx void @global_atomic_udec_wrap_i32_noret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_udec_wrap_i32_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v5, s6, 0
; SI-NEXT: v_writelane_b32 v5, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v1, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[38:39], 0
; SI-NEXT: v_mov_b32_e32 v2, s34
; SI-NEXT: .LBB149_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; SI-NEXT: v_cmp_lt_u32_e64 s[36:37], s34, v1
; SI-NEXT: s_or_b64 vcc, vcc, s[36:37]
; SI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v4, v1
; SI-NEXT: v_mov_b32_e32 v3, v0
; SI-NEXT: buffer_atomic_cmpswap v[3:4], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v1
; SI-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_andn2_b64 exec, exec, s[38:39]
; SI-NEXT: s_cbranch_execnz .LBB149_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[38:39]
; SI-NEXT: v_readlane_b32 s7, v5, 1
; SI-NEXT: v_readlane_b32 s6, v5, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i32_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v0, s34
; VI-NEXT: v_mov_b32_e32 v1, s35
; VI-NEXT: flat_load_dword v3, v[0:1]
; VI-NEXT: s_mov_b64 s[36:37], 0
; VI-NEXT: v_mov_b32_e32 v4, s6
; VI-NEXT: .LBB149_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v2, vcc, -1, v3
; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; VI-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v3
; VI-NEXT: s_or_b64 vcc, vcc, s[34:35]
; VI-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; VI-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; VI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; VI-NEXT: v_mov_b32_e32 v3, v2
; VI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; VI-NEXT: s_cbranch_execnz .LBB149_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[36:37]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i32_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dword v1, v2, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[36:37], 0
; GFX9-NEXT: v_mov_b32_e32 v3, s6
; GFX9-NEXT: .LBB149_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; GFX9-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v1
; GFX9-NEXT: v_add_u32_e32 v0, -1, v1
; GFX9-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; GFX9-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; GFX9-NEXT: v_mov_b32_e32 v1, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[36:37]
; GFX9-NEXT: s_cbranch_execnz .LBB149_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[36:37]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i32, ptr addrspace(1) %out, i32 4
%tmp0 = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 %in seq_cst
ret void
}
|
|
|
|
; Uniform-operand (amdgpu_gfx, inreg/SGPR) seq_cst udec_wrap on a global
; pointer, returning the previous value. The atomicrmw carries no memory
; metadata, so on all three targets it is expanded into a load followed by a
; cmpxchg retry loop (.LBB150_1) rather than a single native atomic.
; NOTE(review): this is the conservative system-scope expansion described in
; the change intent; later metadata handling is expected to avoid it.
define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_scalar(ptr addrspace(1) inreg %ptr, i32 inreg %in) {
; SI-LABEL: global_atomic_udec_wrap_i32_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v5, s6, 0
; SI-NEXT: v_writelane_b32 v5, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[38:39], 0
; SI-NEXT: v_mov_b32_e32 v2, s34
; SI-NEXT: .LBB150_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; SI-NEXT: v_cmp_lt_u32_e64 s[36:37], s34, v4
; SI-NEXT: s_or_b64 vcc, vcc, s[36:37]
; SI-NEXT: v_cndmask_b32_e32 v3, v0, v2, vcc
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: v_mov_b32_e32 v1, v4
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; SI-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
; SI-NEXT: s_andn2_b64 exec, exec, s[38:39]
; SI-NEXT: s_cbranch_execnz .LBB150_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[38:39]
; SI-NEXT: v_readlane_b32 s7, v5, 1
; SI-NEXT: v_readlane_b32 s6, v5, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i32_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_mov_b64 s[36:37], 0
; VI-NEXT: v_mov_b32_e32 v3, s6
; VI-NEXT: v_mov_b32_e32 v2, s5
; VI-NEXT: .LBB150_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v5, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, -1, v5
; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; VI-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5
; VI-NEXT: s_or_b64 vcc, vcc, s[34:35]
; VI-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[4:5] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; VI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; VI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; VI-NEXT: s_cbranch_execnz .LBB150_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[36:37]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i32_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5]
; GFX9-NEXT: s_mov_b64 s[36:37], 0
; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: .LBB150_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v4
; GFX9-NEXT: v_add_u32_e32 v0, -1, v4
; GFX9-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GFX9-NEXT: v_cndmask_b32_e32 v3, v0, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[3:4], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; GFX9-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[36:37]
; GFX9-NEXT: s_cbranch_execnz .LBB150_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[36:37]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; Same as global_atomic_udec_wrap_i32_ret_scalar, but the access is at a
; constant +16-byte offset (gep of 4 x i32). Checks verify the offset is
; folded into the addressing mode (buffer offset:16 on SI, s_add_u32/addc on
; VI flat, offset:16 on GFX9 global) while the cmpxchg-loop expansion
; (.LBB151_1) is otherwise identical.
define amdgpu_gfx i32 @global_atomic_udec_wrap_i32_ret_offset_scalar(ptr addrspace(1) inreg %out, i32 inreg %in) {
; SI-LABEL: global_atomic_udec_wrap_i32_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v5, s6, 0
; SI-NEXT: v_writelane_b32 v5, s7, 1
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0 offset:16
; SI-NEXT: s_mov_b64 s[38:39], 0
; SI-NEXT: v_mov_b32_e32 v2, s34
; SI-NEXT: .LBB151_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, -1, v4
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; SI-NEXT: v_cmp_lt_u32_e64 s[36:37], s34, v4
; SI-NEXT: s_or_b64 vcc, vcc, s[36:37]
; SI-NEXT: v_cndmask_b32_e32 v3, v0, v2, vcc
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: v_mov_b32_e32 v1, v4
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; SI-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
; SI-NEXT: s_andn2_b64 exec, exec, s[38:39]
; SI-NEXT: s_cbranch_execnz .LBB151_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[38:39]
; SI-NEXT: v_readlane_b32 s7, v5, 1
; SI-NEXT: v_readlane_b32 s6, v5, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i32_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 16
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v1, s34
; VI-NEXT: v_mov_b32_e32 v2, s35
; VI-NEXT: flat_load_dword v0, v[1:2]
; VI-NEXT: s_mov_b64 s[36:37], 0
; VI-NEXT: v_mov_b32_e32 v3, s6
; VI-NEXT: .LBB151_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v5, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, -1, v5
; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; VI-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v5
; VI-NEXT: s_or_b64 vcc, vcc, s[34:35]
; VI-NEXT: v_cndmask_b32_e32 v4, v0, v3, vcc
; VI-NEXT: flat_atomic_cmpswap v0, v[1:2], v[4:5] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5
; VI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; VI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; VI-NEXT: s_cbranch_execnz .LBB151_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[36:37]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i32_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_load_dword v0, v1, s[4:5] offset:16
; GFX9-NEXT: s_mov_b64 s[36:37], 0
; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: .LBB151_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9-NEXT: v_cmp_lt_u32_e64 s[34:35], s6, v4
; GFX9-NEXT: v_add_u32_e32 v0, -1, v4
; GFX9-NEXT: s_or_b64 vcc, vcc, s[34:35]
; GFX9-NEXT: v_cndmask_b32_e32 v3, v0, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[3:4], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v4
; GFX9-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[36:37]
; GFX9-NEXT: s_cbranch_execnz .LBB151_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[36:37]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i32 4
  %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 %in seq_cst
  ret i32 %result
}
|
|
|
|
; udec_wrap with divergent (VGPR) operands, result unused, at a +16-byte
; offset, and carrying !amdgpu.no.remote.memory (!0). At this point in the
; test's history the metadata is not yet consumed by the backend, so the
; checks still show the conservative load + cmpxchg retry loop (.LBB152_1);
; a follow-up change is expected to emit a native atomic here instead.
define void @global_atomic_udec_wrap_i32_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_udec_wrap_i32_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s8, s10
; SI-NEXT: s_mov_b32 s9, s10
; SI-NEXT: buffer_load_dword v4, v[0:1], s[8:11], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[6:7], 0
; SI-NEXT: .LBB152_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v4
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; SI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; SI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v4
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: buffer_atomic_cmpswap v[5:6], v[0:1], s[8:11], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4
; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; SI-NEXT: s_cbranch_execnz .LBB152_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[6:7]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i32_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v4, v[0:1]
; VI-NEXT: s_mov_b64 s[6:7], 0
; VI-NEXT: .LBB152_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, -1, v4
; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; VI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; VI-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; VI-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; VI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; VI-NEXT: v_mov_b32_e32 v4, v3
; VI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; VI-NEXT: s_cbranch_execnz .LBB152_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[6:7]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i32_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v4, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[6:7], 0
; GFX9-NEXT: .LBB152_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; GFX9-NEXT: v_add_u32_e32 v3, -1, v4
; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX9-NEXT: s_cbranch_execnz .LBB152_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
|
|
|
|
; Value-returning variant of the preceding test: divergent operands, +16-byte
; offset, !amdgpu.no.remote.memory (!0). The metadata is not yet honored, so
; all targets still show the load + cmpxchg retry loop (.LBB153_1) and the
; old value is produced in v0 at the end (v_mov_b32 v0, v3 on SI/GFX9;
; the cmpswap result lands directly in v0 on VI).
define i32 @global_atomic_udec_wrap_i32_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: global_atomic_udec_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s8, s10
; SI-NEXT: s_mov_b32 s9, s10
; SI-NEXT: buffer_load_dword v3, v[0:1], s[8:11], 0 addr64 offset:16
; SI-NEXT: s_mov_b64 s[6:7], 0
; SI-NEXT: .LBB153_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v3, vcc, -1, v5
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v5
; SI-NEXT: v_cmp_gt_u32_e64 s[4:5], v5, v2
; SI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; SI-NEXT: v_cndmask_b32_e32 v4, v3, v2, vcc
; SI-NEXT: v_mov_b32_e32 v3, v4
; SI-NEXT: v_mov_b32_e32 v4, v5
; SI-NEXT: buffer_atomic_cmpswap v[3:4], v[0:1], s[8:11], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; SI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; SI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; SI-NEXT: s_cbranch_execnz .LBB153_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[6:7]
; SI-NEXT: v_mov_b32_e32 v0, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 16, v0
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[3:4]
; VI-NEXT: s_mov_b64 s[6:7], 0
; VI-NEXT: .LBB153_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, -1, v1
; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; VI-NEXT: v_cmp_gt_u32_e64 s[4:5], v1, v2
; VI-NEXT: s_or_b64 vcc, vcc, s[4:5]
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
; VI-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
; VI-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; VI-NEXT: s_andn2_b64 exec, exec, s[6:7]
; VI-NEXT: s_cbranch_execnz .LBB153_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[6:7]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i32_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v3, v[0:1], off offset:16
; GFX9-NEXT: s_mov_b64 s[6:7], 0
; GFX9-NEXT: .LBB153_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v3
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9-NEXT: v_cmp_gt_u32_e64 s[4:5], v4, v2
; GFX9-NEXT: v_add_u32_e32 v3, -1, v4
; GFX9-NEXT: s_or_b64 vcc, vcc, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap v3, v[0:1], v[3:4], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX9-NEXT: s_cbranch_execnz .LBB153_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
; GFX9-NEXT: v_mov_b32_e32 v0, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
  %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i32 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i32 %result
}
|
|
|
|
; Empty metadata node referenced by the !amdgpu.no.remote.memory annotations
; on the atomicrmw instructions in the *_no_remote_memory tests above.
!0 = !{}