llvm-project/llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll
Piotr Sobczak 170c0dac44
[AMDGPU] Fix edge case of buffer OOB handling (#115479)
Strengthen out-of-bounds guarantees for buffer accesses by disallowing
accesses with alignment lower than the natural alignment.

This is needed to specifically address the edge case where an access
starts out-of-bounds and then enters in-bounds, as the hardware would
treat the entire access as out-of-bounds. Most users do not need this,
but at least one graphics device extension (VK_EXT_robustness2) imposes
strict requirements: in-bounds accesses must return the correct value,
and out-of-bounds accesses must return zero.
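
For illustration (hypothetical IR, not taken from this test): merging a
load at offset -4 with a load at offset 0 produces an i64 access that
starts out-of-bounds and ends in-bounds. The hardware treats the whole
access as out-of-bounds and returns zero for all 8 bytes, not just the
first 4:

    %oob = getelementptr i8, ptr addrspace(7) %p, i32 -4
    %v = load i64, ptr addrspace(7) %oob, align 4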

The direct consequence of the patch is that a buffer access at a
negative address is no longer merged by the load-store vectorizer with
one at a positive address, which fixes a CTS test.

Targets that do not need the stricter guarantees can use the new target
feature relaxed-buffer-oob-mode, which preserves the behavior from
before this patch.
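
For example (assuming the usual llc syntax for enabling a target
feature; the exact flag spelling is an illustration, not taken from this
patch):

    llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+relaxed-buffer-oob-mode foo.ll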
2025-03-07 08:56:44 +01:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefix=SDAG %s
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefix=GISEL %s
; Check that in strict OOB mode for buffers (relaxed-buffer-oob-mode attribute not set) the underaligned loads and stores get split.
; FIXME: The loads/stores do not get split (extend amdgpu-lower-buffer-fat-pointers?).
define amdgpu_ps void @split_underaligned_load(ptr addrspace(7) inreg %p, ptr addrspace(7) inreg %p2) #0 {
; SDAG-LABEL: split_underaligned_load:
; SDAG: ; %bb.0: ; %entry
; SDAG-NEXT: v_mov_b32_e32 v0, s4
; SDAG-NEXT: v_mov_b32_e32 v2, s9
; SDAG-NEXT: s_mov_b32 s15, s8
; SDAG-NEXT: s_mov_b32 s14, s7
; SDAG-NEXT: s_mov_b32 s13, s6
; SDAG-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen
; SDAG-NEXT: s_mov_b32 s12, s5
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: buffer_store_b64 v[0:1], v2, s[12:15], 0 offen
; SDAG-NEXT: s_endpgm
;
; GISEL-LABEL: split_underaligned_load:
; GISEL: ; %bb.0: ; %entry
; GISEL-NEXT: v_mov_b32_e32 v0, s4
; GISEL-NEXT: v_mov_b32_e32 v2, s9
; GISEL-NEXT: s_mov_b32 s12, s5
; GISEL-NEXT: s_mov_b32 s13, s6
; GISEL-NEXT: s_mov_b32 s14, s7
; GISEL-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen
; GISEL-NEXT: s_mov_b32 s15, s8
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: buffer_store_b64 v[0:1], v2, s[12:15], 0 offen
; GISEL-NEXT: s_endpgm
entry:
  %gep = getelementptr i8, ptr addrspace(7) %p, i32 0
  %ld = load i64, ptr addrspace(7) %gep, align 4
  %gep2 = getelementptr i8, ptr addrspace(7) %p2, i32 0
  store i64 %ld, ptr addrspace(7) %gep2, align 4
  ret void
}

; Check that in strict OOB mode for buffers (relaxed-buffer-oob-mode attribute not set) the naturally aligned loads and stores do not get split.
define amdgpu_ps void @do_not_split_aligned_load(ptr addrspace(7) inreg %p, ptr addrspace(7) inreg %p2) #0 {
; SDAG-LABEL: do_not_split_aligned_load:
; SDAG: ; %bb.0: ; %entry
; SDAG-NEXT: v_mov_b32_e32 v0, s4
; SDAG-NEXT: v_mov_b32_e32 v2, s9
; SDAG-NEXT: s_mov_b32 s15, s8
; SDAG-NEXT: s_mov_b32 s14, s7
; SDAG-NEXT: s_mov_b32 s13, s6
; SDAG-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen
; SDAG-NEXT: s_mov_b32 s12, s5
; SDAG-NEXT: s_waitcnt vmcnt(0)
; SDAG-NEXT: buffer_store_b64 v[0:1], v2, s[12:15], 0 offen
; SDAG-NEXT: s_endpgm
;
; GISEL-LABEL: do_not_split_aligned_load:
; GISEL: ; %bb.0: ; %entry
; GISEL-NEXT: v_mov_b32_e32 v0, s4
; GISEL-NEXT: v_mov_b32_e32 v2, s9
; GISEL-NEXT: s_mov_b32 s12, s5
; GISEL-NEXT: s_mov_b32 s13, s6
; GISEL-NEXT: s_mov_b32 s14, s7
; GISEL-NEXT: buffer_load_b64 v[0:1], v0, s[0:3], 0 offen
; GISEL-NEXT: s_mov_b32 s15, s8
; GISEL-NEXT: s_waitcnt vmcnt(0)
; GISEL-NEXT: buffer_store_b64 v[0:1], v2, s[12:15], 0 offen
; GISEL-NEXT: s_endpgm
entry:
  %gep = getelementptr i8, ptr addrspace(7) %p, i32 0
  %ld = load i64, ptr addrspace(7) %gep, align 8
  %gep2 = getelementptr i8, ptr addrspace(7) %p2, i32 0
  store i64 %ld, ptr addrspace(7) %gep2, align 8
  ret void
}
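
; For illustration only, a hypothetical relaxed-mode counterpart (no RUN
; line above exercises it; the function name, attribute group #1, and the
; "target-features" spelling are assumptions): with the feature enabled,
; the underaligned i64 access would be expected to stay whole.
define amdgpu_ps void @relaxed_underaligned_load(ptr addrspace(7) inreg %p, ptr addrspace(7) inreg %p2) #1 {
entry:
  %ld = load i64, ptr addrspace(7) %p, align 4
  store i64 %ld, ptr addrspace(7) %p2, align 4
  ret void
}

attributes #1 = { "target-features"="+relaxed-buffer-oob-mode" }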