
This is an NFC patch. It runs a bulk update on the CodeGen tests impacted by the true16 feature. The patch applies two changes: 1. duplicate the GFX11+ RUN lines, running them with both "+real-true16" and "-real-true16" in -mattr; 2. regenerate the tests with the update script. For some GISel RUN lines, current CodeGen does not yet fully support the true16 variant; those RUN lines are still updated, but the failing ones are commented out and a "FIXME-TRUE16" comment is added to the affected tests for easier tracking. These tests will be fixed in follow-up patches. This is a transitional state in which both "+real-true16" and "-real-true16" are supported in the code base; the plan is to make "+real-true16" the default and eventually remove the "-real-true16" mode and its test lines.
420 lines
18 KiB
LLVM
420 lines
18 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL-TRUE16
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16
|
; Spin loop: re-issues an atomic (glc) struct buffer load with a dynamic
; idxen index until the loaded value equals the workitem id. Codegen is
; identical for all four RUN configurations, so a single CHECK body is used.
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32(ptr addrspace(8) %ptr, i32 %index) {
; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_clause 0x1
; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v1, s6
; CHECK-NEXT: .LBB0_1: ; %bb1
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; CHECK-NEXT: s_cbranch_execnz .LBB0_1
; CHECK-NEXT: ; %bb.2: ; %bb2
; CHECK-NEXT: s_endpgm
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1

bb1:
  ; vindex = %index, voffset = 0, soffset = 0, cachepolicy = 1 (glc)
  %load = call i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8) %ptr, i32 %index, i32 0, i32 0, i32 1)
  %cmp = icmp eq i32 %load, %id
  br i1 %cmp, label %bb1, label %bb2

bb2:
  ret void
}
|
|
|
|
; Same spin loop as above, but with a constant buffer index (15); the index
; is materialized into v1 with v_dual_mov_b32 instead of loaded from kernargs.
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_const_idx(ptr addrspace(8) %ptr) {
; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_const_idx:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; CHECK-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: .LBB1_1: ; %bb1
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; CHECK-NEXT: s_cbranch_execnz .LBB1_1
; CHECK-NEXT: ; %bb.2: ; %bb2
; CHECK-NEXT: s_endpgm
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1

bb1:
  ; vindex = constant 15, voffset = 0, soffset = 0, cachepolicy = 1 (glc)
  %load = call i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8) %ptr, i32 15, i32 0, i32 0, i32 1)
  %cmp = icmp eq i32 %load, %id
  br i1 %cmp, label %bb1, label %bb2

bb2:
  ret void
}
|
|
|
|
; Variant exercising the voffset operand (here 0); generated code matches the
; dynamic-index case above, with the atomic load kept inside the loop.
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_off(ptr addrspace(8) %ptr, i32 %index) {
; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_off:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_clause 0x1
; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v1, s6
; CHECK-NEXT: .LBB2_1: ; %bb1
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; CHECK-NEXT: s_cbranch_execnz .LBB2_1
; CHECK-NEXT: ; %bb.2: ; %bb2
; CHECK-NEXT: s_endpgm
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1

bb1:
  ; vindex = %index, voffset = 0, soffset = 0, cachepolicy = 1 (glc)
  %load = call i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8) %ptr, i32 %index, i32 0, i32 0, i32 1)
  %cmp = icmp eq i32 %load, %id
  br i1 %cmp, label %bb1, label %bb2

bb2:
  ret void
}
|
|
|
|
; Variant with both voffset = 4 and soffset = 4: they show up in the MUBUF
; encoding as the literal soffset operand ", 4" plus "offset:4".
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_soff(ptr addrspace(8) %ptr, i32 %index) {
; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_soff:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_clause 0x1
; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v1, s6
; CHECK-NEXT: .LBB3_1: ; %bb1
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; CHECK-NEXT: s_cbranch_execnz .LBB3_1
; CHECK-NEXT: ; %bb.2: ; %bb2
; CHECK-NEXT: s_endpgm
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1

bb1:
  ; vindex = %index, voffset = 4, soffset = 4, cachepolicy = 1 (glc)
  %load = call i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8) %ptr, i32 %index, i32 4, i32 4, i32 1)
  %cmp = icmp eq i32 %load, %id
  br i1 %cmp, label %bb1, label %bb2

bb2:
  ret void
}
|
|
; Variant with cachepolicy = 4: lowered as the "dlc" cache-policy bit on the
; buffer load instead of "glc".
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_dlc(ptr addrspace(8) %ptr, i32 %index) {
; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_dlc:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_clause 0x1
; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v1, s6
; CHECK-NEXT: .LBB4_1: ; %bb1
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; CHECK-NEXT: s_cbranch_execnz .LBB4_1
; CHECK-NEXT: ; %bb.2: ; %bb2
; CHECK-NEXT: s_endpgm
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1

bb1:
  ; vindex = %index, voffset = 4, soffset = 0, cachepolicy = 4 (dlc)
  %load = call i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8) %ptr, i32 %index, i32 4, i32 0, i32 4)
  %cmp = icmp eq i32 %load, %id
  br i1 %cmp, label %bb1, label %bb2

bb2:
  ret void
}
|
|
|
|
; Negative test: uses the plain (non-atomic) struct.ptr.buffer.load intrinsic.
; The checks show the load and compare are hoisted out of the loop — only the
; exec-mask update remains inside .LBB5_1 — unlike the atomic variants above.
define amdgpu_kernel void @struct_ptr_nonatomic_buffer_load_i32(ptr addrspace(8) %ptr, i32 %index) {
; CHECK-LABEL: struct_ptr_nonatomic_buffer_load_i32:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_clause 0x1
; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0
; CHECK-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc
; CHECK-NEXT: s_mov_b32 s0, 0
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
; CHECK-NEXT: .LBB5_1: ; %bb1
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo
; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; CHECK-NEXT: s_or_b32 s0, s1, s0
; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; CHECK-NEXT: s_cbranch_execnz .LBB5_1
; CHECK-NEXT: ; %bb.2: ; %bb2
; CHECK-NEXT: s_endpgm
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1

bb1:
  ; Non-atomic load: vindex = %index, voffset = 4, soffset = 0, glc
  %load = call i32 @llvm.amdgcn.struct.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 %index, i32 4, i32 0, i32 1)
  %cmp = icmp eq i32 %load, %id
  br i1 %cmp, label %bb1, label %bb2

bb2:
  ret void
}
|
|
|
|
; 64-bit result variant: the atomic load returns i64 (buffer_load_b64 into a
; register pair) and is compared against the zero-extended workitem id.
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i64(ptr addrspace(8) %ptr, i32 %index) {
; CHECK-LABEL: struct_ptr_atomic_buffer_load_i64:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_clause 0x1
; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v2, s6
; CHECK-NEXT: .LBB6_1: ; %bb1
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1]
; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; CHECK-NEXT: s_cbranch_execnz .LBB6_1
; CHECK-NEXT: ; %bb.2: ; %bb2
; CHECK-NEXT: s_endpgm
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %id.zext = zext i32 %id to i64
  br label %bb1

bb1:
  ; vindex = %index, voffset = 4, soffset = 0, cachepolicy = 1 (glc)
  %load = call i64 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i64(ptr addrspace(8) %ptr, i32 %index, i32 4, i32 0, i32 1)
  %cmp = icmp eq i64 %load, %id.zext
  br i1 %cmp, label %bb1, label %bb2

bb2:
  ret void
}
|
|
|
|
; <2 x i16> result variant: the vector is bitcast to i32, so the comparison
; folds to a single 32-bit compare and codegen matches the scalar i32 case.
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v2i16(ptr addrspace(8) %ptr, i32 %index) {
; CHECK-LABEL: struct_ptr_atomic_buffer_load_v2i16:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_clause 0x1
; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v1, s6
; CHECK-NEXT: .LBB7_1: ; %bb1
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; CHECK-NEXT: s_cbranch_execnz .LBB7_1
; CHECK-NEXT: ; %bb.2: ; %bb2
; CHECK-NEXT: s_endpgm
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1

bb1:
  ; vindex = %index, voffset = 0, soffset = 0, cachepolicy = 1 (glc)
  %load = call <2 x i16> @llvm.amdgcn.struct.ptr.atomic.buffer.load.v2i16(ptr addrspace(8) %ptr, i32 %index, i32 0, i32 0, i32 1)
  %bitcast = bitcast <2 x i16> %load to i32
  %cmp = icmp eq i32 %bitcast, %id
  br i1 %cmp, label %bb1, label %bb2

bb2:
  ret void
}
|
|
|
|
; <4 x i16> variant where true16 and fake16 codegen diverge, so per-prefix
; CHECK bodies are emitted: SDAG true16 and fake16 pack the shuffled halves
; with v_and + v_lshl_or, while GISel true16 uses a 16-bit v_mov_b16 to move
; v3.l into v2.h directly.
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i16(ptr addrspace(8) %ptr, i32 %index) {
; CHECK-SDAG-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb
; CHECK-SDAG-TRUE16-NEXT: s_clause 0x1
; CHECK-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6
; CHECK-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1
; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
; CHECK-SDAG-TRUE16-NEXT: s_endpgm
;
; CHECK-FAKE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
; CHECK-FAKE16: ; %bb.0: ; %bb
; CHECK-FAKE16-NEXT: s_clause 0x1
; CHECK-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0
; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-FAKE16-NEXT: v_mov_b32_e32 v1, s6
; CHECK-FAKE16-NEXT: .LBB8_1: ; %bb1
; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0)
; CHECK-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; CHECK-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2
; CHECK-FAKE16-NEXT: s_endpgm
;
; CHECK-GISEL-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
; CHECK-GISEL-TRUE16: ; %bb.0: ; %bb
; CHECK-GISEL-TRUE16-NEXT: s_clause 0x1
; CHECK-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
; CHECK-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; CHECK-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; CHECK-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
; CHECK-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6
; CHECK-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1
; CHECK-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
; CHECK-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0)
; CHECK-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
; CHECK-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; CHECK-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; CHECK-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
; CHECK-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; CHECK-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
; CHECK-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
; CHECK-GISEL-TRUE16-NEXT: s_endpgm
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1

bb1:
  ; vindex = %index, voffset = 4, soffset = 0, cachepolicy = 1 (glc)
  %load = call <4 x i16> @llvm.amdgcn.struct.ptr.atomic.buffer.load.v4i16(ptr addrspace(8) %ptr, i32 %index, i32 4, i32 0, i32 1)
  ; Keep even elements (0 and 2), then view the pair as an i32 for the compare.
  %shortened = shufflevector <4 x i16> %load, <4 x i16> poison, <2 x i32> <i32 0, i32 2>
  %bitcast = bitcast <2 x i16> %shortened to i32
  %cmp = icmp eq i32 %bitcast, %id
  br i1 %cmp, label %bb1, label %bb2

bb2:
  ret void
}
|
|
|
|
; <4 x i32> variant: loads a 128-bit result and compares only element 3 (v5)
; against the workitem id.
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i32(ptr addrspace(8) %ptr, i32 %index) {
; CHECK-LABEL: struct_ptr_atomic_buffer_load_v4i32:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_clause 0x1
; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v1, s6
; CHECK-NEXT: .LBB9_1: ; %bb1
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; CHECK-NEXT: s_cbranch_execnz .LBB9_1
; CHECK-NEXT: ; %bb.2: ; %bb2
; CHECK-NEXT: s_endpgm
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1

bb1:
  ; vindex = %index, voffset = 4, soffset = 0, cachepolicy = 1 (glc)
  %load = call <4 x i32> @llvm.amdgcn.struct.ptr.atomic.buffer.load.v4i32(ptr addrspace(8) %ptr, i32 %index, i32 4, i32 0, i32 1)
  %extracted = extractelement <4 x i32> %load, i32 3
  %cmp = icmp eq i32 %extracted, %id
  br i1 %cmp, label %bb1, label %bb2

bb2:
  ret void
}
|
|
|
|
; Pointer-result variant: the atomic buffer load yields a 64-bit pointer
; (buffer_load_b64), which is then dereferenced with a flat load before the
; compare against the workitem id.
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_ptr(ptr addrspace(8) %ptr, i32 %index) {
; CHECK-LABEL: struct_ptr_atomic_buffer_load_ptr:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_clause 0x1
; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; CHECK-NEXT: s_mov_b32 s4, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v1, s6
; CHECK-NEXT: .LBB10_1: ; %bb1
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: flat_load_b32 v2, v[2:3]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
; CHECK-NEXT: s_cbranch_execnz .LBB10_1
; CHECK-NEXT: ; %bb.2: ; %bb2
; CHECK-NEXT: s_endpgm
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1

bb1:
  ; vindex = %index, voffset = 4, soffset = 0, cachepolicy = 1 (glc)
  %load = call ptr @llvm.amdgcn.struct.ptr.atomic.buffer.load.ptr(ptr addrspace(8) %ptr, i32 %index, i32 4, i32 0, i32 1)
  %elem = load i32, ptr %load
  %cmp = icmp eq i32 %elem, %id
  br i1 %cmp, label %bb1, label %bb2

bb2:
  ret void
}
|
|
|
|
; Intrinsic declarations for the calls above.
; Fixed: the declarations previously used "struct.ptr.atom.buffer.load.*",
; which is not a valid AMDGPU intrinsic name and did not match the
; "struct.ptr.atomic.buffer.load.*" intrinsics actually called in this file.
declare i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8), i32, i32, i32, i32 immarg)
declare i64 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i64(ptr addrspace(8), i32, i32, i32, i32 immarg)
declare <2 x i16> @llvm.amdgcn.struct.ptr.atomic.buffer.load.v2i16(ptr addrspace(8), i32, i32, i32, i32 immarg)
declare <4 x i16> @llvm.amdgcn.struct.ptr.atomic.buffer.load.v4i16(ptr addrspace(8), i32, i32, i32, i32 immarg)
declare <4 x i32> @llvm.amdgcn.struct.ptr.atomic.buffer.load.v4i32(ptr addrspace(8), i32, i32, i32, i32 immarg)
declare ptr @llvm.amdgcn.struct.ptr.atomic.buffer.load.ptr(ptr addrspace(8), i32, i32, i32, i32 immarg)
declare i32 @llvm.amdgcn.struct.ptr.buffer.load.i32(ptr addrspace(8), i32, i32, i32, i32 immarg)
declare i32 @llvm.amdgcn.workitem.id.x()