
Recent upstream practice has moved away from passing `-verify-machineinstrs` explicitly in tests, since the machine verifier already runs in builds configured with `LLVM_ENABLE_EXPENSIVE_CHECKS`. This PR removes almost all uses of `-verify-machineinstrs` from tests in `llvm/test/CodeGen/AMDGPU/*.ll`, keeping it only in the tests where its removal currently causes failures.
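For illustration, each affected test's RUN lines change along these lines (a hypothetical diff based on the first RUN line of the file below; the exact llc flags vary per test):

```diff
-; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=SI %s
+; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefix=SI %s
```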
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefix=SI %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -enable-var-scope -check-prefix=VI %s

declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone

define amdgpu_kernel void @sextload_i1_to_i32_trunc_cmp_eq_0(ptr addrspace(1) %out, ptr addrspace(1) %in) nounwind {
; SI-LABEL: sextload_i1_to_i32_trunc_cmp_eq_0:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    s_mov_b32 s10, s6
; SI-NEXT:    s_mov_b32 s11, s7
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s8, s2
; SI-NEXT:    s_mov_b32 s9, s3
; SI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; SI-NEXT:    s_mov_b32 s4, s0
; SI-NEXT:    s_mov_b32 s5, s1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_and_b32_e32 v0, 1, v0
; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
; SI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: sextload_i1_to_i32_trunc_cmp_eq_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s10, s6
; VI-NEXT:    s_mov_b32 s11, s7
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s8, s2
; VI-NEXT:    s_mov_b32 s9, s3
; VI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_and_b32_e32 v0, 1, v0
; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
; VI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; VI-NEXT:    s_endpgm
  %load = load i1, ptr addrspace(1) %in
  %ext = sext i1 %load to i32
  %cmp = icmp eq i32 %ext, 0
  store i1 %cmp, ptr addrspace(1) %out
  ret void
}

; FIXME: The negate should be inverting the compare.
define amdgpu_kernel void @zextload_i1_to_i32_trunc_cmp_eq_0(ptr addrspace(1) %out, ptr addrspace(1) %in) nounwind {
; SI-LABEL: zextload_i1_to_i32_trunc_cmp_eq_0:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    s_mov_b32 s10, s6
; SI-NEXT:    s_mov_b32 s11, s7
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s8, s2
; SI-NEXT:    s_mov_b32 s9, s3
; SI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; SI-NEXT:    s_mov_b32 s4, s0
; SI-NEXT:    s_mov_b32 s5, s1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_and_b32_e32 v0, 1, v0
; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
; SI-NEXT:    s_xor_b64 s[0:1], vcc, -1
; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; SI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: zextload_i1_to_i32_trunc_cmp_eq_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s10, s6
; VI-NEXT:    s_mov_b32 s11, s7
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s8, s2
; VI-NEXT:    s_mov_b32 s9, s3
; VI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_and_b32_e32 v0, 1, v0
; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
; VI-NEXT:    s_xor_b64 s[0:1], vcc, -1
; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; VI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; VI-NEXT:    s_endpgm
  %load = load i1, ptr addrspace(1) %in
  %ext = zext i1 %load to i32
  %cmp = icmp eq i32 %ext, 0
  store i1 %cmp, ptr addrspace(1) %out
  ret void
}

define amdgpu_kernel void @sextload_i1_to_i32_trunc_cmp_eq_1(ptr addrspace(1) %out, ptr addrspace(1) %in) nounwind {
; SI-LABEL: sextload_i1_to_i32_trunc_cmp_eq_1:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT:    s_mov_b32 s3, 0xf000
; SI-NEXT:    s_mov_b32 s2, -1
; SI-NEXT:    v_mov_b32_e32 v0, 0
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: sextload_i1_to_i32_trunc_cmp_eq_1:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT:    s_mov_b32 s3, 0xf000
; VI-NEXT:    s_mov_b32 s2, -1
; VI-NEXT:    v_mov_b32_e32 v0, 0
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %load = load i1, ptr addrspace(1) %in
  %ext = sext i1 %load to i32
  %cmp = icmp eq i32 %ext, 1
  store i1 %cmp, ptr addrspace(1) %out
  ret void
}

define amdgpu_kernel void @zextload_i1_to_i32_trunc_cmp_eq_1(ptr addrspace(1) %out, ptr addrspace(1) %in) nounwind {
; SI-LABEL: zextload_i1_to_i32_trunc_cmp_eq_1:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    s_mov_b32 s10, s6
; SI-NEXT:    s_mov_b32 s11, s7
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s8, s2
; SI-NEXT:    s_mov_b32 s9, s3
; SI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; SI-NEXT:    s_mov_b32 s4, s0
; SI-NEXT:    s_mov_b32 s5, s1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_and_b32_e32 v0, 1, v0
; SI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: zextload_i1_to_i32_trunc_cmp_eq_1:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s10, s6
; VI-NEXT:    s_mov_b32 s11, s7
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s8, s2
; VI-NEXT:    s_mov_b32 s9, s3
; VI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_and_b32_e32 v0, 1, v0
; VI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; VI-NEXT:    s_endpgm
  %load = load i1, ptr addrspace(1) %in
  %ext = zext i1 %load to i32
  %cmp = icmp eq i32 %ext, 1
  store i1 %cmp, ptr addrspace(1) %out
  ret void
}

define amdgpu_kernel void @sextload_i1_to_i32_trunc_cmp_eq_neg1(ptr addrspace(1) %out, ptr addrspace(1) %in) nounwind {
; SI-LABEL: sextload_i1_to_i32_trunc_cmp_eq_neg1:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    s_mov_b32 s10, s6
; SI-NEXT:    s_mov_b32 s11, s7
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s8, s2
; SI-NEXT:    s_mov_b32 s9, s3
; SI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; SI-NEXT:    s_mov_b32 s4, s0
; SI-NEXT:    s_mov_b32 s5, s1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_and_b32_e32 v0, 1, v0
; SI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: sextload_i1_to_i32_trunc_cmp_eq_neg1:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s10, s6
; VI-NEXT:    s_mov_b32 s11, s7
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s8, s2
; VI-NEXT:    s_mov_b32 s9, s3
; VI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_and_b32_e32 v0, 1, v0
; VI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; VI-NEXT:    s_endpgm
  %load = load i1, ptr addrspace(1) %in
  %ext = sext i1 %load to i32
  %cmp = icmp eq i32 %ext, -1
  store i1 %cmp, ptr addrspace(1) %out
  ret void
}

define amdgpu_kernel void @zextload_i1_to_i32_trunc_cmp_eq_neg1(ptr addrspace(1) %out, ptr addrspace(1) %in) nounwind {
; SI-LABEL: zextload_i1_to_i32_trunc_cmp_eq_neg1:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT:    s_mov_b32 s3, 0xf000
; SI-NEXT:    s_mov_b32 s2, -1
; SI-NEXT:    v_mov_b32_e32 v0, 0
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: zextload_i1_to_i32_trunc_cmp_eq_neg1:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT:    s_mov_b32 s3, 0xf000
; VI-NEXT:    s_mov_b32 s2, -1
; VI-NEXT:    v_mov_b32_e32 v0, 0
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %load = load i1, ptr addrspace(1) %in
  %ext = zext i1 %load to i32
  %cmp = icmp eq i32 %ext, -1
  store i1 %cmp, ptr addrspace(1) %out
  ret void
}


define amdgpu_kernel void @sextload_i1_to_i32_trunc_cmp_ne_0(ptr addrspace(1) %out, ptr addrspace(1) %in) nounwind {
; SI-LABEL: sextload_i1_to_i32_trunc_cmp_ne_0:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    s_mov_b32 s10, s6
; SI-NEXT:    s_mov_b32 s11, s7
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s8, s2
; SI-NEXT:    s_mov_b32 s9, s3
; SI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; SI-NEXT:    s_mov_b32 s4, s0
; SI-NEXT:    s_mov_b32 s5, s1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_and_b32_e32 v0, 1, v0
; SI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: sextload_i1_to_i32_trunc_cmp_ne_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s10, s6
; VI-NEXT:    s_mov_b32 s11, s7
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s8, s2
; VI-NEXT:    s_mov_b32 s9, s3
; VI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_and_b32_e32 v0, 1, v0
; VI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; VI-NEXT:    s_endpgm
  %load = load i1, ptr addrspace(1) %in
  %ext = sext i1 %load to i32
  %cmp = icmp ne i32 %ext, 0
  store i1 %cmp, ptr addrspace(1) %out
  ret void
}

define amdgpu_kernel void @zextload_i1_to_i32_trunc_cmp_ne_0(ptr addrspace(1) %out, ptr addrspace(1) %in) nounwind {
; SI-LABEL: zextload_i1_to_i32_trunc_cmp_ne_0:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    s_mov_b32 s10, s6
; SI-NEXT:    s_mov_b32 s11, s7
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s8, s2
; SI-NEXT:    s_mov_b32 s9, s3
; SI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; SI-NEXT:    s_mov_b32 s4, s0
; SI-NEXT:    s_mov_b32 s5, s1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_and_b32_e32 v0, 1, v0
; SI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: zextload_i1_to_i32_trunc_cmp_ne_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s10, s6
; VI-NEXT:    s_mov_b32 s11, s7
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s8, s2
; VI-NEXT:    s_mov_b32 s9, s3
; VI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_and_b32_e32 v0, 1, v0
; VI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; VI-NEXT:    s_endpgm
  %load = load i1, ptr addrspace(1) %in
  %ext = zext i1 %load to i32
  %cmp = icmp ne i32 %ext, 0
  store i1 %cmp, ptr addrspace(1) %out
  ret void
}

define amdgpu_kernel void @sextload_i1_to_i32_trunc_cmp_ne_1(ptr addrspace(1) %out, ptr addrspace(1) %in) nounwind {
; SI-LABEL: sextload_i1_to_i32_trunc_cmp_ne_1:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT:    s_mov_b32 s3, 0xf000
; SI-NEXT:    s_mov_b32 s2, -1
; SI-NEXT:    v_mov_b32_e32 v0, 1
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: sextload_i1_to_i32_trunc_cmp_ne_1:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT:    s_mov_b32 s3, 0xf000
; VI-NEXT:    s_mov_b32 s2, -1
; VI-NEXT:    v_mov_b32_e32 v0, 1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %load = load i1, ptr addrspace(1) %in
  %ext = sext i1 %load to i32
  %cmp = icmp ne i32 %ext, 1
  store i1 %cmp, ptr addrspace(1) %out
  ret void
}

define amdgpu_kernel void @zextload_i1_to_i32_trunc_cmp_ne_1(ptr addrspace(1) %out, ptr addrspace(1) %in) nounwind {
; SI-LABEL: zextload_i1_to_i32_trunc_cmp_ne_1:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    s_mov_b32 s10, s6
; SI-NEXT:    s_mov_b32 s11, s7
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s8, s2
; SI-NEXT:    s_mov_b32 s9, s3
; SI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; SI-NEXT:    s_mov_b32 s4, s0
; SI-NEXT:    s_mov_b32 s5, s1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_and_b32_e32 v0, 1, v0
; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
; SI-NEXT:    s_xor_b64 s[0:1], vcc, -1
; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; SI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: zextload_i1_to_i32_trunc_cmp_ne_1:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s10, s6
; VI-NEXT:    s_mov_b32 s11, s7
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s8, s2
; VI-NEXT:    s_mov_b32 s9, s3
; VI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_and_b32_e32 v0, 1, v0
; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
; VI-NEXT:    s_xor_b64 s[0:1], vcc, -1
; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; VI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; VI-NEXT:    s_endpgm
  %load = load i1, ptr addrspace(1) %in
  %ext = zext i1 %load to i32
  %cmp = icmp ne i32 %ext, 1
  store i1 %cmp, ptr addrspace(1) %out
  ret void
}

; FIXME: This should be one compare.
define amdgpu_kernel void @sextload_i1_to_i32_trunc_cmp_ne_neg1(ptr addrspace(1) %out, ptr addrspace(1) %in) nounwind {
; SI-LABEL: sextload_i1_to_i32_trunc_cmp_ne_neg1:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    s_mov_b32 s10, s6
; SI-NEXT:    s_mov_b32 s11, s7
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s8, s2
; SI-NEXT:    s_mov_b32 s9, s3
; SI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; SI-NEXT:    s_mov_b32 s4, s0
; SI-NEXT:    s_mov_b32 s5, s1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_and_b32_e32 v0, 1, v0
; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
; SI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: sextload_i1_to_i32_trunc_cmp_ne_neg1:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s10, s6
; VI-NEXT:    s_mov_b32 s11, s7
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s8, s2
; VI-NEXT:    s_mov_b32 s9, s3
; VI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_and_b32_e32 v0, 1, v0
; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
; VI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; VI-NEXT:    s_endpgm
  %load = load i1, ptr addrspace(1) %in
  %ext = sext i1 %load to i32
  %cmp = icmp ne i32 %ext, -1
  store i1 %cmp, ptr addrspace(1) %out
  ret void
}

define amdgpu_kernel void @zextload_i1_to_i32_trunc_cmp_ne_neg1(ptr addrspace(1) %out, ptr addrspace(1) %in) nounwind {
; SI-LABEL: zextload_i1_to_i32_trunc_cmp_ne_neg1:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT:    s_mov_b32 s3, 0xf000
; SI-NEXT:    s_mov_b32 s2, -1
; SI-NEXT:    v_mov_b32_e32 v0, 1
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: zextload_i1_to_i32_trunc_cmp_ne_neg1:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT:    s_mov_b32 s3, 0xf000
; VI-NEXT:    s_mov_b32 s2, -1
; VI-NEXT:    v_mov_b32_e32 v0, 1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %load = load i1, ptr addrspace(1) %in
  %ext = zext i1 %load to i32
  %cmp = icmp ne i32 %ext, -1
  store i1 %cmp, ptr addrspace(1) %out
  ret void
}

; FIXME: Need to handle non-uniform case for function below (load without gep).
define amdgpu_kernel void @masked_load_i1_to_i32_trunc_cmp_ne_neg1(ptr addrspace(1) %out, ptr addrspace(1) %in) nounwind {
; SI-LABEL: masked_load_i1_to_i32_trunc_cmp_ne_neg1:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    v_mov_b32_e32 v1, 0
; SI-NEXT:    s_mov_b32 s10, 0
; SI-NEXT:    s_mov_b32 s11, s7
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
; SI-NEXT:    buffer_load_sbyte v0, v[0:1], s[8:11], 0 addr64
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    s_mov_b32 s4, s0
; SI-NEXT:    s_mov_b32 s5, s1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v0
; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
; SI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: masked_load_i1_to_i32_trunc_cmp_ne_neg1:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v1, s3
; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    flat_load_sbyte v0, v[0:1]
; VI-NEXT:    s_mov_b32 s3, 0xf000
; VI-NEXT:    s_mov_b32 s2, -1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_cmp_ne_u32_e32 vcc, -1, v0
; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
; VI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
  %in.ptr = getelementptr i8, ptr addrspace(1) %in, i32 %tid.x
  %load = load i8, ptr addrspace(1) %in.ptr
  %masked = and i8 %load, 255
  %ext = sext i8 %masked to i32
  %cmp = icmp ne i32 %ext, -1
  store i1 %cmp, ptr addrspace(1) %out
  ret void
}