
; NOTE: Upstream has moved away from explicitly passing -verify-machineinstrs,
; since machine verification is already covered by the expensive checks; the
; flag has therefore been dropped from the RUN lines in this test.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=VI %s
; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=SI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefix=GFX11-FAKE16 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefix=GFX11-TRUE16 %s

;;;==========================================================================;;;
;; 16-bit integer comparisons
;;;==========================================================================;;;
|
define amdgpu_kernel void @i16_eq(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; VI-LABEL: i16_eq:
; VI:       ; %bb.0: ; %entry
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT:    v_lshlrev_b32_e32 v3, 1, v0
; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v2, s3
; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v3
; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT:    v_mov_b32_e32 v4, s5
; VI-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
; VI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
; VI-NEXT:    flat_load_ushort v2, v[1:2]
; VI-NEXT:    flat_load_ushort v3, v[3:4]
; VI-NEXT:    v_mov_b32_e32 v1, s1
; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_cmp_eq_u16_e32 vcc, v2, v3
; VI-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT:    flat_store_dword v[0:1], v2
; VI-NEXT:    s_endpgm
;
; SI-LABEL: i16_eq:
; SI:       ; %bb.0: ; %entry
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT:    s_mov_b32 s11, 0xf000
; SI-NEXT:    s_mov_b32 s10, 0
; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT:    v_mov_b32_e32 v2, 0
; SI-NEXT:    s_mov_b64 s[6:7], s[10:11]
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
; SI-NEXT:    buffer_load_ushort v3, v[1:2], s[8:11], 0 addr64
; SI-NEXT:    buffer_load_ushort v4, v[1:2], s[4:7], 0 addr64
; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT:    s_mov_b64 s[2:3], s[10:11]
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v4
; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT:    buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT:    s_endpgm
;
; GFX11-FAKE16-LABEL: i16_eq:
; GFX11-FAKE16:       ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT:    s_clause 0x1
; GFX11-FAKE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT:    s_clause 0x1
; GFX11-FAKE16-NEXT:    global_load_u16 v2, v1, s[2:3]
; GFX11-FAKE16-NEXT:    global_load_u16 v1, v1, s[4:5]
; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, v2, v1
; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT:    global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT:    s_endpgm
;
; GFX11-TRUE16-LABEL: i16_eq:
; GFX11-TRUE16:       ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT:    s_clause 0x1
; GFX11-TRUE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-TRUE16-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v2, 1, v1
; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT:    s_clause 0x1
; GFX11-TRUE16-NEXT:    global_load_d16_b16 v0, v2, s[2:3]
; GFX11-TRUE16-NEXT:    global_load_d16_hi_b16 v0, v2, s[4:5]
; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT:    v_cmp_eq_u16_e32 vcc_lo, v0.l, v0.h
; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT:    global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT:    s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp eq i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
define amdgpu_kernel void @i16_ne(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; VI-LABEL: i16_ne:
; VI:       ; %bb.0: ; %entry
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT:    v_lshlrev_b32_e32 v3, 1, v0
; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v2, s3
; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v3
; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT:    v_mov_b32_e32 v4, s5
; VI-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
; VI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
; VI-NEXT:    flat_load_ushort v2, v[1:2]
; VI-NEXT:    flat_load_ushort v3, v[3:4]
; VI-NEXT:    v_mov_b32_e32 v1, s1
; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_cmp_ne_u16_e32 vcc, v2, v3
; VI-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT:    flat_store_dword v[0:1], v2
; VI-NEXT:    s_endpgm
;
; SI-LABEL: i16_ne:
; SI:       ; %bb.0: ; %entry
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT:    s_mov_b32 s11, 0xf000
; SI-NEXT:    s_mov_b32 s10, 0
; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT:    v_mov_b32_e32 v2, 0
; SI-NEXT:    s_mov_b64 s[6:7], s[10:11]
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
; SI-NEXT:    buffer_load_ushort v3, v[1:2], s[8:11], 0 addr64
; SI-NEXT:    buffer_load_ushort v4, v[1:2], s[4:7], 0 addr64
; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT:    s_mov_b64 s[2:3], s[10:11]
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_cmp_ne_u32_e32 vcc, v3, v4
; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT:    buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT:    s_endpgm
;
; GFX11-FAKE16-LABEL: i16_ne:
; GFX11-FAKE16:       ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT:    s_clause 0x1
; GFX11-FAKE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT:    s_clause 0x1
; GFX11-FAKE16-NEXT:    global_load_u16 v2, v1, s[2:3]
; GFX11-FAKE16-NEXT:    global_load_u16 v1, v1, s[4:5]
; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT:    v_cmp_ne_u16_e32 vcc_lo, v2, v1
; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT:    global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT:    s_endpgm
;
; GFX11-TRUE16-LABEL: i16_ne:
; GFX11-TRUE16:       ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT:    s_clause 0x1
; GFX11-TRUE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-TRUE16-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v2, 1, v1
; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT:    s_clause 0x1
; GFX11-TRUE16-NEXT:    global_load_d16_b16 v0, v2, s[2:3]
; GFX11-TRUE16-NEXT:    global_load_d16_hi_b16 v0, v2, s[4:5]
; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT:    v_cmp_ne_u16_e32 vcc_lo, v0.l, v0.h
; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT:    global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT:    s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp ne i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
define amdgpu_kernel void @i16_ugt(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; VI-LABEL: i16_ugt:
; VI:       ; %bb.0: ; %entry
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT:    v_lshlrev_b32_e32 v3, 1, v0
; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v2, s3
; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v3
; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT:    v_mov_b32_e32 v4, s5
; VI-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
; VI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
; VI-NEXT:    flat_load_ushort v2, v[1:2]
; VI-NEXT:    flat_load_ushort v3, v[3:4]
; VI-NEXT:    v_mov_b32_e32 v1, s1
; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_cmp_gt_u16_e32 vcc, v2, v3
; VI-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT:    flat_store_dword v[0:1], v2
; VI-NEXT:    s_endpgm
;
; SI-LABEL: i16_ugt:
; SI:       ; %bb.0: ; %entry
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT:    s_mov_b32 s11, 0xf000
; SI-NEXT:    s_mov_b32 s10, 0
; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT:    v_mov_b32_e32 v2, 0
; SI-NEXT:    s_mov_b64 s[6:7], s[10:11]
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
; SI-NEXT:    buffer_load_ushort v3, v[1:2], s[8:11], 0 addr64
; SI-NEXT:    buffer_load_ushort v4, v[1:2], s[4:7], 0 addr64
; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT:    s_mov_b64 s[2:3], s[10:11]
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_cmp_gt_u32_e32 vcc, v3, v4
; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT:    buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT:    s_endpgm
;
; GFX11-FAKE16-LABEL: i16_ugt:
; GFX11-FAKE16:       ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT:    s_clause 0x1
; GFX11-FAKE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT:    s_clause 0x1
; GFX11-FAKE16-NEXT:    global_load_u16 v2, v1, s[2:3]
; GFX11-FAKE16-NEXT:    global_load_u16 v1, v1, s[4:5]
; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT:    v_cmp_gt_u16_e32 vcc_lo, v2, v1
; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT:    global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT:    s_endpgm
;
; GFX11-TRUE16-LABEL: i16_ugt:
; GFX11-TRUE16:       ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT:    s_clause 0x1
; GFX11-TRUE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-TRUE16-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v2, 1, v1
; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT:    s_clause 0x1
; GFX11-TRUE16-NEXT:    global_load_d16_b16 v0, v2, s[2:3]
; GFX11-TRUE16-NEXT:    global_load_d16_hi_b16 v0, v2, s[4:5]
; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT:    v_cmp_gt_u16_e32 vcc_lo, v0.l, v0.h
; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT:    global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT:    s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp ugt i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
define amdgpu_kernel void @i16_uge(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; VI-LABEL: i16_uge:
; VI:       ; %bb.0: ; %entry
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT:    v_lshlrev_b32_e32 v3, 1, v0
; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v2, s3
; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v3
; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT:    v_mov_b32_e32 v4, s5
; VI-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
; VI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
; VI-NEXT:    flat_load_ushort v2, v[1:2]
; VI-NEXT:    flat_load_ushort v3, v[3:4]
; VI-NEXT:    v_mov_b32_e32 v1, s1
; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_cmp_ge_u16_e32 vcc, v2, v3
; VI-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT:    flat_store_dword v[0:1], v2
; VI-NEXT:    s_endpgm
;
; SI-LABEL: i16_uge:
; SI:       ; %bb.0: ; %entry
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT:    s_mov_b32 s11, 0xf000
; SI-NEXT:    s_mov_b32 s10, 0
; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT:    v_mov_b32_e32 v2, 0
; SI-NEXT:    s_mov_b64 s[6:7], s[10:11]
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
; SI-NEXT:    buffer_load_ushort v3, v[1:2], s[8:11], 0 addr64
; SI-NEXT:    buffer_load_ushort v4, v[1:2], s[4:7], 0 addr64
; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT:    s_mov_b64 s[2:3], s[10:11]
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_cmp_ge_u32_e32 vcc, v3, v4
; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT:    buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT:    s_endpgm
;
; GFX11-FAKE16-LABEL: i16_uge:
; GFX11-FAKE16:       ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT:    s_clause 0x1
; GFX11-FAKE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT:    s_clause 0x1
; GFX11-FAKE16-NEXT:    global_load_u16 v2, v1, s[2:3]
; GFX11-FAKE16-NEXT:    global_load_u16 v1, v1, s[4:5]
; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT:    v_cmp_ge_u16_e32 vcc_lo, v2, v1
; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT:    global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT:    s_endpgm
;
; GFX11-TRUE16-LABEL: i16_uge:
; GFX11-TRUE16:       ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT:    s_clause 0x1
; GFX11-TRUE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-TRUE16-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v2, 1, v1
; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT:    s_clause 0x1
; GFX11-TRUE16-NEXT:    global_load_d16_b16 v0, v2, s[2:3]
; GFX11-TRUE16-NEXT:    global_load_d16_hi_b16 v0, v2, s[4:5]
; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT:    v_cmp_ge_u16_e32 vcc_lo, v0.l, v0.h
; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT:    global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT:    s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp uge i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
define amdgpu_kernel void @i16_ult(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; VI-LABEL: i16_ult:
; VI:       ; %bb.0: ; %entry
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT:    v_lshlrev_b32_e32 v3, 1, v0
; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v2, s3
; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v3
; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT:    v_mov_b32_e32 v4, s5
; VI-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
; VI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
; VI-NEXT:    flat_load_ushort v2, v[1:2]
; VI-NEXT:    flat_load_ushort v3, v[3:4]
; VI-NEXT:    v_mov_b32_e32 v1, s1
; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_cmp_lt_u16_e32 vcc, v2, v3
; VI-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT:    flat_store_dword v[0:1], v2
; VI-NEXT:    s_endpgm
;
; SI-LABEL: i16_ult:
; SI:       ; %bb.0: ; %entry
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT:    s_mov_b32 s11, 0xf000
; SI-NEXT:    s_mov_b32 s10, 0
; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT:    v_mov_b32_e32 v2, 0
; SI-NEXT:    s_mov_b64 s[6:7], s[10:11]
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
; SI-NEXT:    buffer_load_ushort v3, v[1:2], s[8:11], 0 addr64
; SI-NEXT:    buffer_load_ushort v4, v[1:2], s[4:7], 0 addr64
; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT:    s_mov_b64 s[2:3], s[10:11]
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_cmp_lt_u32_e32 vcc, v3, v4
; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT:    buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT:    s_endpgm
;
; GFX11-FAKE16-LABEL: i16_ult:
; GFX11-FAKE16:       ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT:    s_clause 0x1
; GFX11-FAKE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT:    s_clause 0x1
; GFX11-FAKE16-NEXT:    global_load_u16 v2, v1, s[2:3]
; GFX11-FAKE16-NEXT:    global_load_u16 v1, v1, s[4:5]
; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT:    v_cmp_lt_u16_e32 vcc_lo, v2, v1
; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT:    global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT:    s_endpgm
;
; GFX11-TRUE16-LABEL: i16_ult:
; GFX11-TRUE16:       ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT:    s_clause 0x1
; GFX11-TRUE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-TRUE16-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v2, 1, v1
; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT:    s_clause 0x1
; GFX11-TRUE16-NEXT:    global_load_d16_b16 v0, v2, s[2:3]
; GFX11-TRUE16-NEXT:    global_load_d16_hi_b16 v0, v2, s[4:5]
; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT:    v_cmp_lt_u16_e32 vcc_lo, v0.l, v0.h
; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT:    global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT:    s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp ult i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
define amdgpu_kernel void @i16_ule(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; VI-LABEL: i16_ule:
; VI:       ; %bb.0: ; %entry
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT:    v_lshlrev_b32_e32 v3, 1, v0
; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v2, s3
; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v3
; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT:    v_mov_b32_e32 v4, s5
; VI-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
; VI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
; VI-NEXT:    flat_load_ushort v2, v[1:2]
; VI-NEXT:    flat_load_ushort v3, v[3:4]
; VI-NEXT:    v_mov_b32_e32 v1, s1
; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_cmp_le_u16_e32 vcc, v2, v3
; VI-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT:    flat_store_dword v[0:1], v2
; VI-NEXT:    s_endpgm
;
; SI-LABEL: i16_ule:
; SI:       ; %bb.0: ; %entry
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT:    s_mov_b32 s11, 0xf000
; SI-NEXT:    s_mov_b32 s10, 0
; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT:    v_mov_b32_e32 v2, 0
; SI-NEXT:    s_mov_b64 s[6:7], s[10:11]
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
; SI-NEXT:    buffer_load_ushort v3, v[1:2], s[8:11], 0 addr64
; SI-NEXT:    buffer_load_ushort v4, v[1:2], s[4:7], 0 addr64
; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT:    s_mov_b64 s[2:3], s[10:11]
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_cmp_le_u32_e32 vcc, v3, v4
; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT:    buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT:    s_endpgm
;
; GFX11-FAKE16-LABEL: i16_ule:
; GFX11-FAKE16:       ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT:    s_clause 0x1
; GFX11-FAKE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT:    s_clause 0x1
; GFX11-FAKE16-NEXT:    global_load_u16 v2, v1, s[2:3]
; GFX11-FAKE16-NEXT:    global_load_u16 v1, v1, s[4:5]
; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT:    v_cmp_le_u16_e32 vcc_lo, v2, v1
; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT:    global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT:    s_endpgm
;
; GFX11-TRUE16-LABEL: i16_ule:
; GFX11-TRUE16:       ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT:    s_clause 0x1
; GFX11-TRUE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-TRUE16-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v2, 1, v1
; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT:    s_clause 0x1
; GFX11-TRUE16-NEXT:    global_load_d16_b16 v0, v2, s[2:3]
; GFX11-TRUE16-NEXT:    global_load_d16_hi_b16 v0, v2, s[4:5]
; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT:    v_cmp_le_u16_e32 vcc_lo, v0.l, v0.h
; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT:    global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT:    s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp ule i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
define amdgpu_kernel void @i16_sgt(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; VI-LABEL: i16_sgt:
; VI:       ; %bb.0: ; %entry
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT:    v_lshlrev_b32_e32 v3, 1, v0
; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v2, s3
; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v3
; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT:    v_mov_b32_e32 v4, s5
; VI-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
; VI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
; VI-NEXT:    flat_load_ushort v2, v[1:2]
; VI-NEXT:    flat_load_ushort v3, v[3:4]
; VI-NEXT:    v_mov_b32_e32 v1, s1
; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_cmp_gt_i16_e32 vcc, v2, v3
; VI-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT:    flat_store_dword v[0:1], v2
; VI-NEXT:    s_endpgm
;
; SI-LABEL: i16_sgt:
; SI:       ; %bb.0: ; %entry
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT:    s_mov_b32 s11, 0xf000
; SI-NEXT:    s_mov_b32 s10, 0
; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT:    v_mov_b32_e32 v2, 0
; SI-NEXT:    s_mov_b64 s[6:7], s[10:11]
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b64 s[8:9], s[2:3]
; SI-NEXT:    buffer_load_sshort v3, v[1:2], s[8:11], 0 addr64
; SI-NEXT:    buffer_load_sshort v4, v[1:2], s[4:7], 0 addr64
; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT:    s_mov_b64 s[2:3], s[10:11]
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_cmp_gt_i32_e32 vcc, v3, v4
; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT:    buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT:    s_endpgm
;
; GFX11-FAKE16-LABEL: i16_sgt:
; GFX11-FAKE16:       ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT:    s_clause 0x1
; GFX11-FAKE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-FAKE16-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT:    s_clause 0x1
; GFX11-FAKE16-NEXT:    global_load_u16 v2, v1, s[2:3]
; GFX11-FAKE16-NEXT:    global_load_u16 v1, v1, s[4:5]
; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT:    v_cmp_gt_i16_e32 vcc_lo, v2, v1
; GFX11-FAKE16-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT:    global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT:    s_endpgm
;
; GFX11-TRUE16-LABEL: i16_sgt:
; GFX11-TRUE16:       ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT:    s_clause 0x1
; GFX11-TRUE16-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-TRUE16-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v2, 1, v1
; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT:    s_clause 0x1
; GFX11-TRUE16-NEXT:    global_load_d16_b16 v0, v2, s[2:3]
; GFX11-TRUE16-NEXT:    global_load_d16_hi_b16 v0, v2, s[4:5]
; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT:    v_cmp_gt_i16_e32 vcc_lo, v0.l, v0.h
; GFX11-TRUE16-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT:    global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT:    s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp sgt i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
; Per-lane signed i16 'sge': loads a[tid] and b[tid], sign-extends the i1
; compare result to i32 (0 or -1) and stores it to out[tid].
define amdgpu_kernel void @i16_sge(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; VI-LABEL: i16_sge:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT: v_lshlrev_b32_e32 v3, 1, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v3
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT: v_mov_b32_e32 v4, s5
; VI-NEXT: v_add_u32_e32 v3, vcc, s4, v3
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc
; VI-NEXT: flat_load_ushort v2, v[1:2]
; VI-NEXT: flat_load_ushort v3, v[3:4]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_ge_i16_e32 vcc, v2, v3
; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; SI-LABEL: i16_sge:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_mov_b64 s[6:7], s[10:11]
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[8:9], s[2:3]
; SI-NEXT: buffer_load_sshort v3, v[1:2], s[8:11], 0 addr64
; SI-NEXT: buffer_load_sshort v4, v[1:2], s[4:7], 0 addr64
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT: s_mov_b64 s[2:3], s[10:11]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ge_i32_e32 vcc, v3, v4
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: i16_sge:
; GFX11-FAKE16: ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT: s_clause 0x1
; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_clause 0x1
; GFX11-FAKE16-NEXT: global_load_u16 v2, v1, s[2:3]
; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[4:5]
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_ge_i16_e32 vcc_lo, v2, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: s_endpgm
;
; GFX11-TRUE16-LABEL: i16_sge:
; GFX11-TRUE16: ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT: s_clause 0x1
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 1, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_clause 0x1
; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3]
; GFX11-TRUE16-NEXT: global_load_d16_hi_b16 v0, v2, s[4:5]
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ge_i16_e32 vcc_lo, v0.l, v0.h
; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp sge i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
; Per-lane signed i16 'slt': loads a[tid] and b[tid], sign-extends the i1
; compare result to i32 (0 or -1) and stores it to out[tid].
define amdgpu_kernel void @i16_slt(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; VI-LABEL: i16_slt:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT: v_lshlrev_b32_e32 v3, 1, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v3
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT: v_mov_b32_e32 v4, s5
; VI-NEXT: v_add_u32_e32 v3, vcc, s4, v3
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc
; VI-NEXT: flat_load_ushort v2, v[1:2]
; VI-NEXT: flat_load_ushort v3, v[3:4]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_lt_i16_e32 vcc, v2, v3
; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; SI-LABEL: i16_slt:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_mov_b64 s[6:7], s[10:11]
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[8:9], s[2:3]
; SI-NEXT: buffer_load_sshort v3, v[1:2], s[8:11], 0 addr64
; SI-NEXT: buffer_load_sshort v4, v[1:2], s[4:7], 0 addr64
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT: s_mov_b64 s[2:3], s[10:11]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_lt_i32_e32 vcc, v3, v4
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: i16_slt:
; GFX11-FAKE16: ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT: s_clause 0x1
; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_clause 0x1
; GFX11-FAKE16-NEXT: global_load_u16 v2, v1, s[2:3]
; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[4:5]
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_lt_i16_e32 vcc_lo, v2, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: s_endpgm
;
; GFX11-TRUE16-LABEL: i16_slt:
; GFX11-TRUE16: ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT: s_clause 0x1
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 1, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_clause 0x1
; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3]
; GFX11-TRUE16-NEXT: global_load_d16_hi_b16 v0, v2, s[4:5]
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_lt_i16_e32 vcc_lo, v0.l, v0.h
; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp slt i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
; Per-lane signed i16 'sle': loads a[tid] and b[tid], sign-extends the i1
; compare result to i32 (0 or -1) and stores it to out[tid].
define amdgpu_kernel void @i16_sle(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
; VI-LABEL: i16_sle:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT: v_lshlrev_b32_e32 v3, 1, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v3
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT: v_mov_b32_e32 v4, s5
; VI-NEXT: v_add_u32_e32 v3, vcc, s4, v3
; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc
; VI-NEXT: flat_load_ushort v2, v[1:2]
; VI-NEXT: flat_load_ushort v3, v[3:4]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_le_i16_e32 vcc, v2, v3
; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; SI-LABEL: i16_sle:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_mov_b64 s[6:7], s[10:11]
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[8:9], s[2:3]
; SI-NEXT: buffer_load_sshort v3, v[1:2], s[8:11], 0 addr64
; SI-NEXT: buffer_load_sshort v4, v[1:2], s[4:7], 0 addr64
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT: s_mov_b64 s[2:3], s[10:11]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_le_i32_e32 vcc, v3, v4
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: i16_sle:
; GFX11-FAKE16: ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT: s_clause 0x1
; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_clause 0x1
; GFX11-FAKE16-NEXT: global_load_u16 v2, v1, s[2:3]
; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[4:5]
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_le_i16_e32 vcc_lo, v2, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: s_endpgm
;
; GFX11-TRUE16-LABEL: i16_sle:
; GFX11-TRUE16: ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT: s_clause 0x1
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 1, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_clause 0x1
; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3]
; GFX11-TRUE16-NEXT: global_load_d16_hi_b16 v0, v2, s[4:5]
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_le_i16_e32 vcc_lo, v0.l, v0.h
; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp sle i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
; These should be commuted to reduce code size
|
|
; VGPR-vs-SGPR i16 'eq': loaded a[tid] compared against the uniform kernel
; argument %b. The operands are commuted so the scalar ends up as src0 of the
; VALU compare (eq is symmetric, so the predicate is unchanged).
define amdgpu_kernel void @i16_eq_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
; VI-LABEL: i16_eq_v_s:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dword s4, s[4:5], 0x34
; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT: flat_load_ushort v2, v[1:2]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_eq_u16_e32 vcc, s4, v2
; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; SI-LABEL: i16_eq_v_s:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dword s8, s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_ushort v3, v[1:2], s[4:7], 0 addr64
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT: s_and_b32 s4, s8, 0xffff
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_eq_u32_e32 vcc, s4, v3
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: i16_eq_v_s:
; GFX11-FAKE16: ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, s4, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: s_endpgm
;
; GFX11-TRUE16-LABEL: i16_eq_v_s:
; GFX11-TRUE16: ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, s4, v0.l
; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp eq i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
; VGPR-vs-SGPR i16 'ne': loaded a[tid] compared against the uniform kernel
; argument %b. The operands are commuted so the scalar ends up as src0 of the
; VALU compare (ne is symmetric, so the predicate is unchanged).
define amdgpu_kernel void @i16_ne_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
; VI-LABEL: i16_ne_v_s:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dword s4, s[4:5], 0x34
; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT: flat_load_ushort v2, v[1:2]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_ne_u16_e32 vcc, s4, v2
; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; SI-LABEL: i16_ne_v_s:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dword s8, s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_ushort v3, v[1:2], s[4:7], 0 addr64
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT: s_and_b32 s4, s8, 0xffff
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, s4, v3
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: i16_ne_v_s:
; GFX11-FAKE16: ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_ne_u16_e32 vcc_lo, s4, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: s_endpgm
;
; GFX11-TRUE16-LABEL: i16_ne_v_s:
; GFX11-TRUE16: ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ne_u16_e32 vcc_lo, s4, v0.l
; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp ne i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
; VGPR-vs-SGPR i16 'ugt': a[tid] > %b is commuted to the swapped predicate
; (%b < a[tid], hence v_cmp_lt_*) so the scalar operand is legal as src0.
define amdgpu_kernel void @i16_ugt_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
; VI-LABEL: i16_ugt_v_s:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dword s4, s[4:5], 0x34
; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT: flat_load_ushort v2, v[1:2]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_lt_u16_e32 vcc, s4, v2
; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; SI-LABEL: i16_ugt_v_s:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dword s8, s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_ushort v3, v[1:2], s[4:7], 0 addr64
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT: s_and_b32 s4, s8, 0xffff
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_lt_u32_e32 vcc, s4, v3
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: i16_ugt_v_s:
; GFX11-FAKE16: ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_lt_u16_e32 vcc_lo, s4, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: s_endpgm
;
; GFX11-TRUE16-LABEL: i16_ugt_v_s:
; GFX11-TRUE16: ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_lt_u16_e32 vcc_lo, s4, v0.l
; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp ugt i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
; VGPR-vs-SGPR i16 'uge': a[tid] >= %b is commuted to the swapped predicate
; (%b <= a[tid], hence v_cmp_le_*) so the scalar operand is legal as src0.
define amdgpu_kernel void @i16_uge_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
; VI-LABEL: i16_uge_v_s:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dword s4, s[4:5], 0x34
; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT: flat_load_ushort v2, v[1:2]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_le_u16_e32 vcc, s4, v2
; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; SI-LABEL: i16_uge_v_s:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dword s8, s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_ushort v3, v[1:2], s[4:7], 0 addr64
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT: s_and_b32 s4, s8, 0xffff
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_le_u32_e32 vcc, s4, v3
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: i16_uge_v_s:
; GFX11-FAKE16: ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_le_u16_e32 vcc_lo, s4, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: s_endpgm
;
; GFX11-TRUE16-LABEL: i16_uge_v_s:
; GFX11-TRUE16: ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_le_u16_e32 vcc_lo, s4, v0.l
; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp uge i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
; VGPR-vs-SGPR i16 'ult': a[tid] < %b is commuted to the swapped predicate
; (%b > a[tid], hence v_cmp_gt_*) so the scalar operand is legal as src0.
define amdgpu_kernel void @i16_ult_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
; VI-LABEL: i16_ult_v_s:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dword s4, s[4:5], 0x34
; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT: flat_load_ushort v2, v[1:2]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_gt_u16_e32 vcc, s4, v2
; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; SI-LABEL: i16_ult_v_s:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dword s8, s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_ushort v3, v[1:2], s[4:7], 0 addr64
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT: s_and_b32 s4, s8, 0xffff
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_gt_u32_e32 vcc, s4, v3
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: i16_ult_v_s:
; GFX11-FAKE16: ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_gt_u16_e32 vcc_lo, s4, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: s_endpgm
;
; GFX11-TRUE16-LABEL: i16_ult_v_s:
; GFX11-TRUE16: ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_gt_u16_e32 vcc_lo, s4, v0.l
; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp ult i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
; VGPR-vs-SGPR i16 'ule': a[tid] <= %b is commuted to the swapped predicate
; (%b >= a[tid], hence v_cmp_ge_*) so the scalar operand is legal as src0.
define amdgpu_kernel void @i16_ule_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
; VI-LABEL: i16_ule_v_s:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dword s4, s[4:5], 0x34
; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT: flat_load_ushort v2, v[1:2]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_ge_u16_e32 vcc, s4, v2
; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; SI-LABEL: i16_ule_v_s:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dword s8, s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_ushort v3, v[1:2], s[4:7], 0 addr64
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT: s_and_b32 s4, s8, 0xffff
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ge_u32_e32 vcc, s4, v3
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: i16_ule_v_s:
; GFX11-FAKE16: ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_ge_u16_e32 vcc_lo, s4, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: s_endpgm
;
; GFX11-TRUE16-LABEL: i16_ule_v_s:
; GFX11-TRUE16: ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ge_u16_e32 vcc_lo, s4, v0.l
; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp ule i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
|
|
|
|
define amdgpu_kernel void @i16_sgt_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
; VI-LABEL: i16_sgt_v_s:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dword s4, s[4:5], 0x34
; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT: flat_load_ushort v2, v[1:2]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_lt_i16_e32 vcc, s4, v2
; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; SI-LABEL: i16_sgt_v_s:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dword s8, s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_sshort v3, v[1:2], s[4:7], 0 addr64
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT: s_sext_i32_i16 s4, s8
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_lt_i32_e32 vcc, s4, v3
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: i16_sgt_v_s:
; GFX11-FAKE16: ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_lt_i16_e32 vcc_lo, s4, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: s_endpgm
;
; GFX11-TRUE16-LABEL: i16_sgt_v_s:
; GFX11-TRUE16: ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_lt_i16_e32 vcc_lo, s4, v0.l
; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp sgt i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}

define amdgpu_kernel void @i16_sge_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
; VI-LABEL: i16_sge_v_s:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dword s4, s[4:5], 0x34
; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT: flat_load_ushort v2, v[1:2]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_le_i16_e32 vcc, s4, v2
; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; SI-LABEL: i16_sge_v_s:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dword s8, s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_sshort v3, v[1:2], s[4:7], 0 addr64
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT: s_sext_i32_i16 s4, s8
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_le_i32_e32 vcc, s4, v3
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: i16_sge_v_s:
; GFX11-FAKE16: ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_le_i16_e32 vcc_lo, s4, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: s_endpgm
;
; GFX11-TRUE16-LABEL: i16_sge_v_s:
; GFX11-TRUE16: ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_le_i16_e32 vcc_lo, s4, v0.l
; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp sge i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}

define amdgpu_kernel void @i16_slt_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
; VI-LABEL: i16_slt_v_s:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dword s4, s[4:5], 0x34
; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT: flat_load_ushort v2, v[1:2]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_gt_i16_e32 vcc, s4, v2
; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; SI-LABEL: i16_slt_v_s:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dword s8, s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_sshort v3, v[1:2], s[4:7], 0 addr64
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT: s_sext_i32_i16 s4, s8
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_gt_i32_e32 vcc, s4, v3
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: i16_slt_v_s:
; GFX11-FAKE16: ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_gt_i16_e32 vcc_lo, s4, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: s_endpgm
;
; GFX11-TRUE16-LABEL: i16_slt_v_s:
; GFX11-TRUE16: ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_gt_i16_e32 vcc_lo, s4, v0.l
; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp slt i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}

define amdgpu_kernel void @i16_sle_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
; VI-LABEL: i16_sle_v_s:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dword s4, s[4:5], 0x34
; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT: flat_load_ushort v2, v[1:2]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_ge_i16_e32 vcc, s4, v2
; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; SI-LABEL: i16_sle_v_s:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dword s8, s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_sshort v3, v[1:2], s[4:7], 0 addr64
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT: s_sext_i32_i16 s4, s8
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ge_i32_e32 vcc, s4, v3
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; GFX11-FAKE16-LABEL: i16_sle_v_s:
; GFX11-FAKE16: ; %bb.0: ; %entry
; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: v_cmp_ge_i16_e32 vcc_lo, s4, v1
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-FAKE16-NEXT: s_endpgm
;
; GFX11-TRUE16-LABEL: i16_sle_v_s:
; GFX11-TRUE16: ; %bb.0: ; %entry
; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_cmp_ge_i16_e32 vcc_lo, s4, v0.l
; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-TRUE16-NEXT: s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp sle i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }