; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefix=SI %s
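; Tests for the llvm.amdgcn.class intrinsic, which lowers to the
; v_cmp_class_f32/v_cmp_class_f64 instructions. The second operand is a
; 10-bit class mask: bit 0 = signaling NaN, 1 = quiet NaN, 2 = -inf,
; 3 = -normal, 4 = -subnormal, 5 = -0, 6 = +0, 7 = +subnormal,
; 8 = +normal, 9 = +inf.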
declare i1 @llvm.amdgcn.class.f32(float, i32) #1
declare i1 @llvm.amdgcn.class.f64(double, i32) #1
declare i32 @llvm.amdgcn.workitem.id.x() #1
declare float @llvm.fabs.f32(float) #1
declare double @llvm.fabs.f64(double) #1
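; Both the value and the mask are SGPRs, so the mask is copied into a
; VGPR to use the VOPC e32 form.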
define amdgpu_kernel void @test_class_f32(ptr addrspace(1) %out, [8 x i32], float %a, [8 x i32], i32 %b) #0 {
; SI-LABEL: test_class_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0x1c
; SI-NEXT: s_load_dword s7, s[4:5], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_cmp_class_f32_e32 vcc, s7, v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%result = call i1 @llvm.amdgcn.class.f32(float %a, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
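; fabs on the source should fold into a |src| source modifier on
; v_cmp_class rather than a separate instruction.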
define amdgpu_kernel void @test_class_fabs_f32(ptr addrspace(1) %out, [8 x i32], float %a, [8 x i32], i32 %b) #0 {
; SI-LABEL: test_class_fabs_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0x1c
; SI-NEXT: s_load_dword s7, s[4:5], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_cmp_class_f32_e64 s[4:5], |s7|, v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%a.fabs = call float @llvm.fabs.f32(float %a) #1
%result = call i1 @llvm.amdgcn.class.f32(float %a.fabs, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
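; fneg should fold into a -src source modifier.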
define amdgpu_kernel void @test_class_fneg_f32(ptr addrspace(1) %out, [8 x i32], float %a, [8 x i32], i32 %b) #0 {
; SI-LABEL: test_class_fneg_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0x1c
; SI-NEXT: s_load_dword s7, s[4:5], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_cmp_class_f32_e64 s[4:5], -s7, v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%a.fneg = fsub float -0.0, %a
%result = call i1 @llvm.amdgcn.class.f32(float %a.fneg, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
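; Combined fneg(fabs) should fold into a -|src| source modifier.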
define amdgpu_kernel void @test_class_fneg_fabs_f32(ptr addrspace(1) %out, [8 x i32], float %a, [8 x i32], i32 %b) #0 {
; SI-LABEL: test_class_fneg_fabs_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0x1c
; SI-NEXT: s_load_dword s7, s[4:5], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_cmp_class_f32_e64 s[4:5], -|s7|, v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%a.fabs = call float @llvm.fabs.f32(float %a) #1
%a.fneg.fabs = fsub float -0.0, %a.fabs
%result = call i1 @llvm.amdgcn.class.f32(float %a.fneg.fabs, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
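; Mask 1 (signaling NaN) fits in an inline immediate operand.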
define amdgpu_kernel void @test_class_1_f32(ptr addrspace(1) %out, float %a) #0 {
; SI-LABEL: test_class_1_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_class_f32_e64 s[4:5], s6, 1
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%result = call i1 @llvm.amdgcn.class.f32(float %a, i32 1) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
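; Mask 64 (+0) is the largest mask value that still fits in an inline
; immediate.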
define amdgpu_kernel void @test_class_64_f32(ptr addrspace(1) %out, float %a) #0 {
; SI-LABEL: test_class_64_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_class_f32_e64 s[4:5], s6, 64
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%result = call i1 @llvm.amdgcn.class.f32(float %a, i32 64) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
; Set all 10 bits of the class mask (0x3ff). This does not fit in an
; inline immediate, so it is materialized in a VGPR.
define amdgpu_kernel void @test_class_full_mask_f32(ptr addrspace(1) %out, float %a) #0 {
; SI-LABEL: test_class_full_mask_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_load_dword s4, s[4:5], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x3ff
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_class_f32_e32 vcc, s4, v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%result = call i1 @llvm.amdgcn.class.f32(float %a, i32 1023) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
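; Mask 0x1ff sets the low 9 bits, i.e. everything except +inf.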
define amdgpu_kernel void @test_class_9bit_mask_f32(ptr addrspace(1) %out, float %a) #0 {
; SI-LABEL: test_class_9bit_mask_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_load_dword s4, s[4:5], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x1ff
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_class_f32_e32 vcc, s4, v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%result = call i1 @llvm.amdgcn.class.f32(float %a, i32 511) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
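; Same mask test with a per-thread (VGPR) value loaded from memory; the
; 0x1ff mask is materialized in an SGPR for the e64 form.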
define amdgpu_kernel void @v_test_class_full_mask_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; SI-LABEL: v_test_class_full_mask_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_movk_i32 s4, 0x1ff
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_class_f32_e64 s[4:5], v2, s4
; SI-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[4:5]
; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr float, ptr addrspace(1) %in, i32 %tid
%gep.out = getelementptr i32, ptr addrspace(1) %out, i32 %tid
%a = load float, ptr addrspace(1) %gep.in
%result = call i1 @llvm.amdgcn.class.f32(float %a, i32 511) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %gep.out, align 4
ret void
}
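; A 1.0 source is an inline immediate, so it can be used directly as
; src0 with the dynamic mask in a VGPR.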
define amdgpu_kernel void @test_class_inline_imm_constant_dynamic_mask_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; SI-LABEL: test_class_inline_imm_constant_dynamic_mask_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_class_f32_e32 vcc, 1.0, v2
; SI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr i32, ptr addrspace(1) %in, i32 %tid
%gep.out = getelementptr i32, ptr addrspace(1) %out, i32 %tid
%b = load i32, ptr addrspace(1) %gep.in
%result = call i1 @llvm.amdgcn.class.f32(float 1.0, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %gep.out, align 4
ret void
}
; FIXME: Why isn't this using a literal constant operand?
define amdgpu_kernel void @test_class_lit_constant_dynamic_mask_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; SI-LABEL: test_class_lit_constant_dynamic_mask_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b32 s4, 0x44800000
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_class_f32_e32 vcc, s4, v2
; SI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr i32, ptr addrspace(1) %in, i32 %tid
%gep.out = getelementptr i32, ptr addrspace(1) %out, i32 %tid
%b = load i32, ptr addrspace(1) %gep.in
%result = call i1 @llvm.amdgcn.class.f32(float 1024.0, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %gep.out, align 4
ret void
}
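; The f64 variants below mirror the f32 tests; the class mask encoding
; is the same.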
define amdgpu_kernel void @test_class_f64(ptr addrspace(1) %out, [8 x i32], double %a, [8 x i32], i32 %b) #0 {
; SI-LABEL: test_class_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s8, s[4:5], 0x1d
; SI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_cmp_class_f64_e32 vcc, s[6:7], v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%result = call i1 @llvm.amdgcn.class.f64(double %a, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @test_class_fabs_f64(ptr addrspace(1) %out, [8 x i32], double %a, [8 x i32], i32 %b) #0 {
; SI-LABEL: test_class_fabs_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s8, s[4:5], 0x1d
; SI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_cmp_class_f64_e64 s[4:5], |s[6:7]|, v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%a.fabs = call double @llvm.fabs.f64(double %a) #1
%result = call i1 @llvm.amdgcn.class.f64(double %a.fabs, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @test_class_fneg_f64(ptr addrspace(1) %out, [8 x i32], double %a, [8 x i32], i32 %b) #0 {
; SI-LABEL: test_class_fneg_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s8, s[4:5], 0x1d
; SI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_cmp_class_f64_e64 s[4:5], -s[6:7], v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%a.fneg = fsub double -0.0, %a
%result = call i1 @llvm.amdgcn.class.f64(double %a.fneg, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @test_class_fneg_fabs_f64(ptr addrspace(1) %out, [8 x i32], double %a, [8 x i32], i32 %b) #0 {
; SI-LABEL: test_class_fneg_fabs_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s8, s[4:5], 0x1d
; SI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_cmp_class_f64_e64 s[4:5], -|s[6:7]|, v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%a.fabs = call double @llvm.fabs.f64(double %a) #1
%a.fneg.fabs = fsub double -0.0, %a.fabs
%result = call i1 @llvm.amdgcn.class.f64(double %a.fneg.fabs, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @test_class_1_f64(ptr addrspace(1) %out, double %a) #0 {
; SI-LABEL: test_class_1_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: v_cmp_class_f64_e64 s[0:1], s[2:3], 1
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
%result = call i1 @llvm.amdgcn.class.f64(double %a, i32 1) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @test_class_64_f64(ptr addrspace(1) %out, double %a) #0 {
; SI-LABEL: test_class_64_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: v_cmp_class_f64_e64 s[0:1], s[2:3], 64
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
%result = call i1 @llvm.amdgcn.class.f64(double %a, i32 64) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
; Set 9 of the 10 class mask bits (0x1ff)
define amdgpu_kernel void @test_class_full_mask_f64(ptr addrspace(1) %out, [8 x i32], double %a) #0 {
; SI-LABEL: test_class_full_mask_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x13
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x1ff
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_class_f64_e32 vcc, s[4:5], v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%result = call i1 @llvm.amdgcn.class.f64(double %a, i32 511) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
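; Note that the load uses %in directly (%gep.in is dead), so the input
; is a uniform load while the store still uses per-thread addressing.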
define amdgpu_kernel void @v_test_class_full_mask_f64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; SI-LABEL: v_test_class_full_mask_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[4:7], 0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_movk_i32 s4, 0x1ff
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: s_mov_b32 s3, s7
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_class_f64_e64 s[4:5], v[2:3], s4
; SI-NEXT: v_cndmask_b32_e64 v2, 0, -1, s[4:5]
; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr double, ptr addrspace(1) %in, i32 %tid
%gep.out = getelementptr i32, ptr addrspace(1) %out, i32 %tid
%a = load double, ptr addrspace(1) %in
%result = call i1 @llvm.amdgcn.class.f64(double %a, i32 511) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %gep.out, align 4
ret void
}
define amdgpu_kernel void @test_class_inline_imm_constant_dynamic_mask_f64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; SI-LABEL: test_class_inline_imm_constant_dynamic_mask_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_class_f64_e32 vcc, 1.0, v2
; SI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr i32, ptr addrspace(1) %in, i32 %tid
%gep.out = getelementptr i32, ptr addrspace(1) %out, i32 %tid
%b = load i32, ptr addrspace(1) %gep.in
%result = call i1 @llvm.amdgcn.class.f64(double 1.0, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %gep.out, align 4
ret void
}
define amdgpu_kernel void @test_class_lit_constant_dynamic_mask_f64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; SI-LABEL: test_class_lit_constant_dynamic_mask_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b32 s4, 0
; SI-NEXT: s_mov_b32 s5, 0x40900000
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_class_f64_e32 vcc, s[4:5], v2
; SI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr i32, ptr addrspace(1) %in, i32 %tid
%gep.out = getelementptr i32, ptr addrspace(1) %out, i32 %tid
%b = load i32, ptr addrspace(1) %gep.in
%result = call i1 @llvm.amdgcn.class.f64(double 1024.0, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %gep.out, align 4
ret void
}
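; Multiple class tests of the same value or'd together should combine
; into a single v_cmp_class with the union of the masks.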
define amdgpu_kernel void @test_fold_or_class_f32_0(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; SI-LABEL: test_fold_or_class_f32_0:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[8:9], s[2:3]
; SI-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_class_f32_e64 s[0:1], v0, 3
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr float, ptr addrspace(1) %in, i32 %tid
%gep.out = getelementptr i32, ptr addrspace(1) %out, i32 %tid
%a = load float, ptr addrspace(1) %gep.in
%class0 = call i1 @llvm.amdgcn.class.f32(float %a, i32 1) #1
%class1 = call i1 @llvm.amdgcn.class.f32(float %a, i32 3) #1
%or = or i1 %class0, %class1
%sext = sext i1 %or to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @test_fold_or3_class_f32_0(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; SI-LABEL: test_fold_or3_class_f32_0:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[8:9], s[2:3]
; SI-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_class_f32_e64 s[0:1], v0, 7
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr float, ptr addrspace(1) %in, i32 %tid
%gep.out = getelementptr i32, ptr addrspace(1) %out, i32 %tid
%a = load float, ptr addrspace(1) %gep.in
%class0 = call i1 @llvm.amdgcn.class.f32(float %a, i32 1) #1
%class1 = call i1 @llvm.amdgcn.class.f32(float %a, i32 2) #1
%class2 = call i1 @llvm.amdgcn.class.f32(float %a, i32 4) #1
%or.0 = or i1 %class0, %class1
%or.1 = or i1 %or.0, %class2
%sext = sext i1 %or.1 to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @test_fold_or_all_tests_class_f32_0(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; SI-LABEL: test_fold_or_all_tests_class_f32_0:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[8:9], s[2:3]
; SI-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_movk_i32 s2, 0x3ff
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_class_f32_e64 s[0:1], v0, s2
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr float, ptr addrspace(1) %in, i32 %tid
%gep.out = getelementptr i32, ptr addrspace(1) %out, i32 %tid
%a = load float, ptr addrspace(1) %gep.in
%class0 = call i1 @llvm.amdgcn.class.f32(float %a, i32 1) #1
%class1 = call i1 @llvm.amdgcn.class.f32(float %a, i32 2) #1
%class2 = call i1 @llvm.amdgcn.class.f32(float %a, i32 4) #1
%class3 = call i1 @llvm.amdgcn.class.f32(float %a, i32 8) #1
%class4 = call i1 @llvm.amdgcn.class.f32(float %a, i32 16) #1
%class5 = call i1 @llvm.amdgcn.class.f32(float %a, i32 32) #1
%class6 = call i1 @llvm.amdgcn.class.f32(float %a, i32 64) #1
%class7 = call i1 @llvm.amdgcn.class.f32(float %a, i32 128) #1
%class8 = call i1 @llvm.amdgcn.class.f32(float %a, i32 256) #1
%class9 = call i1 @llvm.amdgcn.class.f32(float %a, i32 512) #1
%or.0 = or i1 %class0, %class1
%or.1 = or i1 %or.0, %class2
%or.2 = or i1 %or.1, %class3
%or.3 = or i1 %or.2, %class4
%or.4 = or i1 %or.3, %class5
%or.5 = or i1 %or.4, %class6
%or.6 = or i1 %or.5, %class7
%or.7 = or i1 %or.6, %class8
%or.8 = or i1 %or.7, %class9
%sext = sext i1 %or.8 to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @test_fold_or_class_f32_1(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; SI-LABEL: test_fold_or_class_f32_1:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[8:9], s[2:3]
; SI-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_class_f32_e64 s[0:1], v0, 12
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr float, ptr addrspace(1) %in, i32 %tid
%gep.out = getelementptr i32, ptr addrspace(1) %out, i32 %tid
%a = load float, ptr addrspace(1) %gep.in
%class0 = call i1 @llvm.amdgcn.class.f32(float %a, i32 4) #1
%class1 = call i1 @llvm.amdgcn.class.f32(float %a, i32 8) #1
%or = or i1 %class0, %class1
%sext = sext i1 %or to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @test_fold_or_class_f32_2(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
; SI-LABEL: test_fold_or_class_f32_2:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[8:9], s[2:3]
; SI-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_class_f32_e64 s[0:1], v0, 7
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr float, ptr addrspace(1) %in, i32 %tid
%gep.out = getelementptr i32, ptr addrspace(1) %out, i32 %tid
%a = load float, ptr addrspace(1) %gep.in
%class0 = call i1 @llvm.amdgcn.class.f32(float %a, i32 7) #1
%class1 = call i1 @llvm.amdgcn.class.f32(float %a, i32 7) #1
%or = or i1 %class0, %class1
%sext = sext i1 %or to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
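; The or should not be folded when the two class tests use different
; source values.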
define amdgpu_kernel void @test_no_fold_or_class_f32_0(ptr addrspace(1) %out, ptr addrspace(1) %in, float %b) #0 {
; SI-LABEL: test_no_fold_or_class_f32_0:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dword s12, s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[8:9], s[2:3]
; SI-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: v_cmp_class_f32_e64 s[0:1], s12, 8
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_class_f32_e64 s[2:3], v0, 4
; SI-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.in = getelementptr float, ptr addrspace(1) %in, i32 %tid
%gep.out = getelementptr i32, ptr addrspace(1) %out, i32 %tid
%a = load float, ptr addrspace(1) %gep.in
%class0 = call i1 @llvm.amdgcn.class.f32(float %a, i32 4) #1
%class1 = call i1 @llvm.amdgcn.class.f32(float %b, i32 8) #1
%or = or i1 %class0, %class1
%sext = sext i1 %or to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
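; A zero mask matches nothing, so the result folds to false.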
define amdgpu_kernel void @test_class_0_f32(ptr addrspace(1) %out, float %a) #0 {
; SI-LABEL: test_class_0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%result = call i1 @llvm.amdgcn.class.f32(float %a, i32 0) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
define amdgpu_kernel void @test_class_0_f64(ptr addrspace(1) %out, double %a) #0 {
; SI-LABEL: test_class_0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%result = call i1 @llvm.amdgcn.class.f64(double %a, i32 0) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
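; Class of a poison value folds away; only the constant store remains.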
define amdgpu_kernel void @test_class_undef_f32(ptr addrspace(1) %out, float %a, i32 %b) #0 {
; SI-LABEL: test_class_undef_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
%result = call i1 @llvm.amdgcn.class.f32(float poison, i32 %b) #1
%sext = sext i1 %result to i32
store i32 %sext, ptr addrspace(1) %out, align 4
ret void
}
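; and'ing with (fcmp ord x, x) should clear the NaN bits from the class
; mask: 35 (sNaN|qNaN|-0) -> 32 (-0).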
define i1 @test_fold_and_ord(float %a) {
; SI-LABEL: test_fold_and_ord:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_class_f32_e64 s[4:5], v0, 32
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; SI-NEXT: s_setpc_b64 s[30:31]
%class = call i1 @llvm.amdgcn.class.f32(float %a, i32 35) #1
%ord = fcmp ord float %a, %a
%and = and i1 %ord, %class
ret i1 %and
}
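; and'ing with (fcmp uno x, x) should keep only the NaN bits of the
; mask: 35 -> 3.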
define i1 @test_fold_and_unord(float %a) {
; SI-LABEL: test_fold_and_unord:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_class_f32_e64 s[4:5], v0, 3
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; SI-NEXT: s_setpc_b64 s[30:31]
%class = call i1 @llvm.amdgcn.class.f32(float %a, i32 35) #1
%uno = fcmp uno float %a, %a
%and = and i1 %uno, %class
ret i1 %and
}
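; The fcmp fold must not fire when the class result has another use.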
define i1 @test_fold_and_ord_multi_use(float %a) {
; SI-LABEL: test_fold_and_ord_multi_use:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_class_f32_e64 s[4:5], v0, 35
; SI-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[4:5]
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; SI-NEXT: buffer_store_byte v1, off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
%class = call i1 @llvm.amdgcn.class.f32(float %a, i32 35) #1
store volatile i1 %class, ptr addrspace(1) poison
%ord = fcmp ord float %a, %a
%and = and i1 %ord, %class
ret i1 %and
}
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }