
Hardware will emulate unsupported PCIe atomics via a CAS loop, so we no longer need to expand them.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GCN,GCN-SDAG %s
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefixes=GCN,GCN-GISEL %s
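
; 64-bit add/sub with a constant that does not fit in 32 bits uses the 64-bit
; literal (lit64) operand encoding.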
define amdgpu_ps i64 @s_add_u64(i64 inreg %a) {
; GCN-LABEL: s_add_u64:
; GCN: ; %bb.0:
; GCN-NEXT: s_add_nc_u64 s[0:1], s[0:1], lit64(0xf12345678)
; GCN-NEXT: ; return to shader part epilog
  %result = add i64 %a, 64729929336
  ret i64 %result
}

define amdgpu_ps void @v_add_u64(i64 %a, ptr addrspace(1) %out) {
; GCN-LABEL: v_add_u64:
; GCN: ; %bb.0:
; GCN-NEXT: v_add_nc_u64_e32 v[0:1], lit64(0xf12345678), v[0:1]
; GCN-NEXT: global_store_b64 v[2:3], v[0:1], off
; GCN-NEXT: s_endpgm
  %result = add i64 %a, 64729929336
  store i64 %result, ptr addrspace(1) %out, align 8
  ret void
}

define amdgpu_ps i64 @s_add_neg_u64(i64 inreg %a) {
; GCN-LABEL: s_add_neg_u64:
; GCN: ; %bb.0:
; GCN-NEXT: s_add_nc_u64 s[0:1], s[0:1], lit64(0xfffffff0edcba988)
; GCN-NEXT: ; return to shader part epilog
  %result = sub i64 %a, 64729929336
  ret i64 %result
}

define amdgpu_ps void @v_add_neg_u64(i64 %a, ptr addrspace(1) %out) {
; GCN-LABEL: v_add_neg_u64:
; GCN: ; %bb.0:
; GCN-NEXT: v_add_nc_u64_e32 v[0:1], lit64(0xfffffff0edcba988), v[0:1]
; GCN-NEXT: global_store_b64 v[2:3], v[0:1], off
; GCN-NEXT: s_endpgm
  %result = sub i64 %a, 64729929336
  store i64 %result, ptr addrspace(1) %out, align 8
  ret void
}

define amdgpu_ps i64 @s_sub_u64(i64 inreg %a) {
; GCN-LABEL: s_sub_u64:
; GCN: ; %bb.0:
; GCN-NEXT: s_sub_nc_u64 s[0:1], lit64(0xf12345678), s[0:1]
; GCN-NEXT: ; return to shader part epilog
  %result = sub i64 64729929336, %a
  ret i64 %result
}

define amdgpu_ps void @v_sub_u64(i64 %a, ptr addrspace(1) %out) {
; GCN-LABEL: v_sub_u64:
; GCN: ; %bb.0:
; GCN-NEXT: v_sub_nc_u64_e32 v[0:1], lit64(0xf12345678), v[0:1]
; GCN-NEXT: global_store_b64 v[2:3], v[0:1], off
; GCN-NEXT: s_endpgm
  %result = sub i64 64729929336, %a
  store i64 %result, ptr addrspace(1) %out, align 8
  ret void
}
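
; 64-bit constants for the atomics and the store below are materialized into a
; register pair with v_mov_b64 of a lit64 literal; the f64/i64 global atomics
; are selected directly rather than expanded to a CAS loop.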
define void @v_mov_b64_double(ptr addrspace(1) %ptr) {
; GCN-LABEL: v_mov_b64_double:
; GCN: ; %bb.0:
; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-NEXT: s_wait_kmcnt 0x0
; GCN-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4063233333333333)
; GCN-NEXT: global_atomic_add_f64 v[0:1], v[2:3], off scope:SCOPE_SYS
; GCN-NEXT: s_set_pc_i64 s[30:31]
  %result = atomicrmw fadd ptr addrspace(1) %ptr, double 153.1 monotonic
  ret void
}

define void @v_mov_b64_int(ptr addrspace(1) %ptr) {
; GCN-LABEL: v_mov_b64_int:
; GCN: ; %bb.0:
; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-NEXT: s_wait_kmcnt 0x0
; GCN-NEXT: v_mov_b64_e32 v[2:3], lit64(0xf12345678)
; GCN-NEXT: global_atomic_add_u64 v[0:1], v[2:3], off scope:SCOPE_SYS
; GCN-NEXT: s_set_pc_i64 s[30:31]
  %result = atomicrmw add ptr addrspace(1) %ptr, i64 64729929336 monotonic
  ret void
}

define void @store_double(ptr addrspace(1) %ptr) {
; GCN-LABEL: store_double:
; GCN: ; %bb.0:
; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-NEXT: s_wait_kmcnt 0x0
; GCN-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4063233333333333)
; GCN-NEXT: global_store_b64 v[0:1], v[2:3], off
; GCN-NEXT: s_set_pc_i64 s[30:31]
  store double 153.1, ptr addrspace(1) %ptr
  ret void
}
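
; 153.1 has no inline-constant encoding: it is used as a lit64 literal by
; v_rsq_f64 and materialized into registers for v_cmp_class_f64.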
define i1 @class_f64() noinline optnone {
; GCN-SDAG-LABEL: class_f64:
; GCN-SDAG: ; %bb.0:
; GCN-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-SDAG-NEXT: s_wait_kmcnt 0x0
; GCN-SDAG-NEXT: s_mov_b32 s2, 1
; GCN-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0x4063233333333333)
; GCN-SDAG-NEXT: v_cmp_class_f64_e64 s0, s[0:1], s2
; GCN-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
; GCN-SDAG-NEXT: s_set_pc_i64 s[30:31]
;
; GCN-GISEL-LABEL: class_f64:
; GCN-GISEL: ; %bb.0:
; GCN-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-GISEL-NEXT: s_wait_kmcnt 0x0
; GCN-GISEL-NEXT: s_mov_b32 s2, 1
; GCN-GISEL-NEXT: s_mov_b64 s[0:1], lit64(0x4063233333333333)
; GCN-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
; GCN-GISEL-NEXT: v_mov_b32_e32 v2, s2
; GCN-GISEL-NEXT: v_cmp_class_f64_e64 s0, v[0:1], v2
; GCN-GISEL-NEXT: v_mov_b32_e32 v0, 1
; GCN-GISEL-NEXT: v_mov_b32_e32 v1, 0
; GCN-GISEL-NEXT: v_cndmask_b32_e64 v0, v1, v0, s0
; GCN-GISEL-NEXT: s_set_pc_i64 s[30:31]
  %result = call i1 @llvm.amdgcn.class.f64(double 153.1, i32 1) nounwind readnone
  ret i1 %result
}

define double @rsq_f64() {
; GCN-LABEL: rsq_f64:
; GCN: ; %bb.0:
; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-NEXT: s_wait_kmcnt 0x0
; GCN-NEXT: v_rsq_f64_e32 v[0:1], lit64(0x4063233333333333)
; GCN-NEXT: s_set_pc_i64 s[30:31]
  %result = call double @llvm.amdgcn.rsq.f64(double 153.1) nounwind readnone
  ret double %result
}
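
; The scalar and takes the 64-bit literal directly.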
define amdgpu_ps i64 @s_and_b64(i64 inreg %a) {
; GCN-LABEL: s_and_b64:
; GCN: ; %bb.0:
; GCN-NEXT: s_and_b64 s[0:1], s[0:1], lit64(0xf12345678)
; GCN-NEXT: ; return to shader part epilog
  %result = and i64 %a, 64729929336
  ret i64 %result
}

; There is no V_AND_B64 instruction, so the and has to be split into two 32-bit ands.

define amdgpu_ps void @v_and_b64(i64 %a, ptr addrspace(1) %out) {
; GCN-SDAG-LABEL: v_and_b64:
; GCN-SDAG: ; %bb.0:
; GCN-SDAG-NEXT: v_and_b32_e32 v1, 15, v1
; GCN-SDAG-NEXT: v_and_b32_e32 v0, 0x12345678, v0
; GCN-SDAG-NEXT: global_store_b64 v[2:3], v[0:1], off
; GCN-SDAG-NEXT: s_endpgm
;
; GCN-GISEL-LABEL: v_and_b64:
; GCN-GISEL: ; %bb.0:
; GCN-GISEL-NEXT: v_and_b32_e32 v0, 0x12345678, v0
; GCN-GISEL-NEXT: v_and_b32_e32 v1, 15, v1
; GCN-GISEL-NEXT: global_store_b64 v[2:3], v[0:1], off
; GCN-GISEL-NEXT: s_endpgm
  %result = and i64 %a, 64729929336
  store i64 %result, ptr addrspace(1) %out, align 8
  ret void
}
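
; 200.1 cannot be encoded as a 32-bit literal, so the full 64-bit literal is used.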
define amdgpu_ps <2 x float> @v_add_f64_200.1(double %a) {
; GCN-LABEL: v_add_f64_200.1:
; GCN: ; %bb.0:
; GCN-NEXT: v_add_f64_e32 v[0:1], lit64(0x4069033333333333), v[0:1]
; GCN-NEXT: ; return to shader part epilog
  %add = fadd double %a, 200.1
  %ret = bitcast double %add to <2 x float>
  ret <2 x float> %ret
}

; 200.0 can be encoded as a 32-bit literal

define amdgpu_ps <2 x float> @v_add_f64_200.0(double %a) {
; GCN-LABEL: v_add_f64_200.0:
; GCN: ; %bb.0:
; GCN-NEXT: v_add_f64_e32 v[0:1], 0x40690000, v[0:1]
; GCN-NEXT: ; return to shader part epilog
  %add = fadd double %a, 200.0
  %ret = bitcast double %add to <2 x float>
  ret <2 x float> %ret
}

; No folding into VOP3

define amdgpu_ps <2 x float> @v_lshl_add_u64(i64 %a) {
; GCN-SDAG-LABEL: v_lshl_add_u64:
; GCN-SDAG: ; %bb.0:
; GCN-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0xf12345678)
; GCN-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GCN-SDAG-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 1, s[0:1]
; GCN-SDAG-NEXT: ; return to shader part epilog
;
; GCN-GISEL-LABEL: v_lshl_add_u64:
; GCN-GISEL: ; %bb.0:
; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], lit64(0xf12345678)
; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GCN-GISEL-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 1, v[2:3]
; GCN-GISEL-NEXT: ; return to shader part epilog
  %shl = shl i64 %a, 1
  %add = add i64 %shl, 64729929336
  %ret = bitcast i64 %add to <2 x float>
  ret <2 x float> %ret
}

; No folding into VOP2 promoted to VOP3

define amdgpu_ps <2 x float> @v_fma_f64(double %a, double %b) {
; GCN-SDAG-LABEL: v_fma_f64:
; GCN-SDAG: ; %bb.0:
; GCN-SDAG-NEXT: v_fmaak_f64 v[4:5], v[0:1], v[2:3], lit64(0x4063233333333333)
; GCN-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4069033333333333)
; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GCN-SDAG-NEXT: v_fmaak_f64 v[0:1], v[0:1], v[4:5], lit64(0x4069033333333333)
; GCN-SDAG-NEXT: v_fmac_f64_e32 v[2:3], v[0:1], v[4:5]
; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GCN-SDAG-NEXT: v_dual_mov_b32 v0, v2 :: v_dual_mov_b32 v1, v3
; GCN-SDAG-NEXT: ; return to shader part epilog
;
; GCN-GISEL-LABEL: v_fma_f64:
; GCN-GISEL: ; %bb.0:
; GCN-GISEL-NEXT: v_mov_b64_e32 v[4:5], lit64(0x4063233333333333)
; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GCN-GISEL-NEXT: v_fmac_f64_e32 v[4:5], v[0:1], v[2:3]
; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4069033333333333)
; GCN-GISEL-NEXT: v_fmaak_f64 v[0:1], v[0:1], v[4:5], lit64(0x4069033333333333)
; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GCN-GISEL-NEXT: v_fmac_f64_e32 v[2:3], v[0:1], v[4:5]
; GCN-GISEL-NEXT: v_dual_mov_b32 v0, v2 :: v_dual_mov_b32 v1, v3
; GCN-GISEL-NEXT: ; return to shader part epilog
  %r1 = call double @llvm.fma.f64(double %a, double %b, double 153.1) nounwind readnone
  %r2 = call double @llvm.fma.f64(double %a, double %r1, double 200.1) nounwind readnone
  %r3 = call double @llvm.fma.f64(double %r2, double %r1, double 200.1) nounwind readnone
  %ret = bitcast double %r3 to <2 x float>
  ret <2 x float> %ret
}
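
; With a source modifier the add uses the VOP3 encoding, which cannot take the
; 64-bit literal, so the constant is materialized into a register pair first.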
define amdgpu_ps <2 x float> @v_add_neg_f64(double %a) {
; GCN-SDAG-LABEL: v_add_neg_f64:
; GCN-SDAG: ; %bb.0:
; GCN-SDAG-NEXT: s_mov_b64 s[0:1], lit64(0x4069033333333333)
; GCN-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GCN-SDAG-NEXT: v_add_f64_e64 v[0:1], -v[0:1], s[0:1]
; GCN-SDAG-NEXT: ; return to shader part epilog
;
; GCN-GISEL-LABEL: v_add_neg_f64:
; GCN-GISEL: ; %bb.0:
; GCN-GISEL-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[0:1]
; GCN-GISEL-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4069033333333333)
; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GCN-GISEL-NEXT: v_add_f64_e64 v[0:1], -v[0:1], v[2:3]
; GCN-GISEL-NEXT: ; return to shader part epilog
  %fneg = fsub double -0.0, %a
  %add = fadd double %fneg, 200.1
  %ret = bitcast double %add to <2 x float>
  ret <2 x float> %ret
}
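
; The f64 select is lowered to two 32-bit cndmasks, so only 32-bit literals are
; needed for the two constant halves.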
define amdgpu_ps <2 x float> @v_cndmask(double %a) {
; GCN-SDAG-LABEL: v_cndmask:
; GCN-SDAG: ; %bb.0:
; GCN-SDAG-NEXT: v_cmp_eq_f64_e32 vcc_lo, 0, v[0:1]
; GCN-SDAG-NEXT: v_mov_b32_e32 v1, 0x40632000
; GCN-SDAG-NEXT: v_cndmask_b32_e64 v0, 0x33333333, 0, vcc_lo
; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GCN-SDAG-NEXT: v_cndmask_b32_e32 v1, 0x40690333, v1, vcc_lo
; GCN-SDAG-NEXT: ; return to shader part epilog
;
; GCN-GISEL-LABEL: v_cndmask:
; GCN-GISEL: ; %bb.0:
; GCN-GISEL-NEXT: v_cmp_eq_f64_e32 vcc_lo, 0, v[0:1]
; GCN-GISEL-NEXT: v_mov_b32_e32 v1, 0x40690333
; GCN-GISEL-NEXT: v_cndmask_b32_e64 v0, 0x33333333, 0, vcc_lo
; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GCN-GISEL-NEXT: v_cndmask_b32_e64 v1, v1, 0x40632000, vcc_lo
; GCN-GISEL-NEXT: ; return to shader part epilog
  %cmp = fcmp oeq double %a, 0.0
  %sel = select i1 %cmp, double 153.0, double 200.1
  %ret = bitcast double %sel to <2 x float>
  ret <2 x float> %ret
}

declare i1 @llvm.amdgcn.class.f64(double, i32) nounwind readnone
declare double @llvm.amdgcn.rsq.f64(double) nounwind readnone
declare double @llvm.fma.f64(double, double, double) nounwind readnone