
The existing way of managing clustered nodes was to add weak edges between the neighbouring cluster nodes, forming a sort of ordered queue, which is later recorded as `NextClusterPred` or `NextClusterSucc` in `ScheduleDAGMI`. But the instructions may not actually be picked in the exact order of that queue. For example, given a queue of cluster nodes A B C, node B might be picked first during scheduling; then it is very likely that only B and C get clustered for top-down scheduling (leaving A alone). Another issue is that

```
if (!ReorderWhileClustering && SUa->NodeNum > SUb->NodeNum)
  std::swap(SUa, SUb);
if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)))
```

may break the cluster queue. For example, suppose we want to cluster nodes (in `MemOpRecords` order) 1 3 2. Node 1 (SUa) becomes a pred of 3 (SUb) as usual. But for the pair (3, 2), since 3 (SUa) > 2 (SUb), the two nodes are reordered, which makes 2 a pred of 3. Now both 1 and 2 are preds of 3, yet there is no edge between 1 and 2 — the cluster chain is broken. To fix both issues, this change introduces an unordered set, which can also improve clustering in some hard cases. One key reason the change causes so many test-check updates is that the cluster candidates are no longer ordered, so they may be picked in a different order than before. The most affected targets are AMDGPU, AArch64, and RISC-V. For RISC-V, most differences appear to be minor instruction reorderings, with no obvious regression. For AArch64, some combining of ldr into ldp is affected, with two cases regressed and two improved; the deeper reason is that the machine scheduler cannot cluster them well either before or after the change, and the later load-combine algorithm is also not smart enough. For AMDGPU, some cases use more v_dual instructions while others regress; this seems less critical. Notably, test `v_vselect_v32bf16` gets more buffer_load instructions claused.
1117 lines · 41 KiB · LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefixes=SIVI,SI %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -check-prefixes=SIVI,VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefixes=GFX11 %s
define amdgpu_kernel void @s_test_copysign_f32(ptr addrspace(1) %out, float %mag, float %sign) {
|
|
; SI-LABEL: s_test_copysign_f32:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
|
|
; SI-NEXT: s_brev_b32 s8, -2
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: v_mov_b32_e32 v0, s2
|
|
; SI-NEXT: v_mov_b32_e32 v1, s3
|
|
; SI-NEXT: s_mov_b32 s4, s0
|
|
; SI-NEXT: s_mov_b32 s5, s1
|
|
; SI-NEXT: v_bfi_b32 v0, s8, v0, v1
|
|
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f32:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
|
|
; VI-NEXT: s_brev_b32 s4, -2
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v0, s2
|
|
; VI-NEXT: v_mov_b32_e32 v1, s3
|
|
; VI-NEXT: v_bfi_b32 v2, s4, v0, v1
|
|
; VI-NEXT: v_mov_b32_e32 v0, s0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: flat_store_dword v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f32:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s3
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_bfi_b32 v0, 0x7fffffff, s2, v0
|
|
; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
|
|
; GFX11-NEXT: s_endpgm
|
|
%result = call float @llvm.copysign.f32(float %mag, float %sign)
|
|
store float %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
; Constant sign +0.0: copysign folds to clearing the sign bit
; (s_and_b32 with 0x7fffffff on SI; s_bitset0_b32 bit 31 on VI/GFX11).
define amdgpu_kernel void @s_test_copysign_f32_0(ptr addrspace(1) %out, float %mag) {
; SI-LABEL: s_test_copysign_f32_0:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s4, s6, 0x7fffffff
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_test_copysign_f32_0:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_bitset0_b32 s2, 31
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: s_test_copysign_f32_0:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_bitset0_b32 s2, 31
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
%result = call float @llvm.copysign.f32(float %mag, float 0.0)
store float %result, ptr addrspace(1) %out, align 4
ret void
}
|
|
|
|
; Constant sign +1.0: same lowering as a +0.0 sign — only the sign bit of
; the constant matters, so this also folds to clearing bit 31.
define amdgpu_kernel void @s_test_copysign_f32_1(ptr addrspace(1) %out, float %mag) {
; SI-LABEL: s_test_copysign_f32_1:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s4, s6, 0x7fffffff
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_test_copysign_f32_1:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_bitset0_b32 s2, 31
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: s_test_copysign_f32_1:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_bitset0_b32 s2, 31
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
%result = call float @llvm.copysign.f32(float %mag, float 1.0)
store float %result, ptr addrspace(1) %out, align 4
ret void
}
|
|
|
|
; Constant sign +10.0: same fold as the +0.0 / +1.0 cases — positive
; constant sign clears bit 31 of the magnitude.
define amdgpu_kernel void @s_test_copysign_f32_10.0(ptr addrspace(1) %out, float %mag) {
; SI-LABEL: s_test_copysign_f32_10.0:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s4, s6, 0x7fffffff
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_test_copysign_f32_10.0:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_bitset0_b32 s2, 31
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: s_test_copysign_f32_10.0:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_bitset0_b32 s2, 31
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
%result = call float @llvm.copysign.f32(float %mag, float 10.0)
store float %result, ptr addrspace(1) %out, align 4
ret void
}
|
|
|
|
; Constant sign -1.0: negative constant sign folds to setting the sign bit
; (s_or_b32 with 0x80000000 on SI; s_bitset1_b32 bit 31 on VI/GFX11).
define amdgpu_kernel void @s_test_copysign_f32_neg1(ptr addrspace(1) %out, float %mag) {
; SI-LABEL: s_test_copysign_f32_neg1:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_or_b32 s4, s6, 0x80000000
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_test_copysign_f32_neg1:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_bitset1_b32 s2, 31
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: s_test_copysign_f32_neg1:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_bitset1_b32 s2, 31
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
%result = call float @llvm.copysign.f32(float %mag, float -1.0)
store float %result, ptr addrspace(1) %out, align 4
ret void
}
|
|
|
|
; Constant sign -10.0: same lowering as the -1.0 case — any negative
; constant sign sets bit 31 of the magnitude.
define amdgpu_kernel void @s_test_copysign_f32_neg10(ptr addrspace(1) %out, float %mag) {
; SI-LABEL: s_test_copysign_f32_neg10:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_or_b32 s4, s6, 0x80000000
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_test_copysign_f32_neg10:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_bitset1_b32 s2, 31
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: s_test_copysign_f32_neg10:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_bitset1_b32 s2, 31
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
%result = call float @llvm.copysign.f32(float %mag, float -10.0)
store float %result, ptr addrspace(1) %out, align 4
ret void
}
|
|
|
|
; Constant magnitude +0.0: the result is just the sign bit of %sign,
; extracted with s_and_b32 0x80000000 (magnitude bits are all zero).
define amdgpu_kernel void @s_test_copysign_f32_0_mag(ptr addrspace(1) %out, float %sign) {
; SI-LABEL: s_test_copysign_f32_0_mag:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s4, s6, 0x80000000
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_test_copysign_f32_0_mag:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_and_b32 s2, s2, 0x80000000
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: s_test_copysign_f32_0_mag:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s2, s2, 0x80000000
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
%result = call float @llvm.copysign.f32(float 0.0, float %sign)
store float %result, ptr addrspace(1) %out, align 4
ret void
}
|
|
|
|
|
|
; Constant magnitude 1.0: extract the sign bit of %sign (s_and 0x80000000)
; then OR in 1.0's bit pattern (an inline constant on all subtargets).
define amdgpu_kernel void @s_test_copysign_f32_1_mag(ptr addrspace(1) %out, float %sign) {
; SI-LABEL: s_test_copysign_f32_1_mag:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s4, s6, 0x80000000
; SI-NEXT: s_or_b32 s4, s4, 1.0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_test_copysign_f32_1_mag:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_and_b32 s2, s2, 0x80000000
; VI-NEXT: s_or_b32 s2, s2, 1.0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: s_test_copysign_f32_1_mag:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s2, s2, 0x80000000
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s2, s2, 1.0
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
%result = call float @llvm.copysign.f32(float 1.0, float %sign)
store float %result, ptr addrspace(1) %out, align 4
ret void
}
|
|
|
|
; Constant magnitude 10.0: extract the sign bit then OR in 0x41200000
; (the IEEE-754 encoding of 10.0, not an inline constant).
define amdgpu_kernel void @s_test_copysign_f32_10_mag(ptr addrspace(1) %out, float %sign) {
; SI-LABEL: s_test_copysign_f32_10_mag:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s4, s6, 0x80000000
; SI-NEXT: s_or_b32 s4, s4, 0x41200000
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_test_copysign_f32_10_mag:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_and_b32 s2, s2, 0x80000000
; VI-NEXT: s_or_b32 s2, s2, 0x41200000
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: s_test_copysign_f32_10_mag:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s2, s2, 0x80000000
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s2, s2, 0x41200000
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
%result = call float @llvm.copysign.f32(float 10.0, float %sign)
store float %result, ptr addrspace(1) %out, align 4
ret void
}
|
|
|
|
; Constant magnitude -1.0: |mag| == 1.0, so this lowers identically to the
; +1.0 magnitude case (sign-bit extract, then OR with 1.0).
define amdgpu_kernel void @s_test_copysign_f32_neg1_mag(ptr addrspace(1) %out, float %sign) {
; SI-LABEL: s_test_copysign_f32_neg1_mag:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s4, s6, 0x80000000
; SI-NEXT: s_or_b32 s4, s4, 1.0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_test_copysign_f32_neg1_mag:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_and_b32 s2, s2, 0x80000000
; VI-NEXT: s_or_b32 s2, s2, 1.0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: s_test_copysign_f32_neg1_mag:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s2, s2, 0x80000000
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s2, s2, 1.0
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
%result = call float @llvm.copysign.f32(float -1.0, float %sign)
store float %result, ptr addrspace(1) %out, align 4
ret void
}
|
|
|
|
; Constant magnitude -10.0: |mag| == 10.0, so this lowers identically to the
; +10.0 magnitude case (sign-bit extract, then OR with 0x41200000).
define amdgpu_kernel void @s_test_copysign_f32_neg10_mag(ptr addrspace(1) %out, float %sign) {
; SI-LABEL: s_test_copysign_f32_neg10_mag:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s4, s6, 0x80000000
; SI-NEXT: s_or_b32 s4, s4, 0x41200000
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_test_copysign_f32_neg10_mag:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_and_b32 s2, s2, 0x80000000
; VI-NEXT: s_or_b32 s2, s2, 0x41200000
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: s_test_copysign_f32_neg10_mag:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s2, s2, 0x80000000
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s2, s2, 0x41200000
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
%result = call float @llvm.copysign.f32(float -10.0, float %sign)
store float %result, ptr addrspace(1) %out, align 4
ret void
}
|
|
|
|
; <2 x float> copysign with SGPR operands: scalarized into one v_bfi_b32
; per element, stored as a single 64-bit store.
define amdgpu_kernel void @s_test_copysign_v2f32(ptr addrspace(1) %out, <2 x float> %mag, <2 x float> %sign) {
; SI-LABEL: s_test_copysign_v2f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9
; SI-NEXT: s_brev_b32 s8, -2
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s1
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: v_bfi_b32 v1, s8, v0, v1
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_bfi_b32 v0, s8, v0, v2
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_test_copysign_v2f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x24
; VI-NEXT: s_brev_b32 s6, -2
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s1
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: v_bfi_b32 v1, s6, v0, v1
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_bfi_b32 v0, s6, v2, v0
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: s_test_copysign_v2f32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s3
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfi_b32 v1, 0x7fffffff, s1, v0
; GFX11-NEXT: v_bfi_b32 v0, 0x7fffffff, s0, v2
; GFX11-NEXT: global_store_b64 v3, v[0:1], s[4:5]
; GFX11-NEXT: s_endpgm
%result = call <2 x float> @llvm.copysign.v2f32(<2 x float> %mag, <2 x float> %sign)
store <2 x float> %result, ptr addrspace(1) %out, align 8
ret void
}
|
|
|
|
; <3 x float> copysign: three v_bfi_b32 inserts. SI splits the store into
; dword + dwordx2; VI/GFX11 use a single 96-bit store.
define amdgpu_kernel void @s_test_copysign_v3f32(ptr addrspace(1) %out, <3 x float> %mag, <3 x float> %sign) {
; SI-LABEL: s_test_copysign_v3f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0xd
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_brev_b32 s6, -2
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s9
; SI-NEXT: v_mov_b32_e32 v1, s13
; SI-NEXT: v_bfi_b32 v1, s6, v0, v1
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v2, s12
; SI-NEXT: v_bfi_b32 v0, s6, v0, v2
; SI-NEXT: v_mov_b32_e32 v2, s10
; SI-NEXT: v_mov_b32_e32 v3, s14
; SI-NEXT: v_bfi_b32 v2, s6, v2, v3
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0 offset:8
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_test_copysign_v3f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: s_brev_b32 s2, -2
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s10
; VI-NEXT: v_mov_b32_e32 v1, s14
; VI-NEXT: v_mov_b32_e32 v3, s9
; VI-NEXT: v_bfi_b32 v2, s2, v0, v1
; VI-NEXT: v_mov_b32_e32 v0, s13
; VI-NEXT: v_bfi_b32 v1, s2, v3, v0
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: v_mov_b32_e32 v3, s12
; VI-NEXT: v_bfi_b32 v0, s2, v0, v3
; VI-NEXT: v_mov_b32_e32 v4, s1
; VI-NEXT: v_mov_b32_e32 v3, s0
; VI-NEXT: flat_store_dwordx3 v[3:4], v[0:2]
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: s_test_copysign_v3f32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b256 s[8:15], s[4:5], 0x34
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v3, s12
; GFX11-NEXT: v_dual_mov_b32 v0, s14 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfi_b32 v2, 0x7fffffff, s10, v0
; GFX11-NEXT: v_bfi_b32 v1, 0x7fffffff, s9, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
; GFX11-NEXT: v_bfi_b32 v0, 0x7fffffff, s8, v3
; GFX11-NEXT: global_store_b96 v4, v[0:2], s[0:1]
; GFX11-NEXT: s_endpgm
%result = call <3 x float> @llvm.copysign.v3f32(<3 x float> %mag, <3 x float> %sign)
store <3 x float> %result, ptr addrspace(1) %out, align 16
ret void
}
|
|
|
|
; <4 x float> copysign: four v_bfi_b32 inserts feeding one 128-bit store.
define amdgpu_kernel void @s_test_copysign_v4f32(ptr addrspace(1) %out, <4 x float> %mag, <4 x float> %sign) {
; SI-LABEL: s_test_copysign_v4f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0xd
; SI-NEXT: s_brev_b32 s6, -2
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s11
; SI-NEXT: v_mov_b32_e32 v1, s15
; SI-NEXT: v_bfi_b32 v3, s6, v0, v1
; SI-NEXT: v_mov_b32_e32 v0, s10
; SI-NEXT: v_mov_b32_e32 v1, s14
; SI-NEXT: v_bfi_b32 v2, s6, v0, v1
; SI-NEXT: v_mov_b32_e32 v0, s9
; SI-NEXT: v_mov_b32_e32 v1, s13
; SI-NEXT: v_bfi_b32 v1, s6, v0, v1
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v4, s12
; SI-NEXT: v_bfi_b32 v0, s6, v0, v4
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_test_copysign_v4f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: s_brev_b32 s2, -2
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s11
; VI-NEXT: v_mov_b32_e32 v1, s15
; VI-NEXT: v_mov_b32_e32 v2, s10
; VI-NEXT: v_bfi_b32 v3, s2, v0, v1
; VI-NEXT: v_mov_b32_e32 v0, s14
; VI-NEXT: v_bfi_b32 v2, s2, v2, v0
; VI-NEXT: v_mov_b32_e32 v0, s9
; VI-NEXT: v_mov_b32_e32 v1, s13
; VI-NEXT: v_bfi_b32 v1, s2, v0, v1
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: v_mov_b32_e32 v4, s12
; VI-NEXT: v_bfi_b32 v0, s2, v0, v4
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: s_test_copysign_v4f32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b256 s[8:15], s[4:5], 0x34
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v6, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, s15 :: v_dual_mov_b32 v1, s14
; GFX11-NEXT: v_dual_mov_b32 v4, s13 :: v_dual_mov_b32 v5, s12
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_bfi_b32 v3, 0x7fffffff, s11, v0
; GFX11-NEXT: v_bfi_b32 v2, 0x7fffffff, s10, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_bfi_b32 v1, 0x7fffffff, s9, v4
; GFX11-NEXT: v_bfi_b32 v0, 0x7fffffff, s8, v5
; GFX11-NEXT: global_store_b128 v6, v[0:3], s[0:1]
; GFX11-NEXT: s_endpgm
%result = call <4 x float> @llvm.copysign.v4f32(<4 x float> %mag, <4 x float> %sign)
store <4 x float> %result, ptr addrspace(1) %out, align 16
ret void
}
|
|
|
|
; VGPR operands: a single v_bfi_b32. SI/VI need the 0x7fffffff mask in an
; SGPR (s_brev_b32 -2); GFX11 takes it as an inline literal.
define float @v_test_copysign_f32(float %mag, float %sign) {
; SIVI-LABEL: v_test_copysign_f32:
; SIVI: ; %bb.0:
; SIVI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SIVI-NEXT: s_brev_b32 s4, -2
; SIVI-NEXT: v_bfi_b32 v0, s4, v0, v1
; SIVI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_copysign_f32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_bfi_b32 v0, 0x7fffffff, v0, v1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call float @llvm.copysign.f32(float %mag, float %sign)
ret float %result
}
|
|
|
|
; VGPR magnitude, constant sign +0.0: folds to v_and_b32 clearing bit 31.
define float @v_test_copysign_f32_0(float %mag) {
; SIVI-LABEL: v_test_copysign_f32_0:
; SIVI: ; %bb.0:
; SIVI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SIVI-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; SIVI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_copysign_f32_0:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call float @llvm.copysign.f32(float %mag, float 0.0)
ret float %result
}
|
|
|
|
; VGPR magnitude, constant sign +1.0: same fold as +0.0 (v_and clears bit 31).
define float @v_test_copysign_f32_1(float %mag) {
; SIVI-LABEL: v_test_copysign_f32_1:
; SIVI: ; %bb.0:
; SIVI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SIVI-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; SIVI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_copysign_f32_1:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call float @llvm.copysign.f32(float %mag, float 1.0)
ret float %result
}
|
|
|
|
; VGPR magnitude, constant sign +10.0: same fold (v_and clears bit 31).
define float @v_test_copysign_f32_10(float %mag) {
; SIVI-LABEL: v_test_copysign_f32_10:
; SIVI: ; %bb.0:
; SIVI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SIVI-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; SIVI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_copysign_f32_10:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call float @llvm.copysign.f32(float %mag, float 10.0)
ret float %result
}
|
|
|
|
; VGPR magnitude, constant sign -1.0: folds to v_or_b32 setting bit 31.
define float @v_test_copysign_f32_neg1(float %mag) {
; SIVI-LABEL: v_test_copysign_f32_neg1:
; SIVI: ; %bb.0:
; SIVI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SIVI-NEXT: v_or_b32_e32 v0, 0x80000000, v0
; SIVI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_copysign_f32_neg1:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_or_b32_e32 v0, 0x80000000, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call float @llvm.copysign.f32(float %mag, float -1.0)
ret float %result
}
|
|
|
|
; VGPR magnitude, constant sign -10.0: same fold as -1.0 (v_or sets bit 31).
define float @v_test_copysign_f32_neg10(float %mag) {
; SIVI-LABEL: v_test_copysign_f32_neg10:
; SIVI: ; %bb.0:
; SIVI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SIVI-NEXT: v_or_b32_e32 v0, 0x80000000, v0
; SIVI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_copysign_f32_neg10:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_or_b32_e32 v0, 0x80000000, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call float @llvm.copysign.f32(float %mag, float -10.0)
ret float %result
}
|
|
|
|
; <2 x float> with VGPR operands: one v_bfi_b32 per element; SI/VI share the
; mask SGPR across both inserts.
define <2 x float> @v_test_copysign_v2f32(<2 x float> %mag, <2 x float> %sign) {
; SIVI-LABEL: v_test_copysign_v2f32:
; SIVI: ; %bb.0:
; SIVI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SIVI-NEXT: s_brev_b32 s4, -2
; SIVI-NEXT: v_bfi_b32 v0, s4, v0, v2
; SIVI-NEXT: v_bfi_b32 v1, s4, v1, v3
; SIVI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_copysign_v2f32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_bfi_b32 v0, 0x7fffffff, v0, v2
; GFX11-NEXT: v_bfi_b32 v1, 0x7fffffff, v1, v3
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call <2 x float> @llvm.copysign.v2f32(<2 x float> %mag, <2 x float> %sign)
ret <2 x float> %result
}
|
|
|
|
; <2 x float> with constant zero sign: per-element v_and clearing bit 31.
define <2 x float> @v_test_copysign_v2f32_0(<2 x float> %mag) {
; SIVI-LABEL: v_test_copysign_v2f32_0:
; SIVI: ; %bb.0:
; SIVI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SIVI-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; SIVI-NEXT: v_and_b32_e32 v1, 0x7fffffff, v1
; SIVI-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_test_copysign_v2f32_0:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GFX11-NEXT: v_and_b32_e32 v1, 0x7fffffff, v1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call <2 x float> @llvm.copysign.v2f32(<2 x float> %mag, <2 x float> zeroinitializer)
ret <2 x float> %result
}
|
|
|
|
; Vector copysign with a -1.0 constant sign: the sign bit is statically one,
; so each lane folds to setting the sign bit (v_or_b32 with 0x80000000).
define <2 x float> @v_test_copysign_v2f32_neg1(<2 x float> %mag) {
|
|
; SIVI-LABEL: v_test_copysign_v2f32_neg1:
|
|
; SIVI: ; %bb.0:
|
|
; SIVI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SIVI-NEXT: v_or_b32_e32 v0, 0x80000000, v0
|
|
; SIVI-NEXT: v_or_b32_e32 v1, 0x80000000, v1
|
|
; SIVI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_test_copysign_v2f32_neg1:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_or_b32_e32 v0, 0x80000000, v0
|
|
; GFX11-NEXT: v_or_b32_e32 v1, 0x80000000, v1
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call <2 x float> @llvm.copysign.v2f32(<2 x float> %mag, <2 x float> <float -1.0, float -1.0>)
|
|
ret <2 x float> %result
|
|
}
|
|
|
|
; <3 x float> copysign with variable sign: one v_bfi_b32 per element with the
; 0x7fffffff magnitude mask (SIVI keeps the mask in s4, GFX11 inlines it).
define <3 x float> @v_test_copysign_v3f32(<3 x float> %mag, <3 x float> %sign) {
|
|
; SIVI-LABEL: v_test_copysign_v3f32:
|
|
; SIVI: ; %bb.0:
|
|
; SIVI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SIVI-NEXT: s_brev_b32 s4, -2
|
|
; SIVI-NEXT: v_bfi_b32 v0, s4, v0, v3
|
|
; SIVI-NEXT: v_bfi_b32 v1, s4, v1, v4
|
|
; SIVI-NEXT: v_bfi_b32 v2, s4, v2, v5
|
|
; SIVI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_test_copysign_v3f32:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_bfi_b32 v0, 0x7fffffff, v0, v3
|
|
; GFX11-NEXT: v_bfi_b32 v1, 0x7fffffff, v1, v4
|
|
; GFX11-NEXT: v_bfi_b32 v2, 0x7fffffff, v2, v5
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call <3 x float> @llvm.copysign.v3f32(<3 x float> %mag, <3 x float> %sign)
|
|
ret <3 x float> %result
|
|
}
|
|
|
|
; <4 x float> copysign with variable sign: one v_bfi_b32 per element with the
; 0x7fffffff magnitude mask (SIVI keeps the mask in s4, GFX11 inlines it).
define <4 x float> @v_test_copysign_v4f32(<4 x float> %mag, <4 x float> %sign) {
|
|
; SIVI-LABEL: v_test_copysign_v4f32:
|
|
; SIVI: ; %bb.0:
|
|
; SIVI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SIVI-NEXT: s_brev_b32 s4, -2
|
|
; SIVI-NEXT: v_bfi_b32 v0, s4, v0, v4
|
|
; SIVI-NEXT: v_bfi_b32 v1, s4, v1, v5
|
|
; SIVI-NEXT: v_bfi_b32 v2, s4, v2, v6
|
|
; SIVI-NEXT: v_bfi_b32 v3, s4, v3, v7
|
|
; SIVI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_test_copysign_v4f32:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_bfi_b32 v0, 0x7fffffff, v0, v4
|
|
; GFX11-NEXT: v_bfi_b32 v1, 0x7fffffff, v1, v5
|
|
; GFX11-NEXT: v_bfi_b32 v2, 0x7fffffff, v2, v6
|
|
; GFX11-NEXT: v_bfi_b32 v3, 0x7fffffff, v3, v7
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call <4 x float> @llvm.copysign.v4f32(<4 x float> %mag, <4 x float> %sign)
|
|
ret <4 x float> %result
|
|
}
|
|
|
|
; Non-power-of-two <5 x float> copysign: still scalarized to one v_bfi_b32 per
; element with the 0x7fffffff magnitude mask (SIVI via s4, GFX11 inline).
define <5 x float> @v_test_copysign_v5f32(<5 x float> %mag, <5 x float> %sign) {
|
|
; SIVI-LABEL: v_test_copysign_v5f32:
|
|
; SIVI: ; %bb.0:
|
|
; SIVI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SIVI-NEXT: s_brev_b32 s4, -2
|
|
; SIVI-NEXT: v_bfi_b32 v0, s4, v0, v5
|
|
; SIVI-NEXT: v_bfi_b32 v1, s4, v1, v6
|
|
; SIVI-NEXT: v_bfi_b32 v2, s4, v2, v7
|
|
; SIVI-NEXT: v_bfi_b32 v3, s4, v3, v8
|
|
; SIVI-NEXT: v_bfi_b32 v4, s4, v4, v9
|
|
; SIVI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_test_copysign_v5f32:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_bfi_b32 v0, 0x7fffffff, v0, v5
|
|
; GFX11-NEXT: v_bfi_b32 v1, 0x7fffffff, v1, v6
|
|
; GFX11-NEXT: v_bfi_b32 v2, 0x7fffffff, v2, v7
|
|
; GFX11-NEXT: v_bfi_b32 v3, 0x7fffffff, v3, v8
|
|
; GFX11-NEXT: v_bfi_b32 v4, 0x7fffffff, v4, v9
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call <5 x float> @llvm.copysign.v5f32(<5 x float> %mag, <5 x float> %sign)
|
|
ret <5 x float> %result
|
|
}
|
|
|
|
; copysign(f32 %mag, fptrunc(f64 %sign)): only the sign bit of the truncated
; value is needed, so no fptrunc is emitted — the sign is taken directly from
; the high dword of the double (s5/s1) and merged into %mag with v_bfi_b32.
; The GFX11 check lines also exercise scheduler load clustering (s_clause)
; of the scalar loads, which is sensitive to cluster-candidate ordering.
define amdgpu_kernel void @s_test_copysign_f32_fptrunc_f64(ptr addrspace(1) %out, float %mag, double %sign) {
|
|
; SI-LABEL: s_test_copysign_f32_fptrunc_f64:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
|
|
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
|
|
; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: s_brev_b32 s4, -2
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s6
|
|
; SI-NEXT: v_mov_b32_e32 v1, s5
|
|
; SI-NEXT: v_bfi_b32 v0, s4, v0, v1
|
|
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f32_fptrunc_f64:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dword s6, s[4:5], 0x2c
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34
|
|
; VI-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x24
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: s_brev_b32 s0, -2
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: v_bfi_b32 v2, s0, v0, v1
|
|
; VI-NEXT: v_mov_b32_e32 v0, s2
|
|
; VI-NEXT: v_mov_b32_e32 v1, s3
|
|
; VI-NEXT: flat_store_dword v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f32_fptrunc_f64:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x34
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b32 s0, s[4:5], 0x2c
|
|
; GFX11-NEXT: s_load_b64 s[2:3], s[4:5], 0x24
|
|
; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s1
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_bfi_b32 v0, 0x7fffffff, s0, v0
|
|
; GFX11-NEXT: global_store_b32 v1, v0, s[2:3]
|
|
; GFX11-NEXT: s_endpgm
|
|
%sign.trunc = fptrunc double %sign to float
|
|
%result = call float @llvm.copysign.f32(float %mag, float %sign.trunc)
|
|
store float %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
; copysign(1.0, fptrunc(f64 %sign)): constant magnitude lets the whole thing
; fold to scalar ops — mask the sign bit out of the double's high dword
; (s_and 0x80000000) and OR in the bit pattern of 1.0. No fptrunc is emitted.
define amdgpu_kernel void @s_test_copysign_f32_1_fptrunc_f64(ptr addrspace(1) %out, double %sign) {
|
|
; SI-LABEL: s_test_copysign_f32_1_fptrunc_f64:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s4, s0
|
|
; SI-NEXT: s_and_b32 s0, s3, 0x80000000
|
|
; SI-NEXT: s_or_b32 s0, s0, 1.0
|
|
; SI-NEXT: s_mov_b32 s5, s1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s0
|
|
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f32_1_fptrunc_f64:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v0, s0
|
|
; VI-NEXT: s_and_b32 s0, s3, 0x80000000
|
|
; VI-NEXT: s_or_b32 s0, s0, 1.0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: v_mov_b32_e32 v2, s0
|
|
; VI-NEXT: flat_store_dword v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f32_1_fptrunc_f64:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: s_and_b32 s2, s3, 0x80000000
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
|
|
; GFX11-NEXT: s_or_b32 s2, s2, 1.0
|
|
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
|
|
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
|
|
; GFX11-NEXT: s_endpgm
|
|
%sign.trunc = fptrunc double %sign to float
|
|
%result = call float @llvm.copysign.f32(float 1.0, float %sign.trunc)
|
|
store float %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
; copysign(f32 %mag, fpext(half %sign)): only the sign bit of the extended
; value matters. SI performs a real f16->f32 convert (v_cvt_f32_f16); VI and
; GFX11 instead shift the half left by 16 so its sign bit lands at bit 31,
; then merge with v_bfi_b32.
define amdgpu_kernel void @s_test_copysign_f32_fpext_f16(ptr addrspace(1) %out, float %mag, half %sign) {
|
|
; SI-LABEL: s_test_copysign_f32_fpext_f16:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v0, s3
|
|
; SI-NEXT: s_mov_b32 s4, s0
|
|
; SI-NEXT: s_brev_b32 s0, -2
|
|
; SI-NEXT: v_mov_b32_e32 v1, s2
|
|
; SI-NEXT: s_mov_b32 s5, s1
|
|
; SI-NEXT: v_bfi_b32 v0, s0, v1, v0
|
|
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f32_fpext_f16:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
|
|
; VI-NEXT: s_brev_b32 s4, -2
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: v_lshlrev_b32_e64 v0, 16, s3
|
|
; VI-NEXT: v_mov_b32_e32 v1, s2
|
|
; VI-NEXT: v_bfi_b32 v2, s4, v1, v0
|
|
; VI-NEXT: v_mov_b32_e32 v0, s0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: flat_store_dword v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f32_fpext_f16:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
|
|
; GFX11-NEXT: v_mov_b32_e32 v1, 0
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: v_lshlrev_b32_e64 v0, 16, s3
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_bfi_b32 v0, 0x7fffffff, s2, v0
|
|
; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
|
|
; GFX11-NEXT: s_endpgm
|
|
%sign.ext = fpext half %sign to float
|
|
%result = call float @llvm.copysign.f32(float %mag, float %sign.ext)
|
|
store float %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
; copysign(1.0, fpext(half %sign)): constant magnitude. SI converts the half
; then masks/ORs in VALU; VI and GFX11 stay fully scalar — shift the half left
; 16 (sign bit to bit 31), s_and 0x80000000, s_or with the bit pattern of 1.0.
define amdgpu_kernel void @s_test_copysign_f32_1_fpext_f16(ptr addrspace(1) %out, half %sign) {
|
|
; SI-LABEL: s_test_copysign_f32_1_fpext_f16:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dword s0, s[4:5], 0xb
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v0, s0
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
|
|
; SI-NEXT: v_and_b32_e32 v0, 0x80000000, v0
|
|
; SI-NEXT: v_or_b32_e32 v0, 1.0, v0
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f32_1_fpext_f16:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: s_lshl_b32 s2, s2, 16
|
|
; VI-NEXT: s_and_b32 s2, s2, 0x80000000
|
|
; VI-NEXT: s_or_b32 s2, s2, 1.0
|
|
; VI-NEXT: v_mov_b32_e32 v0, s0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: v_mov_b32_e32 v2, s2
|
|
; VI-NEXT: flat_store_dword v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f32_1_fpext_f16:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
|
|
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: s_lshl_b32 s2, s2, 16
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
|
|
; GFX11-NEXT: s_and_b32 s2, s2, 0x80000000
|
|
; GFX11-NEXT: s_or_b32 s2, s2, 1.0
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
|
|
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
|
|
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
|
|
; GFX11-NEXT: s_endpgm
|
|
%sign.ext = fpext half %sign to float
|
|
%result = call float @llvm.copysign.f32(float 1.0, float %sign.ext)
|
|
store float %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
; copysign(f32 %mag, fpext(bfloat %sign)): bf16->f32 extension is exactly a
; 16-bit left shift of the raw bits, so all targets emit shift + v_bfi_b32;
; no convert instruction is needed on any target.
define amdgpu_kernel void @s_test_copysign_f32_fpext_bf16(ptr addrspace(1) %out, float %mag, bfloat %sign) {
|
|
; SI-LABEL: s_test_copysign_f32_fpext_bf16:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s4, s0
|
|
; SI-NEXT: s_lshl_b32 s0, s3, 16
|
|
; SI-NEXT: s_mov_b32 s5, s1
|
|
; SI-NEXT: s_brev_b32 s1, -2
|
|
; SI-NEXT: v_mov_b32_e32 v0, s2
|
|
; SI-NEXT: v_mov_b32_e32 v1, s0
|
|
; SI-NEXT: v_bfi_b32 v0, s1, v0, v1
|
|
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f32_fpext_bf16:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
|
|
; VI-NEXT: s_brev_b32 s4, -2
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: v_lshlrev_b32_e64 v0, 16, s3
|
|
; VI-NEXT: v_mov_b32_e32 v1, s2
|
|
; VI-NEXT: v_bfi_b32 v2, s4, v1, v0
|
|
; VI-NEXT: v_mov_b32_e32 v0, s0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: flat_store_dword v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f32_fpext_bf16:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
|
|
; GFX11-NEXT: v_mov_b32_e32 v1, 0
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: v_lshlrev_b32_e64 v0, 16, s3
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_bfi_b32 v0, 0x7fffffff, s2, v0
|
|
; GFX11-NEXT: global_store_b32 v1, v0, s[0:1]
|
|
; GFX11-NEXT: s_endpgm
|
|
%sign.ext = fpext bfloat %sign to float
|
|
%result = call float @llvm.copysign.f32(float %mag, float %sign.ext)
|
|
store float %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
; Intrinsic declarations for the scalar and vector copysign forms exercised
; above; #0 marks them as pure (no memory effects), speculatable, and willreturn.
declare float @llvm.copysign.f32(float, float) #0
|
|
declare <2 x float> @llvm.copysign.v2f32(<2 x float>, <2 x float>) #0
|
|
declare <3 x float> @llvm.copysign.v3f32(<3 x float>, <3 x float>) #0
|
|
declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>) #0
|
|
declare <5 x float> @llvm.copysign.v5f32(<5 x float>, <5 x float>) #0
|
|
|
|
attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
|