
; NOTE: Upstream no longer passes -verify-machineinstrs explicitly in these
; tests; machine verification is already covered by the expensive checks.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI,SIVI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,SIVI,VIGFX9 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx900 -mattr=-flat-for-global < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,VIGFX9 %s

declare half @llvm.fma.f16(half %a, half %b, half %c)
declare <2 x half> @llvm.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)
declare <4 x half> @llvm.fma.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c)
define amdgpu_kernel void @fma_f16(
|
|
; SI-LABEL: fma_f16:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
|
|
; SI-NEXT: s_mov_b32 s11, 0xf000
|
|
; SI-NEXT: s_mov_b32 s10, -1
|
|
; SI-NEXT: s_mov_b32 s14, s10
|
|
; SI-NEXT: s_mov_b32 s15, s11
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s12, s2
|
|
; SI-NEXT: s_mov_b32 s13, s3
|
|
; SI-NEXT: s_mov_b32 s16, s4
|
|
; SI-NEXT: s_mov_b32 s17, s5
|
|
; SI-NEXT: s_mov_b32 s18, s10
|
|
; SI-NEXT: s_mov_b32 s19, s11
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s7
|
|
; SI-NEXT: s_mov_b32 s6, s10
|
|
; SI-NEXT: s_mov_b32 s7, s11
|
|
; SI-NEXT: buffer_load_ushort v0, off, s[12:15], 0
|
|
; SI-NEXT: buffer_load_ushort v1, off, s[16:19], 0
|
|
; SI-NEXT: buffer_load_ushort v2, off, s[4:7], 0
|
|
; SI-NEXT: s_mov_b32 s8, s0
|
|
; SI-NEXT: s_mov_b32 s9, s1
|
|
; SI-NEXT: s_waitcnt vmcnt(2)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
|
|
; SI-NEXT: s_waitcnt vmcnt(1)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
|
|
; SI-NEXT: v_fma_f32 v0, v0, v1, v2
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
; SI-NEXT: buffer_store_short v0, off, s[8:11], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: fma_f16:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
|
|
; VI-NEXT: s_mov_b32 s11, 0xf000
|
|
; VI-NEXT: s_mov_b32 s10, -1
|
|
; VI-NEXT: s_mov_b32 s14, s10
|
|
; VI-NEXT: s_mov_b32 s15, s11
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: s_mov_b32 s12, s2
|
|
; VI-NEXT: s_mov_b32 s13, s3
|
|
; VI-NEXT: s_mov_b32 s16, s4
|
|
; VI-NEXT: s_mov_b32 s17, s5
|
|
; VI-NEXT: s_mov_b32 s18, s10
|
|
; VI-NEXT: s_mov_b32 s19, s11
|
|
; VI-NEXT: s_mov_b32 s4, s6
|
|
; VI-NEXT: s_mov_b32 s5, s7
|
|
; VI-NEXT: s_mov_b32 s6, s10
|
|
; VI-NEXT: s_mov_b32 s7, s11
|
|
; VI-NEXT: buffer_load_ushort v0, off, s[12:15], 0
|
|
; VI-NEXT: buffer_load_ushort v1, off, s[16:19], 0
|
|
; VI-NEXT: buffer_load_ushort v2, off, s[4:7], 0
|
|
; VI-NEXT: s_mov_b32 s8, s0
|
|
; VI-NEXT: s_mov_b32 s9, s1
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_fma_f16 v0, v0, v1, v2
|
|
; VI-NEXT: buffer_store_short v0, off, s[8:11], 0
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: fma_f16:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
|
|
; GFX9-NEXT: s_mov_b32 s3, 0xf000
|
|
; GFX9-NEXT: s_mov_b32 s2, -1
|
|
; GFX9-NEXT: s_mov_b32 s6, s2
|
|
; GFX9-NEXT: s_mov_b32 s7, s3
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: s_mov_b32 s4, s10
|
|
; GFX9-NEXT: s_mov_b32 s5, s11
|
|
; GFX9-NEXT: s_mov_b32 s16, s12
|
|
; GFX9-NEXT: s_mov_b32 s17, s13
|
|
; GFX9-NEXT: s_mov_b32 s18, s2
|
|
; GFX9-NEXT: s_mov_b32 s19, s3
|
|
; GFX9-NEXT: s_mov_b32 s12, s14
|
|
; GFX9-NEXT: s_mov_b32 s13, s15
|
|
; GFX9-NEXT: s_mov_b32 s14, s2
|
|
; GFX9-NEXT: s_mov_b32 s15, s3
|
|
; GFX9-NEXT: buffer_load_ushort v0, off, s[4:7], 0
|
|
; GFX9-NEXT: buffer_load_ushort v1, off, s[16:19], 0
|
|
; GFX9-NEXT: buffer_load_ushort v2, off, s[12:15], 0
|
|
; GFX9-NEXT: s_mov_b32 s0, s8
|
|
; GFX9-NEXT: s_mov_b32 s1, s9
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_fma_f16 v0, v0, v1, v2
|
|
; GFX9-NEXT: buffer_store_short v0, off, s[0:3], 0
|
|
; GFX9-NEXT: s_endpgm
|
|
ptr addrspace(1) %r,
|
|
ptr addrspace(1) %a,
|
|
ptr addrspace(1) %b,
|
|
ptr addrspace(1) %c) {
|
|
%a.val = load half, ptr addrspace(1) %a
|
|
%b.val = load half, ptr addrspace(1) %b
|
|
%c.val = load half, ptr addrspace(1) %c
|
|
%r.val = call half @llvm.fma.f16(half %a.val, half %b.val, half %c.val)
|
|
store half %r.val, ptr addrspace(1) %r
|
|
ret void
|
|
}
; f16 fma with a constant first operand (3.0). SI materializes the f32
; constant 0x40400000; VI/GFX9 materialize the f16 constant 0x4200.
define amdgpu_kernel void @fma_f16_imm_a(
; SI-LABEL: fma_f16_imm_a:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s14, s6
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s12, s2
; SI-NEXT: s_mov_b32 s13, s3
; SI-NEXT: s_mov_b32 s15, s7
; SI-NEXT: s_mov_b32 s10, s6
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: buffer_load_ushort v0, off, s[12:15], 0
; SI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
; SI-NEXT: s_mov_b32 s2, 0x40400000
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_fma_f32 v0, v0, s2, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fma_f16_imm_a:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s14, s6
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s12, s2
; VI-NEXT: s_mov_b32 s13, s3
; VI-NEXT: s_mov_b32 s15, s7
; VI-NEXT: s_mov_b32 s10, s6
; VI-NEXT: s_mov_b32 s11, s7
; VI-NEXT: buffer_load_ushort v0, off, s[12:15], 0
; VI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
; VI-NEXT: s_mov_b32 s4, s0
; VI-NEXT: s_movk_i32 s0, 0x4200
; VI-NEXT: s_mov_b32 s5, s1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_fma_f16 v0, v0, s0, v1
; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: fma_f16_imm_a:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; GFX9-NEXT: s_mov_b32 s7, 0xf000
; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_mov_b32 s14, s6
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_mov_b32 s12, s2
; GFX9-NEXT: s_mov_b32 s13, s3
; GFX9-NEXT: s_mov_b32 s15, s7
; GFX9-NEXT: s_mov_b32 s10, s6
; GFX9-NEXT: s_mov_b32 s11, s7
; GFX9-NEXT: buffer_load_ushort v0, off, s[12:15], 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[8:11], 0
; GFX9-NEXT: s_mov_b32 s4, s0
; GFX9-NEXT: s_movk_i32 s0, 0x4200
; GFX9-NEXT: s_mov_b32 s5, s1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_fma_f16 v0, v0, s0, v1
; GFX9-NEXT: buffer_store_short v0, off, s[4:7], 0
; GFX9-NEXT: s_endpgm
    ptr addrspace(1) %r,
    ptr addrspace(1) %b,
    ptr addrspace(1) %c) {
  %b.val = load half, ptr addrspace(1) %b
  %c.val = load half, ptr addrspace(1) %c
  %r.val = call half @llvm.fma.f16(half 3.0, half %b.val, half %c.val)
  store half %r.val, ptr addrspace(1) %r
  ret void
}
; f16 fma with a constant second operand (3.0); codegen matches the imm_a
; case since the multiply operands commute.
define amdgpu_kernel void @fma_f16_imm_b(
; SI-LABEL: fma_f16_imm_b:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s14, s6
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s12, s2
; SI-NEXT: s_mov_b32 s13, s3
; SI-NEXT: s_mov_b32 s15, s7
; SI-NEXT: s_mov_b32 s10, s6
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: buffer_load_ushort v0, off, s[12:15], 0
; SI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
; SI-NEXT: s_mov_b32 s2, 0x40400000
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_fma_f32 v0, v0, s2, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fma_f16_imm_b:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s14, s6
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s12, s2
; VI-NEXT: s_mov_b32 s13, s3
; VI-NEXT: s_mov_b32 s15, s7
; VI-NEXT: s_mov_b32 s10, s6
; VI-NEXT: s_mov_b32 s11, s7
; VI-NEXT: buffer_load_ushort v0, off, s[12:15], 0
; VI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
; VI-NEXT: s_mov_b32 s4, s0
; VI-NEXT: s_movk_i32 s0, 0x4200
; VI-NEXT: s_mov_b32 s5, s1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_fma_f16 v0, v0, s0, v1
; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: fma_f16_imm_b:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; GFX9-NEXT: s_mov_b32 s7, 0xf000
; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_mov_b32 s14, s6
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_mov_b32 s12, s2
; GFX9-NEXT: s_mov_b32 s13, s3
; GFX9-NEXT: s_mov_b32 s15, s7
; GFX9-NEXT: s_mov_b32 s10, s6
; GFX9-NEXT: s_mov_b32 s11, s7
; GFX9-NEXT: buffer_load_ushort v0, off, s[12:15], 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[8:11], 0
; GFX9-NEXT: s_mov_b32 s4, s0
; GFX9-NEXT: s_movk_i32 s0, 0x4200
; GFX9-NEXT: s_mov_b32 s5, s1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_fma_f16 v0, v0, s0, v1
; GFX9-NEXT: buffer_store_short v0, off, s[4:7], 0
; GFX9-NEXT: s_endpgm
    ptr addrspace(1) %r,
    ptr addrspace(1) %a,
    ptr addrspace(1) %c) {
  %a.val = load half, ptr addrspace(1) %a
  %c.val = load half, ptr addrspace(1) %c
  %r.val = call half @llvm.fma.f16(half %a.val, half 3.0, half %c.val)
  store half %r.val, ptr addrspace(1) %r
  ret void
}
; f16 fma with a constant addend (3.0); the immediate lands in the third
; (addend) operand of v_fma.
define amdgpu_kernel void @fma_f16_imm_c(
; SI-LABEL: fma_f16_imm_c:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s14, s6
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s12, s2
; SI-NEXT: s_mov_b32 s13, s3
; SI-NEXT: s_mov_b32 s15, s7
; SI-NEXT: s_mov_b32 s10, s6
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: buffer_load_ushort v0, off, s[12:15], 0
; SI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
; SI-NEXT: s_mov_b32 s2, 0x40400000
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_fma_f32 v0, v0, v1, s2
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fma_f16_imm_c:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s14, s6
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s12, s2
; VI-NEXT: s_mov_b32 s13, s3
; VI-NEXT: s_mov_b32 s15, s7
; VI-NEXT: s_mov_b32 s10, s6
; VI-NEXT: s_mov_b32 s11, s7
; VI-NEXT: buffer_load_ushort v0, off, s[12:15], 0
; VI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
; VI-NEXT: s_mov_b32 s4, s0
; VI-NEXT: s_movk_i32 s0, 0x4200
; VI-NEXT: s_mov_b32 s5, s1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_fma_f16 v0, v0, v1, s0
; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: fma_f16_imm_c:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; GFX9-NEXT: s_mov_b32 s7, 0xf000
; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_mov_b32 s14, s6
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_mov_b32 s12, s2
; GFX9-NEXT: s_mov_b32 s13, s3
; GFX9-NEXT: s_mov_b32 s15, s7
; GFX9-NEXT: s_mov_b32 s10, s6
; GFX9-NEXT: s_mov_b32 s11, s7
; GFX9-NEXT: buffer_load_ushort v0, off, s[12:15], 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[8:11], 0
; GFX9-NEXT: s_mov_b32 s4, s0
; GFX9-NEXT: s_movk_i32 s0, 0x4200
; GFX9-NEXT: s_mov_b32 s5, s1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_fma_f16 v0, v0, v1, s0
; GFX9-NEXT: buffer_store_short v0, off, s[4:7], 0
; GFX9-NEXT: s_endpgm
    ptr addrspace(1) %r,
    ptr addrspace(1) %a,
    ptr addrspace(1) %b) {
  %a.val = load half, ptr addrspace(1) %a
  %b.val = load half, ptr addrspace(1) %b
  %r.val = call half @llvm.fma.f16(half %a.val, half %b.val, half 3.0)
  store half %r.val, ptr addrspace(1) %r
  ret void
}
; <2 x half> fma. SI scalarizes through f32; VI splits into two v_fma_f16
; on unpacked halves; GFX9 uses the packed v_pk_fma_f16.
define amdgpu_kernel void @fma_v2f16(
; SI-LABEL: fma_v2f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_mov_b32 s14, s10
; SI-NEXT: s_mov_b32 s15, s11
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s12, s2
; SI-NEXT: s_mov_b32 s13, s3
; SI-NEXT: s_mov_b32 s16, s4
; SI-NEXT: s_mov_b32 s17, s5
; SI-NEXT: s_mov_b32 s18, s10
; SI-NEXT: s_mov_b32 s19, s11
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: buffer_load_dword v0, off, s[12:15], 0
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: buffer_load_dword v1, off, s[16:19], 0
; SI-NEXT: buffer_load_dword v2, off, s[4:7], 0
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cvt_f32_f16_e32 v3, v0
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v2
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, v0, v4, v5
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_fma_f32 v1, v3, v1, v2
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fma_v2f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
; VI-NEXT: s_mov_b32 s14, s10
; VI-NEXT: s_mov_b32 s15, s11
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s16, s4
; VI-NEXT: s_mov_b32 s17, s5
; VI-NEXT: s_mov_b32 s4, s6
; VI-NEXT: s_mov_b32 s5, s7
; VI-NEXT: s_mov_b32 s6, s10
; VI-NEXT: s_mov_b32 s7, s11
; VI-NEXT: s_mov_b32 s12, s2
; VI-NEXT: s_mov_b32 s13, s3
; VI-NEXT: s_mov_b32 s18, s10
; VI-NEXT: s_mov_b32 s19, s11
; VI-NEXT: buffer_load_dword v0, off, s[4:7], 0
; VI-NEXT: buffer_load_dword v1, off, s[16:19], 0
; VI-NEXT: buffer_load_dword v2, off, s[12:15], 0
; VI-NEXT: s_mov_b32 s8, s0
; VI-NEXT: s_mov_b32 s9, s1
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v2
; VI-NEXT: v_fma_f16 v3, v5, v4, v3
; VI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; VI-NEXT: v_fma_f16 v0, v2, v1, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v3
; VI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: fma_v2f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
; GFX9-NEXT: s_mov_b32 s3, 0xf000
; GFX9-NEXT: s_mov_b32 s2, -1
; GFX9-NEXT: s_mov_b32 s6, s2
; GFX9-NEXT: s_mov_b32 s7, s3
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_mov_b32 s4, s10
; GFX9-NEXT: s_mov_b32 s5, s11
; GFX9-NEXT: s_mov_b32 s16, s12
; GFX9-NEXT: s_mov_b32 s17, s13
; GFX9-NEXT: s_mov_b32 s18, s2
; GFX9-NEXT: s_mov_b32 s19, s3
; GFX9-NEXT: s_mov_b32 s12, s14
; GFX9-NEXT: s_mov_b32 s13, s15
; GFX9-NEXT: s_mov_b32 s14, s2
; GFX9-NEXT: s_mov_b32 s15, s3
; GFX9-NEXT: buffer_load_dword v0, off, s[4:7], 0
; GFX9-NEXT: buffer_load_dword v1, off, s[16:19], 0
; GFX9-NEXT: buffer_load_dword v2, off, s[12:15], 0
; GFX9-NEXT: s_mov_b32 s0, s8
; GFX9-NEXT: s_mov_b32 s1, s9
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_pk_fma_f16 v0, v0, v1, v2
; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX9-NEXT: s_endpgm
    ptr addrspace(1) %r,
    ptr addrspace(1) %a,
    ptr addrspace(1) %b,
    ptr addrspace(1) %c) {
  %a.val = load <2 x half>, ptr addrspace(1) %a
  %b.val = load <2 x half>, ptr addrspace(1) %b
  %c.val = load <2 x half>, ptr addrspace(1) %c
  %r.val = call <2 x half> @llvm.fma.v2f16(<2 x half> %a.val, <2 x half> %b.val, <2 x half> %c.val)
  store <2 x half> %r.val, ptr addrspace(1) %r
  ret void
}
; <2 x half> fma with a splat-constant first operand (<3.0, 3.0>). GFX9
; broadcasts the scalar f16 constant via op_sel_hi:[1,0,1].
define amdgpu_kernel void @fma_v2f16_imm_a(
; SI-LABEL: fma_v2f16_imm_a:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s10, s6
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s12, s2
; SI-NEXT: s_mov_b32 s13, s3
; SI-NEXT: s_mov_b32 s14, s6
; SI-NEXT: s_mov_b32 s15, s7
; SI-NEXT: buffer_load_dword v0, off, s[8:11], 0
; SI-NEXT: buffer_load_dword v1, off, s[12:15], 0
; SI-NEXT: s_mov_b32 s2, 0x40400000
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_fma_f32 v2, v3, s2, v2
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, v1, s2, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fma_v2f16_imm_a:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s10, s6
; VI-NEXT: s_mov_b32 s11, s7
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s12, s2
; VI-NEXT: s_mov_b32 s13, s3
; VI-NEXT: s_mov_b32 s14, s6
; VI-NEXT: s_mov_b32 s15, s7
; VI-NEXT: buffer_load_dword v0, off, s[8:11], 0
; VI-NEXT: buffer_load_dword v1, off, s[12:15], 0
; VI-NEXT: s_movk_i32 s2, 0x4200
; VI-NEXT: s_mov_b32 s4, s0
; VI-NEXT: s_mov_b32 s5, s1
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v1
; VI-NEXT: v_fma_f16 v2, v3, s2, v2
; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; VI-NEXT: v_fma_f16 v0, v1, s2, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v2
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: fma_v2f16_imm_a:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; GFX9-NEXT: s_mov_b32 s7, 0xf000
; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_mov_b32 s14, s6
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_mov_b32 s12, s2
; GFX9-NEXT: s_mov_b32 s13, s3
; GFX9-NEXT: s_mov_b32 s15, s7
; GFX9-NEXT: s_mov_b32 s10, s6
; GFX9-NEXT: s_mov_b32 s11, s7
; GFX9-NEXT: buffer_load_dword v0, off, s[12:15], 0
; GFX9-NEXT: buffer_load_dword v1, off, s[8:11], 0
; GFX9-NEXT: s_mov_b32 s4, s0
; GFX9-NEXT: s_movk_i32 s0, 0x4200
; GFX9-NEXT: s_mov_b32 s5, s1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_pk_fma_f16 v0, v0, s0, v1 op_sel_hi:[1,0,1]
; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX9-NEXT: s_endpgm
    ptr addrspace(1) %r,
    ptr addrspace(1) %b,
    ptr addrspace(1) %c) {
  %b.val = load <2 x half>, ptr addrspace(1) %b
  %c.val = load <2 x half>, ptr addrspace(1) %c
  %r.val = call <2 x half> @llvm.fma.v2f16(<2 x half> <half 3.0, half 3.0>, <2 x half> %b.val, <2 x half> %c.val)
  store <2 x half> %r.val, ptr addrspace(1) %r
  ret void
}
; <2 x half> fma with a splat-constant second operand; same codegen as the
; imm_a case because the multiply operands commute.
define amdgpu_kernel void @fma_v2f16_imm_b(
; SI-LABEL: fma_v2f16_imm_b:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s10, s6
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s12, s2
; SI-NEXT: s_mov_b32 s13, s3
; SI-NEXT: s_mov_b32 s14, s6
; SI-NEXT: s_mov_b32 s15, s7
; SI-NEXT: buffer_load_dword v0, off, s[8:11], 0
; SI-NEXT: buffer_load_dword v1, off, s[12:15], 0
; SI-NEXT: s_mov_b32 s2, 0x40400000
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_fma_f32 v2, v3, s2, v2
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, v1, s2, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fma_v2f16_imm_b:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s10, s6
; VI-NEXT: s_mov_b32 s11, s7
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s12, s2
; VI-NEXT: s_mov_b32 s13, s3
; VI-NEXT: s_mov_b32 s14, s6
; VI-NEXT: s_mov_b32 s15, s7
; VI-NEXT: buffer_load_dword v0, off, s[8:11], 0
; VI-NEXT: buffer_load_dword v1, off, s[12:15], 0
; VI-NEXT: s_movk_i32 s2, 0x4200
; VI-NEXT: s_mov_b32 s4, s0
; VI-NEXT: s_mov_b32 s5, s1
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v1
; VI-NEXT: v_fma_f16 v2, v3, s2, v2
; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; VI-NEXT: v_fma_f16 v0, v1, s2, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v2
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: fma_v2f16_imm_b:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; GFX9-NEXT: s_mov_b32 s7, 0xf000
; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_mov_b32 s14, s6
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_mov_b32 s12, s2
; GFX9-NEXT: s_mov_b32 s13, s3
; GFX9-NEXT: s_mov_b32 s15, s7
; GFX9-NEXT: s_mov_b32 s10, s6
; GFX9-NEXT: s_mov_b32 s11, s7
; GFX9-NEXT: buffer_load_dword v0, off, s[12:15], 0
; GFX9-NEXT: buffer_load_dword v1, off, s[8:11], 0
; GFX9-NEXT: s_mov_b32 s4, s0
; GFX9-NEXT: s_movk_i32 s0, 0x4200
; GFX9-NEXT: s_mov_b32 s5, s1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_pk_fma_f16 v0, v0, s0, v1 op_sel_hi:[1,0,1]
; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX9-NEXT: s_endpgm
    ptr addrspace(1) %r,
    ptr addrspace(1) %a,
    ptr addrspace(1) %c) {
  %a.val = load <2 x half>, ptr addrspace(1) %a
  %c.val = load <2 x half>, ptr addrspace(1) %c
  %r.val = call <2 x half> @llvm.fma.v2f16(<2 x half> %a.val, <2 x half> <half 3.0, half 3.0>, <2 x half> %c.val)
  store <2 x half> %r.val, ptr addrspace(1) %r
  ret void
}
; <2 x half> fma with a splat-constant addend; GFX9 broadcasts the scalar
; constant into the addend lane via op_sel_hi:[1,1,0].
define amdgpu_kernel void @fma_v2f16_imm_c(
; SI-LABEL: fma_v2f16_imm_c:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s10, s6
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s12, s2
; SI-NEXT: s_mov_b32 s13, s3
; SI-NEXT: s_mov_b32 s14, s6
; SI-NEXT: s_mov_b32 s15, s7
; SI-NEXT: buffer_load_dword v0, off, s[8:11], 0
; SI-NEXT: buffer_load_dword v1, off, s[12:15], 0
; SI-NEXT: s_mov_b32 s2, 0x40400000
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_fma_f32 v2, v3, v2, s2
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_fma_f32 v0, v1, v0, s2
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fma_v2f16_imm_c:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s10, s6
; VI-NEXT: s_mov_b32 s11, s7
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s12, s2
; VI-NEXT: s_mov_b32 s13, s3
; VI-NEXT: s_mov_b32 s14, s6
; VI-NEXT: s_mov_b32 s15, s7
; VI-NEXT: buffer_load_dword v0, off, s[8:11], 0
; VI-NEXT: buffer_load_dword v1, off, s[12:15], 0
; VI-NEXT: s_movk_i32 s2, 0x4200
; VI-NEXT: s_mov_b32 s4, s0
; VI-NEXT: s_mov_b32 s5, s1
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v1
; VI-NEXT: v_fma_f16 v2, v3, v2, s2
; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; VI-NEXT: v_fma_f16 v0, v1, v0, s2
; VI-NEXT: v_or_b32_e32 v0, v0, v2
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: fma_v2f16_imm_c:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
; GFX9-NEXT: s_mov_b32 s7, 0xf000
; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_mov_b32 s14, s6
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_mov_b32 s12, s2
; GFX9-NEXT: s_mov_b32 s13, s3
; GFX9-NEXT: s_mov_b32 s15, s7
; GFX9-NEXT: s_mov_b32 s10, s6
; GFX9-NEXT: s_mov_b32 s11, s7
; GFX9-NEXT: buffer_load_dword v0, off, s[12:15], 0
; GFX9-NEXT: buffer_load_dword v1, off, s[8:11], 0
; GFX9-NEXT: s_mov_b32 s4, s0
; GFX9-NEXT: s_movk_i32 s0, 0x4200
; GFX9-NEXT: s_mov_b32 s5, s1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_pk_fma_f16 v0, v0, v1, s0 op_sel_hi:[1,1,0]
; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX9-NEXT: s_endpgm
    ptr addrspace(1) %r,
    ptr addrspace(1) %a,
    ptr addrspace(1) %b) {
  %a.val = load <2 x half>, ptr addrspace(1) %a
  %b.val = load <2 x half>, ptr addrspace(1) %b
  %r.val = call <2 x half> @llvm.fma.v2f16(<2 x half> %a.val, <2 x half> %b.val, <2 x half> <half 3.0, half 3.0>)
  store <2 x half> %r.val, ptr addrspace(1) %r
  ret void
}
; <4 x half> fma. SI fully scalarizes through f32; VI splits into four
; v_fma_f16; GFX9 uses two packed v_pk_fma_f16 (one per <2 x half> dword).
define amdgpu_kernel void @fma_v4f16(
; SI-LABEL: fma_v4f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_mov_b32 s14, s10
; SI-NEXT: s_mov_b32 s15, s11
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s12, s2
; SI-NEXT: s_mov_b32 s13, s3
; SI-NEXT: s_mov_b32 s16, s4
; SI-NEXT: s_mov_b32 s17, s5
; SI-NEXT: s_mov_b32 s18, s10
; SI-NEXT: s_mov_b32 s19, s11
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s7
; SI-NEXT: s_mov_b32 s6, s10
; SI-NEXT: s_mov_b32 s7, s11
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[12:15], 0
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[16:19], 0
; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cvt_f32_f16_e32 v6, v0
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: v_cvt_f32_f16_e32 v7, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v8, v2
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; SI-NEXT: v_cvt_f32_f16_e32 v9, v3
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v10, v4
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
; SI-NEXT: v_cvt_f32_f16_e32 v11, v5
; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
; SI-NEXT: v_fma_f32 v7, v7, v9, v11
; SI-NEXT: v_fma_f32 v6, v6, v8, v10
; SI-NEXT: v_fma_f32 v1, v1, v3, v5
; SI-NEXT: v_fma_f32 v0, v0, v2, v4
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v2, v7
; SI-NEXT: v_cvt_f16_f32_e32 v3, v6
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v3, v0
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fma_v4f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[4:11], s[4:5], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s14, s2
; VI-NEXT: s_mov_b32 s15, s3
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s16, s8
; VI-NEXT: s_mov_b32 s17, s9
; VI-NEXT: s_mov_b32 s8, s10
; VI-NEXT: s_mov_b32 s9, s11
; VI-NEXT: s_mov_b32 s10, s2
; VI-NEXT: s_mov_b32 s11, s3
; VI-NEXT: s_mov_b32 s12, s6
; VI-NEXT: s_mov_b32 s13, s7
; VI-NEXT: s_mov_b32 s18, s2
; VI-NEXT: s_mov_b32 s19, s3
; VI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
; VI-NEXT: buffer_load_dwordx2 v[2:3], off, s[16:19], 0
; VI-NEXT: buffer_load_dwordx2 v[4:5], off, s[12:15], 0
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_lshrrev_b32_e32 v6, 16, v1
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v5
; VI-NEXT: v_fma_f16 v1, v5, v3, v1
; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v0
; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v9, 16, v4
; VI-NEXT: v_fma_f16 v0, v4, v2, v0
; VI-NEXT: v_fma_f16 v2, v8, v7, v6
; VI-NEXT: v_fma_f16 v3, v9, v5, v3
; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; VI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; VI-NEXT: v_or_b32_e32 v1, v1, v2
; VI-NEXT: v_or_b32_e32 v0, v0, v3
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: fma_v4f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
; GFX9-NEXT: s_mov_b32 s3, 0xf000
; GFX9-NEXT: s_mov_b32 s2, -1
; GFX9-NEXT: s_mov_b32 s6, s2
; GFX9-NEXT: s_mov_b32 s7, s3
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_mov_b32 s4, s10
; GFX9-NEXT: s_mov_b32 s5, s11
; GFX9-NEXT: s_mov_b32 s16, s12
; GFX9-NEXT: s_mov_b32 s17, s13
; GFX9-NEXT: s_mov_b32 s18, s2
; GFX9-NEXT: s_mov_b32 s19, s3
; GFX9-NEXT: s_mov_b32 s12, s14
; GFX9-NEXT: s_mov_b32 s13, s15
; GFX9-NEXT: s_mov_b32 s14, s2
; GFX9-NEXT: s_mov_b32 s15, s3
; GFX9-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; GFX9-NEXT: buffer_load_dwordx2 v[2:3], off, s[16:19], 0
; GFX9-NEXT: buffer_load_dwordx2 v[4:5], off, s[12:15], 0
; GFX9-NEXT: s_mov_b32 s0, s8
; GFX9-NEXT: s_mov_b32 s1, s9
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_pk_fma_f16 v1, v1, v3, v5
; GFX9-NEXT: v_pk_fma_f16 v0, v0, v2, v4
; GFX9-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX9-NEXT: s_endpgm
    ptr addrspace(1) %r,
    ptr addrspace(1) %a,
    ptr addrspace(1) %b,
    ptr addrspace(1) %c) {
  %a.val = load <4 x half>, ptr addrspace(1) %a
  %b.val = load <4 x half>, ptr addrspace(1) %b
  %c.val = load <4 x half>, ptr addrspace(1) %c
  %r.val = call <4 x half> @llvm.fma.v4f16(<4 x half> %a.val, <4 x half> %b.val, <4 x half> %c.val)
  store <4 x half> %r.val, ptr addrspace(1) %r
  ret void
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GCN: {{.*}}
; SIVI: {{.*}}
; VIGFX9: {{.*}}