
Recent upstream practice has moved away from passing `-verify-machineinstrs` explicitly, since the machine verifier already runs as part of the expensive checks. This PR removes almost all uses of `-verify-machineinstrs` from tests under `llvm/test/CodeGen/AMDGPU/*.ll`, keeping it only in those tests where its removal currently causes failures.
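As a rough illustration (the RUN lines here are generic, not copied from this diff), the change amounts to dropping the flag from each RUN line, for example from

; RUN: llc -mtriple=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GFX6 %s

to

; RUN: llc -mtriple=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefix=GFX6 %s

To my understanding, builds configured with `-DLLVM_ENABLE_EXPENSIVE_CHECKS=ON` enable the machine verifier by default, so the per-test flag adds little extra coverage while slowing down regular test runs. One of the updated tests is reproduced below.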
155 lines · 5.2 KiB · LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=tahiti < %s | FileCheck -check-prefix=GFX6 %s
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=tonga -mattr=-flat-for-global,-xnack < %s | FileCheck -check-prefix=GFX8 %s

define amdgpu_kernel void @s_mulk_i32_k0(ptr addrspace(1) %out, i32 %b) {
; GFX6-LABEL: s_mulk_i32_k0:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dword s6, s[4:5], 0x2
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mul_i32 s4, s6, 0x41
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: s_mulk_i32_k0:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dword s6, s[4:5], 0x8
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; GFX8-NEXT: s_mov_b32 s3, 0xf000
; GFX8-NEXT: s_mov_b32 s2, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mul_i32 s4, s6, 0x41
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_endpgm
  %mul = mul i32 %b, 65
  store i32 %mul, ptr addrspace(1) %out
  ret void
}

define amdgpu_kernel void @s_mulk_i32_k1(ptr addrspace(1) %out, i32 %b) {
; GFX6-LABEL: s_mulk_i32_k1:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dword s6, s[4:5], 0x2
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mul_i32 s4, s6, 0x7fff
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: s_mulk_i32_k1:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dword s6, s[4:5], 0x8
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; GFX8-NEXT: s_mov_b32 s3, 0xf000
; GFX8-NEXT: s_mov_b32 s2, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mul_i32 s4, s6, 0x7fff
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_endpgm
  %mul = mul i32 %b, 32767 ; (1 << 15) - 1
  store i32 %mul, ptr addrspace(1) %out
  ret void
}

define amdgpu_kernel void @s_mulk_i32_k2(ptr addrspace(1) %out, i32 %b) {
; GFX6-LABEL: s_mulk_i32_k2:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dword s6, s[4:5], 0x2
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mul_i32 s4, s6, 0xffffffef
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: s_mulk_i32_k2:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dword s6, s[4:5], 0x8
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; GFX8-NEXT: s_mov_b32 s3, 0xf000
; GFX8-NEXT: s_mov_b32 s2, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mul_i32 s4, s6, 0xffffffef
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_endpgm
  %mul = mul i32 %b, -17
  store i32 %mul, ptr addrspace(1) %out
  ret void
}

define amdgpu_kernel void @no_s_mulk_i32_k0(ptr addrspace(1) %out, i32 %b) {
; GFX6-LABEL: no_s_mulk_i32_k0:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dword s6, s[4:5], 0x2
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mul_i32 s4, s6, 0x8001
; GFX6-NEXT: v_mov_b32_e32 v0, s4
; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: no_s_mulk_i32_k0:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dword s6, s[4:5], 0x8
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; GFX8-NEXT: s_mov_b32 s3, 0xf000
; GFX8-NEXT: s_mov_b32 s2, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mul_i32 s4, s6, 0x8001
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_endpgm
  %mul = mul i32 %b, 32769 ; 1 << 15 + 1
  store i32 %mul, ptr addrspace(1) %out
  ret void
}

@lds = addrspace(3) global [512 x i32] poison, align 4

define amdgpu_kernel void @commute_s_mulk_i32(ptr addrspace(1) %out, i32 %b) #0 {
; GFX6-LABEL: commute_s_mulk_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dword s0, s[4:5], 0x2
; GFX6-NEXT: v_mov_b32_e32 v0, 0
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
; GFX6-NEXT: s_mulk_i32 s0, 0x800
; GFX6-NEXT: ;;#ASMSTART
; GFX6-NEXT: ; foo v0, s0
; GFX6-NEXT: ;;#ASMEND
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: commute_s_mulk_i32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dword s0, s[4:5], 0x8
; GFX8-NEXT: v_mov_b32_e32 v0, 0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_mulk_i32 s0, 0x800
; GFX8-NEXT: ;;#ASMSTART
; GFX8-NEXT: ; foo v0, s0
; GFX8-NEXT: ;;#ASMEND
; GFX8-NEXT: s_endpgm
  %size = call i32 @llvm.amdgcn.groupstaticsize()
  %add = mul i32 %size, %b
  call void asm sideeffect "; foo $0, $1", "v,s"(ptr addrspace(3) @lds, i32 %add)
  ret void
}

declare i32 @llvm.amdgcn.groupstaticsize() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }