
These tests had `-verify-machineinstrs=0` before, but it was removed in #150024. Expensive-checks builds enable the machine verifier by default, and these tests intentionally produce illegal VGPR-to-SGPR copies, so the verifier has to be disabled explicitly for them to pass.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: not llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs=0 2> %t.err < %s | FileCheck %s
; RUN: FileCheck -check-prefix=ERR %s < %t.err
; FIXME: These tests cannot be tail called, and should be executed in a waterfall loop.

declare hidden void @void_func_i32_inreg(i32 inreg)

; ERR: error: <unknown>:0:0: in function tail_call_i32_inreg_divergent void (i32): illegal VGPR to SGPR copy
; ERR: error: <unknown>:0:0: in function indirect_tail_call_i32_inreg_divergent void (i32): illegal VGPR to SGPR copy

define void @tail_call_i32_inreg_divergent(i32 %vgpr) {
; CHECK-LABEL: tail_call_i32_inreg_divergent:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_mov_b32 s16, s33
; CHECK-NEXT: s_mov_b32 s33, s32
; CHECK-NEXT: s_or_saveexec_b64 s[18:19], -1
; CHECK-NEXT: buffer_store_dword v40, off, s[0:3], s33 ; 4-byte Folded Spill
; CHECK-NEXT: s_mov_b64 exec, s[18:19]
; CHECK-NEXT: v_writelane_b32 v40, s16, 2
; CHECK-NEXT: s_addk_i32 s32, 0x400
; CHECK-NEXT: v_writelane_b32 v40, s30, 0
; CHECK-NEXT: v_writelane_b32 v40, s31, 1
; CHECK-NEXT: s_getpc_b64 s[16:17]
; CHECK-NEXT: s_add_u32 s16, s16, void_func_i32_inreg@rel32@lo+4
; CHECK-NEXT: s_addc_u32 s17, s17, void_func_i32_inreg@rel32@hi+12
; CHECK-NEXT: ; illegal copy v0 to s0
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: v_readlane_b32 s31, v40, 1
; CHECK-NEXT: v_readlane_b32 s30, v40, 0
; CHECK-NEXT: s_mov_b32 s32, s33
; CHECK-NEXT: v_readlane_b32 s4, v40, 2
; CHECK-NEXT: s_or_saveexec_b64 s[6:7], -1
; CHECK-NEXT: buffer_load_dword v40, off, s[0:3], s33 ; 4-byte Folded Reload
; CHECK-NEXT: s_mov_b64 exec, s[6:7]
; CHECK-NEXT: s_mov_b32 s33, s4
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
  tail call void @void_func_i32_inreg(i32 inreg %vgpr)
  ret void
}

@constant = external hidden addrspace(4) constant ptr

define void @indirect_tail_call_i32_inreg_divergent(i32 %vgpr) {
; CHECK-LABEL: indirect_tail_call_i32_inreg_divergent:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_mov_b32 s16, s33
; CHECK-NEXT: s_mov_b32 s33, s32
; CHECK-NEXT: s_or_saveexec_b64 s[18:19], -1
; CHECK-NEXT: buffer_store_dword v40, off, s[0:3], s33 ; 4-byte Folded Spill
; CHECK-NEXT: s_mov_b64 exec, s[18:19]
; CHECK-NEXT: s_addk_i32 s32, 0x400
; CHECK-NEXT: v_writelane_b32 v40, s16, 2
; CHECK-NEXT: s_getpc_b64 s[16:17]
; CHECK-NEXT: s_add_u32 s16, s16, constant@rel32@lo+4
; CHECK-NEXT: s_addc_u32 s17, s17, constant@rel32@hi+12
; CHECK-NEXT: s_load_dwordx2 s[16:17], s[16:17], 0x0
; CHECK-NEXT: v_writelane_b32 v40, s30, 0
; CHECK-NEXT: v_writelane_b32 v40, s31, 1
; CHECK-NEXT: ; illegal copy v0 to s0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: v_readlane_b32 s31, v40, 1
; CHECK-NEXT: v_readlane_b32 s30, v40, 0
; CHECK-NEXT: s_mov_b32 s32, s33
; CHECK-NEXT: v_readlane_b32 s4, v40, 2
; CHECK-NEXT: s_or_saveexec_b64 s[6:7], -1
; CHECK-NEXT: buffer_load_dword v40, off, s[0:3], s33 ; 4-byte Folded Reload
; CHECK-NEXT: s_mov_b64 exec, s[6:7]
; CHECK-NEXT: s_mov_b32 s33, s4
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %fptr = load ptr, ptr addrspace(4) @constant, align 8
  tail call void %fptr(i32 inreg %vgpr)
  ret void
}