
Add support for using the existing SCRATCH_STORE_BLOCK and SCRATCH_LOAD_BLOCK instructions for saving and restoring callee-saved VGPRs. This is controlled by a new subtarget feature, block-vgpr-csr. It does not include WWM registers - those will be saved and restored individually, just like before. This patch does not change the ABI.

Use of this feature may lead to slightly increased stack usage, because the memory is not compacted if certain registers don't have to be transferred (this will happen in practice for calling conventions where the callee-saved and caller-saved registers are interleaved in groups of 8). However, if the registers at the end of the block of 32 don't have to be transferred, we don't need to use a whole 128-byte stack slot - we can trim some space off the end of the range.

In order to implement this feature, we need to rely less on the target-independent code in the PrologEpilogInserter, so we override several new methods in SIFrameLowering. We also add new pseudos, SI_BLOCK_SPILL_V1024_SAVE/RESTORE.

One peculiarity is that both the SI_BLOCK_SPILL_V1024_RESTORE pseudo and the SCRATCH_LOAD_BLOCK instructions will have all the registers that are not transferred added as implicit uses. This is done in order to inform LiveRegUnits that those registers are not available before the restore (since we're not really restoring them - so we can't afford to scavenge them). Unfortunately, this trick doesn't work with the save, so before the save all the registers in the block will be unavailable (see the unit test).

This was reverted due to failures in the builds with expensive checks on, now fixed by always updating LiveIntervals and SlotIndexes in SILowerSGPRSpills.
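To make the mask encoding concrete, here is a small standalone C++ sketch (illustrative only, not code from this patch) that decodes the 0x110003 value written to m0 in the test below, under the assumption that bit i of the mask selects register v(base + i) within the block:

#include <cstdint>
#include <cstdio>

int main() {
  // Illustrative only: decode the transfer mask written to m0 before a
  // block spill. Bit I set means register v(Base + I) is transferred.
  // 0x110003 sets bits 0, 1, 16 and 20 -> v40, v41, v56 and v60 for a
  // block based at v40, matching the "transferring at most" comments.
  const uint32_t Mask = 0x110003;
  const unsigned Base = 40;
  for (unsigned I = 0; I < 32; ++I)
    if (Mask & (1u << I))
      std::printf("v%u\n", Base + I);
  // Registers above the highest set bit never touch memory, so the
  // 128-byte slot can be trimmed: (20 + 1) * 4 = 84 bytes here, which
  // is why the next stack object in the test lands at offset 88
  // (the block starts at offset 4) rather than at offset 132.
  const unsigned Highest = 31 - __builtin_clz(Mask); // GCC/Clang builtin
  std::printf("trimmed slot: %u bytes\n", (Highest + 1) * 4);
  return 0;
}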
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -global-isel=1 -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+block-vgpr-csr < %s | FileCheck -check-prefixes=CHECK,GISEL %s
; RUN: llc -global-isel=0 -mtriple=amdgcn--amdpal -mcpu=gfx1200 -mattr=+block-vgpr-csr < %s | FileCheck -check-prefixes=CHECK,DAGISEL %s

define i32 @non_entry_func(i32 %x) {
; CHECK-LABEL: non_entry_func:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    s_wait_loadcnt_dscnt 0x0
; CHECK-NEXT:    s_wait_expcnt 0x0
; CHECK-NEXT:    s_wait_samplecnt 0x0
; CHECK-NEXT:    s_wait_bvhcnt 0x0
; CHECK-NEXT:    s_wait_kmcnt 0x0
; CHECK-NEXT:    s_xor_saveexec_b32 s0, -1
; CHECK-NEXT:    scratch_store_b32 off, v2, s32 offset:100 ; 4-byte Folded Spill
; CHECK-NEXT:    s_wait_alu 0xfffe
; CHECK-NEXT:    s_mov_b32 exec_lo, s0
; CHECK-NEXT:    v_writelane_b32 v2, s48, 0
; CHECK-NEXT:    s_mov_b32 m0, 0x110003
; CHECK-NEXT:    v_mov_b32_e32 v1, v0
; CHECK-NEXT:    ; transferring at most v40 v41 v56 v60 ; 128-byte Folded Spill
; CHECK-NEXT:    scratch_store_block off, v[40:71], s32 offset:4
; CHECK-NEXT:    s_mov_b32 m0, 1
; CHECK-NEXT:    v_writelane_b32 v2, s49, 1
; CHECK-NEXT:    ; transferring at most v120 ; 128-byte Folded Spill
; CHECK-NEXT:    scratch_store_block off, v[120:151], s32
; CHECK-NEXT:    ;;#ASMSTART
; CHECK-NEXT:    s_nop
; CHECK-NEXT:    ;;#ASMEND
; CHECK-NEXT:    ; transferring at most v120 ; 128-byte Folded Reload
; CHECK-NEXT:    scratch_load_block v[120:151], off, s32
; CHECK-NEXT:    s_mov_b32 m0, 0x110003
; CHECK-NEXT:    scratch_store_b32 off, v1, s32 offset:88
; CHECK-NEXT:    ; transferring at most v40 v41 v56 v60 ; 128-byte Folded Reload
; CHECK-NEXT:    scratch_load_block v[40:71], off, s32 offset:4
; CHECK-NEXT:    v_mov_b32_e32 v0, v1
; CHECK-NEXT:    v_readlane_b32 s49, v2, 1
; CHECK-NEXT:    v_readlane_b32 s48, v2, 0
; CHECK-NEXT:    s_xor_saveexec_b32 s0, -1
; CHECK-NEXT:    scratch_load_b32 v2, off, s32 offset:100 ; 4-byte Folded Reload
; CHECK-NEXT:    s_wait_alu 0xfffe
; CHECK-NEXT:    s_mov_b32 exec_lo, s0
; CHECK-NEXT:    s_wait_loadcnt 0x0
; CHECK-NEXT:    s_setpc_b64 s[30:31]
  %local = alloca i32, i32 3, addrspace(5)
  store i32 %x, ptr addrspace(5) %local
  call void asm "s_nop", "~{v0},~{v8},~{v40},~{v41},~{v49},~{v52},~{v56},~{v60},~{v120},~{s0},~{s48},~{s49}"()
  ret i32 %x
}

define amdgpu_kernel void @entry_func(i32 %x) {
; GISEL-LABEL: entry_func:
; GISEL:       ; %bb.0:
; GISEL-NEXT:    s_mov_b64 s[10:11], s[6:7]
; GISEL-NEXT:    s_load_b32 s6, s[4:5], 0x0
; GISEL-NEXT:    v_mov_b32_e32 v31, v0
; GISEL-NEXT:    s_mov_b64 s[12:13], s[0:1]
; GISEL-NEXT:    ;;#ASMSTART
; GISEL-NEXT:    s_nop
; GISEL-NEXT:    ;;#ASMEND
; GISEL-NEXT:    s_add_co_u32 s8, s4, 4
; GISEL-NEXT:    s_mov_b32 s0, non_entry_func@abs32@lo
; GISEL-NEXT:    s_mov_b32 s1, non_entry_func@abs32@hi
; GISEL-NEXT:    s_add_co_ci_u32 s9, s5, 0
; GISEL-NEXT:    s_mov_b64 s[4:5], s[12:13]
; GISEL-NEXT:    s_mov_b32 s32, 0
; GISEL-NEXT:    s_wait_kmcnt 0x0
; GISEL-NEXT:    v_mov_b32_e32 v0, s6
; GISEL-NEXT:    s_mov_b64 s[6:7], s[2:3]
; GISEL-NEXT:    s_wait_alu 0xfffe
; GISEL-NEXT:    s_swappc_b64 s[30:31], s[0:1]
; GISEL-NEXT:    s_endpgm
;
; DAGISEL-LABEL: entry_func:
; DAGISEL:       ; %bb.0:
; DAGISEL-NEXT:    s_load_b32 s12, s[4:5], 0x0
; DAGISEL-NEXT:    s_mov_b64 s[10:11], s[6:7]
; DAGISEL-NEXT:    v_mov_b32_e32 v31, v0
; DAGISEL-NEXT:    s_mov_b64 s[6:7], s[0:1]
; DAGISEL-NEXT:    ;;#ASMSTART
; DAGISEL-NEXT:    s_nop
; DAGISEL-NEXT:    ;;#ASMEND
; DAGISEL-NEXT:    s_add_nc_u64 s[8:9], s[4:5], 4
; DAGISEL-NEXT:    s_mov_b32 s1, non_entry_func@abs32@hi
; DAGISEL-NEXT:    s_mov_b32 s0, non_entry_func@abs32@lo
; DAGISEL-NEXT:    s_mov_b64 s[4:5], s[6:7]
; DAGISEL-NEXT:    s_mov_b64 s[6:7], s[2:3]
; DAGISEL-NEXT:    s_mov_b32 s32, 0
; DAGISEL-NEXT:    s_wait_kmcnt 0x0
; DAGISEL-NEXT:    v_mov_b32_e32 v0, s12
; DAGISEL-NEXT:    s_swappc_b64 s[30:31], s[0:1]
; DAGISEL-NEXT:    s_endpgm
  call void asm "s_nop", "~{v0},~{v8},~{v40},~{v41},~{v49},~{v52},~{v56},~{v60},~{v120},~{s0},~{s48}"()
  %res = call i32 @non_entry_func(i32 %x)
  ret void
}