; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a %s -o - | FileCheck %s
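; Regression test reduced from a register allocator crash while handling undef
; values; the CHECK lines pin down the gfx90a code generated for the reproducer.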
declare ptr @G()

define amdgpu_kernel void @foo(ptr addrspace(5) %ptr5, ptr %p0, double %v0, <4 x i32> %vec) {
; CHECK-LABEL: foo:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_add_u32 flat_scratch_lo, s12, s17
; CHECK-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
; CHECK-NEXT: v_pk_mov_b32 v[46:47], 0, 0
; CHECK-NEXT: flat_load_dword v42, v[46:47]
; CHECK-NEXT: s_mov_b64 s[38:39], s[6:7]
; CHECK-NEXT: s_mov_b64 s[48:49], s[4:5]
; CHECK-NEXT: s_load_dwordx4 s[4:7], s[8:9], 0x8
; CHECK-NEXT: s_load_dword s64, s[8:9], 0x0
; CHECK-NEXT: s_add_u32 s0, s0, s17
; CHECK-NEXT: s_addc_u32 s1, s1, 0
; CHECK-NEXT: s_mov_b64 s[34:35], s[8:9]
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_accvgpr_write_b32 a32, s6
; CHECK-NEXT: v_accvgpr_write_b32 a33, s7
; CHECK-NEXT: s_mov_b64 s[6:7], src_private_base
; CHECK-NEXT: s_cmp_lg_u32 s64, -1
; CHECK-NEXT: s_cselect_b32 s7, s7, 0
; CHECK-NEXT: s_cselect_b32 s8, s64, 0
; CHECK-NEXT: s_add_u32 s50, s34, 48
; CHECK-NEXT: s_addc_u32 s51, s35, 0
; CHECK-NEXT: v_pk_mov_b32 v[58:59], s[4:5], s[4:5] op_sel:[0,1]
; CHECK-NEXT: s_getpc_b64 s[4:5]
; CHECK-NEXT: s_add_u32 s4, s4, G@gotpcrel32@lo+4
; CHECK-NEXT: s_addc_u32 s5, s5, G@gotpcrel32@hi+12
; CHECK-NEXT: s_load_dwordx2 s[54:55], s[4:5], 0x0
; CHECK-NEXT: s_mov_b32 s6, 0
; CHECK-NEXT: v_mov_b32_e32 v57, s7
; CHECK-NEXT: s_mov_b32 s7, s6
; CHECK-NEXT: s_mov_b32 s53, s14
; CHECK-NEXT: v_mov_b32_e32 v56, s8
; CHECK-NEXT: v_pk_mov_b32 v[60:61], s[6:7], s[6:7] op_sel:[0,1]
; CHECK-NEXT: s_mov_b64 s[4:5], s[48:49]
; CHECK-NEXT: s_mov_b64 s[6:7], s[38:39]
; CHECK-NEXT: s_mov_b64 s[8:9], s[50:51]
; CHECK-NEXT: s_mov_b32 s12, s14
; CHECK-NEXT: s_mov_b32 s13, s15
; CHECK-NEXT: s_mov_b32 s14, s16
; CHECK-NEXT: v_mov_b32_e32 v31, v0
; CHECK-NEXT: s_mov_b32 s32, 0
; CHECK-NEXT: s_mov_b32 s33, s16
; CHECK-NEXT: s_mov_b32 s52, s15
; CHECK-NEXT: s_mov_b64 s[36:37], s[10:11]
; CHECK-NEXT: v_mov_b32_e32 v40, v0
; CHECK-NEXT: flat_store_dwordx2 v[58:59], v[60:61]
; CHECK-NEXT: ; kill: def $sgpr15 killed $sgpr15
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_swappc_b64 s[30:31], s[54:55]
; CHECK-NEXT: flat_load_dwordx2 v[62:63], v[58:59]
; CHECK-NEXT: v_mov_b32_e32 v44, 0
; CHECK-NEXT: v_mov_b32_e32 v45, 0x3ff00000
; CHECK-NEXT: s_mov_b64 s[4:5], s[48:49]
; CHECK-NEXT: s_mov_b64 s[6:7], s[38:39]
; CHECK-NEXT: s_mov_b64 s[8:9], s[50:51]
; CHECK-NEXT: s_mov_b64 s[10:11], s[36:37]
; CHECK-NEXT: s_mov_b32 s12, s53
; CHECK-NEXT: s_mov_b32 s13, s52
; CHECK-NEXT: s_mov_b32 s14, s33
; CHECK-NEXT: v_mov_b32_e32 v31, v40
; CHECK-NEXT: flat_store_dwordx2 v[46:47], v[44:45]
; CHECK-NEXT: flat_store_dwordx2 v[58:59], v[60:61]
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: ; kill: def $sgpr15 killed $sgpr15
; CHECK-NEXT: s_swappc_b64 s[30:31], s[54:55]
; CHECK-NEXT: flat_load_dwordx2 v[0:1], v[56:57] glc
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v0, s64
; CHECK-NEXT: v_cmp_lt_i32_e32 vcc, 0, v42
; CHECK-NEXT: flat_store_dwordx2 v[58:59], v[62:63]
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: flat_store_dwordx2 v[58:59], a[32:33]
; CHECK-NEXT: buffer_store_dword a33, v0, s[0:3], 0 offen offset:4
; CHECK-NEXT: buffer_store_dword v44, v0, s[0:3], 0 offen
; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
; CHECK-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; CHECK-NEXT: s_cbranch_execz .LBB0_4
; CHECK-NEXT: ; %bb.1: ; %LeafBlock5
; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, v42
; CHECK-NEXT: s_and_saveexec_b64 s[6:7], vcc
; CHECK-NEXT: ; %bb.2: ; %sw.bb17.i.i.i.i
; CHECK-NEXT: v_mov_b32_e32 v44, 1
; CHECK-NEXT: ; %bb.3: ; %Flow
; CHECK-NEXT: s_or_b64 exec, exec, s[6:7]
; CHECK-NEXT: .LBB0_4: ; %Flow8
; CHECK-NEXT: s_or_saveexec_b64 s[4:5], s[4:5]
; CHECK-NEXT: v_pk_mov_b32 v[0:1], v[42:43], v[42:43] op_sel:[0,1]
; CHECK-NEXT: v_pk_mov_b32 v[2:3], v[44:45], v[44:45] op_sel:[0,1]
; CHECK-NEXT: s_xor_b64 exec, exec, s[4:5]
; CHECK-NEXT: s_cbranch_execz .LBB0_8
; CHECK-NEXT: ; %bb.5: ; %LeafBlock
; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 0, v42
; CHECK-NEXT: v_pk_mov_b32 v[0:1], v[42:43], v[42:43] op_sel:[0,1]
; CHECK-NEXT: v_pk_mov_b32 v[2:3], v[44:45], v[44:45] op_sel:[0,1]
; CHECK-NEXT: s_and_saveexec_b64 s[6:7], vcc
; CHECK-NEXT: ; %bb.6: ; %sw.bb.i.i.i.i
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: ; %bb.7: ; %Flow7
; CHECK-NEXT: s_or_b64 exec, exec, s[6:7]
; CHECK-NEXT: v_mov_b32_e32 v44, 0
; CHECK-NEXT: .LBB0_8: ; %bb.1
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 0, v44
; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
; CHECK-NEXT: s_cbranch_execz .LBB0_10
; CHECK-NEXT: ; %bb.9: ; %sw.bb.i.i.i.i.i
; CHECK-NEXT: s_load_dwordx4 s[8:11], s[34:35], 0x20
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
; CHECK-NEXT: v_pk_mov_b32 v[2:3], s[10:11], s[10:11] op_sel:[0,1]
; CHECK-NEXT: .LBB0_10: ; %bb.2
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; CHECK-NEXT: s_and_saveexec_b64 s[4:5], vcc
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT: buffer_store_dword v42, off, s[0:3], 0
; CHECK-NEXT: s_endpgm
entry:
  %load.null = load i32, ptr null, align 8
  %insert = insertelement <4 x i32> zeroinitializer, i32 %load.null, i64 0
  %cast = addrspacecast ptr addrspace(5) %ptr5 to ptr
  store double 0.000000e+00, ptr %p0, align 8
  %call = tail call ptr @G()
  store double 1.000000e+00, ptr null, align 8
  %load.0 = load double, ptr %p0, align 8
  store volatile double 0.000000e+00, ptr %p0, align 8
  %call.1 = tail call ptr @G()
  %load.1 = load volatile double, ptr %cast, align 8
  store volatile double %load.0, ptr %p0, align 8
  store double %v0, ptr %p0, align 8
  %load.2 = load double, ptr %p0, align 8
  store double %load.2, ptr addrspace(5) %ptr5, align 8
  store i32 0, ptr addrspace(5) %ptr5, align 4
  switch i32 %load.null, label %bb.1 [
    i32 0, label %sw.bb.i.i.i.i
    i32 1, label %sw.bb17.i.i.i.i
  ]

sw.bb.i.i.i.i:                                    ; preds = %entry
  br label %bb.1

sw.bb17.i.i.i.i:                                  ; preds = %entry
  br label %bb.1

bb.1:                                             ; preds = %sw.bb17.i.i.i.i, %sw.bb.i.i.i.i, %entry
  %phi.0 = phi i32 [ 0, %entry ], [ 0, %sw.bb.i.i.i.i ], [ 1, %sw.bb17.i.i.i.i ]
  %phi.1 = phi <4 x i32> [ %insert, %entry ], [ zeroinitializer, %sw.bb.i.i.i.i ], [ %insert, %sw.bb17.i.i.i.i ]
  switch i32 %phi.0, label %bb.2 [
    i32 0, label %sw.bb.i.i.i.i.i
  ]

sw.bb.i.i.i.i.i:                                  ; preds = %bb.1
  br label %bb.2

bb.2:                                             ; preds = %sw.bb.i.i.i.i.i, %bb.1
  %phi.2 = phi <4 x i32> [ %phi.1, %bb.1 ], [ %vec, %sw.bb.i.i.i.i.i ]
  %extract.1 = extractelement <4 x i32> %phi.2, i64 0
  switch i32 1, label %bb.3 [
    i32 0, label %sw.bb.i.i5.i.i
  ]

sw.bb.i.i5.i.i:                                   ; preds = %bb.2
  br label %bb.3

bb.3:                                             ; preds = %sw.bb.i.i5.i.i, %bb.2
  %phi.3 = phi <4 x i32> [ zeroinitializer, %sw.bb.i.i5.i.i ], [ %insert, %bb.2 ]
  switch i32 %extract.1, label %bb.4 [
    i32 0, label %sw.bb7.i.i.i3.i.i
  ]

sw.bb7.i.i.i3.i.i:                                ; preds = %bb.3
  %insert.0 = insertelement <4 x i32> %insert, i32 0, i64 1
  br label %bb.4

bb.4:                                             ; preds = %sw.bb7.i.i.i3.i.i, %bb.3
  %phi.4 = phi <4 x i32> [ %phi.3, %bb.3 ], [ %insert.0, %sw.bb7.i.i.i3.i.i ]
  %extract = extractelement <4 x i32> %phi.4, i64 0
  store i32 %extract, ptr addrspace(5) null, align 4
  ret void
}