
Anatoly Trosinenko found that when hasSideEffects was set to 0 in the definition of LOADgotAUTH, the MultiSource/Benchmarks/Ptrdist/ks/ks test from llvm-test-suite started to crash. The issue was traced down to the MachineLICM pass hoisting LOADgotAUTH right after an unrelated copy to x16, rewriting this code:

```
bb.0:
  renamable $x16 = COPY renamable $x12
  B %bb.1

bb.1:
  ... /* use $x16 */ ...
  renamable $x20 = LOADgotAUTH target-flags(aarch64-got) @some_variable, implicit-def dead $x16, implicit-def dead $x17, implicit-def dead $nzcv
  ... /* use $x20 */ ...
```

into the following:

```
bb.0:
  renamable $x16 = COPY renamable $x12
  renamable $x20 = LOADgotAUTH target-flags(aarch64-got) @some_variable, implicit-def dead $x16, implicit-def dead $x17, implicit-def dead $nzcv
  B %bb.1

bb.1:
  ... /* use $x16 */ ...
  ... /* use $x20 */ ...
```

The issue was caused by inconsistent logic between implicit and explicit operand definitions: the implicit-operand path incorrectly skipped the RUDefs check for dead operands, so RuledOut was never set for the X16 operand. Because there isn't really a semantic difference between implicit and explicit operands at this point, let's remove the isImplicit check and adjust the logic to do the same thing in both cases:

- For implicit operands, we now check and update RUDefs in the same way as for explicit operands.
- For explicit operands, we now allow dead operands to be skipped.

Reviewers: arsenm, s-barannikov, atrosinenko

Reviewed By: arsenm, s-barannikov

Pull Request: https://github.com/llvm/llvm-project/pull/147624
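
For illustration, here is a minimal C++ sketch of the unified def-operand scan described above. This is not the actual code touched by the patch: only the names RUDefs and RuledOut come from the message, while the function name, signature, and conflict semantics are assumptions made for the sake of the example.

```cpp
// Illustrative sketch only (not the patched LLVM source): record the register
// units defined by MI, treating implicit and explicit defs identically and no
// longer ignoring dead defs on the implicit path.
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

using namespace llvm;

// RUDefs must be sized to TRI.getNumRegUnits() by the caller. Returns true if
// MI defines a register unit that was already recorded, i.e. the instruction
// conflicts with an earlier def and must be ruled out.
static bool recordPhysRegDefs(const MachineInstr &MI,
                              const TargetRegisterInfo &TRI,
                              BitVector &RUDefs) {
  bool RuledOut = false;
  for (const MachineOperand &MO : MI.operands()) {
    // Same treatment for implicit and explicit operands: no isImplicit check.
    if (!MO.isReg() || !MO.isDef() || !MO.getReg().isPhysical())
      continue;
    // A dead def still clobbers its register units, so record it here;
    // "dead" only means the defined value is never read afterwards.
    for (unsigned Unit : TRI.regunits(MO.getReg().asMCReg())) {
      if (RUDefs.test(Unit))
        RuledOut = true; // Unit already defined earlier: conflict.
      RUDefs.set(Unit);
    }
  }
  return RuledOut;
}
```

The point mirrored here is that a dead implicit-def such as LOADgotAUTH's $x16 still updates RUDefs, so a later conflict check can rule the hoist out instead of silently clobbering the unrelated copy to x16.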
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx90a | FileCheck %s

define amdgpu_kernel void @copy_to_reg_frameindex(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: copy_to_reg_frameindex:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_cmp_lt_u32 0, 16
; CHECK-NEXT: ; implicit-def: $vgpr0
; CHECK-NEXT: .LBB0_1: ; %loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: s_set_gpr_idx_on 0, gpr_idx(DST)
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: s_set_gpr_idx_off
; CHECK-NEXT: s_cbranch_scc1 .LBB0_1
; CHECK-NEXT: ; %bb.2: ; %done
; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: global_store_dword v1, v0, s[0:1]
; CHECK-NEXT: s_endpgm
entry:
  %B = srem i32 %c, -1
  br label %loop

loop:
  %promotealloca = phi <16 x i32> [ poison, %entry ], [ %insert, %loop ]
  %inc = phi i32 [ 0, %entry ], [ %inc.i, %loop ]
  %insert = insertelement <16 x i32> %promotealloca, i32 %inc, i32 %inc
  %inc.i = add i32 %inc, %B
  %cnd = icmp uge i32 %inc.i, 16
  br i1 %cnd, label %done, label %loop

done:
  %extract.0 = extractelement <16 x i32> %insert, i32 0
  store i32 %extract.0, ptr addrspace(1) %out, align 4
  ret void
}

; When creating registers for %divergent.alloca.phi, we should report
; the CopyToReg as divergent values (not uniform just because the
; alloca is uniform)
define void @phi_with_alloca_and_divergent_copy_to_reg(ptr addrspace(5) %divergent.private, ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: phi_with_alloca_and_divergent_copy_to_reg:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v7, v2
; CHECK-NEXT: v_mov_b32_e32 v6, v1
; CHECK-NEXT: s_mov_b64 s[4:5], 0
; CHECK-NEXT: v_lshrrev_b32_e64 v2, 6, s32
; CHECK-NEXT: .LBB1_1: ; %loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: v_mov_b32_e32 v1, v2
; CHECK-NEXT: v_lshl_add_u32 v2, v3, 2, v1
; CHECK-NEXT: buffer_store_dword v3, v2, s[0:3], 0 offen
; CHECK-NEXT: v_add_u32_e32 v2, 1, v3
; CHECK-NEXT: v_cmp_lt_u32_e32 vcc, 15, v2
; CHECK-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; CHECK-NEXT: v_mov_b32_e32 v3, v4
; CHECK-NEXT: v_mov_b32_e32 v2, v0
; CHECK-NEXT: s_andn2_b64 exec, exec, s[4:5]
; CHECK-NEXT: s_cbranch_execnz .LBB1_1
; CHECK-NEXT: ; %bb.2: ; %done
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT: buffer_load_dword v0, v1, s[0:3], 0 offen
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: global_store_dword v[6:7], v0, off
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
entry:
  %alloca0 = alloca [16 x i32], addrspace(5)
  br label %loop

loop:
  %inc = phi i32 [%a, %entry], [%b, %loop]
  %divergent.alloca.phi = phi ptr addrspace(5) [ %alloca0, %entry ], [ %divergent.private, %loop ]
  %ptr = getelementptr [16 x i32], ptr addrspace(5) %divergent.alloca.phi, i32 0, i32 %inc
  store i32 %inc, ptr addrspace(5) %ptr
  %inc.i = add i32 %inc, 1
  %cnd = icmp uge i32 %inc.i, 16
  br i1 %cnd, label %done, label %loop

done:
  %tmp1 = load i32, ptr addrspace(5) %divergent.alloca.phi
  store i32 %tmp1, ptr addrspace(1) %out
  ret void
}