Matt Arsenault 58a88001f3
PeepholeOpt: Fix looking for def of current copy to coalesce (#125533)
This fixes the handling of subregister extract copies. This
will allow AMDGPU to remove its implementation of
shouldRewriteCopySrc, which exists as a 10 year old workaround
to this bug. peephole-opt-fold-reg-sequence-subreg.mir will
show the expected improvement once the custom implementation
is removed.

The copy coalescing processing here is overly abstracted
from what's actually happening. Previously when visiting
coalescable copy-like instructions, we would parse the
sources one at a time and then pass the def of the root
instruction into findNextSource. This means that the
first thing the new ValueTracker constructed would do
is getVRegDef to find the instruction we are currently
processing. This adds an unnecessary step, placing
a useless entry in the RewriteMap, and required skipping
the no-op case where getNewSource would return the original
source operand. This was a problem since in the case
of a subregister extract, shouldRewriteCopySource would always
say that it is useful to rewrite and the use-def chain walk
would abort, returning the original operand. Move the process
to start looking at the source operand to begin with.

This does not fix the confused handling in the uncoalescable
copy case which is proving to be more difficult. Some currently
handled cases have multiple defs from a single source, and other
handled cases have 0 input operands. It would be simpler if
this was implemented with isCopyLikeInstr, rather than guessing
at the operand structure as it does now.

There are some improvements and some regressions. The
regressions appear to be downstream issues for the most part. One
of the uglier regressions is in PPC, where a sequence of insert_subregs
is used to build registers. I opened #125502 to use reg_sequence instead,
which may help.

The worst regression is an absurd SPARC testcase using a <251 x fp128>,
which uses a very long chain of insert_subregs.

We need improved subregister handling locally in PeepholeOptimizer,
and other passes like MachineCSE to fix some of the other regressions.
We should handle subregister composes and folding more indexes
into insert_subreg and reg_sequence.
2025-02-05 23:29:02 +07:00

330 lines
11 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; Test 64-bit addition in which the second operand is variable.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
declare i64 @foo()
; Check ALGR.
; Register+register add: the carry-out must be materialized into the
; zeroext i1 return value, expected here as an ipm + risbg sequence.
define zeroext i1 @f1(i64 %dummy, i64 %a, i64 %b, ptr %res) {
; CHECK-LABEL: f1:
; CHECK: # %bb.0:
; CHECK-NEXT: algr %r3, %r4
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: stg %r3, 0(%r5)
; CHECK-NEXT: br %r14
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue {i64, i1} %t, 0 ; the 64-bit sum
%obit = extractvalue {i64, i1} %t, 1 ; the carry/overflow flag
store i64 %val, ptr %res
ret i1 %obit
}
; Check using the overflow result for a branch.
; The carry feeds a conditional branch directly, so no ipm/risbg
; materialization is expected — just a conditional jump after the algr.
define void @f2(i64 %dummy, i64 %a, i64 %b, ptr %res) {
; CHECK-LABEL: f2:
; CHECK: # %bb.0:
; CHECK-NEXT: algr %r3, %r4
; CHECK-NEXT: stg %r3, 0(%r5)
; CHECK-NEXT: jgnle foo@PLT
; CHECK-NEXT: .LBB1_1: # %exit
; CHECK-NEXT: br %r14
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1 ; consumed only by the branch below
store i64 %val, ptr %res
br i1 %obit, label %call, label %exit
call:
tail call i64 @foo()
br label %exit
exit:
ret void
}
; ... and the same with the inverted direction.
; Same as f2 but with the branch successors swapped, so the expected
; branch condition is inverted (jgle instead of jgnle).
define void @f3(i64 %dummy, i64 %a, i64 %b, ptr %res) {
; CHECK-LABEL: f3:
; CHECK: # %bb.0:
; CHECK-NEXT: algr %r3, %r4
; CHECK-NEXT: stg %r3, 0(%r5)
; CHECK-NEXT: jgle foo@PLT
; CHECK-NEXT: .LBB2_1: # %exit
; CHECK-NEXT: br %r14
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, ptr %res
br i1 %obit, label %exit, label %call ; successors inverted relative to f2
call:
tail call i64 @foo()
br label %exit
exit:
ret void
}
; Check ALG with no displacement.
; Register+memory add: the load should be folded into the add
; (alg with a 0 displacement).
define zeroext i1 @f4(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f4:
; CHECK: # %bb.0:
; CHECK-NEXT: alg %r3, 0(%r4)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: stg %r3, 0(%r5)
; CHECK-NEXT: br %r14
%b = load i64, ptr %src ; expected to fold into the add
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, ptr %res
ret i1 %obit
}
; Check the high end of the aligned ALG range.
; Offset 65535 * 8 = 524280 is still encodable directly in the
; instruction's displacement field.
define zeroext i1 @f5(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f5:
; CHECK: # %bb.0:
; CHECK-NEXT: alg %r3, 524280(%r4)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: stg %r3, 0(%r5)
; CHECK-NEXT: br %r14
%ptr = getelementptr i64, ptr %src, i64 65535 ; byte offset 524280
%b = load i64, ptr %ptr
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, ptr %res
ret i1 %obit
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
; Byte offset 524288 no longer fits the displacement field, so the
; expected output adjusts the base register first (agfi).
define zeroext i1 @f6(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f6:
; CHECK: # %bb.0:
; CHECK-NEXT: agfi %r4, 524288
; CHECK-NEXT: alg %r3, 0(%r4)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: stg %r3, 0(%r5)
; CHECK-NEXT: br %r14
%ptr = getelementptr i64, ptr %src, i64 65536 ; byte offset 524288, one past f5
%b = load i64, ptr %ptr
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, ptr %res
ret i1 %obit
}
; Check the high end of the negative aligned ALG range.
; A small negative displacement (-8) is encodable directly.
define zeroext i1 @f7(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f7:
; CHECK: # %bb.0:
; CHECK-NEXT: alg %r3, -8(%r4)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: stg %r3, 0(%r5)
; CHECK-NEXT: br %r14
%ptr = getelementptr i64, ptr %src, i64 -1 ; byte offset -8
%b = load i64, ptr %ptr
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, ptr %res
ret i1 %obit
}
; Check the low end of the ALG range.
; Byte offset -524288 is the most negative displacement still
; encodable directly.
define zeroext i1 @f8(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f8:
; CHECK: # %bb.0:
; CHECK-NEXT: alg %r3, -524288(%r4)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: stg %r3, 0(%r5)
; CHECK-NEXT: br %r14
%ptr = getelementptr i64, ptr %src, i64 -65536 ; byte offset -524288
%b = load i64, ptr %ptr
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, ptr %res
ret i1 %obit
}
; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
; Byte offset -524296 is one doubleword below the encodable range,
; so the base register is adjusted first (agfi).
define zeroext i1 @f9(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f9:
; CHECK: # %bb.0:
; CHECK-NEXT: agfi %r4, -524296
; CHECK-NEXT: alg %r3, 0(%r4)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: stg %r3, 0(%r5)
; CHECK-NEXT: br %r14
%ptr = getelementptr i64, ptr %src, i64 -65537 ; byte offset -524296, one below f8
%b = load i64, ptr %ptr
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, ptr %res
ret i1 %obit
}
; Check that ALG allows an index.
; base + index + 524280 should be folded into a single
; base-index-displacement addressing mode.
define zeroext i1 @f10(i64 %src, i64 %index, i64 %a, ptr %res) {
; CHECK-LABEL: f10:
; CHECK: # %bb.0:
; CHECK-NEXT: alg %r4, 524280(%r3,%r2)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: stg %r4, 0(%r5)
; CHECK-NEXT: br %r14
%add1 = add i64 %src, %index ; base + index
%add2 = add i64 %add1, 524280 ; + displacement
%ptr = inttoptr i64 %add2 to ptr
%b = load i64, ptr %ptr
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
%val = extractvalue {i64, i1} %t, 0
%obit = extractvalue {i64, i1} %t, 1
store i64 %val, ptr %res
ret i1 %obit
}
; Check that additions of spilled values can use ALG rather than ALGR.
; Ten values are loaded before a call to @foo, creating enough register
; pressure that two of them must be spilled across the call.  The adds
; against the spilled values should use the memory form (alg from the
; stack slot, annotated "Folded Reload") instead of reloading into a
; register first.  The per-add overflow bits are OR'd together via the
; ipm/rosbg sequences into the single i1 result.
define zeroext i1 @f11(ptr %ptr0) {
; CHECK-LABEL: f11:
; CHECK: # %bb.0:
; CHECK-NEXT: stmg %r6, %r15, 48(%r15)
; CHECK-NEXT: .cfi_offset %r6, -112
; CHECK-NEXT: .cfi_offset %r7, -104
; CHECK-NEXT: .cfi_offset %r8, -96
; CHECK-NEXT: .cfi_offset %r9, -88
; CHECK-NEXT: .cfi_offset %r10, -80
; CHECK-NEXT: .cfi_offset %r11, -72
; CHECK-NEXT: .cfi_offset %r12, -64
; CHECK-NEXT: .cfi_offset %r13, -56
; CHECK-NEXT: .cfi_offset %r14, -48
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: aghi %r15, -176
; CHECK-NEXT: .cfi_def_cfa_offset 336
; CHECK-NEXT: lg %r7, 0(%r2)
; CHECK-NEXT: lg %r6, 16(%r2)
; CHECK-NEXT: lg %r13, 32(%r2)
; CHECK-NEXT: lg %r12, 48(%r2)
; CHECK-NEXT: lg %r8, 64(%r2)
; CHECK-NEXT: lg %r9, 80(%r2)
; CHECK-NEXT: lg %r10, 96(%r2)
; CHECK-NEXT: lg %r11, 112(%r2)
; CHECK-NEXT: mvc 160(8,%r15), 128(%r2) # 8-byte Folded Spill
; CHECK-NEXT: mvc 168(8,%r15), 144(%r2) # 8-byte Folded Spill
; CHECK-NEXT: brasl %r14, foo@PLT
; CHECK-NEXT: algr %r2, %r7
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r0, %r0, 63, 191, 35
; CHECK-NEXT: algr %r2, %r6
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: algr %r2, %r13
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: algr %r2, %r12
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: algr %r2, %r8
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: algr %r2, %r9
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: algr %r2, %r10
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: algr %r2, %r11
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: alg %r2, 160(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: alg %r2, 168(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 0
; CHECK-NEXT: lmg %r6, %r15, 224(%r15)
; CHECK-NEXT: br %r14
%ptr1 = getelementptr i64, ptr %ptr0, i64 2
%ptr2 = getelementptr i64, ptr %ptr0, i64 4
%ptr3 = getelementptr i64, ptr %ptr0, i64 6
%ptr4 = getelementptr i64, ptr %ptr0, i64 8
%ptr5 = getelementptr i64, ptr %ptr0, i64 10
%ptr6 = getelementptr i64, ptr %ptr0, i64 12
%ptr7 = getelementptr i64, ptr %ptr0, i64 14
%ptr8 = getelementptr i64, ptr %ptr0, i64 16
%ptr9 = getelementptr i64, ptr %ptr0, i64 18
%val0 = load i64, ptr %ptr0
%val1 = load i64, ptr %ptr1
%val2 = load i64, ptr %ptr2
%val3 = load i64, ptr %ptr3
%val4 = load i64, ptr %ptr4
%val5 = load i64, ptr %ptr5
%val6 = load i64, ptr %ptr6
%val7 = load i64, ptr %ptr7
%val8 = load i64, ptr %ptr8
%val9 = load i64, ptr %ptr9
%ret = call i64 @foo() ; all ten loaded values are live across this call
%t0 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %ret, i64 %val0)
%add0 = extractvalue {i64, i1} %t0, 0
%obit0 = extractvalue {i64, i1} %t0, 1
%t1 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add0, i64 %val1)
%add1 = extractvalue {i64, i1} %t1, 0
%obit1 = extractvalue {i64, i1} %t1, 1
%res1 = or i1 %obit0, %obit1 ; accumulate overflow bits
%t2 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add1, i64 %val2)
%add2 = extractvalue {i64, i1} %t2, 0
%obit2 = extractvalue {i64, i1} %t2, 1
%res2 = or i1 %res1, %obit2
%t3 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add2, i64 %val3)
%add3 = extractvalue {i64, i1} %t3, 0
%obit3 = extractvalue {i64, i1} %t3, 1
%res3 = or i1 %res2, %obit3
%t4 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add3, i64 %val4)
%add4 = extractvalue {i64, i1} %t4, 0
%obit4 = extractvalue {i64, i1} %t4, 1
%res4 = or i1 %res3, %obit4
%t5 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add4, i64 %val5)
%add5 = extractvalue {i64, i1} %t5, 0
%obit5 = extractvalue {i64, i1} %t5, 1
%res5 = or i1 %res4, %obit5
%t6 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add5, i64 %val6)
%add6 = extractvalue {i64, i1} %t6, 0
%obit6 = extractvalue {i64, i1} %t6, 1
%res6 = or i1 %res5, %obit6
%t7 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add6, i64 %val7)
%add7 = extractvalue {i64, i1} %t7, 0
%obit7 = extractvalue {i64, i1} %t7, 1
%res7 = or i1 %res6, %obit7
%t8 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add7, i64 %val8)
%add8 = extractvalue {i64, i1} %t8, 0
%obit8 = extractvalue {i64, i1} %t8, 1
%res8 = or i1 %res7, %obit8
%t9 = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %add8, i64 %val9)
%add9 = extractvalue {i64, i1} %t9, 0
%obit9 = extractvalue {i64, i1} %t9, 1
%res9 = or i1 %res8, %obit9
ret i1 %res9 ; true if any of the ten additions overflowed
}
declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone