[TailDup] Delay aggressive computed-goto taildup to after RegAlloc. (#150911)

Back-ports additional tests (eb9febb4a6b0, dc697de12792), a refactoring
(43c9c14577db), and a functional change (18f1369297f4) in a single PR.

https://github.com/llvm/llvm-project/pull/114990 allowed more aggressive
tail duplication of computed gotos in both the pre- and post-regalloc
tail duplication passes.
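
For context, "computed goto" here means GNU C's labels-as-values
extension, the idiomatic way to write interpreter dispatch loops; each
`goto *` lowers to an `indirectbr` in LLVM IR. A minimal sketch
(illustrative only; the opcode set and names are made up, not taken
from the patch):

#include <stddef.h>

/* Sketch of a computed-goto interpreter (GNU C labels-as-values,
 * supported by Clang and GCC). Each handler dispatches directly to the
 * next opcode's handler instead of branching back to a shared switch. */
long run(const unsigned char *pc, long *stack) {
  static void *targets[] = {&&op_halt, &&op_push, &&op_add};
  long acc = 0;
  size_t sp = 0;
  goto *targets[*pc++]; /* becomes an `indirectbr` in LLVM IR */

op_push:
  stack[sp++] = acc;
  goto *targets[*pc++]; /* one indirect branch per handler */
op_add:
  acc += stack[--sp];
  goto *targets[*pc++];
op_halt:
  return acc;
}

As the comments in the shouldTailDuplicate hunks below explain, the
optimizer initially "factors" these per-handler indirect branches into a
single shared dispatch block; aggressive tail duplication later
"unfactors" them again.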

In some cases, performing tail-duplication too early can lead to worse
results, especially when duplicating blocks with a large number of phi nodes.

This is causing a ~3% performance regression in some workloads using
Python 3.12.

This patch updates TailDup to delay aggressive tail-duplication for
computed gotos to after register allocation.

This means the non-duplicated version is kept for longer throughout the
backend, which should reduce compile time and allow a number of
optimizations and simplifications to trigger before the CFG is
drastically expanded.
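
Schematically (a hand-written analogy, not compiler output): in the
"factored" form produced early in the pipeline, every handler branches
back to one shared dispatch block, so the whole loop contains a single
indirect branch. Post-RA tail duplication copies that block into each
handler again, recovering the one-indirect-branch-per-opcode shape that
branch predictors handle well, but only after the smaller CFG has been
carried through most of the backend:

#include <stddef.h>

/* The "factored" shape: all handlers funnel through one dispatch point,
 * i.e. a single shared indirect branch. Aggressive tail duplication
 * copies `dispatch` into each handler ("unfactoring"), trading code size
 * for a separately predictable indirect branch per opcode. */
long run_factored(const unsigned char *pc, long *stack) {
  static void *targets[] = {&&op_halt, &&op_push, &&op_add};
  long acc = 0;
  size_t sp = 0;
dispatch:
  goto *targets[*pc++]; /* the single shared indirect branch */
op_push:
  stack[sp++] = acc;
  goto dispatch; /* direct jump back to the shared dispatch block */
op_add:
  acc += stack[--sp];
  goto dispatch;
op_halt:
  return acc;
}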

For the case in https://github.com/llvm/llvm-project/issues/106846, I
get the same performance with and without this patch on Skylake.

PR: https://github.com/llvm/llvm-project/pull/150911
Florian Hahn, 2025-07-28 09:37:00 +01:00
commit c587c24db5 (parent 993c5643ce)
5 changed files with 295 additions and 12 deletions

@@ -323,10 +323,11 @@ public:
   const MachineFunction *getParent() const { return xParent; }
   MachineFunction *getParent() { return xParent; }
 
-  /// Returns true if the original IR terminator is an `indirectbr`. This
-  /// typically corresponds to a `goto` in C, rather than jump tables.
-  bool terminatorIsComputedGoto() const {
-    return back().isIndirectBranch() &&
+  /// Returns true if the original IR terminator is an `indirectbr` with
+  /// successor blocks. This typically corresponds to a `goto` in C, rather than
+  /// jump tables.
+  bool terminatorIsComputedGotoWithSuccessors() const {
+    return back().isIndirectBranch() && !succ_empty() &&
            llvm::all_of(successors(), [](const MachineBasicBlock *Succ) {
              return Succ->isIRBlockAddressTaken();
            });

@@ -604,12 +604,23 @@ bool TailDuplicator::shouldTailDuplicate(bool IsSimple,
   bool HasComputedGoto = false;
   if (!TailBB.empty()) {
     HasIndirectbr = TailBB.back().isIndirectBranch();
-    HasComputedGoto = TailBB.terminatorIsComputedGoto();
+    HasComputedGoto = TailBB.terminatorIsComputedGotoWithSuccessors();
   }
 
   if (HasIndirectbr && PreRegAlloc)
     MaxDuplicateCount = TailDupIndirectBranchSize;
 
+  // Allow higher limits when the block has computed-gotos and running after
+  // register allocation. NB. This basically unfactors computed gotos that were
+  // factored early on in the compilation process to speed up edge based data
+  // flow. If we do not unfactor them again, it can seriously pessimize code
+  // with many computed jumps in the source code, such as interpreters.
+  // Therefore we do not restrict the computed gotos.
+  bool DupComputedGotoLate =
+      HasComputedGoto && MF->getTarget().getTargetTriple().isOSDarwin();
+  if (DupComputedGotoLate && !PreRegAlloc)
+    MaxDuplicateCount = std::max(MaxDuplicateCount, 10u);
+
   // Check the instructions in the block to determine whether tail-duplication
   // is invalid or unlikely to be profitable.
   unsigned InstrCount = 0;
@@ -663,12 +674,10 @@ bool TailDuplicator::shouldTailDuplicate(bool IsSimple,
   // Duplicating a BB which has both multiple predecessors and successors will
   // may cause huge amount of PHI nodes. If we want to remove this limitation,
   // we have to address https://github.com/llvm/llvm-project/issues/78578.
-  // NB. This basically unfactors computed gotos that were factored early on in
-  // the compilation process to speed up edge based data flow. If we do not
-  // unfactor them again, it can seriously pessimize code with many computed
-  // jumps in the source code, such as interpreters. Therefore we do not
-  // restrict the computed gotos.
-  if (!HasComputedGoto && TailBB.pred_size() > TailDupPredSize &&
+  bool CheckSuccessorAndPredecessorSize =
+      DupComputedGotoLate ? PreRegAlloc : !HasComputedGoto;
+  if (CheckSuccessorAndPredecessorSize &&
+      TailBB.pred_size() > TailDupPredSize &&
       TailBB.succ_size() > TailDupSuccSize) {
     // If TailBB or any of its successors contains a phi, we may have to add a
     // large number of additional phis with additional incoming values.

@@ -0,0 +1,143 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -tail-dup-pred-size=2 -tail-dup-succ-size=2 -o - %s | FileCheck %s

target triple = "arm64-apple-macosx13.0.0"

@opcode.targets = local_unnamed_addr constant [6 x ptr] [ptr blockaddress(@test_interp, %op1.bb), ptr blockaddress(@test_interp, %op6.bb), ptr blockaddress(@test_interp, %loop.header), ptr blockaddress(@test_interp, %op2.bb), ptr blockaddress(@test_interp, %op4.bb), ptr blockaddress(@test_interp, %op5.bb)]

define void @test_interp(ptr %frame, ptr %dst) {
; CHECK-LABEL: test_interp:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: stp x24, x23, [sp, #-64]! ; 16-byte Folded Spill
; CHECK-NEXT: stp x22, x21, [sp, #16] ; 16-byte Folded Spill
; CHECK-NEXT: stp x20, x19, [sp, #32] ; 16-byte Folded Spill
; CHECK-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: .cfi_offset w19, -24
; CHECK-NEXT: .cfi_offset w20, -32
; CHECK-NEXT: .cfi_offset w21, -40
; CHECK-NEXT: .cfi_offset w22, -48
; CHECK-NEXT: .cfi_offset w23, -56
; CHECK-NEXT: .cfi_offset w24, -64
; CHECK-NEXT: Lloh0:
; CHECK-NEXT: adrp x21, _opcode.targets@PAGE
; CHECK-NEXT: Lloh1:
; CHECK-NEXT: add x21, x21, _opcode.targets@PAGEOFF
; CHECK-NEXT: mov x24, xzr
; CHECK-NEXT: add x8, x21, xzr, lsl #3
; CHECK-NEXT: mov x19, x1
; CHECK-NEXT: mov x20, x0
; CHECK-NEXT: mov x23, xzr
; CHECK-NEXT: mov w22, #1 ; =0x1
; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: Ltmp0: ; Block address taken
; CHECK-NEXT: LBB0_1: ; %loop.header
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: mov x20, xzr
; CHECK-NEXT: mov x23, xzr
; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: Ltmp1: ; Block address taken
; CHECK-NEXT: LBB0_2: ; %op1.bb
; CHECK-NEXT: str xzr, [x19]
; CHECK-NEXT: Ltmp2: ; Block address taken
; CHECK-NEXT: LBB0_3: ; %op6.bb
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr x0, [x20, #-8]!
; CHECK-NEXT: ldr x8, [x0, #8]
; CHECK-NEXT: str x22, [x0]
; CHECK-NEXT: ldr x8, [x8, #48]
; CHECK-NEXT: blr x8
; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: Ltmp3: ; Block address taken
; CHECK-NEXT: LBB0_4: ; %op2.bb
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: mov x20, xzr
; CHECK-NEXT: str x23, [x19]
; CHECK-NEXT: mov x23, xzr
; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: Ltmp4: ; Block address taken
; CHECK-NEXT: LBB0_5: ; %op4.bb
; CHECK-NEXT: Ltmp5: ; Block address taken
; CHECK-NEXT: LBB0_6: ; %op5.bb
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: str x23, [x19]
; CHECK-NEXT: ldur x8, [x23, #12]
; CHECK-NEXT: ldur x9, [x20, #-8]
; CHECK-NEXT: add x23, x23, #20
; CHECK-NEXT: stp x8, x9, [x20, #-8]
; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: add x20, x20, #8
; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: .loh AdrpAdd Lloh0, Lloh1
entry:
  br label %loop.header

loop.header:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %op1.bb ], [ %iv.next, %op2.bb ], [ %iv.next, %op4.bb ], [ %iv.next, %op5.bb ], [ %iv.next, %op6.bb ], [ %iv.next, %loop.header ]
  %stack.pointer = phi ptr [ %frame, %entry ], [ %stack.8, %op1.bb ], [ null, %op2.bb ], [ %stack.next, %op4.bb ], [ %stack.next.2, %op5.bb ], [ %stack.4, %op6.bb ], [ null, %loop.header ]
  %next.instr = phi ptr [ null, %entry ], [ %next.instr, %op1.bb ], [ null, %op2.bb ], [ %next.instr.20, %op4.bb ], [ %next.instr.21, %op5.bb ], [ %next.instr, %op6.bb ], [ null, %loop.header ]
  %iv.next = add i64 %iv, 1
  %next_op = getelementptr [6 x ptr], ptr @opcode.targets, i64 0, i64 %iv
  indirectbr ptr %next_op, [label %op1.bb, label %op6.bb, label %loop.header, label %op2.bb, label %op4.bb, label %op5.bb]

op1.bb:
  store ptr null, ptr %dst, align 8
  %stack.8 = getelementptr i8, ptr %stack.pointer, i64 -8
  %l.0 = load ptr, ptr %stack.8, align 8
  store i64 1, ptr %l.0, align 8
  %gep.0 = getelementptr i8, ptr %l.0, i64 8
  %l.1 = load ptr, ptr %gep.0, align 8
  %gep.1 = getelementptr i8, ptr %l.1, i64 48
  %l.2 = load ptr, ptr %gep.1, align 8
  tail call void %l.2(ptr nonnull %l.0)
  br label %loop.header

op2.bb:
  store ptr %next.instr, ptr %dst, align 8
  br label %loop.header

op4.bb:
  store ptr %next.instr, ptr %dst, align 8
  %next.instr.20 = getelementptr i8, ptr %next.instr, i64 20
  %stack.2 = getelementptr i8, ptr %stack.pointer, i64 -8
  %l.3 = load ptr, ptr %stack.2, align 8
  %next.instr.12 = getelementptr i8, ptr %next.instr, i64 12
  %next.instr.12.val = load ptr, ptr %next.instr.12, align 2
  store ptr %next.instr.12.val, ptr %stack.2, align 8
  store ptr %l.3, ptr %stack.pointer, align 8
  %stack.next = getelementptr i8, ptr %stack.pointer, i64 8
  br label %loop.header

op5.bb:
  store ptr %next.instr, ptr %dst, align 8
  %next.instr.21 = getelementptr i8, ptr %next.instr, i64 20
  %stack.3 = getelementptr i8, ptr %stack.pointer, i64 -8
  %l.4 = load ptr, ptr %stack.3, align 8
  %next.instr.2 = getelementptr i8, ptr %next.instr, i64 12
  %next.instr.2.val = load ptr, ptr %next.instr.2, align 2
  store ptr %next.instr.2.val, ptr %stack.3, align 8
  store ptr %l.4, ptr %stack.pointer, align 8
  %stack.next.2 = getelementptr i8, ptr %stack.pointer, i64 8
  br label %loop.header

op6.bb:
  %stack.4 = getelementptr i8, ptr %stack.pointer, i64 -8
  %l.5 = load ptr, ptr %stack.4, align 8
  store i64 1, ptr %l.5, align 8
  %gep.5 = getelementptr i8, ptr %l.5, i64 8
  %l.6 = load ptr, ptr %gep.5, align 8
  %gep.6 = getelementptr i8, ptr %l.6, i64 48
  %l.7 = load ptr, ptr %gep.6, align 8
  tail call void %l.7(ptr nonnull %l.5)
  br label %loop.header
}

@@ -1,6 +1,8 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
 # RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=early-tailduplication -tail-dup-pred-size=1 -tail-dup-succ-size=1 %s -o - | FileCheck %s
-# Check that only the computed goto is not be restrict by tail-dup-pred-size and tail-dup-succ-size.
+#
+# Check that the computed goto and other blocks are restricted by tail-dup-pred-size and tail-dup-succ-size.
+#
 --- |
   @computed_goto.dispatch = constant [5 x ptr] [ptr null, ptr blockaddress(@computed_goto, %bb1), ptr blockaddress(@computed_goto, %bb2), ptr blockaddress(@computed_goto, %bb3), ptr blockaddress(@computed_goto, %bb4)]
   declare i64 @f0()

@@ -0,0 +1,128 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=tailduplication -tail-dup-pred-size=1 -tail-dup-succ-size=1 %s -o - | FileCheck %s
#
# Check that only the computed gotos are duplicated aggressively.
#
--- |
  @computed_goto.dispatch = constant [5 x ptr] [ptr null, ptr blockaddress(@computed_goto, %bb1), ptr blockaddress(@computed_goto, %bb2), ptr blockaddress(@computed_goto, %bb3), ptr blockaddress(@computed_goto, %bb4)]

  declare i64 @f0()
  declare i64 @f1()
  declare i64 @f2()
  declare i64 @f3()
  declare i64 @f4()
  declare i64 @f5()

  define void @computed_goto() {
  start:
    ret void
  bb1:
    ret void
  bb2:
    ret void
  bb3:
    ret void
  bb4:
    ret void
  }

  define void @jump_table() { ret void }
  define void @jump_table_pic() { ret void }
...
---
name: computed_goto
alignment: 1
exposesReturnsTwice: false
legalized: false
regBankSelected: false
selected: false
failedISel: false
tracksRegLiveness: true
body: |
  ; CHECK-LABEL: name: computed_goto
  ; CHECK: bb.0:
  ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f0, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rax
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64_nosp = COPY [[COPY]]
  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_nosp = COPY [[COPY1]]
  ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY2]], @computed_goto.dispatch, $noreg
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.1.bb1 (ir-block-address-taken %ir-block.bb1):
  ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f1, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
  ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gr64 = COPY $rax
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64_nosp = COPY [[COPY3]]
  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_nosp = COPY [[COPY1]]
  ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY2]], @computed_goto.dispatch, $noreg
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.2.bb2 (ir-block-address-taken %ir-block.bb2):
  ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f2, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
  ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gr64 = COPY $rax
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64_nosp = COPY [[COPY4]]
  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_nosp = COPY [[COPY1]]
  ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY2]], @computed_goto.dispatch, $noreg
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.3.bb3 (ir-block-address-taken %ir-block.bb3):
  ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f3, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
  ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gr64 = COPY $rax
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64_nosp = COPY [[COPY5]]
  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_nosp = COPY [[COPY1]]
  ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY2]], @computed_goto.dispatch, $noreg
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.4.bb4 (ir-block-address-taken %ir-block.bb4):
  ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f4, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
  ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gr64 = COPY $rax
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64_nosp = COPY [[COPY6]]
  ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_nosp = COPY [[COPY1]]
  ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY2]], @computed_goto.dispatch, $noreg

  bb.0:
    successors: %bb.5(0x80000000)
    CALL64pcrel32 target-flags(x86-plt) @f0, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
    %0:gr64 = COPY $rax
    %6:gr64_nosp = COPY %0
    JMP_1 %bb.5

  bb.1.bb1 (ir-block-address-taken %ir-block.bb1):
    successors: %bb.5(0x80000000)
    CALL64pcrel32 target-flags(x86-plt) @f1, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
    %1:gr64 = COPY $rax
    %6:gr64_nosp = COPY %1
    JMP_1 %bb.5

  bb.2.bb2 (ir-block-address-taken %ir-block.bb2):
    successors: %bb.5(0x80000000)
    CALL64pcrel32 target-flags(x86-plt) @f2, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
    %2:gr64 = COPY $rax
    %6:gr64_nosp = COPY %2
    JMP_1 %bb.5

  bb.3.bb3 (ir-block-address-taken %ir-block.bb3):
    successors: %bb.5(0x80000000)
    CALL64pcrel32 target-flags(x86-plt) @f3, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
    %3:gr64 = COPY $rax
    %6:gr64_nosp = COPY %3
    JMP_1 %bb.5

  bb.4.bb4 (ir-block-address-taken %ir-block.bb4):
    successors: %bb.5(0x80000000)
    CALL64pcrel32 target-flags(x86-plt) @f4, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
    %4:gr64 = COPY $rax
    %6:gr64_nosp = COPY %4

  bb.5:
    successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
    %5:gr64_nosp = COPY %6
    JMP64m $noreg, 8, %5, @computed_goto.dispatch, $noreg
...