
The instruction-precise, or asynchronous, unwind tables usually take up much more space than the synchronous ones. If a user is concerned about the load size of the program and does not need the features provided by the asynchronous tables, the compiler should be able to generate the more compact variant.

This patch changes the generation of CFI instructions for these cases so that they all come in one chunk in the prologue: it emits a single `.cfi_def_cfa*` instruction followed by `.cfi_offset` ones after all stack adjustments and register spills, and it avoids generating CFI instructions in the epilogue(s) as well as any other superfluous CFI instructions such as `.cfi_remember_state` and `.cfi_restore_state`. Effectively, it reverses the effects of D111411 and D114545 on functions with the `uwtable(sync)` attribute. As a side effect, it also restores the previous behavior of functions that have neither the `uwtable` nor the `nounwind` attribute.

Differential Revision: https://reviews.llvm.org/D153098
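As a minimal sketch of the intended output shape (not taken from the patch or its tests; the function and callee names below are made up for illustration, and the exact instruction sequence depends on the frame layout), a function built for synchronous unwind tables carries all of its CFI in a single prologue chunk and none in the epilogue:

declare void @hypothetical_callee()

define void @sync_unwind_sketch() uwtable(sync) {
  call void @hypothetical_callee()
  ret void
}

; Roughly expected AArch64 output for @sync_unwind_sketch:
;   str x30, [sp, #-16]!     // stack adjustment and link-register spill
;   .cfi_def_cfa_offset 16   // one CFA definition, after all adjustments
;   .cfi_offset w30, -16     // spills described once, still in the prologue
;   bl hypothetical_callee
;   ldr x30, [sp], #16       // epilogue: no .cfi_* instructions emitted
;   ret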
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s

declare void @callee_stack0()
declare void @callee_stack8([8 x i64], i64)
declare void @callee_stack16([8 x i64], i64, i64)

define dso_local void @caller_to0_from0() nounwind {
; CHECK-LABEL: caller_to0_from0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    b callee_stack0
  tail call void @callee_stack0()
  ret void
}

define dso_local void @caller_to0_from8([8 x i64], i64) nounwind {
; CHECK-LABEL: caller_to0_from8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    b callee_stack0

  tail call void @callee_stack0()
  ret void
}

define dso_local void @caller_to8_from0() {
; CHECK-LABEL: caller_to8_from0:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub sp, sp, #32
; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    mov w8, #42
; CHECK-NEXT:    str x8, [sp]
; CHECK-NEXT:    bl callee_stack8
; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT:    add sp, sp, #32
; CHECK-NEXT:    ret

; Caller isn't going to clean up any extra stack we allocate, so it
; can't be a tail call.
  tail call void @callee_stack8([8 x i64] undef, i64 42)
  ret void
}

define dso_local void @caller_to8_from8([8 x i64], i64 %a) {
; CHECK-LABEL: caller_to8_from8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #42
; CHECK-NEXT:    str x8, [sp]
; CHECK-NEXT:    b callee_stack8

; This should reuse our stack area for the 42
  tail call void @callee_stack8([8 x i64] undef, i64 42)
  ret void
}

define dso_local void @caller_to16_from8([8 x i64], i64 %a) {
; CHECK-LABEL: caller_to16_from8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub sp, sp, #32
; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    bl callee_stack16
; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT:    add sp, sp, #32
; CHECK-NEXT:    ret

; Shouldn't be a tail call: we can't use SP+8 because our caller might
; have something there. This may sound obvious, but the implementation
; does some funky aligning.
  tail call void @callee_stack16([8 x i64] undef, i64 undef, i64 undef)
  ret void
}

define dso_local void @caller_to8_from24([8 x i64], i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: caller_to8_from24:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #42
; CHECK-NEXT:    str x8, [sp]
; CHECK-NEXT:    b callee_stack8

; Reuse our area, putting "42" at incoming sp
  tail call void @callee_stack8([8 x i64] undef, i64 42)
  ret void
}

define dso_local void @caller_to16_from16([8 x i64], i64 %a, i64 %b) {
; CHECK-LABEL: caller_to16_from16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldp x8, x9, [sp]
; CHECK-NEXT:    stp x9, x8, [sp]
; CHECK-NEXT:    b callee_stack16

; Here we want to make sure that both loads happen before the stores:
; otherwise either %a or %b will be wrongly clobbered.
  tail call void @callee_stack16([8 x i64] undef, i64 %b, i64 %a)
  ret void
}

@func = dso_local global ptr null

define dso_local void @indirect_tail() {
; CHECK-LABEL: indirect_tail:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adrp x8, func
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ldr x1, [x8, :lo12:func]
; CHECK-NEXT:    br x1

  %fptr = load ptr, ptr @func
  tail call void %fptr(i32 42)
  ret void
}