
R_RISCV_CALL/R_RISCV_CALL_PLT distinction is not necessary and R_RISCV_CALL has been deprecated. Since https://reviews.llvm.org/D132530 `call foo` assembles to R_RISCV_CALL_PLT. The `@plt` suffix is not useful and can be removed now (matching AArch64 and PowerPC). GNU assembler assembles `call foo` to R_RISCV_CALL_PLT since 2022-09 (70f35d72ef04cd23771875c1661c9975044a749c). Without this patch, unconditionally changing MO_CALL to MO_PLT could create `jump .L1@plt, a0`, which is invalid in LLVM integrated assembler and GNU assembler.
65 lines
2.1 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -enable-machine-outliner -mattr=+m -mtriple=riscv64 < %s | FileCheck %s

; Ensure that we won't outline CFIs when they are needed in unwinding.
; func1 is deliberately the same code as func2 below, so the machine outliner
; sees a repeated sequence; the CHECK lines verify the prologue CFI directives
; stay inline in the function body rather than being outlined.
define i32 @func1(i32 %x) #0 {
; CHECK-LABEL: func1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; CHECK-NEXT:    .cfi_offset ra, -8
; CHECK-NEXT:    .cfi_offset s0, -16
; CHECK-NEXT:    mul a0, a0, a0
; CHECK-NEXT:    addi s0, a0, 1
; CHECK-NEXT:    li a0, 4
; CHECK-NEXT:    call __cxa_allocate_exception
; CHECK-NEXT:    sw s0, 0(a0)
; CHECK-NEXT:    lui a1, %hi(_ZTIi)
; CHECK-NEXT:    addi a1, a1, %lo(_ZTIi)
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    call __cxa_throw
entry:
  %mul = mul i32 %x, %x
  %add = add i32 %mul, 1
  %exception = tail call ptr @__cxa_allocate_exception(i64 4)
  store i32 %add, ptr %exception
  tail call void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null)
  unreachable
}
; func2 duplicates func1 to give the outliner a candidate repeated sequence;
; the CHECK lines verify the prologue CFI directives are emitted inline here
; as well (not outlined).
define i32 @func2(i32 %x) #0 {
; CHECK-LABEL: func2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; CHECK-NEXT:    .cfi_offset ra, -8
; CHECK-NEXT:    .cfi_offset s0, -16
; CHECK-NEXT:    mul a0, a0, a0
; CHECK-NEXT:    addi s0, a0, 1
; CHECK-NEXT:    li a0, 4
; CHECK-NEXT:    call __cxa_allocate_exception
; CHECK-NEXT:    sw s0, 0(a0)
; CHECK-NEXT:    lui a1, %hi(_ZTIi)
; CHECK-NEXT:    addi a1, a1, %lo(_ZTIi)
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    call __cxa_throw
entry:
  %mul = mul i32 %x, %x
  %add = add i32 %mul, 1
  %exception = tail call ptr @__cxa_allocate_exception(i64 4)
  store i32 %add, ptr %exception
  tail call void @__cxa_throw(ptr %exception, ptr @_ZTIi, ptr null)
  unreachable
}
; External typeinfo for `int` and the C++ EH runtime entry points used above.
@_ZTIi = external constant ptr

declare ptr @__cxa_allocate_exception(i64)

declare void @__cxa_throw(ptr, ptr, ptr)

; minsize encourages outlining; noreturn matches the unreachable terminators.
attributes #0 = { minsize noreturn }