
; This reverts commit 50cdf6cbc5035345507bb4d23fcb0292272754eb. That patch
; caused miscompiles with vector code and produced odd code for ilp32e.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+zcmp -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=RV32IZCMP
; RUN: llc -mtriple=riscv64 -mattr=+zcmp -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=RV64IZCMP
; RUN: llc -mtriple=riscv32 -mattr=+zcmp,+save-restore \
; RUN: -verify-machineinstrs < %s | FileCheck %s -check-prefixes=RV32IZCMP-SR
; RUN: llc -mtriple=riscv64 -mattr=+zcmp,+save-restore \
; RUN: -verify-machineinstrs < %s | FileCheck %s -check-prefixes=RV64IZCMP-SR
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64I %s

declare void @test(ptr)
declare void @callee_void(ptr)
declare i32 @callee(ptr)
|
; Static 512-byte frame: the Zcmp configs fold ra save/restore and the
; return into cm.push/cm.popretz; the extra 464 bytes beyond the 64-byte
; push increment are allocated with a separate addi.
define i32 @foo() {
; RV32IZCMP-LABEL: foo:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: cm.push {ra}, -64
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 64
; RV32IZCMP-NEXT: .cfi_offset ra, -4
; RV32IZCMP-NEXT: addi sp, sp, -464
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 528
; RV32IZCMP-NEXT: mv a0, sp
; RV32IZCMP-NEXT: call test
; RV32IZCMP-NEXT: addi sp, sp, 464
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 64
; RV32IZCMP-NEXT: cm.popretz {ra}, 64
;
; RV64IZCMP-LABEL: foo:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: cm.push {ra}, -64
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 64
; RV64IZCMP-NEXT: .cfi_offset ra, -8
; RV64IZCMP-NEXT: addi sp, sp, -464
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 528
; RV64IZCMP-NEXT: mv a0, sp
; RV64IZCMP-NEXT: call test
; RV64IZCMP-NEXT: addi sp, sp, 464
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 64
; RV64IZCMP-NEXT: cm.popretz {ra}, 64
;
; RV32IZCMP-SR-LABEL: foo:
; RV32IZCMP-SR: # %bb.0:
; RV32IZCMP-SR-NEXT: cm.push {ra}, -64
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
; RV32IZCMP-SR-NEXT: addi sp, sp, -464
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 528
; RV32IZCMP-SR-NEXT: mv a0, sp
; RV32IZCMP-SR-NEXT: call test
; RV32IZCMP-SR-NEXT: addi sp, sp, 464
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV32IZCMP-SR-NEXT: cm.popretz {ra}, 64
;
; RV64IZCMP-SR-LABEL: foo:
; RV64IZCMP-SR: # %bb.0:
; RV64IZCMP-SR-NEXT: cm.push {ra}, -64
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
; RV64IZCMP-SR-NEXT: addi sp, sp, -464
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 528
; RV64IZCMP-SR-NEXT: mv a0, sp
; RV64IZCMP-SR-NEXT: call test
; RV64IZCMP-SR-NEXT: addi sp, sp, 464
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV64IZCMP-SR-NEXT: cm.popretz {ra}, 64
;
; RV32I-LABEL: foo:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -528
; RV32I-NEXT: .cfi_def_cfa_offset 528
; RV32I-NEXT: sw ra, 524(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: addi a0, sp, 12
; RV32I-NEXT: call test
; RV32I-NEXT: li a0, 0
; RV32I-NEXT: lw ra, 524(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: addi sp, sp, 528
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: foo:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -528
; RV64I-NEXT: .cfi_def_cfa_offset 528
; RV64I-NEXT: sd ra, 520(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: addi a0, sp, 8
; RV64I-NEXT: call test
; RV64I-NEXT: li a0, 0
; RV64I-NEXT: ld ra, 520(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: addi sp, sp, 528
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
  %1 = alloca [512 x i8]
  %2 = getelementptr [512 x i8], ptr %1, i32 0, i32 0
  call void @test(ptr %2)
  ret i32 0
}
; Dynamic alloca forces a frame pointer (s0); return value 0 lets the Zcmp
; configs use cm.popretz, which pops, zeroes a0, and returns in one insn.
define i32 @pushpopret0(i32 signext %size) {
; RV32IZCMP-LABEL: pushpopret0:
; RV32IZCMP: # %bb.0: # %entry
; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
; RV32IZCMP-NEXT: .cfi_offset ra, -8
; RV32IZCMP-NEXT: .cfi_offset s0, -4
; RV32IZCMP-NEXT: addi s0, sp, 16
; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-NEXT: addi a0, a0, 15
; RV32IZCMP-NEXT: andi a0, a0, -16
; RV32IZCMP-NEXT: sub a0, sp, a0
; RV32IZCMP-NEXT: mv sp, a0
; RV32IZCMP-NEXT: call callee_void
; RV32IZCMP-NEXT: addi sp, s0, -16
; RV32IZCMP-NEXT: .cfi_def_cfa sp, 16
; RV32IZCMP-NEXT: cm.popretz {ra, s0}, 16
;
; RV64IZCMP-LABEL: pushpopret0:
; RV64IZCMP: # %bb.0: # %entry
; RV64IZCMP-NEXT: cm.push {ra, s0}, -16
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
; RV64IZCMP-NEXT: .cfi_offset ra, -16
; RV64IZCMP-NEXT: .cfi_offset s0, -8
; RV64IZCMP-NEXT: addi s0, sp, 16
; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-NEXT: slli a0, a0, 32
; RV64IZCMP-NEXT: srli a0, a0, 32
; RV64IZCMP-NEXT: addi a0, a0, 15
; RV64IZCMP-NEXT: andi a0, a0, -16
; RV64IZCMP-NEXT: sub a0, sp, a0
; RV64IZCMP-NEXT: mv sp, a0
; RV64IZCMP-NEXT: call callee_void
; RV64IZCMP-NEXT: addi sp, s0, -16
; RV64IZCMP-NEXT: .cfi_def_cfa sp, 16
; RV64IZCMP-NEXT: cm.popretz {ra, s0}, 16
;
; RV32IZCMP-SR-LABEL: pushpopret0:
; RV32IZCMP-SR: # %bb.0: # %entry
; RV32IZCMP-SR-NEXT: cm.push {ra, s0}, -16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -8
; RV32IZCMP-SR-NEXT: .cfi_offset s0, -4
; RV32IZCMP-SR-NEXT: addi s0, sp, 16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-SR-NEXT: addi a0, a0, 15
; RV32IZCMP-SR-NEXT: andi a0, a0, -16
; RV32IZCMP-SR-NEXT: sub a0, sp, a0
; RV32IZCMP-SR-NEXT: mv sp, a0
; RV32IZCMP-SR-NEXT: call callee_void
; RV32IZCMP-SR-NEXT: addi sp, s0, -16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa sp, 16
; RV32IZCMP-SR-NEXT: cm.popretz {ra, s0}, 16
;
; RV64IZCMP-SR-LABEL: pushpopret0:
; RV64IZCMP-SR: # %bb.0: # %entry
; RV64IZCMP-SR-NEXT: cm.push {ra, s0}, -16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -16
; RV64IZCMP-SR-NEXT: .cfi_offset s0, -8
; RV64IZCMP-SR-NEXT: addi s0, sp, 16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-SR-NEXT: slli a0, a0, 32
; RV64IZCMP-SR-NEXT: srli a0, a0, 32
; RV64IZCMP-SR-NEXT: addi a0, a0, 15
; RV64IZCMP-SR-NEXT: andi a0, a0, -16
; RV64IZCMP-SR-NEXT: sub a0, sp, a0
; RV64IZCMP-SR-NEXT: mv sp, a0
; RV64IZCMP-SR-NEXT: call callee_void
; RV64IZCMP-SR-NEXT: addi sp, s0, -16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa sp, 16
; RV64IZCMP-SR-NEXT: cm.popretz {ra, s0}, 16
;
; RV32I-LABEL: pushpopret0:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: .cfi_def_cfa_offset 16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: .cfi_offset s0, -8
; RV32I-NEXT: addi s0, sp, 16
; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: addi a0, a0, 15
; RV32I-NEXT: andi a0, a0, -16
; RV32I-NEXT: sub a0, sp, a0
; RV32I-NEXT: mv sp, a0
; RV32I-NEXT: call callee_void
; RV32I-NEXT: li a0, 0
; RV32I-NEXT: addi sp, s0, -16
; RV32I-NEXT: .cfi_def_cfa sp, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: .cfi_restore s0
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: pushpopret0:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: .cfi_def_cfa_offset 16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: .cfi_offset s0, -16
; RV64I-NEXT: addi s0, sp, 16
; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: addi a0, a0, 15
; RV64I-NEXT: andi a0, a0, -16
; RV64I-NEXT: sub a0, sp, a0
; RV64I-NEXT: mv sp, a0
; RV64I-NEXT: call callee_void
; RV64I-NEXT: li a0, 0
; RV64I-NEXT: addi sp, s0, -16
; RV64I-NEXT: .cfi_def_cfa sp, 16
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: .cfi_restore s0
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
entry:
  %0 = alloca i8, i32 %size, align 16
  call void @callee_void(ptr nonnull %0)
  ret i32 0
}
; Same as pushpopret0 but returns 1: a0 must be materialized with li, so
; the Zcmp epilogue uses cm.popret (no implicit zeroing) rather than
; cm.popretz.
define i32 @pushpopret1(i32 signext %size) {
; RV32IZCMP-LABEL: pushpopret1:
; RV32IZCMP: # %bb.0: # %entry
; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
; RV32IZCMP-NEXT: .cfi_offset ra, -8
; RV32IZCMP-NEXT: .cfi_offset s0, -4
; RV32IZCMP-NEXT: addi s0, sp, 16
; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-NEXT: addi a0, a0, 15
; RV32IZCMP-NEXT: andi a0, a0, -16
; RV32IZCMP-NEXT: sub a0, sp, a0
; RV32IZCMP-NEXT: mv sp, a0
; RV32IZCMP-NEXT: call callee_void
; RV32IZCMP-NEXT: li a0, 1
; RV32IZCMP-NEXT: addi sp, s0, -16
; RV32IZCMP-NEXT: .cfi_def_cfa sp, 16
; RV32IZCMP-NEXT: cm.popret {ra, s0}, 16
;
; RV64IZCMP-LABEL: pushpopret1:
; RV64IZCMP: # %bb.0: # %entry
; RV64IZCMP-NEXT: cm.push {ra, s0}, -16
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
; RV64IZCMP-NEXT: .cfi_offset ra, -16
; RV64IZCMP-NEXT: .cfi_offset s0, -8
; RV64IZCMP-NEXT: addi s0, sp, 16
; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-NEXT: slli a0, a0, 32
; RV64IZCMP-NEXT: srli a0, a0, 32
; RV64IZCMP-NEXT: addi a0, a0, 15
; RV64IZCMP-NEXT: andi a0, a0, -16
; RV64IZCMP-NEXT: sub a0, sp, a0
; RV64IZCMP-NEXT: mv sp, a0
; RV64IZCMP-NEXT: call callee_void
; RV64IZCMP-NEXT: li a0, 1
; RV64IZCMP-NEXT: addi sp, s0, -16
; RV64IZCMP-NEXT: .cfi_def_cfa sp, 16
; RV64IZCMP-NEXT: cm.popret {ra, s0}, 16
;
; RV32IZCMP-SR-LABEL: pushpopret1:
; RV32IZCMP-SR: # %bb.0: # %entry
; RV32IZCMP-SR-NEXT: cm.push {ra, s0}, -16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -8
; RV32IZCMP-SR-NEXT: .cfi_offset s0, -4
; RV32IZCMP-SR-NEXT: addi s0, sp, 16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-SR-NEXT: addi a0, a0, 15
; RV32IZCMP-SR-NEXT: andi a0, a0, -16
; RV32IZCMP-SR-NEXT: sub a0, sp, a0
; RV32IZCMP-SR-NEXT: mv sp, a0
; RV32IZCMP-SR-NEXT: call callee_void
; RV32IZCMP-SR-NEXT: li a0, 1
; RV32IZCMP-SR-NEXT: addi sp, s0, -16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa sp, 16
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
;
; RV64IZCMP-SR-LABEL: pushpopret1:
; RV64IZCMP-SR: # %bb.0: # %entry
; RV64IZCMP-SR-NEXT: cm.push {ra, s0}, -16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -16
; RV64IZCMP-SR-NEXT: .cfi_offset s0, -8
; RV64IZCMP-SR-NEXT: addi s0, sp, 16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-SR-NEXT: slli a0, a0, 32
; RV64IZCMP-SR-NEXT: srli a0, a0, 32
; RV64IZCMP-SR-NEXT: addi a0, a0, 15
; RV64IZCMP-SR-NEXT: andi a0, a0, -16
; RV64IZCMP-SR-NEXT: sub a0, sp, a0
; RV64IZCMP-SR-NEXT: mv sp, a0
; RV64IZCMP-SR-NEXT: call callee_void
; RV64IZCMP-SR-NEXT: li a0, 1
; RV64IZCMP-SR-NEXT: addi sp, s0, -16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa sp, 16
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
;
; RV32I-LABEL: pushpopret1:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: .cfi_def_cfa_offset 16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: .cfi_offset s0, -8
; RV32I-NEXT: addi s0, sp, 16
; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: addi a0, a0, 15
; RV32I-NEXT: andi a0, a0, -16
; RV32I-NEXT: sub a0, sp, a0
; RV32I-NEXT: mv sp, a0
; RV32I-NEXT: call callee_void
; RV32I-NEXT: li a0, 1
; RV32I-NEXT: addi sp, s0, -16
; RV32I-NEXT: .cfi_def_cfa sp, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: .cfi_restore s0
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: pushpopret1:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: .cfi_def_cfa_offset 16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: .cfi_offset s0, -16
; RV64I-NEXT: addi s0, sp, 16
; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: addi a0, a0, 15
; RV64I-NEXT: andi a0, a0, -16
; RV64I-NEXT: sub a0, sp, a0
; RV64I-NEXT: mv sp, a0
; RV64I-NEXT: call callee_void
; RV64I-NEXT: li a0, 1
; RV64I-NEXT: addi sp, s0, -16
; RV64I-NEXT: .cfi_def_cfa sp, 16
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: .cfi_restore s0
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
entry:
  %0 = alloca i8, i32 %size, align 16
  call void @callee_void(ptr nonnull %0)
  ret i32 1
}
; Same shape with a negative return value (-1), again using cm.popret.
define i32 @pushpopretneg1(i32 signext %size) {
; RV32IZCMP-LABEL: pushpopretneg1:
; RV32IZCMP: # %bb.0: # %entry
; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
; RV32IZCMP-NEXT: .cfi_offset ra, -8
; RV32IZCMP-NEXT: .cfi_offset s0, -4
; RV32IZCMP-NEXT: addi s0, sp, 16
; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-NEXT: addi a0, a0, 15
; RV32IZCMP-NEXT: andi a0, a0, -16
; RV32IZCMP-NEXT: sub a0, sp, a0
; RV32IZCMP-NEXT: mv sp, a0
; RV32IZCMP-NEXT: call callee_void
; RV32IZCMP-NEXT: li a0, -1
; RV32IZCMP-NEXT: addi sp, s0, -16
; RV32IZCMP-NEXT: .cfi_def_cfa sp, 16
; RV32IZCMP-NEXT: cm.popret {ra, s0}, 16
;
; RV64IZCMP-LABEL: pushpopretneg1:
; RV64IZCMP: # %bb.0: # %entry
; RV64IZCMP-NEXT: cm.push {ra, s0}, -16
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
; RV64IZCMP-NEXT: .cfi_offset ra, -16
; RV64IZCMP-NEXT: .cfi_offset s0, -8
; RV64IZCMP-NEXT: addi s0, sp, 16
; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-NEXT: slli a0, a0, 32
; RV64IZCMP-NEXT: srli a0, a0, 32
; RV64IZCMP-NEXT: addi a0, a0, 15
; RV64IZCMP-NEXT: andi a0, a0, -16
; RV64IZCMP-NEXT: sub a0, sp, a0
; RV64IZCMP-NEXT: mv sp, a0
; RV64IZCMP-NEXT: call callee_void
; RV64IZCMP-NEXT: li a0, -1
; RV64IZCMP-NEXT: addi sp, s0, -16
; RV64IZCMP-NEXT: .cfi_def_cfa sp, 16
; RV64IZCMP-NEXT: cm.popret {ra, s0}, 16
;
; RV32IZCMP-SR-LABEL: pushpopretneg1:
; RV32IZCMP-SR: # %bb.0: # %entry
; RV32IZCMP-SR-NEXT: cm.push {ra, s0}, -16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -8
; RV32IZCMP-SR-NEXT: .cfi_offset s0, -4
; RV32IZCMP-SR-NEXT: addi s0, sp, 16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-SR-NEXT: addi a0, a0, 15
; RV32IZCMP-SR-NEXT: andi a0, a0, -16
; RV32IZCMP-SR-NEXT: sub a0, sp, a0
; RV32IZCMP-SR-NEXT: mv sp, a0
; RV32IZCMP-SR-NEXT: call callee_void
; RV32IZCMP-SR-NEXT: li a0, -1
; RV32IZCMP-SR-NEXT: addi sp, s0, -16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa sp, 16
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
;
; RV64IZCMP-SR-LABEL: pushpopretneg1:
; RV64IZCMP-SR: # %bb.0: # %entry
; RV64IZCMP-SR-NEXT: cm.push {ra, s0}, -16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -16
; RV64IZCMP-SR-NEXT: .cfi_offset s0, -8
; RV64IZCMP-SR-NEXT: addi s0, sp, 16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-SR-NEXT: slli a0, a0, 32
; RV64IZCMP-SR-NEXT: srli a0, a0, 32
; RV64IZCMP-SR-NEXT: addi a0, a0, 15
; RV64IZCMP-SR-NEXT: andi a0, a0, -16
; RV64IZCMP-SR-NEXT: sub a0, sp, a0
; RV64IZCMP-SR-NEXT: mv sp, a0
; RV64IZCMP-SR-NEXT: call callee_void
; RV64IZCMP-SR-NEXT: li a0, -1
; RV64IZCMP-SR-NEXT: addi sp, s0, -16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa sp, 16
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
;
; RV32I-LABEL: pushpopretneg1:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: .cfi_def_cfa_offset 16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: .cfi_offset s0, -8
; RV32I-NEXT: addi s0, sp, 16
; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: addi a0, a0, 15
; RV32I-NEXT: andi a0, a0, -16
; RV32I-NEXT: sub a0, sp, a0
; RV32I-NEXT: mv sp, a0
; RV32I-NEXT: call callee_void
; RV32I-NEXT: li a0, -1
; RV32I-NEXT: addi sp, s0, -16
; RV32I-NEXT: .cfi_def_cfa sp, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: .cfi_restore s0
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: pushpopretneg1:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: .cfi_def_cfa_offset 16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: .cfi_offset s0, -16
; RV64I-NEXT: addi s0, sp, 16
; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: addi a0, a0, 15
; RV64I-NEXT: andi a0, a0, -16
; RV64I-NEXT: sub a0, sp, a0
; RV64I-NEXT: mv sp, a0
; RV64I-NEXT: call callee_void
; RV64I-NEXT: li a0, -1
; RV64I-NEXT: addi sp, s0, -16
; RV64I-NEXT: .cfi_def_cfa sp, 16
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: .cfi_restore s0
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
entry:
  %0 = alloca i8, i32 %size, align 16
  call void @callee_void(ptr nonnull %0)
  ret i32 -1
}
; Same shape returning 2, covering another non-zero cm.popret case.
define i32 @pushpopret2(i32 signext %size) {
; RV32IZCMP-LABEL: pushpopret2:
; RV32IZCMP: # %bb.0: # %entry
; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
; RV32IZCMP-NEXT: .cfi_offset ra, -8
; RV32IZCMP-NEXT: .cfi_offset s0, -4
; RV32IZCMP-NEXT: addi s0, sp, 16
; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-NEXT: addi a0, a0, 15
; RV32IZCMP-NEXT: andi a0, a0, -16
; RV32IZCMP-NEXT: sub a0, sp, a0
; RV32IZCMP-NEXT: mv sp, a0
; RV32IZCMP-NEXT: call callee_void
; RV32IZCMP-NEXT: li a0, 2
; RV32IZCMP-NEXT: addi sp, s0, -16
; RV32IZCMP-NEXT: .cfi_def_cfa sp, 16
; RV32IZCMP-NEXT: cm.popret {ra, s0}, 16
;
; RV64IZCMP-LABEL: pushpopret2:
; RV64IZCMP: # %bb.0: # %entry
; RV64IZCMP-NEXT: cm.push {ra, s0}, -16
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
; RV64IZCMP-NEXT: .cfi_offset ra, -16
; RV64IZCMP-NEXT: .cfi_offset s0, -8
; RV64IZCMP-NEXT: addi s0, sp, 16
; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-NEXT: slli a0, a0, 32
; RV64IZCMP-NEXT: srli a0, a0, 32
; RV64IZCMP-NEXT: addi a0, a0, 15
; RV64IZCMP-NEXT: andi a0, a0, -16
; RV64IZCMP-NEXT: sub a0, sp, a0
; RV64IZCMP-NEXT: mv sp, a0
; RV64IZCMP-NEXT: call callee_void
; RV64IZCMP-NEXT: li a0, 2
; RV64IZCMP-NEXT: addi sp, s0, -16
; RV64IZCMP-NEXT: .cfi_def_cfa sp, 16
; RV64IZCMP-NEXT: cm.popret {ra, s0}, 16
;
; RV32IZCMP-SR-LABEL: pushpopret2:
; RV32IZCMP-SR: # %bb.0: # %entry
; RV32IZCMP-SR-NEXT: cm.push {ra, s0}, -16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -8
; RV32IZCMP-SR-NEXT: .cfi_offset s0, -4
; RV32IZCMP-SR-NEXT: addi s0, sp, 16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-SR-NEXT: addi a0, a0, 15
; RV32IZCMP-SR-NEXT: andi a0, a0, -16
; RV32IZCMP-SR-NEXT: sub a0, sp, a0
; RV32IZCMP-SR-NEXT: mv sp, a0
; RV32IZCMP-SR-NEXT: call callee_void
; RV32IZCMP-SR-NEXT: li a0, 2
; RV32IZCMP-SR-NEXT: addi sp, s0, -16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa sp, 16
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
;
; RV64IZCMP-SR-LABEL: pushpopret2:
; RV64IZCMP-SR: # %bb.0: # %entry
; RV64IZCMP-SR-NEXT: cm.push {ra, s0}, -16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -16
; RV64IZCMP-SR-NEXT: .cfi_offset s0, -8
; RV64IZCMP-SR-NEXT: addi s0, sp, 16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-SR-NEXT: slli a0, a0, 32
; RV64IZCMP-SR-NEXT: srli a0, a0, 32
; RV64IZCMP-SR-NEXT: addi a0, a0, 15
; RV64IZCMP-SR-NEXT: andi a0, a0, -16
; RV64IZCMP-SR-NEXT: sub a0, sp, a0
; RV64IZCMP-SR-NEXT: mv sp, a0
; RV64IZCMP-SR-NEXT: call callee_void
; RV64IZCMP-SR-NEXT: li a0, 2
; RV64IZCMP-SR-NEXT: addi sp, s0, -16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa sp, 16
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
;
; RV32I-LABEL: pushpopret2:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: .cfi_def_cfa_offset 16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: .cfi_offset s0, -8
; RV32I-NEXT: addi s0, sp, 16
; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: addi a0, a0, 15
; RV32I-NEXT: andi a0, a0, -16
; RV32I-NEXT: sub a0, sp, a0
; RV32I-NEXT: mv sp, a0
; RV32I-NEXT: call callee_void
; RV32I-NEXT: li a0, 2
; RV32I-NEXT: addi sp, s0, -16
; RV32I-NEXT: .cfi_def_cfa sp, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: .cfi_restore s0
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: pushpopret2:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: .cfi_def_cfa_offset 16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: .cfi_offset s0, -16
; RV64I-NEXT: addi s0, sp, 16
; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: addi a0, a0, 15
; RV64I-NEXT: andi a0, a0, -16
; RV64I-NEXT: sub a0, sp, a0
; RV64I-NEXT: mv sp, a0
; RV64I-NEXT: call callee_void
; RV64I-NEXT: li a0, 2
; RV64I-NEXT: addi sp, s0, -16
; RV64I-NEXT: .cfi_def_cfa sp, 16
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: .cfi_restore s0
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
entry:
  %0 = alloca i8, i32 %size, align 16
  call void @callee_void(ptr nonnull %0)
  ret i32 2
}
; Tail call after a dynamic alloca: the epilogue must fully restore the
; frame (cm.pop, not cm.popret) and emit .cfi_restore before the tail jump.
define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
; RV32IZCMP-LABEL: tailcall:
; RV32IZCMP: # %bb.0: # %entry
; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
; RV32IZCMP-NEXT: .cfi_offset ra, -8
; RV32IZCMP-NEXT: .cfi_offset s0, -4
; RV32IZCMP-NEXT: addi s0, sp, 16
; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-NEXT: addi a0, a0, 15
; RV32IZCMP-NEXT: andi a0, a0, -16
; RV32IZCMP-NEXT: sub a0, sp, a0
; RV32IZCMP-NEXT: mv sp, a0
; RV32IZCMP-NEXT: addi sp, s0, -16
; RV32IZCMP-NEXT: .cfi_def_cfa sp, 16
; RV32IZCMP-NEXT: cm.pop {ra, s0}, 16
; RV32IZCMP-NEXT: .cfi_restore ra
; RV32IZCMP-NEXT: .cfi_restore s0
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-NEXT: tail callee
;
; RV64IZCMP-LABEL: tailcall:
; RV64IZCMP: # %bb.0: # %entry
; RV64IZCMP-NEXT: cm.push {ra, s0}, -16
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
; RV64IZCMP-NEXT: .cfi_offset ra, -16
; RV64IZCMP-NEXT: .cfi_offset s0, -8
; RV64IZCMP-NEXT: addi s0, sp, 16
; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-NEXT: slli a0, a0, 32
; RV64IZCMP-NEXT: srli a0, a0, 32
; RV64IZCMP-NEXT: addi a0, a0, 15
; RV64IZCMP-NEXT: andi a0, a0, -16
; RV64IZCMP-NEXT: sub a0, sp, a0
; RV64IZCMP-NEXT: mv sp, a0
; RV64IZCMP-NEXT: addi sp, s0, -16
; RV64IZCMP-NEXT: .cfi_def_cfa sp, 16
; RV64IZCMP-NEXT: cm.pop {ra, s0}, 16
; RV64IZCMP-NEXT: .cfi_restore ra
; RV64IZCMP-NEXT: .cfi_restore s0
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-NEXT: tail callee
;
; RV32IZCMP-SR-LABEL: tailcall:
; RV32IZCMP-SR: # %bb.0: # %entry
; RV32IZCMP-SR-NEXT: cm.push {ra, s0}, -16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -8
; RV32IZCMP-SR-NEXT: .cfi_offset s0, -4
; RV32IZCMP-SR-NEXT: addi s0, sp, 16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-SR-NEXT: addi a0, a0, 15
; RV32IZCMP-SR-NEXT: andi a0, a0, -16
; RV32IZCMP-SR-NEXT: sub a0, sp, a0
; RV32IZCMP-SR-NEXT: mv sp, a0
; RV32IZCMP-SR-NEXT: addi sp, s0, -16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa sp, 16
; RV32IZCMP-SR-NEXT: cm.pop {ra, s0}, 16
; RV32IZCMP-SR-NEXT: .cfi_restore ra
; RV32IZCMP-SR-NEXT: .cfi_restore s0
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-SR-NEXT: tail callee
;
; RV64IZCMP-SR-LABEL: tailcall:
; RV64IZCMP-SR: # %bb.0: # %entry
; RV64IZCMP-SR-NEXT: cm.push {ra, s0}, -16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -16
; RV64IZCMP-SR-NEXT: .cfi_offset s0, -8
; RV64IZCMP-SR-NEXT: addi s0, sp, 16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-SR-NEXT: slli a0, a0, 32
; RV64IZCMP-SR-NEXT: srli a0, a0, 32
; RV64IZCMP-SR-NEXT: addi a0, a0, 15
; RV64IZCMP-SR-NEXT: andi a0, a0, -16
; RV64IZCMP-SR-NEXT: sub a0, sp, a0
; RV64IZCMP-SR-NEXT: mv sp, a0
; RV64IZCMP-SR-NEXT: addi sp, s0, -16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa sp, 16
; RV64IZCMP-SR-NEXT: cm.pop {ra, s0}, 16
; RV64IZCMP-SR-NEXT: .cfi_restore ra
; RV64IZCMP-SR-NEXT: .cfi_restore s0
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-SR-NEXT: tail callee
;
; RV32I-LABEL: tailcall:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: .cfi_def_cfa_offset 16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: .cfi_offset s0, -8
; RV32I-NEXT: addi s0, sp, 16
; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: addi a0, a0, 15
; RV32I-NEXT: andi a0, a0, -16
; RV32I-NEXT: sub a0, sp, a0
; RV32I-NEXT: mv sp, a0
; RV32I-NEXT: addi sp, s0, -16
; RV32I-NEXT: .cfi_def_cfa sp, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: .cfi_restore s0
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: tail callee
;
; RV64I-LABEL: tailcall:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: .cfi_def_cfa_offset 16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: .cfi_offset s0, -16
; RV64I-NEXT: addi s0, sp, 16
; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: addi a0, a0, 15
; RV64I-NEXT: andi a0, a0, -16
; RV64I-NEXT: sub a0, sp, a0
; RV64I-NEXT: mv sp, a0
; RV64I-NEXT: addi sp, s0, -16
; RV64I-NEXT: .cfi_def_cfa sp, 16
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: .cfi_restore s0
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: tail callee
entry:
  %0 = alloca i8, i32 %size, align 16
  %1 = tail call i32 @callee(ptr nonnull %0)
  ret i32 %1
}

@var = global [5 x i32] zeroinitializer
;; nocompress: allocates a VLA (forcing a frame pointer in s0), keeps the stack
;; pointer result in a saved register across a call, and ends in a tail call.
;; With Zcmp the prologue/epilogue must use cm.push/cm.pop (not cm.popret,
;; since the function exits via `tail callee`), and the CFI must track the
;; CFA moving between sp and s0. CHECK lines below are autogenerated by
;; update_llc_test_checks.py -- do not edit them by hand.
define i32 @nocompress(i32 signext %size) {
; RV32IZCMP-LABEL: nocompress:
; RV32IZCMP: # %bb.0: # %entry
; RV32IZCMP-NEXT: cm.push {ra, s0-s8}, -48
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 48
; RV32IZCMP-NEXT: .cfi_offset ra, -40
; RV32IZCMP-NEXT: .cfi_offset s0, -36
; RV32IZCMP-NEXT: .cfi_offset s1, -32
; RV32IZCMP-NEXT: .cfi_offset s2, -28
; RV32IZCMP-NEXT: .cfi_offset s3, -24
; RV32IZCMP-NEXT: .cfi_offset s4, -20
; RV32IZCMP-NEXT: .cfi_offset s5, -16
; RV32IZCMP-NEXT: .cfi_offset s6, -12
; RV32IZCMP-NEXT: .cfi_offset s7, -8
; RV32IZCMP-NEXT: .cfi_offset s8, -4
; RV32IZCMP-NEXT: addi s0, sp, 48
; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-NEXT: addi a0, a0, 15
; RV32IZCMP-NEXT: andi a0, a0, -16
; RV32IZCMP-NEXT: sub s2, sp, a0
; RV32IZCMP-NEXT: mv sp, s2
; RV32IZCMP-NEXT: lui s1, %hi(var)
; RV32IZCMP-NEXT: lw s3, %lo(var)(s1)
; RV32IZCMP-NEXT: lw s4, %lo(var+4)(s1)
; RV32IZCMP-NEXT: lw s5, %lo(var+8)(s1)
; RV32IZCMP-NEXT: lw s6, %lo(var+12)(s1)
; RV32IZCMP-NEXT: addi s7, s1, %lo(var)
; RV32IZCMP-NEXT: lw s8, 16(s7)
; RV32IZCMP-NEXT: mv a0, s2
; RV32IZCMP-NEXT: call callee_void
; RV32IZCMP-NEXT: sw s8, 16(s7)
; RV32IZCMP-NEXT: sw s6, %lo(var+12)(s1)
; RV32IZCMP-NEXT: sw s5, %lo(var+8)(s1)
; RV32IZCMP-NEXT: sw s4, %lo(var+4)(s1)
; RV32IZCMP-NEXT: sw s3, %lo(var)(s1)
; RV32IZCMP-NEXT: mv a0, s2
; RV32IZCMP-NEXT: addi sp, s0, -48
; RV32IZCMP-NEXT: .cfi_def_cfa sp, 48
; RV32IZCMP-NEXT: cm.pop {ra, s0-s8}, 48
; RV32IZCMP-NEXT: .cfi_restore ra
; RV32IZCMP-NEXT: .cfi_restore s0
; RV32IZCMP-NEXT: .cfi_restore s1
; RV32IZCMP-NEXT: .cfi_restore s2
; RV32IZCMP-NEXT: .cfi_restore s3
; RV32IZCMP-NEXT: .cfi_restore s4
; RV32IZCMP-NEXT: .cfi_restore s5
; RV32IZCMP-NEXT: .cfi_restore s6
; RV32IZCMP-NEXT: .cfi_restore s7
; RV32IZCMP-NEXT: .cfi_restore s8
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-NEXT: tail callee
;
; RV64IZCMP-LABEL: nocompress:
; RV64IZCMP: # %bb.0: # %entry
; RV64IZCMP-NEXT: cm.push {ra, s0-s8}, -80
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 80
; RV64IZCMP-NEXT: .cfi_offset ra, -80
; RV64IZCMP-NEXT: .cfi_offset s0, -72
; RV64IZCMP-NEXT: .cfi_offset s1, -64
; RV64IZCMP-NEXT: .cfi_offset s2, -56
; RV64IZCMP-NEXT: .cfi_offset s3, -48
; RV64IZCMP-NEXT: .cfi_offset s4, -40
; RV64IZCMP-NEXT: .cfi_offset s5, -32
; RV64IZCMP-NEXT: .cfi_offset s6, -24
; RV64IZCMP-NEXT: .cfi_offset s7, -16
; RV64IZCMP-NEXT: .cfi_offset s8, -8
; RV64IZCMP-NEXT: addi s0, sp, 80
; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-NEXT: slli a0, a0, 32
; RV64IZCMP-NEXT: srli a0, a0, 32
; RV64IZCMP-NEXT: addi a0, a0, 15
; RV64IZCMP-NEXT: andi a0, a0, -16
; RV64IZCMP-NEXT: sub s2, sp, a0
; RV64IZCMP-NEXT: mv sp, s2
; RV64IZCMP-NEXT: lui s1, %hi(var)
; RV64IZCMP-NEXT: lw s3, %lo(var)(s1)
; RV64IZCMP-NEXT: lw s4, %lo(var+4)(s1)
; RV64IZCMP-NEXT: lw s5, %lo(var+8)(s1)
; RV64IZCMP-NEXT: lw s6, %lo(var+12)(s1)
; RV64IZCMP-NEXT: addi s7, s1, %lo(var)
; RV64IZCMP-NEXT: lw s8, 16(s7)
; RV64IZCMP-NEXT: mv a0, s2
; RV64IZCMP-NEXT: call callee_void
; RV64IZCMP-NEXT: sw s8, 16(s7)
; RV64IZCMP-NEXT: sw s6, %lo(var+12)(s1)
; RV64IZCMP-NEXT: sw s5, %lo(var+8)(s1)
; RV64IZCMP-NEXT: sw s4, %lo(var+4)(s1)
; RV64IZCMP-NEXT: sw s3, %lo(var)(s1)
; RV64IZCMP-NEXT: mv a0, s2
; RV64IZCMP-NEXT: addi sp, s0, -80
; RV64IZCMP-NEXT: .cfi_def_cfa sp, 80
; RV64IZCMP-NEXT: cm.pop {ra, s0-s8}, 80
; RV64IZCMP-NEXT: .cfi_restore ra
; RV64IZCMP-NEXT: .cfi_restore s0
; RV64IZCMP-NEXT: .cfi_restore s1
; RV64IZCMP-NEXT: .cfi_restore s2
; RV64IZCMP-NEXT: .cfi_restore s3
; RV64IZCMP-NEXT: .cfi_restore s4
; RV64IZCMP-NEXT: .cfi_restore s5
; RV64IZCMP-NEXT: .cfi_restore s6
; RV64IZCMP-NEXT: .cfi_restore s7
; RV64IZCMP-NEXT: .cfi_restore s8
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-NEXT: tail callee
;
; RV32IZCMP-SR-LABEL: nocompress:
; RV32IZCMP-SR: # %bb.0: # %entry
; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s8}, -48
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 48
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -40
; RV32IZCMP-SR-NEXT: .cfi_offset s0, -36
; RV32IZCMP-SR-NEXT: .cfi_offset s1, -32
; RV32IZCMP-SR-NEXT: .cfi_offset s2, -28
; RV32IZCMP-SR-NEXT: .cfi_offset s3, -24
; RV32IZCMP-SR-NEXT: .cfi_offset s4, -20
; RV32IZCMP-SR-NEXT: .cfi_offset s5, -16
; RV32IZCMP-SR-NEXT: .cfi_offset s6, -12
; RV32IZCMP-SR-NEXT: .cfi_offset s7, -8
; RV32IZCMP-SR-NEXT: .cfi_offset s8, -4
; RV32IZCMP-SR-NEXT: addi s0, sp, 48
; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-SR-NEXT: addi a0, a0, 15
; RV32IZCMP-SR-NEXT: andi a0, a0, -16
; RV32IZCMP-SR-NEXT: sub s2, sp, a0
; RV32IZCMP-SR-NEXT: mv sp, s2
; RV32IZCMP-SR-NEXT: lui s1, %hi(var)
; RV32IZCMP-SR-NEXT: lw s3, %lo(var)(s1)
; RV32IZCMP-SR-NEXT: lw s4, %lo(var+4)(s1)
; RV32IZCMP-SR-NEXT: lw s5, %lo(var+8)(s1)
; RV32IZCMP-SR-NEXT: lw s6, %lo(var+12)(s1)
; RV32IZCMP-SR-NEXT: addi s7, s1, %lo(var)
; RV32IZCMP-SR-NEXT: lw s8, 16(s7)
; RV32IZCMP-SR-NEXT: mv a0, s2
; RV32IZCMP-SR-NEXT: call callee_void
; RV32IZCMP-SR-NEXT: sw s8, 16(s7)
; RV32IZCMP-SR-NEXT: sw s6, %lo(var+12)(s1)
; RV32IZCMP-SR-NEXT: sw s5, %lo(var+8)(s1)
; RV32IZCMP-SR-NEXT: sw s4, %lo(var+4)(s1)
; RV32IZCMP-SR-NEXT: sw s3, %lo(var)(s1)
; RV32IZCMP-SR-NEXT: mv a0, s2
; RV32IZCMP-SR-NEXT: addi sp, s0, -48
; RV32IZCMP-SR-NEXT: .cfi_def_cfa sp, 48
; RV32IZCMP-SR-NEXT: cm.pop {ra, s0-s8}, 48
; RV32IZCMP-SR-NEXT: .cfi_restore ra
; RV32IZCMP-SR-NEXT: .cfi_restore s0
; RV32IZCMP-SR-NEXT: .cfi_restore s1
; RV32IZCMP-SR-NEXT: .cfi_restore s2
; RV32IZCMP-SR-NEXT: .cfi_restore s3
; RV32IZCMP-SR-NEXT: .cfi_restore s4
; RV32IZCMP-SR-NEXT: .cfi_restore s5
; RV32IZCMP-SR-NEXT: .cfi_restore s6
; RV32IZCMP-SR-NEXT: .cfi_restore s7
; RV32IZCMP-SR-NEXT: .cfi_restore s8
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-SR-NEXT: tail callee
;
; RV64IZCMP-SR-LABEL: nocompress:
; RV64IZCMP-SR: # %bb.0: # %entry
; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s8}, -80
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 80
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -80
; RV64IZCMP-SR-NEXT: .cfi_offset s0, -72
; RV64IZCMP-SR-NEXT: .cfi_offset s1, -64
; RV64IZCMP-SR-NEXT: .cfi_offset s2, -56
; RV64IZCMP-SR-NEXT: .cfi_offset s3, -48
; RV64IZCMP-SR-NEXT: .cfi_offset s4, -40
; RV64IZCMP-SR-NEXT: .cfi_offset s5, -32
; RV64IZCMP-SR-NEXT: .cfi_offset s6, -24
; RV64IZCMP-SR-NEXT: .cfi_offset s7, -16
; RV64IZCMP-SR-NEXT: .cfi_offset s8, -8
; RV64IZCMP-SR-NEXT: addi s0, sp, 80
; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-SR-NEXT: slli a0, a0, 32
; RV64IZCMP-SR-NEXT: srli a0, a0, 32
; RV64IZCMP-SR-NEXT: addi a0, a0, 15
; RV64IZCMP-SR-NEXT: andi a0, a0, -16
; RV64IZCMP-SR-NEXT: sub s2, sp, a0
; RV64IZCMP-SR-NEXT: mv sp, s2
; RV64IZCMP-SR-NEXT: lui s1, %hi(var)
; RV64IZCMP-SR-NEXT: lw s3, %lo(var)(s1)
; RV64IZCMP-SR-NEXT: lw s4, %lo(var+4)(s1)
; RV64IZCMP-SR-NEXT: lw s5, %lo(var+8)(s1)
; RV64IZCMP-SR-NEXT: lw s6, %lo(var+12)(s1)
; RV64IZCMP-SR-NEXT: addi s7, s1, %lo(var)
; RV64IZCMP-SR-NEXT: lw s8, 16(s7)
; RV64IZCMP-SR-NEXT: mv a0, s2
; RV64IZCMP-SR-NEXT: call callee_void
; RV64IZCMP-SR-NEXT: sw s8, 16(s7)
; RV64IZCMP-SR-NEXT: sw s6, %lo(var+12)(s1)
; RV64IZCMP-SR-NEXT: sw s5, %lo(var+8)(s1)
; RV64IZCMP-SR-NEXT: sw s4, %lo(var+4)(s1)
; RV64IZCMP-SR-NEXT: sw s3, %lo(var)(s1)
; RV64IZCMP-SR-NEXT: mv a0, s2
; RV64IZCMP-SR-NEXT: addi sp, s0, -80
; RV64IZCMP-SR-NEXT: .cfi_def_cfa sp, 80
; RV64IZCMP-SR-NEXT: cm.pop {ra, s0-s8}, 80
; RV64IZCMP-SR-NEXT: .cfi_restore ra
; RV64IZCMP-SR-NEXT: .cfi_restore s0
; RV64IZCMP-SR-NEXT: .cfi_restore s1
; RV64IZCMP-SR-NEXT: .cfi_restore s2
; RV64IZCMP-SR-NEXT: .cfi_restore s3
; RV64IZCMP-SR-NEXT: .cfi_restore s4
; RV64IZCMP-SR-NEXT: .cfi_restore s5
; RV64IZCMP-SR-NEXT: .cfi_restore s6
; RV64IZCMP-SR-NEXT: .cfi_restore s7
; RV64IZCMP-SR-NEXT: .cfi_restore s8
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-SR-NEXT: tail callee
;
; RV32I-LABEL: nocompress:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: addi sp, sp, -48
; RV32I-NEXT: .cfi_def_cfa_offset 48
; RV32I-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s5, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s7, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s8, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: .cfi_offset s0, -8
; RV32I-NEXT: .cfi_offset s1, -12
; RV32I-NEXT: .cfi_offset s2, -16
; RV32I-NEXT: .cfi_offset s3, -20
; RV32I-NEXT: .cfi_offset s4, -24
; RV32I-NEXT: .cfi_offset s5, -28
; RV32I-NEXT: .cfi_offset s6, -32
; RV32I-NEXT: .cfi_offset s7, -36
; RV32I-NEXT: .cfi_offset s8, -40
; RV32I-NEXT: addi s0, sp, 48
; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: addi a0, a0, 15
; RV32I-NEXT: andi a0, a0, -16
; RV32I-NEXT: sub s1, sp, a0
; RV32I-NEXT: mv sp, s1
; RV32I-NEXT: lui s2, %hi(var)
; RV32I-NEXT: lw s3, %lo(var)(s2)
; RV32I-NEXT: lw s4, %lo(var+4)(s2)
; RV32I-NEXT: lw s5, %lo(var+8)(s2)
; RV32I-NEXT: lw s6, %lo(var+12)(s2)
; RV32I-NEXT: addi s7, s2, %lo(var)
; RV32I-NEXT: lw s8, 16(s7)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: call callee_void
; RV32I-NEXT: sw s8, 16(s7)
; RV32I-NEXT: sw s6, %lo(var+12)(s2)
; RV32I-NEXT: sw s5, %lo(var+8)(s2)
; RV32I-NEXT: sw s4, %lo(var+4)(s2)
; RV32I-NEXT: sw s3, %lo(var)(s2)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: addi sp, s0, -48
; RV32I-NEXT: .cfi_def_cfa sp, 48
; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 32(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s5, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s6, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s7, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s8, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: .cfi_restore s0
; RV32I-NEXT: .cfi_restore s1
; RV32I-NEXT: .cfi_restore s2
; RV32I-NEXT: .cfi_restore s3
; RV32I-NEXT: .cfi_restore s4
; RV32I-NEXT: .cfi_restore s5
; RV32I-NEXT: .cfi_restore s6
; RV32I-NEXT: .cfi_restore s7
; RV32I-NEXT: .cfi_restore s8
; RV32I-NEXT: addi sp, sp, 48
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: tail callee
;
; RV64I-LABEL: nocompress:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi sp, sp, -80
; RV64I-NEXT: .cfi_def_cfa_offset 80
; RV64I-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 56(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 48(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s4, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s5, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s6, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s7, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s8, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: .cfi_offset s0, -16
; RV64I-NEXT: .cfi_offset s1, -24
; RV64I-NEXT: .cfi_offset s2, -32
; RV64I-NEXT: .cfi_offset s3, -40
; RV64I-NEXT: .cfi_offset s4, -48
; RV64I-NEXT: .cfi_offset s5, -56
; RV64I-NEXT: .cfi_offset s6, -64
; RV64I-NEXT: .cfi_offset s7, -72
; RV64I-NEXT: .cfi_offset s8, -80
; RV64I-NEXT: addi s0, sp, 80
; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: addi a0, a0, 15
; RV64I-NEXT: andi a0, a0, -16
; RV64I-NEXT: sub s1, sp, a0
; RV64I-NEXT: mv sp, s1
; RV64I-NEXT: lui s2, %hi(var)
; RV64I-NEXT: lw s3, %lo(var)(s2)
; RV64I-NEXT: lw s4, %lo(var+4)(s2)
; RV64I-NEXT: lw s5, %lo(var+8)(s2)
; RV64I-NEXT: lw s6, %lo(var+12)(s2)
; RV64I-NEXT: addi s7, s2, %lo(var)
; RV64I-NEXT: lw s8, 16(s7)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call callee_void
; RV64I-NEXT: sw s8, 16(s7)
; RV64I-NEXT: sw s6, %lo(var+12)(s2)
; RV64I-NEXT: sw s5, %lo(var+8)(s2)
; RV64I-NEXT: sw s4, %lo(var+4)(s2)
; RV64I-NEXT: sw s3, %lo(var)(s2)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: addi sp, s0, -80
; RV64I-NEXT: .cfi_def_cfa sp, 80
; RV64I-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 56(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 48(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s3, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s4, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s5, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s6, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s7, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s8, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: .cfi_restore s0
; RV64I-NEXT: .cfi_restore s1
; RV64I-NEXT: .cfi_restore s2
; RV64I-NEXT: .cfi_restore s3
; RV64I-NEXT: .cfi_restore s4
; RV64I-NEXT: .cfi_restore s5
; RV64I-NEXT: .cfi_restore s6
; RV64I-NEXT: .cfi_restore s7
; RV64I-NEXT: .cfi_restore s8
; RV64I-NEXT: addi sp, sp, 80
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: tail callee
entry:
  ;; Variable-sized alloca keyed off %size forces frame-pointer-based frames.
  %0 = alloca i8, i32 %size, align 16
  ;; Load all of @var before the call so the values must live in saved regs
  ;; across callee_void, maximizing the registers the push/pop must cover.
  %val = load [5 x i32], ptr @var
  call void @callee_void(ptr nonnull %0)
  store volatile [5 x i32] %val, ptr @var
  ;; Musttail-style exit: the epilogue must fully restore sp before `tail`.
  %1 = tail call i32 @callee(ptr nonnull %0)
  ret i32 %1
}
|
|
|
|
; Check that functions with varargs do not use save/restore code
|
|
|
|
declare void @llvm.va_start(ptr)
|
|
declare void @llvm.va_end(ptr)
|
|
|
|
;; varargs: variadic functions are ineligible for Zcmp push/pop and for
;; save/restore libcalls, so every configuration below must fall back to the
;; plain addi/sw (or sd) frame sequence. CHECK lines are autogenerated by
;; update_llc_test_checks.py -- do not edit them by hand.
define i32 @varargs(ptr %fmt, ...) {
; RV32IZCMP-LABEL: varargs:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: addi sp, sp, -48
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 48
; RV32IZCMP-NEXT: mv a0, a1
; RV32IZCMP-NEXT: sw a5, 36(sp)
; RV32IZCMP-NEXT: sw a6, 40(sp)
; RV32IZCMP-NEXT: sw a7, 44(sp)
; RV32IZCMP-NEXT: sw a1, 20(sp)
; RV32IZCMP-NEXT: sw a2, 24(sp)
; RV32IZCMP-NEXT: sw a3, 28(sp)
; RV32IZCMP-NEXT: sw a4, 32(sp)
; RV32IZCMP-NEXT: addi a1, sp, 24
; RV32IZCMP-NEXT: sw a1, 12(sp)
; RV32IZCMP-NEXT: addi sp, sp, 48
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-NEXT: ret
;
; RV64IZCMP-LABEL: varargs:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: addi sp, sp, -80
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 80
; RV64IZCMP-NEXT: sd a1, 24(sp)
; RV64IZCMP-NEXT: addi a0, sp, 28
; RV64IZCMP-NEXT: sd a0, 8(sp)
; RV64IZCMP-NEXT: lw a0, 24(sp)
; RV64IZCMP-NEXT: sd a5, 56(sp)
; RV64IZCMP-NEXT: sd a6, 64(sp)
; RV64IZCMP-NEXT: sd a7, 72(sp)
; RV64IZCMP-NEXT: sd a2, 32(sp)
; RV64IZCMP-NEXT: sd a3, 40(sp)
; RV64IZCMP-NEXT: sd a4, 48(sp)
; RV64IZCMP-NEXT: addi sp, sp, 80
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-NEXT: ret
;
; RV32IZCMP-SR-LABEL: varargs:
; RV32IZCMP-SR: # %bb.0:
; RV32IZCMP-SR-NEXT: addi sp, sp, -48
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 48
; RV32IZCMP-SR-NEXT: mv a0, a1
; RV32IZCMP-SR-NEXT: sw a5, 36(sp)
; RV32IZCMP-SR-NEXT: sw a6, 40(sp)
; RV32IZCMP-SR-NEXT: sw a7, 44(sp)
; RV32IZCMP-SR-NEXT: sw a1, 20(sp)
; RV32IZCMP-SR-NEXT: sw a2, 24(sp)
; RV32IZCMP-SR-NEXT: sw a3, 28(sp)
; RV32IZCMP-SR-NEXT: sw a4, 32(sp)
; RV32IZCMP-SR-NEXT: addi a1, sp, 24
; RV32IZCMP-SR-NEXT: sw a1, 12(sp)
; RV32IZCMP-SR-NEXT: addi sp, sp, 48
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-SR-NEXT: ret
;
; RV64IZCMP-SR-LABEL: varargs:
; RV64IZCMP-SR: # %bb.0:
; RV64IZCMP-SR-NEXT: addi sp, sp, -80
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 80
; RV64IZCMP-SR-NEXT: sd a1, 24(sp)
; RV64IZCMP-SR-NEXT: addi a0, sp, 28
; RV64IZCMP-SR-NEXT: sd a0, 8(sp)
; RV64IZCMP-SR-NEXT: lw a0, 24(sp)
; RV64IZCMP-SR-NEXT: sd a5, 56(sp)
; RV64IZCMP-SR-NEXT: sd a6, 64(sp)
; RV64IZCMP-SR-NEXT: sd a7, 72(sp)
; RV64IZCMP-SR-NEXT: sd a2, 32(sp)
; RV64IZCMP-SR-NEXT: sd a3, 40(sp)
; RV64IZCMP-SR-NEXT: sd a4, 48(sp)
; RV64IZCMP-SR-NEXT: addi sp, sp, 80
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-SR-NEXT: ret
;
; RV32I-LABEL: varargs:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -48
; RV32I-NEXT: .cfi_def_cfa_offset 48
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: sw a5, 36(sp)
; RV32I-NEXT: sw a6, 40(sp)
; RV32I-NEXT: sw a7, 44(sp)
; RV32I-NEXT: sw a1, 20(sp)
; RV32I-NEXT: sw a2, 24(sp)
; RV32I-NEXT: sw a3, 28(sp)
; RV32I-NEXT: sw a4, 32(sp)
; RV32I-NEXT: addi a1, sp, 24
; RV32I-NEXT: sw a1, 12(sp)
; RV32I-NEXT: addi sp, sp, 48
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: varargs:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -80
; RV64I-NEXT: .cfi_def_cfa_offset 80
; RV64I-NEXT: sd a1, 24(sp)
; RV64I-NEXT: addi a0, sp, 28
; RV64I-NEXT: sd a0, 8(sp)
; RV64I-NEXT: lw a0, 24(sp)
; RV64I-NEXT: sd a5, 56(sp)
; RV64I-NEXT: sd a6, 64(sp)
; RV64I-NEXT: sd a7, 72(sp)
; RV64I-NEXT: sd a2, 32(sp)
; RV64I-NEXT: sd a3, 40(sp)
; RV64I-NEXT: sd a4, 48(sp)
; RV64I-NEXT: addi sp, sp, 80
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
  ;; Manual va_arg expansion: fetch one i32 from the va_list and bump the
  ;; cursor by 4 bytes.
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %argp.cur = load ptr, ptr %va
  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
  store ptr %argp.next, ptr %va
  %1 = load i32, ptr %argp.cur
  call void @llvm.va_end(ptr %va)
  ret i32 %1
}
|
|
|
|
@var0 = global [18 x i32] zeroinitializer
|
|
|
|
;; many_args: copies all 18 elements of @var0 back to itself (volatile store
;; blocks folding), creating enough live values to spill into s0-s4. With Zcmp
;; the function should use cm.push on entry and the combined cm.popret to
;; restore and return in one instruction. CHECK lines are autogenerated by
;; update_llc_test_checks.py -- do not edit them by hand.
define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) {
; RV32IZCMP-LABEL: many_args:
; RV32IZCMP: # %bb.0: # %entry
; RV32IZCMP-NEXT: cm.push {ra, s0-s4}, -32
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 32
; RV32IZCMP-NEXT: .cfi_offset s0, -20
; RV32IZCMP-NEXT: .cfi_offset s1, -16
; RV32IZCMP-NEXT: .cfi_offset s2, -12
; RV32IZCMP-NEXT: .cfi_offset s3, -8
; RV32IZCMP-NEXT: .cfi_offset s4, -4
; RV32IZCMP-NEXT: lui a0, %hi(var0)
; RV32IZCMP-NEXT: lw a6, %lo(var0)(a0)
; RV32IZCMP-NEXT: lw a7, %lo(var0+4)(a0)
; RV32IZCMP-NEXT: lw t0, %lo(var0+8)(a0)
; RV32IZCMP-NEXT: lw t1, %lo(var0+12)(a0)
; RV32IZCMP-NEXT: addi a5, a0, %lo(var0)
; RV32IZCMP-NEXT: lw t2, 16(a5)
; RV32IZCMP-NEXT: lw t3, 20(a5)
; RV32IZCMP-NEXT: lw t4, 24(a5)
; RV32IZCMP-NEXT: lw t5, 28(a5)
; RV32IZCMP-NEXT: lw t6, 48(a5)
; RV32IZCMP-NEXT: lw s2, 52(a5)
; RV32IZCMP-NEXT: lw a3, 56(a5)
; RV32IZCMP-NEXT: lw a4, 60(a5)
; RV32IZCMP-NEXT: lw a1, 64(a5)
; RV32IZCMP-NEXT: lw s0, 68(a5)
; RV32IZCMP-NEXT: lw s3, 32(a5)
; RV32IZCMP-NEXT: lw s4, 36(a5)
; RV32IZCMP-NEXT: lw s1, 40(a5)
; RV32IZCMP-NEXT: lw a2, 44(a5)
; RV32IZCMP-NEXT: sw s0, 68(a5)
; RV32IZCMP-NEXT: sw a1, 64(a5)
; RV32IZCMP-NEXT: sw a4, 60(a5)
; RV32IZCMP-NEXT: sw a3, 56(a5)
; RV32IZCMP-NEXT: sw s2, 52(a5)
; RV32IZCMP-NEXT: sw t6, 48(a5)
; RV32IZCMP-NEXT: sw a2, 44(a5)
; RV32IZCMP-NEXT: sw s1, 40(a5)
; RV32IZCMP-NEXT: sw s4, 36(a5)
; RV32IZCMP-NEXT: sw s3, 32(a5)
; RV32IZCMP-NEXT: sw t5, 28(a5)
; RV32IZCMP-NEXT: sw t4, 24(a5)
; RV32IZCMP-NEXT: sw t3, 20(a5)
; RV32IZCMP-NEXT: sw t2, 16(a5)
; RV32IZCMP-NEXT: sw t1, %lo(var0+12)(a0)
; RV32IZCMP-NEXT: sw t0, %lo(var0+8)(a0)
; RV32IZCMP-NEXT: sw a7, %lo(var0+4)(a0)
; RV32IZCMP-NEXT: sw a6, %lo(var0)(a0)
; RV32IZCMP-NEXT: cm.popret {ra, s0-s4}, 32
;
; RV64IZCMP-LABEL: many_args:
; RV64IZCMP: # %bb.0: # %entry
; RV64IZCMP-NEXT: cm.push {ra, s0-s4}, -48
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 48
; RV64IZCMP-NEXT: .cfi_offset s0, -40
; RV64IZCMP-NEXT: .cfi_offset s1, -32
; RV64IZCMP-NEXT: .cfi_offset s2, -24
; RV64IZCMP-NEXT: .cfi_offset s3, -16
; RV64IZCMP-NEXT: .cfi_offset s4, -8
; RV64IZCMP-NEXT: lui a0, %hi(var0)
; RV64IZCMP-NEXT: lw a6, %lo(var0)(a0)
; RV64IZCMP-NEXT: lw a7, %lo(var0+4)(a0)
; RV64IZCMP-NEXT: lw t0, %lo(var0+8)(a0)
; RV64IZCMP-NEXT: lw t1, %lo(var0+12)(a0)
; RV64IZCMP-NEXT: addi a5, a0, %lo(var0)
; RV64IZCMP-NEXT: lw t2, 16(a5)
; RV64IZCMP-NEXT: lw t3, 20(a5)
; RV64IZCMP-NEXT: lw t4, 24(a5)
; RV64IZCMP-NEXT: lw t5, 28(a5)
; RV64IZCMP-NEXT: lw t6, 48(a5)
; RV64IZCMP-NEXT: lw s2, 52(a5)
; RV64IZCMP-NEXT: lw a3, 56(a5)
; RV64IZCMP-NEXT: lw a4, 60(a5)
; RV64IZCMP-NEXT: lw a1, 64(a5)
; RV64IZCMP-NEXT: lw s0, 68(a5)
; RV64IZCMP-NEXT: lw s3, 32(a5)
; RV64IZCMP-NEXT: lw s4, 36(a5)
; RV64IZCMP-NEXT: lw s1, 40(a5)
; RV64IZCMP-NEXT: lw a2, 44(a5)
; RV64IZCMP-NEXT: sw s0, 68(a5)
; RV64IZCMP-NEXT: sw a1, 64(a5)
; RV64IZCMP-NEXT: sw a4, 60(a5)
; RV64IZCMP-NEXT: sw a3, 56(a5)
; RV64IZCMP-NEXT: sw s2, 52(a5)
; RV64IZCMP-NEXT: sw t6, 48(a5)
; RV64IZCMP-NEXT: sw a2, 44(a5)
; RV64IZCMP-NEXT: sw s1, 40(a5)
; RV64IZCMP-NEXT: sw s4, 36(a5)
; RV64IZCMP-NEXT: sw s3, 32(a5)
; RV64IZCMP-NEXT: sw t5, 28(a5)
; RV64IZCMP-NEXT: sw t4, 24(a5)
; RV64IZCMP-NEXT: sw t3, 20(a5)
; RV64IZCMP-NEXT: sw t2, 16(a5)
; RV64IZCMP-NEXT: sw t1, %lo(var0+12)(a0)
; RV64IZCMP-NEXT: sw t0, %lo(var0+8)(a0)
; RV64IZCMP-NEXT: sw a7, %lo(var0+4)(a0)
; RV64IZCMP-NEXT: sw a6, %lo(var0)(a0)
; RV64IZCMP-NEXT: cm.popret {ra, s0-s4}, 48
;
; RV32IZCMP-SR-LABEL: many_args:
; RV32IZCMP-SR: # %bb.0: # %entry
; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s4}, -32
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 32
; RV32IZCMP-SR-NEXT: .cfi_offset s0, -20
; RV32IZCMP-SR-NEXT: .cfi_offset s1, -16
; RV32IZCMP-SR-NEXT: .cfi_offset s2, -12
; RV32IZCMP-SR-NEXT: .cfi_offset s3, -8
; RV32IZCMP-SR-NEXT: .cfi_offset s4, -4
; RV32IZCMP-SR-NEXT: lui a0, %hi(var0)
; RV32IZCMP-SR-NEXT: lw a6, %lo(var0)(a0)
; RV32IZCMP-SR-NEXT: lw a7, %lo(var0+4)(a0)
; RV32IZCMP-SR-NEXT: lw t0, %lo(var0+8)(a0)
; RV32IZCMP-SR-NEXT: lw t1, %lo(var0+12)(a0)
; RV32IZCMP-SR-NEXT: addi a5, a0, %lo(var0)
; RV32IZCMP-SR-NEXT: lw t2, 16(a5)
; RV32IZCMP-SR-NEXT: lw t3, 20(a5)
; RV32IZCMP-SR-NEXT: lw t4, 24(a5)
; RV32IZCMP-SR-NEXT: lw t5, 28(a5)
; RV32IZCMP-SR-NEXT: lw t6, 48(a5)
; RV32IZCMP-SR-NEXT: lw s2, 52(a5)
; RV32IZCMP-SR-NEXT: lw a3, 56(a5)
; RV32IZCMP-SR-NEXT: lw a4, 60(a5)
; RV32IZCMP-SR-NEXT: lw a1, 64(a5)
; RV32IZCMP-SR-NEXT: lw s0, 68(a5)
; RV32IZCMP-SR-NEXT: lw s3, 32(a5)
; RV32IZCMP-SR-NEXT: lw s4, 36(a5)
; RV32IZCMP-SR-NEXT: lw s1, 40(a5)
; RV32IZCMP-SR-NEXT: lw a2, 44(a5)
; RV32IZCMP-SR-NEXT: sw s0, 68(a5)
; RV32IZCMP-SR-NEXT: sw a1, 64(a5)
; RV32IZCMP-SR-NEXT: sw a4, 60(a5)
; RV32IZCMP-SR-NEXT: sw a3, 56(a5)
; RV32IZCMP-SR-NEXT: sw s2, 52(a5)
; RV32IZCMP-SR-NEXT: sw t6, 48(a5)
; RV32IZCMP-SR-NEXT: sw a2, 44(a5)
; RV32IZCMP-SR-NEXT: sw s1, 40(a5)
; RV32IZCMP-SR-NEXT: sw s4, 36(a5)
; RV32IZCMP-SR-NEXT: sw s3, 32(a5)
; RV32IZCMP-SR-NEXT: sw t5, 28(a5)
; RV32IZCMP-SR-NEXT: sw t4, 24(a5)
; RV32IZCMP-SR-NEXT: sw t3, 20(a5)
; RV32IZCMP-SR-NEXT: sw t2, 16(a5)
; RV32IZCMP-SR-NEXT: sw t1, %lo(var0+12)(a0)
; RV32IZCMP-SR-NEXT: sw t0, %lo(var0+8)(a0)
; RV32IZCMP-SR-NEXT: sw a7, %lo(var0+4)(a0)
; RV32IZCMP-SR-NEXT: sw a6, %lo(var0)(a0)
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s4}, 32
;
; RV64IZCMP-SR-LABEL: many_args:
; RV64IZCMP-SR: # %bb.0: # %entry
; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s4}, -48
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 48
; RV64IZCMP-SR-NEXT: .cfi_offset s0, -40
; RV64IZCMP-SR-NEXT: .cfi_offset s1, -32
; RV64IZCMP-SR-NEXT: .cfi_offset s2, -24
; RV64IZCMP-SR-NEXT: .cfi_offset s3, -16
; RV64IZCMP-SR-NEXT: .cfi_offset s4, -8
; RV64IZCMP-SR-NEXT: lui a0, %hi(var0)
; RV64IZCMP-SR-NEXT: lw a6, %lo(var0)(a0)
; RV64IZCMP-SR-NEXT: lw a7, %lo(var0+4)(a0)
; RV64IZCMP-SR-NEXT: lw t0, %lo(var0+8)(a0)
; RV64IZCMP-SR-NEXT: lw t1, %lo(var0+12)(a0)
; RV64IZCMP-SR-NEXT: addi a5, a0, %lo(var0)
; RV64IZCMP-SR-NEXT: lw t2, 16(a5)
; RV64IZCMP-SR-NEXT: lw t3, 20(a5)
; RV64IZCMP-SR-NEXT: lw t4, 24(a5)
; RV64IZCMP-SR-NEXT: lw t5, 28(a5)
; RV64IZCMP-SR-NEXT: lw t6, 48(a5)
; RV64IZCMP-SR-NEXT: lw s2, 52(a5)
; RV64IZCMP-SR-NEXT: lw a3, 56(a5)
; RV64IZCMP-SR-NEXT: lw a4, 60(a5)
; RV64IZCMP-SR-NEXT: lw a1, 64(a5)
; RV64IZCMP-SR-NEXT: lw s0, 68(a5)
; RV64IZCMP-SR-NEXT: lw s3, 32(a5)
; RV64IZCMP-SR-NEXT: lw s4, 36(a5)
; RV64IZCMP-SR-NEXT: lw s1, 40(a5)
; RV64IZCMP-SR-NEXT: lw a2, 44(a5)
; RV64IZCMP-SR-NEXT: sw s0, 68(a5)
; RV64IZCMP-SR-NEXT: sw a1, 64(a5)
; RV64IZCMP-SR-NEXT: sw a4, 60(a5)
; RV64IZCMP-SR-NEXT: sw a3, 56(a5)
; RV64IZCMP-SR-NEXT: sw s2, 52(a5)
; RV64IZCMP-SR-NEXT: sw t6, 48(a5)
; RV64IZCMP-SR-NEXT: sw a2, 44(a5)
; RV64IZCMP-SR-NEXT: sw s1, 40(a5)
; RV64IZCMP-SR-NEXT: sw s4, 36(a5)
; RV64IZCMP-SR-NEXT: sw s3, 32(a5)
; RV64IZCMP-SR-NEXT: sw t5, 28(a5)
; RV64IZCMP-SR-NEXT: sw t4, 24(a5)
; RV64IZCMP-SR-NEXT: sw t3, 20(a5)
; RV64IZCMP-SR-NEXT: sw t2, 16(a5)
; RV64IZCMP-SR-NEXT: sw t1, %lo(var0+12)(a0)
; RV64IZCMP-SR-NEXT: sw t0, %lo(var0+8)(a0)
; RV64IZCMP-SR-NEXT: sw a7, %lo(var0+4)(a0)
; RV64IZCMP-SR-NEXT: sw a6, %lo(var0)(a0)
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s4}, 48
;
; RV32I-LABEL: many_args:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: .cfi_def_cfa_offset 32
; RV32I-NEXT: sw s0, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset s0, -4
; RV32I-NEXT: .cfi_offset s1, -8
; RV32I-NEXT: .cfi_offset s2, -12
; RV32I-NEXT: .cfi_offset s3, -16
; RV32I-NEXT: .cfi_offset s4, -20
; RV32I-NEXT: lui a0, %hi(var0)
; RV32I-NEXT: lw a1, %lo(var0)(a0)
; RV32I-NEXT: lw a2, %lo(var0+4)(a0)
; RV32I-NEXT: lw a3, %lo(var0+8)(a0)
; RV32I-NEXT: lw a4, %lo(var0+12)(a0)
; RV32I-NEXT: addi a5, a0, %lo(var0)
; RV32I-NEXT: lw a6, 16(a5)
; RV32I-NEXT: lw a7, 20(a5)
; RV32I-NEXT: lw t0, 24(a5)
; RV32I-NEXT: lw t1, 28(a5)
; RV32I-NEXT: lw t2, 48(a5)
; RV32I-NEXT: lw t3, 52(a5)
; RV32I-NEXT: lw t4, 56(a5)
; RV32I-NEXT: lw t5, 60(a5)
; RV32I-NEXT: lw t6, 64(a5)
; RV32I-NEXT: lw s0, 68(a5)
; RV32I-NEXT: lw s1, 32(a5)
; RV32I-NEXT: lw s2, 36(a5)
; RV32I-NEXT: lw s3, 40(a5)
; RV32I-NEXT: lw s4, 44(a5)
; RV32I-NEXT: sw s0, 68(a5)
; RV32I-NEXT: sw t6, 64(a5)
; RV32I-NEXT: sw t5, 60(a5)
; RV32I-NEXT: sw t4, 56(a5)
; RV32I-NEXT: sw t3, 52(a5)
; RV32I-NEXT: sw t2, 48(a5)
; RV32I-NEXT: sw s4, 44(a5)
; RV32I-NEXT: sw s3, 40(a5)
; RV32I-NEXT: sw s2, 36(a5)
; RV32I-NEXT: sw s1, 32(a5)
; RV32I-NEXT: sw t1, 28(a5)
; RV32I-NEXT: sw t0, 24(a5)
; RV32I-NEXT: sw a7, 20(a5)
; RV32I-NEXT: sw a6, 16(a5)
; RV32I-NEXT: sw a4, %lo(var0+12)(a0)
; RV32I-NEXT: sw a3, %lo(var0+8)(a0)
; RV32I-NEXT: sw a2, %lo(var0+4)(a0)
; RV32I-NEXT: sw a1, %lo(var0)(a0)
; RV32I-NEXT: lw s0, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore s0
; RV32I-NEXT: .cfi_restore s1
; RV32I-NEXT: .cfi_restore s2
; RV32I-NEXT: .cfi_restore s3
; RV32I-NEXT: .cfi_restore s4
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: many_args:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: .cfi_def_cfa_offset 48
; RV64I-NEXT: sd s0, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s4, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset s0, -8
; RV64I-NEXT: .cfi_offset s1, -16
; RV64I-NEXT: .cfi_offset s2, -24
; RV64I-NEXT: .cfi_offset s3, -32
; RV64I-NEXT: .cfi_offset s4, -40
; RV64I-NEXT: lui a0, %hi(var0)
; RV64I-NEXT: lw a1, %lo(var0)(a0)
; RV64I-NEXT: lw a2, %lo(var0+4)(a0)
; RV64I-NEXT: lw a3, %lo(var0+8)(a0)
; RV64I-NEXT: lw a4, %lo(var0+12)(a0)
; RV64I-NEXT: addi a5, a0, %lo(var0)
; RV64I-NEXT: lw a6, 16(a5)
; RV64I-NEXT: lw a7, 20(a5)
; RV64I-NEXT: lw t0, 24(a5)
; RV64I-NEXT: lw t1, 28(a5)
; RV64I-NEXT: lw t2, 48(a5)
; RV64I-NEXT: lw t3, 52(a5)
; RV64I-NEXT: lw t4, 56(a5)
; RV64I-NEXT: lw t5, 60(a5)
; RV64I-NEXT: lw t6, 64(a5)
; RV64I-NEXT: lw s0, 68(a5)
; RV64I-NEXT: lw s1, 32(a5)
; RV64I-NEXT: lw s2, 36(a5)
; RV64I-NEXT: lw s3, 40(a5)
; RV64I-NEXT: lw s4, 44(a5)
; RV64I-NEXT: sw s0, 68(a5)
; RV64I-NEXT: sw t6, 64(a5)
; RV64I-NEXT: sw t5, 60(a5)
; RV64I-NEXT: sw t4, 56(a5)
; RV64I-NEXT: sw t3, 52(a5)
; RV64I-NEXT: sw t2, 48(a5)
; RV64I-NEXT: sw s4, 44(a5)
; RV64I-NEXT: sw s3, 40(a5)
; RV64I-NEXT: sw s2, 36(a5)
; RV64I-NEXT: sw s1, 32(a5)
; RV64I-NEXT: sw t1, 28(a5)
; RV64I-NEXT: sw t0, 24(a5)
; RV64I-NEXT: sw a7, 20(a5)
; RV64I-NEXT: sw a6, 16(a5)
; RV64I-NEXT: sw a4, %lo(var0+12)(a0)
; RV64I-NEXT: sw a3, %lo(var0+8)(a0)
; RV64I-NEXT: sw a2, %lo(var0+4)(a0)
; RV64I-NEXT: sw a1, %lo(var0)(a0)
; RV64I-NEXT: ld s0, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s3, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s4, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore s0
; RV64I-NEXT: .cfi_restore s1
; RV64I-NEXT: .cfi_restore s2
; RV64I-NEXT: .cfi_restore s3
; RV64I-NEXT: .cfi_restore s4
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
entry:
  ;; The volatile store keeps all 18 loaded values live at once, so register
  ;; pressure spills into callee-saved registers s0-s4.
  %val = load [18 x i32], ptr @var0
  store volatile [18 x i32] %val, ptr @var0
  ret void
}
|
|
|
|
; Check that dynamic allocation calculations remain correct
|
|
|
|
declare ptr @llvm.stacksave()
|
|
declare void @llvm.stackrestore(ptr)
|
|
declare void @notdead(ptr)
|
|
|
|
; Dynamically-sized stack allocation: s0 is used as the frame pointer and s1
; preserves the pre-alloca sp, so Zcmp cm.push/cm.popret must save/restore
; {ra, s0-s1} and the epilogue must rebuild sp from s0 before popping.
define void @alloca(i32 %n) {
; RV32IZCMP-LABEL: alloca:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: cm.push {ra, s0-s1}, -16
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
; RV32IZCMP-NEXT: .cfi_offset ra, -12
; RV32IZCMP-NEXT: .cfi_offset s0, -8
; RV32IZCMP-NEXT: .cfi_offset s1, -4
; RV32IZCMP-NEXT: addi s0, sp, 16
; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-NEXT: mv s1, sp
; RV32IZCMP-NEXT: addi a0, a0, 15
; RV32IZCMP-NEXT: andi a0, a0, -16
; RV32IZCMP-NEXT: sub a0, sp, a0
; RV32IZCMP-NEXT: mv sp, a0
; RV32IZCMP-NEXT: call notdead
; RV32IZCMP-NEXT: mv sp, s1
; RV32IZCMP-NEXT: addi sp, s0, -16
; RV32IZCMP-NEXT: .cfi_def_cfa sp, 16
; RV32IZCMP-NEXT: cm.popret {ra, s0-s1}, 16
;
; RV64IZCMP-LABEL: alloca:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: cm.push {ra, s0-s1}, -32
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 32
; RV64IZCMP-NEXT: .cfi_offset ra, -24
; RV64IZCMP-NEXT: .cfi_offset s0, -16
; RV64IZCMP-NEXT: .cfi_offset s1, -8
; RV64IZCMP-NEXT: addi s0, sp, 32
; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-NEXT: mv s1, sp
; RV64IZCMP-NEXT: slli a0, a0, 32
; RV64IZCMP-NEXT: srli a0, a0, 32
; RV64IZCMP-NEXT: addi a0, a0, 15
; RV64IZCMP-NEXT: andi a0, a0, -16
; RV64IZCMP-NEXT: sub a0, sp, a0
; RV64IZCMP-NEXT: mv sp, a0
; RV64IZCMP-NEXT: call notdead
; RV64IZCMP-NEXT: mv sp, s1
; RV64IZCMP-NEXT: addi sp, s0, -32
; RV64IZCMP-NEXT: .cfi_def_cfa sp, 32
; RV64IZCMP-NEXT: cm.popret {ra, s0-s1}, 32
;
; RV32IZCMP-SR-LABEL: alloca:
; RV32IZCMP-SR: # %bb.0:
; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -12
; RV32IZCMP-SR-NEXT: .cfi_offset s0, -8
; RV32IZCMP-SR-NEXT: .cfi_offset s1, -4
; RV32IZCMP-SR-NEXT: addi s0, sp, 16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-SR-NEXT: mv s1, sp
; RV32IZCMP-SR-NEXT: addi a0, a0, 15
; RV32IZCMP-SR-NEXT: andi a0, a0, -16
; RV32IZCMP-SR-NEXT: sub a0, sp, a0
; RV32IZCMP-SR-NEXT: mv sp, a0
; RV32IZCMP-SR-NEXT: call notdead
; RV32IZCMP-SR-NEXT: mv sp, s1
; RV32IZCMP-SR-NEXT: addi sp, s0, -16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa sp, 16
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 16
;
; RV64IZCMP-SR-LABEL: alloca:
; RV64IZCMP-SR: # %bb.0:
; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -32
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 32
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -24
; RV64IZCMP-SR-NEXT: .cfi_offset s0, -16
; RV64IZCMP-SR-NEXT: .cfi_offset s1, -8
; RV64IZCMP-SR-NEXT: addi s0, sp, 32
; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-SR-NEXT: mv s1, sp
; RV64IZCMP-SR-NEXT: slli a0, a0, 32
; RV64IZCMP-SR-NEXT: srli a0, a0, 32
; RV64IZCMP-SR-NEXT: addi a0, a0, 15
; RV64IZCMP-SR-NEXT: andi a0, a0, -16
; RV64IZCMP-SR-NEXT: sub a0, sp, a0
; RV64IZCMP-SR-NEXT: mv sp, a0
; RV64IZCMP-SR-NEXT: call notdead
; RV64IZCMP-SR-NEXT: mv sp, s1
; RV64IZCMP-SR-NEXT: addi sp, s0, -32
; RV64IZCMP-SR-NEXT: .cfi_def_cfa sp, 32
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 32
;
; RV32I-LABEL: alloca:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: .cfi_def_cfa_offset 16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: .cfi_offset s0, -8
; RV32I-NEXT: .cfi_offset s1, -12
; RV32I-NEXT: addi s0, sp, 16
; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: mv s1, sp
; RV32I-NEXT: addi a0, a0, 15
; RV32I-NEXT: andi a0, a0, -16
; RV32I-NEXT: sub a0, sp, a0
; RV32I-NEXT: mv sp, a0
; RV32I-NEXT: call notdead
; RV32I-NEXT: mv sp, s1
; RV32I-NEXT: addi sp, s0, -16
; RV32I-NEXT: .cfi_def_cfa sp, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: .cfi_restore s0
; RV32I-NEXT: .cfi_restore s1
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: alloca:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: .cfi_def_cfa_offset 32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: .cfi_offset s0, -16
; RV64I-NEXT: .cfi_offset s1, -24
; RV64I-NEXT: addi s0, sp, 32
; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: mv s1, sp
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: addi a0, a0, 15
; RV64I-NEXT: andi a0, a0, -16
; RV64I-NEXT: sub a0, sp, a0
; RV64I-NEXT: mv sp, a0
; RV64I-NEXT: call notdead
; RV64I-NEXT: mv sp, s1
; RV64I-NEXT: addi sp, s0, -32
; RV64I-NEXT: .cfi_def_cfa sp, 32
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: .cfi_restore s0
; RV64I-NEXT: .cfi_restore s1
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
  ; Save sp, make a variable-sized allocation, call out, then restore sp.
  %sp = call ptr @llvm.stacksave()
  %addr = alloca i8, i32 %n
  call void @notdead(ptr %addr)
  call void @llvm.stackrestore(ptr %sp)
  ret void
}

declare i32 @foo_test_irq(...)
@var_test_irq = global [32 x i32] zeroinitializer

; Machine-mode interrupt handler: in addition to ra, every caller-saved
; register that could be live in the interrupted context (t0-t6, a0-a7) is
; spilled and reloaded, and the function returns with mret instead of ret.
; With Zcmp, cm.push/cm.pop only cover ra here; the temporaries get ordinary
; sw/sd spills in a separately allocated stack region.
define void @foo_with_irq() "interrupt"="machine" {
; RV32IZCMP-LABEL: foo_with_irq:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: cm.push {ra}, -64
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 64
; RV32IZCMP-NEXT: .cfi_offset ra, -4
; RV32IZCMP-NEXT: addi sp, sp, -16
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 80
; RV32IZCMP-NEXT: sw t0, 60(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t1, 56(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t2, 52(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw a1, 44(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw a2, 40(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw a3, 36(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw a4, 32(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw a5, 28(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw a6, 24(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw a7, 20(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t3, 16(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t4, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t5, 8(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t6, 4(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: .cfi_offset t0, -20
; RV32IZCMP-NEXT: .cfi_offset t1, -24
; RV32IZCMP-NEXT: .cfi_offset t2, -28
; RV32IZCMP-NEXT: .cfi_offset a0, -32
; RV32IZCMP-NEXT: .cfi_offset a1, -36
; RV32IZCMP-NEXT: .cfi_offset a2, -40
; RV32IZCMP-NEXT: .cfi_offset a3, -44
; RV32IZCMP-NEXT: .cfi_offset a4, -48
; RV32IZCMP-NEXT: .cfi_offset a5, -52
; RV32IZCMP-NEXT: .cfi_offset a6, -56
; RV32IZCMP-NEXT: .cfi_offset a7, -60
; RV32IZCMP-NEXT: .cfi_offset t3, -64
; RV32IZCMP-NEXT: .cfi_offset t4, -68
; RV32IZCMP-NEXT: .cfi_offset t5, -72
; RV32IZCMP-NEXT: .cfi_offset t6, -76
; RV32IZCMP-NEXT: call foo_test_irq
; RV32IZCMP-NEXT: lw t0, 60(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw t2, 52(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw a1, 44(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw a2, 40(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw a3, 36(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw a4, 32(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw a5, 28(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw a6, 24(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw a7, 20(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw t3, 16(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw t4, 12(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw t5, 8(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw t6, 4(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: .cfi_restore t0
; RV32IZCMP-NEXT: .cfi_restore t1
; RV32IZCMP-NEXT: .cfi_restore t2
; RV32IZCMP-NEXT: .cfi_restore a0
; RV32IZCMP-NEXT: .cfi_restore a1
; RV32IZCMP-NEXT: .cfi_restore a2
; RV32IZCMP-NEXT: .cfi_restore a3
; RV32IZCMP-NEXT: .cfi_restore a4
; RV32IZCMP-NEXT: .cfi_restore a5
; RV32IZCMP-NEXT: .cfi_restore a6
; RV32IZCMP-NEXT: .cfi_restore a7
; RV32IZCMP-NEXT: .cfi_restore t3
; RV32IZCMP-NEXT: .cfi_restore t4
; RV32IZCMP-NEXT: .cfi_restore t5
; RV32IZCMP-NEXT: .cfi_restore t6
; RV32IZCMP-NEXT: addi sp, sp, 16
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 64
; RV32IZCMP-NEXT: cm.pop {ra}, 64
; RV32IZCMP-NEXT: .cfi_restore ra
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-NEXT: mret
;
; RV64IZCMP-LABEL: foo_with_irq:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: cm.push {ra}, -64
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 64
; RV64IZCMP-NEXT: .cfi_offset ra, -8
; RV64IZCMP-NEXT: addi sp, sp, -80
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 144
; RV64IZCMP-NEXT: sd t0, 120(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t1, 112(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t2, 104(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd a0, 96(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd a1, 88(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd a2, 80(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd a3, 72(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd a4, 64(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd a5, 56(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd a6, 48(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd a7, 40(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t3, 32(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: .cfi_offset t0, -24
; RV64IZCMP-NEXT: .cfi_offset t1, -32
; RV64IZCMP-NEXT: .cfi_offset t2, -40
; RV64IZCMP-NEXT: .cfi_offset a0, -48
; RV64IZCMP-NEXT: .cfi_offset a1, -56
; RV64IZCMP-NEXT: .cfi_offset a2, -64
; RV64IZCMP-NEXT: .cfi_offset a3, -72
; RV64IZCMP-NEXT: .cfi_offset a4, -80
; RV64IZCMP-NEXT: .cfi_offset a5, -88
; RV64IZCMP-NEXT: .cfi_offset a6, -96
; RV64IZCMP-NEXT: .cfi_offset a7, -104
; RV64IZCMP-NEXT: .cfi_offset t3, -112
; RV64IZCMP-NEXT: .cfi_offset t4, -120
; RV64IZCMP-NEXT: .cfi_offset t5, -128
; RV64IZCMP-NEXT: .cfi_offset t6, -136
; RV64IZCMP-NEXT: call foo_test_irq
; RV64IZCMP-NEXT: ld t0, 120(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld t1, 112(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld t2, 104(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld a1, 88(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld a2, 80(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld a3, 72(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld a4, 64(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld a5, 56(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld a6, 48(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld a7, 40(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld t3, 32(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld t4, 24(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld t5, 16(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld t6, 8(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: .cfi_restore t0
; RV64IZCMP-NEXT: .cfi_restore t1
; RV64IZCMP-NEXT: .cfi_restore t2
; RV64IZCMP-NEXT: .cfi_restore a0
; RV64IZCMP-NEXT: .cfi_restore a1
; RV64IZCMP-NEXT: .cfi_restore a2
; RV64IZCMP-NEXT: .cfi_restore a3
; RV64IZCMP-NEXT: .cfi_restore a4
; RV64IZCMP-NEXT: .cfi_restore a5
; RV64IZCMP-NEXT: .cfi_restore a6
; RV64IZCMP-NEXT: .cfi_restore a7
; RV64IZCMP-NEXT: .cfi_restore t3
; RV64IZCMP-NEXT: .cfi_restore t4
; RV64IZCMP-NEXT: .cfi_restore t5
; RV64IZCMP-NEXT: .cfi_restore t6
; RV64IZCMP-NEXT: addi sp, sp, 80
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 64
; RV64IZCMP-NEXT: cm.pop {ra}, 64
; RV64IZCMP-NEXT: .cfi_restore ra
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-NEXT: mret
;
; RV32IZCMP-SR-LABEL: foo_with_irq:
; RV32IZCMP-SR: # %bb.0:
; RV32IZCMP-SR-NEXT: cm.push {ra}, -64
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
; RV32IZCMP-SR-NEXT: addi sp, sp, -16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 80
; RV32IZCMP-SR-NEXT: sw t0, 60(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t1, 56(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t2, 52(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw a1, 44(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw a2, 40(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw a3, 36(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw a4, 32(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw a5, 28(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw a6, 24(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw a7, 20(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t3, 16(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t4, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t5, 8(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t6, 4(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: .cfi_offset t0, -20
; RV32IZCMP-SR-NEXT: .cfi_offset t1, -24
; RV32IZCMP-SR-NEXT: .cfi_offset t2, -28
; RV32IZCMP-SR-NEXT: .cfi_offset a0, -32
; RV32IZCMP-SR-NEXT: .cfi_offset a1, -36
; RV32IZCMP-SR-NEXT: .cfi_offset a2, -40
; RV32IZCMP-SR-NEXT: .cfi_offset a3, -44
; RV32IZCMP-SR-NEXT: .cfi_offset a4, -48
; RV32IZCMP-SR-NEXT: .cfi_offset a5, -52
; RV32IZCMP-SR-NEXT: .cfi_offset a6, -56
; RV32IZCMP-SR-NEXT: .cfi_offset a7, -60
; RV32IZCMP-SR-NEXT: .cfi_offset t3, -64
; RV32IZCMP-SR-NEXT: .cfi_offset t4, -68
; RV32IZCMP-SR-NEXT: .cfi_offset t5, -72
; RV32IZCMP-SR-NEXT: .cfi_offset t6, -76
; RV32IZCMP-SR-NEXT: call foo_test_irq
; RV32IZCMP-SR-NEXT: lw t0, 60(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw t2, 52(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw a1, 44(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw a2, 40(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw a3, 36(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw a4, 32(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw a5, 28(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw a6, 24(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw a7, 20(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw t3, 16(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw t4, 12(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw t5, 8(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw t6, 4(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: .cfi_restore t0
; RV32IZCMP-SR-NEXT: .cfi_restore t1
; RV32IZCMP-SR-NEXT: .cfi_restore t2
; RV32IZCMP-SR-NEXT: .cfi_restore a0
; RV32IZCMP-SR-NEXT: .cfi_restore a1
; RV32IZCMP-SR-NEXT: .cfi_restore a2
; RV32IZCMP-SR-NEXT: .cfi_restore a3
; RV32IZCMP-SR-NEXT: .cfi_restore a4
; RV32IZCMP-SR-NEXT: .cfi_restore a5
; RV32IZCMP-SR-NEXT: .cfi_restore a6
; RV32IZCMP-SR-NEXT: .cfi_restore a7
; RV32IZCMP-SR-NEXT: .cfi_restore t3
; RV32IZCMP-SR-NEXT: .cfi_restore t4
; RV32IZCMP-SR-NEXT: .cfi_restore t5
; RV32IZCMP-SR-NEXT: .cfi_restore t6
; RV32IZCMP-SR-NEXT: addi sp, sp, 16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV32IZCMP-SR-NEXT: cm.pop {ra}, 64
; RV32IZCMP-SR-NEXT: .cfi_restore ra
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV32IZCMP-SR-NEXT: mret
;
; RV64IZCMP-SR-LABEL: foo_with_irq:
; RV64IZCMP-SR: # %bb.0:
; RV64IZCMP-SR-NEXT: cm.push {ra}, -64
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
; RV64IZCMP-SR-NEXT: addi sp, sp, -80
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 144
; RV64IZCMP-SR-NEXT: sd t0, 120(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t1, 112(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t2, 104(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd a0, 96(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd a1, 88(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd a2, 80(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd a3, 72(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd a4, 64(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd a5, 56(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd a6, 48(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd a7, 40(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t3, 32(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: .cfi_offset t0, -24
; RV64IZCMP-SR-NEXT: .cfi_offset t1, -32
; RV64IZCMP-SR-NEXT: .cfi_offset t2, -40
; RV64IZCMP-SR-NEXT: .cfi_offset a0, -48
; RV64IZCMP-SR-NEXT: .cfi_offset a1, -56
; RV64IZCMP-SR-NEXT: .cfi_offset a2, -64
; RV64IZCMP-SR-NEXT: .cfi_offset a3, -72
; RV64IZCMP-SR-NEXT: .cfi_offset a4, -80
; RV64IZCMP-SR-NEXT: .cfi_offset a5, -88
; RV64IZCMP-SR-NEXT: .cfi_offset a6, -96
; RV64IZCMP-SR-NEXT: .cfi_offset a7, -104
; RV64IZCMP-SR-NEXT: .cfi_offset t3, -112
; RV64IZCMP-SR-NEXT: .cfi_offset t4, -120
; RV64IZCMP-SR-NEXT: .cfi_offset t5, -128
; RV64IZCMP-SR-NEXT: .cfi_offset t6, -136
; RV64IZCMP-SR-NEXT: call foo_test_irq
; RV64IZCMP-SR-NEXT: ld t0, 120(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld t1, 112(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld t2, 104(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld a1, 88(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld a2, 80(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld a3, 72(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld a4, 64(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld a5, 56(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld a6, 48(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld a7, 40(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld t3, 32(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld t4, 24(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld t5, 16(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld t6, 8(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: .cfi_restore t0
; RV64IZCMP-SR-NEXT: .cfi_restore t1
; RV64IZCMP-SR-NEXT: .cfi_restore t2
; RV64IZCMP-SR-NEXT: .cfi_restore a0
; RV64IZCMP-SR-NEXT: .cfi_restore a1
; RV64IZCMP-SR-NEXT: .cfi_restore a2
; RV64IZCMP-SR-NEXT: .cfi_restore a3
; RV64IZCMP-SR-NEXT: .cfi_restore a4
; RV64IZCMP-SR-NEXT: .cfi_restore a5
; RV64IZCMP-SR-NEXT: .cfi_restore a6
; RV64IZCMP-SR-NEXT: .cfi_restore a7
; RV64IZCMP-SR-NEXT: .cfi_restore t3
; RV64IZCMP-SR-NEXT: .cfi_restore t4
; RV64IZCMP-SR-NEXT: .cfi_restore t5
; RV64IZCMP-SR-NEXT: .cfi_restore t6
; RV64IZCMP-SR-NEXT: addi sp, sp, 80
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
; RV64IZCMP-SR-NEXT: cm.pop {ra}, 64
; RV64IZCMP-SR-NEXT: .cfi_restore ra
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
; RV64IZCMP-SR-NEXT: mret
;
; RV32I-LABEL: foo_with_irq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -64
; RV32I-NEXT: .cfi_def_cfa_offset 64
; RV32I-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t0, 56(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t1, 52(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t2, 48(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw a1, 40(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw a2, 36(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw a3, 32(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw a4, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw a5, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw a6, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw a7, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t5, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t6, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: .cfi_offset t0, -8
; RV32I-NEXT: .cfi_offset t1, -12
; RV32I-NEXT: .cfi_offset t2, -16
; RV32I-NEXT: .cfi_offset a0, -20
; RV32I-NEXT: .cfi_offset a1, -24
; RV32I-NEXT: .cfi_offset a2, -28
; RV32I-NEXT: .cfi_offset a3, -32
; RV32I-NEXT: .cfi_offset a4, -36
; RV32I-NEXT: .cfi_offset a5, -40
; RV32I-NEXT: .cfi_offset a6, -44
; RV32I-NEXT: .cfi_offset a7, -48
; RV32I-NEXT: .cfi_offset t3, -52
; RV32I-NEXT: .cfi_offset t4, -56
; RV32I-NEXT: .cfi_offset t5, -60
; RV32I-NEXT: .cfi_offset t6, -64
; RV32I-NEXT: call foo_test_irq
; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t0, 56(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t1, 52(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t2, 48(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw a0, 44(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw a1, 40(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw a2, 36(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw a3, 32(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw a4, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw a5, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw a6, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw a7, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t5, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t6, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: .cfi_restore t0
; RV32I-NEXT: .cfi_restore t1
; RV32I-NEXT: .cfi_restore t2
; RV32I-NEXT: .cfi_restore a0
; RV32I-NEXT: .cfi_restore a1
; RV32I-NEXT: .cfi_restore a2
; RV32I-NEXT: .cfi_restore a3
; RV32I-NEXT: .cfi_restore a4
; RV32I-NEXT: .cfi_restore a5
; RV32I-NEXT: .cfi_restore a6
; RV32I-NEXT: .cfi_restore a7
; RV32I-NEXT: .cfi_restore t3
; RV32I-NEXT: .cfi_restore t4
; RV32I-NEXT: .cfi_restore t5
; RV32I-NEXT: .cfi_restore t6
; RV32I-NEXT: addi sp, sp, 64
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: mret
;
; RV64I-LABEL: foo_with_irq:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -128
; RV64I-NEXT: .cfi_def_cfa_offset 128
; RV64I-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t0, 112(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t1, 104(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t2, 96(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd a0, 88(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd a1, 80(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd a2, 72(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd a3, 64(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd a4, 56(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd a5, 48(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd a6, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd a7, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t3, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t4, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t5, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t6, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: .cfi_offset t0, -16
; RV64I-NEXT: .cfi_offset t1, -24
; RV64I-NEXT: .cfi_offset t2, -32
; RV64I-NEXT: .cfi_offset a0, -40
; RV64I-NEXT: .cfi_offset a1, -48
; RV64I-NEXT: .cfi_offset a2, -56
; RV64I-NEXT: .cfi_offset a3, -64
; RV64I-NEXT: .cfi_offset a4, -72
; RV64I-NEXT: .cfi_offset a5, -80
; RV64I-NEXT: .cfi_offset a6, -88
; RV64I-NEXT: .cfi_offset a7, -96
; RV64I-NEXT: .cfi_offset t3, -104
; RV64I-NEXT: .cfi_offset t4, -112
; RV64I-NEXT: .cfi_offset t5, -120
; RV64I-NEXT: .cfi_offset t6, -128
; RV64I-NEXT: call foo_test_irq
; RV64I-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t0, 112(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t1, 104(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t2, 96(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld a1, 80(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld a2, 72(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld a3, 64(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld a4, 56(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld a5, 48(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld a6, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld a7, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t3, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t4, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t5, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t6, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: .cfi_restore t0
; RV64I-NEXT: .cfi_restore t1
; RV64I-NEXT: .cfi_restore t2
; RV64I-NEXT: .cfi_restore a0
; RV64I-NEXT: .cfi_restore a1
; RV64I-NEXT: .cfi_restore a2
; RV64I-NEXT: .cfi_restore a3
; RV64I-NEXT: .cfi_restore a4
; RV64I-NEXT: .cfi_restore a5
; RV64I-NEXT: .cfi_restore a6
; RV64I-NEXT: .cfi_restore a7
; RV64I-NEXT: .cfi_restore t3
; RV64I-NEXT: .cfi_restore t4
; RV64I-NEXT: .cfi_restore t5
; RV64I-NEXT: .cfi_restore t6
; RV64I-NEXT: addi sp, sp, 128
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: mret
  %call = call i32 @foo_test_irq()
  ret void
}
; Same call as foo_with_irq but without the interrupt attribute: only ra is
; saved/restored (cm.push/cm.popret {ra} with Zcmp) and the function returns
; with a plain ret. Serves as the non-interrupt baseline for comparison.
define void @foo_no_irq() {
; RV32IZCMP-LABEL: foo_no_irq:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: cm.push {ra}, -16
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
; RV32IZCMP-NEXT: .cfi_offset ra, -4
; RV32IZCMP-NEXT: call foo_test_irq
; RV32IZCMP-NEXT: cm.popret {ra}, 16
;
; RV64IZCMP-LABEL: foo_no_irq:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: cm.push {ra}, -16
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
; RV64IZCMP-NEXT: .cfi_offset ra, -8
; RV64IZCMP-NEXT: call foo_test_irq
; RV64IZCMP-NEXT: cm.popret {ra}, 16
;
; RV32IZCMP-SR-LABEL: foo_no_irq:
; RV32IZCMP-SR: # %bb.0:
; RV32IZCMP-SR-NEXT: cm.push {ra}, -16
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
; RV32IZCMP-SR-NEXT: call foo_test_irq
; RV32IZCMP-SR-NEXT: cm.popret {ra}, 16
;
; RV64IZCMP-SR-LABEL: foo_no_irq:
; RV64IZCMP-SR: # %bb.0:
; RV64IZCMP-SR-NEXT: cm.push {ra}, -16
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
; RV64IZCMP-SR-NEXT: call foo_test_irq
; RV64IZCMP-SR-NEXT: cm.popret {ra}, 16
;
; RV32I-LABEL: foo_no_irq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: .cfi_def_cfa_offset 16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: call foo_test_irq
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: .cfi_restore ra
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: .cfi_def_cfa_offset 0
; RV32I-NEXT: ret
;
; RV64I-LABEL: foo_no_irq:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: .cfi_def_cfa_offset 16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: call foo_test_irq
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: .cfi_def_cfa_offset 0
; RV64I-NEXT: ret
  %call = call i32 @foo_test_irq()
  ret void
}
define void @callee_with_irq() "interrupt"="machine" {
|
|
; RV32IZCMP-LABEL: callee_with_irq:
|
|
; RV32IZCMP: # %bb.0:
|
|
; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -112
|
|
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 112
|
|
; RV32IZCMP-NEXT: .cfi_offset ra, -52
|
|
; RV32IZCMP-NEXT: .cfi_offset s0, -48
|
|
; RV32IZCMP-NEXT: .cfi_offset s1, -44
|
|
; RV32IZCMP-NEXT: .cfi_offset s2, -40
|
|
; RV32IZCMP-NEXT: .cfi_offset s3, -36
|
|
; RV32IZCMP-NEXT: .cfi_offset s4, -32
|
|
; RV32IZCMP-NEXT: .cfi_offset s5, -28
|
|
; RV32IZCMP-NEXT: .cfi_offset s6, -24
|
|
; RV32IZCMP-NEXT: .cfi_offset s7, -20
|
|
; RV32IZCMP-NEXT: .cfi_offset s8, -16
|
|
; RV32IZCMP-NEXT: .cfi_offset s9, -12
|
|
; RV32IZCMP-NEXT: .cfi_offset s10, -8
|
|
; RV32IZCMP-NEXT: .cfi_offset s11, -4
|
|
; RV32IZCMP-NEXT: addi sp, sp, -48
|
|
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 160
|
|
; RV32IZCMP-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: sw t1, 88(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: sw t2, 84(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: sw a1, 76(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: sw a2, 72(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: sw a3, 68(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: sw a4, 64(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: sw a5, 60(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: sw a6, 56(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: sw a7, 52(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: sw t3, 48(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: sw t4, 44(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: sw t5, 40(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: sw t6, 36(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: .cfi_offset t0, -68
|
|
; RV32IZCMP-NEXT: .cfi_offset t1, -72
|
|
; RV32IZCMP-NEXT: .cfi_offset t2, -76
|
|
; RV32IZCMP-NEXT: .cfi_offset a0, -80
|
|
; RV32IZCMP-NEXT: .cfi_offset a1, -84
|
|
; RV32IZCMP-NEXT: .cfi_offset a2, -88
|
|
; RV32IZCMP-NEXT: .cfi_offset a3, -92
|
|
; RV32IZCMP-NEXT: .cfi_offset a4, -96
|
|
; RV32IZCMP-NEXT: .cfi_offset a5, -100
|
|
; RV32IZCMP-NEXT: .cfi_offset a6, -104
|
|
; RV32IZCMP-NEXT: .cfi_offset a7, -108
|
|
; RV32IZCMP-NEXT: .cfi_offset t3, -112
|
|
; RV32IZCMP-NEXT: .cfi_offset t4, -116
|
|
; RV32IZCMP-NEXT: .cfi_offset t5, -120
|
|
; RV32IZCMP-NEXT: .cfi_offset t6, -124
|
|
; RV32IZCMP-NEXT: lui t0, %hi(var_test_irq)
|
|
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
|
|
; RV32IZCMP-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0)
|
|
; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0)
|
|
; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0)
|
|
; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: addi a5, t0, %lo(var_test_irq)
|
|
; RV32IZCMP-NEXT: lw a0, 16(a5)
|
|
; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: lw a0, 20(a5)
|
|
; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: lw t4, 24(a5)
|
|
; RV32IZCMP-NEXT: lw t5, 28(a5)
|
|
; RV32IZCMP-NEXT: lw t6, 32(a5)
|
|
; RV32IZCMP-NEXT: lw s2, 36(a5)
|
|
; RV32IZCMP-NEXT: lw s3, 40(a5)
|
|
; RV32IZCMP-NEXT: lw s4, 44(a5)
|
|
; RV32IZCMP-NEXT: lw s5, 48(a5)
|
|
; RV32IZCMP-NEXT: lw s6, 52(a5)
|
|
; RV32IZCMP-NEXT: lw s7, 56(a5)
|
|
; RV32IZCMP-NEXT: lw s8, 60(a5)
|
|
; RV32IZCMP-NEXT: lw s9, 64(a5)
|
|
; RV32IZCMP-NEXT: lw s10, 68(a5)
|
|
; RV32IZCMP-NEXT: lw s11, 72(a5)
|
|
; RV32IZCMP-NEXT: lw ra, 76(a5)
|
|
; RV32IZCMP-NEXT: lw s1, 80(a5)
|
|
; RV32IZCMP-NEXT: lw t3, 84(a5)
|
|
; RV32IZCMP-NEXT: lw t2, 88(a5)
|
|
; RV32IZCMP-NEXT: lw t1, 92(a5)
|
|
; RV32IZCMP-NEXT: lw a7, 112(a5)
|
|
; RV32IZCMP-NEXT: lw s0, 116(a5)
|
|
; RV32IZCMP-NEXT: lw a3, 120(a5)
|
|
; RV32IZCMP-NEXT: lw a0, 124(a5)
|
|
; RV32IZCMP-NEXT: lw a6, 96(a5)
|
|
; RV32IZCMP-NEXT: lw a4, 100(a5)
|
|
; RV32IZCMP-NEXT: lw a2, 104(a5)
|
|
; RV32IZCMP-NEXT: lw a1, 108(a5)
|
|
; RV32IZCMP-NEXT: sw a0, 124(a5)
|
|
; RV32IZCMP-NEXT: sw a3, 120(a5)
|
|
; RV32IZCMP-NEXT: sw s0, 116(a5)
|
|
; RV32IZCMP-NEXT: sw a7, 112(a5)
|
|
; RV32IZCMP-NEXT: sw a1, 108(a5)
|
|
; RV32IZCMP-NEXT: sw a2, 104(a5)
|
|
; RV32IZCMP-NEXT: sw a4, 100(a5)
|
|
; RV32IZCMP-NEXT: sw a6, 96(a5)
|
|
; RV32IZCMP-NEXT: sw t1, 92(a5)
|
|
; RV32IZCMP-NEXT: sw t2, 88(a5)
|
|
; RV32IZCMP-NEXT: sw t3, 84(a5)
|
|
; RV32IZCMP-NEXT: sw s1, 80(a5)
|
|
; RV32IZCMP-NEXT: sw ra, 76(a5)
|
|
; RV32IZCMP-NEXT: sw s11, 72(a5)
|
|
; RV32IZCMP-NEXT: sw s10, 68(a5)
|
|
; RV32IZCMP-NEXT: sw s9, 64(a5)
|
|
; RV32IZCMP-NEXT: sw s8, 60(a5)
|
|
; RV32IZCMP-NEXT: sw s7, 56(a5)
|
|
; RV32IZCMP-NEXT: sw s6, 52(a5)
|
|
; RV32IZCMP-NEXT: sw s5, 48(a5)
|
|
; RV32IZCMP-NEXT: sw s4, 44(a5)
|
|
; RV32IZCMP-NEXT: sw s3, 40(a5)
|
|
; RV32IZCMP-NEXT: sw s2, 36(a5)
|
|
; RV32IZCMP-NEXT: sw t6, 32(a5)
|
|
; RV32IZCMP-NEXT: sw t5, 28(a5)
|
|
; RV32IZCMP-NEXT: sw t4, 24(a5)
|
|
; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: sw a0, 20(a5)
|
|
; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: sw a0, 16(a5)
|
|
; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
|
|
; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
|
|
; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
|
|
; RV32IZCMP-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq)(t0)
|
|
; RV32IZCMP-NEXT: lw t0, 92(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: lw t1, 88(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: lw t2, 84(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: lw a1, 76(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: lw a2, 72(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: lw a3, 68(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: lw a4, 64(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: lw a5, 60(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: lw a6, 56(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: lw a7, 52(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: lw t3, 48(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: lw t4, 44(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: lw t5, 40(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: lw t6, 36(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: .cfi_restore t0
|
|
; RV32IZCMP-NEXT: .cfi_restore t1
|
|
; RV32IZCMP-NEXT: .cfi_restore t2
|
|
; RV32IZCMP-NEXT: .cfi_restore a0
|
|
; RV32IZCMP-NEXT: .cfi_restore a1
|
|
; RV32IZCMP-NEXT: .cfi_restore a2
|
|
; RV32IZCMP-NEXT: .cfi_restore a3
|
|
; RV32IZCMP-NEXT: .cfi_restore a4
|
|
; RV32IZCMP-NEXT: .cfi_restore a5
|
|
; RV32IZCMP-NEXT: .cfi_restore a6
|
|
; RV32IZCMP-NEXT: .cfi_restore a7
|
|
; RV32IZCMP-NEXT: .cfi_restore t3
|
|
; RV32IZCMP-NEXT: .cfi_restore t4
|
|
; RV32IZCMP-NEXT: .cfi_restore t5
|
|
; RV32IZCMP-NEXT: .cfi_restore t6
|
|
; RV32IZCMP-NEXT: addi sp, sp, 48
|
|
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 112
|
|
; RV32IZCMP-NEXT: cm.pop {ra, s0-s11}, 112
|
|
; RV32IZCMP-NEXT: .cfi_restore ra
|
|
; RV32IZCMP-NEXT: .cfi_restore s0
|
|
; RV32IZCMP-NEXT: .cfi_restore s1
|
|
; RV32IZCMP-NEXT: .cfi_restore s2
|
|
; RV32IZCMP-NEXT: .cfi_restore s3
|
|
; RV32IZCMP-NEXT: .cfi_restore s4
|
|
; RV32IZCMP-NEXT: .cfi_restore s5
|
|
; RV32IZCMP-NEXT: .cfi_restore s6
|
|
; RV32IZCMP-NEXT: .cfi_restore s7
|
|
; RV32IZCMP-NEXT: .cfi_restore s8
|
|
; RV32IZCMP-NEXT: .cfi_restore s9
|
|
; RV32IZCMP-NEXT: .cfi_restore s10
|
|
; RV32IZCMP-NEXT: .cfi_restore s11
|
|
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 0
|
|
; RV32IZCMP-NEXT: mret
|
|
;
|
|
; RV64IZCMP-LABEL: callee_with_irq:
|
|
; RV64IZCMP: # %bb.0:
|
|
; RV64IZCMP-NEXT: cm.push {ra, s0-s11}, -160
|
|
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 160
|
|
; RV64IZCMP-NEXT: .cfi_offset ra, -104
|
|
; RV64IZCMP-NEXT: .cfi_offset s0, -96
|
|
; RV64IZCMP-NEXT: .cfi_offset s1, -88
|
|
; RV64IZCMP-NEXT: .cfi_offset s2, -80
|
|
; RV64IZCMP-NEXT: .cfi_offset s3, -72
|
|
; RV64IZCMP-NEXT: .cfi_offset s4, -64
|
|
; RV64IZCMP-NEXT: .cfi_offset s5, -56
|
|
; RV64IZCMP-NEXT: .cfi_offset s6, -48
|
|
; RV64IZCMP-NEXT: .cfi_offset s7, -40
|
|
; RV64IZCMP-NEXT: .cfi_offset s8, -32
|
|
; RV64IZCMP-NEXT: .cfi_offset s9, -24
|
|
; RV64IZCMP-NEXT: .cfi_offset s10, -16
|
|
; RV64IZCMP-NEXT: .cfi_offset s11, -8
|
|
; RV64IZCMP-NEXT: addi sp, sp, -128
|
|
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 288
|
|
; RV64IZCMP-NEXT: sd t0, 168(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: sd t1, 160(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: sd t2, 152(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: sd a1, 136(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: sd a2, 128(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: sd a3, 120(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: sd a4, 112(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: sd a5, 104(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: sd a6, 96(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: sd a7, 88(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: sd t3, 80(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: sd t4, 72(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: sd t5, 64(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: sd t6, 56(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: .cfi_offset t0, -120
|
|
; RV64IZCMP-NEXT: .cfi_offset t1, -128
|
|
; RV64IZCMP-NEXT: .cfi_offset t2, -136
|
|
; RV64IZCMP-NEXT: .cfi_offset a0, -144
|
|
; RV64IZCMP-NEXT: .cfi_offset a1, -152
|
|
; RV64IZCMP-NEXT: .cfi_offset a2, -160
|
|
; RV64IZCMP-NEXT: .cfi_offset a3, -168
|
|
; RV64IZCMP-NEXT: .cfi_offset a4, -176
|
|
; RV64IZCMP-NEXT: .cfi_offset a5, -184
|
|
; RV64IZCMP-NEXT: .cfi_offset a6, -192
|
|
; RV64IZCMP-NEXT: .cfi_offset a7, -200
|
|
; RV64IZCMP-NEXT: .cfi_offset t3, -208
|
|
; RV64IZCMP-NEXT: .cfi_offset t4, -216
|
|
; RV64IZCMP-NEXT: .cfi_offset t5, -224
|
|
; RV64IZCMP-NEXT: .cfi_offset t6, -232
|
|
; RV64IZCMP-NEXT: lui t0, %hi(var_test_irq)
|
|
; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
|
|
; RV64IZCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0)
|
|
; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0)
|
|
; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0)
|
|
; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: addi a5, t0, %lo(var_test_irq)
|
|
; RV64IZCMP-NEXT: lw a0, 16(a5)
|
|
; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: lw a0, 20(a5)
|
|
; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: lw t4, 24(a5)
|
|
; RV64IZCMP-NEXT: lw t5, 28(a5)
|
|
; RV64IZCMP-NEXT: lw t6, 32(a5)
|
|
; RV64IZCMP-NEXT: lw s2, 36(a5)
|
|
; RV64IZCMP-NEXT: lw s3, 40(a5)
|
|
; RV64IZCMP-NEXT: lw s4, 44(a5)
|
|
; RV64IZCMP-NEXT: lw s5, 48(a5)
|
|
; RV64IZCMP-NEXT: lw s6, 52(a5)
|
|
; RV64IZCMP-NEXT: lw s7, 56(a5)
|
|
; RV64IZCMP-NEXT: lw s8, 60(a5)
|
|
; RV64IZCMP-NEXT: lw s9, 64(a5)
|
|
; RV64IZCMP-NEXT: lw s10, 68(a5)
|
|
; RV64IZCMP-NEXT: lw s11, 72(a5)
|
|
; RV64IZCMP-NEXT: lw ra, 76(a5)
|
|
; RV64IZCMP-NEXT: lw s1, 80(a5)
|
|
; RV64IZCMP-NEXT: lw t3, 84(a5)
|
|
; RV64IZCMP-NEXT: lw t2, 88(a5)
|
|
; RV64IZCMP-NEXT: lw t1, 92(a5)
|
|
; RV64IZCMP-NEXT: lw a7, 112(a5)
|
|
; RV64IZCMP-NEXT: lw s0, 116(a5)
|
|
; RV64IZCMP-NEXT: lw a3, 120(a5)
|
|
; RV64IZCMP-NEXT: lw a0, 124(a5)
|
|
; RV64IZCMP-NEXT: lw a6, 96(a5)
|
|
; RV64IZCMP-NEXT: lw a4, 100(a5)
|
|
; RV64IZCMP-NEXT: lw a2, 104(a5)
|
|
; RV64IZCMP-NEXT: lw a1, 108(a5)
|
|
; RV64IZCMP-NEXT: sw a0, 124(a5)
|
|
; RV64IZCMP-NEXT: sw a3, 120(a5)
|
|
; RV64IZCMP-NEXT: sw s0, 116(a5)
|
|
; RV64IZCMP-NEXT: sw a7, 112(a5)
|
|
; RV64IZCMP-NEXT: sw a1, 108(a5)
|
|
; RV64IZCMP-NEXT: sw a2, 104(a5)
|
|
; RV64IZCMP-NEXT: sw a4, 100(a5)
|
|
; RV64IZCMP-NEXT: sw a6, 96(a5)
|
|
; RV64IZCMP-NEXT: sw t1, 92(a5)
|
|
; RV64IZCMP-NEXT: sw t2, 88(a5)
|
|
; RV64IZCMP-NEXT: sw t3, 84(a5)
|
|
; RV64IZCMP-NEXT: sw s1, 80(a5)
|
|
; RV64IZCMP-NEXT: sw ra, 76(a5)
|
|
; RV64IZCMP-NEXT: sw s11, 72(a5)
|
|
; RV64IZCMP-NEXT: sw s10, 68(a5)
|
|
; RV64IZCMP-NEXT: sw s9, 64(a5)
|
|
; RV64IZCMP-NEXT: sw s8, 60(a5)
|
|
; RV64IZCMP-NEXT: sw s7, 56(a5)
|
|
; RV64IZCMP-NEXT: sw s6, 52(a5)
|
|
; RV64IZCMP-NEXT: sw s5, 48(a5)
|
|
; RV64IZCMP-NEXT: sw s4, 44(a5)
|
|
; RV64IZCMP-NEXT: sw s3, 40(a5)
|
|
; RV64IZCMP-NEXT: sw s2, 36(a5)
|
|
; RV64IZCMP-NEXT: sw t6, 32(a5)
|
|
; RV64IZCMP-NEXT: sw t5, 28(a5)
|
|
; RV64IZCMP-NEXT: sw t4, 24(a5)
|
|
; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: sw a0, 20(a5)
|
|
; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: sw a0, 16(a5)
|
|
; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
|
|
; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
|
|
; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
|
|
; RV64IZCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq)(t0)
|
|
; RV64IZCMP-NEXT: ld t0, 168(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: ld t1, 160(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: ld t2, 152(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: ld a1, 136(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: ld a2, 128(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: ld a3, 120(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: ld a4, 112(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: ld a5, 104(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: ld a6, 96(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: ld a7, 88(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: ld t3, 80(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: ld t4, 72(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: ld t5, 64(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: ld t6, 56(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: .cfi_restore t0
|
|
; RV64IZCMP-NEXT: .cfi_restore t1
|
|
; RV64IZCMP-NEXT: .cfi_restore t2
|
|
; RV64IZCMP-NEXT: .cfi_restore a0
|
|
; RV64IZCMP-NEXT: .cfi_restore a1
|
|
; RV64IZCMP-NEXT: .cfi_restore a2
|
|
; RV64IZCMP-NEXT: .cfi_restore a3
|
|
; RV64IZCMP-NEXT: .cfi_restore a4
|
|
; RV64IZCMP-NEXT: .cfi_restore a5
|
|
; RV64IZCMP-NEXT: .cfi_restore a6
|
|
; RV64IZCMP-NEXT: .cfi_restore a7
|
|
; RV64IZCMP-NEXT: .cfi_restore t3
|
|
; RV64IZCMP-NEXT: .cfi_restore t4
|
|
; RV64IZCMP-NEXT: .cfi_restore t5
|
|
; RV64IZCMP-NEXT: .cfi_restore t6
|
|
; RV64IZCMP-NEXT: addi sp, sp, 128
|
|
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 160
|
|
; RV64IZCMP-NEXT: cm.pop {ra, s0-s11}, 160
|
|
; RV64IZCMP-NEXT: .cfi_restore ra
|
|
; RV64IZCMP-NEXT: .cfi_restore s0
|
|
; RV64IZCMP-NEXT: .cfi_restore s1
|
|
; RV64IZCMP-NEXT: .cfi_restore s2
|
|
; RV64IZCMP-NEXT: .cfi_restore s3
|
|
; RV64IZCMP-NEXT: .cfi_restore s4
|
|
; RV64IZCMP-NEXT: .cfi_restore s5
|
|
; RV64IZCMP-NEXT: .cfi_restore s6
|
|
; RV64IZCMP-NEXT: .cfi_restore s7
|
|
; RV64IZCMP-NEXT: .cfi_restore s8
|
|
; RV64IZCMP-NEXT: .cfi_restore s9
|
|
; RV64IZCMP-NEXT: .cfi_restore s10
|
|
; RV64IZCMP-NEXT: .cfi_restore s11
|
|
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 0
|
|
; RV64IZCMP-NEXT: mret
|
|
;
|
|
; RV32IZCMP-SR-LABEL: callee_with_irq:
|
|
; RV32IZCMP-SR: # %bb.0:
|
|
; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -112
|
|
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 112
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -52
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s0, -48
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s1, -44
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s2, -40
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s3, -36
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s4, -32
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s5, -28
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s6, -24
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s7, -20
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s8, -16
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s9, -12
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s10, -8
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s11, -4
|
|
; RV32IZCMP-SR-NEXT: addi sp, sp, -48
|
|
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 160
|
|
; RV32IZCMP-SR-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: sw t1, 88(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: sw t2, 84(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: sw a1, 76(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: sw a2, 72(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: sw a3, 68(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: sw a4, 64(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: sw a5, 60(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: sw a6, 56(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: sw a7, 52(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: sw t3, 48(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: sw t4, 44(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: sw t5, 40(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: sw t6, 36(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset t0, -68
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset t1, -72
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset t2, -76
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset a0, -80
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset a1, -84
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset a2, -88
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset a3, -92
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset a4, -96
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset a5, -100
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset a6, -104
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset a7, -108
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset t3, -112
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset t4, -116
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset t5, -120
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset t6, -124
|
|
; RV32IZCMP-SR-NEXT: lui t0, %hi(var_test_irq)
|
|
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
|
|
; RV32IZCMP-SR-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0)
|
|
; RV32IZCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0)
|
|
; RV32IZCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0)
|
|
; RV32IZCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 16(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: lw a0, 20(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: lw t4, 24(a5)
|
|
; RV32IZCMP-SR-NEXT: lw t5, 28(a5)
|
|
; RV32IZCMP-SR-NEXT: lw t6, 32(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s2, 36(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s3, 40(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s4, 44(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s5, 48(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s6, 52(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s7, 56(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s8, 60(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s9, 64(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s10, 68(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s11, 72(a5)
|
|
; RV32IZCMP-SR-NEXT: lw ra, 76(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s1, 80(a5)
|
|
; RV32IZCMP-SR-NEXT: lw t3, 84(a5)
|
|
; RV32IZCMP-SR-NEXT: lw t2, 88(a5)
|
|
; RV32IZCMP-SR-NEXT: lw t1, 92(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a7, 112(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s0, 116(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a3, 120(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 124(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a6, 96(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a4, 100(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a2, 104(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a1, 108(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a0, 124(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a3, 120(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s0, 116(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a7, 112(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a1, 108(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a2, 104(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a4, 100(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a6, 96(a5)
|
|
; RV32IZCMP-SR-NEXT: sw t1, 92(a5)
|
|
; RV32IZCMP-SR-NEXT: sw t2, 88(a5)
|
|
; RV32IZCMP-SR-NEXT: sw t3, 84(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s1, 80(a5)
|
|
; RV32IZCMP-SR-NEXT: sw ra, 76(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s11, 72(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s10, 68(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s9, 64(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s8, 60(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s7, 56(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s6, 52(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s5, 48(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s4, 44(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s3, 40(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s2, 36(a5)
|
|
; RV32IZCMP-SR-NEXT: sw t6, 32(a5)
|
|
; RV32IZCMP-SR-NEXT: sw t5, 28(a5)
|
|
; RV32IZCMP-SR-NEXT: sw t4, 24(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: sw a0, 20(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: sw a0, 16(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0)
|
|
; RV32IZCMP-SR-NEXT: lw t0, 92(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: lw t1, 88(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: lw t2, 84(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: lw a1, 76(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: lw a2, 72(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: lw a3, 68(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: lw a4, 64(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: lw a5, 60(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: lw a6, 56(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: lw a7, 52(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: lw t3, 48(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: lw t4, 44(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: lw t5, 40(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: lw t6, 36(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore t0
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore t1
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore t2
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore a0
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore a1
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore a2
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore a3
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore a4
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore a5
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore a6
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore a7
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore t3
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore t4
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore t5
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore t6
|
|
; RV32IZCMP-SR-NEXT: addi sp, sp, 48
|
|
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 112
|
|
; RV32IZCMP-SR-NEXT: cm.pop {ra, s0-s11}, 112
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore ra
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore s0
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore s1
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore s2
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore s3
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore s4
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore s5
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore s6
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore s7
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore s8
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore s9
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore s10
|
|
; RV32IZCMP-SR-NEXT: .cfi_restore s11
|
|
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
|
|
; RV32IZCMP-SR-NEXT: mret
|
|
;
|
|
; RV64IZCMP-SR-LABEL: callee_with_irq:
|
|
; RV64IZCMP-SR: # %bb.0:
|
|
; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -160
|
|
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 160
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -104
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s0, -96
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s1, -88
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s2, -80
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s3, -72
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s4, -64
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s5, -56
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s6, -48
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s7, -40
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s8, -32
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s9, -24
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s10, -16
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s11, -8
|
|
; RV64IZCMP-SR-NEXT: addi sp, sp, -128
|
|
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 288
|
|
; RV64IZCMP-SR-NEXT: sd t0, 168(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: sd t1, 160(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: sd t2, 152(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: sd a1, 136(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: sd a2, 128(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: sd a3, 120(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: sd a4, 112(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: sd a5, 104(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: sd a6, 96(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: sd a7, 88(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: sd t3, 80(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: sd t4, 72(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: sd t5, 64(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: sd t6, 56(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset t0, -120
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset t1, -128
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset t2, -136
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset a0, -144
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset a1, -152
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset a2, -160
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset a3, -168
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset a4, -176
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset a5, -184
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset a6, -192
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset a7, -200
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset t3, -208
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset t4, -216
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset t5, -224
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset t6, -232
|
|
; RV64IZCMP-SR-NEXT: lui t0, %hi(var_test_irq)
|
|
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
|
|
; RV64IZCMP-SR-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0)
|
|
; RV64IZCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0)
|
|
; RV64IZCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0)
|
|
; RV64IZCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq)
|
|
; RV64IZCMP-SR-NEXT: lw a0, 16(a5)
|
|
; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: lw a0, 20(a5)
|
|
; RV64IZCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: lw t4, 24(a5)
|
|
; RV64IZCMP-SR-NEXT: lw t5, 28(a5)
|
|
; RV64IZCMP-SR-NEXT: lw t6, 32(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s2, 36(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s3, 40(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s4, 44(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s5, 48(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s6, 52(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s7, 56(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s8, 60(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s9, 64(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s10, 68(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s11, 72(a5)
|
|
; RV64IZCMP-SR-NEXT: lw ra, 76(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s1, 80(a5)
|
|
; RV64IZCMP-SR-NEXT: lw t3, 84(a5)
|
|
; RV64IZCMP-SR-NEXT: lw t2, 88(a5)
|
|
; RV64IZCMP-SR-NEXT: lw t1, 92(a5)
|
|
; RV64IZCMP-SR-NEXT: lw a7, 112(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s0, 116(a5)
|
|
; RV64IZCMP-SR-NEXT: lw a3, 120(a5)
|
|
; RV64IZCMP-SR-NEXT: lw a0, 124(a5)
|
|
; RV64IZCMP-SR-NEXT: lw a6, 96(a5)
|
|
; RV64IZCMP-SR-NEXT: lw a4, 100(a5)
|
|
; RV64IZCMP-SR-NEXT: lw a2, 104(a5)
|
|
; RV64IZCMP-SR-NEXT: lw a1, 108(a5)
|
|
; RV64IZCMP-SR-NEXT: sw a0, 124(a5)
|
|
; RV64IZCMP-SR-NEXT: sw a3, 120(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s0, 116(a5)
|
|
; RV64IZCMP-SR-NEXT: sw a7, 112(a5)
|
|
; RV64IZCMP-SR-NEXT: sw a1, 108(a5)
|
|
; RV64IZCMP-SR-NEXT: sw a2, 104(a5)
|
|
; RV64IZCMP-SR-NEXT: sw a4, 100(a5)
|
|
; RV64IZCMP-SR-NEXT: sw a6, 96(a5)
|
|
; RV64IZCMP-SR-NEXT: sw t1, 92(a5)
|
|
; RV64IZCMP-SR-NEXT: sw t2, 88(a5)
|
|
; RV64IZCMP-SR-NEXT: sw t3, 84(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s1, 80(a5)
|
|
; RV64IZCMP-SR-NEXT: sw ra, 76(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s11, 72(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s10, 68(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s9, 64(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s8, 60(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s7, 56(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s6, 52(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s5, 48(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s4, 44(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s3, 40(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s2, 36(a5)
|
|
; RV64IZCMP-SR-NEXT: sw t6, 32(a5)
|
|
; RV64IZCMP-SR-NEXT: sw t5, 28(a5)
|
|
; RV64IZCMP-SR-NEXT: sw t4, 24(a5)
|
|
; RV64IZCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: sw a0, 20(a5)
|
|
; RV64IZCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: sw a0, 16(a5)
|
|
; RV64IZCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
|
|
; RV64IZCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
|
|
; RV64IZCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
|
|
; RV64IZCMP-SR-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0)
|
|
; RV64IZCMP-SR-NEXT: ld t0, 168(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: ld t1, 160(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: ld t2, 152(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: ld a1, 136(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: ld a2, 128(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: ld a3, 120(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: ld a4, 112(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: ld a5, 104(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: ld a6, 96(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: ld a7, 88(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: ld t3, 80(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: ld t4, 72(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: ld t5, 64(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: ld t6, 56(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore t0
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore t1
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore t2
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore a0
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore a1
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore a2
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore a3
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore a4
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore a5
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore a6
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore a7
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore t3
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore t4
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore t5
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore t6
|
|
; RV64IZCMP-SR-NEXT: addi sp, sp, 128
|
|
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 160
|
|
; RV64IZCMP-SR-NEXT: cm.pop {ra, s0-s11}, 160
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore ra
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore s0
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore s1
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore s2
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore s3
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore s4
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore s5
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore s6
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore s7
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore s8
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore s9
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore s10
|
|
; RV64IZCMP-SR-NEXT: .cfi_restore s11
|
|
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 0
|
|
; RV64IZCMP-SR-NEXT: mret
|
|
;
|
|
; RV32I-LABEL: callee_with_irq:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: addi sp, sp, -144
|
|
; RV32I-NEXT: .cfi_def_cfa_offset 144
|
|
; RV32I-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw t0, 136(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw t1, 132(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw t2, 128(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s0, 124(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s1, 120(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw a0, 116(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw a1, 112(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw a2, 108(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw a3, 104(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw a4, 100(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw a5, 96(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw a6, 92(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw a7, 88(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s2, 84(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s3, 80(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s4, 76(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s5, 72(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s6, 68(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s7, 64(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s8, 60(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s9, 56(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s10, 52(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s11, 48(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw t3, 44(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw t4, 40(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw t5, 36(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw t6, 32(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: .cfi_offset ra, -4
|
|
; RV32I-NEXT: .cfi_offset t0, -8
|
|
; RV32I-NEXT: .cfi_offset t1, -12
|
|
; RV32I-NEXT: .cfi_offset t2, -16
|
|
; RV32I-NEXT: .cfi_offset s0, -20
|
|
; RV32I-NEXT: .cfi_offset s1, -24
|
|
; RV32I-NEXT: .cfi_offset a0, -28
|
|
; RV32I-NEXT: .cfi_offset a1, -32
|
|
; RV32I-NEXT: .cfi_offset a2, -36
|
|
; RV32I-NEXT: .cfi_offset a3, -40
|
|
; RV32I-NEXT: .cfi_offset a4, -44
|
|
; RV32I-NEXT: .cfi_offset a5, -48
|
|
; RV32I-NEXT: .cfi_offset a6, -52
|
|
; RV32I-NEXT: .cfi_offset a7, -56
|
|
; RV32I-NEXT: .cfi_offset s2, -60
|
|
; RV32I-NEXT: .cfi_offset s3, -64
|
|
; RV32I-NEXT: .cfi_offset s4, -68
|
|
; RV32I-NEXT: .cfi_offset s5, -72
|
|
; RV32I-NEXT: .cfi_offset s6, -76
|
|
; RV32I-NEXT: .cfi_offset s7, -80
|
|
; RV32I-NEXT: .cfi_offset s8, -84
|
|
; RV32I-NEXT: .cfi_offset s9, -88
|
|
; RV32I-NEXT: .cfi_offset s10, -92
|
|
; RV32I-NEXT: .cfi_offset s11, -96
|
|
; RV32I-NEXT: .cfi_offset t3, -100
|
|
; RV32I-NEXT: .cfi_offset t4, -104
|
|
; RV32I-NEXT: .cfi_offset t5, -108
|
|
; RV32I-NEXT: .cfi_offset t6, -112
|
|
; RV32I-NEXT: lui a7, %hi(var_test_irq)
|
|
; RV32I-NEXT: lw a0, %lo(var_test_irq)(a7)
|
|
; RV32I-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: lw a0, %lo(var_test_irq+4)(a7)
|
|
; RV32I-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: lw a0, %lo(var_test_irq+8)(a7)
|
|
; RV32I-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: lw a0, %lo(var_test_irq+12)(a7)
|
|
; RV32I-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: addi a5, a7, %lo(var_test_irq)
|
|
; RV32I-NEXT: lw a0, 16(a5)
|
|
; RV32I-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: lw a0, 20(a5)
|
|
; RV32I-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: lw t0, 24(a5)
|
|
; RV32I-NEXT: lw t1, 28(a5)
|
|
; RV32I-NEXT: lw t2, 32(a5)
|
|
; RV32I-NEXT: lw t3, 36(a5)
|
|
; RV32I-NEXT: lw t4, 40(a5)
|
|
; RV32I-NEXT: lw t5, 44(a5)
|
|
; RV32I-NEXT: lw t6, 48(a5)
|
|
; RV32I-NEXT: lw s0, 52(a5)
|
|
; RV32I-NEXT: lw s1, 56(a5)
|
|
; RV32I-NEXT: lw s2, 60(a5)
|
|
; RV32I-NEXT: lw s3, 64(a5)
|
|
; RV32I-NEXT: lw s4, 68(a5)
|
|
; RV32I-NEXT: lw s5, 72(a5)
|
|
; RV32I-NEXT: lw s6, 76(a5)
|
|
; RV32I-NEXT: lw s7, 80(a5)
|
|
; RV32I-NEXT: lw s8, 84(a5)
|
|
; RV32I-NEXT: lw s9, 88(a5)
|
|
; RV32I-NEXT: lw s10, 92(a5)
|
|
; RV32I-NEXT: lw s11, 112(a5)
|
|
; RV32I-NEXT: lw ra, 116(a5)
|
|
; RV32I-NEXT: lw a3, 120(a5)
|
|
; RV32I-NEXT: lw a0, 124(a5)
|
|
; RV32I-NEXT: lw a6, 96(a5)
|
|
; RV32I-NEXT: lw a4, 100(a5)
|
|
; RV32I-NEXT: lw a2, 104(a5)
|
|
; RV32I-NEXT: lw a1, 108(a5)
|
|
; RV32I-NEXT: sw a0, 124(a5)
|
|
; RV32I-NEXT: sw a3, 120(a5)
|
|
; RV32I-NEXT: sw ra, 116(a5)
|
|
; RV32I-NEXT: sw s11, 112(a5)
|
|
; RV32I-NEXT: sw a1, 108(a5)
|
|
; RV32I-NEXT: sw a2, 104(a5)
|
|
; RV32I-NEXT: sw a4, 100(a5)
|
|
; RV32I-NEXT: sw a6, 96(a5)
|
|
; RV32I-NEXT: sw s10, 92(a5)
|
|
; RV32I-NEXT: sw s9, 88(a5)
|
|
; RV32I-NEXT: sw s8, 84(a5)
|
|
; RV32I-NEXT: sw s7, 80(a5)
|
|
; RV32I-NEXT: sw s6, 76(a5)
|
|
; RV32I-NEXT: sw s5, 72(a5)
|
|
; RV32I-NEXT: sw s4, 68(a5)
|
|
; RV32I-NEXT: sw s3, 64(a5)
|
|
; RV32I-NEXT: sw s2, 60(a5)
|
|
; RV32I-NEXT: sw s1, 56(a5)
|
|
; RV32I-NEXT: sw s0, 52(a5)
|
|
; RV32I-NEXT: sw t6, 48(a5)
|
|
; RV32I-NEXT: sw t5, 44(a5)
|
|
; RV32I-NEXT: sw t4, 40(a5)
|
|
; RV32I-NEXT: sw t3, 36(a5)
|
|
; RV32I-NEXT: sw t2, 32(a5)
|
|
; RV32I-NEXT: sw t1, 28(a5)
|
|
; RV32I-NEXT: sw t0, 24(a5)
|
|
; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: sw a0, 20(a5)
|
|
; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: sw a0, 16(a5)
|
|
; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: sw a0, %lo(var_test_irq+12)(a7)
|
|
; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: sw a0, %lo(var_test_irq+8)(a7)
|
|
; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: sw a0, %lo(var_test_irq+4)(a7)
|
|
; RV32I-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: sw a0, %lo(var_test_irq)(a7)
|
|
; RV32I-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw t0, 136(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw t1, 132(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw t2, 128(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s0, 124(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s1, 120(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw a0, 116(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw a1, 112(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw a2, 108(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw a3, 104(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw a4, 100(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw a5, 96(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw a6, 92(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw a7, 88(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s2, 84(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s3, 80(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s4, 76(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s5, 72(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s6, 68(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s7, 64(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s8, 60(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s9, 56(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s10, 52(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s11, 48(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw t3, 44(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw t4, 40(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw t5, 36(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw t6, 32(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: .cfi_restore ra
|
|
; RV32I-NEXT: .cfi_restore t0
|
|
; RV32I-NEXT: .cfi_restore t1
|
|
; RV32I-NEXT: .cfi_restore t2
|
|
; RV32I-NEXT: .cfi_restore s0
|
|
; RV32I-NEXT: .cfi_restore s1
|
|
; RV32I-NEXT: .cfi_restore a0
|
|
; RV32I-NEXT: .cfi_restore a1
|
|
; RV32I-NEXT: .cfi_restore a2
|
|
; RV32I-NEXT: .cfi_restore a3
|
|
; RV32I-NEXT: .cfi_restore a4
|
|
; RV32I-NEXT: .cfi_restore a5
|
|
; RV32I-NEXT: .cfi_restore a6
|
|
; RV32I-NEXT: .cfi_restore a7
|
|
; RV32I-NEXT: .cfi_restore s2
|
|
; RV32I-NEXT: .cfi_restore s3
|
|
; RV32I-NEXT: .cfi_restore s4
|
|
; RV32I-NEXT: .cfi_restore s5
|
|
; RV32I-NEXT: .cfi_restore s6
|
|
; RV32I-NEXT: .cfi_restore s7
|
|
; RV32I-NEXT: .cfi_restore s8
|
|
; RV32I-NEXT: .cfi_restore s9
|
|
; RV32I-NEXT: .cfi_restore s10
|
|
; RV32I-NEXT: .cfi_restore s11
|
|
; RV32I-NEXT: .cfi_restore t3
|
|
; RV32I-NEXT: .cfi_restore t4
|
|
; RV32I-NEXT: .cfi_restore t5
|
|
; RV32I-NEXT: .cfi_restore t6
|
|
; RV32I-NEXT: addi sp, sp, 144
|
|
; RV32I-NEXT: .cfi_def_cfa_offset 0
|
|
; RV32I-NEXT: mret
|
|
;
|
|
; RV64I-LABEL: callee_with_irq:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: addi sp, sp, -272
|
|
; RV64I-NEXT: .cfi_def_cfa_offset 272
|
|
; RV64I-NEXT: sd ra, 264(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd t0, 256(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd t1, 248(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd t2, 240(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s0, 232(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s1, 224(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd a0, 216(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd a1, 208(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd a2, 200(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd a3, 192(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd a4, 184(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd a5, 176(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd a6, 168(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd a7, 160(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s2, 152(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s3, 144(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s4, 136(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s5, 128(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s6, 120(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s7, 112(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s8, 104(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s9, 96(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s10, 88(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s11, 80(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd t3, 72(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd t4, 64(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd t5, 56(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd t6, 48(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: .cfi_offset ra, -8
|
|
; RV64I-NEXT: .cfi_offset t0, -16
|
|
; RV64I-NEXT: .cfi_offset t1, -24
|
|
; RV64I-NEXT: .cfi_offset t2, -32
|
|
; RV64I-NEXT: .cfi_offset s0, -40
|
|
; RV64I-NEXT: .cfi_offset s1, -48
|
|
; RV64I-NEXT: .cfi_offset a0, -56
|
|
; RV64I-NEXT: .cfi_offset a1, -64
|
|
; RV64I-NEXT: .cfi_offset a2, -72
|
|
; RV64I-NEXT: .cfi_offset a3, -80
|
|
; RV64I-NEXT: .cfi_offset a4, -88
|
|
; RV64I-NEXT: .cfi_offset a5, -96
|
|
; RV64I-NEXT: .cfi_offset a6, -104
|
|
; RV64I-NEXT: .cfi_offset a7, -112
|
|
; RV64I-NEXT: .cfi_offset s2, -120
|
|
; RV64I-NEXT: .cfi_offset s3, -128
|
|
; RV64I-NEXT: .cfi_offset s4, -136
|
|
; RV64I-NEXT: .cfi_offset s5, -144
|
|
; RV64I-NEXT: .cfi_offset s6, -152
|
|
; RV64I-NEXT: .cfi_offset s7, -160
|
|
; RV64I-NEXT: .cfi_offset s8, -168
|
|
; RV64I-NEXT: .cfi_offset s9, -176
|
|
; RV64I-NEXT: .cfi_offset s10, -184
|
|
; RV64I-NEXT: .cfi_offset s11, -192
|
|
; RV64I-NEXT: .cfi_offset t3, -200
|
|
; RV64I-NEXT: .cfi_offset t4, -208
|
|
; RV64I-NEXT: .cfi_offset t5, -216
|
|
; RV64I-NEXT: .cfi_offset t6, -224
|
|
; RV64I-NEXT: lui a7, %hi(var_test_irq)
|
|
; RV64I-NEXT: lw a0, %lo(var_test_irq)(a7)
|
|
; RV64I-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: lw a0, %lo(var_test_irq+4)(a7)
|
|
; RV64I-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: lw a0, %lo(var_test_irq+8)(a7)
|
|
; RV64I-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: lw a0, %lo(var_test_irq+12)(a7)
|
|
; RV64I-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: addi a5, a7, %lo(var_test_irq)
|
|
; RV64I-NEXT: lw a0, 16(a5)
|
|
; RV64I-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: lw a0, 20(a5)
|
|
; RV64I-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: lw t0, 24(a5)
|
|
; RV64I-NEXT: lw t1, 28(a5)
|
|
; RV64I-NEXT: lw t2, 32(a5)
|
|
; RV64I-NEXT: lw t3, 36(a5)
|
|
; RV64I-NEXT: lw t4, 40(a5)
|
|
; RV64I-NEXT: lw t5, 44(a5)
|
|
; RV64I-NEXT: lw t6, 48(a5)
|
|
; RV64I-NEXT: lw s0, 52(a5)
|
|
; RV64I-NEXT: lw s1, 56(a5)
|
|
; RV64I-NEXT: lw s2, 60(a5)
|
|
; RV64I-NEXT: lw s3, 64(a5)
|
|
; RV64I-NEXT: lw s4, 68(a5)
|
|
; RV64I-NEXT: lw s5, 72(a5)
|
|
; RV64I-NEXT: lw s6, 76(a5)
|
|
; RV64I-NEXT: lw s7, 80(a5)
|
|
; RV64I-NEXT: lw s8, 84(a5)
|
|
; RV64I-NEXT: lw s9, 88(a5)
|
|
; RV64I-NEXT: lw s10, 92(a5)
|
|
; RV64I-NEXT: lw s11, 112(a5)
|
|
; RV64I-NEXT: lw ra, 116(a5)
|
|
; RV64I-NEXT: lw a3, 120(a5)
|
|
; RV64I-NEXT: lw a0, 124(a5)
|
|
; RV64I-NEXT: lw a6, 96(a5)
|
|
; RV64I-NEXT: lw a4, 100(a5)
|
|
; RV64I-NEXT: lw a2, 104(a5)
|
|
; RV64I-NEXT: lw a1, 108(a5)
|
|
; RV64I-NEXT: sw a0, 124(a5)
|
|
; RV64I-NEXT: sw a3, 120(a5)
|
|
; RV64I-NEXT: sw ra, 116(a5)
|
|
; RV64I-NEXT: sw s11, 112(a5)
|
|
; RV64I-NEXT: sw a1, 108(a5)
|
|
; RV64I-NEXT: sw a2, 104(a5)
|
|
; RV64I-NEXT: sw a4, 100(a5)
|
|
; RV64I-NEXT: sw a6, 96(a5)
|
|
; RV64I-NEXT: sw s10, 92(a5)
|
|
; RV64I-NEXT: sw s9, 88(a5)
|
|
; RV64I-NEXT: sw s8, 84(a5)
|
|
; RV64I-NEXT: sw s7, 80(a5)
|
|
; RV64I-NEXT: sw s6, 76(a5)
|
|
; RV64I-NEXT: sw s5, 72(a5)
|
|
; RV64I-NEXT: sw s4, 68(a5)
|
|
; RV64I-NEXT: sw s3, 64(a5)
|
|
; RV64I-NEXT: sw s2, 60(a5)
|
|
; RV64I-NEXT: sw s1, 56(a5)
|
|
; RV64I-NEXT: sw s0, 52(a5)
|
|
; RV64I-NEXT: sw t6, 48(a5)
|
|
; RV64I-NEXT: sw t5, 44(a5)
|
|
; RV64I-NEXT: sw t4, 40(a5)
|
|
; RV64I-NEXT: sw t3, 36(a5)
|
|
; RV64I-NEXT: sw t2, 32(a5)
|
|
; RV64I-NEXT: sw t1, 28(a5)
|
|
; RV64I-NEXT: sw t0, 24(a5)
|
|
; RV64I-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: sw a0, 20(a5)
|
|
; RV64I-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: sw a0, 16(a5)
|
|
; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: sw a0, %lo(var_test_irq+12)(a7)
|
|
; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: sw a0, %lo(var_test_irq+8)(a7)
|
|
; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: sw a0, %lo(var_test_irq+4)(a7)
|
|
; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: sw a0, %lo(var_test_irq)(a7)
|
|
; RV64I-NEXT: ld ra, 264(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld t0, 256(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld t1, 248(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld t2, 240(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s0, 232(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s1, 224(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld a0, 216(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld a1, 208(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld a2, 200(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld a3, 192(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld a4, 184(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld a5, 176(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld a6, 168(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld a7, 160(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s2, 152(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s3, 144(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s4, 136(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s5, 128(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s6, 120(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s7, 112(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s8, 104(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s9, 96(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s10, 88(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s11, 80(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld t3, 72(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld t4, 64(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld t5, 56(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld t6, 48(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: .cfi_restore ra
|
|
; RV64I-NEXT: .cfi_restore t0
|
|
; RV64I-NEXT: .cfi_restore t1
|
|
; RV64I-NEXT: .cfi_restore t2
|
|
; RV64I-NEXT: .cfi_restore s0
|
|
; RV64I-NEXT: .cfi_restore s1
|
|
; RV64I-NEXT: .cfi_restore a0
|
|
; RV64I-NEXT: .cfi_restore a1
|
|
; RV64I-NEXT: .cfi_restore a2
|
|
; RV64I-NEXT: .cfi_restore a3
|
|
; RV64I-NEXT: .cfi_restore a4
|
|
; RV64I-NEXT: .cfi_restore a5
|
|
; RV64I-NEXT: .cfi_restore a6
|
|
; RV64I-NEXT: .cfi_restore a7
|
|
; RV64I-NEXT: .cfi_restore s2
|
|
; RV64I-NEXT: .cfi_restore s3
|
|
; RV64I-NEXT: .cfi_restore s4
|
|
; RV64I-NEXT: .cfi_restore s5
|
|
; RV64I-NEXT: .cfi_restore s6
|
|
; RV64I-NEXT: .cfi_restore s7
|
|
; RV64I-NEXT: .cfi_restore s8
|
|
; RV64I-NEXT: .cfi_restore s9
|
|
; RV64I-NEXT: .cfi_restore s10
|
|
; RV64I-NEXT: .cfi_restore s11
|
|
; RV64I-NEXT: .cfi_restore t3
|
|
; RV64I-NEXT: .cfi_restore t4
|
|
; RV64I-NEXT: .cfi_restore t5
|
|
; RV64I-NEXT: .cfi_restore t6
|
|
; RV64I-NEXT: addi sp, sp, 272
|
|
; RV64I-NEXT: .cfi_def_cfa_offset 0
|
|
; RV64I-NEXT: mret
|
|
%val = load [32 x i32], ptr @var_test_irq
|
|
store volatile [32 x i32] %val, ptr @var_test_irq
|
|
ret void
|
|
}
|
|
|
|
define void @callee_no_irq() {
|
|
; RV32IZCMP-LABEL: callee_no_irq:
|
|
; RV32IZCMP: # %bb.0:
|
|
; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -96
|
|
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 96
|
|
; RV32IZCMP-NEXT: .cfi_offset ra, -52
|
|
; RV32IZCMP-NEXT: .cfi_offset s0, -48
|
|
; RV32IZCMP-NEXT: .cfi_offset s1, -44
|
|
; RV32IZCMP-NEXT: .cfi_offset s2, -40
|
|
; RV32IZCMP-NEXT: .cfi_offset s3, -36
|
|
; RV32IZCMP-NEXT: .cfi_offset s4, -32
|
|
; RV32IZCMP-NEXT: .cfi_offset s5, -28
|
|
; RV32IZCMP-NEXT: .cfi_offset s6, -24
|
|
; RV32IZCMP-NEXT: .cfi_offset s7, -20
|
|
; RV32IZCMP-NEXT: .cfi_offset s8, -16
|
|
; RV32IZCMP-NEXT: .cfi_offset s9, -12
|
|
; RV32IZCMP-NEXT: .cfi_offset s10, -8
|
|
; RV32IZCMP-NEXT: .cfi_offset s11, -4
|
|
; RV32IZCMP-NEXT: lui t0, %hi(var_test_irq)
|
|
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
|
|
; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0)
|
|
; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0)
|
|
; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0)
|
|
; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: addi a5, t0, %lo(var_test_irq)
|
|
; RV32IZCMP-NEXT: lw a0, 16(a5)
|
|
; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: lw a0, 20(a5)
|
|
; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-NEXT: lw t4, 24(a5)
|
|
; RV32IZCMP-NEXT: lw t5, 28(a5)
|
|
; RV32IZCMP-NEXT: lw t6, 32(a5)
|
|
; RV32IZCMP-NEXT: lw s2, 36(a5)
|
|
; RV32IZCMP-NEXT: lw s3, 40(a5)
|
|
; RV32IZCMP-NEXT: lw s4, 44(a5)
|
|
; RV32IZCMP-NEXT: lw s5, 48(a5)
|
|
; RV32IZCMP-NEXT: lw s6, 52(a5)
|
|
; RV32IZCMP-NEXT: lw s7, 56(a5)
|
|
; RV32IZCMP-NEXT: lw s8, 60(a5)
|
|
; RV32IZCMP-NEXT: lw s9, 64(a5)
|
|
; RV32IZCMP-NEXT: lw s10, 68(a5)
|
|
; RV32IZCMP-NEXT: lw s11, 72(a5)
|
|
; RV32IZCMP-NEXT: lw ra, 76(a5)
|
|
; RV32IZCMP-NEXT: lw s1, 80(a5)
|
|
; RV32IZCMP-NEXT: lw t3, 84(a5)
|
|
; RV32IZCMP-NEXT: lw t2, 88(a5)
|
|
; RV32IZCMP-NEXT: lw t1, 92(a5)
|
|
; RV32IZCMP-NEXT: lw a7, 112(a5)
|
|
; RV32IZCMP-NEXT: lw s0, 116(a5)
|
|
; RV32IZCMP-NEXT: lw a3, 120(a5)
|
|
; RV32IZCMP-NEXT: lw a0, 124(a5)
|
|
; RV32IZCMP-NEXT: lw a6, 96(a5)
|
|
; RV32IZCMP-NEXT: lw a4, 100(a5)
|
|
; RV32IZCMP-NEXT: lw a2, 104(a5)
|
|
; RV32IZCMP-NEXT: lw a1, 108(a5)
|
|
; RV32IZCMP-NEXT: sw a0, 124(a5)
|
|
; RV32IZCMP-NEXT: sw a3, 120(a5)
|
|
; RV32IZCMP-NEXT: sw s0, 116(a5)
|
|
; RV32IZCMP-NEXT: sw a7, 112(a5)
|
|
; RV32IZCMP-NEXT: sw a1, 108(a5)
|
|
; RV32IZCMP-NEXT: sw a2, 104(a5)
|
|
; RV32IZCMP-NEXT: sw a4, 100(a5)
|
|
; RV32IZCMP-NEXT: sw a6, 96(a5)
|
|
; RV32IZCMP-NEXT: sw t1, 92(a5)
|
|
; RV32IZCMP-NEXT: sw t2, 88(a5)
|
|
; RV32IZCMP-NEXT: sw t3, 84(a5)
|
|
; RV32IZCMP-NEXT: sw s1, 80(a5)
|
|
; RV32IZCMP-NEXT: sw ra, 76(a5)
|
|
; RV32IZCMP-NEXT: sw s11, 72(a5)
|
|
; RV32IZCMP-NEXT: sw s10, 68(a5)
|
|
; RV32IZCMP-NEXT: sw s9, 64(a5)
|
|
; RV32IZCMP-NEXT: sw s8, 60(a5)
|
|
; RV32IZCMP-NEXT: sw s7, 56(a5)
|
|
; RV32IZCMP-NEXT: sw s6, 52(a5)
|
|
; RV32IZCMP-NEXT: sw s5, 48(a5)
|
|
; RV32IZCMP-NEXT: sw s4, 44(a5)
|
|
; RV32IZCMP-NEXT: sw s3, 40(a5)
|
|
; RV32IZCMP-NEXT: sw s2, 36(a5)
|
|
; RV32IZCMP-NEXT: sw t6, 32(a5)
|
|
; RV32IZCMP-NEXT: sw t5, 28(a5)
|
|
; RV32IZCMP-NEXT: sw t4, 24(a5)
|
|
; RV32IZCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: sw a0, 20(a5)
|
|
; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: sw a0, 16(a5)
|
|
; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
|
|
; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
|
|
; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
|
|
; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq)(t0)
|
|
; RV32IZCMP-NEXT: cm.popret {ra, s0-s11}, 96
|
|
;
|
|
; RV64IZCMP-LABEL: callee_no_irq:
|
|
; RV64IZCMP: # %bb.0:
|
|
; RV64IZCMP-NEXT: cm.push {ra, s0-s11}, -160
|
|
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 160
|
|
; RV64IZCMP-NEXT: .cfi_offset ra, -104
|
|
; RV64IZCMP-NEXT: .cfi_offset s0, -96
|
|
; RV64IZCMP-NEXT: .cfi_offset s1, -88
|
|
; RV64IZCMP-NEXT: .cfi_offset s2, -80
|
|
; RV64IZCMP-NEXT: .cfi_offset s3, -72
|
|
; RV64IZCMP-NEXT: .cfi_offset s4, -64
|
|
; RV64IZCMP-NEXT: .cfi_offset s5, -56
|
|
; RV64IZCMP-NEXT: .cfi_offset s6, -48
|
|
; RV64IZCMP-NEXT: .cfi_offset s7, -40
|
|
; RV64IZCMP-NEXT: .cfi_offset s8, -32
|
|
; RV64IZCMP-NEXT: .cfi_offset s9, -24
|
|
; RV64IZCMP-NEXT: .cfi_offset s10, -16
|
|
; RV64IZCMP-NEXT: .cfi_offset s11, -8
|
|
; RV64IZCMP-NEXT: lui t0, %hi(var_test_irq)
|
|
; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(t0)
|
|
; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(t0)
|
|
; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(t0)
|
|
; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(t0)
|
|
; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: addi a5, t0, %lo(var_test_irq)
|
|
; RV64IZCMP-NEXT: lw a0, 16(a5)
|
|
; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: lw a0, 20(a5)
|
|
; RV64IZCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-NEXT: lw t4, 24(a5)
|
|
; RV64IZCMP-NEXT: lw t5, 28(a5)
|
|
; RV64IZCMP-NEXT: lw t6, 32(a5)
|
|
; RV64IZCMP-NEXT: lw s2, 36(a5)
|
|
; RV64IZCMP-NEXT: lw s3, 40(a5)
|
|
; RV64IZCMP-NEXT: lw s4, 44(a5)
|
|
; RV64IZCMP-NEXT: lw s5, 48(a5)
|
|
; RV64IZCMP-NEXT: lw s6, 52(a5)
|
|
; RV64IZCMP-NEXT: lw s7, 56(a5)
|
|
; RV64IZCMP-NEXT: lw s8, 60(a5)
|
|
; RV64IZCMP-NEXT: lw s9, 64(a5)
|
|
; RV64IZCMP-NEXT: lw s10, 68(a5)
|
|
; RV64IZCMP-NEXT: lw s11, 72(a5)
|
|
; RV64IZCMP-NEXT: lw ra, 76(a5)
|
|
; RV64IZCMP-NEXT: lw s1, 80(a5)
|
|
; RV64IZCMP-NEXT: lw t3, 84(a5)
|
|
; RV64IZCMP-NEXT: lw t2, 88(a5)
|
|
; RV64IZCMP-NEXT: lw t1, 92(a5)
|
|
; RV64IZCMP-NEXT: lw a7, 112(a5)
|
|
; RV64IZCMP-NEXT: lw s0, 116(a5)
|
|
; RV64IZCMP-NEXT: lw a3, 120(a5)
|
|
; RV64IZCMP-NEXT: lw a0, 124(a5)
|
|
; RV64IZCMP-NEXT: lw a6, 96(a5)
|
|
; RV64IZCMP-NEXT: lw a4, 100(a5)
|
|
; RV64IZCMP-NEXT: lw a2, 104(a5)
|
|
; RV64IZCMP-NEXT: lw a1, 108(a5)
|
|
; RV64IZCMP-NEXT: sw a0, 124(a5)
|
|
; RV64IZCMP-NEXT: sw a3, 120(a5)
|
|
; RV64IZCMP-NEXT: sw s0, 116(a5)
|
|
; RV64IZCMP-NEXT: sw a7, 112(a5)
|
|
; RV64IZCMP-NEXT: sw a1, 108(a5)
|
|
; RV64IZCMP-NEXT: sw a2, 104(a5)
|
|
; RV64IZCMP-NEXT: sw a4, 100(a5)
|
|
; RV64IZCMP-NEXT: sw a6, 96(a5)
|
|
; RV64IZCMP-NEXT: sw t1, 92(a5)
|
|
; RV64IZCMP-NEXT: sw t2, 88(a5)
|
|
; RV64IZCMP-NEXT: sw t3, 84(a5)
|
|
; RV64IZCMP-NEXT: sw s1, 80(a5)
|
|
; RV64IZCMP-NEXT: sw ra, 76(a5)
|
|
; RV64IZCMP-NEXT: sw s11, 72(a5)
|
|
; RV64IZCMP-NEXT: sw s10, 68(a5)
|
|
; RV64IZCMP-NEXT: sw s9, 64(a5)
|
|
; RV64IZCMP-NEXT: sw s8, 60(a5)
|
|
; RV64IZCMP-NEXT: sw s7, 56(a5)
|
|
; RV64IZCMP-NEXT: sw s6, 52(a5)
|
|
; RV64IZCMP-NEXT: sw s5, 48(a5)
|
|
; RV64IZCMP-NEXT: sw s4, 44(a5)
|
|
; RV64IZCMP-NEXT: sw s3, 40(a5)
|
|
; RV64IZCMP-NEXT: sw s2, 36(a5)
|
|
; RV64IZCMP-NEXT: sw t6, 32(a5)
|
|
; RV64IZCMP-NEXT: sw t5, 28(a5)
|
|
; RV64IZCMP-NEXT: sw t4, 24(a5)
|
|
; RV64IZCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: sw a0, 20(a5)
|
|
; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: sw a0, 16(a5)
|
|
; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(t0)
|
|
; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(t0)
|
|
; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(t0)
|
|
; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq)(t0)
|
|
; RV64IZCMP-NEXT: cm.popret {ra, s0-s11}, 160
|
|
;
|
|
; RV32IZCMP-SR-LABEL: callee_no_irq:
|
|
; RV32IZCMP-SR: # %bb.0:
|
|
; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -96
|
|
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 96
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -52
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s0, -48
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s1, -44
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s2, -40
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s3, -36
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s4, -32
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s5, -28
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s6, -24
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s7, -20
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s8, -16
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s9, -12
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s10, -8
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s11, -4
|
|
; RV32IZCMP-SR-NEXT: lui t0, %hi(var_test_irq)
|
|
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
|
|
; RV32IZCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0)
|
|
; RV32IZCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0)
|
|
; RV32IZCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0)
|
|
; RV32IZCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 16(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: lw a0, 20(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
|
|
; RV32IZCMP-SR-NEXT: lw t4, 24(a5)
|
|
; RV32IZCMP-SR-NEXT: lw t5, 28(a5)
|
|
; RV32IZCMP-SR-NEXT: lw t6, 32(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s2, 36(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s3, 40(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s4, 44(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s5, 48(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s6, 52(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s7, 56(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s8, 60(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s9, 64(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s10, 68(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s11, 72(a5)
|
|
; RV32IZCMP-SR-NEXT: lw ra, 76(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s1, 80(a5)
|
|
; RV32IZCMP-SR-NEXT: lw t3, 84(a5)
|
|
; RV32IZCMP-SR-NEXT: lw t2, 88(a5)
|
|
; RV32IZCMP-SR-NEXT: lw t1, 92(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a7, 112(a5)
|
|
; RV32IZCMP-SR-NEXT: lw s0, 116(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a3, 120(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 124(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a6, 96(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a4, 100(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a2, 104(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a1, 108(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a0, 124(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a3, 120(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s0, 116(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a7, 112(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a1, 108(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a2, 104(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a4, 100(a5)
|
|
; RV32IZCMP-SR-NEXT: sw a6, 96(a5)
|
|
; RV32IZCMP-SR-NEXT: sw t1, 92(a5)
|
|
; RV32IZCMP-SR-NEXT: sw t2, 88(a5)
|
|
; RV32IZCMP-SR-NEXT: sw t3, 84(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s1, 80(a5)
|
|
; RV32IZCMP-SR-NEXT: sw ra, 76(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s11, 72(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s10, 68(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s9, 64(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s8, 60(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s7, 56(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s6, 52(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s5, 48(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s4, 44(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s3, 40(a5)
|
|
; RV32IZCMP-SR-NEXT: sw s2, 36(a5)
|
|
; RV32IZCMP-SR-NEXT: sw t6, 32(a5)
|
|
; RV32IZCMP-SR-NEXT: sw t5, 28(a5)
|
|
; RV32IZCMP-SR-NEXT: sw t4, 24(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: sw a0, 20(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: sw a0, 16(a5)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
|
|
; RV32IZCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
|
|
; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0)
|
|
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s11}, 96
|
|
;
|
|
; RV64IZCMP-SR-LABEL: callee_no_irq:
|
|
; RV64IZCMP-SR: # %bb.0:
|
|
; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -160
|
|
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 160
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -104
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s0, -96
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s1, -88
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s2, -80
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s3, -72
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s4, -64
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s5, -56
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s6, -48
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s7, -40
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s8, -32
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s9, -24
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s10, -16
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s11, -8
|
|
; RV64IZCMP-SR-NEXT: lui t0, %hi(var_test_irq)
|
|
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(t0)
|
|
; RV64IZCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(t0)
|
|
; RV64IZCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(t0)
|
|
; RV64IZCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(t0)
|
|
; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: addi a5, t0, %lo(var_test_irq)
|
|
; RV64IZCMP-SR-NEXT: lw a0, 16(a5)
|
|
; RV64IZCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: lw a0, 20(a5)
|
|
; RV64IZCMP-SR-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
|
|
; RV64IZCMP-SR-NEXT: lw t4, 24(a5)
|
|
; RV64IZCMP-SR-NEXT: lw t5, 28(a5)
|
|
; RV64IZCMP-SR-NEXT: lw t6, 32(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s2, 36(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s3, 40(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s4, 44(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s5, 48(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s6, 52(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s7, 56(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s8, 60(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s9, 64(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s10, 68(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s11, 72(a5)
|
|
; RV64IZCMP-SR-NEXT: lw ra, 76(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s1, 80(a5)
|
|
; RV64IZCMP-SR-NEXT: lw t3, 84(a5)
|
|
; RV64IZCMP-SR-NEXT: lw t2, 88(a5)
|
|
; RV64IZCMP-SR-NEXT: lw t1, 92(a5)
|
|
; RV64IZCMP-SR-NEXT: lw a7, 112(a5)
|
|
; RV64IZCMP-SR-NEXT: lw s0, 116(a5)
|
|
; RV64IZCMP-SR-NEXT: lw a3, 120(a5)
|
|
; RV64IZCMP-SR-NEXT: lw a0, 124(a5)
|
|
; RV64IZCMP-SR-NEXT: lw a6, 96(a5)
|
|
; RV64IZCMP-SR-NEXT: lw a4, 100(a5)
|
|
; RV64IZCMP-SR-NEXT: lw a2, 104(a5)
|
|
; RV64IZCMP-SR-NEXT: lw a1, 108(a5)
|
|
; RV64IZCMP-SR-NEXT: sw a0, 124(a5)
|
|
; RV64IZCMP-SR-NEXT: sw a3, 120(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s0, 116(a5)
|
|
; RV64IZCMP-SR-NEXT: sw a7, 112(a5)
|
|
; RV64IZCMP-SR-NEXT: sw a1, 108(a5)
|
|
; RV64IZCMP-SR-NEXT: sw a2, 104(a5)
|
|
; RV64IZCMP-SR-NEXT: sw a4, 100(a5)
|
|
; RV64IZCMP-SR-NEXT: sw a6, 96(a5)
|
|
; RV64IZCMP-SR-NEXT: sw t1, 92(a5)
|
|
; RV64IZCMP-SR-NEXT: sw t2, 88(a5)
|
|
; RV64IZCMP-SR-NEXT: sw t3, 84(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s1, 80(a5)
|
|
; RV64IZCMP-SR-NEXT: sw ra, 76(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s11, 72(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s10, 68(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s9, 64(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s8, 60(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s7, 56(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s6, 52(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s5, 48(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s4, 44(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s3, 40(a5)
|
|
; RV64IZCMP-SR-NEXT: sw s2, 36(a5)
|
|
; RV64IZCMP-SR-NEXT: sw t6, 32(a5)
|
|
; RV64IZCMP-SR-NEXT: sw t5, 28(a5)
|
|
; RV64IZCMP-SR-NEXT: sw t4, 24(a5)
|
|
; RV64IZCMP-SR-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: sw a0, 20(a5)
|
|
; RV64IZCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: sw a0, 16(a5)
|
|
; RV64IZCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(t0)
|
|
; RV64IZCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(t0)
|
|
; RV64IZCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(t0)
|
|
; RV64IZCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
|
|
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(t0)
|
|
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s11}, 160
|
|
;
|
|
; RV32I-LABEL: callee_no_irq:
|
|
; RV32I: # %bb.0:
|
|
; RV32I-NEXT: addi sp, sp, -80
|
|
; RV32I-NEXT: .cfi_def_cfa_offset 80
|
|
; RV32I-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s1, 68(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s2, 64(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s3, 60(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s4, 56(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s5, 52(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s6, 48(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s7, 44(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s8, 40(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s9, 36(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s10, 32(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s11, 28(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: .cfi_offset ra, -4
|
|
; RV32I-NEXT: .cfi_offset s0, -8
|
|
; RV32I-NEXT: .cfi_offset s1, -12
|
|
; RV32I-NEXT: .cfi_offset s2, -16
|
|
; RV32I-NEXT: .cfi_offset s3, -20
|
|
; RV32I-NEXT: .cfi_offset s4, -24
|
|
; RV32I-NEXT: .cfi_offset s5, -28
|
|
; RV32I-NEXT: .cfi_offset s6, -32
|
|
; RV32I-NEXT: .cfi_offset s7, -36
|
|
; RV32I-NEXT: .cfi_offset s8, -40
|
|
; RV32I-NEXT: .cfi_offset s9, -44
|
|
; RV32I-NEXT: .cfi_offset s10, -48
|
|
; RV32I-NEXT: .cfi_offset s11, -52
|
|
; RV32I-NEXT: lui a7, %hi(var_test_irq)
|
|
; RV32I-NEXT: lw a0, %lo(var_test_irq)(a7)
|
|
; RV32I-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: lw a0, %lo(var_test_irq+4)(a7)
|
|
; RV32I-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: lw a0, %lo(var_test_irq+8)(a7)
|
|
; RV32I-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: lw a0, %lo(var_test_irq+12)(a7)
|
|
; RV32I-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: addi a5, a7, %lo(var_test_irq)
|
|
; RV32I-NEXT: lw a0, 16(a5)
|
|
; RV32I-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: lw a0, 20(a5)
|
|
; RV32I-NEXT: sw a0, 4(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: lw t0, 24(a5)
|
|
; RV32I-NEXT: lw t1, 28(a5)
|
|
; RV32I-NEXT: lw t2, 32(a5)
|
|
; RV32I-NEXT: lw t3, 36(a5)
|
|
; RV32I-NEXT: lw t4, 40(a5)
|
|
; RV32I-NEXT: lw t5, 44(a5)
|
|
; RV32I-NEXT: lw t6, 48(a5)
|
|
; RV32I-NEXT: lw s0, 52(a5)
|
|
; RV32I-NEXT: lw s1, 56(a5)
|
|
; RV32I-NEXT: lw s2, 60(a5)
|
|
; RV32I-NEXT: lw s3, 64(a5)
|
|
; RV32I-NEXT: lw s4, 68(a5)
|
|
; RV32I-NEXT: lw s5, 72(a5)
|
|
; RV32I-NEXT: lw s6, 76(a5)
|
|
; RV32I-NEXT: lw s7, 80(a5)
|
|
; RV32I-NEXT: lw s8, 84(a5)
|
|
; RV32I-NEXT: lw s9, 88(a5)
|
|
; RV32I-NEXT: lw s10, 92(a5)
|
|
; RV32I-NEXT: lw s11, 112(a5)
|
|
; RV32I-NEXT: lw ra, 116(a5)
|
|
; RV32I-NEXT: lw a3, 120(a5)
|
|
; RV32I-NEXT: lw a0, 124(a5)
|
|
; RV32I-NEXT: lw a6, 96(a5)
|
|
; RV32I-NEXT: lw a4, 100(a5)
|
|
; RV32I-NEXT: lw a2, 104(a5)
|
|
; RV32I-NEXT: lw a1, 108(a5)
|
|
; RV32I-NEXT: sw a0, 124(a5)
|
|
; RV32I-NEXT: sw a3, 120(a5)
|
|
; RV32I-NEXT: sw ra, 116(a5)
|
|
; RV32I-NEXT: sw s11, 112(a5)
|
|
; RV32I-NEXT: sw a1, 108(a5)
|
|
; RV32I-NEXT: sw a2, 104(a5)
|
|
; RV32I-NEXT: sw a4, 100(a5)
|
|
; RV32I-NEXT: sw a6, 96(a5)
|
|
; RV32I-NEXT: sw s10, 92(a5)
|
|
; RV32I-NEXT: sw s9, 88(a5)
|
|
; RV32I-NEXT: sw s8, 84(a5)
|
|
; RV32I-NEXT: sw s7, 80(a5)
|
|
; RV32I-NEXT: sw s6, 76(a5)
|
|
; RV32I-NEXT: sw s5, 72(a5)
|
|
; RV32I-NEXT: sw s4, 68(a5)
|
|
; RV32I-NEXT: sw s3, 64(a5)
|
|
; RV32I-NEXT: sw s2, 60(a5)
|
|
; RV32I-NEXT: sw s1, 56(a5)
|
|
; RV32I-NEXT: sw s0, 52(a5)
|
|
; RV32I-NEXT: sw t6, 48(a5)
|
|
; RV32I-NEXT: sw t5, 44(a5)
|
|
; RV32I-NEXT: sw t4, 40(a5)
|
|
; RV32I-NEXT: sw t3, 36(a5)
|
|
; RV32I-NEXT: sw t2, 32(a5)
|
|
; RV32I-NEXT: sw t1, 28(a5)
|
|
; RV32I-NEXT: sw t0, 24(a5)
|
|
; RV32I-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: sw a0, 20(a5)
|
|
; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: sw a0, 16(a5)
|
|
; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: sw a0, %lo(var_test_irq+12)(a7)
|
|
; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: sw a0, %lo(var_test_irq+8)(a7)
|
|
; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: sw a0, %lo(var_test_irq+4)(a7)
|
|
; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: sw a0, %lo(var_test_irq)(a7)
|
|
; RV32I-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s2, 64(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s3, 60(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s4, 56(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s5, 52(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s6, 48(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s7, 44(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s8, 40(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s9, 36(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s10, 32(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s11, 28(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: .cfi_restore ra
|
|
; RV32I-NEXT: .cfi_restore s0
|
|
; RV32I-NEXT: .cfi_restore s1
|
|
; RV32I-NEXT: .cfi_restore s2
|
|
; RV32I-NEXT: .cfi_restore s3
|
|
; RV32I-NEXT: .cfi_restore s4
|
|
; RV32I-NEXT: .cfi_restore s5
|
|
; RV32I-NEXT: .cfi_restore s6
|
|
; RV32I-NEXT: .cfi_restore s7
|
|
; RV32I-NEXT: .cfi_restore s8
|
|
; RV32I-NEXT: .cfi_restore s9
|
|
; RV32I-NEXT: .cfi_restore s10
|
|
; RV32I-NEXT: .cfi_restore s11
|
|
; RV32I-NEXT: addi sp, sp, 80
|
|
; RV32I-NEXT: .cfi_def_cfa_offset 0
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: callee_no_irq:
|
|
; RV64I: # %bb.0:
|
|
; RV64I-NEXT: addi sp, sp, -160
|
|
; RV64I-NEXT: .cfi_def_cfa_offset 160
|
|
; RV64I-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s0, 144(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s1, 136(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s2, 128(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s3, 120(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s4, 112(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s5, 104(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s6, 96(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s7, 88(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s8, 80(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s9, 72(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s10, 64(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s11, 56(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: .cfi_offset ra, -8
|
|
; RV64I-NEXT: .cfi_offset s0, -16
|
|
; RV64I-NEXT: .cfi_offset s1, -24
|
|
; RV64I-NEXT: .cfi_offset s2, -32
|
|
; RV64I-NEXT: .cfi_offset s3, -40
|
|
; RV64I-NEXT: .cfi_offset s4, -48
|
|
; RV64I-NEXT: .cfi_offset s5, -56
|
|
; RV64I-NEXT: .cfi_offset s6, -64
|
|
; RV64I-NEXT: .cfi_offset s7, -72
|
|
; RV64I-NEXT: .cfi_offset s8, -80
|
|
; RV64I-NEXT: .cfi_offset s9, -88
|
|
; RV64I-NEXT: .cfi_offset s10, -96
|
|
; RV64I-NEXT: .cfi_offset s11, -104
|
|
; RV64I-NEXT: lui a7, %hi(var_test_irq)
|
|
; RV64I-NEXT: lw a0, %lo(var_test_irq)(a7)
|
|
; RV64I-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: lw a0, %lo(var_test_irq+4)(a7)
|
|
; RV64I-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: lw a0, %lo(var_test_irq+8)(a7)
|
|
; RV64I-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: lw a0, %lo(var_test_irq+12)(a7)
|
|
; RV64I-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: addi a5, a7, %lo(var_test_irq)
|
|
; RV64I-NEXT: lw a0, 16(a5)
|
|
; RV64I-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: lw a0, 20(a5)
|
|
; RV64I-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: lw t0, 24(a5)
|
|
; RV64I-NEXT: lw t1, 28(a5)
|
|
; RV64I-NEXT: lw t2, 32(a5)
|
|
; RV64I-NEXT: lw t3, 36(a5)
|
|
; RV64I-NEXT: lw t4, 40(a5)
|
|
; RV64I-NEXT: lw t5, 44(a5)
|
|
; RV64I-NEXT: lw t6, 48(a5)
|
|
; RV64I-NEXT: lw s0, 52(a5)
|
|
; RV64I-NEXT: lw s1, 56(a5)
|
|
; RV64I-NEXT: lw s2, 60(a5)
|
|
; RV64I-NEXT: lw s3, 64(a5)
|
|
; RV64I-NEXT: lw s4, 68(a5)
|
|
; RV64I-NEXT: lw s5, 72(a5)
|
|
; RV64I-NEXT: lw s6, 76(a5)
|
|
; RV64I-NEXT: lw s7, 80(a5)
|
|
; RV64I-NEXT: lw s8, 84(a5)
|
|
; RV64I-NEXT: lw s9, 88(a5)
|
|
; RV64I-NEXT: lw s10, 92(a5)
|
|
; RV64I-NEXT: lw s11, 112(a5)
|
|
; RV64I-NEXT: lw ra, 116(a5)
|
|
; RV64I-NEXT: lw a3, 120(a5)
|
|
; RV64I-NEXT: lw a0, 124(a5)
|
|
; RV64I-NEXT: lw a6, 96(a5)
|
|
; RV64I-NEXT: lw a4, 100(a5)
|
|
; RV64I-NEXT: lw a2, 104(a5)
|
|
; RV64I-NEXT: lw a1, 108(a5)
|
|
; RV64I-NEXT: sw a0, 124(a5)
|
|
; RV64I-NEXT: sw a3, 120(a5)
|
|
; RV64I-NEXT: sw ra, 116(a5)
|
|
; RV64I-NEXT: sw s11, 112(a5)
|
|
; RV64I-NEXT: sw a1, 108(a5)
|
|
; RV64I-NEXT: sw a2, 104(a5)
|
|
; RV64I-NEXT: sw a4, 100(a5)
|
|
; RV64I-NEXT: sw a6, 96(a5)
|
|
; RV64I-NEXT: sw s10, 92(a5)
|
|
; RV64I-NEXT: sw s9, 88(a5)
|
|
; RV64I-NEXT: sw s8, 84(a5)
|
|
; RV64I-NEXT: sw s7, 80(a5)
|
|
; RV64I-NEXT: sw s6, 76(a5)
|
|
; RV64I-NEXT: sw s5, 72(a5)
|
|
; RV64I-NEXT: sw s4, 68(a5)
|
|
; RV64I-NEXT: sw s3, 64(a5)
|
|
; RV64I-NEXT: sw s2, 60(a5)
|
|
; RV64I-NEXT: sw s1, 56(a5)
|
|
; RV64I-NEXT: sw s0, 52(a5)
|
|
; RV64I-NEXT: sw t6, 48(a5)
|
|
; RV64I-NEXT: sw t5, 44(a5)
|
|
; RV64I-NEXT: sw t4, 40(a5)
|
|
; RV64I-NEXT: sw t3, 36(a5)
|
|
; RV64I-NEXT: sw t2, 32(a5)
|
|
; RV64I-NEXT: sw t1, 28(a5)
|
|
; RV64I-NEXT: sw t0, 24(a5)
|
|
; RV64I-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: sw a0, 20(a5)
|
|
; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: sw a0, 16(a5)
|
|
; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: sw a0, %lo(var_test_irq+12)(a7)
|
|
; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: sw a0, %lo(var_test_irq+8)(a7)
|
|
; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: sw a0, %lo(var_test_irq+4)(a7)
|
|
; RV64I-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: sw a0, %lo(var_test_irq)(a7)
|
|
; RV64I-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s0, 144(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s1, 136(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s2, 128(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s3, 120(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s4, 112(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s5, 104(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s6, 96(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s7, 88(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s8, 80(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s9, 72(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s10, 64(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s11, 56(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: .cfi_restore ra
|
|
; RV64I-NEXT: .cfi_restore s0
|
|
; RV64I-NEXT: .cfi_restore s1
|
|
; RV64I-NEXT: .cfi_restore s2
|
|
; RV64I-NEXT: .cfi_restore s3
|
|
; RV64I-NEXT: .cfi_restore s4
|
|
; RV64I-NEXT: .cfi_restore s5
|
|
; RV64I-NEXT: .cfi_restore s6
|
|
; RV64I-NEXT: .cfi_restore s7
|
|
; RV64I-NEXT: .cfi_restore s8
|
|
; RV64I-NEXT: .cfi_restore s9
|
|
; RV64I-NEXT: .cfi_restore s10
|
|
; RV64I-NEXT: .cfi_restore s11
|
|
; RV64I-NEXT: addi sp, sp, 160
|
|
; RV64I-NEXT: .cfi_def_cfa_offset 0
|
|
; RV64I-NEXT: ret
|
|
%val = load [32 x i32], ptr @var_test_irq
|
|
store volatile [32 x i32] %val, ptr @var_test_irq
|
|
ret void
|
|
}
|
|
|
|
declare void @bar(ptr, ptr)
|
|
declare ptr @llvm.frameaddress.p0(i32 immarg)
|
|
|
|
; use_fp: returns its argument unchanged after passing the frame address
; (llvm.frameaddress) plus the address of a local alloca to @bar.  Taking the
; frame address forces a frame pointer, so the generated checks verify that
; Zcmp cm.push/cm.popret cooperate with an s0-based frame: CFA moves to s0
; (.cfi_def_cfa s0, 0) after the prologue and back to sp before the pop.
define i32 @use_fp(i32 %x) {
|
|
; RV32IZCMP-LABEL: use_fp:
|
|
; RV32IZCMP: # %bb.0: # %entry
|
|
; RV32IZCMP-NEXT: cm.push {ra, s0-s1}, -32
|
|
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 32
|
|
; RV32IZCMP-NEXT: .cfi_offset ra, -12
|
|
; RV32IZCMP-NEXT: .cfi_offset s0, -8
|
|
; RV32IZCMP-NEXT: .cfi_offset s1, -4
|
|
; RV32IZCMP-NEXT: addi s0, sp, 32
|
|
; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
|
|
; RV32IZCMP-NEXT: mv s1, a0
|
|
; RV32IZCMP-NEXT: addi a1, s0, -20
|
|
; RV32IZCMP-NEXT: mv a0, s0
|
|
; RV32IZCMP-NEXT: call bar
|
|
; RV32IZCMP-NEXT: mv a0, s1
|
|
; RV32IZCMP-NEXT: .cfi_def_cfa sp, 32
|
|
; RV32IZCMP-NEXT: cm.popret {ra, s0-s1}, 32
|
|
;
|
|
; RV64IZCMP-LABEL: use_fp:
|
|
; RV64IZCMP: # %bb.0: # %entry
|
|
; RV64IZCMP-NEXT: cm.push {ra, s0-s1}, -48
|
|
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 48
|
|
; RV64IZCMP-NEXT: .cfi_offset ra, -24
|
|
; RV64IZCMP-NEXT: .cfi_offset s0, -16
|
|
; RV64IZCMP-NEXT: .cfi_offset s1, -8
|
|
; RV64IZCMP-NEXT: addi s0, sp, 48
|
|
; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
|
|
; RV64IZCMP-NEXT: mv s1, a0
|
|
; RV64IZCMP-NEXT: addi a1, s0, -36
|
|
; RV64IZCMP-NEXT: mv a0, s0
|
|
; RV64IZCMP-NEXT: call bar
|
|
; RV64IZCMP-NEXT: mv a0, s1
|
|
; RV64IZCMP-NEXT: .cfi_def_cfa sp, 48
|
|
; RV64IZCMP-NEXT: cm.popret {ra, s0-s1}, 48
|
|
;
|
|
; RV32IZCMP-SR-LABEL: use_fp:
|
|
; RV32IZCMP-SR: # %bb.0: # %entry
|
|
; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -32
|
|
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 32
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -12
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s0, -8
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s1, -4
|
|
; RV32IZCMP-SR-NEXT: addi s0, sp, 32
|
|
; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
|
|
; RV32IZCMP-SR-NEXT: mv s1, a0
|
|
; RV32IZCMP-SR-NEXT: addi a1, s0, -20
|
|
; RV32IZCMP-SR-NEXT: mv a0, s0
|
|
; RV32IZCMP-SR-NEXT: call bar
|
|
; RV32IZCMP-SR-NEXT: mv a0, s1
|
|
; RV32IZCMP-SR-NEXT: .cfi_def_cfa sp, 32
|
|
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 32
|
|
;
|
|
; RV64IZCMP-SR-LABEL: use_fp:
|
|
; RV64IZCMP-SR: # %bb.0: # %entry
|
|
; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -48
|
|
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 48
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -24
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s0, -16
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s1, -8
|
|
; RV64IZCMP-SR-NEXT: addi s0, sp, 48
|
|
; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
|
|
; RV64IZCMP-SR-NEXT: mv s1, a0
|
|
; RV64IZCMP-SR-NEXT: addi a1, s0, -36
|
|
; RV64IZCMP-SR-NEXT: mv a0, s0
|
|
; RV64IZCMP-SR-NEXT: call bar
|
|
; RV64IZCMP-SR-NEXT: mv a0, s1
|
|
; RV64IZCMP-SR-NEXT: .cfi_def_cfa sp, 48
|
|
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 48
|
|
;
|
|
; RV32I-LABEL: use_fp:
|
|
; RV32I: # %bb.0: # %entry
|
|
; RV32I-NEXT: addi sp, sp, -16
|
|
; RV32I-NEXT: .cfi_def_cfa_offset 16
|
|
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: .cfi_offset ra, -4
|
|
; RV32I-NEXT: .cfi_offset s0, -8
|
|
; RV32I-NEXT: .cfi_offset s1, -12
|
|
; RV32I-NEXT: addi s0, sp, 16
|
|
; RV32I-NEXT: .cfi_def_cfa s0, 0
|
|
; RV32I-NEXT: mv s1, a0
|
|
; RV32I-NEXT: addi a1, s0, -16
|
|
; RV32I-NEXT: mv a0, s0
|
|
; RV32I-NEXT: call bar
|
|
; RV32I-NEXT: mv a0, s1
|
|
; RV32I-NEXT: .cfi_def_cfa sp, 16
|
|
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: .cfi_restore ra
|
|
; RV32I-NEXT: .cfi_restore s0
|
|
; RV32I-NEXT: .cfi_restore s1
|
|
; RV32I-NEXT: addi sp, sp, 16
|
|
; RV32I-NEXT: .cfi_def_cfa_offset 0
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: use_fp:
|
|
; RV64I: # %bb.0: # %entry
|
|
; RV64I-NEXT: addi sp, sp, -32
|
|
; RV64I-NEXT: .cfi_def_cfa_offset 32
|
|
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: .cfi_offset ra, -8
|
|
; RV64I-NEXT: .cfi_offset s0, -16
|
|
; RV64I-NEXT: .cfi_offset s1, -24
|
|
; RV64I-NEXT: addi s0, sp, 32
|
|
; RV64I-NEXT: .cfi_def_cfa s0, 0
|
|
; RV64I-NEXT: mv s1, a0
|
|
; RV64I-NEXT: addi a1, s0, -28
|
|
; RV64I-NEXT: mv a0, s0
|
|
; RV64I-NEXT: call bar
|
|
; RV64I-NEXT: mv a0, s1
|
|
; RV64I-NEXT: .cfi_def_cfa sp, 32
|
|
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: .cfi_restore ra
|
|
; RV64I-NEXT: .cfi_restore s0
|
|
; RV64I-NEXT: .cfi_restore s1
|
|
; RV64I-NEXT: addi sp, sp, 32
|
|
; RV64I-NEXT: .cfi_def_cfa_offset 0
|
|
; RV64I-NEXT: ret
|
|
entry:
|
|
%var = alloca i32, align 4
|
|
; The frameaddress intrinsic is what forces the frame-pointer setup checked
; above (addi s0, sp, <framesize>).
%0 = tail call ptr @llvm.frameaddress.p0(i32 0)
|
|
call void @bar(ptr %0, ptr %var)
|
|
ret i32 %x
|
|
}
|
|
|
|
; spill_x10: inline asm clobbers only s10 (x26).  The checks show that the
; Zcmp configurations emit cm.push/cm.popret with the register list
; {ra, s0-s11} (the list must be a contiguous range, so clobbering s10 pulls
; in s0-s11), while plain RV32I/RV64I spill and reload just s10 with a
; minimal 16-byte stack frame.
define void @spill_x10() {
|
|
; RV32IZCMP-LABEL: spill_x10:
|
|
; RV32IZCMP: # %bb.0: # %entry
|
|
; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -64
|
|
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 64
|
|
; RV32IZCMP-NEXT: .cfi_offset s10, -8
|
|
; RV32IZCMP-NEXT: .cfi_offset s11, -4
|
|
; RV32IZCMP-NEXT: #APP
|
|
; RV32IZCMP-NEXT: li s10, 0
|
|
; RV32IZCMP-NEXT: #NO_APP
|
|
; RV32IZCMP-NEXT: cm.popret {ra, s0-s11}, 64
|
|
;
|
|
; RV64IZCMP-LABEL: spill_x10:
|
|
; RV64IZCMP: # %bb.0: # %entry
|
|
; RV64IZCMP-NEXT: cm.push {ra, s0-s11}, -112
|
|
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 112
|
|
; RV64IZCMP-NEXT: .cfi_offset s10, -16
|
|
; RV64IZCMP-NEXT: .cfi_offset s11, -8
|
|
; RV64IZCMP-NEXT: #APP
|
|
; RV64IZCMP-NEXT: li s10, 0
|
|
; RV64IZCMP-NEXT: #NO_APP
|
|
; RV64IZCMP-NEXT: cm.popret {ra, s0-s11}, 112
|
|
;
|
|
; RV32IZCMP-SR-LABEL: spill_x10:
|
|
; RV32IZCMP-SR: # %bb.0: # %entry
|
|
; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -64
|
|
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 64
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s10, -8
|
|
; RV32IZCMP-SR-NEXT: .cfi_offset s11, -4
|
|
; RV32IZCMP-SR-NEXT: #APP
|
|
; RV32IZCMP-SR-NEXT: li s10, 0
|
|
; RV32IZCMP-SR-NEXT: #NO_APP
|
|
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s11}, 64
|
|
;
|
|
; RV64IZCMP-SR-LABEL: spill_x10:
|
|
; RV64IZCMP-SR: # %bb.0: # %entry
|
|
; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -112
|
|
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 112
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s10, -16
|
|
; RV64IZCMP-SR-NEXT: .cfi_offset s11, -8
|
|
; RV64IZCMP-SR-NEXT: #APP
|
|
; RV64IZCMP-SR-NEXT: li s10, 0
|
|
; RV64IZCMP-SR-NEXT: #NO_APP
|
|
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s11}, 112
|
|
;
|
|
; RV32I-LABEL: spill_x10:
|
|
; RV32I: # %bb.0: # %entry
|
|
; RV32I-NEXT: addi sp, sp, -16
|
|
; RV32I-NEXT: .cfi_def_cfa_offset 16
|
|
; RV32I-NEXT: sw s10, 12(sp) # 4-byte Folded Spill
|
|
; RV32I-NEXT: .cfi_offset s10, -4
|
|
; RV32I-NEXT: #APP
|
|
; RV32I-NEXT: li s10, 0
|
|
; RV32I-NEXT: #NO_APP
|
|
; RV32I-NEXT: lw s10, 12(sp) # 4-byte Folded Reload
|
|
; RV32I-NEXT: .cfi_restore s10
|
|
; RV32I-NEXT: addi sp, sp, 16
|
|
; RV32I-NEXT: .cfi_def_cfa_offset 0
|
|
; RV32I-NEXT: ret
|
|
;
|
|
; RV64I-LABEL: spill_x10:
|
|
; RV64I: # %bb.0: # %entry
|
|
; RV64I-NEXT: addi sp, sp, -16
|
|
; RV64I-NEXT: .cfi_def_cfa_offset 16
|
|
; RV64I-NEXT: sd s10, 8(sp) # 8-byte Folded Spill
|
|
; RV64I-NEXT: .cfi_offset s10, -8
|
|
; RV64I-NEXT: #APP
|
|
; RV64I-NEXT: li s10, 0
|
|
; RV64I-NEXT: #NO_APP
|
|
; RV64I-NEXT: ld s10, 8(sp) # 8-byte Folded Reload
|
|
; RV64I-NEXT: .cfi_restore s10
|
|
; RV64I-NEXT: addi sp, sp, 16
|
|
; RV64I-NEXT: .cfi_def_cfa_offset 0
|
|
; RV64I-NEXT: ret
|
|
entry:
|
|
; The "~{s10}" clobber is the sole reason any callee-saved register needs
; saving in this function.
tail call void asm sideeffect "li s10, 0", "~{s10}"()
|
|
ret void
|
|
}
|