llvm-project/llvm/test/CodeGen/RISCV/sifive-interrupt-attr.ll
Sam Elliott cfc5baf6e6
[RISCV] SiFive CLIC Support (#132481)
This change adds support for two SiFive vendor interrupt attribute values in clang:
- "SiFive-CLIC-preemptible"
- "SiFive-CLIC-stack-swap"

These can be given together, and can be combined with "machine", but
cannot be combined with any other interrupt attribute values.
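
As a rough illustration, here is a minimal C sketch of how these values
might be used at the source level. The handler names are made up, and how
the two values are spelled together at the C level is not shown here; at
the IR level the combined form appears as the single attribute string
"interrupt"="SiFive-CLIC-preemptible-stack-swap", as in the tests below.

  /* Sketch only: the attribute values come from this change; the names
     and empty bodies are purely illustrative. */
  __attribute__((interrupt("SiFive-CLIC-stack-swap")))
  void swap_only_handler(void) {}

  __attribute__((interrupt("SiFive-CLIC-preemptible")))
  void preemptible_handler(void) {}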

These are handled primarily in RISCVFrameLowering:
- "SiFive-CLIC-stack-swap" entails swapping `sp` with `sf.mscratchcsw`,
  which holds the trap stack pointer, at function entry and exit.
- "SiFive-CLIC-preemptible" entails saving `mcause` and `mepc` before
  re-enabling interrupts via `mstatus`. To save these, `s0` and `s1`
  are first spilled to the stack, and the CSR values are then read into
  those registers. If `s0` or `s1` is also used in the function body, it
  is spilled a second time by the generic callee-saved-register handling.
  At the end of the function, interrupts are disabled again before `mepc`
  and `mcause` are restored (see the sketch after this list).
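
For the preemptible case, a hedged sketch of what this means for a handler
that makes a call; the C function and its name are illustrative, and the
sequence in the comments mirrors the preemptible_caller test below.

  /* Illustrative handler; only the attribute value is from this change. */
  extern void callee(void);

  __attribute__((interrupt("SiFive-CLIC-preemptible")))
  void preemptible_caller(void) {
    /* Prologue emitted by RISCVFrameLowering (per the tests below):
         - spill s0/s1 to the interrupt frame
         - csrr s0, mcause ; csrr s1, mepc
         - csrsi mstatus, 8   (re-enable interrupts)
         - spill ra and the caller-saved a/t registers */
    callee();
    /* Epilogue:
         - reload ra and the caller-saved a/t registers
         - csrci mstatus, 8   (disable interrupts again)
         - csrw mepc, s1 ; csrw mcause, s0
         - reload s0/s1, then mret */
  }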

This change also adds support for the following two experimental
extensions, which only contain CSRs:
- XSfsclic - for SiFive's CLIC Supervisor-Mode CSRs
- XSfmclic - for SiFive's CLIC Machine-Mode CSRs

The latter is the one required for the interrupt support above; the tests
below enable it with `-mattr=+experimental-xsfmclic`.

The CFI information for this implementation is not correct, but I'd
prefer to fix that in a follow-up. While it's unlikely anyone wants
to unwind through a handler, the CFI information is also used by
debuggers, so it would be good to get it right.

Co-authored-by: Ana Pazos <apazos@quicinc.com>
2025-04-25 17:12:27 -07:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv32-unknown-elf -mattr=+experimental-xsfmclic -o - %s \
; RUN: | FileCheck %s --check-prefix=RV32
; RUN: llc -mtriple riscv64-unknown-elf -mattr=+experimental-xsfmclic -o - %s \
; RUN: | FileCheck %s --check-prefix=RV64
; Test handling of the SiFive-CLIC interrupt attributes.
;
; "stack-swap" means that `sp` should be swapped with `sf.mscratchcsw`.
;
; "preemptible" means that `mcause` and `mepc` should be saved and interrupts
; should be re-enabled by setting a bit in `mstatus`.
; FIXME: A lot of the CFI information here is wrong.
define void @stack_swap_empty() "interrupt"="SiFive-CLIC-stack-swap" {
; RV32-LABEL: stack_swap_empty:
; RV32: # %bb.0:
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: mret
;
; RV64-LABEL: stack_swap_empty:
; RV64: # %bb.0:
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: mret
ret void
}
define void @stack_swap_empty_fp() "interrupt"="SiFive-CLIC-stack-swap" "frame-pointer"="all" {
; RV32-LABEL: stack_swap_empty_fp:
; RV32: # %bb.0:
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: addi s0, sp, 16
; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: mret
;
; RV64-LABEL: stack_swap_empty_fp:
; RV64: # %bb.0:
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: addi s0, sp, 16
; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: mret
ret void
}
define void @preemptible_empty() "interrupt"="SiFive-CLIC-preemptible" {
; RV32-LABEL: preemptible_empty:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s1, 8(sp) # 4-byte Folded Spill
; RV32-NEXT: csrr s0, mcause
; RV32-NEXT: csrr s1, mepc
; RV32-NEXT: csrsi mstatus, 8
; RV32-NEXT: csrci mstatus, 8
; RV32-NEXT: csrw mepc, s1
; RV32-NEXT: csrw mcause, s0
; RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: mret
;
; RV64-LABEL: preemptible_empty:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s1, 0(sp) # 8-byte Folded Spill
; RV64-NEXT: csrr s0, mcause
; RV64-NEXT: csrr s1, mepc
; RV64-NEXT: csrsi mstatus, 8
; RV64-NEXT: csrci mstatus, 8
; RV64-NEXT: csrw mepc, s1
; RV64-NEXT: csrw mcause, s0
; RV64-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: mret
ret void
}
define void @both_empty() "interrupt"="SiFive-CLIC-preemptible-stack-swap" {
; RV32-LABEL: both_empty:
; RV32: # %bb.0:
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s1, 8(sp) # 4-byte Folded Spill
; RV32-NEXT: csrr s0, mcause
; RV32-NEXT: csrr s1, mepc
; RV32-NEXT: csrsi mstatus, 8
; RV32-NEXT: csrci mstatus, 8
; RV32-NEXT: csrw mepc, s1
; RV32-NEXT: csrw mcause, s0
; RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: mret
;
; RV64-LABEL: both_empty:
; RV64: # %bb.0:
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s1, 0(sp) # 8-byte Folded Spill
; RV64-NEXT: csrr s0, mcause
; RV64-NEXT: csrr s1, mepc
; RV64-NEXT: csrsi mstatus, 8
; RV64-NEXT: csrci mstatus, 8
; RV64-NEXT: csrw mepc, s1
; RV64-NEXT: csrw mcause, s0
; RV64-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: mret
ret void
}
declare void @callee()
define void @stack_swap_caller() "interrupt"="SiFive-CLIC-stack-swap" {
; RV32-LABEL: stack_swap_caller:
; RV32: # %bb.0:
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: addi sp, sp, -64
; RV32-NEXT: .cfi_def_cfa_offset 64
; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t0, 56(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t1, 52(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t2, 48(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a1, 40(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a2, 36(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a3, 32(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a4, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a5, 24(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a6, 20(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a7, 16(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t3, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t4, 8(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t5, 4(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t6, 0(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset t0, -8
; RV32-NEXT: .cfi_offset t1, -12
; RV32-NEXT: .cfi_offset t2, -16
; RV32-NEXT: .cfi_offset a0, -20
; RV32-NEXT: .cfi_offset a1, -24
; RV32-NEXT: .cfi_offset a2, -28
; RV32-NEXT: .cfi_offset a3, -32
; RV32-NEXT: .cfi_offset a4, -36
; RV32-NEXT: .cfi_offset a5, -40
; RV32-NEXT: .cfi_offset a6, -44
; RV32-NEXT: .cfi_offset a7, -48
; RV32-NEXT: .cfi_offset t3, -52
; RV32-NEXT: .cfi_offset t4, -56
; RV32-NEXT: .cfi_offset t5, -60
; RV32-NEXT: .cfi_offset t6, -64
; RV32-NEXT: call callee
; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t0, 56(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t1, 52(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t2, 48(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a0, 44(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a1, 40(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a2, 36(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a3, 32(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a4, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a5, 24(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a6, 20(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a7, 16(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t3, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t4, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t5, 4(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t6, 0(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: .cfi_restore t0
; RV32-NEXT: .cfi_restore t1
; RV32-NEXT: .cfi_restore t2
; RV32-NEXT: .cfi_restore a0
; RV32-NEXT: .cfi_restore a1
; RV32-NEXT: .cfi_restore a2
; RV32-NEXT: .cfi_restore a3
; RV32-NEXT: .cfi_restore a4
; RV32-NEXT: .cfi_restore a5
; RV32-NEXT: .cfi_restore a6
; RV32-NEXT: .cfi_restore a7
; RV32-NEXT: .cfi_restore t3
; RV32-NEXT: .cfi_restore t4
; RV32-NEXT: .cfi_restore t5
; RV32-NEXT: .cfi_restore t6
; RV32-NEXT: addi sp, sp, 64
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: mret
;
; RV64-LABEL: stack_swap_caller:
; RV64: # %bb.0:
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: addi sp, sp, -128
; RV64-NEXT: .cfi_def_cfa_offset 128
; RV64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t0, 112(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t1, 104(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t2, 96(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a0, 88(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a1, 80(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a2, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a3, 64(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a4, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a5, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a6, 40(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a7, 32(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t3, 24(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t4, 16(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t5, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t6, 0(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_offset t0, -16
; RV64-NEXT: .cfi_offset t1, -24
; RV64-NEXT: .cfi_offset t2, -32
; RV64-NEXT: .cfi_offset a0, -40
; RV64-NEXT: .cfi_offset a1, -48
; RV64-NEXT: .cfi_offset a2, -56
; RV64-NEXT: .cfi_offset a3, -64
; RV64-NEXT: .cfi_offset a4, -72
; RV64-NEXT: .cfi_offset a5, -80
; RV64-NEXT: .cfi_offset a6, -88
; RV64-NEXT: .cfi_offset a7, -96
; RV64-NEXT: .cfi_offset t3, -104
; RV64-NEXT: .cfi_offset t4, -112
; RV64-NEXT: .cfi_offset t5, -120
; RV64-NEXT: .cfi_offset t6, -128
; RV64-NEXT: call callee
; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t0, 112(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t1, 104(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t2, 96(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a1, 80(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a2, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a3, 64(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a4, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a5, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a6, 40(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a7, 32(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t3, 24(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t4, 16(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t5, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t6, 0(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: .cfi_restore t0
; RV64-NEXT: .cfi_restore t1
; RV64-NEXT: .cfi_restore t2
; RV64-NEXT: .cfi_restore a0
; RV64-NEXT: .cfi_restore a1
; RV64-NEXT: .cfi_restore a2
; RV64-NEXT: .cfi_restore a3
; RV64-NEXT: .cfi_restore a4
; RV64-NEXT: .cfi_restore a5
; RV64-NEXT: .cfi_restore a6
; RV64-NEXT: .cfi_restore a7
; RV64-NEXT: .cfi_restore t3
; RV64-NEXT: .cfi_restore t4
; RV64-NEXT: .cfi_restore t5
; RV64-NEXT: .cfi_restore t6
; RV64-NEXT: addi sp, sp, 128
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: mret
call void @callee()
ret void
}
define void @stack_swap_caller_fp() "interrupt"="SiFive-CLIC-stack-swap" "frame-pointer"="all" {
; RV32-LABEL: stack_swap_caller_fp:
; RV32: # %bb.0:
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: addi sp, sp, -80
; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t0, 72(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t1, 68(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t2, 64(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 60(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a0, 56(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a1, 52(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a2, 48(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a3, 44(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a4, 40(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a5, 36(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a6, 32(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a7, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t3, 24(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t4, 20(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t5, 16(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t6, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset t0, -8
; RV32-NEXT: .cfi_offset t1, -12
; RV32-NEXT: .cfi_offset t2, -16
; RV32-NEXT: .cfi_offset s0, -20
; RV32-NEXT: .cfi_offset a0, -24
; RV32-NEXT: .cfi_offset a1, -28
; RV32-NEXT: .cfi_offset a2, -32
; RV32-NEXT: .cfi_offset a3, -36
; RV32-NEXT: .cfi_offset a4, -40
; RV32-NEXT: .cfi_offset a5, -44
; RV32-NEXT: .cfi_offset a6, -48
; RV32-NEXT: .cfi_offset a7, -52
; RV32-NEXT: .cfi_offset t3, -56
; RV32-NEXT: .cfi_offset t4, -60
; RV32-NEXT: .cfi_offset t5, -64
; RV32-NEXT: .cfi_offset t6, -68
; RV32-NEXT: addi s0, sp, 80
; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: call callee
; RV32-NEXT: .cfi_def_cfa sp, 80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t0, 72(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t1, 68(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t2, 64(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 60(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a0, 56(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a1, 52(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a2, 48(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a3, 44(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a4, 40(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a5, 36(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a6, 32(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a7, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t3, 24(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t4, 20(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t5, 16(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t6, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: .cfi_restore t0
; RV32-NEXT: .cfi_restore t1
; RV32-NEXT: .cfi_restore t2
; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: .cfi_restore a0
; RV32-NEXT: .cfi_restore a1
; RV32-NEXT: .cfi_restore a2
; RV32-NEXT: .cfi_restore a3
; RV32-NEXT: .cfi_restore a4
; RV32-NEXT: .cfi_restore a5
; RV32-NEXT: .cfi_restore a6
; RV32-NEXT: .cfi_restore a7
; RV32-NEXT: .cfi_restore t3
; RV32-NEXT: .cfi_restore t4
; RV32-NEXT: .cfi_restore t5
; RV32-NEXT: .cfi_restore t6
; RV32-NEXT: addi sp, sp, 80
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: mret
;
; RV64-LABEL: stack_swap_caller_fp:
; RV64: # %bb.0:
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: addi sp, sp, -144
; RV64-NEXT: .cfi_def_cfa_offset 144
; RV64-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t0, 128(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t1, 120(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t2, 112(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 104(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a0, 96(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a1, 88(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a2, 80(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a3, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a4, 64(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a5, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a6, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a7, 40(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t3, 32(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_offset t0, -16
; RV64-NEXT: .cfi_offset t1, -24
; RV64-NEXT: .cfi_offset t2, -32
; RV64-NEXT: .cfi_offset s0, -40
; RV64-NEXT: .cfi_offset a0, -48
; RV64-NEXT: .cfi_offset a1, -56
; RV64-NEXT: .cfi_offset a2, -64
; RV64-NEXT: .cfi_offset a3, -72
; RV64-NEXT: .cfi_offset a4, -80
; RV64-NEXT: .cfi_offset a5, -88
; RV64-NEXT: .cfi_offset a6, -96
; RV64-NEXT: .cfi_offset a7, -104
; RV64-NEXT: .cfi_offset t3, -112
; RV64-NEXT: .cfi_offset t4, -120
; RV64-NEXT: .cfi_offset t5, -128
; RV64-NEXT: .cfi_offset t6, -136
; RV64-NEXT: addi s0, sp, 144
; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: call callee
; RV64-NEXT: .cfi_def_cfa sp, 144
; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t0, 128(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t1, 120(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t2, 112(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 104(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a1, 88(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a2, 80(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a3, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a4, 64(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a5, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a6, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a7, 40(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t3, 32(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t4, 24(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t5, 16(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t6, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: .cfi_restore t0
; RV64-NEXT: .cfi_restore t1
; RV64-NEXT: .cfi_restore t2
; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: .cfi_restore a0
; RV64-NEXT: .cfi_restore a1
; RV64-NEXT: .cfi_restore a2
; RV64-NEXT: .cfi_restore a3
; RV64-NEXT: .cfi_restore a4
; RV64-NEXT: .cfi_restore a5
; RV64-NEXT: .cfi_restore a6
; RV64-NEXT: .cfi_restore a7
; RV64-NEXT: .cfi_restore t3
; RV64-NEXT: .cfi_restore t4
; RV64-NEXT: .cfi_restore t5
; RV64-NEXT: .cfi_restore t6
; RV64-NEXT: addi sp, sp, 144
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: mret
call void @callee()
ret void
}
define void @preemptible_caller() "interrupt"="SiFive-CLIC-preemptible" {
; RV32-LABEL: preemptible_caller:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -80
; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw s0, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s1, 72(sp) # 4-byte Folded Spill
; RV32-NEXT: csrr s0, mcause
; RV32-NEXT: csrr s1, mepc
; RV32-NEXT: csrsi mstatus, 8
; RV32-NEXT: sw ra, 68(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t0, 64(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t1, 60(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t2, 56(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a0, 52(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a1, 48(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a2, 44(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a3, 40(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a4, 36(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a5, 32(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a6, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a7, 24(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t3, 20(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t4, 16(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t5, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t6, 8(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -12
; RV32-NEXT: .cfi_offset t0, -16
; RV32-NEXT: .cfi_offset t1, -20
; RV32-NEXT: .cfi_offset t2, -24
; RV32-NEXT: .cfi_offset a0, -28
; RV32-NEXT: .cfi_offset a1, -32
; RV32-NEXT: .cfi_offset a2, -36
; RV32-NEXT: .cfi_offset a3, -40
; RV32-NEXT: .cfi_offset a4, -44
; RV32-NEXT: .cfi_offset a5, -48
; RV32-NEXT: .cfi_offset a6, -52
; RV32-NEXT: .cfi_offset a7, -56
; RV32-NEXT: .cfi_offset t3, -60
; RV32-NEXT: .cfi_offset t4, -64
; RV32-NEXT: .cfi_offset t5, -68
; RV32-NEXT: .cfi_offset t6, -72
; RV32-NEXT: call callee
; RV32-NEXT: lw ra, 68(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t0, 64(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t1, 60(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t2, 56(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a0, 52(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a1, 48(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a2, 44(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a3, 40(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a4, 36(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a5, 32(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a6, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a7, 24(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t3, 20(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t4, 16(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t5, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t6, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: .cfi_restore t0
; RV32-NEXT: .cfi_restore t1
; RV32-NEXT: .cfi_restore t2
; RV32-NEXT: .cfi_restore a0
; RV32-NEXT: .cfi_restore a1
; RV32-NEXT: .cfi_restore a2
; RV32-NEXT: .cfi_restore a3
; RV32-NEXT: .cfi_restore a4
; RV32-NEXT: .cfi_restore a5
; RV32-NEXT: .cfi_restore a6
; RV32-NEXT: .cfi_restore a7
; RV32-NEXT: .cfi_restore t3
; RV32-NEXT: .cfi_restore t4
; RV32-NEXT: .cfi_restore t5
; RV32-NEXT: .cfi_restore t6
; RV32-NEXT: csrci mstatus, 8
; RV32-NEXT: csrw mepc, s1
; RV32-NEXT: csrw mcause, s0
; RV32-NEXT: lw s1, 72(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 80
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: mret
;
; RV64-LABEL: preemptible_caller:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -144
; RV64-NEXT: .cfi_def_cfa_offset 144
; RV64-NEXT: sd s0, 136(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s1, 128(sp) # 8-byte Folded Spill
; RV64-NEXT: csrr s0, mcause
; RV64-NEXT: csrr s1, mepc
; RV64-NEXT: csrsi mstatus, 8
; RV64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t0, 112(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t1, 104(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t2, 96(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a0, 88(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a1, 80(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a2, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a3, 64(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a4, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a5, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a6, 40(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a7, 32(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t3, 24(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t4, 16(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t5, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t6, 0(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -24
; RV64-NEXT: .cfi_offset t0, -32
; RV64-NEXT: .cfi_offset t1, -40
; RV64-NEXT: .cfi_offset t2, -48
; RV64-NEXT: .cfi_offset a0, -56
; RV64-NEXT: .cfi_offset a1, -64
; RV64-NEXT: .cfi_offset a2, -72
; RV64-NEXT: .cfi_offset a3, -80
; RV64-NEXT: .cfi_offset a4, -88
; RV64-NEXT: .cfi_offset a5, -96
; RV64-NEXT: .cfi_offset a6, -104
; RV64-NEXT: .cfi_offset a7, -112
; RV64-NEXT: .cfi_offset t3, -120
; RV64-NEXT: .cfi_offset t4, -128
; RV64-NEXT: .cfi_offset t5, -136
; RV64-NEXT: .cfi_offset t6, -144
; RV64-NEXT: call callee
; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t0, 112(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t1, 104(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t2, 96(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a1, 80(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a2, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a3, 64(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a4, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a5, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a6, 40(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a7, 32(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t3, 24(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t4, 16(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t5, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t6, 0(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: .cfi_restore t0
; RV64-NEXT: .cfi_restore t1
; RV64-NEXT: .cfi_restore t2
; RV64-NEXT: .cfi_restore a0
; RV64-NEXT: .cfi_restore a1
; RV64-NEXT: .cfi_restore a2
; RV64-NEXT: .cfi_restore a3
; RV64-NEXT: .cfi_restore a4
; RV64-NEXT: .cfi_restore a5
; RV64-NEXT: .cfi_restore a6
; RV64-NEXT: .cfi_restore a7
; RV64-NEXT: .cfi_restore t3
; RV64-NEXT: .cfi_restore t4
; RV64-NEXT: .cfi_restore t5
; RV64-NEXT: .cfi_restore t6
; RV64-NEXT: csrci mstatus, 8
; RV64-NEXT: csrw mepc, s1
; RV64-NEXT: csrw mcause, s0
; RV64-NEXT: ld s1, 128(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 136(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 144
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: mret
call void @callee()
ret void
}
define void @both_caller() "interrupt"="SiFive-CLIC-preemptible-stack-swap" {
; RV32-LABEL: both_caller:
; RV32: # %bb.0:
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: addi sp, sp, -80
; RV32-NEXT: .cfi_def_cfa_offset 80
; RV32-NEXT: sw s0, 76(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s1, 72(sp) # 4-byte Folded Spill
; RV32-NEXT: csrr s0, mcause
; RV32-NEXT: csrr s1, mepc
; RV32-NEXT: csrsi mstatus, 8
; RV32-NEXT: sw ra, 68(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t0, 64(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t1, 60(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t2, 56(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a0, 52(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a1, 48(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a2, 44(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a3, 40(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a4, 36(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a5, 32(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a6, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: sw a7, 24(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t3, 20(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t4, 16(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t5, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: sw t6, 8(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -12
; RV32-NEXT: .cfi_offset t0, -16
; RV32-NEXT: .cfi_offset t1, -20
; RV32-NEXT: .cfi_offset t2, -24
; RV32-NEXT: .cfi_offset a0, -28
; RV32-NEXT: .cfi_offset a1, -32
; RV32-NEXT: .cfi_offset a2, -36
; RV32-NEXT: .cfi_offset a3, -40
; RV32-NEXT: .cfi_offset a4, -44
; RV32-NEXT: .cfi_offset a5, -48
; RV32-NEXT: .cfi_offset a6, -52
; RV32-NEXT: .cfi_offset a7, -56
; RV32-NEXT: .cfi_offset t3, -60
; RV32-NEXT: .cfi_offset t4, -64
; RV32-NEXT: .cfi_offset t5, -68
; RV32-NEXT: .cfi_offset t6, -72
; RV32-NEXT: call callee
; RV32-NEXT: lw ra, 68(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t0, 64(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t1, 60(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t2, 56(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a0, 52(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a1, 48(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a2, 44(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a3, 40(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a4, 36(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a5, 32(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a6, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw a7, 24(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t3, 20(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t4, 16(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t5, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: lw t6, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: .cfi_restore t0
; RV32-NEXT: .cfi_restore t1
; RV32-NEXT: .cfi_restore t2
; RV32-NEXT: .cfi_restore a0
; RV32-NEXT: .cfi_restore a1
; RV32-NEXT: .cfi_restore a2
; RV32-NEXT: .cfi_restore a3
; RV32-NEXT: .cfi_restore a4
; RV32-NEXT: .cfi_restore a5
; RV32-NEXT: .cfi_restore a6
; RV32-NEXT: .cfi_restore a7
; RV32-NEXT: .cfi_restore t3
; RV32-NEXT: .cfi_restore t4
; RV32-NEXT: .cfi_restore t5
; RV32-NEXT: .cfi_restore t6
; RV32-NEXT: csrci mstatus, 8
; RV32-NEXT: csrw mepc, s1
; RV32-NEXT: csrw mcause, s0
; RV32-NEXT: lw s1, 72(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 76(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 80
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: mret
;
; RV64-LABEL: both_caller:
; RV64: # %bb.0:
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: addi sp, sp, -144
; RV64-NEXT: .cfi_def_cfa_offset 144
; RV64-NEXT: sd s0, 136(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s1, 128(sp) # 8-byte Folded Spill
; RV64-NEXT: csrr s0, mcause
; RV64-NEXT: csrr s1, mepc
; RV64-NEXT: csrsi mstatus, 8
; RV64-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t0, 112(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t1, 104(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t2, 96(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a0, 88(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a1, 80(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a2, 72(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a3, 64(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a4, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a5, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a6, 40(sp) # 8-byte Folded Spill
; RV64-NEXT: sd a7, 32(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t3, 24(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t4, 16(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t5, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: sd t6, 0(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -24
; RV64-NEXT: .cfi_offset t0, -32
; RV64-NEXT: .cfi_offset t1, -40
; RV64-NEXT: .cfi_offset t2, -48
; RV64-NEXT: .cfi_offset a0, -56
; RV64-NEXT: .cfi_offset a1, -64
; RV64-NEXT: .cfi_offset a2, -72
; RV64-NEXT: .cfi_offset a3, -80
; RV64-NEXT: .cfi_offset a4, -88
; RV64-NEXT: .cfi_offset a5, -96
; RV64-NEXT: .cfi_offset a6, -104
; RV64-NEXT: .cfi_offset a7, -112
; RV64-NEXT: .cfi_offset t3, -120
; RV64-NEXT: .cfi_offset t4, -128
; RV64-NEXT: .cfi_offset t5, -136
; RV64-NEXT: .cfi_offset t6, -144
; RV64-NEXT: call callee
; RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t0, 112(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t1, 104(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t2, 96(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a1, 80(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a2, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a3, 64(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a4, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a5, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a6, 40(sp) # 8-byte Folded Reload
; RV64-NEXT: ld a7, 32(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t3, 24(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t4, 16(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t5, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: ld t6, 0(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: .cfi_restore t0
; RV64-NEXT: .cfi_restore t1
; RV64-NEXT: .cfi_restore t2
; RV64-NEXT: .cfi_restore a0
; RV64-NEXT: .cfi_restore a1
; RV64-NEXT: .cfi_restore a2
; RV64-NEXT: .cfi_restore a3
; RV64-NEXT: .cfi_restore a4
; RV64-NEXT: .cfi_restore a5
; RV64-NEXT: .cfi_restore a6
; RV64-NEXT: .cfi_restore a7
; RV64-NEXT: .cfi_restore t3
; RV64-NEXT: .cfi_restore t4
; RV64-NEXT: .cfi_restore t5
; RV64-NEXT: .cfi_restore t6
; RV64-NEXT: csrci mstatus, 8
; RV64-NEXT: csrw mepc, s1
; RV64-NEXT: csrw mcause, s0
; RV64-NEXT: ld s1, 128(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 136(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 144
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: mret
call void @callee()
ret void
}
define void @stack_swap_clobber() "interrupt"="SiFive-CLIC-stack-swap" {
; RV32-LABEL: stack_swap_clobber:
; RV32: # %bb.0:
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s1, 8(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset s0, -4
; RV32-NEXT: .cfi_offset s1, -8
; RV32-NEXT: #APP
; RV32-NEXT: #NO_APP
; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: .cfi_restore s1
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: mret
;
; RV64-LABEL: stack_swap_clobber:
; RV64: # %bb.0:
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s1, 0(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset s0, -8
; RV64-NEXT: .cfi_offset s1, -16
; RV64-NEXT: #APP
; RV64-NEXT: #NO_APP
; RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: .cfi_restore s1
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: mret
call void asm sideeffect "", "~{x8},~{x9}"() #4
ret void
}
define void @stack_swap_clobber_fp() "interrupt"="SiFive-CLIC-stack-swap" "frame-pointer"="all" {
; RV32-LABEL: stack_swap_clobber_fp:
; RV32: # %bb.0:
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: .cfi_offset s1, -12
; RV32-NEXT: addi s0, sp, 16
; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: #APP
; RV32-NEXT: #NO_APP
; RV32-NEXT: .cfi_def_cfa sp, 16
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: .cfi_restore s1
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: mret
;
; RV64-LABEL: stack_swap_clobber_fp:
; RV64: # %bb.0:
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: addi sp, sp, -32
; RV64-NEXT: .cfi_def_cfa_offset 32
; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: .cfi_offset s1, -24
; RV64-NEXT: addi s0, sp, 32
; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: #APP
; RV64-NEXT: #NO_APP
; RV64-NEXT: .cfi_def_cfa sp, 32
; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: .cfi_restore s1
; RV64-NEXT: addi sp, sp, 32
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: mret
call void asm sideeffect "", "~{x8},~{x9}"() #4
ret void
}
define void @preemptible_clobber() "interrupt"="SiFive-CLIC-preemptible" {
; RV32-LABEL: preemptible_clobber:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s1, 8(sp) # 4-byte Folded Spill
; RV32-NEXT: csrr s0, mcause
; RV32-NEXT: csrr s1, mepc
; RV32-NEXT: csrsi mstatus, 8
; RV32-NEXT: sw s0, 4(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s1, 0(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset s0, -12
; RV32-NEXT: .cfi_offset s1, -16
; RV32-NEXT: #APP
; RV32-NEXT: #NO_APP
; RV32-NEXT: lw s0, 4(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s1, 0(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: .cfi_restore s1
; RV32-NEXT: csrci mstatus, 8
; RV32-NEXT: csrw mepc, s1
; RV32-NEXT: csrw mcause, s0
; RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: mret
;
; RV64-LABEL: preemptible_clobber:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -32
; RV64-NEXT: .cfi_def_cfa_offset 32
; RV64-NEXT: sd s0, 24(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s1, 16(sp) # 8-byte Folded Spill
; RV64-NEXT: csrr s0, mcause
; RV64-NEXT: csrr s1, mepc
; RV64-NEXT: csrsi mstatus, 8
; RV64-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s1, 0(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset s0, -24
; RV64-NEXT: .cfi_offset s1, -32
; RV64-NEXT: #APP
; RV64-NEXT: #NO_APP
; RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: .cfi_restore s1
; RV64-NEXT: csrci mstatus, 8
; RV64-NEXT: csrw mepc, s1
; RV64-NEXT: csrw mcause, s0
; RV64-NEXT: ld s1, 16(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 24(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 32
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: mret
call void asm sideeffect "", "~{x8},~{x9}"() #4
ret void
}
define void @both_clobber() "interrupt"="SiFive-CLIC-preemptible-stack-swap" {
; RV32-LABEL: both_clobber:
; RV32: # %bb.0:
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s1, 8(sp) # 4-byte Folded Spill
; RV32-NEXT: csrr s0, mcause
; RV32-NEXT: csrr s1, mepc
; RV32-NEXT: csrsi mstatus, 8
; RV32-NEXT: sw s0, 4(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s1, 0(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset s0, -12
; RV32-NEXT: .cfi_offset s1, -16
; RV32-NEXT: #APP
; RV32-NEXT: #NO_APP
; RV32-NEXT: lw s0, 4(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s1, 0(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore s0
; RV32-NEXT: .cfi_restore s1
; RV32-NEXT: csrci mstatus, 8
; RV32-NEXT: csrw mepc, s1
; RV32-NEXT: csrw mcause, s0
; RV32-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV32-NEXT: mret
;
; RV64-LABEL: both_clobber:
; RV64: # %bb.0:
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: addi sp, sp, -32
; RV64-NEXT: .cfi_def_cfa_offset 32
; RV64-NEXT: sd s0, 24(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s1, 16(sp) # 8-byte Folded Spill
; RV64-NEXT: csrr s0, mcause
; RV64-NEXT: csrr s1, mepc
; RV64-NEXT: csrsi mstatus, 8
; RV64-NEXT: sd s0, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s1, 0(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset s0, -24
; RV64-NEXT: .cfi_offset s1, -32
; RV64-NEXT: #APP
; RV64-NEXT: #NO_APP
; RV64-NEXT: ld s0, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s1, 0(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore s0
; RV64-NEXT: .cfi_restore s1
; RV64-NEXT: csrci mstatus, 8
; RV64-NEXT: csrw mepc, s1
; RV64-NEXT: csrw mcause, s0
; RV64-NEXT: ld s1, 16(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 24(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 32
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: csrrw sp, sf.mscratchcsw, sp
; RV64-NEXT: mret
call void asm sideeffect "", "~{x8},~{x9}"() #4
ret void
}