
This patch reworks how VG is handled around streaming mode changes.

Previously, for functions with streaming mode changes, we would:

- Save the incoming VG in the prologue
- Emit `.cfi_offset vg, <offset>` and `.cfi_restore vg` around streaming mode changes

Additionally, for locally streaming functions, we would:

- Also save the streaming VG in the prologue
- Emit `.cfi_offset vg, <incoming VG offset>` in the prologue
- Emit `.cfi_offset vg, <streaming VG offset>` and `.cfi_restore vg` around streaming mode changes

In both cases, this does more than necessary and is hard for an unwinder to parse, as using `.cfi_offset` in this way does not follow the semantics of the underlying DWARF CFI opcodes.

The new scheme in this patch is, for functions with streaming mode changes (including locally streaming functions), to:

- Save the incoming VG in the prologue
- Emit `.cfi_offset vg, <offset>` in the prologue (not at streaming mode changes)
- Emit `.cfi_restore vg` after the saved VG has been deallocated
  - This will be in the function epilogue, where VG is always the same as the entry VG
- Explicitly reference the incoming VG in the expressions for SVE callee-saves
- Ensure the CFA is not described in terms of VG

A more in-depth discussion of this scheme is available at:
https://gist.github.com/MacDue/b7a5c45d131d2440858165bfc903e97b

The TLDR is that, following this scheme, SME unwinding can be implemented with minimal changes to existing unwinders. All an unwinder needs to do is initialize VG to `CNTD` at the start of unwinding; everything else is handled by standard opcodes (which need no changes to handle VG), as sketched below.
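As a rough illustration of the unwinder-side change this scheme asks for, here is a minimal C sketch. The `RegisterState` type, `begin_unwind` entry point, and register-file size are hypothetical stand-ins for an unwinder's real data structures; the only real facts assumed are that VG is AArch64 DWARF register 46 and that `CNTD` reads the current vector granule count (requires building with SVE enabled, e.g. `-march=armv8-a+sve`):

```c
#include <stdint.h>

/* AArch64 DWARF register number for VG (the vector granule count). */
#define AARCH64_DWARF_VG 46

/* Hypothetical register-state container; a real unwinder has its own. */
typedef struct {
  uint64_t regs[129];
} RegisterState;

static uint64_t read_cntd(void) {
  uint64_t cntd;
  /* CNTD returns the number of 64-bit elements in an SVE vector,
     which is exactly the value of VG at this point. */
  __asm__("cntd %0" : "=r"(cntd));
  return cntd;
}

void begin_unwind(RegisterState *state) {
  /* Seed VG from the hardware once, at the start of unwinding. From
     here on, ordinary DWARF CFI (.cfi_offset/.cfi_restore and DWARF
     expressions that read VG) describes everything, including frames
     with streaming mode changes. */
  state->regs[AARCH64_DWARF_VG] = read_cntd();
}
```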
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-streaming-hazard-size=0 | FileCheck %s --check-prefixes=CHECK-COMMON,CHECK
; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-streaming-hazard-size=0 -aarch64-new-sme-abi | FileCheck %s --check-prefixes=CHECK-COMMON,CHECK-NEWLOWERING
; RUN: llc < %s -mtriple=aarch64 -mattr=+sve2 -aarch64-streaming-hazard-size=0 -pass-remarks-analysis=stack-frame-layout 2>&1 >/dev/null | FileCheck %s --check-prefixes=CHECK-FRAMELAYOUT

; CHECK-FRAMELAYOUT-LABEL: Function: csr_d8_allocnxv4i32i32f64
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-16 x vscale], Type: Variable, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-20-16 x vscale], Type: Variable, Align: 4, Size: 4
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-16 x vscale], Type: Variable, Align: 8, Size: 8

define i32 @csr_d8_allocnxv4i32i32f64(double %d) "aarch64_pstate_sm_compatible" {
; CHECK-COMMON-LABEL: csr_d8_allocnxv4i32i32f64:
; CHECK-COMMON: // %bb.0: // %entry
; CHECK-COMMON-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
; CHECK-COMMON-NEXT: str x29, [sp, #8] // 8-byte Folded Spill
; CHECK-COMMON-NEXT: sub sp, sp, #16
; CHECK-COMMON-NEXT: addvl sp, sp, #-1
; CHECK-COMMON-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG
; CHECK-COMMON-NEXT: .cfi_offset w29, -8
; CHECK-COMMON-NEXT: .cfi_offset b8, -16
; CHECK-COMMON-NEXT: mov z1.s, #0 // =0x0
; CHECK-COMMON-NEXT: add x8, sp, #16
; CHECK-COMMON-NEXT: mov w0, wzr
; CHECK-COMMON-NEXT: //APP
; CHECK-COMMON-NEXT: //NO_APP
; CHECK-COMMON-NEXT: str wzr, [sp, #12]
; CHECK-COMMON-NEXT: str d0, [sp]
; CHECK-COMMON-NEXT: str z1, [x8]
; CHECK-COMMON-NEXT: addvl sp, sp, #1
; CHECK-COMMON-NEXT: add sp, sp, #16
; CHECK-COMMON-NEXT: ldr x29, [sp, #8] // 8-byte Folded Reload
; CHECK-COMMON-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
; CHECK-COMMON-NEXT: ret
entry:
  %a = alloca <vscale x 4 x i32>
  %b = alloca i32
  %c = alloca double
  tail call void asm sideeffect "", "~{d8}"() #1
  store <vscale x 4 x i32> zeroinitializer, ptr %a
  store i32 zeroinitializer, ptr %b
  store double %d, ptr %c
  ret i32 0
}

; CHECK-FRAMELAYOUT-LABEL: Function: csr_d8_allocnxv4i32i32f64_fp
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-20], Type: Variable, Align: 4, Size: 4
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 16, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-16 x vscale], Type: Variable, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-40-16 x vscale], Type: Variable, Align: 8, Size: 8

define i32 @csr_d8_allocnxv4i32i32f64_fp(double %d) "aarch64_pstate_sm_compatible" "frame-pointer"="all" {
; CHECK-COMMON-LABEL: csr_d8_allocnxv4i32i32f64_fp:
; CHECK-COMMON: // %bb.0: // %entry
; CHECK-COMMON-NEXT: str d8, [sp, #-32]! // 8-byte Folded Spill
; CHECK-COMMON-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: add x29, sp, #16
; CHECK-COMMON-NEXT: sub sp, sp, #16
; CHECK-COMMON-NEXT: addvl sp, sp, #-1
; CHECK-COMMON-NEXT: .cfi_def_cfa w29, 16
; CHECK-COMMON-NEXT: .cfi_offset w30, -8
; CHECK-COMMON-NEXT: .cfi_offset w29, -16
; CHECK-COMMON-NEXT: .cfi_offset b8, -32
; CHECK-COMMON-NEXT: mov z1.s, #0 // =0x0
; CHECK-COMMON-NEXT: addvl x8, sp, #1
; CHECK-COMMON-NEXT: //APP
; CHECK-COMMON-NEXT: //NO_APP
; CHECK-COMMON-NEXT: str wzr, [x8, #28]
; CHECK-COMMON-NEXT: sub x8, x29, #16
; CHECK-COMMON-NEXT: mov w0, wzr
; CHECK-COMMON-NEXT: str d0, [sp, #8]
; CHECK-COMMON-NEXT: str z1, [x8, #-1, mul vl]
; CHECK-COMMON-NEXT: addvl sp, sp, #1
; CHECK-COMMON-NEXT: add sp, sp, #16
; CHECK-COMMON-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr d8, [sp], #32 // 8-byte Folded Reload
; CHECK-COMMON-NEXT: ret
entry:
  %a = alloca <vscale x 4 x i32>
  %b = alloca i32
  %c = alloca double
  tail call void asm sideeffect "", "~{d8}"() #1
  store <vscale x 4 x i32> zeroinitializer, ptr %a
  store i32 zeroinitializer, ptr %b
  store double %d, ptr %c
  ret i32 0
}

; In the presence of dynamic stack-realignment we emit correct offsets for
; objects which are not realigned. For realigned objects, e.g. the i32 alloca
; in this test, we emit the correct offset ignoring the re-alignment (i.e. the
; offset if the alignment requirement is already satisfied).

; CHECK-FRAMELAYOUT-LABEL: Function: csr_d8_allocnxv4i32i32f64_dynamicrealign
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-24], Type: Variable, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 16, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-16 x vscale], Type: Variable, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-128-16 x vscale], Type: Variable, Align: 128, Size: 4

define i32 @csr_d8_allocnxv4i32i32f64_dynamicrealign(double %d) "aarch64_pstate_sm_compatible" {
; CHECK-COMMON-LABEL: csr_d8_allocnxv4i32i32f64_dynamicrealign:
; CHECK-COMMON: // %bb.0: // %entry
; CHECK-COMMON-NEXT: str d8, [sp, #-32]! // 8-byte Folded Spill
; CHECK-COMMON-NEXT: sub x9, sp, #96
; CHECK-COMMON-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: add x29, sp, #16
; CHECK-COMMON-NEXT: addvl x9, x9, #-1
; CHECK-COMMON-NEXT: and sp, x9, #0xffffffffffffff80
; CHECK-COMMON-NEXT: .cfi_def_cfa w29, 16
; CHECK-COMMON-NEXT: .cfi_offset w30, -8
; CHECK-COMMON-NEXT: .cfi_offset w29, -16
; CHECK-COMMON-NEXT: .cfi_offset b8, -32
; CHECK-COMMON-NEXT: mov z1.s, #0 // =0x0
; CHECK-COMMON-NEXT: sub x8, x29, #16
; CHECK-COMMON-NEXT: mov w0, wzr
; CHECK-COMMON-NEXT: //APP
; CHECK-COMMON-NEXT: //NO_APP
; CHECK-COMMON-NEXT: str wzr, [sp]
; CHECK-COMMON-NEXT: stur d0, [x29, #-8]
; CHECK-COMMON-NEXT: str z1, [x8, #-1, mul vl]
; CHECK-COMMON-NEXT: sub sp, x29, #16
; CHECK-COMMON-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr d8, [sp], #32 // 8-byte Folded Reload
; CHECK-COMMON-NEXT: ret
entry:
  %a = alloca <vscale x 4 x i32>
  %b = alloca i32, align 128
  %c = alloca double
  tail call void asm sideeffect "", "~{d8}"() #1
  store <vscale x 4 x i32> zeroinitializer, ptr %a
  store i32 zeroinitializer, ptr %b
  store double %d, ptr %c
  ret i32 0
}

; In the presence of VLA-area objects, we emit correct offsets for all objects
; except for these VLA objects.

; CHECK-FRAMELAYOUT-LABEL: Function: csr_d8_allocnxv4i32i32f64_vla
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-24], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-16 x vscale], Type: Variable, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-40-16 x vscale], Type: Variable, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-16 x vscale], Type: VariableSized, Align: 1, Size: 0
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48-16 x vscale], Type: VariableSized, Align: 1, Size: 0

define i32 @csr_d8_allocnxv4i32i32f64_vla(double %d, i32 %i) "aarch64_pstate_sm_compatible" {
; CHECK-COMMON-LABEL: csr_d8_allocnxv4i32i32f64_vla:
; CHECK-COMMON: // %bb.0: // %entry
; CHECK-COMMON-NEXT: str d8, [sp, #-32]! // 8-byte Folded Spill
; CHECK-COMMON-NEXT: stp x29, x30, [sp, #8] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: add x29, sp, #8
; CHECK-COMMON-NEXT: str x19, [sp, #24] // 8-byte Folded Spill
; CHECK-COMMON-NEXT: sub sp, sp, #16
; CHECK-COMMON-NEXT: addvl sp, sp, #-1
; CHECK-COMMON-NEXT: mov x19, sp
; CHECK-COMMON-NEXT: .cfi_def_cfa w29, 24
; CHECK-COMMON-NEXT: .cfi_offset w19, -8
; CHECK-COMMON-NEXT: .cfi_offset w30, -16
; CHECK-COMMON-NEXT: .cfi_offset w29, -24
; CHECK-COMMON-NEXT: .cfi_offset b8, -32
; CHECK-COMMON-NEXT: // kill: def $w0 killed $w0 def $x0
; CHECK-COMMON-NEXT: ubfiz x8, x0, #2, #32
; CHECK-COMMON-NEXT: mov x9, sp
; CHECK-COMMON-NEXT: add x8, x8, #15
; CHECK-COMMON-NEXT: and x8, x8, #0x7fffffff0
; CHECK-COMMON-NEXT: sub x9, x9, x8
; CHECK-COMMON-NEXT: mov sp, x9
; CHECK-COMMON-NEXT: mov x10, sp
; CHECK-COMMON-NEXT: sub x8, x10, x8
; CHECK-COMMON-NEXT: mov sp, x8
; CHECK-COMMON-NEXT: mov z1.s, #0 // =0x0
; CHECK-COMMON-NEXT: //APP
; CHECK-COMMON-NEXT: //NO_APP
; CHECK-COMMON-NEXT: str wzr, [x8]
; CHECK-COMMON-NEXT: sub x8, x29, #8
; CHECK-COMMON-NEXT: mov w0, wzr
; CHECK-COMMON-NEXT: str wzr, [x9]
; CHECK-COMMON-NEXT: str d0, [x19, #8]
; CHECK-COMMON-NEXT: str z1, [x8, #-1, mul vl]
; CHECK-COMMON-NEXT: sub sp, x29, #8
; CHECK-COMMON-NEXT: ldp x29, x30, [sp, #8] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr x19, [sp, #24] // 8-byte Folded Reload
; CHECK-COMMON-NEXT: ldr d8, [sp], #32 // 8-byte Folded Reload
; CHECK-COMMON-NEXT: ret
entry:
  %a = alloca <vscale x 4 x i32>
  %0 = zext i32 %i to i64
  %vla0 = alloca i32, i64 %0
  %vla1 = alloca i32, i64 %0
  %c = alloca double
  tail call void asm sideeffect "", "~{d8}"() #1
  store <vscale x 4 x i32> zeroinitializer, ptr %a
  store i32 zeroinitializer, ptr %vla0
  store i32 zeroinitializer, ptr %vla1
  store double %d, ptr %c
  ret i32 0
}

; CHECK-FRAMELAYOUT-LABEL: Function: csr_d8_allocnxv4i32i32f64_stackargsi32f64
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP+8], Type: Fixed, Align: 8, Size: 4
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP+0], Type: Fixed, Align: 16, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-16 x vscale], Type: Variable, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-20-16 x vscale], Type: Variable, Align: 4, Size: 4
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-16 x vscale], Type: Variable, Align: 8, Size: 8

define i32 @csr_d8_allocnxv4i32i32f64_stackargsi32f64(double %d0, double %d1, double %d2, double %d3, double %d4, double %d5, double %d6, double %d7, double %d8, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8) "aarch64_pstate_sm_compatible" {
; CHECK-COMMON-LABEL: csr_d8_allocnxv4i32i32f64_stackargsi32f64:
; CHECK-COMMON: // %bb.0: // %entry
; CHECK-COMMON-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
; CHECK-COMMON-NEXT: str x29, [sp, #8] // 8-byte Folded Spill
; CHECK-COMMON-NEXT: sub sp, sp, #16
; CHECK-COMMON-NEXT: addvl sp, sp, #-1
; CHECK-COMMON-NEXT: .cfi_escape 0x0f, 0x08, 0x8f, 0x20, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 32 + 8 * VG
; CHECK-COMMON-NEXT: .cfi_offset w29, -8
; CHECK-COMMON-NEXT: .cfi_offset b8, -16
; CHECK-COMMON-NEXT: mov z1.s, #0 // =0x0
; CHECK-COMMON-NEXT: add x8, sp, #16
; CHECK-COMMON-NEXT: mov w0, wzr
; CHECK-COMMON-NEXT: //APP
; CHECK-COMMON-NEXT: //NO_APP
; CHECK-COMMON-NEXT: str wzr, [sp, #12]
; CHECK-COMMON-NEXT: str d0, [sp]
; CHECK-COMMON-NEXT: str z1, [x8]
; CHECK-COMMON-NEXT: addvl sp, sp, #1
; CHECK-COMMON-NEXT: add sp, sp, #16
; CHECK-COMMON-NEXT: ldr x29, [sp, #8] // 8-byte Folded Reload
; CHECK-COMMON-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
; CHECK-COMMON-NEXT: ret
entry:
  %a = alloca <vscale x 4 x i32>
  %b = alloca i32
  %c = alloca double
  tail call void asm sideeffect "", "~{d8}"() #1
  store <vscale x 4 x i32> zeroinitializer, ptr %a
  store i32 zeroinitializer, ptr %b
  store double %d0, ptr %c
  ret i32 0
}

; CHECK-FRAMELAYOUT-LABEL: Function: svecc_z8_allocnxv4i32i32f64_fp
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-16 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-32 x vscale], Type: Variable, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-20-32 x vscale], Type: Variable, Align: 4, Size: 4
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-32 x vscale], Type: Variable, Align: 8, Size: 8

define i32 @svecc_z8_allocnxv4i32i32f64_fp(double %d, <vscale x 4 x i32> %v) "aarch64_pstate_sm_compatible" "frame-pointer"="all" {
; CHECK-COMMON-LABEL: svecc_z8_allocnxv4i32i32f64_fp:
; CHECK-COMMON: // %bb.0: // %entry
; CHECK-COMMON-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-COMMON-NEXT: mov x29, sp
; CHECK-COMMON-NEXT: addvl sp, sp, #-1
; CHECK-COMMON-NEXT: str z8, [sp] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: sub sp, sp, #16
; CHECK-COMMON-NEXT: addvl sp, sp, #-1
; CHECK-COMMON-NEXT: .cfi_def_cfa w29, 16
; CHECK-COMMON-NEXT: .cfi_offset w30, -8
; CHECK-COMMON-NEXT: .cfi_offset w29, -16
; CHECK-COMMON-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
; CHECK-COMMON-NEXT: mov w0, wzr
; CHECK-COMMON-NEXT: //APP
; CHECK-COMMON-NEXT: //NO_APP
; CHECK-COMMON-NEXT: str wzr, [sp, #12]
; CHECK-COMMON-NEXT: str z1, [x29, #-2, mul vl]
; CHECK-COMMON-NEXT: str d0, [sp], #16
; CHECK-COMMON-NEXT: addvl sp, sp, #1
; CHECK-COMMON-NEXT: ldr z8, [sp] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: addvl sp, sp, #1
; CHECK-COMMON-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ret
entry:
  %a = alloca <vscale x 4 x i32>
  %b = alloca i32
  %c = alloca double
  tail call void asm sideeffect "", "~{d8}"() #1
  store <vscale x 4 x i32> %v, ptr %a
  store i32 zeroinitializer, ptr %b
  store double %d, ptr %c
  ret i32 0
}

; CHECK-FRAMELAYOUT-LABEL: Function: svecc_z8_allocnxv4i32i32f64_stackargsi32_fp
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP+0], Type: Fixed, Align: 16, Size: 4
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-16 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16-32 x vscale], Type: Variable, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-20-32 x vscale], Type: Variable, Align: 4, Size: 4
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32-32 x vscale], Type: Variable, Align: 8, Size: 8

define i32 @svecc_z8_allocnxv4i32i32f64_stackargsi32_fp(double %d, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, <vscale x 4 x i32> %v) "aarch64_pstate_sm_compatible" "frame-pointer"="all" {
; CHECK-COMMON-LABEL: svecc_z8_allocnxv4i32i32f64_stackargsi32_fp:
; CHECK-COMMON: // %bb.0: // %entry
; CHECK-COMMON-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-COMMON-NEXT: mov x29, sp
; CHECK-COMMON-NEXT: addvl sp, sp, #-1
; CHECK-COMMON-NEXT: str z8, [sp] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: sub sp, sp, #16
; CHECK-COMMON-NEXT: addvl sp, sp, #-1
; CHECK-COMMON-NEXT: .cfi_def_cfa w29, 16
; CHECK-COMMON-NEXT: .cfi_offset w30, -8
; CHECK-COMMON-NEXT: .cfi_offset w29, -16
; CHECK-COMMON-NEXT: .cfi_escape 0x10, 0x48, 0x09, 0x92, 0x2e, 0x00, 0x11, 0x78, 0x1e, 0x22, 0x40, 0x1c // $d8 @ cfa - 8 * VG - 16
; CHECK-COMMON-NEXT: mov w0, wzr
; CHECK-COMMON-NEXT: //APP
; CHECK-COMMON-NEXT: //NO_APP
; CHECK-COMMON-NEXT: str wzr, [sp, #12]
; CHECK-COMMON-NEXT: str z1, [x29, #-2, mul vl]
; CHECK-COMMON-NEXT: str d0, [sp], #16
; CHECK-COMMON-NEXT: addvl sp, sp, #1
; CHECK-COMMON-NEXT: ldr z8, [sp] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: addvl sp, sp, #1
; CHECK-COMMON-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ret
entry:
  %a = alloca <vscale x 4 x i32>
  %b = alloca i32
  %c = alloca double
  tail call void asm sideeffect "", "~{d8}"() #1
  store <vscale x 4 x i32> %v, ptr %a
  store i32 zeroinitializer, ptr %b
  store double %d, ptr %c
  ret i32 0
}

; CHECK-FRAMELAYOUT-LABEL: Function: svecc_call
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-24], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48], Type: Spill, Align: 16, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-56], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-16 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-32 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-48 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-64 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-80 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-96 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-112 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-128 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-144 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-160 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-176 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-192 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-208 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-224 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-240 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-256 x vscale], Type: Spill, Align: 16, Size: vscale x 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-258 x vscale], Type: Spill, Align: 2, Size: vscale x 2
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-260 x vscale], Type: Spill, Align: 2, Size: vscale x 2
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-262 x vscale], Type: Spill, Align: 2, Size: vscale x 2
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-264 x vscale], Type: Spill, Align: 2, Size: vscale x 2
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-266 x vscale], Type: Spill, Align: 2, Size: vscale x 2
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-268 x vscale], Type: Spill, Align: 2, Size: vscale x 2
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-270 x vscale], Type: Spill, Align: 2, Size: vscale x 2
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-272 x vscale], Type: Spill, Align: 2, Size: vscale x 2
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-274 x vscale], Type: Spill, Align: 2, Size: vscale x 2
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-276 x vscale], Type: Spill, Align: 2, Size: vscale x 2
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-278 x vscale], Type: Spill, Align: 2, Size: vscale x 2
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64-280 x vscale], Type: Spill, Align: 2, Size: vscale x 2

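; svecc_call changes streaming mode around the call to memset, so under the
; new scheme the SVE callee-save CFI below is expressed in terms of the
; incoming VG, reloaded from the save slot described by `.cfi_offset vg, -48`
; rather than read from the VG register. For example, the first escape below
; decodes as:
;   DW_CFA_expression d8, { DW_OP_dup; DW_OP_consts -48; DW_OP_plus;
;                           DW_OP_deref; DW_OP_consts -8; DW_OP_mul;
;                           DW_OP_plus; DW_OP_consts -64; DW_OP_plus }
; i.e. $d8 @ cfa - 8 * IncomingVG - 64.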
define i32 @svecc_call(<4 x i16> %P0, ptr %P1, i32 %P2, <vscale x 16 x i8> %P3, i16 %P4) "aarch64_pstate_sm_compatible" {
; CHECK-COMMON-LABEL: svecc_call:
; CHECK-COMMON: // %bb.0: // %entry
; CHECK-COMMON-NEXT: stp x29, x30, [sp, #-64]! // 16-byte Folded Spill
; CHECK-COMMON-NEXT: .cfi_def_cfa_offset 64
; CHECK-COMMON-NEXT: cntd x9
; CHECK-COMMON-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str x9, [sp, #16] // 8-byte Folded Spill
; CHECK-COMMON-NEXT: stp x26, x19, [sp, #48] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: mov x29, sp
; CHECK-COMMON-NEXT: .cfi_def_cfa w29, 64
; CHECK-COMMON-NEXT: .cfi_offset w19, -8
; CHECK-COMMON-NEXT: .cfi_offset w26, -16
; CHECK-COMMON-NEXT: .cfi_offset w27, -24
; CHECK-COMMON-NEXT: .cfi_offset w28, -32
; CHECK-COMMON-NEXT: .cfi_offset vg, -48
; CHECK-COMMON-NEXT: .cfi_offset w30, -56
; CHECK-COMMON-NEXT: .cfi_offset w29, -64
; CHECK-COMMON-NEXT: addvl sp, sp, #-18
; CHECK-COMMON-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-COMMON-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-COMMON-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-COMMON-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-COMMON-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
; CHECK-COMMON-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
; CHECK-COMMON-NEXT: str p9, [sp, #10, mul vl] // 2-byte Folded Spill
; CHECK-COMMON-NEXT: str p8, [sp, #11, mul vl] // 2-byte Folded Spill
; CHECK-COMMON-NEXT: str p7, [sp, #12, mul vl] // 2-byte Folded Spill
; CHECK-COMMON-NEXT: str p6, [sp, #13, mul vl] // 2-byte Folded Spill
; CHECK-COMMON-NEXT: str p5, [sp, #14, mul vl] // 2-byte Folded Spill
; CHECK-COMMON-NEXT: str p4, [sp, #15, mul vl] // 2-byte Folded Spill
; CHECK-COMMON-NEXT: str z23, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str z22, [sp, #3, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str z21, [sp, #4, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str z20, [sp, #5, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str z19, [sp, #6, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str z18, [sp, #7, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str z17, [sp, #8, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str z16, [sp, #9, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str z15, [sp, #10, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str z14, [sp, #11, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str z13, [sp, #12, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str z12, [sp, #13, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str z11, [sp, #14, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill
; CHECK-COMMON-NEXT: .cfi_escape 0x10, 0x48, 0x0c, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x78, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d8 @ cfa - 8 * IncomingVG - 64
; CHECK-COMMON-NEXT: .cfi_escape 0x10, 0x49, 0x0c, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x70, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d9 @ cfa - 16 * IncomingVG - 64
; CHECK-COMMON-NEXT: .cfi_escape 0x10, 0x4a, 0x0c, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x68, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d10 @ cfa - 24 * IncomingVG - 64
; CHECK-COMMON-NEXT: .cfi_escape 0x10, 0x4b, 0x0c, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x60, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d11 @ cfa - 32 * IncomingVG - 64
; CHECK-COMMON-NEXT: .cfi_escape 0x10, 0x4c, 0x0c, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x58, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d12 @ cfa - 40 * IncomingVG - 64
; CHECK-COMMON-NEXT: .cfi_escape 0x10, 0x4d, 0x0c, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x50, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d13 @ cfa - 48 * IncomingVG - 64
; CHECK-COMMON-NEXT: .cfi_escape 0x10, 0x4e, 0x0c, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x48, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d14 @ cfa - 56 * IncomingVG - 64
; CHECK-COMMON-NEXT: .cfi_escape 0x10, 0x4f, 0x0c, 0x12, 0x11, 0x50, 0x22, 0x06, 0x11, 0x40, 0x1e, 0x22, 0x11, 0x40, 0x22 // $d15 @ cfa - 64 * IncomingVG - 64
; CHECK-COMMON-NEXT: mov x8, x0
; CHECK-COMMON-NEXT: bl __arm_sme_state
; CHECK-COMMON-NEXT: mov x19, x0
; CHECK-COMMON-NEXT: //APP
; CHECK-COMMON-NEXT: //NO_APP
; CHECK-COMMON-NEXT: tbz w19, #0, .LBB7_2
; CHECK-COMMON-NEXT: // %bb.1: // %entry
; CHECK-COMMON-NEXT: smstop sm
; CHECK-COMMON-NEXT: .LBB7_2: // %entry
; CHECK-COMMON-NEXT: mov x0, x8
; CHECK-COMMON-NEXT: mov w1, #45 // =0x2d
; CHECK-COMMON-NEXT: mov w2, #37 // =0x25
; CHECK-COMMON-NEXT: bl memset
; CHECK-COMMON-NEXT: tbz w19, #0, .LBB7_4
; CHECK-COMMON-NEXT: // %bb.3: // %entry
; CHECK-COMMON-NEXT: smstart sm
; CHECK-COMMON-NEXT: .LBB7_4: // %entry
; CHECK-COMMON-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: mov w0, #22647 // =0x5877
; CHECK-COMMON-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: movk w0, #59491, lsl #16
; CHECK-COMMON-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-COMMON-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-COMMON-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-COMMON-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-COMMON-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
; CHECK-COMMON-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
; CHECK-COMMON-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
; CHECK-COMMON-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-COMMON-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
; CHECK-COMMON-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
; CHECK-COMMON-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
; CHECK-COMMON-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
; CHECK-COMMON-NEXT: addvl sp, sp, #18
; CHECK-COMMON-NEXT: .cfi_restore z8
; CHECK-COMMON-NEXT: .cfi_restore z9
; CHECK-COMMON-NEXT: .cfi_restore z10
; CHECK-COMMON-NEXT: .cfi_restore z11
; CHECK-COMMON-NEXT: .cfi_restore z12
; CHECK-COMMON-NEXT: .cfi_restore z13
; CHECK-COMMON-NEXT: .cfi_restore z14
; CHECK-COMMON-NEXT: .cfi_restore z15
; CHECK-COMMON-NEXT: .cfi_def_cfa wsp, 64
; CHECK-COMMON-NEXT: ldp x26, x19, [sp, #48] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
; CHECK-COMMON-NEXT: ldp x29, x30, [sp], #64 // 16-byte Folded Reload
; CHECK-COMMON-NEXT: .cfi_def_cfa_offset 0
; CHECK-COMMON-NEXT: .cfi_restore w19
; CHECK-COMMON-NEXT: .cfi_restore w26
; CHECK-COMMON-NEXT: .cfi_restore w27
; CHECK-COMMON-NEXT: .cfi_restore w28
; CHECK-COMMON-NEXT: .cfi_restore vg
; CHECK-COMMON-NEXT: .cfi_restore w30
; CHECK-COMMON-NEXT: .cfi_restore w29
; CHECK-COMMON-NEXT: ret
entry:
  tail call void asm sideeffect "", "~{x0},~{x28},~{x27},~{x3}"() #2
  %call = call ptr @memset(ptr noundef nonnull %P1, i32 noundef 45, i32 noundef 37)
  ret i32 -396142473
}
declare ptr @memset(ptr, i32, i32)

; The VA register currently ends up in VLA space - in the presence of VLA-area
; objects, we emit correct offsets for all objects except for these VLA objects.

; CHECK-FRAMELAYOUT-LABEL: Function: vastate
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-8], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-16], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-32], Type: Spill, Align: 16, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-40], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-48], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-56], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-64], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-72], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-80], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-88], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-96], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-104], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-112], Type: Spill, Align: 8, Size: 8
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-128], Type: Variable, Align: 16, Size: 16
; CHECK-FRAMELAYOUT-NEXT: Offset: [SP-128], Type: VariableSized, Align: 16, Size: 0

define i32 @vastate(i32 %x) "aarch64_inout_za" "aarch64_pstate_sm_enabled" "target-features"="+sme" {
; CHECK-LABEL: vastate:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: stp d15, d14, [sp, #-112]! // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 112
; CHECK-NEXT: cntd x9
; CHECK-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill
; CHECK-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill
; CHECK-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
; CHECK-NEXT: str x9, [sp, #80] // 8-byte Folded Spill
; CHECK-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
; CHECK-NEXT: add x29, sp, #64
; CHECK-NEXT: .cfi_def_cfa w29, 48
; CHECK-NEXT: .cfi_offset w19, -8
; CHECK-NEXT: .cfi_offset w20, -16
; CHECK-NEXT: .cfi_offset vg, -32
; CHECK-NEXT: .cfi_offset w30, -40
; CHECK-NEXT: .cfi_offset w29, -48
; CHECK-NEXT: .cfi_offset b8, -56
; CHECK-NEXT: .cfi_offset b9, -64
; CHECK-NEXT: .cfi_offset b10, -72
; CHECK-NEXT: .cfi_offset b11, -80
; CHECK-NEXT: .cfi_offset b12, -88
; CHECK-NEXT: .cfi_offset b13, -96
; CHECK-NEXT: .cfi_offset b14, -104
; CHECK-NEXT: .cfi_offset b15, -112
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: rdsvl x8, #1
; CHECK-NEXT: mov x9, sp
; CHECK-NEXT: mov w20, w0
; CHECK-NEXT: msub x9, x8, x8, x9
; CHECK-NEXT: mov sp, x9
; CHECK-NEXT: stur x9, [x29, #-80]
; CHECK-NEXT: sub x9, x29, #80
; CHECK-NEXT: sturh wzr, [x29, #-70]
; CHECK-NEXT: stur wzr, [x29, #-68]
; CHECK-NEXT: sturh w8, [x29, #-72]
; CHECK-NEXT: msr TPIDR2_EL0, x9
; CHECK-NEXT: smstop sm
; CHECK-NEXT: bl other
; CHECK-NEXT: smstart sm
; CHECK-NEXT: smstart za
; CHECK-NEXT: mrs x8, TPIDR2_EL0
; CHECK-NEXT: sub x0, x29, #80
; CHECK-NEXT: cbnz x8, .LBB8_2
; CHECK-NEXT: // %bb.1: // %entry
; CHECK-NEXT: bl __arm_tpidr2_restore
; CHECK-NEXT: .LBB8_2: // %entry
; CHECK-NEXT: mov w0, w20
; CHECK-NEXT: msr TPIDR2_EL0, xzr
; CHECK-NEXT: sub sp, x29, #64
; CHECK-NEXT: .cfi_def_cfa wsp, 112
; CHECK-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
; CHECK-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
; CHECK-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
; CHECK-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
; CHECK-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: ldp d15, d14, [sp], #112 // 16-byte Folded Reload
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: .cfi_restore w19
; CHECK-NEXT: .cfi_restore w20
; CHECK-NEXT: .cfi_restore vg
; CHECK-NEXT: .cfi_restore w30
; CHECK-NEXT: .cfi_restore w29
; CHECK-NEXT: .cfi_restore b8
; CHECK-NEXT: .cfi_restore b9
; CHECK-NEXT: .cfi_restore b10
; CHECK-NEXT: .cfi_restore b11
; CHECK-NEXT: .cfi_restore b12
; CHECK-NEXT: .cfi_restore b13
; CHECK-NEXT: .cfi_restore b14
; CHECK-NEXT: .cfi_restore b15
; CHECK-NEXT: ret
;
; CHECK-NEWLOWERING-LABEL: vastate:
; CHECK-NEWLOWERING: // %bb.0: // %entry
; CHECK-NEWLOWERING-NEXT: stp d15, d14, [sp, #-112]! // 16-byte Folded Spill
; CHECK-NEWLOWERING-NEXT: .cfi_def_cfa_offset 112
; CHECK-NEWLOWERING-NEXT: cntd x9
; CHECK-NEWLOWERING-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill
; CHECK-NEWLOWERING-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill
; CHECK-NEWLOWERING-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill
; CHECK-NEWLOWERING-NEXT: stp x29, x30, [sp, #64] // 16-byte Folded Spill
; CHECK-NEWLOWERING-NEXT: str x9, [sp, #80] // 8-byte Folded Spill
; CHECK-NEWLOWERING-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
; CHECK-NEWLOWERING-NEXT: add x29, sp, #64
; CHECK-NEWLOWERING-NEXT: .cfi_def_cfa w29, 48
; CHECK-NEWLOWERING-NEXT: .cfi_offset w19, -8
; CHECK-NEWLOWERING-NEXT: .cfi_offset w20, -16
; CHECK-NEWLOWERING-NEXT: .cfi_offset vg, -32
; CHECK-NEWLOWERING-NEXT: .cfi_offset w30, -40
; CHECK-NEWLOWERING-NEXT: .cfi_offset w29, -48
; CHECK-NEWLOWERING-NEXT: .cfi_offset b8, -56
; CHECK-NEWLOWERING-NEXT: .cfi_offset b9, -64
; CHECK-NEWLOWERING-NEXT: .cfi_offset b10, -72
; CHECK-NEWLOWERING-NEXT: .cfi_offset b11, -80
; CHECK-NEWLOWERING-NEXT: .cfi_offset b12, -88
; CHECK-NEWLOWERING-NEXT: .cfi_offset b13, -96
; CHECK-NEWLOWERING-NEXT: .cfi_offset b14, -104
; CHECK-NEWLOWERING-NEXT: .cfi_offset b15, -112
; CHECK-NEWLOWERING-NEXT: sub sp, sp, #16
; CHECK-NEWLOWERING-NEXT: rdsvl x8, #1
; CHECK-NEWLOWERING-NEXT: mov x9, sp
; CHECK-NEWLOWERING-NEXT: msub x9, x8, x8, x9
; CHECK-NEWLOWERING-NEXT: mov sp, x9
; CHECK-NEWLOWERING-NEXT: sub x10, x29, #80
; CHECK-NEWLOWERING-NEXT: mov w20, w0
; CHECK-NEWLOWERING-NEXT: stp x9, x8, [x29, #-80]
; CHECK-NEWLOWERING-NEXT: msr TPIDR2_EL0, x10
; CHECK-NEWLOWERING-NEXT: smstop sm
; CHECK-NEWLOWERING-NEXT: bl other
; CHECK-NEWLOWERING-NEXT: smstart sm
; CHECK-NEWLOWERING-NEXT: mov w0, w20
; CHECK-NEWLOWERING-NEXT: mov w8, w0
; CHECK-NEWLOWERING-NEXT: smstart za
; CHECK-NEWLOWERING-NEXT: mrs x9, TPIDR2_EL0
; CHECK-NEWLOWERING-NEXT: sub x0, x29, #80
; CHECK-NEWLOWERING-NEXT: cbnz x9, .LBB8_2
; CHECK-NEWLOWERING-NEXT: // %bb.1: // %entry
; CHECK-NEWLOWERING-NEXT: bl __arm_tpidr2_restore
; CHECK-NEWLOWERING-NEXT: .LBB8_2: // %entry
; CHECK-NEWLOWERING-NEXT: mov w0, w8
; CHECK-NEWLOWERING-NEXT: msr TPIDR2_EL0, xzr
; CHECK-NEWLOWERING-NEXT: sub sp, x29, #64
; CHECK-NEWLOWERING-NEXT: .cfi_def_cfa wsp, 112
; CHECK-NEWLOWERING-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
; CHECK-NEWLOWERING-NEXT: ldp x29, x30, [sp, #64] // 16-byte Folded Reload
; CHECK-NEWLOWERING-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
; CHECK-NEWLOWERING-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
; CHECK-NEWLOWERING-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
; CHECK-NEWLOWERING-NEXT: ldp d15, d14, [sp], #112 // 16-byte Folded Reload
; CHECK-NEWLOWERING-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEWLOWERING-NEXT: .cfi_restore w19
; CHECK-NEWLOWERING-NEXT: .cfi_restore w20
; CHECK-NEWLOWERING-NEXT: .cfi_restore vg
; CHECK-NEWLOWERING-NEXT: .cfi_restore w30
; CHECK-NEWLOWERING-NEXT: .cfi_restore w29
; CHECK-NEWLOWERING-NEXT: .cfi_restore b8
; CHECK-NEWLOWERING-NEXT: .cfi_restore b9
; CHECK-NEWLOWERING-NEXT: .cfi_restore b10
; CHECK-NEWLOWERING-NEXT: .cfi_restore b11
; CHECK-NEWLOWERING-NEXT: .cfi_restore b12
; CHECK-NEWLOWERING-NEXT: .cfi_restore b13
; CHECK-NEWLOWERING-NEXT: .cfi_restore b14
; CHECK-NEWLOWERING-NEXT: .cfi_restore b15
; CHECK-NEWLOWERING-NEXT: ret
entry:
  tail call void @other()
  ret i32 %x
}
declare void @other()
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-FRAMELAYOUT: {{.*}}