
The main change in this patch is that we go from emitting the expression `@ cfa - NumBytes - NumScalableBytes * VG` to `@ cfa - VG * NumScalableBytes - NumBytes`. That is, the VG term now comes first. This is in preparation for a future patch that adds an alternative way to resolve VG (which uses the CFA, so it is convenient for the CFA to be at the top of the DWARF expression stack). Since this change is fairly churn-heavy, I took the opportunity to also save up to 4 bytes per SVE CFI expression. This is done by folding LEB128 constants in the range 0 to 31 to `DW_OP_lit*` literals, and by using the offset operand of `DW_OP_breg*` expressions.
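
To illustrate where the savings come from (a worked example using the standard DWARF opcode values, not code from the patch itself): a small constant like 8 costs one byte as `DW_OP_lit8` (0x38) but two bytes as `DW_OP_consts 8` (0x11 0x08), and `sp + 16` costs two bytes as `DW_OP_breg31 +16` (0x8f 0x10) rather than a zero-offset breg followed by a separate constant and `DW_OP_plus`. The `.cfi_escape` emitted in the tests below decodes as:

```
0x0f            DW_CFA_def_cfa_expression
0x08            expression length: 8 bytes
0x8f 0x10       DW_OP_breg31 +16      // sp + 16 (offset folded into the breg)
0x92 0x2e 0x00  DW_OP_bregx 46 +0     // VG (AArch64 DWARF register 46)
0x38            DW_OP_lit8            // one-byte literal 8
0x1e            DW_OP_mul             // VG * 8
0x22            DW_OP_plus            // sp + 16 + 8 * VG
```
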
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

; LEGAL ADDS

define <vscale x 16 x i1> @add_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: add_nxv16i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p2.b
; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
; CHECK-NEXT:    ret
  %res = add <vscale x 16 x i1> %a, %b
  ret <vscale x 16 x i1> %res;
}

define <vscale x 8 x i1> @add_nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) {
; CHECK-LABEL: add_nxv8i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p2.h
; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
; CHECK-NEXT:    ret
  %res = add <vscale x 8 x i1> %a, %b
  ret <vscale x 8 x i1> %res;
}

define <vscale x 4 x i1> @add_nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) {
; CHECK-LABEL: add_nxv4i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p2.s
; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
; CHECK-NEXT:    ret
  %res = add <vscale x 4 x i1> %a, %b
  ret <vscale x 4 x i1> %res;
}

define <vscale x 2 x i1> @add_nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
; CHECK-LABEL: add_nxv2i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p2.d
; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
; CHECK-NEXT:    ret
  %res = add <vscale x 2 x i1> %a, %b
  ret <vscale x 2 x i1> %res;
}

; ILLEGAL ADDS

define aarch64_sve_vector_pcs <vscale x 64 x i1> @add_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) uwtable {
; CHECK-LABEL: add_nxv64i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
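; Decodes as: DW_CFA_def_cfa_expression, len 8: DW_OP_breg31 +16; DW_OP_bregx 46 (VG) +0;
; DW_OP_lit8; DW_OP_mul; DW_OP_plus -- i.e. sp + 16 + 8 * VG.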
; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    ptrue p6.b
; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    ldr p4, [x0]
; CHECK-NEXT:    ldr p5, [x1]
; CHECK-NEXT:    ldr p7, [x2]
; CHECK-NEXT:    ldr p8, [x3]
; CHECK-NEXT:    eor p0.b, p6/z, p0.b, p4.b
; CHECK-NEXT:    eor p1.b, p6/z, p1.b, p5.b
; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    eor p2.b, p6/z, p2.b, p7.b
; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    eor p3.b, p6/z, p3.b, p8.b
; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    .cfi_def_cfa wsp, 16
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    .cfi_restore w29
; CHECK-NEXT:    ret
  %res = add <vscale x 64 x i1> %a, %b
  ret <vscale x 64 x i1> %res;
}

; LEGAL SUBS

define <vscale x 16 x i1> @sub_xv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: sub_xv16i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p2.b
; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
; CHECK-NEXT:    ret
  %res = sub <vscale x 16 x i1> %a, %b
  ret <vscale x 16 x i1> %res;
}

define <vscale x 8 x i1> @sub_xv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) {
; CHECK-LABEL: sub_xv8i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p2.h
; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
; CHECK-NEXT:    ret
  %res = sub <vscale x 8 x i1> %a, %b
  ret <vscale x 8 x i1> %res;
}

define <vscale x 4 x i1> @sub_xv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) {
; CHECK-LABEL: sub_xv4i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p2.s
; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
; CHECK-NEXT:    ret
  %res = sub <vscale x 4 x i1> %a, %b
  ret <vscale x 4 x i1> %res;
}

define <vscale x 2 x i1> @sub_xv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
; CHECK-LABEL: sub_xv2i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p2.d
; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
; CHECK-NEXT:    ret
  %res = sub <vscale x 2 x i1> %a, %b
  ret <vscale x 2 x i1> %res;
}

; ILLEGAL SUBS

define aarch64_sve_vector_pcs <vscale x 64 x i1> @sub_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) uwtable {
; CHECK-LABEL: sub_nxv64i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x08, 0x8f, 0x10, 0x92, 0x2e, 0x00, 0x38, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    ptrue p6.b
; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    ldr p4, [x0]
; CHECK-NEXT:    ldr p5, [x1]
; CHECK-NEXT:    ldr p7, [x2]
; CHECK-NEXT:    ldr p8, [x3]
; CHECK-NEXT:    eor p0.b, p6/z, p0.b, p4.b
; CHECK-NEXT:    eor p1.b, p6/z, p1.b, p5.b
; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    eor p2.b, p6/z, p2.b, p7.b
; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    eor p3.b, p6/z, p3.b, p8.b
; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    .cfi_def_cfa wsp, 16
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    .cfi_restore w29
; CHECK-NEXT:    ret
  %res = sub <vscale x 64 x i1> %a, %b
  ret <vscale x 64 x i1> %res;
}