; llvm-project/llvm/test/CodeGen/AArch64/sve-fixed-length-optimize-ptrue.ll
;
; From commit af03d6b518 (Ricardo Jesus, 2025-05-12):
; [AArch64][SVE] Refactor getPTrue to return splat(1) when pattern=all. (#139236)
;
; As with #135016, refactor getPTrue to return splat(1) for all-active
; patterns. The main motivation is to improve codegen for fixed-length
; vector loads/stores that are converted to SVE masked memory ops when the
; vectors are wider than NEON. Emitting the mask as a splat helps
; DAGCombiner simplify all-active masked loads/stores into unmasked ones,
; for which it already has suitable combines and ISel has suitable patterns.
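;
; A minimal sketch of the shape of that refactor (an assumption based on the
; commit message; the actual change lives in AArch64ISelLowering.cpp). It
; relies on SelectionDAG::getConstant producing a splat for vector types:
;
;   static SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT, int Pattern) {
;     // All-active predicate: emit splat(1) so DAGCombiner can fold the
;     // masked load/store into an unmasked one.
;     if (Pattern == AArch64SVEPredPattern::all)
;       return DAG.getConstant(1, DL, VT);
;     // Any other pattern still lowers to an explicit PTRUE node.
;     return DAG.getNode(AArch64ISD::PTRUE, DL, VT,
;                        DAG.getTargetConstant(Pattern, DL, MVT::i32));
;   }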

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -aarch64-sve-vector-bits-min=512 -aarch64-sve-vector-bits-max=512 < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
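
; With these flags the 64 x i8 vectors are 512-bit, wider than NEON, so the
; loads/stores lower to SVE. The all-active mask folds away: plain ldr/str of
; z-registers, with no ptrue emitted.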
define void @add_v64i8(ptr %a, ptr %b) #0 {
; CHECK-LABEL: add_v64i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: add z0.b, z0.b, z1.b
; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%op1 = load <64 x i8>, ptr %a
%op2 = load <64 x i8>, ptr %b
%res = add <64 x i8> %op1, %op2
store <64 x i8> %res, ptr %a
ret void
}
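
; Same folding for 32 x i16.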
define void @add_v32i16(ptr %a, ptr %b, ptr %c) #0 {
; CHECK-LABEL: add_v32i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: add z0.h, z0.h, z1.h
; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%op1 = load <32 x i16>, ptr %a
%op2 = load <32 x i16>, ptr %b
%res = add <32 x i16> %op1, %op2
store <32 x i16> %res, ptr %a
ret void
}
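
; abs is a predicated SVE instruction, so a ptrue is still emitted for the
; operation itself, but the load and store stay unmasked.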
define void @abs_v16i32(ptr %a) #0 {
; CHECK-LABEL: abs_v16i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: abs z0.s, p0/m, z0.s
; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%op1 = load <16 x i32>, ptr %a
%res = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %op1, i1 false)
store <16 x i32> %res, ptr %a
ret void
}
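
; As above, with 64-bit elements.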
define void @abs_v8i64(ptr %a) #0 {
; CHECK-LABEL: abs_v8i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: abs z0.d, p0/m, z0.d
; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%op1 = load <8 x i64>, ptr %a
%res = call <8 x i64> @llvm.abs.v8i64(<8 x i64> %op1, i1 false)
store <8 x i64> %res, ptr %a
ret void
}
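
; Floating-point add selects the unpredicated form, so no ptrue is needed at
; all here.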
define void @fadd_v32f16(ptr %a, ptr %b) #0 {
; CHECK-LABEL: fadd_v32f16:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: fadd z0.h, z0.h, z1.h
; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%op1 = load <32 x half>, ptr %a
%op2 = load <32 x half>, ptr %b
%res = fadd <32 x half> %op1, %op2
store <32 x half> %res, ptr %a
ret void
}
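
; Same for 16 x float.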
define void @fadd_v16f32(ptr %a, ptr %b) #0 {
; CHECK-LABEL: fadd_v16f32:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: fadd z0.s, z0.s, z1.s
; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%op1 = load <16 x float>, ptr %a
%op2 = load <16 x float>, ptr %b
%res = fadd <16 x float> %op1, %op2
store <16 x float> %res, ptr %a
ret void
}
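
; Same for 8 x double.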
define void @fadd_v8f64(ptr %a, ptr %b) #0 {
; CHECK-LABEL: fadd_v8f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ldr z1, [x1]
; CHECK-NEXT: fadd z0.d, z0.d, z1.d
; CHECK-NEXT: str z0, [x0]
; CHECK-NEXT: ret
%op1 = load <8 x double>, ptr %a
%op2 = load <8 x double>, ptr %b
%res = fadd <8 x double> %op1, %op2
store <8 x double> %res, ptr %a
ret void
}

declare <16 x i32> @llvm.abs.v16i32(<16 x i32>, i1)
declare <8 x i64> @llvm.abs.v8i64(<8 x i64>, i1)
attributes #0 = { "target-features"="+sve" }