
Whilst at first glance there appear to be no native bfloat instructions for modifying the sign bit, this is only the case when FEAT_AFP is implemented. Without that feature, the vector FABS/FNEG instructions do not care about the floating-point format beyond needing to know the position of the sign bit. From what I can see, LLVM has no support for FEAT_AFP in terms of feature detection or ACLE builtins, so I believe the compiler can work under the assumption that the feature is not enabled. In fact, if FEAT_AFP is enabled then I believe the current isel is likely broken for half, float and double anyway.
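
To make the sign-bit argument concrete, here is a minimal IR sketch (the function name is illustrative, not part of this patch): fneg on bfloat reduces to flipping bit 15, the same bit FNEG flips for half, which is why the same instruction is safe for both formats when FEAT_AFP is absent.

```llvm
; Sketch only: bfloat fneg expressed as a plain sign-bit flip. The xor
; constant -32768 (0x8000) targets bit 15, which both half and bfloat
; use as their sign bit, so no format-specific instruction is needed.
define <8 x bfloat> @bf16_fneg_via_xor(<8 x bfloat> %x) {
  %bits = bitcast <8 x bfloat> %x to <8 x i16>
  %flipped = xor <8 x i16> %bits, <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768>
  %res = bitcast <8 x i16> %flipped to <8 x bfloat>
  ret <8 x bfloat> %res
}
```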
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mattr=+sve,+bf16 < %s | FileCheck %s
; RUN: llc -mattr=+sme2 -force-streaming < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

define <vscale x 16 x i8> @abs_nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: abs_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    abs z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
  %b.op = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %b, i1 0)
  %res = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b.op, <vscale x 16 x i8> %a
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @abs_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: abs_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    abs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %b.op = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %b, i1 0)
  %res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @abs_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: abs_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    abs z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %b.op = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %b, i1 0)
  %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @abs_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: abs_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    abs z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %b.op = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %b, i1 0)
  %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
  ret <vscale x 2 x i64> %res
}

define <vscale x 16 x i8> @clz_nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: clz_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    clz z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
  %b.op = call <vscale x 16 x i8> @llvm.ctlz.nxv16i8(<vscale x 16 x i8> %b)
  %res = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b.op, <vscale x 16 x i8> %a
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @clz_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: clz_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    clz z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %b.op = call <vscale x 8 x i16> @llvm.ctlz.nxv8i16(<vscale x 8 x i16> %b)
  %res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @clz_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: clz_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    clz z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %b.op = call <vscale x 4 x i32> @llvm.ctlz.nxv4i32(<vscale x 4 x i32> %b)
  %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @clz_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: clz_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    clz z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %b.op = call <vscale x 2 x i64> @llvm.ctlz.nxv2i64(<vscale x 2 x i64> %b)
  %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
  ret <vscale x 2 x i64> %res
}

define <vscale x 16 x i8> @cnt_nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cnt_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cnt z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
  %b.op = call <vscale x 16 x i8> @llvm.ctpop.nxv16i8(<vscale x 16 x i8> %b)
  %res = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b.op, <vscale x 16 x i8> %a
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @cnt_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cnt_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cnt z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %b.op = call <vscale x 8 x i16> @llvm.ctpop.nxv8i16(<vscale x 8 x i16> %b)
  %res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @cnt_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cnt_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cnt z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %b.op = call <vscale x 4 x i32> @llvm.ctpop.nxv4i32(<vscale x 4 x i32> %b)
  %res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @cnt_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cnt_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cnt z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %b.op = call <vscale x 2 x i64> @llvm.ctpop.nxv2i64(<vscale x 2 x i64> %b)
  %res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x half> @fabs_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: fabs_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %b.op = call <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half> %b)
  %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
  ret <vscale x 2 x half> %res
}

define <vscale x 4 x half> @fabs_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
; CHECK-LABEL: fabs_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %b.op = call <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half> %b)
  %res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
  ret <vscale x 4 x half> %res
}

define <vscale x 8 x half> @fabs_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fabs_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %b.op = call <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half> %b)
  %res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
  ret <vscale x 8 x half> %res
}

define <vscale x 2 x float> @fabs_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: fabs_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %b.op = call <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float> %b)
  %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
  ret <vscale x 2 x float> %res
}

define <vscale x 4 x float> @fabs_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fabs_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %b.op = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> %b)
  %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x double> @fabs_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fabs_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %b.op = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> %b)
  %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
  ret <vscale x 2 x double> %res
}

define <vscale x 2 x bfloat> @fabs_nxv2bf16(<vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) {
; CHECK-LABEL: fabs_nxv2bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %b.op = call <vscale x 2 x bfloat> @llvm.fabs.nxv2bf16(<vscale x 2 x bfloat> %b)
  %res = select <vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %b.op, <vscale x 2 x bfloat> %a
  ret <vscale x 2 x bfloat> %res
}

define <vscale x 4 x bfloat> @fabs_nxv4bf16(<vscale x 4 x i1> %pg, <vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) {
; CHECK-LABEL: fabs_nxv4bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %b.op = call <vscale x 4 x bfloat> @llvm.fabs.nxv4bf16(<vscale x 4 x bfloat> %b)
  %res = select <vscale x 4 x i1> %pg, <vscale x 4 x bfloat> %b.op, <vscale x 4 x bfloat> %a
  ret <vscale x 4 x bfloat> %res
}

define <vscale x 8 x bfloat> @fabs_nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: fabs_nxv8bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %b.op = call <vscale x 8 x bfloat> @llvm.fabs.nxv8bf16(<vscale x 8 x bfloat> %b)
  %res = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %b.op, <vscale x 8 x bfloat> %a
  ret <vscale x 8 x bfloat> %res
}

define <vscale x 2 x float> @fcvt_nxv2f16_to_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: fcvt_nxv2f16_to_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.s, p0/m, z1.h
; CHECK-NEXT:    ret
  %b.op = fpext <vscale x 2 x half> %b to <vscale x 2 x float>
  %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
  ret <vscale x 2 x float> %res
}

define <vscale x 2 x double> @fcvt_nxv2f16_to_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: fcvt_nxv2f16_to_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.d, p0/m, z1.h
; CHECK-NEXT:    ret
  %b.op = fpext <vscale x 2 x half> %b to <vscale x 2 x double>
  %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
  ret <vscale x 2 x double> %res
}

define <vscale x 4 x float> @fcvt_nxv4f16_to_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x half> %b) {
; CHECK-LABEL: fcvt_nxv4f16_to_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.s, p0/m, z1.h
; CHECK-NEXT:    ret
  %b.op = fpext <vscale x 4 x half> %b to <vscale x 4 x float>
  %res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x half> @fcvt_nxv2f32_to_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: fcvt_nxv2f32_to_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.h, p0/m, z1.s
; CHECK-NEXT:    ret
  %b.op = fptrunc <vscale x 2 x float> %b to <vscale x 2 x half>
  %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
  ret <vscale x 2 x half> %res
}

define <vscale x 2 x double> @fcvt_nxv2f32_to_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: fcvt_nxv2f32_to_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.d, p0/m, z1.s
; CHECK-NEXT:    ret
  %b.op = fpext <vscale x 2 x float> %b to <vscale x 2 x double>
  %res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
  ret <vscale x 2 x double> %res
}

define <vscale x 2 x bfloat> @fcvt_nxv2f32_to_nxv2bf16(<vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: fcvt_nxv2f32_to_nxv2bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    bfcvt z0.h, p0/m, z1.s
; CHECK-NEXT:    ret
  %b.op = fptrunc <vscale x 2 x float> %b to <vscale x 2 x bfloat>
  %res = select <vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %b.op, <vscale x 2 x bfloat> %a
  ret <vscale x 2 x bfloat> %res
}

define <vscale x 4 x half> @fcvt_nxv4f32_to_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcvt_nxv4f32_to_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.h, p0/m, z1.s
; CHECK-NEXT:    ret
  %b.op = fptrunc <vscale x 4 x float> %b to <vscale x 4 x half>
  %res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
  ret <vscale x 4 x half> %res
}

define <vscale x 4 x bfloat> @fcvt_nxv4f32_to_nxv4bf16(<vscale x 4 x i1> %pg, <vscale x 4 x bfloat> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcvt_nxv4f32_to_nxv4bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    bfcvt z0.h, p0/m, z1.s
; CHECK-NEXT:    ret
  %b.op = fptrunc <vscale x 4 x float> %b to <vscale x 4 x bfloat>
  %res = select <vscale x 4 x i1> %pg, <vscale x 4 x bfloat> %b.op, <vscale x 4 x bfloat> %a
  ret <vscale x 4 x bfloat> %res
}

define <vscale x 2 x half> @fcvt_nxv2f64_to_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcvt_nxv2f64_to_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.h, p0/m, z1.d
; CHECK-NEXT:    ret
  %b.op = fptrunc <vscale x 2 x double> %b to <vscale x 2 x half>
  %res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
  ret <vscale x 2 x half> %res
}

define <vscale x 2 x float> @fcvt_nxv2f64_to_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcvt_nxv2f64_to_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.s, p0/m, z1.d
; CHECK-NEXT:    ret
  %b.op = fptrunc <vscale x 2 x double> %b to <vscale x 2 x float>
  %res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
  ret <vscale x 2 x float> %res
}
|
|
|
|
define <vscale x 2 x bfloat> @fcvt_nxv2f64_to_nxv2bf16(<vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %a, <vscale x 2 x double> %b) "target-features"="+sve2" {
|
|
; CHECK-LABEL: fcvt_nxv2f64_to_nxv2bf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: ptrue p1.d
|
|
; CHECK-NEXT: fcvtx z1.s, p1/m, z1.d
|
|
; CHECK-NEXT: bfcvt z0.h, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = fptrunc <vscale x 2 x double> %b to <vscale x 2 x bfloat>
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %b.op, <vscale x 2 x bfloat> %a
|
|
ret <vscale x 2 x bfloat> %res
|
|
}
|
|
|
|
define <vscale x 2 x float> @fcvt_nxv2bf16_to_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x bfloat> %b) {
|
|
; CHECK-LABEL: fcvt_nxv2bf16_to_nxv2f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: lsl z1.s, z1.s, #16
|
|
; CHECK-NEXT: mov z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = fpext <vscale x 2 x bfloat> %b to <vscale x 2 x float>
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
|
|
ret <vscale x 2 x float> %res
|
|
}
|
|
|
|
define <vscale x 2 x double> @fcvt_nxv2bf16_to_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x bfloat> %b) {
|
|
; CHECK-LABEL: fcvt_nxv2bf16_to_nxv2f64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: lsl z1.s, z1.s, #16
|
|
; CHECK-NEXT: fcvt z0.d, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = fpext <vscale x 2 x bfloat> %b to <vscale x 2 x double>
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
|
|
ret <vscale x 2 x double> %res
|
|
}
|
|
|
|
define <vscale x 4 x float> @fcvt_nxv4bf16_to_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x bfloat> %b) {
|
|
; CHECK-LABEL: fcvt_nxv4bf16_to_nxv4f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: lsl z1.s, z1.s, #16
|
|
; CHECK-NEXT: mov z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = fpext <vscale x 4 x bfloat> %b to <vscale x 4 x float>
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
|
|
ret <vscale x 4 x float> %res
|
|
}
|
|
|
|
define <vscale x 2 x i64> @fcvtsu_nxv2f16_to_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x half> %b) {
|
|
; CHECK-LABEL: fcvtsu_nxv2f16_to_nxv2i64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fcvtzs z0.d, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = fptosi <vscale x 2 x half> %b to <vscale x 2 x i64>
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
|
|
ret <vscale x 2 x i64> %res
|
|
}
|
|
|
|
define <vscale x 4 x i32> @fcvtsu_nxv4f16_to_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x half> %b) {
|
|
; CHECK-LABEL: fcvtsu_nxv4f16_to_nxv4i32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fcvtzs z0.s, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = fptosi <vscale x 4 x half> %b to <vscale x 4 x i32>
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
|
|
ret <vscale x 4 x i32> %res
|
|
}
|
|
|
|
define <vscale x 8 x i16> @fcvtsu_nxv8f16_to_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x half> %b) {
|
|
; CHECK-LABEL: fcvtsu_nxv8f16_to_nxv8i16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fcvtzs z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = fptosi <vscale x 8 x half> %b to <vscale x 8 x i16>
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
|
|
ret <vscale x 8 x i16> %res
|
|
}
|
|
|
|
define <vscale x 2 x i64> @fcvtsu_nxv2f32_to_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x float> %b) {
|
|
; CHECK-LABEL: fcvtsu_nxv2f32_to_nxv2i64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fcvtzs z0.d, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = fptosi <vscale x 2 x float> %b to <vscale x 2 x i64>
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
|
|
ret <vscale x 2 x i64> %res
|
|
}
|
|
|
|
define <vscale x 4 x i32> @fcvtsu_nxv4f32_to_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x float> %b) {
|
|
; CHECK-LABEL: fcvtsu_nxv4f32_to_nxv4i32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fcvtzs z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = fptosi <vscale x 4 x float> %b to <vscale x 4 x i32>
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
|
|
ret <vscale x 4 x i32> %res
|
|
}
|
|
|
|
define <vscale x 2 x i64> @fcvtsu_nxv2f64_to_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x double> %b) {
|
|
; CHECK-LABEL: fcvtsu_nxv2f64_to_nxv2i64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fcvtzs z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = fptosi <vscale x 2 x double> %b to <vscale x 2 x i64>
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
|
|
ret <vscale x 2 x i64> %res
|
|
}
|
|
|
|
define <vscale x 2 x i64> @fcvtzu_nxv2f16_to_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x half> %b) {
|
|
; CHECK-LABEL: fcvtzu_nxv2f16_to_nxv2i64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fcvtzu z0.d, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = fptoui <vscale x 2 x half> %b to <vscale x 2 x i64>
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
|
|
ret <vscale x 2 x i64> %res
|
|
}
|
|
|
|
define <vscale x 4 x i32> @fcvtzu_nxv4f16_to_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x half> %b) {
|
|
; CHECK-LABEL: fcvtzu_nxv4f16_to_nxv4i32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fcvtzu z0.s, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = fptoui <vscale x 4 x half> %b to <vscale x 4 x i32>
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
|
|
ret <vscale x 4 x i32> %res
|
|
}
|
|
|
|
define <vscale x 8 x i16> @fcvtzu_nxv8f16_to_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x half> %b) {
|
|
; CHECK-LABEL: fcvtzu_nxv8f16_to_nxv8i16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fcvtzu z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = fptoui <vscale x 8 x half> %b to <vscale x 8 x i16>
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
|
|
ret <vscale x 8 x i16> %res
|
|
}
|
|
|
|
define <vscale x 2 x i64> @fcvtzu_nxv2f32_to_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x float> %b) {
|
|
; CHECK-LABEL: fcvtzu_nxv2f32_to_nxv2i64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fcvtzu z0.d, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = fptoui <vscale x 2 x float> %b to <vscale x 2 x i64>
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
|
|
ret <vscale x 2 x i64> %res
|
|
}
|
|
|
|
define <vscale x 4 x i32> @fcvtzu_nxv4f32_to_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x float> %b) {
|
|
; CHECK-LABEL: fcvtzu_nxv4f32_to_nxv4i32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fcvtzu z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = fptoui <vscale x 4 x float> %b to <vscale x 4 x i32>
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
|
|
ret <vscale x 4 x i32> %res
|
|
}
|
|
|
|
define <vscale x 2 x i64> @fcvtzu_nxv2f64_to_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x double> %b) {
|
|
; CHECK-LABEL: fcvtzu_nxv2f64_to_nxv2i64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fcvtzu z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = fptoui <vscale x 2 x double> %b to <vscale x 2 x i64>
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
|
|
ret <vscale x 2 x i64> %res
|
|
}
|
|
|
|
define <vscale x 2 x half> @fneg_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
|
|
; CHECK-LABEL: fneg_nxv2f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fneg z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = fneg <vscale x 2 x half> %b
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
|
|
ret <vscale x 2 x half> %res
|
|
}
|
|
|
|
define <vscale x 4 x half> @fneg_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
|
|
; CHECK-LABEL: fneg_nxv4f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fneg z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = fneg <vscale x 4 x half> %b
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
|
|
ret <vscale x 4 x half> %res
|
|
}
|
|
|
|
define <vscale x 8 x half> @fneg_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
|
|
; CHECK-LABEL: fneg_nxv8f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fneg z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = fneg <vscale x 8 x half> %b
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
|
|
ret <vscale x 8 x half> %res
|
|
}
|
|
|
|
define <vscale x 2 x float> @fneg_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
|
|
; CHECK-LABEL: fneg_nxv2f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fneg z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = fneg <vscale x 2 x float> %b
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
|
|
ret <vscale x 2 x float> %res
|
|
}
|
|
|
|
define <vscale x 4 x float> @fneg_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
|
|
; CHECK-LABEL: fneg_nxv4f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fneg z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = fneg <vscale x 4 x float> %b
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
|
|
ret <vscale x 4 x float> %res
|
|
}
|
|
|
|
define <vscale x 2 x double> @fneg_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
|
|
; CHECK-LABEL: fneg_nxv2f64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fneg z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = fneg <vscale x 2 x double> %b
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
|
|
ret <vscale x 2 x double> %res
|
|
}
|
|
|
|
define <vscale x 2 x bfloat> @fneg_nxv2bf16(<vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) {
|
|
; CHECK-LABEL: fneg_nxv2bf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fneg z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = fneg <vscale x 2 x bfloat> %b
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x bfloat> %b.op, <vscale x 2 x bfloat> %a
|
|
ret <vscale x 2 x bfloat> %res
|
|
}
|
|
|
|
define <vscale x 4 x bfloat> @fneg_nxv4bf16(<vscale x 4 x i1> %pg, <vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) {
|
|
; CHECK-LABEL: fneg_nxv4bf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fneg z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = fneg <vscale x 4 x bfloat> %b
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x bfloat> %b.op, <vscale x 4 x bfloat> %a
|
|
ret <vscale x 4 x bfloat> %res
|
|
}
|
|
|
|
define <vscale x 8 x bfloat> @fneg_nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
|
|
; CHECK-LABEL: fneg_nxv8bf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fneg z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = fneg <vscale x 8 x bfloat> %b
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %b.op, <vscale x 8 x bfloat> %a
|
|
ret <vscale x 8 x bfloat> %res
|
|
}
|
|
|
|
define <vscale x 2 x half> @frinta_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
|
|
; CHECK-LABEL: frinta_nxv2f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frinta z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x half> @llvm.round.nxv2f16(<vscale x 2 x half> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
|
|
ret <vscale x 2 x half> %res
|
|
}
|
|
|
|
define <vscale x 4 x half> @frinta_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
|
|
; CHECK-LABEL: frinta_nxv4f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frinta z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x half> @llvm.round.nxv4f16(<vscale x 4 x half> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
|
|
ret <vscale x 4 x half> %res
|
|
}
|
|
|
|
define <vscale x 8 x half> @frinta_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
|
|
; CHECK-LABEL: frinta_nxv8f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frinta z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 8 x half> @llvm.round.nxv8f16(<vscale x 8 x half> %b)
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
|
|
ret <vscale x 8 x half> %res
|
|
}
|
|
|
|
define <vscale x 2 x float> @frinta_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
|
|
; CHECK-LABEL: frinta_nxv2f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frinta z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x float> @llvm.round.nxv2f32(<vscale x 2 x float> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
|
|
ret <vscale x 2 x float> %res
|
|
}
|
|
|
|
define <vscale x 4 x float> @frinta_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
|
|
; CHECK-LABEL: frinta_nxv4f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frinta z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x float> @llvm.round.nxv4f32(<vscale x 4 x float> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
|
|
ret <vscale x 4 x float> %res
|
|
}
|
|
|
|
define <vscale x 2 x double> @frinta_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
|
|
; CHECK-LABEL: frinta_nxv2f64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frinta z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
|
|
ret <vscale x 2 x double> %res
|
|
}
|
|
|
|
define <vscale x 2 x half> @frinti_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
|
|
; CHECK-LABEL: frinti_nxv2f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frinti z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x half> @llvm.nearbyint.nxv2f16(<vscale x 2 x half> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
|
|
ret <vscale x 2 x half> %res
|
|
}
|
|
|
|
define <vscale x 4 x half> @frinti_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
|
|
; CHECK-LABEL: frinti_nxv4f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frinti z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x half> @llvm.nearbyint.nxv4f16(<vscale x 4 x half> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
|
|
ret <vscale x 4 x half> %res
|
|
}
|
|
|
|
define <vscale x 8 x half> @frinti_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
|
|
; CHECK-LABEL: frinti_nxv8f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frinti z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 8 x half> @llvm.nearbyint.nxv8f16(<vscale x 8 x half> %b)
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
|
|
ret <vscale x 8 x half> %res
|
|
}
|
|
|
|
define <vscale x 2 x float> @frinti_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
|
|
; CHECK-LABEL: frinti_nxv2f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frinti z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x float> @llvm.nearbyint.nxv2f32(<vscale x 2 x float> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
|
|
ret <vscale x 2 x float> %res
|
|
}
|
|
|
|
define <vscale x 4 x float> @frinti_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
|
|
; CHECK-LABEL: frinti_nxv4f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frinti z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x float> @llvm.nearbyint.nxv4f32(<vscale x 4 x float> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
|
|
ret <vscale x 4 x float> %res
|
|
}
|
|
|
|
define <vscale x 2 x double> @frinti_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
|
|
; CHECK-LABEL: frinti_nxv2f64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frinti z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x double> @llvm.nearbyint.nxv2f64(<vscale x 2 x double> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
|
|
ret <vscale x 2 x double> %res
|
|
}
|
|
|
|
define <vscale x 2 x half> @frintm_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
|
|
; CHECK-LABEL: frintm_nxv2f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintm z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x half> @llvm.floor.nxv2f16(<vscale x 2 x half> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
|
|
ret <vscale x 2 x half> %res
|
|
}
|
|
|
|
define <vscale x 4 x half> @frintm_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
|
|
; CHECK-LABEL: frintm_nxv4f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintm z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x half> @llvm.floor.nxv4f16(<vscale x 4 x half> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
|
|
ret <vscale x 4 x half> %res
|
|
}
|
|
|
|
define <vscale x 8 x half> @frintm_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
|
|
; CHECK-LABEL: frintm_nxv8f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintm z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 8 x half> @llvm.floor.nxv8f16(<vscale x 8 x half> %b)
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
|
|
ret <vscale x 8 x half> %res
|
|
}
|
|
|
|
define <vscale x 2 x float> @frintm_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
|
|
; CHECK-LABEL: frintm_nxv2f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintm z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x float> @llvm.floor.nxv2f32(<vscale x 2 x float> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
|
|
ret <vscale x 2 x float> %res
|
|
}
|
|
|
|
define <vscale x 4 x float> @frintm_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
|
|
; CHECK-LABEL: frintm_nxv4f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintm z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x float> @llvm.floor.nxv4f32(<vscale x 4 x float> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
|
|
ret <vscale x 4 x float> %res
|
|
}
|
|
|
|
define <vscale x 2 x double> @frintm_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
|
|
; CHECK-LABEL: frintm_nxv2f64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintm z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x double> @llvm.floor.nxv2f64(<vscale x 2 x double> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
|
|
ret <vscale x 2 x double> %res
|
|
}
|
|
|
|
define <vscale x 2 x half> @frintn_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
|
|
; CHECK-LABEL: frintn_nxv2f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintn z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x half> @llvm.roundeven.nxv2f16(<vscale x 2 x half> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
|
|
ret <vscale x 2 x half> %res
|
|
}
|
|
|
|
define <vscale x 4 x half> @frintn_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
|
|
; CHECK-LABEL: frintn_nxv4f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintn z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x half> @llvm.roundeven.nxv4f16(<vscale x 4 x half> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
|
|
ret <vscale x 4 x half> %res
|
|
}
|
|
|
|
define <vscale x 8 x half> @frintn_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
|
|
; CHECK-LABEL: frintn_nxv8f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintn z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 8 x half> @llvm.roundeven.nxv8f16(<vscale x 8 x half> %b)
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
|
|
ret <vscale x 8 x half> %res
|
|
}
|
|
|
|
define <vscale x 2 x float> @frintn_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
|
|
; CHECK-LABEL: frintn_nxv2f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintn z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x float> @llvm.roundeven.nxv2f32(<vscale x 2 x float> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
|
|
ret <vscale x 2 x float> %res
|
|
}
|
|
|
|
define <vscale x 4 x float> @frintn_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
|
|
; CHECK-LABEL: frintn_nxv4f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintn z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x float> @llvm.roundeven.nxv4f32(<vscale x 4 x float> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
|
|
ret <vscale x 4 x float> %res
|
|
}
|
|
|
|
define <vscale x 2 x double> @frintn_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
|
|
; CHECK-LABEL: frintn_nxv2f64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintn z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x double> @llvm.roundeven.nxv2f64(<vscale x 2 x double> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
|
|
ret <vscale x 2 x double> %res
|
|
}
|
|
|
|
define <vscale x 2 x half> @frintp_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
|
|
; CHECK-LABEL: frintp_nxv2f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintp z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x half> @llvm.ceil.nxv2f16(<vscale x 2 x half> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
|
|
ret <vscale x 2 x half> %res
|
|
}
|
|
|
|
define <vscale x 4 x half> @frintp_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
|
|
; CHECK-LABEL: frintp_nxv4f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintp z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
|
|
ret <vscale x 4 x half> %res
|
|
}
|
|
|
|
define <vscale x 8 x half> @frintp_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
|
|
; CHECK-LABEL: frintp_nxv8f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintp z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 8 x half> @llvm.ceil.nxv8f16(<vscale x 8 x half> %b)
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
|
|
ret <vscale x 8 x half> %res
|
|
}
|
|
|
|
define <vscale x 2 x float> @frintp_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
|
|
; CHECK-LABEL: frintp_nxv2f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintp z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x float> @llvm.ceil.nxv2f32(<vscale x 2 x float> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
|
|
ret <vscale x 2 x float> %res
|
|
}
|
|
|
|
define <vscale x 4 x float> @frintp_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
|
|
; CHECK-LABEL: frintp_nxv4f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintp z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
|
|
ret <vscale x 4 x float> %res
|
|
}
|
|
|
|
define <vscale x 2 x double> @frintp_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
|
|
; CHECK-LABEL: frintp_nxv2f64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintp z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x double> @llvm.ceil.nxv2f64(<vscale x 2 x double> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
|
|
ret <vscale x 2 x double> %res
|
|
}
|
|
|
|
define <vscale x 2 x half> @frintx_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
|
|
; CHECK-LABEL: frintx_nxv2f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintx z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x half> @llvm.rint.nxv2f16(<vscale x 2 x half> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
|
|
ret <vscale x 2 x half> %res
|
|
}
|
|
|
|
define <vscale x 4 x half> @frintx_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
|
|
; CHECK-LABEL: frintx_nxv4f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintx z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x half> @llvm.rint.nxv4f16(<vscale x 4 x half> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
|
|
ret <vscale x 4 x half> %res
|
|
}
|
|
|
|
define <vscale x 8 x half> @frintx_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
|
|
; CHECK-LABEL: frintx_nxv8f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintx z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 8 x half> @llvm.rint.nxv8f16(<vscale x 8 x half> %b)
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
|
|
ret <vscale x 8 x half> %res
|
|
}
|
|
|
|
define <vscale x 2 x float> @frintx_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
|
|
; CHECK-LABEL: frintx_nxv2f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintx z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x float> @llvm.rint.nxv2f32(<vscale x 2 x float> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
|
|
ret <vscale x 2 x float> %res
|
|
}
|
|
|
|
define <vscale x 4 x float> @frintx_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
|
|
; CHECK-LABEL: frintx_nxv4f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintx z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x float> @llvm.rint.nxv4f32(<vscale x 4 x float> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
|
|
ret <vscale x 4 x float> %res
|
|
}
|
|
|
|
define <vscale x 2 x double> @frintx_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
|
|
; CHECK-LABEL: frintx_nxv2f64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintx z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x double> @llvm.rint.nxv2f64(<vscale x 2 x double> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
|
|
ret <vscale x 2 x double> %res
|
|
}
|
|
|
|
define <vscale x 2 x half> @frintz_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
|
|
; CHECK-LABEL: frintz_nxv2f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintz z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x half> @llvm.trunc.nxv2f16(<vscale x 2 x half> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
|
|
ret <vscale x 2 x half> %res
|
|
}
|
|
|
|
define <vscale x 4 x half> @frintz_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
|
|
; CHECK-LABEL: frintz_nxv4f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintz z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
|
|
ret <vscale x 4 x half> %res
|
|
}
|
|
|
|
define <vscale x 8 x half> @frintz_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
|
|
; CHECK-LABEL: frintz_nxv8f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintz z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 8 x half> @llvm.trunc.nxv8f16(<vscale x 8 x half> %b)
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
|
|
ret <vscale x 8 x half> %res
|
|
}
|
|
|
|
define <vscale x 2 x float> @frintz_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
|
|
; CHECK-LABEL: frintz_nxv2f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintz z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x float> @llvm.trunc.nxv2f32(<vscale x 2 x float> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
|
|
ret <vscale x 2 x float> %res
|
|
}
|
|
|
|
define <vscale x 4 x float> @frintz_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
|
|
; CHECK-LABEL: frintz_nxv4f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintz z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
|
|
ret <vscale x 4 x float> %res
|
|
}
|
|
|
|
define <vscale x 2 x double> @frintz_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
|
|
; CHECK-LABEL: frintz_nxv2f64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: frintz z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
|
|
ret <vscale x 2 x double> %res
|
|
}
|
|
|
|
define <vscale x 2 x half> @fsqrt_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x half> %b) {
|
|
; CHECK-LABEL: fsqrt_nxv2f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fsqrt z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x half> @llvm.sqrt.nxv2f16(<vscale x 2 x half> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
|
|
ret <vscale x 2 x half> %res
|
|
}
|
|
|
|
define <vscale x 4 x half> @fsqrt_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x half> %b) {
|
|
; CHECK-LABEL: fsqrt_nxv4f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fsqrt z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x half> @llvm.sqrt.nxv4f16(<vscale x 4 x half> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
|
|
ret <vscale x 4 x half> %res
|
|
}
|
|
|
|
define <vscale x 8 x half> @fsqrt_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
|
|
; CHECK-LABEL: fsqrt_nxv8f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fsqrt z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 8 x half> @llvm.sqrt.nxv8f16(<vscale x 8 x half> %b)
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
|
|
ret <vscale x 8 x half> %res
|
|
}
|
|
|
|
define <vscale x 2 x float> @fsqrt_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x float> %b) {
|
|
; CHECK-LABEL: fsqrt_nxv2f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fsqrt z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x float> @llvm.sqrt.nxv2f32(<vscale x 2 x float> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
|
|
ret <vscale x 2 x float> %res
|
|
}
|
|
|
|
define <vscale x 4 x float> @fsqrt_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
|
|
; CHECK-LABEL: fsqrt_nxv4f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fsqrt z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
|
|
ret <vscale x 4 x float> %res
|
|
}
|
|
|
|
define <vscale x 2 x double> @fsqrt_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
|
|
; CHECK-LABEL: fsqrt_nxv2f64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fsqrt z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
|
|
ret <vscale x 2 x double> %res
|
|
}
|
|
|
|
define <vscale x 16 x i8> @neg_nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
|
|
; CHECK-LABEL: neg_nxv16i8:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: subr z1.b, z1.b, #0 // =0x0
|
|
; CHECK-NEXT: mov z0.b, p0/m, z1.b
|
|
; CHECK-NEXT: ret
|
|
%b.op = sub <vscale x 16 x i8> zeroinitializer, %b
|
|
%res = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b.op, <vscale x 16 x i8> %a
|
|
ret <vscale x 16 x i8> %res
|
|
}
|
|
|
|
define <vscale x 8 x i16> @neg_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
|
|
; CHECK-LABEL: neg_nxv8i16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: subr z1.h, z1.h, #0 // =0x0
|
|
; CHECK-NEXT: mov z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = sub <vscale x 8 x i16> zeroinitializer, %b
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
|
|
ret <vscale x 8 x i16> %res
|
|
}
|
|
|
|
define <vscale x 4 x i32> @neg_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
|
|
; CHECK-LABEL: neg_nxv4i32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: subr z1.s, z1.s, #0 // =0x0
|
|
; CHECK-NEXT: mov z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = sub <vscale x 4 x i32> zeroinitializer, %b
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
|
|
ret <vscale x 4 x i32> %res
|
|
}
|
|
|
|
define <vscale x 2 x i64> @neg_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
|
|
; CHECK-LABEL: neg_nxv2i64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: subr z1.d, z1.d, #0 // =0x0
|
|
; CHECK-NEXT: mov z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = sub <vscale x 2 x i64> zeroinitializer, %b
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
|
|
ret <vscale x 2 x i64> %res
|
|
}
|
|
|
|
define <vscale x 16 x i8> @rbit_nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
|
|
; CHECK-LABEL: rbit_nxv16i8:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: rbit z0.b, p0/m, z1.b
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 16 x i8> @llvm.bitreverse.nxv16i8(<vscale x 16 x i8> %b)
|
|
%res = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b.op, <vscale x 16 x i8> %a
|
|
ret <vscale x 16 x i8> %res
|
|
}
|
|
|
|
define <vscale x 8 x i16> @rbit_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
|
|
; CHECK-LABEL: rbit_nxv8i16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: rbit z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 8 x i16> @llvm.bitreverse.nxv8i16(<vscale x 8 x i16> %b)
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
|
|
ret <vscale x 8 x i16> %res
|
|
}
|
|
|
|
define <vscale x 4 x i32> @rbit_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
|
|
; CHECK-LABEL: rbit_nxv4i32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: rbit z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x i32> @llvm.bitreverse.nxv4i32(<vscale x 4 x i32> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
|
|
ret <vscale x 4 x i32> %res
|
|
}
|
|
|
|
define <vscale x 2 x i64> @rbit_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
|
|
; CHECK-LABEL: rbit_nxv2i64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: rbit z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x i64> @llvm.bitreverse.nxv2i64(<vscale x 2 x i64> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
|
|
ret <vscale x 2 x i64> %res
|
|
}
|
|
|
|
define <vscale x 8 x i16> @revb_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
|
|
; CHECK-LABEL: revb_nxv8i16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: revb z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16> %b)
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
|
|
ret <vscale x 8 x i16> %res
|
|
}
|
|
|
|
define <vscale x 4 x i32> @revb_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
|
|
; CHECK-LABEL: revb_nxv4i32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: revb z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32> %b)
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
|
|
ret <vscale x 4 x i32> %res
|
|
}
|
|
|
|
define <vscale x 2 x i64> @revb_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
|
|
; CHECK-LABEL: revb_nxv2i64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: revb z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = call <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64> %b)
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
|
|
ret <vscale x 2 x i64> %res
|
|
}
|
|
|
|
define <vscale x 8 x half> @scvtf_nxv8i16_to_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x i16> %b) {
|
|
; CHECK-LABEL: scvtf_nxv8i16_to_nxv8f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: scvtf z0.h, p0/m, z1.h
|
|
; CHECK-NEXT: ret
|
|
%b.op = sitofp <vscale x 8 x i16> %b to <vscale x 8 x half>
|
|
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
|
|
ret <vscale x 8 x half> %res
|
|
}
|
|
|
|
define <vscale x 4 x half> @scvtf_nxv4i32_to_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x i32> %b) {
|
|
; CHECK-LABEL: scvtf_nxv4i32_to_nxv4f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: scvtf z0.h, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = sitofp <vscale x 4 x i32> %b to <vscale x 4 x half>
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
|
|
ret <vscale x 4 x half> %res
|
|
}
|
|
|
|
define <vscale x 4 x float> @scvtf_nxv4i32_to_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x i32> %b) {
|
|
; CHECK-LABEL: scvtf_nxv4i32_to_nxv4f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: scvtf z0.s, p0/m, z1.s
|
|
; CHECK-NEXT: ret
|
|
%b.op = sitofp <vscale x 4 x i32> %b to <vscale x 4 x float>
|
|
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
|
|
ret <vscale x 4 x float> %res
|
|
}
|
|
|
|
define <vscale x 2 x half> @scvtf_nxv2i64_to_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x i64> %b) {
|
|
; CHECK-LABEL: scvtf_nxv2i64_to_nxv2f16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: scvtf z0.h, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = sitofp <vscale x 2 x i64> %b to <vscale x 2 x half>
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
|
|
ret <vscale x 2 x half> %res
|
|
}
|
|
|
|
define <vscale x 2 x float> @scvtf_nxv2i64_to_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x i64> %b) {
|
|
; CHECK-LABEL: scvtf_nxv2i64_to_nxv2f32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: scvtf z0.s, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = sitofp <vscale x 2 x i64> %b to <vscale x 2 x float>
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
|
|
ret <vscale x 2 x float> %res
|
|
}
|
|
|
|
define <vscale x 2 x double> @scvtf_nxv2i64_to_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x i64> %b) {
|
|
; CHECK-LABEL: scvtf_nxv2i64_to_nxv2f64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: scvtf z0.d, p0/m, z1.d
|
|
; CHECK-NEXT: ret
|
|
%b.op = sitofp <vscale x 2 x i64> %b to <vscale x 2 x double>
|
|
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
|
|
ret <vscale x 2 x double> %res
|
|
}
define <vscale x 8 x i16> @sxtb_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i8> %b) {
; CHECK-LABEL: sxtb_nxv8i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p1.h
; CHECK-NEXT: sxtb z1.h, p1/m, z1.h
; CHECK-NEXT: mov z0.h, p0/m, z1.h
; CHECK-NEXT: ret
%b.op = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @sxtb_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i8> %b) {
; CHECK-LABEL: sxtb_nxv4i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p1.s
; CHECK-NEXT: sxtb z1.s, p1/m, z1.s
; CHECK-NEXT: mov z0.s, p0/m, z1.s
; CHECK-NEXT: ret
%b.op = sext <vscale x 4 x i8> %b to <vscale x 4 x i32>
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @sxtb_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i8> %b) {
; CHECK-LABEL: sxtb_nxv2i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p1.d
; CHECK-NEXT: sxtb z1.d, p1/m, z1.d
; CHECK-NEXT: mov z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%b.op = sext <vscale x 2 x i8> %b to <vscale x 2 x i64>
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
ret <vscale x 2 x i64> %res
}

define <vscale x 4 x i32> @sxth_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i16> %b) {
; CHECK-LABEL: sxth_nxv4i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p1.s
; CHECK-NEXT: sxth z1.s, p1/m, z1.s
; CHECK-NEXT: mov z0.s, p0/m, z1.s
; CHECK-NEXT: ret
%b.op = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @sxth_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i16> %b) {
; CHECK-LABEL: sxth_nxv2i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p1.d
; CHECK-NEXT: sxth z1.d, p1/m, z1.d
; CHECK-NEXT: mov z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%b.op = sext <vscale x 2 x i16> %b to <vscale x 2 x i64>
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @sxtw_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: sxtw_nxv2i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p1.d
; CHECK-NEXT: sxtw z1.d, p1/m, z1.d
; CHECK-NEXT: mov z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%b.op = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
ret <vscale x 2 x i64> %res
}
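
; As with the signed conversions, uitofp plus select folds into a single
; merging UCVTF.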
define <vscale x 8 x half> @ucvtf_nxv8i16_to_nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: ucvtf_nxv8i16_to_nxv8f16:
; CHECK: // %bb.0:
; CHECK-NEXT: ucvtf z0.h, p0/m, z1.h
; CHECK-NEXT: ret
%b.op = uitofp <vscale x 8 x i16> %b to <vscale x 8 x half>
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %b.op, <vscale x 8 x half> %a
ret <vscale x 8 x half> %res
}

define <vscale x 4 x half> @ucvtf_nxv4i32_to_nxv4f16(<vscale x 4 x i1> %pg, <vscale x 4 x half> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: ucvtf_nxv4i32_to_nxv4f16:
; CHECK: // %bb.0:
; CHECK-NEXT: ucvtf z0.h, p0/m, z1.s
; CHECK-NEXT: ret
%b.op = uitofp <vscale x 4 x i32> %b to <vscale x 4 x half>
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x half> %b.op, <vscale x 4 x half> %a
ret <vscale x 4 x half> %res
}

define <vscale x 4 x float> @ucvtf_nxv4i32_to_nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: ucvtf_nxv4i32_to_nxv4f32:
; CHECK: // %bb.0:
; CHECK-NEXT: ucvtf z0.s, p0/m, z1.s
; CHECK-NEXT: ret
%b.op = uitofp <vscale x 4 x i32> %b to <vscale x 4 x float>
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %b.op, <vscale x 4 x float> %a
ret <vscale x 4 x float> %res
}

define <vscale x 2 x half> @ucvtf_nxv2i64_to_nxv2f16(<vscale x 2 x i1> %pg, <vscale x 2 x half> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: ucvtf_nxv2i64_to_nxv2f16:
; CHECK: // %bb.0:
; CHECK-NEXT: ucvtf z0.h, p0/m, z1.d
; CHECK-NEXT: ret
%b.op = uitofp <vscale x 2 x i64> %b to <vscale x 2 x half>
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x half> %b.op, <vscale x 2 x half> %a
ret <vscale x 2 x half> %res
}

define <vscale x 2 x float> @ucvtf_nxv2i64_to_nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: ucvtf_nxv2i64_to_nxv2f32:
; CHECK: // %bb.0:
; CHECK-NEXT: ucvtf z0.s, p0/m, z1.d
; CHECK-NEXT: ret
%b.op = uitofp <vscale x 2 x i64> %b to <vscale x 2 x float>
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x float> %b.op, <vscale x 2 x float> %a
ret <vscale x 2 x float> %res
}

define <vscale x 2 x double> @ucvtf_nxv2i64_to_nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: ucvtf_nxv2i64_to_nxv2f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ucvtf z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%b.op = uitofp <vscale x 2 x i64> %b to <vscale x 2 x double>
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %b.op, <vscale x 2 x double> %a
ret <vscale x 2 x double> %res
}
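
; Zero extensions lower to an unpredicated AND with an immediate mask, so the
; select remains a separate predicated MOV.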
define <vscale x 8 x i16> @uxtb_nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i8> %b) {
; CHECK-LABEL: uxtb_nxv8i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and z1.h, z1.h, #0xff
; CHECK-NEXT: mov z0.h, p0/m, z1.h
; CHECK-NEXT: ret
%b.op = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
%res = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b.op, <vscale x 8 x i16> %a
ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @uxtb_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i8> %b) {
; CHECK-LABEL: uxtb_nxv4i32:
; CHECK: // %bb.0:
; CHECK-NEXT: and z1.s, z1.s, #0xff
; CHECK-NEXT: mov z0.s, p0/m, z1.s
; CHECK-NEXT: ret
%b.op = zext <vscale x 4 x i8> %b to <vscale x 4 x i32>
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @uxtb_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i8> %b) {
; CHECK-LABEL: uxtb_nxv2i64:
; CHECK: // %bb.0:
; CHECK-NEXT: and z1.d, z1.d, #0xff
; CHECK-NEXT: mov z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%b.op = zext <vscale x 2 x i8> %b to <vscale x 2 x i64>
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
ret <vscale x 2 x i64> %res
}

define <vscale x 4 x i32> @uxth_nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i16> %b) {
; CHECK-LABEL: uxth_nxv4i32:
; CHECK: // %bb.0:
; CHECK-NEXT: and z1.s, z1.s, #0xffff
; CHECK-NEXT: mov z0.s, p0/m, z1.s
; CHECK-NEXT: ret
%b.op = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
%res = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b.op, <vscale x 4 x i32> %a
ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @uxth_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i16> %b) {
; CHECK-LABEL: uxth_nxv2i64:
; CHECK: // %bb.0:
; CHECK-NEXT: and z1.d, z1.d, #0xffff
; CHECK-NEXT: mov z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%b.op = zext <vscale x 2 x i16> %b to <vscale x 2 x i64>
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @uxtw_nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: uxtw_nxv2i64:
; CHECK: // %bb.0:
; CHECK-NEXT: and z1.d, z1.d, #0xffffffff
; CHECK-NEXT: mov z0.d, p0/m, z1.d
; CHECK-NEXT: ret
%b.op = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
%res = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b.op, <vscale x 2 x i64> %a
ret <vscale x 2 x i64> %res
}
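
; For the SVE abs intrinsic the select folds away when the operation's
; predicate is all active or matches the select's predicate.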
define <vscale x 16 x i8> @abs_nxv16i8_all_active_predicate(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: abs_nxv16i8_all_active_predicate:
; CHECK: // %bb.0:
; CHECK-NEXT: abs z0.b, p0/m, z1.b
; CHECK-NEXT: ret
%b.op = call <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i1> splat(i1 true), <vscale x 16 x i8> %b)
%res = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b.op, <vscale x 16 x i8> %a
ret <vscale x 16 x i8> %res
}

define <vscale x 16 x i8> @abs_nxv16i8_same_predicate(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: abs_nxv16i8_same_predicate:
; CHECK: // %bb.0:
; CHECK-NEXT: abs z0.b, p0/m, z1.b
; CHECK-NEXT: ret
%b.op = call <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b)
%res = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b.op, <vscale x 16 x i8> %a
ret <vscale x 16 x i8> %res
}
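
; A poison merge value leaves the inactive lanes undefined, so the select's
; predicate can be used for the operation itself.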
define <vscale x 16 x i8> @abs_nxv16i8_inactive_lanes_are_not_defined(<vscale x 16 x i1> %sel_pg, <vscale x 16 x i1> %op_pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: abs_nxv16i8_inactive_lanes_are_not_defined:
; CHECK: // %bb.0:
; CHECK-NEXT: abs z0.b, p0/m, z1.b
; CHECK-NEXT: ret
%b.op = call <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8> poison, <vscale x 16 x i1> %op_pg, <vscale x 16 x i8> %b)
%res = select <vscale x 16 x i1> %sel_pg, <vscale x 16 x i8> %b.op, <vscale x 16 x i8> %a
ret <vscale x 16 x i8> %res
}

; Merging op has multiple users.
declare void @use(<vscale x 16 x i8>,<vscale x 16 x i8>)
define void @abs_nxv16i8_multi_use(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: abs_nxv16i8_multi_use:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p1.b
; CHECK-NEXT: abs z1.b, p1/m, z1.b
; CHECK-NEXT: mov z0.b, p0/m, z1.b
; CHECK-NEXT: b use
%b.op = tail call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %b, i1 0)
%res = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b.op, <vscale x 16 x i8> %a
tail call void @use(<vscale x 16 x i8> %res, <vscale x 16 x i8> %b.op)
ret void
}

; Inactive lanes of the merging op remain live after the select.
define <vscale x 16 x i8> @abs_nxv16i8_predicate_mismatch(<vscale x 16 x i1> %sel_pg, <vscale x 16 x i1> %op_pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: abs_nxv16i8_predicate_mismatch:
; CHECK: // %bb.0:
; CHECK-NEXT: abs z2.b, p1/m, z1.b
; CHECK-NEXT: mov z0.b, p0/m, z2.b
; CHECK-NEXT: ret
%b.op = call <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i1> %op_pg, <vscale x 16 x i8> %b)
%res = select <vscale x 16 x i1> %sel_pg, <vscale x 16 x i8> %b.op, <vscale x 16 x i8> %a
ret <vscale x 16 x i8> %res
}
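
; Declarations for the intrinsics exercised above.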
declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)
declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)
declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)
declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)

declare <vscale x 16 x i8> @llvm.bitreverse.nxv16i8(<vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.bitreverse.nxv8i16(<vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.bitreverse.nxv4i32(<vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.bitreverse.nxv2i64(<vscale x 2 x i64>)

declare <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64>)

declare <vscale x 2 x half> @llvm.ceil.nxv2f16(<vscale x 2 x half>)
declare <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half>)
declare <vscale x 8 x half> @llvm.ceil.nxv8f16(<vscale x 8 x half>)
declare <vscale x 2 x float> @llvm.ceil.nxv2f32(<vscale x 2 x float>)
declare <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.ceil.nxv2f64(<vscale x 2 x double>)

declare <vscale x 16 x i8> @llvm.ctlz.nxv16i8(<vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.ctlz.nxv8i16(<vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.ctlz.nxv4i32(<vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.ctlz.nxv2i64(<vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.ctpop.nxv16i8(<vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.ctpop.nxv8i16(<vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.ctpop.nxv4i32(<vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.ctpop.nxv2i64(<vscale x 2 x i64>)

declare <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half>)
declare <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half>)
declare <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half>)
declare <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float>)
declare <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double>)
declare <vscale x 2 x bfloat> @llvm.fabs.nxv2bf16(<vscale x 2 x bfloat>)
declare <vscale x 4 x bfloat> @llvm.fabs.nxv4bf16(<vscale x 4 x bfloat>)
declare <vscale x 8 x bfloat> @llvm.fabs.nxv8bf16(<vscale x 8 x bfloat>)

declare <vscale x 2 x half> @llvm.floor.nxv2f16(<vscale x 2 x half>)
declare <vscale x 4 x half> @llvm.floor.nxv4f16(<vscale x 4 x half>)
declare <vscale x 8 x half> @llvm.floor.nxv8f16(<vscale x 8 x half>)
declare <vscale x 2 x float> @llvm.floor.nxv2f32(<vscale x 2 x float>)
declare <vscale x 4 x float> @llvm.floor.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.floor.nxv2f64(<vscale x 2 x double>)

declare <vscale x 2 x half> @llvm.nearbyint.nxv2f16(<vscale x 2 x half>)
declare <vscale x 4 x half> @llvm.nearbyint.nxv4f16(<vscale x 4 x half>)
declare <vscale x 8 x half> @llvm.nearbyint.nxv8f16(<vscale x 8 x half>)
declare <vscale x 2 x float> @llvm.nearbyint.nxv2f32(<vscale x 2 x float>)
declare <vscale x 4 x float> @llvm.nearbyint.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.nearbyint.nxv2f64(<vscale x 2 x double>)

declare <vscale x 2 x half> @llvm.rint.nxv2f16(<vscale x 2 x half>)
declare <vscale x 4 x half> @llvm.rint.nxv4f16(<vscale x 4 x half>)
declare <vscale x 8 x half> @llvm.rint.nxv8f16(<vscale x 8 x half>)
declare <vscale x 2 x float> @llvm.rint.nxv2f32(<vscale x 2 x float>)
declare <vscale x 4 x float> @llvm.rint.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.rint.nxv2f64(<vscale x 2 x double>)

declare <vscale x 2 x half> @llvm.round.nxv2f16(<vscale x 2 x half>)
declare <vscale x 4 x half> @llvm.round.nxv4f16(<vscale x 4 x half>)
declare <vscale x 8 x half> @llvm.round.nxv8f16(<vscale x 8 x half>)
declare <vscale x 2 x float> @llvm.round.nxv2f32(<vscale x 2 x float>)
declare <vscale x 4 x float> @llvm.round.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double>)

declare <vscale x 2 x half> @llvm.roundeven.nxv2f16(<vscale x 2 x half>)
declare <vscale x 4 x half> @llvm.roundeven.nxv4f16(<vscale x 4 x half>)
declare <vscale x 8 x half> @llvm.roundeven.nxv8f16(<vscale x 8 x half>)
declare <vscale x 2 x float> @llvm.roundeven.nxv2f32(<vscale x 2 x float>)
declare <vscale x 4 x float> @llvm.roundeven.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.roundeven.nxv2f64(<vscale x 2 x double>)

declare <vscale x 2 x half> @llvm.sqrt.nxv2f16(<vscale x 2 x half>)
declare <vscale x 4 x half> @llvm.sqrt.nxv4f16(<vscale x 4 x half>)
declare <vscale x 8 x half> @llvm.sqrt.nxv8f16(<vscale x 8 x half>)
declare <vscale x 2 x float> @llvm.sqrt.nxv2f32(<vscale x 2 x float>)
declare <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double>)

declare <vscale x 2 x half> @llvm.trunc.nxv2f16(<vscale x 2 x half>)
declare <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half>)
declare <vscale x 8 x half> @llvm.trunc.nxv8f16(<vscale x 8 x half>)
declare <vscale x 2 x float> @llvm.trunc.nxv2f32(<vscale x 2 x float>)
declare <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double>)