
We already support computing known bits for extending loads, but not for masked loads. For now I've only added support for zero-extends because that's the only thing currently tested. Even when the passthru value is poison we still know the top X bits are zero.
1405 lines
49 KiB
LLVM
1405 lines
49 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+sve | FileCheck %s -check-prefixes=CHECK,SVE
; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+sve2 | FileCheck %s -check-prefixes=CHECK,SVE2

define <vscale x 2 x i64> @hadds_v2i64(<vscale x 2 x i64> %s0, <vscale x 2 x i64> %s1) {
; SVE-LABEL: hadds_v2i64:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    and z0.d, z0.d, z1.d
; SVE-NEXT:    asr z1.d, z2.d, #1
; SVE-NEXT:    add z0.d, z0.d, z1.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: hadds_v2i64:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.d
; SVE2-NEXT:    shadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT:    ret
; Signed halving add: trunc((sext(s0) + sext(s1)) ashr 1). SVE2 selects shadd;
; base SVE uses the (a & b) + ((a ^ b) >> 1) expansion.
entry:
  %s0s = sext <vscale x 2 x i64> %s0 to <vscale x 2 x i128>
  %s1s = sext <vscale x 2 x i64> %s1 to <vscale x 2 x i128>
  %m = add nsw <vscale x 2 x i128> %s0s, %s1s
  %s = ashr <vscale x 2 x i128> %m, splat (i128 1)
  %s2 = trunc <vscale x 2 x i128> %s to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %s2
}

define <vscale x 2 x i64> @hadds_v2i64_lsh(<vscale x 2 x i64> %s0, <vscale x 2 x i64> %s1) {
; SVE-LABEL: hadds_v2i64_lsh:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    and z0.d, z0.d, z1.d
; SVE-NEXT:    asr z1.d, z2.d, #1
; SVE-NEXT:    add z0.d, z0.d, z1.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: hadds_v2i64_lsh:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.d
; SVE2-NEXT:    shadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT:    ret
; As hadds_v2i64 but shifting with lshr; per the CHECK lines this still
; selects shadd on SVE2.
entry:
  %s0s = sext <vscale x 2 x i64> %s0 to <vscale x 2 x i128>
  %s1s = sext <vscale x 2 x i64> %s1 to <vscale x 2 x i128>
  %m = add nsw <vscale x 2 x i128> %s0s, %s1s
  %s = lshr <vscale x 2 x i128> %m, splat (i128 1)
  %s2 = trunc <vscale x 2 x i128> %s to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %s2
}

define <vscale x 2 x i64> @haddu_v2i64(<vscale x 2 x i64> %s0, <vscale x 2 x i64> %s1) {
; SVE-LABEL: haddu_v2i64:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    and z0.d, z0.d, z1.d
; SVE-NEXT:    lsr z1.d, z2.d, #1
; SVE-NEXT:    add z0.d, z0.d, z1.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: haddu_v2i64:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.d
; SVE2-NEXT:    uhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT:    ret
; Unsigned halving add: trunc((zext(s0) + zext(s1)) lshr 1) -> SVE2 uhadd.
entry:
  %s0s = zext <vscale x 2 x i64> %s0 to <vscale x 2 x i128>
  %s1s = zext <vscale x 2 x i64> %s1 to <vscale x 2 x i128>
  %m = add nuw nsw <vscale x 2 x i128> %s0s, %s1s
  %s = lshr <vscale x 2 x i128> %m, splat (i128 1)
  %s2 = trunc <vscale x 2 x i128> %s to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %s2
}

define <vscale x 2 x i32> @hadds_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i32> %s1) {
; SVE-LABEL: hadds_v2i32:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    ptrue p0.d
; SVE-NEXT:    sxtw z0.d, p0/m, z0.d
; SVE-NEXT:    adr z0.d, [z0.d, z1.d, sxtw]
; SVE-NEXT:    asr z0.d, z0.d, #1
; SVE-NEXT:    ret
;
; SVE2-LABEL: hadds_v2i32:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.d
; SVE2-NEXT:    sxtw z1.d, p0/m, z1.d
; SVE2-NEXT:    sxtw z0.d, p0/m, z0.d
; SVE2-NEXT:    shadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT:    ret
; Sub-container i32-in-i64 lanes: operands need explicit sign-extension
; (sxtw) first; SVE2 then selects shadd on .d elements.
entry:
  %s0s = sext <vscale x 2 x i32> %s0 to <vscale x 2 x i64>
  %s1s = sext <vscale x 2 x i32> %s1 to <vscale x 2 x i64>
  %m = add nsw <vscale x 2 x i64> %s0s, %s1s
  %s = ashr <vscale x 2 x i64> %m, splat (i64 1)
  %s2 = trunc <vscale x 2 x i64> %s to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %s2
}

define <vscale x 2 x i32> @hadds_v2i32_lsh(<vscale x 2 x i32> %s0, <vscale x 2 x i32> %s1) {
; CHECK-LABEL: hadds_v2i32_lsh:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
; CHECK-NEXT:    adr z0.d, [z0.d, z1.d, sxtw]
; CHECK-NEXT:    lsr z0.d, z0.d, #1
; CHECK-NEXT:    ret
; lshr variant on sub-container lanes: per the shared CHECK lines, no halving
; add is formed here on either SVE or SVE2 — a plain add (via adr) + lsr is used.
entry:
  %s0s = sext <vscale x 2 x i32> %s0 to <vscale x 2 x i64>
  %s1s = sext <vscale x 2 x i32> %s1 to <vscale x 2 x i64>
  %m = add nsw <vscale x 2 x i64> %s0s, %s1s
  %s = lshr <vscale x 2 x i64> %m, splat (i64 1)
  %s2 = trunc <vscale x 2 x i64> %s to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %s2
}

define <vscale x 2 x i32> @haddu_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i32> %s1) {
; SVE-LABEL: haddu_v2i32:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    and z0.d, z0.d, #0xffffffff
; SVE-NEXT:    adr z0.d, [z0.d, z1.d, uxtw]
; SVE-NEXT:    lsr z0.d, z0.d, #1
; SVE-NEXT:    ret
;
; SVE2-LABEL: haddu_v2i32:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    and z1.d, z1.d, #0xffffffff
; SVE2-NEXT:    and z0.d, z0.d, #0xffffffff
; SVE2-NEXT:    ptrue p0.d
; SVE2-NEXT:    uhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT:    ret
; Unsigned sub-container lanes: zero-extension is done with an AND mask;
; SVE2 then selects uhadd on .d elements.
entry:
  %s0s = zext <vscale x 2 x i32> %s0 to <vscale x 2 x i64>
  %s1s = zext <vscale x 2 x i32> %s1 to <vscale x 2 x i64>
  %m = add nuw nsw <vscale x 2 x i64> %s0s, %s1s
  %s = lshr <vscale x 2 x i64> %m, splat (i64 1)
  %s2 = trunc <vscale x 2 x i64> %s to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %s2
}

define <vscale x 4 x i32> @hadds_v4i32(<vscale x 4 x i32> %s0, <vscale x 4 x i32> %s1) {
; SVE-LABEL: hadds_v4i32:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    and z0.d, z0.d, z1.d
; SVE-NEXT:    asr z1.s, z2.s, #1
; SVE-NEXT:    add z0.s, z0.s, z1.s
; SVE-NEXT:    ret
;
; SVE2-LABEL: hadds_v4i32:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.s
; SVE2-NEXT:    shadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT:    ret
; Signed halving add on full .s lanes -> SVE2 shadd.
entry:
  %s0s = sext <vscale x 4 x i32> %s0 to <vscale x 4 x i64>
  %s1s = sext <vscale x 4 x i32> %s1 to <vscale x 4 x i64>
  %m = add nsw <vscale x 4 x i64> %s0s, %s1s
  %s = ashr <vscale x 4 x i64> %m, splat (i64 1)
  %s2 = trunc <vscale x 4 x i64> %s to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %s2
}

define <vscale x 4 x i32> @hadds_v4i32_lsh(<vscale x 4 x i32> %s0, <vscale x 4 x i32> %s1) {
; SVE-LABEL: hadds_v4i32_lsh:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    and z0.d, z0.d, z1.d
; SVE-NEXT:    asr z1.s, z2.s, #1
; SVE-NEXT:    add z0.s, z0.s, z1.s
; SVE-NEXT:    ret
;
; SVE2-LABEL: hadds_v4i32_lsh:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.s
; SVE2-NEXT:    shadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT:    ret
; lshr variant; per the CHECK lines this still selects shadd on SVE2.
entry:
  %s0s = sext <vscale x 4 x i32> %s0 to <vscale x 4 x i64>
  %s1s = sext <vscale x 4 x i32> %s1 to <vscale x 4 x i64>
  %m = add nsw <vscale x 4 x i64> %s0s, %s1s
  %s = lshr <vscale x 4 x i64> %m, splat (i64 1)
  %s2 = trunc <vscale x 4 x i64> %s to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %s2
}

define <vscale x 4 x i32> @haddu_v4i32(<vscale x 4 x i32> %s0, <vscale x 4 x i32> %s1) {
; SVE-LABEL: haddu_v4i32:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    and z0.d, z0.d, z1.d
; SVE-NEXT:    lsr z1.s, z2.s, #1
; SVE-NEXT:    add z0.s, z0.s, z1.s
; SVE-NEXT:    ret
;
; SVE2-LABEL: haddu_v4i32:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.s
; SVE2-NEXT:    uhadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT:    ret
; Unsigned halving add on full .s lanes -> SVE2 uhadd.
entry:
  %s0s = zext <vscale x 4 x i32> %s0 to <vscale x 4 x i64>
  %s1s = zext <vscale x 4 x i32> %s1 to <vscale x 4 x i64>
  %m = add nuw nsw <vscale x 4 x i64> %s0s, %s1s
  %s = lshr <vscale x 4 x i64> %m, splat (i64 1)
  %s2 = trunc <vscale x 4 x i64> %s to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %s2
}

define <vscale x 2 x i16> @hadds_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i16> %s1) {
; SVE-LABEL: hadds_v2i16:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    ptrue p0.d
; SVE-NEXT:    sxth z1.d, p0/m, z1.d
; SVE-NEXT:    sxth z0.d, p0/m, z0.d
; SVE-NEXT:    add z0.d, z0.d, z1.d
; SVE-NEXT:    asr z0.d, z0.d, #1
; SVE-NEXT:    ret
;
; SVE2-LABEL: hadds_v2i16:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.d
; SVE2-NEXT:    sxth z1.d, p0/m, z1.d
; SVE2-NEXT:    sxth z0.d, p0/m, z0.d
; SVE2-NEXT:    shadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT:    ret
; i16-in-i64 lanes: sign-extend with sxth first; SVE2 then selects shadd.
entry:
  %s0s = sext <vscale x 2 x i16> %s0 to <vscale x 2 x i32>
  %s1s = sext <vscale x 2 x i16> %s1 to <vscale x 2 x i32>
  %m = add nsw <vscale x 2 x i32> %s0s, %s1s
  %s = ashr <vscale x 2 x i32> %m, splat (i32 1)
  %s2 = trunc <vscale x 2 x i32> %s to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %s2
}

define <vscale x 2 x i16> @hadds_v2i16_lsh(<vscale x 2 x i16> %s0, <vscale x 2 x i16> %s1) {
; CHECK-LABEL: hadds_v2i16_lsh:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    sxth z0.d, p0/m, z0.d
; CHECK-NEXT:    sxth z1.d, p0/m, z1.d
; CHECK-NEXT:    add z0.d, z0.d, z1.d
; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
; CHECK-NEXT:    lsr z0.d, z0.d, #1
; CHECK-NEXT:    ret
; lshr variant on sub-container lanes: no halving add is formed on either run
; line; the i32 lshr needs an explicit mask of the low 32 bits before lsr.
entry:
  %s0s = sext <vscale x 2 x i16> %s0 to <vscale x 2 x i32>
  %s1s = sext <vscale x 2 x i16> %s1 to <vscale x 2 x i32>
  %m = add nsw <vscale x 2 x i32> %s0s, %s1s
  %s = lshr <vscale x 2 x i32> %m, splat (i32 1)
  %s2 = trunc <vscale x 2 x i32> %s to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %s2
}

define <vscale x 2 x i16> @haddu_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i16> %s1) {
; SVE-LABEL: haddu_v2i16:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    and z1.d, z1.d, #0xffff
; SVE-NEXT:    and z0.d, z0.d, #0xffff
; SVE-NEXT:    add z0.d, z0.d, z1.d
; SVE-NEXT:    lsr z0.d, z0.d, #1
; SVE-NEXT:    ret
;
; SVE2-LABEL: haddu_v2i16:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    and z1.d, z1.d, #0xffff
; SVE2-NEXT:    and z0.d, z0.d, #0xffff
; SVE2-NEXT:    ptrue p0.d
; SVE2-NEXT:    uhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT:    ret
; Unsigned i16-in-i64 lanes: zero-extend with AND masks; SVE2 selects uhadd.
entry:
  %s0s = zext <vscale x 2 x i16> %s0 to <vscale x 2 x i32>
  %s1s = zext <vscale x 2 x i16> %s1 to <vscale x 2 x i32>
  %m = add nuw nsw <vscale x 2 x i32> %s0s, %s1s
  %s = lshr <vscale x 2 x i32> %m, splat (i32 1)
  %s2 = trunc <vscale x 2 x i32> %s to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %s2
}

define <vscale x 4 x i16> @hadds_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i16> %s1) {
; SVE-LABEL: hadds_v4i16:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    ptrue p0.s
; SVE-NEXT:    sxth z1.s, p0/m, z1.s
; SVE-NEXT:    sxth z0.s, p0/m, z0.s
; SVE-NEXT:    add z0.s, z0.s, z1.s
; SVE-NEXT:    asr z0.s, z0.s, #1
; SVE-NEXT:    ret
;
; SVE2-LABEL: hadds_v4i16:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.s
; SVE2-NEXT:    sxth z1.s, p0/m, z1.s
; SVE2-NEXT:    sxth z0.s, p0/m, z0.s
; SVE2-NEXT:    shadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT:    ret
; i16-in-i32 lanes: sign-extend with sxth first; SVE2 then selects shadd.
entry:
  %s0s = sext <vscale x 4 x i16> %s0 to <vscale x 4 x i32>
  %s1s = sext <vscale x 4 x i16> %s1 to <vscale x 4 x i32>
  %m = add nsw <vscale x 4 x i32> %s0s, %s1s
  %s = ashr <vscale x 4 x i32> %m, splat (i32 1)
  %s2 = trunc <vscale x 4 x i32> %s to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %s2
}

define <vscale x 4 x i16> @hadds_v4i16_lsh(<vscale x 4 x i16> %s0, <vscale x 4 x i16> %s1) {
; CHECK-LABEL: hadds_v4i16_lsh:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
; CHECK-NEXT:    add z0.s, z0.s, z1.s
; CHECK-NEXT:    lsr z0.s, z0.s, #1
; CHECK-NEXT:    ret
; lshr variant: no halving add is formed on either run line; plain add + lsr.
entry:
  %s0s = sext <vscale x 4 x i16> %s0 to <vscale x 4 x i32>
  %s1s = sext <vscale x 4 x i16> %s1 to <vscale x 4 x i32>
  %m = add nsw <vscale x 4 x i32> %s0s, %s1s
  %s = lshr <vscale x 4 x i32> %m, splat (i32 1)
  %s2 = trunc <vscale x 4 x i32> %s to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %s2
}

define <vscale x 4 x i16> @haddu_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i16> %s1) {
; SVE-LABEL: haddu_v4i16:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    and z1.s, z1.s, #0xffff
; SVE-NEXT:    and z0.s, z0.s, #0xffff
; SVE-NEXT:    add z0.s, z0.s, z1.s
; SVE-NEXT:    lsr z0.s, z0.s, #1
; SVE-NEXT:    ret
;
; SVE2-LABEL: haddu_v4i16:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    and z1.s, z1.s, #0xffff
; SVE2-NEXT:    and z0.s, z0.s, #0xffff
; SVE2-NEXT:    ptrue p0.s
; SVE2-NEXT:    uhadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT:    ret
; Unsigned i16-in-i32 lanes: zero-extend with AND masks; SVE2 selects uhadd.
entry:
  %s0s = zext <vscale x 4 x i16> %s0 to <vscale x 4 x i32>
  %s1s = zext <vscale x 4 x i16> %s1 to <vscale x 4 x i32>
  %m = add nuw nsw <vscale x 4 x i32> %s0s, %s1s
  %s = lshr <vscale x 4 x i32> %m, splat (i32 1)
  %s2 = trunc <vscale x 4 x i32> %s to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %s2
}

define <vscale x 8 x i16> @hadds_v8i16(<vscale x 8 x i16> %s0, <vscale x 8 x i16> %s1) {
; SVE-LABEL: hadds_v8i16:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    and z0.d, z0.d, z1.d
; SVE-NEXT:    asr z1.h, z2.h, #1
; SVE-NEXT:    add z0.h, z0.h, z1.h
; SVE-NEXT:    ret
;
; SVE2-LABEL: hadds_v8i16:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.h
; SVE2-NEXT:    shadd z0.h, p0/m, z0.h, z1.h
; SVE2-NEXT:    ret
; Signed halving add on full .h lanes -> SVE2 shadd.
entry:
  %s0s = sext <vscale x 8 x i16> %s0 to <vscale x 8 x i32>
  %s1s = sext <vscale x 8 x i16> %s1 to <vscale x 8 x i32>
  %m = add nsw <vscale x 8 x i32> %s0s, %s1s
  %s = ashr <vscale x 8 x i32> %m, splat (i32 1)
  %s2 = trunc <vscale x 8 x i32> %s to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %s2
}

define <vscale x 8 x i16> @hadds_v8i16_lsh(<vscale x 8 x i16> %s0, <vscale x 8 x i16> %s1) {
; SVE-LABEL: hadds_v8i16_lsh:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    and z0.d, z0.d, z1.d
; SVE-NEXT:    asr z1.h, z2.h, #1
; SVE-NEXT:    add z0.h, z0.h, z1.h
; SVE-NEXT:    ret
;
; SVE2-LABEL: hadds_v8i16_lsh:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.h
; SVE2-NEXT:    shadd z0.h, p0/m, z0.h, z1.h
; SVE2-NEXT:    ret
; lshr variant; per the CHECK lines this still selects shadd on SVE2.
entry:
  %s0s = sext <vscale x 8 x i16> %s0 to <vscale x 8 x i32>
  %s1s = sext <vscale x 8 x i16> %s1 to <vscale x 8 x i32>
  %m = add nsw <vscale x 8 x i32> %s0s, %s1s
  %s = lshr <vscale x 8 x i32> %m, splat (i32 1)
  %s2 = trunc <vscale x 8 x i32> %s to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %s2
}

define <vscale x 8 x i16> @haddu_v8i16(<vscale x 8 x i16> %s0, <vscale x 8 x i16> %s1) {
; SVE-LABEL: haddu_v8i16:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    and z0.d, z0.d, z1.d
; SVE-NEXT:    lsr z1.h, z2.h, #1
; SVE-NEXT:    add z0.h, z0.h, z1.h
; SVE-NEXT:    ret
;
; SVE2-LABEL: haddu_v8i16:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.h
; SVE2-NEXT:    uhadd z0.h, p0/m, z0.h, z1.h
; SVE2-NEXT:    ret
; Unsigned halving add on full .h lanes -> SVE2 uhadd.
entry:
  %s0s = zext <vscale x 8 x i16> %s0 to <vscale x 8 x i32>
  %s1s = zext <vscale x 8 x i16> %s1 to <vscale x 8 x i32>
  %m = add nuw nsw <vscale x 8 x i32> %s0s, %s1s
  %s = lshr <vscale x 8 x i32> %m, splat (i32 1)
  %s2 = trunc <vscale x 8 x i32> %s to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %s2
}

define <vscale x 4 x i8> @hadds_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s1) {
; SVE-LABEL: hadds_v4i8:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    ptrue p0.s
; SVE-NEXT:    sxtb z1.s, p0/m, z1.s
; SVE-NEXT:    sxtb z0.s, p0/m, z0.s
; SVE-NEXT:    add z0.s, z0.s, z1.s
; SVE-NEXT:    asr z0.s, z0.s, #1
; SVE-NEXT:    ret
;
; SVE2-LABEL: hadds_v4i8:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.s
; SVE2-NEXT:    sxtb z1.s, p0/m, z1.s
; SVE2-NEXT:    sxtb z0.s, p0/m, z0.s
; SVE2-NEXT:    shadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT:    ret
; i8-in-i32 lanes: sign-extend with sxtb first; SVE2 then selects shadd.
entry:
  %s0s = sext <vscale x 4 x i8> %s0 to <vscale x 4 x i16>
  %s1s = sext <vscale x 4 x i8> %s1 to <vscale x 4 x i16>
  %m = add nsw <vscale x 4 x i16> %s0s, %s1s
  %s = ashr <vscale x 4 x i16> %m, splat (i16 1)
  %s2 = trunc <vscale x 4 x i16> %s to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %s2
}

define <vscale x 4 x i8> @hadds_v4i8_lsh(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s1) {
; CHECK-LABEL: hadds_v4i8_lsh:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
; CHECK-NEXT:    sxtb z1.s, p0/m, z1.s
; CHECK-NEXT:    add z0.s, z0.s, z1.s
; CHECK-NEXT:    and z0.s, z0.s, #0xffff
; CHECK-NEXT:    lsr z0.s, z0.s, #1
; CHECK-NEXT:    ret
; lshr variant: no halving add is formed on either run line; the i16 lshr
; needs an explicit mask of the low 16 bits before lsr.
entry:
  %s0s = sext <vscale x 4 x i8> %s0 to <vscale x 4 x i16>
  %s1s = sext <vscale x 4 x i8> %s1 to <vscale x 4 x i16>
  %m = add nsw <vscale x 4 x i16> %s0s, %s1s
  %s = lshr <vscale x 4 x i16> %m, splat (i16 1)
  %s2 = trunc <vscale x 4 x i16> %s to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %s2
}

define <vscale x 4 x i8> @haddu_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s1) {
; SVE-LABEL: haddu_v4i8:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    and z1.s, z1.s, #0xff
; SVE-NEXT:    and z0.s, z0.s, #0xff
; SVE-NEXT:    add z0.s, z0.s, z1.s
; SVE-NEXT:    lsr z0.s, z0.s, #1
; SVE-NEXT:    ret
;
; SVE2-LABEL: haddu_v4i8:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    and z1.s, z1.s, #0xff
; SVE2-NEXT:    and z0.s, z0.s, #0xff
; SVE2-NEXT:    ptrue p0.s
; SVE2-NEXT:    uhadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT:    ret
; Unsigned i8-in-i32 lanes: zero-extend with AND masks; SVE2 selects uhadd.
entry:
  %s0s = zext <vscale x 4 x i8> %s0 to <vscale x 4 x i16>
  %s1s = zext <vscale x 4 x i8> %s1 to <vscale x 4 x i16>
  %m = add nuw nsw <vscale x 4 x i16> %s0s, %s1s
  %s = lshr <vscale x 4 x i16> %m, splat (i16 1)
  %s2 = trunc <vscale x 4 x i16> %s to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %s2
}

define <vscale x 8 x i8> @hadds_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s1) {
; SVE-LABEL: hadds_v8i8:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    ptrue p0.h
; SVE-NEXT:    sxtb z1.h, p0/m, z1.h
; SVE-NEXT:    sxtb z0.h, p0/m, z0.h
; SVE-NEXT:    add z0.h, z0.h, z1.h
; SVE-NEXT:    asr z0.h, z0.h, #1
; SVE-NEXT:    ret
;
; SVE2-LABEL: hadds_v8i8:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.h
; SVE2-NEXT:    sxtb z1.h, p0/m, z1.h
; SVE2-NEXT:    sxtb z0.h, p0/m, z0.h
; SVE2-NEXT:    shadd z0.h, p0/m, z0.h, z1.h
; SVE2-NEXT:    ret
; i8-in-i16 lanes: sign-extend with sxtb first; SVE2 then selects shadd.
entry:
  %s0s = sext <vscale x 8 x i8> %s0 to <vscale x 8 x i16>
  %s1s = sext <vscale x 8 x i8> %s1 to <vscale x 8 x i16>
  %m = add nsw <vscale x 8 x i16> %s0s, %s1s
  %s = ashr <vscale x 8 x i16> %m, splat (i16 1)
  %s2 = trunc <vscale x 8 x i16> %s to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %s2
}

define <vscale x 8 x i8> @hadds_v8i8_lsh(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s1) {
; CHECK-LABEL: hadds_v8i8_lsh:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
; CHECK-NEXT:    add z0.h, z0.h, z1.h
; CHECK-NEXT:    lsr z0.h, z0.h, #1
; CHECK-NEXT:    ret
; lshr variant: no halving add is formed on either run line; plain add + lsr
; (the i16 add fills the .h container, so no mask is needed).
entry:
  %s0s = sext <vscale x 8 x i8> %s0 to <vscale x 8 x i16>
  %s1s = sext <vscale x 8 x i8> %s1 to <vscale x 8 x i16>
  %m = add nsw <vscale x 8 x i16> %s0s, %s1s
  %s = lshr <vscale x 8 x i16> %m, splat (i16 1)
  %s2 = trunc <vscale x 8 x i16> %s to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %s2
}

define <vscale x 8 x i8> @haddu_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s1) {
; SVE-LABEL: haddu_v8i8:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    and z1.h, z1.h, #0xff
; SVE-NEXT:    and z0.h, z0.h, #0xff
; SVE-NEXT:    add z0.h, z0.h, z1.h
; SVE-NEXT:    lsr z0.h, z0.h, #1
; SVE-NEXT:    ret
;
; SVE2-LABEL: haddu_v8i8:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    and z1.h, z1.h, #0xff
; SVE2-NEXT:    and z0.h, z0.h, #0xff
; SVE2-NEXT:    ptrue p0.h
; SVE2-NEXT:    uhadd z0.h, p0/m, z0.h, z1.h
; SVE2-NEXT:    ret
; Unsigned i8-in-i16 lanes: zero-extend with AND masks; SVE2 selects uhadd.
entry:
  %s0s = zext <vscale x 8 x i8> %s0 to <vscale x 8 x i16>
  %s1s = zext <vscale x 8 x i8> %s1 to <vscale x 8 x i16>
  %m = add nuw nsw <vscale x 8 x i16> %s0s, %s1s
  %s = lshr <vscale x 8 x i16> %m, splat (i16 1)
  %s2 = trunc <vscale x 8 x i16> %s to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %s2
}

define <vscale x 16 x i8> @hadds_v16i8(<vscale x 16 x i8> %s0, <vscale x 16 x i8> %s1) {
; SVE-LABEL: hadds_v16i8:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    and z0.d, z0.d, z1.d
; SVE-NEXT:    asr z1.b, z2.b, #1
; SVE-NEXT:    add z0.b, z0.b, z1.b
; SVE-NEXT:    ret
;
; SVE2-LABEL: hadds_v16i8:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.b
; SVE2-NEXT:    shadd z0.b, p0/m, z0.b, z1.b
; SVE2-NEXT:    ret
; Signed halving add on full .b lanes -> SVE2 shadd.
entry:
  %s0s = sext <vscale x 16 x i8> %s0 to <vscale x 16 x i16>
  %s1s = sext <vscale x 16 x i8> %s1 to <vscale x 16 x i16>
  %m = add nsw <vscale x 16 x i16> %s0s, %s1s
  %s = ashr <vscale x 16 x i16> %m, splat (i16 1)
  %s2 = trunc <vscale x 16 x i16> %s to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %s2
}

define <vscale x 16 x i8> @hadds_v16i8_lsh(<vscale x 16 x i8> %s0, <vscale x 16 x i8> %s1) {
; SVE-LABEL: hadds_v16i8_lsh:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    and z0.d, z0.d, z1.d
; SVE-NEXT:    asr z1.b, z2.b, #1
; SVE-NEXT:    add z0.b, z0.b, z1.b
; SVE-NEXT:    ret
;
; SVE2-LABEL: hadds_v16i8_lsh:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.b
; SVE2-NEXT:    shadd z0.b, p0/m, z0.b, z1.b
; SVE2-NEXT:    ret
; lshr variant; per the CHECK lines this still selects shadd on SVE2.
entry:
  %s0s = sext <vscale x 16 x i8> %s0 to <vscale x 16 x i16>
  %s1s = sext <vscale x 16 x i8> %s1 to <vscale x 16 x i16>
  %m = add nsw <vscale x 16 x i16> %s0s, %s1s
  %s = lshr <vscale x 16 x i16> %m, splat (i16 1)
  %s2 = trunc <vscale x 16 x i16> %s to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %s2
}

define <vscale x 16 x i8> @haddu_v16i8(<vscale x 16 x i8> %s0, <vscale x 16 x i8> %s1) {
; SVE-LABEL: haddu_v16i8:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    and z0.d, z0.d, z1.d
; SVE-NEXT:    lsr z1.b, z2.b, #1
; SVE-NEXT:    add z0.b, z0.b, z1.b
; SVE-NEXT:    ret
;
; SVE2-LABEL: haddu_v16i8:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.b
; SVE2-NEXT:    uhadd z0.b, p0/m, z0.b, z1.b
; SVE2-NEXT:    ret
; Unsigned halving add on full .b lanes -> SVE2 uhadd.
entry:
  %s0s = zext <vscale x 16 x i8> %s0 to <vscale x 16 x i16>
  %s1s = zext <vscale x 16 x i8> %s1 to <vscale x 16 x i16>
  %m = add nuw nsw <vscale x 16 x i16> %s0s, %s1s
  %s = lshr <vscale x 16 x i16> %m, splat (i16 1)
  %s2 = trunc <vscale x 16 x i16> %s to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %s2
}

define <vscale x 2 x i64> @rhadds_v2i64(<vscale x 2 x i64> %s0, <vscale x 2 x i64> %s1) {
; SVE-LABEL: rhadds_v2i64:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    orr z0.d, z0.d, z1.d
; SVE-NEXT:    asr z1.d, z2.d, #1
; SVE-NEXT:    sub z0.d, z0.d, z1.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: rhadds_v2i64:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.d
; SVE2-NEXT:    srhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT:    ret
; Signed rounding halving add: trunc((sext(s0) + sext(s1) + 1) ashr 1).
; SVE2 selects srhadd; base SVE uses the (a | b) - ((a ^ b) >> 1) expansion.
entry:
  %s0s = sext <vscale x 2 x i64> %s0 to <vscale x 2 x i128>
  %s1s = sext <vscale x 2 x i64> %s1 to <vscale x 2 x i128>
  %add = add <vscale x 2 x i128> %s0s, splat (i128 1)
  %add2 = add <vscale x 2 x i128> %add, %s1s
  %s = ashr <vscale x 2 x i128> %add2, splat (i128 1)
  %result = trunc <vscale x 2 x i128> %s to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %result
}

define <vscale x 2 x i64> @rhadds_v2i64_lsh(<vscale x 2 x i64> %s0, <vscale x 2 x i64> %s1) {
; SVE-LABEL: rhadds_v2i64_lsh:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    orr z0.d, z0.d, z1.d
; SVE-NEXT:    asr z1.d, z2.d, #1
; SVE-NEXT:    sub z0.d, z0.d, z1.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: rhadds_v2i64_lsh:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.d
; SVE2-NEXT:    srhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT:    ret
; lshr variant; per the CHECK lines this still selects srhadd on SVE2.
entry:
  %s0s = sext <vscale x 2 x i64> %s0 to <vscale x 2 x i128>
  %s1s = sext <vscale x 2 x i64> %s1 to <vscale x 2 x i128>
  %add = add <vscale x 2 x i128> %s0s, splat (i128 1)
  %add2 = add <vscale x 2 x i128> %add, %s1s
  %s = lshr <vscale x 2 x i128> %add2, splat (i128 1)
  %result = trunc <vscale x 2 x i128> %s to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %result
}

define <vscale x 2 x i64> @rhaddu_v2i64(<vscale x 2 x i64> %s0, <vscale x 2 x i64> %s1) {
; SVE-LABEL: rhaddu_v2i64:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    orr z0.d, z0.d, z1.d
; SVE-NEXT:    lsr z1.d, z2.d, #1
; SVE-NEXT:    sub z0.d, z0.d, z1.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: rhaddu_v2i64:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.d
; SVE2-NEXT:    urhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT:    ret
; Unsigned rounding halving add: trunc((zext(s0) + zext(s1) + 1) lshr 1)
; -> SVE2 urhadd.
entry:
  %s0s = zext <vscale x 2 x i64> %s0 to <vscale x 2 x i128>
  %s1s = zext <vscale x 2 x i64> %s1 to <vscale x 2 x i128>
  %add = add nuw nsw <vscale x 2 x i128> %s0s, splat (i128 1)
  %add2 = add nuw nsw <vscale x 2 x i128> %add, %s1s
  %s = lshr <vscale x 2 x i128> %add2, splat (i128 1)
  %result = trunc <vscale x 2 x i128> %s to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %result
}

define <vscale x 2 x i32> @rhadds_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i32> %s1) {
; SVE-LABEL: rhadds_v2i32:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    ptrue p0.d
; SVE-NEXT:    mov z2.d, #-1 // =0xffffffffffffffff
; SVE-NEXT:    sxtw z0.d, p0/m, z0.d
; SVE-NEXT:    sxtw z1.d, p0/m, z1.d
; SVE-NEXT:    eor z0.d, z0.d, z2.d
; SVE-NEXT:    sub z0.d, z1.d, z0.d
; SVE-NEXT:    asr z0.d, z0.d, #1
; SVE-NEXT:    ret
;
; SVE2-LABEL: rhadds_v2i32:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.d
; SVE2-NEXT:    sxtw z1.d, p0/m, z1.d
; SVE2-NEXT:    sxtw z0.d, p0/m, z0.d
; SVE2-NEXT:    srhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT:    ret
; Rounding variant on sub-container i32 lanes: operands are sign-extended
; (sxtw); SVE2 selects srhadd; base SVE rewrites a+b+1 as b - (~a) first.
entry:
  %s0s = sext <vscale x 2 x i32> %s0 to <vscale x 2 x i64>
  %s1s = sext <vscale x 2 x i32> %s1 to <vscale x 2 x i64>
  %add = add <vscale x 2 x i64> %s0s, splat (i64 1)
  %add2 = add <vscale x 2 x i64> %add, %s1s
  %s = ashr <vscale x 2 x i64> %add2, splat (i64 1)
  %result = trunc <vscale x 2 x i64> %s to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %result
}

define <vscale x 2 x i32> @rhadds_v2i32_lsh(<vscale x 2 x i32> %s0, <vscale x 2 x i32> %s1) {
; CHECK-LABEL: rhadds_v2i32_lsh:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z2.d, #-1 // =0xffffffffffffffff
; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
; CHECK-NEXT:    eor z0.d, z0.d, z2.d
; CHECK-NEXT:    sub z0.d, z1.d, z0.d
; CHECK-NEXT:    lsr z0.d, z0.d, #1
; CHECK-NEXT:    ret
; lshr variant: no rounding halving add is formed on either run line.
entry:
  %s0s = sext <vscale x 2 x i32> %s0 to <vscale x 2 x i64>
  %s1s = sext <vscale x 2 x i32> %s1 to <vscale x 2 x i64>
  %add = add <vscale x 2 x i64> %s0s, splat (i64 1)
  %add2 = add <vscale x 2 x i64> %add, %s1s
  %s = lshr <vscale x 2 x i64> %add2, splat (i64 1)
  %result = trunc <vscale x 2 x i64> %s to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %result
}

define <vscale x 2 x i32> @rhaddu_v2i32(<vscale x 2 x i32> %s0, <vscale x 2 x i32> %s1) {
; SVE-LABEL: rhaddu_v2i32:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    mov z2.d, #-1 // =0xffffffffffffffff
; SVE-NEXT:    and z0.d, z0.d, #0xffffffff
; SVE-NEXT:    and z1.d, z1.d, #0xffffffff
; SVE-NEXT:    eor z0.d, z0.d, z2.d
; SVE-NEXT:    sub z0.d, z1.d, z0.d
; SVE-NEXT:    lsr z0.d, z0.d, #1
; SVE-NEXT:    ret
;
; SVE2-LABEL: rhaddu_v2i32:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    and z1.d, z1.d, #0xffffffff
; SVE2-NEXT:    and z0.d, z0.d, #0xffffffff
; SVE2-NEXT:    ptrue p0.d
; SVE2-NEXT:    urhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT:    ret
; Unsigned rounding variant on sub-container i32 lanes: zero-extend with AND
; masks; SVE2 selects urhadd.
entry:
  %s0s = zext <vscale x 2 x i32> %s0 to <vscale x 2 x i64>
  %s1s = zext <vscale x 2 x i32> %s1 to <vscale x 2 x i64>
  %add = add nuw nsw <vscale x 2 x i64> %s0s, splat (i64 1)
  %add2 = add nuw nsw <vscale x 2 x i64> %add, %s1s
  %s = lshr <vscale x 2 x i64> %add2, splat (i64 1)
  %result = trunc <vscale x 2 x i64> %s to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %result
}

define <vscale x 4 x i32> @rhadds_v4i32(<vscale x 4 x i32> %s0, <vscale x 4 x i32> %s1) {
; SVE-LABEL: rhadds_v4i32:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    orr z0.d, z0.d, z1.d
; SVE-NEXT:    asr z1.s, z2.s, #1
; SVE-NEXT:    sub z0.s, z0.s, z1.s
; SVE-NEXT:    ret
;
; SVE2-LABEL: rhadds_v4i32:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.s
; SVE2-NEXT:    srhadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT:    ret
; Signed rounding halving add on full .s lanes -> SVE2 srhadd.
entry:
  %s0s = sext <vscale x 4 x i32> %s0 to <vscale x 4 x i64>
  %s1s = sext <vscale x 4 x i32> %s1 to <vscale x 4 x i64>
  %add = add <vscale x 4 x i64> %s0s, splat (i64 1)
  %add2 = add <vscale x 4 x i64> %add, %s1s
  %s = ashr <vscale x 4 x i64> %add2, splat (i64 1)
  %result = trunc <vscale x 4 x i64> %s to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %result
}

define <vscale x 4 x i32> @rhadds_v4i32_lsh(<vscale x 4 x i32> %s0, <vscale x 4 x i32> %s1) {
; SVE-LABEL: rhadds_v4i32_lsh:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    orr z0.d, z0.d, z1.d
; SVE-NEXT:    asr z1.s, z2.s, #1
; SVE-NEXT:    sub z0.s, z0.s, z1.s
; SVE-NEXT:    ret
;
; SVE2-LABEL: rhadds_v4i32_lsh:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.s
; SVE2-NEXT:    srhadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT:    ret
; lshr variant; per the CHECK lines this still selects srhadd on SVE2.
entry:
  %s0s = sext <vscale x 4 x i32> %s0 to <vscale x 4 x i64>
  %s1s = sext <vscale x 4 x i32> %s1 to <vscale x 4 x i64>
  %add = add <vscale x 4 x i64> %s0s, splat (i64 1)
  %add2 = add <vscale x 4 x i64> %add, %s1s
  %s = lshr <vscale x 4 x i64> %add2, splat (i64 1)
  %result = trunc <vscale x 4 x i64> %s to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %result
}

define <vscale x 4 x i32> @rhaddu_v4i32(<vscale x 4 x i32> %s0, <vscale x 4 x i32> %s1) {
; SVE-LABEL: rhaddu_v4i32:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    eor z2.d, z0.d, z1.d
; SVE-NEXT:    orr z0.d, z0.d, z1.d
; SVE-NEXT:    lsr z1.s, z2.s, #1
; SVE-NEXT:    sub z0.s, z0.s, z1.s
; SVE-NEXT:    ret
;
; SVE2-LABEL: rhaddu_v4i32:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.s
; SVE2-NEXT:    urhadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT:    ret
; Unsigned rounding halving add on full .s lanes -> SVE2 urhadd.
entry:
  %s0s = zext <vscale x 4 x i32> %s0 to <vscale x 4 x i64>
  %s1s = zext <vscale x 4 x i32> %s1 to <vscale x 4 x i64>
  %add = add nuw nsw <vscale x 4 x i64> %s0s, splat (i64 1)
  %add2 = add nuw nsw <vscale x 4 x i64> %add, %s1s
  %s = lshr <vscale x 4 x i64> %add2, splat (i64 1)
  %result = trunc <vscale x 4 x i64> %s to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %result
}

define <vscale x 2 x i16> @rhadds_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i16> %s1) {
; SVE-LABEL: rhadds_v2i16:
; SVE:       // %bb.0: // %entry
; SVE-NEXT:    ptrue p0.d
; SVE-NEXT:    mov z2.d, #-1 // =0xffffffffffffffff
; SVE-NEXT:    sxth z0.d, p0/m, z0.d
; SVE-NEXT:    sxth z1.d, p0/m, z1.d
; SVE-NEXT:    eor z0.d, z0.d, z2.d
; SVE-NEXT:    sub z0.d, z1.d, z0.d
; SVE-NEXT:    asr z0.d, z0.d, #1
; SVE-NEXT:    ret
;
; SVE2-LABEL: rhadds_v2i16:
; SVE2:       // %bb.0: // %entry
; SVE2-NEXT:    ptrue p0.d
; SVE2-NEXT:    sxth z1.d, p0/m, z1.d
; SVE2-NEXT:    sxth z0.d, p0/m, z0.d
; SVE2-NEXT:    srhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT:    ret
; Rounding variant on i16-in-i64 lanes: sign-extend with sxth; SVE2 selects
; srhadd; base SVE rewrites a+b+1 as b - (~a) first.
entry:
  %s0s = sext <vscale x 2 x i16> %s0 to <vscale x 2 x i32>
  %s1s = sext <vscale x 2 x i16> %s1 to <vscale x 2 x i32>
  %add = add <vscale x 2 x i32> %s0s, splat (i32 1)
  %add2 = add <vscale x 2 x i32> %add, %s1s
  %s = ashr <vscale x 2 x i32> %add2, splat (i32 1)
  %result = trunc <vscale x 2 x i32> %s to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %result
}

; lshr variant of rhadds_v2i16: the logical shift of a sign-extended sum is
; not a signed rounding halving add here, so no srhadd forms (CHECK covers
; both SVE and SVE2).
define <vscale x 2 x i16> @rhadds_v2i16_lsh(<vscale x 2 x i16> %s0, <vscale x 2 x i16> %s1) {
; CHECK-LABEL: rhadds_v2i16_lsh:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov z2.d, #-1 // =0xffffffffffffffff
; CHECK-NEXT: sxth z0.d, p0/m, z0.d
; CHECK-NEXT: sxth z1.d, p0/m, z1.d
; CHECK-NEXT: eor z0.d, z0.d, z2.d
; CHECK-NEXT: sub z0.d, z1.d, z0.d
; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
; CHECK-NEXT: lsr z0.d, z0.d, #1
; CHECK-NEXT: ret
entry:
  %s0s = sext <vscale x 2 x i16> %s0 to <vscale x 2 x i32>
  %s1s = sext <vscale x 2 x i16> %s1 to <vscale x 2 x i32>
  %add = add <vscale x 2 x i32> %s0s, splat (i32 1)
  %add2 = add <vscale x 2 x i32> %add, %s1s
  %s = lshr <vscale x 2 x i32> %add2, splat (i32 1)
  %result = trunc <vscale x 2 x i32> %s to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %result
}
|
|
|
|
; Unsigned rounding halving add on the illegal type nxv2i16: operands are
; zero-extended in-register (and #0xffff) first; SVE2 selects urhadd on .d.
define <vscale x 2 x i16> @rhaddu_v2i16(<vscale x 2 x i16> %s0, <vscale x 2 x i16> %s1) {
; SVE-LABEL: rhaddu_v2i16:
; SVE: // %bb.0: // %entry
; SVE-NEXT: mov z2.d, #-1 // =0xffffffffffffffff
; SVE-NEXT: and z0.d, z0.d, #0xffff
; SVE-NEXT: and z1.d, z1.d, #0xffff
; SVE-NEXT: eor z0.d, z0.d, z2.d
; SVE-NEXT: sub z0.d, z1.d, z0.d
; SVE-NEXT: lsr z0.d, z0.d, #1
; SVE-NEXT: ret
;
; SVE2-LABEL: rhaddu_v2i16:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: and z1.d, z1.d, #0xffff
; SVE2-NEXT: and z0.d, z0.d, #0xffff
; SVE2-NEXT: ptrue p0.d
; SVE2-NEXT: urhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT: ret
entry:
  %s0s = zext <vscale x 2 x i16> %s0 to <vscale x 2 x i32>
  %s1s = zext <vscale x 2 x i16> %s1 to <vscale x 2 x i32>
  %add = add nuw nsw <vscale x 2 x i32> %s0s, splat (i32 1)
  %add2 = add nuw nsw <vscale x 2 x i32> %add, %s1s
  %s = lshr <vscale x 2 x i32> %add2, splat (i32 1)
  %result = trunc <vscale x 2 x i32> %s to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %result
}
|
|
|
|
; Signed rounding halving add on nxv4i16 (i16 elements in .s lanes): operands
; are sign-extended in-register (sxth); SVE2 selects srhadd on .s.
define <vscale x 4 x i16> @rhadds_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i16> %s1) {
; SVE-LABEL: rhadds_v4i16:
; SVE: // %bb.0: // %entry
; SVE-NEXT: ptrue p0.s
; SVE-NEXT: mov z2.s, #-1 // =0xffffffffffffffff
; SVE-NEXT: sxth z0.s, p0/m, z0.s
; SVE-NEXT: sxth z1.s, p0/m, z1.s
; SVE-NEXT: eor z0.d, z0.d, z2.d
; SVE-NEXT: sub z0.s, z1.s, z0.s
; SVE-NEXT: asr z0.s, z0.s, #1
; SVE-NEXT: ret
;
; SVE2-LABEL: rhadds_v4i16:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.s
; SVE2-NEXT: sxth z1.s, p0/m, z1.s
; SVE2-NEXT: sxth z0.s, p0/m, z0.s
; SVE2-NEXT: srhadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT: ret
entry:
  %s0s = sext <vscale x 4 x i16> %s0 to <vscale x 4 x i32>
  %s1s = sext <vscale x 4 x i16> %s1 to <vscale x 4 x i32>
  %add = add <vscale x 4 x i32> %s0s, splat (i32 1)
  %add2 = add <vscale x 4 x i32> %add, %s1s
  %s = ashr <vscale x 4 x i32> %add2, splat (i32 1)
  %result = trunc <vscale x 4 x i32> %s to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %result
}
|
|
|
|
; lshr variant of rhadds_v4i16: logical shift of the sign-extended sum
; prevents the srhadd combine, so SVE and SVE2 emit the same expansion.
define <vscale x 4 x i16> @rhadds_v4i16_lsh(<vscale x 4 x i16> %s0, <vscale x 4 x i16> %s1) {
; CHECK-LABEL: rhadds_v4i16_lsh:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov z2.s, #-1 // =0xffffffffffffffff
; CHECK-NEXT: sxth z0.s, p0/m, z0.s
; CHECK-NEXT: sxth z1.s, p0/m, z1.s
; CHECK-NEXT: eor z0.d, z0.d, z2.d
; CHECK-NEXT: sub z0.s, z1.s, z0.s
; CHECK-NEXT: lsr z0.s, z0.s, #1
; CHECK-NEXT: ret
entry:
  %s0s = sext <vscale x 4 x i16> %s0 to <vscale x 4 x i32>
  %s1s = sext <vscale x 4 x i16> %s1 to <vscale x 4 x i32>
  %add = add <vscale x 4 x i32> %s0s, splat (i32 1)
  %add2 = add <vscale x 4 x i32> %add, %s1s
  %s = lshr <vscale x 4 x i32> %add2, splat (i32 1)
  %result = trunc <vscale x 4 x i32> %s to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %result
}
|
|
|
|
; Unsigned rounding halving add on nxv4i16: operands zero-extended
; in-register (and #0xffff); SVE2 selects urhadd on .s.
define <vscale x 4 x i16> @rhaddu_v4i16(<vscale x 4 x i16> %s0, <vscale x 4 x i16> %s1) {
; SVE-LABEL: rhaddu_v4i16:
; SVE: // %bb.0: // %entry
; SVE-NEXT: mov z2.s, #-1 // =0xffffffffffffffff
; SVE-NEXT: and z0.s, z0.s, #0xffff
; SVE-NEXT: and z1.s, z1.s, #0xffff
; SVE-NEXT: eor z0.d, z0.d, z2.d
; SVE-NEXT: sub z0.s, z1.s, z0.s
; SVE-NEXT: lsr z0.s, z0.s, #1
; SVE-NEXT: ret
;
; SVE2-LABEL: rhaddu_v4i16:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: and z1.s, z1.s, #0xffff
; SVE2-NEXT: and z0.s, z0.s, #0xffff
; SVE2-NEXT: ptrue p0.s
; SVE2-NEXT: urhadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT: ret
entry:
  %s0s = zext <vscale x 4 x i16> %s0 to <vscale x 4 x i32>
  %s1s = zext <vscale x 4 x i16> %s1 to <vscale x 4 x i32>
  %add = add nuw nsw <vscale x 4 x i32> %s0s, splat (i32 1)
  %add2 = add nuw nsw <vscale x 4 x i32> %add, %s1s
  %s = lshr <vscale x 4 x i32> %add2, splat (i32 1)
  %result = trunc <vscale x 4 x i32> %s to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %result
}
|
|
|
|
; Signed rounding halving add on the legal full-width type nxv8i16:
; SVE expands via eor/orr/asr/sub; SVE2 selects srhadd directly on .h.
define <vscale x 8 x i16> @rhadds_v8i16(<vscale x 8 x i16> %s0, <vscale x 8 x i16> %s1) {
; SVE-LABEL: rhadds_v8i16:
; SVE: // %bb.0: // %entry
; SVE-NEXT: eor z2.d, z0.d, z1.d
; SVE-NEXT: orr z0.d, z0.d, z1.d
; SVE-NEXT: asr z1.h, z2.h, #1
; SVE-NEXT: sub z0.h, z0.h, z1.h
; SVE-NEXT: ret
;
; SVE2-LABEL: rhadds_v8i16:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.h
; SVE2-NEXT: srhadd z0.h, p0/m, z0.h, z1.h
; SVE2-NEXT: ret
entry:
  %s0s = sext <vscale x 8 x i16> %s0 to <vscale x 8 x i32>
  %s1s = sext <vscale x 8 x i16> %s1 to <vscale x 8 x i32>
  %add = add <vscale x 8 x i32> %s0s, splat (i32 1)
  %add2 = add <vscale x 8 x i32> %add, %s1s
  %s = ashr <vscale x 8 x i32> %add2, splat (i32 1)
  %result = trunc <vscale x 8 x i32> %s to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %result
}
|
|
|
|
; lshr variant of rhadds_v8i16. For this full-width type the combine still
; fires (CHECK lines match the ashr version), so SVE2 selects srhadd.
define <vscale x 8 x i16> @rhadds_v8i16_lsh(<vscale x 8 x i16> %s0, <vscale x 8 x i16> %s1) {
; SVE-LABEL: rhadds_v8i16_lsh:
; SVE: // %bb.0: // %entry
; SVE-NEXT: eor z2.d, z0.d, z1.d
; SVE-NEXT: orr z0.d, z0.d, z1.d
; SVE-NEXT: asr z1.h, z2.h, #1
; SVE-NEXT: sub z0.h, z0.h, z1.h
; SVE-NEXT: ret
;
; SVE2-LABEL: rhadds_v8i16_lsh:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.h
; SVE2-NEXT: srhadd z0.h, p0/m, z0.h, z1.h
; SVE2-NEXT: ret
entry:
  %s0s = sext <vscale x 8 x i16> %s0 to <vscale x 8 x i32>
  %s1s = sext <vscale x 8 x i16> %s1 to <vscale x 8 x i32>
  %add = add <vscale x 8 x i32> %s0s, splat (i32 1)
  %add2 = add <vscale x 8 x i32> %add, %s1s
  %s = lshr <vscale x 8 x i32> %add2, splat (i32 1)
  %result = trunc <vscale x 8 x i32> %s to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %result
}
|
|
|
|
; Unsigned rounding halving add on the legal full-width type nxv8i16:
; SVE expands via eor/orr/lsr/sub; SVE2 selects urhadd directly on .h.
define <vscale x 8 x i16> @rhaddu_v8i16(<vscale x 8 x i16> %s0, <vscale x 8 x i16> %s1) {
; SVE-LABEL: rhaddu_v8i16:
; SVE: // %bb.0: // %entry
; SVE-NEXT: eor z2.d, z0.d, z1.d
; SVE-NEXT: orr z0.d, z0.d, z1.d
; SVE-NEXT: lsr z1.h, z2.h, #1
; SVE-NEXT: sub z0.h, z0.h, z1.h
; SVE-NEXT: ret
;
; SVE2-LABEL: rhaddu_v8i16:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.h
; SVE2-NEXT: urhadd z0.h, p0/m, z0.h, z1.h
; SVE2-NEXT: ret
entry:
  %s0s = zext <vscale x 8 x i16> %s0 to <vscale x 8 x i32>
  %s1s = zext <vscale x 8 x i16> %s1 to <vscale x 8 x i32>
  %add = add nuw nsw <vscale x 8 x i32> %s0s, splat (i32 1)
  %add2 = add nuw nsw <vscale x 8 x i32> %add, %s1s
  %s = lshr <vscale x 8 x i32> %add2, splat (i32 1)
  %result = trunc <vscale x 8 x i32> %s to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %result
}
|
|
|
|
; Signed rounding halving add on the illegal type nxv4i8: operands are
; sign-extended in-register (sxtb); SVE2 selects srhadd on .s.
define <vscale x 4 x i8> @rhadds_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s1) {
; SVE-LABEL: rhadds_v4i8:
; SVE: // %bb.0: // %entry
; SVE-NEXT: ptrue p0.s
; SVE-NEXT: mov z2.s, #-1 // =0xffffffffffffffff
; SVE-NEXT: sxtb z0.s, p0/m, z0.s
; SVE-NEXT: sxtb z1.s, p0/m, z1.s
; SVE-NEXT: eor z0.d, z0.d, z2.d
; SVE-NEXT: sub z0.s, z1.s, z0.s
; SVE-NEXT: asr z0.s, z0.s, #1
; SVE-NEXT: ret
;
; SVE2-LABEL: rhadds_v4i8:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.s
; SVE2-NEXT: sxtb z1.s, p0/m, z1.s
; SVE2-NEXT: sxtb z0.s, p0/m, z0.s
; SVE2-NEXT: srhadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT: ret
entry:
  %s0s = sext <vscale x 4 x i8> %s0 to <vscale x 4 x i16>
  %s1s = sext <vscale x 4 x i8> %s1 to <vscale x 4 x i16>
  %add = add <vscale x 4 x i16> %s0s, splat (i16 1)
  %add2 = add <vscale x 4 x i16> %add, %s1s
  %s = ashr <vscale x 4 x i16> %add2, splat (i16 1)
  %result = trunc <vscale x 4 x i16> %s to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %result
}
|
|
|
|
; lshr variant of rhadds_v4i8: logical shift of the sign-extended sum
; prevents the srhadd combine, so SVE and SVE2 emit the same expansion.
define <vscale x 4 x i8> @rhadds_v4i8_lsh(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s1) {
; CHECK-LABEL: rhadds_v4i8_lsh:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov z2.s, #-1 // =0xffffffffffffffff
; CHECK-NEXT: sxtb z0.s, p0/m, z0.s
; CHECK-NEXT: sxtb z1.s, p0/m, z1.s
; CHECK-NEXT: eor z0.d, z0.d, z2.d
; CHECK-NEXT: sub z0.s, z1.s, z0.s
; CHECK-NEXT: and z0.s, z0.s, #0xffff
; CHECK-NEXT: lsr z0.s, z0.s, #1
; CHECK-NEXT: ret
entry:
  %s0s = sext <vscale x 4 x i8> %s0 to <vscale x 4 x i16>
  %s1s = sext <vscale x 4 x i8> %s1 to <vscale x 4 x i16>
  %add = add <vscale x 4 x i16> %s0s, splat (i16 1)
  %add2 = add <vscale x 4 x i16> %add, %s1s
  %s = lshr <vscale x 4 x i16> %add2, splat (i16 1)
  %result = trunc <vscale x 4 x i16> %s to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %result
}
|
|
|
|
; Unsigned rounding halving add on the illegal type nxv4i8: operands
; zero-extended in-register (and #0xff); SVE2 selects urhadd on .s.
define <vscale x 4 x i8> @rhaddu_v4i8(<vscale x 4 x i8> %s0, <vscale x 4 x i8> %s1) {
; SVE-LABEL: rhaddu_v4i8:
; SVE: // %bb.0: // %entry
; SVE-NEXT: mov z2.s, #-1 // =0xffffffffffffffff
; SVE-NEXT: and z0.s, z0.s, #0xff
; SVE-NEXT: and z1.s, z1.s, #0xff
; SVE-NEXT: eor z0.d, z0.d, z2.d
; SVE-NEXT: sub z0.s, z1.s, z0.s
; SVE-NEXT: lsr z0.s, z0.s, #1
; SVE-NEXT: ret
;
; SVE2-LABEL: rhaddu_v4i8:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: and z1.s, z1.s, #0xff
; SVE2-NEXT: and z0.s, z0.s, #0xff
; SVE2-NEXT: ptrue p0.s
; SVE2-NEXT: urhadd z0.s, p0/m, z0.s, z1.s
; SVE2-NEXT: ret
entry:
  %s0s = zext <vscale x 4 x i8> %s0 to <vscale x 4 x i16>
  %s1s = zext <vscale x 4 x i8> %s1 to <vscale x 4 x i16>
  %add = add nuw nsw <vscale x 4 x i16> %s0s, splat (i16 1)
  %add2 = add nuw nsw <vscale x 4 x i16> %add, %s1s
  %s = lshr <vscale x 4 x i16> %add2, splat (i16 1)
  %result = trunc <vscale x 4 x i16> %s to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %result
}
|
|
|
|
; Signed rounding halving add on nxv8i8 (i8 elements in .h lanes): operands
; are sign-extended in-register (sxtb); SVE2 selects srhadd on .h.
define <vscale x 8 x i8> @rhadds_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s1) {
; SVE-LABEL: rhadds_v8i8:
; SVE: // %bb.0: // %entry
; SVE-NEXT: ptrue p0.h
; SVE-NEXT: mov z2.h, #-1 // =0xffffffffffffffff
; SVE-NEXT: sxtb z0.h, p0/m, z0.h
; SVE-NEXT: sxtb z1.h, p0/m, z1.h
; SVE-NEXT: eor z0.d, z0.d, z2.d
; SVE-NEXT: sub z0.h, z1.h, z0.h
; SVE-NEXT: asr z0.h, z0.h, #1
; SVE-NEXT: ret
;
; SVE2-LABEL: rhadds_v8i8:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.h
; SVE2-NEXT: sxtb z1.h, p0/m, z1.h
; SVE2-NEXT: sxtb z0.h, p0/m, z0.h
; SVE2-NEXT: srhadd z0.h, p0/m, z0.h, z1.h
; SVE2-NEXT: ret
entry:
  %s0s = sext <vscale x 8 x i8> %s0 to <vscale x 8 x i16>
  %s1s = sext <vscale x 8 x i8> %s1 to <vscale x 8 x i16>
  %add = add <vscale x 8 x i16> %s0s, splat (i16 1)
  %add2 = add <vscale x 8 x i16> %add, %s1s
  %s = ashr <vscale x 8 x i16> %add2, splat (i16 1)
  %result = trunc <vscale x 8 x i16> %s to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %result
}
|
|
|
|
; lshr variant of rhadds_v8i8: logical shift of the sign-extended sum
; prevents the srhadd combine, so SVE and SVE2 emit the same expansion.
define <vscale x 8 x i8> @rhadds_v8i8_lsh(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s1) {
; CHECK-LABEL: rhadds_v8i8_lsh:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov z2.h, #-1 // =0xffffffffffffffff
; CHECK-NEXT: sxtb z0.h, p0/m, z0.h
; CHECK-NEXT: sxtb z1.h, p0/m, z1.h
; CHECK-NEXT: eor z0.d, z0.d, z2.d
; CHECK-NEXT: sub z0.h, z1.h, z0.h
; CHECK-NEXT: lsr z0.h, z0.h, #1
; CHECK-NEXT: ret
entry:
  %s0s = sext <vscale x 8 x i8> %s0 to <vscale x 8 x i16>
  %s1s = sext <vscale x 8 x i8> %s1 to <vscale x 8 x i16>
  %add = add <vscale x 8 x i16> %s0s, splat (i16 1)
  %add2 = add <vscale x 8 x i16> %add, %s1s
  %s = lshr <vscale x 8 x i16> %add2, splat (i16 1)
  %result = trunc <vscale x 8 x i16> %s to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %result
}
|
|
|
|
; Unsigned rounding halving add on nxv8i8: operands zero-extended
; in-register (and #0xff); SVE2 selects urhadd on .h.
define <vscale x 8 x i8> @rhaddu_v8i8(<vscale x 8 x i8> %s0, <vscale x 8 x i8> %s1) {
; SVE-LABEL: rhaddu_v8i8:
; SVE: // %bb.0: // %entry
; SVE-NEXT: mov z2.h, #-1 // =0xffffffffffffffff
; SVE-NEXT: and z0.h, z0.h, #0xff
; SVE-NEXT: and z1.h, z1.h, #0xff
; SVE-NEXT: eor z0.d, z0.d, z2.d
; SVE-NEXT: sub z0.h, z1.h, z0.h
; SVE-NEXT: lsr z0.h, z0.h, #1
; SVE-NEXT: ret
;
; SVE2-LABEL: rhaddu_v8i8:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: and z1.h, z1.h, #0xff
; SVE2-NEXT: and z0.h, z0.h, #0xff
; SVE2-NEXT: ptrue p0.h
; SVE2-NEXT: urhadd z0.h, p0/m, z0.h, z1.h
; SVE2-NEXT: ret
entry:
  %s0s = zext <vscale x 8 x i8> %s0 to <vscale x 8 x i16>
  %s1s = zext <vscale x 8 x i8> %s1 to <vscale x 8 x i16>
  %add = add nuw nsw <vscale x 8 x i16> %s0s, splat (i16 1)
  %add2 = add nuw nsw <vscale x 8 x i16> %add, %s1s
  %s = lshr <vscale x 8 x i16> %add2, splat (i16 1)
  %result = trunc <vscale x 8 x i16> %s to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %result
}
|
|
|
|
; Signed rounding halving add on the legal full-width type nxv16i8:
; SVE expands via eor/orr/asr/sub; SVE2 selects srhadd directly on .b.
define <vscale x 16 x i8> @rhadds_v16i8(<vscale x 16 x i8> %s0, <vscale x 16 x i8> %s1) {
; SVE-LABEL: rhadds_v16i8:
; SVE: // %bb.0: // %entry
; SVE-NEXT: eor z2.d, z0.d, z1.d
; SVE-NEXT: orr z0.d, z0.d, z1.d
; SVE-NEXT: asr z1.b, z2.b, #1
; SVE-NEXT: sub z0.b, z0.b, z1.b
; SVE-NEXT: ret
;
; SVE2-LABEL: rhadds_v16i8:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.b
; SVE2-NEXT: srhadd z0.b, p0/m, z0.b, z1.b
; SVE2-NEXT: ret
entry:
  %s0s = sext <vscale x 16 x i8> %s0 to <vscale x 16 x i16>
  %s1s = sext <vscale x 16 x i8> %s1 to <vscale x 16 x i16>
  %add = add <vscale x 16 x i16> %s0s, splat (i16 1)
  %add2 = add <vscale x 16 x i16> %add, %s1s
  %s = ashr <vscale x 16 x i16> %add2, splat (i16 1)
  %result = trunc <vscale x 16 x i16> %s to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %result
}
|
|
|
|
; lshr variant of rhadds_v16i8. For this full-width type the combine still
; fires (CHECK lines match the ashr version), so SVE2 selects srhadd.
define <vscale x 16 x i8> @rhadds_v16i8_lsh(<vscale x 16 x i8> %s0, <vscale x 16 x i8> %s1) {
; SVE-LABEL: rhadds_v16i8_lsh:
; SVE: // %bb.0: // %entry
; SVE-NEXT: eor z2.d, z0.d, z1.d
; SVE-NEXT: orr z0.d, z0.d, z1.d
; SVE-NEXT: asr z1.b, z2.b, #1
; SVE-NEXT: sub z0.b, z0.b, z1.b
; SVE-NEXT: ret
;
; SVE2-LABEL: rhadds_v16i8_lsh:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.b
; SVE2-NEXT: srhadd z0.b, p0/m, z0.b, z1.b
; SVE2-NEXT: ret
entry:
  %s0s = sext <vscale x 16 x i8> %s0 to <vscale x 16 x i16>
  %s1s = sext <vscale x 16 x i8> %s1 to <vscale x 16 x i16>
  %add = add <vscale x 16 x i16> %s0s, splat (i16 1)
  %add2 = add <vscale x 16 x i16> %add, %s1s
  %s = lshr <vscale x 16 x i16> %add2, splat (i16 1)
  %result = trunc <vscale x 16 x i16> %s to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %result
}
|
|
|
|
; Unsigned rounding halving add on the legal full-width type nxv16i8:
; SVE expands via eor/orr/lsr/sub; SVE2 selects urhadd directly on .b.
define <vscale x 16 x i8> @rhaddu_v16i8(<vscale x 16 x i8> %s0, <vscale x 16 x i8> %s1) {
; SVE-LABEL: rhaddu_v16i8:
; SVE: // %bb.0: // %entry
; SVE-NEXT: eor z2.d, z0.d, z1.d
; SVE-NEXT: orr z0.d, z0.d, z1.d
; SVE-NEXT: lsr z1.b, z2.b, #1
; SVE-NEXT: sub z0.b, z0.b, z1.b
; SVE-NEXT: ret
;
; SVE2-LABEL: rhaddu_v16i8:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.b
; SVE2-NEXT: urhadd z0.b, p0/m, z0.b, z1.b
; SVE2-NEXT: ret
entry:
  %s0s = zext <vscale x 16 x i8> %s0 to <vscale x 16 x i16>
  %s1s = zext <vscale x 16 x i8> %s1 to <vscale x 16 x i16>
  %add = add nuw nsw <vscale x 16 x i16> %s0s, splat (i16 1)
  %add2 = add nuw nsw <vscale x 16 x i16> %add, %s1s
  %s = lshr <vscale x 16 x i16> %add2, splat (i16 1)
  %result = trunc <vscale x 16 x i16> %s to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %result
}
|
|
|
|
; Halving add formed directly from an add carrying nuw/nsw (no explicit
; extend): (a + b) >> 1 with lshr; SVE2 selects uhadd.
define <vscale x 2 x i64> @haddu_v2i64_add(<vscale x 2 x i64> %s0, <vscale x 2 x i64> %s1) {
; SVE-LABEL: haddu_v2i64_add:
; SVE: // %bb.0: // %entry
; SVE-NEXT: eor z2.d, z0.d, z1.d
; SVE-NEXT: and z0.d, z0.d, z1.d
; SVE-NEXT: lsr z1.d, z2.d, #1
; SVE-NEXT: add z0.d, z0.d, z1.d
; SVE-NEXT: ret
;
; SVE2-LABEL: haddu_v2i64_add:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.d
; SVE2-NEXT: uhadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT: ret
entry:
  %add = add nuw nsw <vscale x 2 x i64> %s0, %s1
  %avg = lshr <vscale x 2 x i64> %add, splat (i64 1)
  ret <vscale x 2 x i64> %avg
}
|
|
|
|
; Signed halving add formed directly from an add carrying nuw/nsw (no
; explicit extend): (a + b) >> 1 with ashr; SVE2 selects shadd.
define <vscale x 2 x i64> @hadds_v2i64_add(<vscale x 2 x i64> %s0, <vscale x 2 x i64> %s1) {
; SVE-LABEL: hadds_v2i64_add:
; SVE: // %bb.0: // %entry
; SVE-NEXT: eor z2.d, z0.d, z1.d
; SVE-NEXT: and z0.d, z0.d, z1.d
; SVE-NEXT: asr z1.d, z2.d, #1
; SVE-NEXT: add z0.d, z0.d, z1.d
; SVE-NEXT: ret
;
; SVE2-LABEL: hadds_v2i64_add:
; SVE2: // %bb.0: // %entry
; SVE2-NEXT: ptrue p0.d
; SVE2-NEXT: shadd z0.d, p0/m, z0.d, z1.d
; SVE2-NEXT: ret
entry:
  %add = add nuw nsw <vscale x 2 x i64> %s0, %s1
  %avg = ashr <vscale x 2 x i64> %add, splat (i64 1)
  ret <vscale x 2 x i64> %avg
}
|
|
|
|
; Known-bits on zero-extending masked loads: the i8 masked loads (zero
; passthru) are selected as extending ld1b into .h lanes, so the avgflooru
; pattern and the final zext are done entirely on nxv8i16 — no separate
; extend instructions in the CHECK lines.
define void @zext_mload_avgflooru(ptr %p1, ptr %p2, <vscale x 8 x i1> %mask) {
; SVE-LABEL: zext_mload_avgflooru:
; SVE: // %bb.0:
; SVE-NEXT: ld1b { z0.h }, p0/z, [x0]
; SVE-NEXT: ld1b { z1.h }, p0/z, [x1]
; SVE-NEXT: add z0.h, z0.h, z1.h
; SVE-NEXT: lsr z0.h, z0.h, #1
; SVE-NEXT: st1h { z0.h }, p0, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: zext_mload_avgflooru:
; SVE2: // %bb.0:
; SVE2-NEXT: ld1b { z0.h }, p0/z, [x0]
; SVE2-NEXT: ld1b { z1.h }, p0/z, [x1]
; SVE2-NEXT: ptrue p1.h
; SVE2-NEXT: uhadd z0.h, p1/m, z0.h, z1.h
; SVE2-NEXT: st1h { z0.h }, p0, [x0]
; SVE2-NEXT: ret
  %ld1 = call <vscale x 8 x i8> @llvm.masked.load(ptr %p1, i32 16, <vscale x 8 x i1> %mask, <vscale x 8 x i8> zeroinitializer)
  %ld2 = call <vscale x 8 x i8> @llvm.masked.load(ptr %p2, i32 16, <vscale x 8 x i1> %mask, <vscale x 8 x i8> zeroinitializer)
  %and = and <vscale x 8 x i8> %ld1, %ld2
  %xor = xor <vscale x 8 x i8> %ld1, %ld2
  %shift = lshr <vscale x 8 x i8> %xor, splat(i8 1)
  %avg = add <vscale x 8 x i8> %and, %shift
  %avgext = zext <vscale x 8 x i8> %avg to <vscale x 8 x i16>
  call void @llvm.masked.store.nxv8i16(<vscale x 8 x i16> %avgext, ptr %p1, i32 16, <vscale x 8 x i1> %mask)
  ret void
}
|
|
|
|
; Known-bits on zero-extending masked loads, avgceilu form: the i8 masked
; loads (zero passthru) become extending ld1b into .h lanes, the rounding
; average is computed on nxv8i16, and the i8 result is stored with st1b —
; SVE2 selects urhadd with no explicit extend/truncate instructions.
define void @zext_mload_avgceilu(ptr %p1, ptr %p2, <vscale x 8 x i1> %mask) {
; SVE-LABEL: zext_mload_avgceilu:
; SVE: // %bb.0:
; SVE-NEXT: ld1b { z0.h }, p0/z, [x0]
; SVE-NEXT: mov z1.h, #-1 // =0xffffffffffffffff
; SVE-NEXT: ld1b { z2.h }, p0/z, [x1]
; SVE-NEXT: eor z0.d, z0.d, z1.d
; SVE-NEXT: sub z0.h, z2.h, z0.h
; SVE-NEXT: lsr z0.h, z0.h, #1
; SVE-NEXT: st1b { z0.h }, p0, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: zext_mload_avgceilu:
; SVE2: // %bb.0:
; SVE2-NEXT: ld1b { z0.h }, p0/z, [x0]
; SVE2-NEXT: ld1b { z1.h }, p0/z, [x1]
; SVE2-NEXT: ptrue p1.h
; SVE2-NEXT: urhadd z0.h, p1/m, z0.h, z1.h
; SVE2-NEXT: st1b { z0.h }, p0, [x0]
; SVE2-NEXT: ret
  %ld1 = call <vscale x 8 x i8> @llvm.masked.load(ptr %p1, i32 16, <vscale x 8 x i1> %mask, <vscale x 8 x i8> zeroinitializer)
  %ld2 = call <vscale x 8 x i8> @llvm.masked.load(ptr %p2, i32 16, <vscale x 8 x i1> %mask, <vscale x 8 x i8> zeroinitializer)
  %zext1 = zext <vscale x 8 x i8> %ld1 to <vscale x 8 x i16>
  %zext2 = zext <vscale x 8 x i8> %ld2 to <vscale x 8 x i16>
  %add1 = add nuw nsw <vscale x 8 x i16> %zext1, splat(i16 1)
  %add2 = add nuw nsw <vscale x 8 x i16> %add1, %zext2
  %shift = lshr <vscale x 8 x i16> %add2, splat(i16 1)
  %trunc = trunc <vscale x 8 x i16> %shift to <vscale x 8 x i8>
  call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %trunc, ptr %p1, i32 16, <vscale x 8 x i1> %mask)
  ret void
}
|