; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc --mtriple=loongarch64 --mattr=+lsx %s -o - | FileCheck %s

; Full 128-bit vector: log2(16) = 4 pairwise unsigned-max steps, then store lane 0.
define void @vec_reduce_umax_v16i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a0, 0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <16 x i8>, ptr %src
  %res = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %v)
  store i8 %res, ptr %dst
  ret void
}

; Sub-register <8 x i8>: scalar 64-bit load inserted into a vector, then 3 max steps.
define void @vec_reduce_umax_v8i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $a0, $a0, 0
; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <8 x i8>, ptr %src
  %res = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> %v)
  store i8 %res, ptr %dst
  ret void
}

; <4 x i8>: 32-bit scalar load inserted into a vector, then 2 max steps.
define void @vec_reduce_umax_v4i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $a0, $a0, 0
; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
; CHECK-NEXT:    vshuf4i.b $vr1, $vr0, 14
; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <4 x i8>, ptr %src
  %res = call i8 @llvm.vector.reduce.umax.v4i8(<4 x i8> %v)
  store i8 %res, ptr %dst
  ret void
}

; Minimal <2 x i8> case: one replicate + one unsigned max.
define void @vec_reduce_umax_v2i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.h $a0, $a0, 0
; CHECK-NEXT:    vinsgr2vr.h $vr0, $a0, 0
; CHECK-NEXT:    vreplvei.b $vr1, $vr0, 1
; CHECK-NEXT:    vmax.bu $vr0, $vr0, $vr1
; CHECK-NEXT:    vstelm.b $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <2 x i8>, ptr %src
  %res = call i8 @llvm.vector.reduce.umax.v2i8(<2 x i8> %v)
  store i8 %res, ptr %dst
  ret void
}

; Full 128-bit vector of i16: 3 pairwise unsigned-max steps.
define void @vec_reduce_umax_v8i16(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a0, 0
; CHECK-NEXT:    vbsrl.v $vr1, $vr0, 8
; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <8 x i16>, ptr %src
  %res = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %v)
  store i16 %res, ptr %dst
  ret void
}

; <4 x i16>: 64-bit scalar load inserted into a vector, then 2 max steps.
define void @vec_reduce_umax_v4i16(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $a0, $a0, 0
; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
; CHECK-NEXT:    vshuf4i.h $vr1, $vr0, 14
; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <4 x i16>, ptr %src
  %res = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> %v)
  store i16 %res, ptr %dst
  ret void
}

; Minimal <2 x i16> case: one replicate + one unsigned max.
define void @vec_reduce_umax_v2i16(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.w $a0, $a0, 0
; CHECK-NEXT:    vinsgr2vr.w $vr0, $a0, 0
; CHECK-NEXT:    vreplvei.h $vr1, $vr0, 1
; CHECK-NEXT:    vmax.hu $vr0, $vr0, $vr1
; CHECK-NEXT:    vstelm.h $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <2 x i16>, ptr %src
  %res = call i16 @llvm.vector.reduce.umax.v2i16(<2 x i16> %v)
  store i16 %res, ptr %dst
  ret void
}

; Full 128-bit vector of i32: 2 pairwise unsigned-max steps.
define void @vec_reduce_umax_v4i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a0, 0
; CHECK-NEXT:    vshuf4i.w $vr1, $vr0, 14
; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <4 x i32>, ptr %src
  %res = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %v)
  store i32 %res, ptr %dst
  ret void
}

; Minimal <2 x i32> case: one replicate + one unsigned max.
define void @vec_reduce_umax_v2i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ld.d $a0, $a0, 0
; CHECK-NEXT:    vinsgr2vr.d $vr0, $a0, 0
; CHECK-NEXT:    vreplvei.w $vr1, $vr0, 1
; CHECK-NEXT:    vmax.wu $vr0, $vr0, $vr1
; CHECK-NEXT:    vstelm.w $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <2 x i32>, ptr %src
  %res = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> %v)
  store i32 %res, ptr %dst
  ret void
}

; <2 x i64>: single replicate of lane 1 + one unsigned max, store lane 0.
define void @vec_reduce_umax_v2i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vld $vr0, $a0, 0
; CHECK-NEXT:    vreplvei.d $vr1, $vr0, 1
; CHECK-NEXT:    vmax.du $vr0, $vr0, $vr1
; CHECK-NEXT:    vstelm.d $vr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <2 x i64>, ptr %src
  %res = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> %v)
  store i64 %res, ptr %dst
  ret void
}