; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc --mtriple=loongarch64 --mattr=+lasx %s -o - | FileCheck %s

;; Tests for lowering of llvm.vector.reduce.umax on 256-bit LASX vectors.
;; Each function loads a vector from %src, reduces it with unsigned max,
;; and stores the scalar result to %dst.

define void @vec_reduce_umax_v32i8(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a0, 0
; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 228
; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
; CHECK-NEXT:    xvsrli.d $xr1, $xr1, 32
; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
; CHECK-NEXT:    xvshuf4i.b $xr1, $xr1, 14
; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
; CHECK-NEXT:    xvrepl128vei.b $xr1, $xr1, 1
; CHECK-NEXT:    xvmax.bu $xr0, $xr0, $xr1
; CHECK-NEXT:    xvstelm.b $xr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <32 x i8>, ptr %src
  %res = call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %v)
  store i8 %res, ptr %dst
  ret void
}

define void @vec_reduce_umax_v16i16(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a0, 0
; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 228
; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
; CHECK-NEXT:    xvbsrl.v $xr1, $xr1, 8
; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
; CHECK-NEXT:    xvshuf4i.h $xr1, $xr1, 14
; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
; CHECK-NEXT:    xvrepl128vei.h $xr1, $xr1, 1
; CHECK-NEXT:    xvmax.hu $xr0, $xr0, $xr1
; CHECK-NEXT:    xvstelm.h $xr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <16 x i16>, ptr %src
  %res = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %v)
  store i16 %res, ptr %dst
  ret void
}

define void @vec_reduce_umax_v8i32(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a0, 0
; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 78
; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 228
; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
; CHECK-NEXT:    xvshuf4i.w $xr1, $xr1, 14
; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
; CHECK-NEXT:    xvrepl128vei.w $xr1, $xr1, 1
; CHECK-NEXT:    xvmax.wu $xr0, $xr0, $xr1
; CHECK-NEXT:    xvstelm.w $xr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <8 x i32>, ptr %src
  %res = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %v)
  store i32 %res, ptr %dst
  ret void
}

define void @vec_reduce_umax_v4i64(ptr %src, ptr %dst) nounwind {
; CHECK-LABEL: vec_reduce_umax_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a0, 0
; CHECK-NEXT:    pcalau12i $a0, %pc_hi20(.LCPI3_0)
; CHECK-NEXT:    xvld $xr1, $a0, %pc_lo12(.LCPI3_0)
; CHECK-NEXT:    xvpermi.d $xr2, $xr0, 78
; CHECK-NEXT:    xvshuf.d $xr1, $xr0, $xr2
; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
; CHECK-NEXT:    xvpermi.d $xr1, $xr0, 68
; CHECK-NEXT:    xvrepl128vei.d $xr1, $xr1, 1
; CHECK-NEXT:    xvmax.du $xr0, $xr0, $xr1
; CHECK-NEXT:    xvstelm.d $xr0, $a1, 0, 0
; CHECK-NEXT:    ret
  %v = load <4 x i64>, ptr %src
  %res = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %v)
  store i64 %res, ptr %dst
  ret void
}