
Directly emit shl instead of a multiply if VF * Step is a power of 2. The main motivation here is to prepare the code and test for directly generating and expanding a SCEV expression for the minimum iteration count. SCEVExpander directly emits shl for multiplies by powers of 2, and InstCombine also performs this combine, so end-to-end this should effectively be NFC. PR: https://github.com/llvm/llvm-project/pull/153495
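For illustration only (not part of the patch or of the test below): with VF = 4 and Step = 1 on a scalable vector target, the minimum-iteration-count threshold is vscale * 4, and because 4 is a power of 2 the multiply can be emitted directly as a shift, matching what SCEVExpander would produce. A minimal sketch in LLVM IR; the function name is hypothetical and chosen only for this example:

declare i64 @llvm.vscale.i64()

; Hypothetical sketch: compute the minimum-iteration-count threshold VF * Step = vscale * 4.
define i64 @min_iters_threshold_sketch() {
  %vscale = call i64 @llvm.vscale.i64()
  ; Previously this would be emitted as: %vf = mul nuw i64 %vscale, 4
  ; With this change it is emitted as a shift by log2(4) = 2, which is equivalent.
  %vf = shl nuw i64 %vscale, 2
  ret i64 %vf
}

This corresponds to the NO-VP checks below, where the minimum-iterations guard now compares %n against a shl of vscale rather than a mul.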
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize \
; RUN: -force-ordered-reductions=true -hints-allow-reordering=false \
; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=IF-EVL

; RUN: opt -passes=loop-vectorize \
; RUN: -force-ordered-reductions=true -hints-allow-reordering=false \
; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=NO-VP

define float @fadd(ptr noalias nocapture readonly %a, i64 %n) {
; IF-EVL-LABEL: @fadd(
; IF-EVL-NEXT: entry:
; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; IF-EVL: vector.ph:
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = phi i64 [ [[N:%.*]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP14]] = call float @llvm.vp.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP9]], [[TMP15]]
; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD]] = fadd float [[TMP17]], [[SUM_07]]
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; IF-EVL: for.end:
; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ]
; IF-EVL-NEXT: ret float [[ADD_LCSSA]]
;
; NO-VP-LABEL: @fadd(
; NO-VP-NEXT: entry:
; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N1:%.*]], [[TMP1]]
; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
; NO-VP: vector.ph:
; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N1]], [[TMP3]]
; NO-VP-NEXT: [[N:%.*]] = sub i64 [[N1]], [[N_MOD_VF]]
; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
; NO-VP: vector.body:
; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; NO-VP-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[ARRAYIDX]], align 4
; NO-VP-NEXT: [[ADD]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[SUM_07]], <vscale x 4 x float> [[WIDE_LOAD]])
; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]]
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; NO-VP: middle.block:
; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N1]], [[N]]
; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; NO-VP: scalar.ph:
; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ]
; NO-VP-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[ADD]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY1]] ]
; NO-VP-NEXT: br label [[FOR_BODY1:%.*]]
; NO-VP: for.body:
; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
; NO-VP-NEXT: [[SUM_7:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY1]] ]
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1]], align 4
; NO-VP-NEXT: [[ADD1]] = fadd float [[TMP9]], [[SUM_7]]
; NO-VP-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT1:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N1]]
; NO-VP-NEXT: br i1 [[EXITCOND_NOT1]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP3:![0-9]+]]
; NO-VP: for.end:
; NO-VP-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY1]] ], [ [[ADD]], [[MIDDLE_BLOCK]] ]
; NO-VP-NEXT: ret float [[ADD_LCSSA]]
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %sum.07 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
  %0 = load float, ptr %arrayidx, align 4
  %add = fadd float %0, %sum.07
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond.not = icmp eq i64 %iv.next, %n
  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0

for.end:
  ret float %add
}

!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.vectorize.enable", i1 true}