llvm-project/llvm/test/Transforms/LoopVectorize/scalable-loop-unpredicated-body-scalar-tail.ll
Florian Hahn db98ac43ec
[LV] Use shl for ((VF * Step) * vscale) in createStepForVF. (#153495)
Directly emit shl instead of a multiply if VF * Step is a power-of-2. The
main motivation here is to prepare the code and test for directly
generating and expanding a SCEV expression of the minimum iteration
count. SCEVExpander will directly emit shl for multiplies with
powers-of-2.

InstCombine will also perform this combine, so end-to-end this should
effectively be NFC.

PR: https://github.com/llvm/llvm-project/pull/153495
2025-08-14 19:27:51 +01:00
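
As a minimal sketch of the rewrite this commit describes (illustrative IR only, not taken from the patch; it assumes VF=4 and an interleave factor of 2, matching the RUN lines below), the runtime step 8 * vscale is emitted as a shift because 8 is a power of 2:

  %vscale = call i64 @llvm.vscale.i64()
  ; previously: %step = mul nuw i64 %vscale, 8
  %step = shl nuw i64 %vscale, 3    ; 8 * vscale, since 8 == 1 << 3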

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph\:" --version 5
; RUN: opt -S -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -force-target-supports-scalable-vectors=true -scalable-vectorization=on < %s | FileCheck %s --check-prefix=CHECKUF1
; RUN: opt -S -passes=loop-vectorize -force-vector-interleave=2 -force-vector-width=4 -force-target-supports-scalable-vectors=true -scalable-vectorization=on < %s | FileCheck %s --check-prefix=CHECKUF2
; For an interleave factor of 2, vscale is scaled by 8 instead of 4 (and thus shifted left by 3 instead of 2).
; There is also the address for the second part: e.g. instead of loading directly at the index into %b (IDXB), the second load indexes at IDXB + vscale * 4.
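; An illustrative sketch of that second-part address for UF=2 (hypothetical names, not matched by FileCheck):
;   %vs   = call i64 @llvm.vscale.i64()
;   %off  = shl nuw i64 %vs, 2                                   ; vscale * 4 lanes
;   %b.p2 = getelementptr inbounds double, ptr %b.p1, i64 %off   ; address of the second load/store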
define void @loop(i64 %N, ptr noalias %a, ptr noalias %b) {
; CHECKUF1-LABEL: define void @loop(
; CHECKUF1-SAME: i64 [[N:%.*]], ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
; CHECKUF1-NEXT: [[ENTRY:.*:]]
; CHECKUF1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECKUF1-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
; CHECKUF1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECKUF1-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECKUF1: [[VECTOR_PH]]:
; CHECKUF1-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECKUF1-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 4
; CHECKUF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; CHECKUF1-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECKUF1-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECKUF1: [[VECTOR_BODY]]:
; CHECKUF1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECKUF1-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[INDEX]]
; CHECKUF1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x double>, ptr [[TMP7]], align 8
; CHECKUF1-NEXT: [[TMP8:%.*]] = fadd <vscale x 4 x double> [[WIDE_LOAD]], splat (double 1.000000e+00)
; CHECKUF1-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[INDEX]]
; CHECKUF1-NEXT: store <vscale x 4 x double> [[TMP8]], ptr [[TMP9]], align 8
; CHECKUF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECKUF1-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECKUF1-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECKUF1: [[MIDDLE_BLOCK]]:
; CHECKUF1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECKUF1-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
; CHECKUF1: [[SCALAR_PH]]:
;
; CHECKUF2-LABEL: define void @loop(
; CHECKUF2-SAME: i64 [[N:%.*]], ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
; CHECKUF2-NEXT: [[ENTRY:.*:]]
; CHECKUF2-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECKUF2-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 3
; CHECKUF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECKUF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECKUF2: [[VECTOR_PH]]:
; CHECKUF2-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECKUF2-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 8
; CHECKUF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; CHECKUF2-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECKUF2-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECKUF2: [[VECTOR_BODY]]:
; CHECKUF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECKUF2-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[INDEX]]
; CHECKUF2-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECKUF2-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP8]], 2
; CHECKUF2-NEXT: [[TMP9:%.*]] = getelementptr inbounds double, ptr [[TMP7]], i64 [[TMP16]]
; CHECKUF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x double>, ptr [[TMP7]], align 8
; CHECKUF2-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x double>, ptr [[TMP9]], align 8
; CHECKUF2-NEXT: [[TMP10:%.*]] = fadd <vscale x 4 x double> [[WIDE_LOAD]], splat (double 1.000000e+00)
; CHECKUF2-NEXT: [[TMP11:%.*]] = fadd <vscale x 4 x double> [[WIDE_LOAD3]], splat (double 1.000000e+00)
; CHECKUF2-NEXT: [[TMP12:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[INDEX]]
; CHECKUF2-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
; CHECKUF2-NEXT: [[TMP17:%.*]] = shl nuw i64 [[TMP13]], 2
; CHECKUF2-NEXT: [[TMP14:%.*]] = getelementptr inbounds double, ptr [[TMP12]], i64 [[TMP17]]
; CHECKUF2-NEXT: store <vscale x 4 x double> [[TMP10]], ptr [[TMP12]], align 8
; CHECKUF2-NEXT: store <vscale x 4 x double> [[TMP11]], ptr [[TMP14]], align 8
; CHECKUF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECKUF2-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECKUF2-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECKUF2: [[MIDDLE_BLOCK]]:
; CHECKUF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECKUF2-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
; CHECKUF2: [[SCALAR_PH]]:
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %arrayidx = getelementptr inbounds double, ptr %b, i64 %iv
  %0 = load double, ptr %arrayidx, align 8
  %add = fadd double %0, 1.000000e+00
  %arrayidx2 = getelementptr inbounds double, ptr %a, i64 %iv
  store double %add, ptr %arrayidx2, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %ec = icmp eq i64 %iv.next, %N
  br i1 %ec, label %exit, label %loop, !llvm.loop !1

exit:
  ret void
}

!1 = distinct !{!1, !2}
!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}