
Materialize the vector trip count computation using VPInstruction instead of directly creating IR. This is one of the last few steps needed to model the full vector skeleton in VPlan. It also simplifies the vector trip count computation for scalable vectors, as we can reuse the UF x VF computation. PR: https://github.com/llvm/llvm-project/pull/151925
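
For reference, the materialized trip-count computation matches what the tests below check in vector.ph. With an original trip count %n, VF = vscale x 2 and UF = 2, the emitted IR looks like this (a sketch with illustrative value names):

  %vscale   = call i64 @llvm.vscale.i64()
  %vf       = mul nuw i64 %vscale, 2   ; runtime VF (vscale x 2)
  %vf.x.uf  = mul i64 %vf, 2           ; VF x UF, computed once and reused
  %n.mod.vf = urem i64 %n, %vf.x.uf    ; iterations left for the scalar loop
  %n.vec    = sub i64 %n, %n.mod.vf    ; vector trip count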
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph" --version 5
; RUN: opt < %s -scalable-vectorization=on -force-target-supports-scalable-vectors=true -passes=loop-vectorize -force-vector-width=2 -force-vector-interleave=2 -S | FileCheck %s
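; The assume conditions depend on the loaded values, so they must be
; scalarized: the vector body is expected to extract lane 0 of each part's
; wide load, compare it, and pass the result to llvm.assume.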
define void @test1(ptr noalias nocapture %a, ptr noalias nocapture readonly %b) {
; CHECK-LABEL: define void @test1(
; CHECK-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1600, [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1600, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1600, [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP8]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x float>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 2 x float>, ptr [[TMP9]], align 4
; CHECK-NEXT: [[TMP10:%.*]] = extractelement <vscale x 2 x float> [[WIDE_LOAD]], i32 0
; CHECK-NEXT: [[FCMP1:%.*]] = fcmp ogt float [[TMP10]], 1.000000e+02
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <vscale x 2 x float> [[WIDE_LOAD1]], i32 0
; CHECK-NEXT: [[FCMP2:%.*]] = fcmp ogt float [[TMP12]], 1.000000e+02
; CHECK-NEXT: tail call void @llvm.assume(i1 [[FCMP1]])
; CHECK-NEXT: tail call void @llvm.assume(i1 [[FCMP2]])
; CHECK-NEXT: [[TMP14:%.*]] = fadd <vscale x 2 x float> [[WIDE_LOAD]], splat (float 1.000000e+00)
; CHECK-NEXT: [[TMP15:%.*]] = fadd <vscale x 2 x float> [[WIDE_LOAD1]], splat (float 1.000000e+00)
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 2
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
; CHECK-NEXT: store <vscale x 2 x float> [[TMP14]], ptr [[TMP16]], align 4
; CHECK-NEXT: store <vscale x 2 x float> [[TMP15]], ptr [[TMP19]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1600, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], [[FOR_END:label %.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %b, i64 %indvars.iv
  %0 = load float, ptr %arrayidx, align 4
  %cmp1 = fcmp ogt float %0, 1.000000e+02
  tail call void @llvm.assume(i1 %cmp1)
  %add = fadd float %0, 1.000000e+00
  %arrayidx5 = getelementptr inbounds float, ptr %a, i64 %indvars.iv
  store float %add, ptr %arrayidx5, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv, 1599
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body
  ret void
}

declare void @llvm.assume(i1) #0

attributes #0 = { nounwind willreturn }

%struct.data = type { ptr, ptr }

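; Here the assume conditions are loop-invariant and computed in the entry
; block; they are expected to remain in the vector body, with one llvm.assume
; call per interleaved part.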
define void @test2(ptr %a, ptr noalias %b) {
; CHECK-LABEL: define void @test2(
; CHECK-SAME: ptr [[A:%.*]], ptr noalias [[B:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[PTRINT1]], 0
; CHECK-NEXT: [[PTRINT2:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT: [[MASKCOND4:%.*]] = icmp eq i64 [[PTRINT2]], 0
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1600, [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1600, [[TMP7]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1600, [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[TMP12]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x float>, ptr [[TMP10]], align 4
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x float>, ptr [[TMP13]], align 4
; CHECK-NEXT: [[TMP14:%.*]] = fadd <vscale x 2 x float> [[WIDE_LOAD]], splat (float 1.000000e+00)
; CHECK-NEXT: [[TMP15:%.*]] = fadd <vscale x 2 x float> [[WIDE_LOAD3]], splat (float 1.000000e+00)
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND4]])
; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND4]])
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 2
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[TMP18]]
; CHECK-NEXT: store <vscale x 2 x float> [[TMP14]], ptr [[TMP16]], align 4
; CHECK-NEXT: store <vscale x 2 x float> [[TMP15]], ptr [[TMP19]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1600, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], [[FOR_END:label %.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
;
entry:
  %ptrint1 = ptrtoint ptr %a to i64
  %maskcond = icmp eq i64 %ptrint1, 0
  %ptrint2 = ptrtoint ptr %b to i64
  %maskcond4 = icmp eq i64 %ptrint2, 0
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  tail call void @llvm.assume(i1 %maskcond)
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %indvars.iv
  %0 = load float, ptr %arrayidx, align 4
  %add = fadd float %0, 1.000000e+00
  tail call void @llvm.assume(i1 %maskcond4)
  %arrayidx5 = getelementptr inbounds float, ptr %b, i64 %indvars.iv
  store float %add, ptr %arrayidx5, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv, 1599
  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0

for.end:                                          ; preds = %for.body
  ret void
}

; Test case for PR43620. Make sure we can vectorize with predication in the
; presence of assume calls. For now, check that we drop all assumes in
; predicated blocks in the vector body.
define void @predicated_assume(ptr noalias nocapture readonly %a, ptr noalias nocapture %b, i64 %n) {
; Check that the vector.body does not contain any assumes.
; CHECK-LABEL: define void @predicated_assume(
; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 2 x i64> [[TMP7]], splat (i64 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP8]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP5]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[STEP_ADD:%.*]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP9:%.*]] = icmp ult <vscale x 2 x i64> [[VEC_IND]], splat (i64 495616)
; CHECK-NEXT: [[TMP10:%.*]] = icmp ult <vscale x 2 x i64> [[STEP_ADD]], splat (i64 495616)
; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP9]], <vscale x 2 x float> splat (float 2.300000e+01), <vscale x 2 x float> splat (float 4.200000e+01)
; CHECK-NEXT: [[PREDPHI1:%.*]] = select <vscale x 2 x i1> [[TMP10]], <vscale x 2 x float> splat (float 2.300000e+01), <vscale x 2 x float> splat (float 4.200000e+01)
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i64 [[TMP12]], 2
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i64 [[TMP13]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x float>, ptr [[TMP11]], align 4
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 2 x float>, ptr [[TMP14]], align 4
; CHECK-NEXT: [[TMP15:%.*]] = fmul <vscale x 2 x float> [[PREDPHI]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP16:%.*]] = fmul <vscale x 2 x float> [[PREDPHI1]], [[WIDE_LOAD2]]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 2
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[TMP19]]
; CHECK-NEXT: store <vscale x 2 x float> [[TMP15]], ptr [[TMP17]], align 4
; CHECK-NEXT: store <vscale x 2 x float> [[TMP16]], ptr [[TMP20]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[STEP_ADD]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], [[FOR_COND_CLEANUP:label %.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
;
entry:
  br label %for.body

for.body:                                         ; preds = %entry, %if.end5
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %if.end5 ]
  %cmp1 = icmp ult i64 %indvars.iv, 495616
  br i1 %cmp1, label %if.end5, label %if.else

if.else:                                          ; preds = %for.body
  %cmp2 = icmp ult i64 %indvars.iv, 991232
  tail call void @llvm.assume(i1 %cmp2)
  br label %if.end5

if.end5:                                          ; preds = %for.body, %if.else
  %x.0 = phi float [ 4.200000e+01, %if.else ], [ 2.300000e+01, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %a, i64 %indvars.iv
  %0 = load float, ptr %arrayidx, align 4
  %mul = fmul float %x.0, %0
  %arrayidx7 = getelementptr inbounds float, ptr %b, i64 %indvars.iv
  store float %mul, ptr %arrayidx7, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp = icmp eq i64 %indvars.iv.next, %n
  br i1 %cmp, label %for.cond.cleanup, label %for.body, !llvm.loop !0

for.cond.cleanup:                                 ; preds = %if.end5
  ret void
}

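; The metadata requests scalable vectorization for the annotated loops.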
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}