llvm-project/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll
Florian Hahn 1de3dc7d23 [LV] Bail out early if BTC+1 wraps.
Currently we fail to detect the case where BTC + 1 wraps, i.e. the
vector trip count is 0. In those cases, the minimum iteration count
check will fail and the vector code will never be executed.

Explicitly check for this condition in computeMaxVF and avoid trying to
vectorize altogether.

Note that a number of tests needed to be updated because the vector
loop would never be executed given the input IR.

Fixes https://github.com/llvm/llvm-project/issues/122558.
2025-01-14 22:07:38 +00:00
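
A minimal sketch of the kind of guard described above, assuming a
PredicatedScalarEvolution for the loop. The helper name is hypothetical and
this is not the actual patch (which lives in
LoopVectorizationCostModel::computeMaxVF); it only illustrates the check that
BTC + 1 wraps to 0.

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

// Hypothetical helper: returns true when the backedge-taken count is known to
// be all-ones in its type, i.e. BTC + 1 wraps to 0 and the vector trip count
// would be 0.
static bool backedgeCountPlusOneWraps(PredicatedScalarEvolution &PSE) {
  const SCEV *BTC = PSE.getBackedgeTakenCount();
  if (isa<SCEVCouldNotCompute>(BTC))
    return false; // Unknown trip counts are rejected elsewhere.
  ScalarEvolution &SE = *PSE.getSE();
  // BTC == -1 means BTC + 1 overflows to 0.
  return SE.isKnownPredicate(CmpInst::ICMP_EQ, BTC,
                             SE.getMinusOne(BTC->getType()));
}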

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: opt -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -lv-strided-pointer-ivs=true -S %s | FileCheck %s
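; The strided address is computed from %mul = %add * %trunc, where the step
; %add = %arg + 1 has an unknown sign, so vectorization is guarded by a runtime
; SCEV overflow check (vector.scevcheck) on that multiply.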
define void @step_direction_unknown(i32 %arg, ptr %dst) {
; CHECK-LABEL: define void @step_direction_unknown
; CHECK-SAME: (i32 [[ARG:%.*]], ptr [[DST:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[ARG]], 1
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK: vector.scevcheck:
; CHECK-NEXT: [[TMP0:%.*]] = sub i32 -1, [[ARG]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[ADD]], 0
; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 [[TMP0]], i32 [[ADD]]
; CHECK-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[TMP2]], i32 1023)
; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; CHECK-NEXT: [[TMP3:%.*]] = sub i32 0, [[MUL_RESULT]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp ugt i32 [[TMP3]], 0
; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP1]], i1 [[TMP4]], i1 false
; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP5]], [[MUL_OVERFLOW]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i32 [[ADD]], 0
; CHECK-NEXT: br i1 [[TMP6]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[ADD]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP8:%.*]] = mul <4 x i32> [[BROADCAST_SPLAT]], [[VEC_IND]]
; CHECK-NEXT: [[TMP9:%.*]] = zext <4 x i32> [[TMP8]] to <4 x i64>
; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[TMP9]], i32 0
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i64> [[TMP9]], i32 1
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i64> [[TMP9]], i32 2
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP14]]
; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i64> [[TMP9]], i32 3
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP16]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[TMP11]], align 8
; CHECK-NEXT: store double 0.000000e+00, ptr [[TMP13]], align 8
; CHECK-NEXT: store double 0.000000e+00, ptr [[TMP15]], align 8
; CHECK-NEXT: store double 0.000000e+00, ptr [[TMP17]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, [[MIDDLE_BLOCK]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TRUNC:%.*]] = trunc i64 [[PHI]] to i32
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[ADD]], [[TRUNC]]
; CHECK-NEXT: [[ZEXT:%.*]] = zext i32 [[MUL]] to i64
; CHECK-NEXT: [[GETELEMENTPTR:%.*]] = getelementptr double, ptr [[DST]], i64 [[ZEXT]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GETELEMENTPTR]], align 8
; CHECK-NEXT: [[ADD2]] = add i64 [[PHI]], 1
; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i64 [[ADD2]], 1024
; CHECK-NEXT: br i1 [[ICMP]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
%add = add i32 %arg, 1
br label %loop
loop:
%phi = phi i64 [ 0, %entry ], [ %add2, %loop ]
%trunc = trunc i64 %phi to i32
%mul = mul i32 %add, %trunc
%zext = zext i32 %mul to i64
%getelementptr = getelementptr double, ptr %dst, i64 %zext
store double 0.000000e+00, ptr %getelementptr, align 8
%add2 = add i64 %phi, 1
%icmp = icmp eq i64 %add2, 1024
br i1 %icmp, label %exit, label %loop
exit:
ret void
}
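; The pointer IV steps by %mul = 2 * %x each iteration; the SCEV predicate
; required for it is known to hold at compile time, so no vector.scevcheck is
; emitted and entry branches directly to vector.ph.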
define void @integer_induction_wraps_scev_predicate_known(i32 %x, ptr %call, ptr %start) {
; CHECK-LABEL: define void @integer_induction_wraps_scev_predicate_known
; CHECK-SAME: (i32 [[X:%.*]], ptr [[CALL:%.*]], ptr [[START:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[X]], 1
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 992, [[TMP0]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP1]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP0]], 4
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP0]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i64> [[DOTSPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = mul <4 x i64> <i64 0, i64 1, i64 2, i64 3>, [[DOTSPLAT]]
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> [[TMP4]]
; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[INDEX]] to i32
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 30, [[DOTCAST]]
; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr ptr, ptr [[TMP6]], i32 0
; CHECK-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP7]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 992
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 false, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 1022, [[MIDDLE_BLOCK]] ], [ 30, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[TMP2]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_COND:%.*]]
; CHECK: for.cond:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_COND]] ]
; CHECK-NEXT: [[P_0:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[ADD_PTR:%.*]], [[FOR_COND]] ]
; CHECK-NEXT: [[ADD_PTR]] = getelementptr i8, ptr [[P_0]], i32 [[MUL]]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[IV]]
; CHECK-NEXT: store ptr [[P_0]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INC]] = add i32 [[IV]], 1
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i32 [[IV]], 1024
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[FOR_END]], label [[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
%mul = shl i32 %x, 1
br label %for.cond
for.cond: ; preds = %for.body, %entry
%iv = phi i32 [ 30, %entry ], [ %inc, %for.cond ]
%p.0 = phi ptr [ %start, %entry ], [ %add.ptr, %for.cond ]
%add.ptr = getelementptr i8, ptr %p.0, i32 %mul
%arrayidx = getelementptr ptr, ptr %call, i32 %iv
store ptr %p.0, ptr %arrayidx, align 4
%inc = add i32 %iv, 1
%tobool.not = icmp eq i32 %iv, 1024
br i1 %tobool.not, label %for.end, label %for.cond
for.end: ; preds = %for.cond
ret void
}
@h = global i64 0
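; The loop is controlled by an i16 IV that is zero-extended to index A and C;
; vectorization needs a runtime check that the narrow IV does not wrap
; (vector.scevcheck) as well as a memcheck that A and C do not overlap.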
define void @implied_wrap_predicate(ptr %A, ptr %B, ptr %C) {
; CHECK-LABEL: define void @implied_wrap_predicate
; CHECK-SAME: (ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A3:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[C2:%.*]] = ptrtoint ptr [[C]] to i64
; CHECK-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[A3]], 16
; CHECK-NEXT: [[UMAX4:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP0]], i64 add (i64 ptrtoint (ptr @h to i64), i64 1))
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[UMAX4]], -9
; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], [[A3]]
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; CHECK: vector.scevcheck:
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[A1]], 16
; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP5]], i64 add (i64 ptrtoint (ptr @h to i64), i64 1))
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[UMAX]], -9
; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], [[A1]]
; CHECK-NEXT: [[TMP8:%.*]] = lshr i64 [[TMP7]], 3
; CHECK-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP8]] to i16
; CHECK-NEXT: [[TMP10:%.*]] = add i16 2, [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp ult i16 [[TMP10]], 2
; CHECK-NEXT: [[TMP12:%.*]] = icmp ugt i64 [[TMP8]], 65535
; CHECK-NEXT: [[TMP13:%.*]] = or i1 [[TMP11]], [[TMP12]]
; CHECK-NEXT: br i1 [[TMP13]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[TMP14:%.*]] = sub i64 [[C2]], [[A3]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], 32
; CHECK-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i16
; CHECK-NEXT: [[IND_END:%.*]] = add i16 1, [[DOTCAST]]
; CHECK-NEXT: [[IND_END5:%.*]] = add i64 1, [[N_VEC]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 1, [[INDEX]]
; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[A]], i64 [[TMP15]]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[TMP16]], i32 0
; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr [[TMP17]], align 4
; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[C]], i64 [[TMP15]]
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[TMP18]], i32 0
; CHECK-NEXT: store <4 x i64> zeroinitializer, ptr [[TMP19]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 1, [[VECTOR_MEMCHECK]] ], [ 1, [[VECTOR_SCEVCHECK]] ], [ 1, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_RESUME_VAL6:%.*]] = phi i64 [ [[IND_END5]], [[MIDDLE_BLOCK]] ], [ 1, [[VECTOR_MEMCHECK]] ], [ 1, [[VECTOR_SCEVCHECK]] ], [ 1, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[IV_EXT:%.*]] = phi i64 [ [[BC_RESUME_VAL6]], [[SCALAR_PH]] ], [ [[IV_EXT_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV_EXT]]
; CHECK-NEXT: store i64 0, ptr [[GEP_A]], align 4
; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr i64, ptr [[C]], i64 [[IV_EXT]]
; CHECK-NEXT: store i64 0, ptr [[GEP_C]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
; CHECK-NEXT: [[IV_EXT_NEXT]] = zext i16 [[IV_NEXT]] to i64
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV_EXT_NEXT]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt ptr [[GEP]], @h
; CHECK-NEXT: br i1 [[CMP]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
br label %loop
loop:
%iv = phi i16 [ 1, %entry ], [ %iv.next, %loop ]
%iv.ext = phi i64 [ 1, %entry ], [ %iv.ext.next, %loop ]
%gep.A = getelementptr i64, ptr %A, i64 %iv.ext
store i64 0, ptr %gep.A
%gep.C = getelementptr i64, ptr %C, i64 %iv.ext
store i64 0, ptr %gep.C
%iv.next = add i16 %iv, 1
%iv.ext.next = zext i16 %iv.next to i64
%gep = getelementptr i64, ptr %A, i64 %iv.ext.next
%cmp = icmp ugt ptr %gep, @h
br i1 %cmp, label %exit, label %loop
exit:
ret void
}