
This patch implements explicit unrolling by UF as a VPlan transform. In follow-up patches this will allow simplifying VPTransformState (no need to store unrolled parts) as well as recipe execution (no need to generate code for multiple parts in each recipe). It also allows for more general optimizations (e.g. avoiding generating code for recipes that are uniform across parts). It also unifies the logic dealing with unrolled parts in a single place, rather than spreading it out across multiple places (e.g. VPlan post-processing for header-phi recipes previously). In the initial implementation, a number of recipes still take the unrolled part as an additional, optional argument, if their execution depends on the unrolled part. The computation of start/step values for scalable inductions changed slightly. Previously the step would be computed as a scalar and then splatted; now vscale gets splatted and multiplied by the step in a vector mul. This has been split off from https://github.com/llvm/llvm-project/pull/94339, which also includes changes to simplify VPTransformState and recipes' ::execute. The current version mostly leaves the existing ::execute untouched and instead sets VPTransformState::UF to 1. A follow-up patch will clean up all references to VPTransformState::UF. Another follow-up patch will simplify VPTransformState to only store a single vector value per VPValue. PR: https://github.com/llvm/llvm-project/pull/95842
424 lines
24 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; This is the loop in C++ being vectorized in this file with
;vector.reverse
;  #pragma clang loop vectorize_width(4, scalable)
;  for (int i = N-1; i >= 0; --i)
;    a[i] = b[i] + 1.0;

; REQUIRES: asserts
; RUN: opt -passes=loop-vectorize,dce,instcombine -mtriple riscv64-linux-gnu \
; RUN:   -mattr=+v -debug-only=loop-vectorize -scalable-vectorization=on \
; RUN:   -riscv-v-vector-bits-min=128 -disable-output < %s 2>&1 | FileCheck %s
; Reverse i32 loop (a[i] = b[i] + 1), forced to VF=vscale x 4. The CHECK lines
; below pin the vectorizer's debug output, including the initial and final
; VPlans. Extraction-artifact "|" lines have been removed to restore valid IR.
define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocapture noundef readonly %B, i32 noundef signext %n) {
; CHECK-LABEL: 'vector_reverse_i64'
; CHECK-NEXT: LV: Loop hints: force=enabled width=vscale x 4 interleave=0
; CHECK-NEXT: LV: Found a loop: for.body
; CHECK-NEXT: LV: Found an induction variable.
; CHECK-NEXT: LV: Found an induction variable.
; CHECK-NEXT: LV: Did not find one integer induction var.
; CHECK-NEXT: LV: We can vectorize this loop (with a runtime bound check)!
; CHECK-NEXT: LV: Loop does not require scalar epilogue
; CHECK-NEXT: LV: Found trip count: 0
; CHECK-NEXT: LV: Scalable vectorization is available
; CHECK-NEXT: LV: The max safe fixed VF is: 67108864.
; CHECK-NEXT: LV: The max safe scalable VF is: vscale x 4294967295.
; CHECK-NEXT: LV: Found uniform instruction: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Found uniform instruction: %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
; CHECK-NEXT: LV: Found uniform instruction: %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom
; CHECK-NEXT: LV: Found uniform instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found uniform instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found uniform instruction: %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
; CHECK-NEXT: LV: Found uniform instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: LV: Found uniform instruction: %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ]
; CHECK-NEXT: LV: Found uniform instruction: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ]
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: %1 = load i32, ptr %arrayidx, align 4
; CHECK-NEXT: LV: Found an estimated cost of 2 for VF vscale x 4 For instruction: %add9 = add i32 %1, 1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom
; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: store i32 %add9, ptr %arrayidx3, align 4
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
; CHECK-NEXT: LV: Using user VF vscale x 4.
; CHECK-NEXT: LV: Loop does not require scalar epilogue
; CHECK-NEXT: LV: Scalarizing: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Scalarizing: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Scalarizing: %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
; CHECK-NEXT: LV: Scalarizing: %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom
; CHECK-NEXT: LV: Scalarizing: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Scalarizing: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: VPlan 'Initial VPlan for VF={vscale x 4},UF>=1' {
; CHECK-NEXT: Live-in vp<%0> = VF * UF
; CHECK-NEXT: Live-in vp<%1> = vector-trip-count
; CHECK-NEXT: vp<%2> = original trip-count
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<for.body.preheader>:
; CHECK-NEXT: IR %0 = zext i32 %n to i64
; CHECK-NEXT: EMIT vp<%2> = EXPAND SCEV (zext i32 %n to i64)
; CHECK-NEXT: No successors
; CHECK-EMPTY:
; CHECK-NEXT: vector.ph:
; CHECK-NEXT: Successor(s): vector loop
; CHECK-EMPTY:
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<%3> = CANONICAL-INDUCTION ir<0>, vp<%8>
; CHECK-NEXT: vp<%4> = DERIVED-IV ir<%n> + vp<%3> * ir<-1>
; CHECK-NEXT: vp<%5> = SCALAR-STEPS vp<%4>, ir<-1>
; CHECK-NEXT: CLONE ir<%i.0> = add nsw vp<%5>, ir<-1>
; CHECK-NEXT: CLONE ir<%idxprom> = zext ir<%i.0>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%B>, ir<%idxprom>
; CHECK-NEXT: vp<%6> = vector-pointer (reverse) ir<%arrayidx>
; CHECK-NEXT: WIDEN ir<%1> = load vp<%6>
; CHECK-NEXT: WIDEN ir<%add9> = add ir<%1>, ir<1>
; CHECK-NEXT: CLONE ir<%arrayidx3> = getelementptr inbounds ir<%A>, ir<%idxprom>
; CHECK-NEXT: vp<%7> = vector-pointer (reverse) ir<%arrayidx3>
; CHECK-NEXT: WIDEN store vp<%7>, ir<%add9>
; CHECK-NEXT: EMIT vp<%8> = add nuw vp<%3>, vp<%0>
; CHECK-NEXT: EMIT branch-on-count vp<%8>, vp<%1>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: Successor(s): middle.block
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
; CHECK-NEXT: EMIT vp<%10> = icmp eq vp<%2>, vp<%1>
; CHECK-NEXT: EMIT branch-on-cond vp<%10>
; CHECK-NEXT: Successor(s): ir-bb<for.cond.cleanup.loopexit>, scalar.ph
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<for.cond.cleanup.loopexit>:
; CHECK-NEXT: No successors
; CHECK-EMPTY:
; CHECK-NEXT: scalar.ph:
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ]
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: %1 = load i32, ptr %arrayidx, align 4
; CHECK-NEXT: LV: Found an estimated cost of 2 for VF vscale x 4 For instruction: %add9 = add i32 %1, 1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom
; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: store i32 %add9, ptr %arrayidx3, align 4
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
; CHECK-NEXT: LV(REG): Calculating max register usage:
; CHECK-NEXT: LV(REG): At #0 Interval # 0
; CHECK-NEXT: LV(REG): At #1 Interval # 1
; CHECK-NEXT: LV(REG): At #2 Interval # 2
; CHECK-NEXT: LV(REG): At #3 Interval # 2
; CHECK-NEXT: LV(REG): At #4 Interval # 2
; CHECK-NEXT: LV(REG): At #5 Interval # 3
; CHECK-NEXT: LV(REG): At #6 Interval # 3
; CHECK-NEXT: LV(REG): At #7 Interval # 3
; CHECK-NEXT: LV(REG): At #9 Interval # 1
; CHECK-NEXT: LV(REG): At #10 Interval # 2
; CHECK-NEXT: LV(REG): VF = vscale x 4
; CHECK-NEXT: LV(REG): Found max usage: 2 item
; CHECK-NEXT: LV(REG): RegisterClass: RISCV::GPRRC, 3 registers
; CHECK-NEXT: LV(REG): RegisterClass: RISCV::VRRC, 2 registers
; CHECK-NEXT: LV(REG): Found invariant usage: 1 item
; CHECK-NEXT: LV(REG): RegisterClass: RISCV::GPRRC, 1 registers
; CHECK-NEXT: LV: The target has 31 registers of RISCV::GPRRC register class
; CHECK-NEXT: LV: The target has 32 registers of RISCV::VRRC register class
; CHECK-NEXT: LV: Loop does not require scalar epilogue
; CHECK-NEXT: LV: Loop cost is 32
; CHECK-NEXT: LV: IC is 1
; CHECK-NEXT: LV: VF is vscale x 4
; CHECK-NEXT: LV: Not Interleaving.
; CHECK-NEXT: LV: Interleaving is not beneficial.
; CHECK-NEXT: LV: Found a vectorizable loop (vscale x 4) in <stdin>
; CHECK-NEXT: LEV: Epilogue vectorization is not profitable for this loop
; CHECK-NEXT: Executing best plan with VF=vscale x 4, UF=1
; CHECK-NEXT: VPlan 'Final VPlan for VF={vscale x 4},UF={1}' {
; CHECK-NEXT: Live-in vp<%0> = VF * UF
; CHECK-NEXT: Live-in vp<%1> = vector-trip-count
; CHECK-NEXT: vp<%2> = original trip-count
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<for.body.preheader>:
; CHECK-NEXT: IR %0 = zext i32 %n to i64
; CHECK-NEXT: EMIT vp<%2> = EXPAND SCEV (zext i32 %n to i64)
; CHECK-NEXT: No successors
; CHECK-EMPTY:
; CHECK-NEXT: vector.ph:
; CHECK-NEXT: Successor(s): vector loop
; CHECK-EMPTY:
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<%3> = CANONICAL-INDUCTION ir<0>, vp<%8>
; CHECK-NEXT: vp<%4> = DERIVED-IV ir<%n> + vp<%3> * ir<-1>
; CHECK-NEXT: vp<%5> = SCALAR-STEPS vp<%4>, ir<-1>
; CHECK-NEXT: CLONE ir<%i.0> = add nsw vp<%5>, ir<-1>
; CHECK-NEXT: CLONE ir<%idxprom> = zext ir<%i.0>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%B>, ir<%idxprom>
; CHECK-NEXT: vp<%6> = vector-pointer (reverse) ir<%arrayidx>
; CHECK-NEXT: WIDEN ir<%13> = load vp<%6>
; CHECK-NEXT: WIDEN ir<%add9> = add ir<%13>, ir<1>
; CHECK-NEXT: CLONE ir<%arrayidx3> = getelementptr inbounds ir<%A>, ir<%idxprom>
; CHECK-NEXT: vp<%7> = vector-pointer (reverse) ir<%arrayidx3>
; CHECK-NEXT: WIDEN store vp<%7>, ir<%add9>
; CHECK-NEXT: EMIT vp<%8> = add nuw vp<%3>, vp<%0>
; CHECK-NEXT: EMIT branch-on-count vp<%8>, vp<%1>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: Successor(s): middle.block
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
; CHECK-NEXT: EMIT vp<%10> = icmp eq vp<%2>, vp<%1>
; CHECK-NEXT: EMIT branch-on-cond vp<%10>
; CHECK-NEXT: Successor(s): ir-bb<for.cond.cleanup.loopexit>, scalar.ph
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<for.cond.cleanup.loopexit>:
; CHECK-NEXT: No successors
; CHECK-EMPTY:
; CHECK-NEXT: scalar.ph:
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: LV: Loop does not require scalar epilogue
; CHECK-NEXT: LV: Loop does not require scalar epilogue
; CHECK-NEXT: LV: Interleaving disabled by the pass manager
; CHECK-NEXT: LV: Loop does not require scalar epilogue
; CHECK-NEXT: LV: Vectorizing: innermost loop.
; CHECK-EMPTY:
;
entry:
  %cmp7 = icmp sgt i32 %n, 0
  br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup

for.body.preheader:                               ; preds = %entry
  %0 = zext i32 %n to i64
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
  %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ]
  %i.0 = add nsw i32 %i.0.in8, -1
  %idxprom = zext i32 %i.0 to i64
  %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
  %1 = load i32, ptr %arrayidx, align 4
  %add9 = add i32 %1, 1
  %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom
  store i32 %add9, ptr %arrayidx3, align 4
  %cmp = icmp ugt i64 %indvars.iv, 1
  %indvars.iv.next = add nsw i64 %indvars.iv, -1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}
|
|
|
|
; Reverse float loop (a[i] = b[i] + 1.0f), forced to VF=vscale x 4. Mirrors
; @vector_reverse_i64 but exercises the FP (fadd) path; note the extra
; "unsafe algebra" debug line and Loop cost 34. Extraction-artifact "|" lines
; have been removed to restore valid IR.
define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocapture noundef readonly %B, i32 noundef signext %n) {
; CHECK-LABEL: 'vector_reverse_f32'
; CHECK-NEXT: LV: Loop hints: force=enabled width=vscale x 4 interleave=0
; CHECK-NEXT: LV: Found a loop: for.body
; CHECK-NEXT: LV: Found an induction variable.
; CHECK-NEXT: LV: Found an induction variable.
; CHECK-NEXT: LV: Found FP op with unsafe algebra.
; CHECK-NEXT: LV: Did not find one integer induction var.
; CHECK-NEXT: LV: We can vectorize this loop (with a runtime bound check)!
; CHECK-NEXT: LV: Loop does not require scalar epilogue
; CHECK-NEXT: LV: Found trip count: 0
; CHECK-NEXT: LV: Scalable vectorization is available
; CHECK-NEXT: LV: The max safe fixed VF is: 67108864.
; CHECK-NEXT: LV: The max safe scalable VF is: vscale x 4294967295.
; CHECK-NEXT: LV: Found uniform instruction: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Found uniform instruction: %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
; CHECK-NEXT: LV: Found uniform instruction: %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
; CHECK-NEXT: LV: Found uniform instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found uniform instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found uniform instruction: %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
; CHECK-NEXT: LV: Found uniform instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: LV: Found uniform instruction: %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ]
; CHECK-NEXT: LV: Found uniform instruction: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ]
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: %1 = load float, ptr %arrayidx, align 4
; CHECK-NEXT: LV: Found an estimated cost of 4 for VF vscale x 4 For instruction: %conv1 = fadd float %1, 1.000000e+00
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: store float %conv1, ptr %arrayidx3, align 4
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
; CHECK-NEXT: LV: Using user VF vscale x 4.
; CHECK-NEXT: LV: Loop does not require scalar epilogue
; CHECK-NEXT: LV: Scalarizing: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Scalarizing: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Scalarizing: %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
; CHECK-NEXT: LV: Scalarizing: %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
; CHECK-NEXT: LV: Scalarizing: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Scalarizing: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: VPlan 'Initial VPlan for VF={vscale x 4},UF>=1' {
; CHECK-NEXT: Live-in vp<%0> = VF * UF
; CHECK-NEXT: Live-in vp<%1> = vector-trip-count
; CHECK-NEXT: vp<%2> = original trip-count
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<for.body.preheader>:
; CHECK-NEXT: IR %0 = zext i32 %n to i64
; CHECK-NEXT: EMIT vp<%2> = EXPAND SCEV (zext i32 %n to i64)
; CHECK-NEXT: No successors
; CHECK-EMPTY:
; CHECK-NEXT: vector.ph:
; CHECK-NEXT: Successor(s): vector loop
; CHECK-EMPTY:
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<%3> = CANONICAL-INDUCTION ir<0>, vp<%8>
; CHECK-NEXT: vp<%4> = DERIVED-IV ir<%n> + vp<%3> * ir<-1>
; CHECK-NEXT: vp<%5> = SCALAR-STEPS vp<%4>, ir<-1>
; CHECK-NEXT: CLONE ir<%i.0> = add nsw vp<%5>, ir<-1>
; CHECK-NEXT: CLONE ir<%idxprom> = zext ir<%i.0>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%B>, ir<%idxprom>
; CHECK-NEXT: vp<%6> = vector-pointer (reverse) ir<%arrayidx>
; CHECK-NEXT: WIDEN ir<%1> = load vp<%6>
; CHECK-NEXT: WIDEN ir<%conv1> = fadd ir<%1>, ir<1.000000e+00>
; CHECK-NEXT: CLONE ir<%arrayidx3> = getelementptr inbounds ir<%A>, ir<%idxprom>
; CHECK-NEXT: vp<%7> = vector-pointer (reverse) ir<%arrayidx3>
; CHECK-NEXT: WIDEN store vp<%7>, ir<%conv1>
; CHECK-NEXT: EMIT vp<%8> = add nuw vp<%3>, vp<%0>
; CHECK-NEXT: EMIT branch-on-count vp<%8>, vp<%1>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: Successor(s): middle.block
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
; CHECK-NEXT: EMIT vp<%10> = icmp eq vp<%2>, vp<%1>
; CHECK-NEXT: EMIT branch-on-cond vp<%10>
; CHECK-NEXT: Successor(s): ir-bb<for.cond.cleanup.loopexit>, scalar.ph
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<for.cond.cleanup.loopexit>:
; CHECK-NEXT: No successors
; CHECK-EMPTY:
; CHECK-NEXT: scalar.ph:
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ]
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %i.0 = add nsw i32 %i.0.in8, -1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %idxprom = zext i32 %i.0 to i64
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: %1 = load float, ptr %arrayidx, align 4
; CHECK-NEXT: LV: Found an estimated cost of 4 for VF vscale x 4 For instruction: %conv1 = fadd float %1, 1.000000e+00
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
; CHECK-NEXT: LV: Found an estimated cost of 13 for VF vscale x 4 For instruction: store float %conv1, ptr %arrayidx3, align 4
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %cmp = icmp ugt i64 %indvars.iv, 1
; CHECK-NEXT: LV: Found an estimated cost of 1 for VF vscale x 4 For instruction: %indvars.iv.next = add nsw i64 %indvars.iv, -1
; CHECK-NEXT: LV: Found an estimated cost of 0 for VF vscale x 4 For instruction: br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit, !llvm.loop !0
; CHECK-NEXT: LV(REG): Calculating max register usage:
; CHECK-NEXT: LV(REG): At #0 Interval # 0
; CHECK-NEXT: LV(REG): At #1 Interval # 1
; CHECK-NEXT: LV(REG): At #2 Interval # 2
; CHECK-NEXT: LV(REG): At #3 Interval # 2
; CHECK-NEXT: LV(REG): At #4 Interval # 2
; CHECK-NEXT: LV(REG): At #5 Interval # 3
; CHECK-NEXT: LV(REG): At #6 Interval # 3
; CHECK-NEXT: LV(REG): At #7 Interval # 3
; CHECK-NEXT: LV(REG): At #9 Interval # 1
; CHECK-NEXT: LV(REG): At #10 Interval # 2
; CHECK-NEXT: LV(REG): VF = vscale x 4
; CHECK-NEXT: LV(REG): Found max usage: 2 item
; CHECK-NEXT: LV(REG): RegisterClass: RISCV::GPRRC, 3 registers
; CHECK-NEXT: LV(REG): RegisterClass: RISCV::VRRC, 2 registers
; CHECK-NEXT: LV(REG): Found invariant usage: 1 item
; CHECK-NEXT: LV(REG): RegisterClass: RISCV::GPRRC, 1 registers
; CHECK-NEXT: LV: The target has 31 registers of RISCV::GPRRC register class
; CHECK-NEXT: LV: The target has 32 registers of RISCV::VRRC register class
; CHECK-NEXT: LV: Loop does not require scalar epilogue
; CHECK-NEXT: LV: Loop cost is 34
; CHECK-NEXT: LV: IC is 1
; CHECK-NEXT: LV: VF is vscale x 4
; CHECK-NEXT: LV: Not Interleaving.
; CHECK-NEXT: LV: Interleaving is not beneficial.
; CHECK-NEXT: LV: Found a vectorizable loop (vscale x 4) in <stdin>
; CHECK-NEXT: LEV: Epilogue vectorization is not profitable for this loop
; CHECK-NEXT: Executing best plan with VF=vscale x 4, UF=1
; CHECK-NEXT: VPlan 'Final VPlan for VF={vscale x 4},UF={1}' {
; CHECK-NEXT: Live-in vp<%0> = VF * UF
; CHECK-NEXT: Live-in vp<%1> = vector-trip-count
; CHECK-NEXT: vp<%2> = original trip-count
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<for.body.preheader>:
; CHECK-NEXT: IR %0 = zext i32 %n to i64
; CHECK-NEXT: EMIT vp<%2> = EXPAND SCEV (zext i32 %n to i64)
; CHECK-NEXT: No successors
; CHECK-EMPTY:
; CHECK-NEXT: vector.ph:
; CHECK-NEXT: Successor(s): vector loop
; CHECK-EMPTY:
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<%3> = CANONICAL-INDUCTION ir<0>, vp<%8>
; CHECK-NEXT: vp<%4> = DERIVED-IV ir<%n> + vp<%3> * ir<-1>
; CHECK-NEXT: vp<%5> = SCALAR-STEPS vp<%4>, ir<-1>
; CHECK-NEXT: CLONE ir<%i.0> = add nsw vp<%5>, ir<-1>
; CHECK-NEXT: CLONE ir<%idxprom> = zext ir<%i.0>
; CHECK-NEXT: CLONE ir<%arrayidx> = getelementptr inbounds ir<%B>, ir<%idxprom>
; CHECK-NEXT: vp<%6> = vector-pointer (reverse) ir<%arrayidx>
; CHECK-NEXT: WIDEN ir<%13> = load vp<%6>
; CHECK-NEXT: WIDEN ir<%conv1> = fadd ir<%13>, ir<1.000000e+00>
; CHECK-NEXT: CLONE ir<%arrayidx3> = getelementptr inbounds ir<%A>, ir<%idxprom>
; CHECK-NEXT: vp<%7> = vector-pointer (reverse) ir<%arrayidx3>
; CHECK-NEXT: WIDEN store vp<%7>, ir<%conv1>
; CHECK-NEXT: EMIT vp<%8> = add nuw vp<%3>, vp<%0>
; CHECK-NEXT: EMIT branch-on-count vp<%8>, vp<%1>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: Successor(s): middle.block
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
; CHECK-NEXT: EMIT vp<%10> = icmp eq vp<%2>, vp<%1>
; CHECK-NEXT: EMIT branch-on-cond vp<%10>
; CHECK-NEXT: Successor(s): ir-bb<for.cond.cleanup.loopexit>, scalar.ph
; CHECK-EMPTY:
; CHECK-NEXT: ir-bb<for.cond.cleanup.loopexit>:
; CHECK-NEXT: No successors
; CHECK-EMPTY:
; CHECK-NEXT: scalar.ph:
; CHECK-NEXT: No successors
; CHECK-NEXT: }
; CHECK-NEXT: LV: Loop does not require scalar epilogue
; CHECK-NEXT: LV: Loop does not require scalar epilogue
; CHECK-NEXT: LV: Interleaving disabled by the pass manager
; CHECK-NEXT: LV: Loop does not require scalar epilogue
; CHECK-NEXT: LV: Vectorizing: innermost loop.
;
entry:
  %cmp7 = icmp sgt i32 %n, 0
  br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup

for.body.preheader:                               ; preds = %entry
  %0 = zext i32 %n to i64
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
  %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ]
  %i.0 = add nsw i32 %i.0.in8, -1
  %idxprom = zext i32 %i.0 to i64
  %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
  %1 = load float, ptr %arrayidx, align 4
  %conv1 = fadd float %1, 1.000000e+00
  %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
  store float %conv1, ptr %arrayidx3, align 4
  %cmp = icmp ugt i64 %indvars.iv, 1
  %indvars.iv.next = add nsw i64 %indvars.iv, -1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}
|
|
|
|
; Loop metadata shared by both test loops: force scalable vectorization with
; width 4 (matches the "#pragma clang loop vectorize_width(4, scalable)" in
; the header comment). Extraction-artifact "|" lines removed.
!0 = distinct !{!0, !1, !2, !3, !4}
!1 = !{!"llvm.loop.mustprogress"}
!2 = !{!"llvm.loop.vectorize.width", i32 4}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.vectorize.enable", i1 true}
|