
With EVL tail folding, an AnyOf reduction ends up emitting an i1 vp.merge. Unfortunately, because RVV has no tail-undisturbed mask instructions, an i1 vp.merge gets expanded to a lengthy sequence:

```asm
vsetvli a1, zero, e64, m1, ta, ma
vid.v v10
vmsltu.vx v10, v10, a0
vmand.mm v9, v9, v10
vmandn.mm v8, v8, v9
vmand.mm v9, v0, v9
vmor.mm v0, v9, v8
```

This patch addresses it by matching this specific AnyOf pattern in RISCVCodeGenPrepare and widening it from i1 to i8, which leaves a single tail-undisturbed masked instruction inside the loop. It turns:

```llvm
loop:
  %phi = phi <vscale x 4 x i1> [ zeroinitializer, %entry ], [ %rec, %loop ]
  %cmp = icmp ...
  %rec = call <vscale x 4 x i1> @llvm.vp.merge(%cmp, true, %phi, %evl)
```

into:

```llvm
loop:
  %phi = phi <vscale x 4 x i8> [ zeroinitializer, %entry ], [ %rec, %loop ]
  %cmp = icmp ...
  %rec = call <vscale x 4 x i8> @llvm.vp.merge(%cmp, true, %phi, %evl)
  %trunc = trunc <vscale x 4 x i8> %rec to <vscale x 4 x i1>
```

I ended up adding this in RISCVCodeGenPrepare instead of the LoopVectorizer itself since doing it there would have required adding a target hook. It may also be possible to generalize this to other i1 vp.merges in the future.

Normally the trunc will be sunk outside of the loop. The transform doesn't check that all non-phi users of the vp.merge are outside the loop, but even with in-loop users it still seems to be profitable; see the test diff in `@widen_anyof_rdx_use_in_loop`.

Fixes #132180
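For readers who want to see roughly what the match involves, below is a minimal C++ sketch against the LLVM IR APIs. This is not the actual patch: the helper name `widenAnyOfVPMerge` is hypothetical, and the real pass performs more validation (for instance that the phi and the vp.merge sit in the same loop).

```cpp
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;
using namespace llvm::PatternMatch;

// Sketch: widen an AnyOf-style i1 vp.merge recurrence to i8. The real
// transform lives in RISCVCodeGenPrepare and does stricter legality checks.
static bool widenAnyOfVPMerge(IntrinsicInst *II) {
  Value *Mask, *True, *PhiV, *EVL;
  if (!match(II, m_Intrinsic<Intrinsic::vp_merge>(
                     m_Value(Mask), m_Value(True), m_Value(PhiV), m_Value(EVL))))
    return false;

  // Only the AnyOf shape: an i1 vector vp.merge of splat(true) into a phi
  // that starts at zero and recurses through this vp.merge.
  auto *VT = dyn_cast<ScalableVectorType>(II->getType());
  if (!VT || !VT->getElementType()->isIntegerTy(1) || !match(True, m_AllOnes()))
    return false;
  auto *Phi = dyn_cast<PHINode>(PhiV);
  if (!Phi || Phi->getNumIncomingValues() != 2)
    return false;
  unsigned RecIdx = Phi->getIncomingValue(0) == II ? 0 : 1;
  if (Phi->getIncomingValue(RecIdx) != II ||
      !match(Phi->getIncomingValue(1 - RecIdx), m_Zero()))
    return false;

  // Rebuild the recurrence at i8 and truncate back to i1 for the other
  // users of the vp.merge. The trunc is normally sunk out of the loop.
  auto *WideVT = VectorType::get(Type::getInt8Ty(II->getContext()),
                                 VT->getElementCount());
  IRBuilder<> Builder(Phi);
  PHINode *WidePhi = Builder.CreatePHI(WideVT, 2);

  Builder.SetInsertPoint(II);
  Value *WideRec = Builder.CreateIntrinsic(
      Intrinsic::vp_merge, {WideVT},
      {Mask, ConstantInt::get(WideVT, 1), WidePhi, EVL});
  Value *Trunc = Builder.CreateTrunc(WideRec, VT);

  WidePhi->addIncoming(Constant::getNullValue(WideVT),
                       Phi->getIncomingBlock(1 - RecIdx));
  WidePhi->addIncoming(WideRec, Phi->getIncomingBlock(RecIdx));

  // The old i1 phi and vp.merge become dead once their users are rewired;
  // leave them for later DCE.
  II->replaceAllUsesWith(Trunc);
  return true;
}
```

The `@widen_anyof_rdx` test below shows the net effect: a single tail-undisturbed `vmerge.vim` at e8 inside the loop, with the narrowing `vand.vi`/`vmsne.vi` pair sunk into the exit block.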
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s

; Make sure we don't emit a pair of shifts for the zext in the preheader. We
; can tell that bit 31 is 0 in the preheader and rely on %n already being
; sign extended without adding zeros explicitly.
define void @test1(ptr nocapture noundef %a, i32 noundef signext %n) {
; CHECK-LABEL: test1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    blez a1, .LBB0_3
; CHECK-NEXT:  # %bb.1: # %for.body.preheader
; CHECK-NEXT:    slli a1, a1, 2
; CHECK-NEXT:    add a1, a0, a1
; CHECK-NEXT:  .LBB0_2: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a2, 0(a0)
; CHECK-NEXT:    addi a2, a2, 4
; CHECK-NEXT:    sw a2, 0(a0)
; CHECK-NEXT:    addi a0, a0, 4
; CHECK-NEXT:    bne a0, a1, .LBB0_2
; CHECK-NEXT:  .LBB0_3: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %cmp3 = icmp sgt i32 %n, 0
  br i1 %cmp3, label %for.body.preheader, label %for.cond.cleanup

for.body.preheader:                               ; preds = %entry
  %wide.trip.count = zext nneg i32 %n to i64
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
  %0 = load i32, ptr %arrayidx, align 4
  %add = add nsw i32 %0, 4
  store i32 %add, ptr %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
}

; Make sure we convert the 4294967294 in for.body.preheader.new to -2 based on
; the upper 33 bits being zero by the dominating condition %cmp3.
define void @test2(ptr nocapture noundef %a, i32 noundef signext %n) {
; CHECK-LABEL: test2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    blez a1, .LBB1_7
; CHECK-NEXT:  # %bb.1: # %for.body.preheader
; CHECK-NEXT:    li a3, 1
; CHECK-NEXT:    andi a2, a1, 1
; CHECK-NEXT:    bne a1, a3, .LBB1_3
; CHECK-NEXT:  # %bb.2:
; CHECK-NEXT:    li a3, 0
; CHECK-NEXT:    j .LBB1_5
; CHECK-NEXT:  .LBB1_3: # %for.body.preheader.new
; CHECK-NEXT:    li a3, 0
; CHECK-NEXT:    andi a1, a1, -2
; CHECK-NEXT:    addi a4, a0, 4
; CHECK-NEXT:  .LBB1_4: # %for.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    lw a5, -4(a4)
; CHECK-NEXT:    lw a6, 0(a4)
; CHECK-NEXT:    addi a3, a3, 2
; CHECK-NEXT:    addi a5, a5, 4
; CHECK-NEXT:    addi a6, a6, 4
; CHECK-NEXT:    sw a5, -4(a4)
; CHECK-NEXT:    sw a6, 0(a4)
; CHECK-NEXT:    addi a4, a4, 8
; CHECK-NEXT:    bne a1, a3, .LBB1_4
; CHECK-NEXT:  .LBB1_5: # %for.cond.cleanup.loopexit.unr-lcssa
; CHECK-NEXT:    beqz a2, .LBB1_7
; CHECK-NEXT:  # %bb.6: # %for.body.epil
; CHECK-NEXT:    slli a3, a3, 2
; CHECK-NEXT:    add a0, a0, a3
; CHECK-NEXT:    lw a1, 0(a0)
; CHECK-NEXT:    addi a1, a1, 4
; CHECK-NEXT:    sw a1, 0(a0)
; CHECK-NEXT:  .LBB1_7: # %for.cond.cleanup
; CHECK-NEXT:    ret
entry:
  %cmp3 = icmp sgt i32 %n, 0
  br i1 %cmp3, label %for.body.preheader, label %for.cond.cleanup

for.body.preheader:                               ; preds = %entry
  %wide.trip.count = zext nneg i32 %n to i64
  %xtraiter = and i64 %wide.trip.count, 1
  %0 = icmp eq i32 %n, 1
  br i1 %0, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new

for.body.preheader.new:                           ; preds = %for.body.preheader
  %unroll_iter = and i64 %wide.trip.count, 4294967294
  br label %for.body

for.cond.cleanup.loopexit.unr-lcssa:              ; preds = %for.body, %for.body.preheader
  %indvars.iv.unr = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next.1, %for.body ]
  %lcmp.mod.not = icmp eq i64 %xtraiter, 0
  br i1 %lcmp.mod.not, label %for.cond.cleanup, label %for.body.epil

for.body.epil:                                    ; preds = %for.cond.cleanup.loopexit.unr-lcssa
  %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.unr
  %1 = load i32, ptr %arrayidx.epil, align 4
  %add.epil = add nsw i32 %1, 4
  store i32 %add.epil, ptr %arrayidx.epil, align 4
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.body.epil, %for.cond.cleanup.loopexit.unr-lcssa, %entry
  ret void

for.body:                                         ; preds = %for.body, %for.body.preheader.new
  %indvars.iv = phi i64 [ 0, %for.body.preheader.new ], [ %indvars.iv.next.1, %for.body ]
  %niter = phi i64 [ 0, %for.body.preheader.new ], [ %niter.next.1, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
  %2 = load i32, ptr %arrayidx, align 4
  %add = add nsw i32 %2, 4
  store i32 %add, ptr %arrayidx, align 4
  %indvars.iv.next = or disjoint i64 %indvars.iv, 1
  %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
  %3 = load i32, ptr %arrayidx.1, align 4
  %add.1 = add nsw i32 %3, 4
  store i32 %add.1, ptr %arrayidx.1, align 4
  %indvars.iv.next.1 = add nuw nsw i64 %indvars.iv, 2
  %niter.next.1 = add i64 %niter, 2
  %niter.ncmp.1 = icmp eq i64 %niter.next.1, %unroll_iter
  br i1 %niter.ncmp.1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
}

define i1 @widen_anyof_rdx(ptr %p, i64 %n) {
; CHECK-LABEL: widen_anyof_rdx:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:  .LBB2_1: # %loop
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    sub a3, a1, a2
; CHECK-NEXT:    slli a4, a2, 2
; CHECK-NEXT:    vsetvli a3, a3, e32, m2, ta, ma
; CHECK-NEXT:    add a4, a0, a4
; CHECK-NEXT:    vle32.v v10, (a4)
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    add a2, a2, a3
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    blt a2, a1, .LBB2_1
; CHECK-NEXT:  # %bb.2: # %exit
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v8, v8, 0
; CHECK-NEXT:    vcpop.m a0, v8
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    ret
entry:
  br label %loop
loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %phi = phi <vscale x 4 x i1> [ zeroinitializer, %entry ], [ %rec, %loop ]
  %avl = sub i64 %n, %iv
  %evl = call i32 @llvm.experimental.get.vector.length(i64 %avl, i32 4, i1 true)

  %gep = getelementptr i32, ptr %p, i64 %iv
  %x = call <vscale x 4 x i32> @llvm.vp.load(ptr %gep, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  %cmp = icmp ne <vscale x 4 x i32> %x, zeroinitializer
  %rec = call <vscale x 4 x i1> @llvm.vp.merge(<vscale x 4 x i1> %cmp, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> %phi, i32 %evl)

  %evl.zext = zext i32 %evl to i64
  %iv.next = add i64 %iv, %evl.zext
  %done = icmp sge i64 %iv.next, %n
  br i1 %done, label %exit, label %loop
exit:
  %res = call i1 @llvm.vector.reduce.or(<vscale x 4 x i1> %rec)
  ret i1 %res
}

define i1 @widen_anyof_rdx_use_in_loop(ptr %p, i64 %n) {
; CHECK-LABEL: widen_anyof_rdx_use_in_loop:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a2, 0
; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:  .LBB3_1: # %loop
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    sub a3, a1, a2
; CHECK-NEXT:    slli a4, a2, 2
; CHECK-NEXT:    vsetvli a3, a3, e32, m2, ta, ma
; CHECK-NEXT:    add a4, a0, a4
; CHECK-NEXT:    vle32.v v10, (a4)
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vi v9, v8, 1
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    add a2, a2, a3
; CHECK-NEXT:    vsm.v v9, (a4)
; CHECK-NEXT:    blt a2, a1, .LBB3_1
; CHECK-NEXT:  # %bb.2: # %exit
; CHECK-NEXT:    vcpop.m a0, v9
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    ret
entry:
  br label %loop
loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %phi = phi <vscale x 4 x i1> [ zeroinitializer, %entry ], [ %rec, %loop ]
  %avl = sub i64 %n, %iv
  %evl = call i32 @llvm.experimental.get.vector.length(i64 %avl, i32 4, i1 true)

  %gep = getelementptr i32, ptr %p, i64 %iv
  %x = call <vscale x 4 x i32> @llvm.vp.load(ptr %gep, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  %cmp = icmp ne <vscale x 4 x i32> %x, zeroinitializer
  %rec = call <vscale x 4 x i1> @llvm.vp.merge(<vscale x 4 x i1> %cmp, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> %phi, i32 %evl)

  store <vscale x 4 x i1> %rec, ptr %gep

  %evl.zext = zext i32 %evl to i64
  %iv.next = add i64 %iv, %evl.zext
  %done = icmp sge i64 %iv.next, %n
  br i1 %done, label %exit, label %loop
exit:
  %res = call i1 @llvm.vector.reduce.or(<vscale x 4 x i1> %rec)
  ret i1 %res
}