llvm-project/llvm/test/CodeGen/RISCV/riscv-codegenprepare.ll
Luke Lau 185ba025da
[RISCV] Widen i1 AnyOf reductions (#134898)
With EVL tail folding, an AnyOf reduction will end up emitting an i1
vp.merge.

Unfortunately, because RVV has no tail-undisturbed mask instructions, an
i1 vp.merge gets expanded to a lengthy sequence:

```asm
  vsetvli a1, zero, e64, m1, ta, ma
  vid.v v10
  vmsltu.vx v10, v10, a0
  vmand.mm v9, v9, v10
  vmandn.mm v8, v8, v9
  vmand.mm v9, v0, v9
  vmor.mm v0, v9, v8
```

This patch addresses the issue by matching this specific AnyOf pattern in
RISCVCodegenPrepare and widening it from i1 to i8, which ends up
producing a single masked i8 vor.vi inside the loop:

```llvm
loop:
  %phi = phi <vscale x 4 x i1> [ zeroinitializer, %entry ], [ %rec, %loop ]
  %cmp = icmp ...
  %rec = call <vscale x 4 x i1> @llvm.vp.merge(%cmp, true, %phi, %evl)
```

becomes:

```llvm
loop:
  %phi = phi <vscale x 4 x i8> [ zeroinitializer, %entry ], [ %rec, %loop ]
  %cmp = icmp ...
  %rec = call <vscale x 4 x i8> @llvm.vp.merge(%cmp, true, %phi, %evl)
  %trunc = trunc <vscale x 4 x i8> %rec to <vscale x 4 x i1>
```
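
Since the tail-undisturbed (`tu`) policy does apply to regular, non-mask
instructions, the widened i8 merge can then be selected as the single masked
vor.vi mentioned above. Roughly, the loop body should contain something like
the following; the register assignments and LMUL here are illustrative
assumptions, not actual compiler output:

```asm
  # assumed: EVL in a0, the %cmp mask in v0, the widened accumulator in v8
  vsetvli zero, a0, e8, mf2, tu, mu   # tail-undisturbed keeps lanes past EVL
  vor.vi v8, v8, 1, v0.t              # OR in 1 (i.e. set to 1) where mask set
```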

I ended up adding this in RISCVCodegenPrepare instead of the
LoopVectorizer itself, since doing it there would have required adding a
target hook.

It may also be possible to generalize this to other i1 vp.merges in the
future.

Normally the trunc will be sunk outside of the loop. Note that the pass
doesn't check whether all the non-phi users of the vp.merge are outside
of the loop: even with in-loop users this still seems to be profitable,
see the test diff in `@widen_anyof_rdx_use_in_loop`.

Fixes #132180
2025-04-28 10:07:51 +08:00

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -S -riscv-codegenprepare -mtriple=riscv64 | FileCheck %s
; Make sure we convert the 4294967294 in for.body.preheader.new to -2, based
; on the upper 33 bits being known zero from the dominating condition %cmp3.
define void @test2(ptr nocapture noundef %a, i32 noundef signext %n) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[N:%.*]], 0
; CHECK-NEXT: br i1 [[CMP3]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[N]] to i64
; CHECK-NEXT: [[XTRAITER:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 1
; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i32 [[N]], 1
; CHECK-NEXT: br i1 [[TMP0]], label [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA:%.*]], label [[FOR_BODY_PREHEADER_NEW:%.*]]
; CHECK: for.body.preheader.new:
; CHECK-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[WIDE_TRIP_COUNT]], -2
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup.loopexit.unr-lcssa:
; CHECK-NEXT: [[INDVARS_IV_UNR:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INDVARS_IV_NEXT_1:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[LCMP_MOD_NOT:%.*]] = icmp eq i64 [[XTRAITER]], 0
; CHECK-NEXT: br i1 [[LCMP_MOD_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY_EPIL:%.*]]
; CHECK: for.body.epil:
; CHECK-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV_UNR]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX_EPIL]], align 4
; CHECK-NEXT: [[ADD_EPIL:%.*]] = add nsw i32 [[TMP1]], 4
; CHECK-NEXT: store i32 [[ADD_EPIL]], ptr [[ARRAYIDX_EPIL]], align 4
; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[INDVARS_IV_NEXT_1]], [[FOR_BODY]] ]
; CHECK-NEXT: [[NITER:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[NITER_NEXT_1:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 4
; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = or i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[ADD_1:%.*]] = add nsw i32 [[TMP3]], 4
; CHECK-NEXT: store i32 [[ADD_1]], ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_1]] = add nuw nsw i64 [[INDVARS_IV]], 2
; CHECK-NEXT: [[NITER_NEXT_1]] = add i64 [[NITER]], 2
; CHECK-NEXT: [[NITER_NCMP_1:%.*]] = icmp eq i64 [[NITER_NEXT_1]], [[UNROLL_ITER]]
; CHECK-NEXT: br i1 [[NITER_NCMP_1]], label [[FOR_COND_CLEANUP_LOOPEXIT_UNR_LCSSA]], label [[FOR_BODY]]
;
entry:
  %cmp3 = icmp sgt i32 %n, 0
  br i1 %cmp3, label %for.body.preheader, label %for.cond.cleanup

for.body.preheader: ; preds = %entry
  %wide.trip.count = zext nneg i32 %n to i64
  %xtraiter = and i64 %wide.trip.count, 1
  %0 = icmp eq i32 %n, 1
  br i1 %0, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new

for.body.preheader.new: ; preds = %for.body.preheader
  %unroll_iter = and i64 %wide.trip.count, 4294967294
  br label %for.body

for.cond.cleanup.loopexit.unr-lcssa: ; preds = %for.body, %for.body.preheader
  %indvars.iv.unr = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next.1, %for.body ]
  %lcmp.mod.not = icmp eq i64 %xtraiter, 0
  br i1 %lcmp.mod.not, label %for.cond.cleanup, label %for.body.epil

for.body.epil: ; preds = %for.cond.cleanup.loopexit.unr-lcssa
  %arrayidx.epil = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.unr
  %1 = load i32, ptr %arrayidx.epil, align 4
  %add.epil = add nsw i32 %1, 4
  store i32 %add.epil, ptr %arrayidx.epil, align 4
  br label %for.cond.cleanup

for.cond.cleanup: ; preds = %for.body.epil, %for.cond.cleanup.loopexit.unr-lcssa, %entry
  ret void

for.body: ; preds = %for.body, %for.body.preheader.new
  %indvars.iv = phi i64 [ 0, %for.body.preheader.new ], [ %indvars.iv.next.1, %for.body ]
  %niter = phi i64 [ 0, %for.body.preheader.new ], [ %niter.next.1, %for.body ]
  %arrayidx = getelementptr inbounds i32, ptr %a, i64 %indvars.iv
  %2 = load i32, ptr %arrayidx, align 4
  %add = add nsw i32 %2, 4
  store i32 %add, ptr %arrayidx, align 4
  %indvars.iv.next = or i64 %indvars.iv, 1
  %arrayidx.1 = getelementptr inbounds i32, ptr %a, i64 %indvars.iv.next
  %3 = load i32, ptr %arrayidx.1, align 4
  %add.1 = add nsw i32 %3, 4
  store i32 %add.1, ptr %arrayidx.1, align 4
  %indvars.iv.next.1 = add nuw nsw i64 %indvars.iv, 2
  %niter.next.1 = add i64 %niter, 2
  %niter.ncmp.1 = icmp eq i64 %niter.next.1, %unroll_iter
  br i1 %niter.ncmp.1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
}

; Make sure we do not change 4294967295 to -1 here.
define i64 @bug(i32 %x) {
; CHECK-LABEL: @bug(
; CHECK-NEXT: [[A:%.*]] = sext i32 [[X:%.*]] to i64
; CHECK-NEXT: [[B:%.*]] = and i64 [[A]], 4294967295
; CHECK-NEXT: ret i64 [[B]]
;
  %a = sext i32 %x to i64
  %b = and i64 %a, 4294967295
  ret i64 %b
}
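
; Check that an i1 AnyOf reduction has its phi and vp.merge widened from
; <vscale x 4 x i1> to <vscale x 4 x i8>, with a trunc back to i1 inserted
; to feed the or reduction in the exit block.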
define i1 @widen_anyof_rdx(ptr %p, i64 %n) {
; CHECK-LABEL: @widen_anyof_rdx(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TMP0:%.*]] = phi <vscale x 4 x i8> [ zeroinitializer, [[ENTRY]] ], [ [[TMP1:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[N:%.*]], [[IV]]
; CHECK-NEXT: [[EVL:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[IV]]
; CHECK-NEXT: [[X:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr [[GEP]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL]])
; CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 4 x i32> [[X]], zeroinitializer
; CHECK-NEXT: [[TMP1]] = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> [[CMP]], <vscale x 4 x i8> splat (i8 1), <vscale x 4 x i8> [[TMP0]], i32 [[EVL]])
; CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i1>
; CHECK-NEXT: [[EVL_ZEXT:%.*]] = zext i32 [[EVL]] to i64
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[EVL_ZEXT]]
; CHECK-NEXT: [[DONE:%.*]] = icmp sge i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: [[RES:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP4]])
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %phi = phi <vscale x 4 x i1> [ zeroinitializer, %entry ], [ %rec, %loop ]
  %avl = sub i64 %n, %iv
  %evl = call i32 @llvm.experimental.get.vector.length(i64 %avl, i32 4, i1 true)
  %gep = getelementptr i32, ptr %p, i64 %iv
  %x = call <vscale x 4 x i32> @llvm.vp.load(ptr %gep, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  %cmp = icmp ne <vscale x 4 x i32> %x, zeroinitializer
  %rec = call <vscale x 4 x i1> @llvm.vp.merge(<vscale x 4 x i1> %cmp, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> %phi, i32 %evl)
  %evl.zext = zext i32 %evl to i64
  %iv.next = add i64 %iv, %evl.zext
  %done = icmp sge i64 %iv.next, %n
  br i1 %done, label %exit, label %loop

exit:
  %res = call i1 @llvm.vector.reduce.or(<vscale x 4 x i1> %rec)
  ret i1 %res
}
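
; Widening should still take place when the vp.merge has a non-phi use inside
; the loop (the store below); the trunc then stays inside the loop to feed it.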
define i1 @widen_anyof_rdx_use_in_loop(ptr %p, i64 %n) {
; CHECK-LABEL: @widen_anyof_rdx_use_in_loop(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[TMP0:%.*]] = phi <vscale x 4 x i8> [ zeroinitializer, [[ENTRY]] ], [ [[TMP1:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[N:%.*]], [[IV]]
; CHECK-NEXT: [[EVL:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[IV]]
; CHECK-NEXT: [[X:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr [[GEP]], <vscale x 4 x i1> splat (i1 true), i32 [[EVL]])
; CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 4 x i32> [[X]], zeroinitializer
; CHECK-NEXT: [[TMP1]] = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> [[CMP]], <vscale x 4 x i8> splat (i8 1), <vscale x 4 x i8> [[TMP0]], i32 [[EVL]])
; CHECK-NEXT: [[REC:%.*]] = trunc <vscale x 4 x i8> [[TMP1]] to <vscale x 4 x i1>
; CHECK-NEXT: store <vscale x 4 x i1> [[REC]], ptr [[GEP]], align 1
; CHECK-NEXT: [[EVL_ZEXT:%.*]] = zext i32 [[EVL]] to i64
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[EVL_ZEXT]]
; CHECK-NEXT: [[DONE:%.*]] = icmp sge i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: [[RES:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[REC]])
; CHECK-NEXT: ret i1 [[RES]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %phi = phi <vscale x 4 x i1> [ zeroinitializer, %entry ], [ %rec, %loop ]
  %avl = sub i64 %n, %iv
  %evl = call i32 @llvm.experimental.get.vector.length(i64 %avl, i32 4, i1 true)
  %gep = getelementptr i32, ptr %p, i64 %iv
  %x = call <vscale x 4 x i32> @llvm.vp.load(ptr %gep, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  %cmp = icmp ne <vscale x 4 x i32> %x, zeroinitializer
  %rec = call <vscale x 4 x i1> @llvm.vp.merge(<vscale x 4 x i1> %cmp, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> %phi, i32 %evl)
  store <vscale x 4 x i1> %rec, ptr %gep
  %evl.zext = zext i32 %evl to i64
  %iv.next = add i64 %iv, %evl.zext
  %done = icmp sge i64 %iv.next, %n
  br i1 %done, label %exit, label %loop

exit:
  %res = call i1 @llvm.vector.reduce.or(<vscale x 4 x i1> %rec)
  ret i1 %res
}