
Directly emit shl instead of a multiply if VF * Step is a power of 2. The main motivation here is to prepare the code and tests for directly generating and expanding a SCEV expression of the minimum iteration count. SCEVExpander directly emits shl for multiplies by powers of 2. InstCombine also performs this combine, so end-to-end this should effectively be NFC. PR: https://github.com/llvm/llvm-project/pull/153495
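As a minimal sketch (based on the RV64-UF2 checks below, not code lifted from the patch; value names are illustrative): with VF = vscale x 4 and an interleave factor of 2, the minimum iteration count is vscale * 8, and because 8 is a power of 2 the check can be emitted with a shift instead of a multiply:

  %vscale          = call i64 @llvm.vscale.i64()
  %min.trip.count  = shl nuw i64 %vscale, 3         ; vscale * 8 as a shift, rather than mul i64 %vscale, 8
  %min.iters.check = icmp ult i64 %n, %min.trip.count
  br i1 %min.iters.check, label %scalar.ph, label %vector.ph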
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "for.body:" --version 5
;; This is the loop in C++ being vectorized in this file with
;; vector.reverse
;; #pragma clang loop vectorize_width(4, scalable)
;; for (int i = N-1; i >= 0; --i)
;;   a[i] = b[i] + 1.0;

; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v -S < %s \
; RUN: | FileCheck --check-prefix=RV64 %s

; RUN: opt -passes=loop-vectorize -mtriple=riscv32 -mattr=+v -S < %s \
; RUN: | FileCheck --check-prefix=RV32 %s

; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v \
; RUN:   -force-vector-interleave=2 -S < %s \
; RUN: | FileCheck --check-prefix=RV64-UF2 %s

define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) {
|
|
; RV64-LABEL: define void @vector_reverse_i32(
|
|
; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
; RV64-NEXT: [[ENTRY:.*:]]
|
|
; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
|
|
; RV64: [[VECTOR_PH]]:
|
|
; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
|
|
; RV64: [[VECTOR_BODY]]:
|
|
; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV64-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV64-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
|
|
; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
|
|
; RV64-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
|
|
; RV64-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP7]]
|
|
; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP19]] to i64
|
|
; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP24]]
|
|
; RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP24]], 1
|
|
; RV64-NEXT: [[TMP11:%.*]] = mul i64 -1, [[TMP10]]
|
|
; RV64-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[TMP8]], i64 [[TMP9]]
|
|
; RV64-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP12]], i64 [[TMP11]]
|
|
; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
|
|
; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
|
|
; RV64-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
|
|
; RV64-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP7]]
|
|
; RV64-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64
|
|
; RV64-NEXT: [[TMP25:%.*]] = mul i64 0, [[TMP16]]
|
|
; RV64-NEXT: [[TMP17:%.*]] = sub i64 [[TMP16]], 1
|
|
; RV64-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP17]]
|
|
; RV64-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[TMP15]], i64 [[TMP25]]
|
|
; RV64-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[TMP20]], i64 [[TMP18]]
|
|
; RV64-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
|
|
; RV64-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE1]], ptr align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
|
|
; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP19]] to i64
|
|
; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[INDEX]]
|
|
; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
|
|
; RV64-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023
|
|
; RV64-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
|
|
; RV64: [[MIDDLE_BLOCK]]:
|
|
; RV64-NEXT: br [[EXIT:label %.*]]
|
|
; RV64: [[SCALAR_PH]]:
|
|
; RV64-NEXT: br label %[[FOR_BODY:.*]]
|
|
; RV64: [[FOR_BODY]]:
|
|
;
|
|
; RV32-LABEL: define void @vector_reverse_i32(
|
|
; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
; RV32-NEXT: [[ENTRY:.*:]]
|
|
; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
|
|
; RV32: [[VECTOR_PH]]:
|
|
; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
|
|
; RV32: [[VECTOR_BODY]]:
|
|
; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV32-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV32-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
|
|
; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
|
|
; RV32-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
|
|
; RV32-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP7]]
|
|
; RV32-NEXT: [[TMP10:%.*]] = mul i32 0, [[TMP9]]
|
|
; RV32-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], 1
|
|
; RV32-NEXT: [[TMP12:%.*]] = mul i32 -1, [[TMP11]]
|
|
; RV32-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP8]], i32 [[TMP10]]
|
|
; RV32-NEXT: [[TMP14:%.*]] = getelementptr i32, ptr [[TMP13]], i32 [[TMP12]]
|
|
; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
|
|
; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
|
|
; RV32-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
|
|
; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP7]]
|
|
; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP9]]
|
|
; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP9]], 1
|
|
; RV32-NEXT: [[TMP20:%.*]] = mul i32 -1, [[TMP19]]
|
|
; RV32-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP16]], i32 [[TMP17]]
|
|
; RV32-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP18]], i32 [[TMP20]]
|
|
; RV32-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
|
|
; RV32-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE1]], ptr align 4 [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
|
|
; RV32-NEXT: [[TMP23:%.*]] = zext i32 [[TMP9]] to i64
|
|
; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP23]], [[INDEX]]
|
|
; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP23]]
|
|
; RV32-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023
|
|
; RV32-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
|
|
; RV32: [[MIDDLE_BLOCK]]:
|
|
; RV32-NEXT: br [[EXIT:label %.*]]
|
|
; RV32: [[SCALAR_PH]]:
|
|
; RV32-NEXT: br label %[[FOR_BODY:.*]]
|
|
; RV32: [[FOR_BODY]]:
|
|
;
|
|
; RV64-UF2-LABEL: define void @vector_reverse_i32(
|
|
; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
; RV64-UF2-NEXT: [[ENTRY:.*]]:
|
|
; RV64-UF2-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
|
|
; RV64-UF2-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 3
|
|
; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
|
|
; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
|
|
; RV64-UF2: [[VECTOR_PH]]:
|
|
; RV64-UF2-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
|
|
; RV64-UF2-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
|
|
; RV64-UF2-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2
|
|
; RV64-UF2-NEXT: [[N_VEC:%.*]] = urem i64 1023, [[TMP6]]
|
|
; RV64-UF2-NEXT: [[TMP7:%.*]] = sub i64 1023, [[N_VEC]]
|
|
; RV64-UF2-NEXT: [[TMP33:%.*]] = sub i64 1023, [[TMP7]]
|
|
; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
|
|
; RV64-UF2: [[VECTOR_BODY]]:
|
|
; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
|
|
; RV64-UF2-NEXT: [[TMP8:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
|
|
; RV64-UF2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP8]]
|
|
; RV64-UF2-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP5]]
|
|
; RV64-UF2-NEXT: [[TMP11:%.*]] = sub i64 [[TMP5]], 1
|
|
; RV64-UF2-NEXT: [[TMP12:%.*]] = mul i64 -1, [[TMP11]]
|
|
; RV64-UF2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i64 [[TMP10]]
|
|
; RV64-UF2-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i64 [[TMP12]]
|
|
; RV64-UF2-NEXT: [[TMP15:%.*]] = mul i64 -1, [[TMP5]]
|
|
; RV64-UF2-NEXT: [[TMP16:%.*]] = sub i64 [[TMP5]], 1
|
|
; RV64-UF2-NEXT: [[TMP17:%.*]] = mul i64 -1, [[TMP16]]
|
|
; RV64-UF2-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i64 [[TMP15]]
|
|
; RV64-UF2-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP18]], i64 [[TMP17]]
|
|
; RV64-UF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
|
|
; RV64-UF2-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
|
|
; RV64-UF2-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i32>, ptr [[TMP19]], align 4
|
|
; RV64-UF2-NEXT: [[REVERSE2:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD1]])
|
|
; RV64-UF2-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
|
|
; RV64-UF2-NEXT: [[TMP21:%.*]] = add <vscale x 4 x i32> [[REVERSE2]], splat (i32 1)
|
|
; RV64-UF2-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP8]]
|
|
; RV64-UF2-NEXT: [[TMP23:%.*]] = mul i64 0, [[TMP5]]
|
|
; RV64-UF2-NEXT: [[TMP24:%.*]] = sub i64 [[TMP5]], 1
|
|
; RV64-UF2-NEXT: [[TMP25:%.*]] = mul i64 -1, [[TMP24]]
|
|
; RV64-UF2-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i64 [[TMP23]]
|
|
; RV64-UF2-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[TMP26]], i64 [[TMP25]]
|
|
; RV64-UF2-NEXT: [[TMP28:%.*]] = mul i64 -1, [[TMP5]]
|
|
; RV64-UF2-NEXT: [[TMP29:%.*]] = sub i64 [[TMP5]], 1
|
|
; RV64-UF2-NEXT: [[TMP30:%.*]] = mul i64 -1, [[TMP29]]
|
|
; RV64-UF2-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[TMP22]], i64 [[TMP28]]
|
|
; RV64-UF2-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[TMP31]], i64 [[TMP30]]
|
|
; RV64-UF2-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP20]])
|
|
; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE3]], ptr [[TMP27]], align 4
|
|
; RV64-UF2-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP21]])
|
|
; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE4]], ptr [[TMP32]], align 4
|
|
; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
|
|
; RV64-UF2-NEXT: [[TMP34:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP7]]
|
|
; RV64-UF2-NEXT: br i1 [[TMP34]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
|
|
; RV64-UF2: [[MIDDLE_BLOCK]]:
|
|
; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[TMP7]]
|
|
; RV64-UF2-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
|
|
; RV64-UF2: [[SCALAR_PH]]:
|
|
; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP33]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
|
|
; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
|
|
; RV64-UF2: [[FOR_BODY]]:
|
|
;
|
|
entry:
  br label %for.body

for.body:
  %dec.iv = phi i64 [ 1023, %entry ], [ %iv.next, %for.body ]
  %iv.next = add nsw i64 %dec.iv, -1
  %arrayidx.b = getelementptr inbounds i32, ptr %B, i64 %iv.next
  %0 = load i32, ptr %arrayidx.b, align 4
  %add = add i32 %0, 1
  %arrayidx.a = getelementptr inbounds i32, ptr %A, i64 %iv.next
  store i32 %add, ptr %arrayidx.a, align 4
  %cmp = icmp ugt i64 %dec.iv, 1
  br i1 %cmp, label %for.body, label %exit, !llvm.loop !0

exit:
  ret void
}

define void @vector_reverse_i64(ptr nocapture noundef writeonly %A, ptr nocapture noundef readonly %B, i32 noundef signext %n) {
|
|
; RV64-LABEL: define void @vector_reverse_i64(
|
|
; RV64-SAME: ptr noundef writeonly captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
|
|
; RV64-NEXT: [[ENTRY:.*:]]
|
|
; RV64-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
|
|
; RV64-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
|
|
; RV64-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N]], 0
|
|
; RV64-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
|
|
; RV64: [[FOR_BODY_PREHEADER]]:
|
|
; RV64-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
|
|
; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
|
|
; RV64: [[VECTOR_SCEVCHECK]]:
|
|
; RV64-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1
|
|
; RV64-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
|
|
; RV64-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32
|
|
; RV64-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP5]])
|
|
; RV64-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
|
|
; RV64-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
|
|
; RV64-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[MUL_RESULT]]
|
|
; RV64-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP4]]
|
|
; RV64-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
|
|
; RV64-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP3]], 4294967295
|
|
; RV64-NEXT: [[TMP10:%.*]] = or i1 [[TMP8]], [[TMP9]]
|
|
; RV64-NEXT: br i1 [[TMP10]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
|
|
; RV64: [[VECTOR_MEMCHECK]]:
|
|
; RV64-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
|
|
; RV64-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
|
|
; RV64-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 4
|
|
; RV64-NEXT: [[TMP14:%.*]] = sub i64 [[B1]], [[A2]]
|
|
; RV64-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]]
|
|
; RV64-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
|
|
; RV64: [[VECTOR_PH]]:
|
|
; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
|
|
; RV64: [[VECTOR_BODY]]:
|
|
; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV64-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV64-NEXT: [[TMP20:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
|
|
; RV64-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
|
|
; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
|
|
; RV64-NEXT: [[TMP21:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
|
|
; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
|
|
; RV64-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP22]]
|
|
; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP20]] to i64
|
|
; RV64-NEXT: [[TMP28:%.*]] = mul i64 0, [[TMP24]]
|
|
; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1
|
|
; RV64-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP25]]
|
|
; RV64-NEXT: [[TMP38:%.*]] = getelementptr i32, ptr [[TMP23]], i64 [[TMP28]]
|
|
; RV64-NEXT: [[TMP27:%.*]] = getelementptr i32, ptr [[TMP38]], i64 [[TMP26]]
|
|
; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP27]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
|
|
; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
|
|
; RV64-NEXT: [[TMP29:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
|
|
; RV64-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP22]]
|
|
; RV64-NEXT: [[TMP39:%.*]] = zext i32 [[TMP20]] to i64
|
|
; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP39]]
|
|
; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP39]], 1
|
|
; RV64-NEXT: [[TMP33:%.*]] = mul i64 -1, [[TMP32]]
|
|
; RV64-NEXT: [[TMP34:%.*]] = getelementptr i32, ptr [[TMP30]], i64 [[TMP31]]
|
|
; RV64-NEXT: [[TMP35:%.*]] = getelementptr i32, ptr [[TMP34]], i64 [[TMP33]]
|
|
; RV64-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[TMP29]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
|
|
; RV64-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE3]], ptr align 4 [[TMP35]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
|
|
; RV64-NEXT: [[TMP36:%.*]] = zext i32 [[TMP20]] to i64
|
|
; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP36]], [[INDEX]]
|
|
; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP36]]
|
|
; RV64-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
|
|
; RV64-NEXT: br i1 [[TMP37]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
|
|
; RV64: [[MIDDLE_BLOCK]]:
|
|
; RV64-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
|
|
; RV64: [[SCALAR_PH]]:
|
|
; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
|
|
; RV64-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
|
|
; RV64-NEXT: br label %[[FOR_BODY:.*]]
|
|
; RV64: [[FOR_COND_CLEANUP_LOOPEXIT]]:
|
|
; RV64-NEXT: br label %[[FOR_COND_CLEANUP]]
|
|
; RV64: [[FOR_COND_CLEANUP]]:
|
|
; RV64-NEXT: ret void
|
|
; RV64: [[FOR_BODY]]:
|
|
;
|
|
; RV32-LABEL: define void @vector_reverse_i64(
|
|
; RV32-SAME: ptr noundef writeonly captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
|
|
; RV32-NEXT: [[ENTRY:.*:]]
|
|
; RV32-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i32
|
|
; RV32-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i32
|
|
; RV32-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N]], 0
|
|
; RV32-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
|
|
; RV32: [[FOR_BODY_PREHEADER]]:
|
|
; RV32-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
|
|
; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
|
|
; RV32: [[VECTOR_MEMCHECK]]:
|
|
; RV32-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
|
|
; RV32-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4
|
|
; RV32-NEXT: [[TMP5:%.*]] = mul i32 [[TMP4]], 4
|
|
; RV32-NEXT: [[TMP6:%.*]] = sub i32 [[B1]], [[A2]]
|
|
; RV32-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i32 [[TMP6]], [[TMP5]]
|
|
; RV32-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
|
|
; RV32: [[VECTOR_PH]]:
|
|
; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
|
|
; RV32: [[VECTOR_BODY]]:
|
|
; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV32-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV32-NEXT: [[TMP16:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
|
|
; RV32-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
|
|
; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
|
|
; RV32-NEXT: [[TMP13:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
|
|
; RV32-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
|
|
; RV32-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP14]]
|
|
; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP16]]
|
|
; RV32-NEXT: [[TMP18:%.*]] = sub i32 [[TMP16]], 1
|
|
; RV32-NEXT: [[TMP19:%.*]] = mul i32 -1, [[TMP18]]
|
|
; RV32-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[TMP15]], i32 [[TMP17]]
|
|
; RV32-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[TMP20]], i32 [[TMP19]]
|
|
; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP28]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
|
|
; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
|
|
; RV32-NEXT: [[TMP22:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
|
|
; RV32-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP14]]
|
|
; RV32-NEXT: [[TMP21:%.*]] = mul i32 0, [[TMP16]]
|
|
; RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP16]], 1
|
|
; RV32-NEXT: [[TMP27:%.*]] = mul i32 -1, [[TMP26]]
|
|
; RV32-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[TMP23]], i32 [[TMP21]]
|
|
; RV32-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[TMP24]], i32 [[TMP27]]
|
|
; RV32-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
|
|
; RV32-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE3]], ptr align 4 [[TMP25]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
|
|
; RV32-NEXT: [[TMP29:%.*]] = zext i32 [[TMP16]] to i64
|
|
; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP29]], [[INDEX]]
|
|
; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP29]]
|
|
; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
|
|
; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
|
|
; RV32: [[MIDDLE_BLOCK]]:
|
|
; RV32-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
|
|
; RV32: [[SCALAR_PH]]:
|
|
; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
|
|
; RV32-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
|
|
; RV32-NEXT: br label %[[FOR_BODY:.*]]
|
|
; RV32: [[FOR_COND_CLEANUP_LOOPEXIT]]:
|
|
; RV32-NEXT: br label %[[FOR_COND_CLEANUP]]
|
|
; RV32: [[FOR_COND_CLEANUP]]:
|
|
; RV32-NEXT: ret void
|
|
; RV32: [[FOR_BODY]]:
|
|
;
|
|
; RV64-UF2-LABEL: define void @vector_reverse_i64(
|
|
; RV64-UF2-SAME: ptr noundef writeonly captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
|
|
; RV64-UF2-NEXT: [[ENTRY:.*:]]
|
|
; RV64-UF2-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
|
|
; RV64-UF2-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
|
|
; RV64-UF2-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N]], 0
|
|
; RV64-UF2-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
|
|
; RV64-UF2: [[FOR_BODY_PREHEADER]]:
|
|
; RV64-UF2-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
|
|
; RV64-UF2-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
|
|
; RV64-UF2-NEXT: [[TMP2:%.*]] = shl nuw i64 [[TMP1]], 3
|
|
; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
|
|
; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
|
|
; RV64-UF2: [[VECTOR_SCEVCHECK]]:
|
|
; RV64-UF2-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1
|
|
; RV64-UF2-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
|
|
; RV64-UF2-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32
|
|
; RV64-UF2-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP5]])
|
|
; RV64-UF2-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
|
|
; RV64-UF2-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
|
|
; RV64-UF2-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[MUL_RESULT]]
|
|
; RV64-UF2-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP4]]
|
|
; RV64-UF2-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
|
|
; RV64-UF2-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP3]], 4294967295
|
|
; RV64-UF2-NEXT: [[TMP10:%.*]] = or i1 [[TMP8]], [[TMP9]]
|
|
; RV64-UF2-NEXT: br i1 [[TMP10]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
|
|
; RV64-UF2: [[VECTOR_MEMCHECK]]:
|
|
; RV64-UF2-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
|
|
; RV64-UF2-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
|
|
; RV64-UF2-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 8
|
|
; RV64-UF2-NEXT: [[TMP14:%.*]] = sub i64 [[B1]], [[A2]]
|
|
; RV64-UF2-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]]
|
|
; RV64-UF2-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
|
|
; RV64-UF2: [[VECTOR_PH]]:
|
|
; RV64-UF2-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
|
|
; RV64-UF2-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4
|
|
; RV64-UF2-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 2
|
|
; RV64-UF2-NEXT: [[N_VEC:%.*]] = urem i64 [[TMP0]], [[TMP19]]
|
|
; RV64-UF2-NEXT: [[TMP20:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
|
|
; RV64-UF2-NEXT: [[TMP48:%.*]] = sub i64 [[TMP0]], [[TMP20]]
|
|
; RV64-UF2-NEXT: [[DOTCAST:%.*]] = trunc i64 [[TMP20]] to i32
|
|
; RV64-UF2-NEXT: [[TMP21:%.*]] = sub i32 [[N]], [[DOTCAST]]
|
|
; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
|
|
; RV64-UF2: [[VECTOR_BODY]]:
|
|
; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV64-UF2-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
|
|
; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
|
|
; RV64-UF2-NEXT: [[TMP22:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
|
|
; RV64-UF2-NEXT: [[TMP23:%.*]] = zext i32 [[TMP22]] to i64
|
|
; RV64-UF2-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP23]]
|
|
; RV64-UF2-NEXT: [[TMP25:%.*]] = mul i64 0, [[TMP18]]
|
|
; RV64-UF2-NEXT: [[TMP26:%.*]] = sub i64 [[TMP18]], 1
|
|
; RV64-UF2-NEXT: [[TMP27:%.*]] = mul i64 -1, [[TMP26]]
|
|
; RV64-UF2-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i64 [[TMP25]]
|
|
; RV64-UF2-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP28]], i64 [[TMP27]]
|
|
; RV64-UF2-NEXT: [[TMP30:%.*]] = mul i64 -1, [[TMP18]]
|
|
; RV64-UF2-NEXT: [[TMP31:%.*]] = sub i64 [[TMP18]], 1
|
|
; RV64-UF2-NEXT: [[TMP32:%.*]] = mul i64 -1, [[TMP31]]
|
|
; RV64-UF2-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i64 [[TMP30]]
|
|
; RV64-UF2-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[TMP33]], i64 [[TMP32]]
|
|
; RV64-UF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP29]], align 4
|
|
; RV64-UF2-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
|
|
; RV64-UF2-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x i32>, ptr [[TMP34]], align 4
|
|
; RV64-UF2-NEXT: [[REVERSE5:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD4]])
|
|
; RV64-UF2-NEXT: [[TMP35:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
|
|
; RV64-UF2-NEXT: [[TMP36:%.*]] = add <vscale x 4 x i32> [[REVERSE5]], splat (i32 1)
|
|
; RV64-UF2-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP23]]
|
|
; RV64-UF2-NEXT: [[TMP38:%.*]] = mul i64 0, [[TMP18]]
|
|
; RV64-UF2-NEXT: [[TMP39:%.*]] = sub i64 [[TMP18]], 1
|
|
; RV64-UF2-NEXT: [[TMP40:%.*]] = mul i64 -1, [[TMP39]]
|
|
; RV64-UF2-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[TMP37]], i64 [[TMP38]]
|
|
; RV64-UF2-NEXT: [[TMP42:%.*]] = getelementptr inbounds i32, ptr [[TMP41]], i64 [[TMP40]]
|
|
; RV64-UF2-NEXT: [[TMP43:%.*]] = mul i64 -1, [[TMP18]]
|
|
; RV64-UF2-NEXT: [[TMP44:%.*]] = sub i64 [[TMP18]], 1
|
|
; RV64-UF2-NEXT: [[TMP45:%.*]] = mul i64 -1, [[TMP44]]
|
|
; RV64-UF2-NEXT: [[TMP46:%.*]] = getelementptr inbounds i32, ptr [[TMP37]], i64 [[TMP43]]
|
|
; RV64-UF2-NEXT: [[TMP47:%.*]] = getelementptr inbounds i32, ptr [[TMP46]], i64 [[TMP45]]
|
|
; RV64-UF2-NEXT: [[REVERSE6:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP35]])
|
|
; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE6]], ptr [[TMP42]], align 4
|
|
; RV64-UF2-NEXT: [[REVERSE7:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP36]])
|
|
; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE7]], ptr [[TMP47]], align 4
|
|
; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
|
|
; RV64-UF2-NEXT: [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP20]]
|
|
; RV64-UF2-NEXT: br i1 [[TMP49]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
|
|
; RV64-UF2: [[MIDDLE_BLOCK]]:
|
|
; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[TMP20]]
|
|
; RV64-UF2-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
|
|
; RV64-UF2: [[SCALAR_PH]]:
|
|
; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP48]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
|
|
; RV64-UF2-NEXT: [[BC_RESUME_VAL8:%.*]] = phi i32 [ [[TMP21]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
|
|
; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
|
|
; RV64-UF2: [[FOR_COND_CLEANUP_LOOPEXIT]]:
|
|
; RV64-UF2-NEXT: br label %[[FOR_COND_CLEANUP]]
|
|
; RV64-UF2: [[FOR_COND_CLEANUP]]:
|
|
; RV64-UF2-NEXT: ret void
|
|
; RV64-UF2: [[FOR_BODY]]:
|
|
;
|
|
entry:
  %cmp7 = icmp sgt i32 %n, 0
  br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup

for.body.preheader:                               ; preds = %entry
  %0 = zext i32 %n to i64
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
  %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ]
  %i.0 = add nsw i32 %i.0.in8, -1
  %idxprom = zext i32 %i.0 to i64
  %arrayidx = getelementptr inbounds i32, ptr %B, i64 %idxprom
  %1 = load i32, ptr %arrayidx, align 4
  %add9 = add i32 %1, 1
  %arrayidx3 = getelementptr inbounds i32, ptr %A, i64 %idxprom
  store i32 %add9, ptr %arrayidx3, align 4
  %cmp = icmp ugt i64 %indvars.iv, 1
  %indvars.iv.next = add nsw i64 %indvars.iv, -1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}

define void @vector_reverse_f32(ptr nocapture noundef writeonly %A, ptr nocapture noundef readonly %B, i32 noundef signext %n) {
|
|
; RV64-LABEL: define void @vector_reverse_f32(
|
|
; RV64-SAME: ptr noundef writeonly captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
|
|
; RV64-NEXT: [[ENTRY:.*:]]
|
|
; RV64-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
|
|
; RV64-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
|
|
; RV64-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N]], 0
|
|
; RV64-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
|
|
; RV64: [[FOR_BODY_PREHEADER]]:
|
|
; RV64-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
|
|
; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
|
|
; RV64: [[VECTOR_SCEVCHECK]]:
|
|
; RV64-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1
|
|
; RV64-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
|
|
; RV64-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32
|
|
; RV64-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP5]])
|
|
; RV64-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
|
|
; RV64-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
|
|
; RV64-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[MUL_RESULT]]
|
|
; RV64-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP4]]
|
|
; RV64-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
|
|
; RV64-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP3]], 4294967295
|
|
; RV64-NEXT: [[TMP10:%.*]] = or i1 [[TMP8]], [[TMP9]]
|
|
; RV64-NEXT: br i1 [[TMP10]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
|
|
; RV64: [[VECTOR_MEMCHECK]]:
|
|
; RV64-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
|
|
; RV64-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
|
|
; RV64-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 4
|
|
; RV64-NEXT: [[TMP14:%.*]] = sub i64 [[B1]], [[A2]]
|
|
; RV64-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]]
|
|
; RV64-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
|
|
; RV64: [[VECTOR_PH]]:
|
|
; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
|
|
; RV64: [[VECTOR_BODY]]:
|
|
; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV64-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV64-NEXT: [[TMP20:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
|
|
; RV64-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
|
|
; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
|
|
; RV64-NEXT: [[TMP21:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
|
|
; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP21]] to i64
|
|
; RV64-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP22]]
|
|
; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP20]] to i64
|
|
; RV64-NEXT: [[TMP28:%.*]] = mul i64 0, [[TMP24]]
|
|
; RV64-NEXT: [[TMP25:%.*]] = sub i64 [[TMP24]], 1
|
|
; RV64-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP25]]
|
|
; RV64-NEXT: [[TMP38:%.*]] = getelementptr float, ptr [[TMP23]], i64 [[TMP28]]
|
|
; RV64-NEXT: [[TMP27:%.*]] = getelementptr float, ptr [[TMP38]], i64 [[TMP26]]
|
|
; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP27]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
|
|
; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
|
|
; RV64-NEXT: [[TMP29:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
|
|
; RV64-NEXT: [[TMP30:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP22]]
|
|
; RV64-NEXT: [[TMP39:%.*]] = zext i32 [[TMP20]] to i64
|
|
; RV64-NEXT: [[TMP31:%.*]] = mul i64 0, [[TMP39]]
|
|
; RV64-NEXT: [[TMP32:%.*]] = sub i64 [[TMP39]], 1
|
|
; RV64-NEXT: [[TMP33:%.*]] = mul i64 -1, [[TMP32]]
|
|
; RV64-NEXT: [[TMP34:%.*]] = getelementptr float, ptr [[TMP30]], i64 [[TMP31]]
|
|
; RV64-NEXT: [[TMP35:%.*]] = getelementptr float, ptr [[TMP34]], i64 [[TMP33]]
|
|
; RV64-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP29]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
|
|
; RV64-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE3]], ptr align 4 [[TMP35]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP20]])
|
|
; RV64-NEXT: [[TMP36:%.*]] = zext i32 [[TMP20]] to i64
|
|
; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP36]], [[INDEX]]
|
|
; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP36]]
|
|
; RV64-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
|
|
; RV64-NEXT: br i1 [[TMP37]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
|
|
; RV64: [[MIDDLE_BLOCK]]:
|
|
; RV64-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
|
|
; RV64: [[SCALAR_PH]]:
|
|
; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
|
|
; RV64-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
|
|
; RV64-NEXT: br label %[[FOR_BODY:.*]]
|
|
; RV64: [[FOR_COND_CLEANUP_LOOPEXIT]]:
|
|
; RV64-NEXT: br label %[[FOR_COND_CLEANUP]]
|
|
; RV64: [[FOR_COND_CLEANUP]]:
|
|
; RV64-NEXT: ret void
|
|
; RV64: [[FOR_BODY]]:
|
|
;
|
|
; RV32-LABEL: define void @vector_reverse_f32(
|
|
; RV32-SAME: ptr noundef writeonly captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
|
|
; RV32-NEXT: [[ENTRY:.*:]]
|
|
; RV32-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i32
|
|
; RV32-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i32
|
|
; RV32-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N]], 0
|
|
; RV32-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
|
|
; RV32: [[FOR_BODY_PREHEADER]]:
|
|
; RV32-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
|
|
; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
|
|
; RV32: [[VECTOR_MEMCHECK]]:
|
|
; RV32-NEXT: [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
|
|
; RV32-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 4
|
|
; RV32-NEXT: [[TMP5:%.*]] = mul i32 [[TMP4]], 4
|
|
; RV32-NEXT: [[TMP6:%.*]] = sub i32 [[B1]], [[A2]]
|
|
; RV32-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i32 [[TMP6]], [[TMP5]]
|
|
; RV32-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
|
|
; RV32: [[VECTOR_PH]]:
|
|
; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
|
|
; RV32: [[VECTOR_BODY]]:
|
|
; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV32-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV32-NEXT: [[TMP16:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
|
|
; RV32-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
|
|
; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
|
|
; RV32-NEXT: [[TMP13:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
|
|
; RV32-NEXT: [[TMP14:%.*]] = zext i32 [[TMP13]] to i64
|
|
; RV32-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP14]]
|
|
; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP16]]
|
|
; RV32-NEXT: [[TMP18:%.*]] = sub i32 [[TMP16]], 1
|
|
; RV32-NEXT: [[TMP19:%.*]] = mul i32 -1, [[TMP18]]
|
|
; RV32-NEXT: [[TMP20:%.*]] = getelementptr float, ptr [[TMP15]], i32 [[TMP17]]
|
|
; RV32-NEXT: [[TMP28:%.*]] = getelementptr float, ptr [[TMP20]], i32 [[TMP19]]
|
|
; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP28]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
|
|
; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
|
|
; RV32-NEXT: [[TMP22:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
|
|
; RV32-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP14]]
|
|
; RV32-NEXT: [[TMP21:%.*]] = mul i32 0, [[TMP16]]
|
|
; RV32-NEXT: [[TMP26:%.*]] = sub i32 [[TMP16]], 1
|
|
; RV32-NEXT: [[TMP27:%.*]] = mul i32 -1, [[TMP26]]
|
|
; RV32-NEXT: [[TMP24:%.*]] = getelementptr float, ptr [[TMP23]], i32 [[TMP21]]
|
|
; RV32-NEXT: [[TMP25:%.*]] = getelementptr float, ptr [[TMP24]], i32 [[TMP27]]
|
|
; RV32-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
|
|
; RV32-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE3]], ptr align 4 [[TMP25]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP16]])
|
|
; RV32-NEXT: [[TMP29:%.*]] = zext i32 [[TMP16]] to i64
|
|
; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP29]], [[INDEX]]
|
|
; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP29]]
|
|
; RV32-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
|
|
; RV32-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
|
|
; RV32: [[MIDDLE_BLOCK]]:
|
|
; RV32-NEXT: br label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]]
|
|
; RV32: [[SCALAR_PH]]:
|
|
; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
|
|
; RV32-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
|
|
; RV32-NEXT: br label %[[FOR_BODY:.*]]
|
|
; RV32: [[FOR_COND_CLEANUP_LOOPEXIT]]:
|
|
; RV32-NEXT: br label %[[FOR_COND_CLEANUP]]
|
|
; RV32: [[FOR_COND_CLEANUP]]:
|
|
; RV32-NEXT: ret void
|
|
; RV32: [[FOR_BODY]]:
|
|
;
|
|
; RV64-UF2-LABEL: define void @vector_reverse_f32(
|
|
; RV64-UF2-SAME: ptr noundef writeonly captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
|
|
; RV64-UF2-NEXT: [[ENTRY:.*:]]
|
|
; RV64-UF2-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
|
|
; RV64-UF2-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
|
|
; RV64-UF2-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[N]], 0
|
|
; RV64-UF2-NEXT: br i1 [[CMP7]], label %[[FOR_BODY_PREHEADER:.*]], label %[[FOR_COND_CLEANUP:.*]]
|
|
; RV64-UF2: [[FOR_BODY_PREHEADER]]:
|
|
; RV64-UF2-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
|
|
; RV64-UF2-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
|
|
; RV64-UF2-NEXT: [[TMP2:%.*]] = shl nuw i64 [[TMP1]], 3
|
|
; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
|
|
; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
|
|
; RV64-UF2: [[VECTOR_SCEVCHECK]]:
|
|
; RV64-UF2-NEXT: [[TMP3:%.*]] = add nsw i64 [[TMP0]], -1
|
|
; RV64-UF2-NEXT: [[TMP4:%.*]] = add i32 [[N]], -1
|
|
; RV64-UF2-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP3]] to i32
|
|
; RV64-UF2-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 [[TMP5]])
|
|
; RV64-UF2-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
|
|
; RV64-UF2-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
|
|
; RV64-UF2-NEXT: [[TMP6:%.*]] = sub i32 [[TMP4]], [[MUL_RESULT]]
|
|
; RV64-UF2-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP4]]
|
|
; RV64-UF2-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
|
|
; RV64-UF2-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP3]], 4294967295
|
|
; RV64-UF2-NEXT: [[TMP10:%.*]] = or i1 [[TMP8]], [[TMP9]]
|
|
; RV64-UF2-NEXT: br i1 [[TMP10]], label %[[SCALAR_PH]], label %[[VECTOR_MEMCHECK:.*]]
|
|
; RV64-UF2: [[VECTOR_MEMCHECK]]:
|
|
; RV64-UF2-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
|
|
; RV64-UF2-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
|
|
; RV64-UF2-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 8
|
|
; RV64-UF2-NEXT: [[TMP14:%.*]] = sub i64 [[B1]], [[A2]]
|
|
; RV64-UF2-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP14]], [[TMP13]]
|
|
; RV64-UF2-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
|
|
; RV64-UF2: [[VECTOR_PH]]:
|
|
; RV64-UF2-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
|
|
; RV64-UF2-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4
|
|
; RV64-UF2-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 2
|
|
; RV64-UF2-NEXT: [[N_VEC:%.*]] = urem i64 [[TMP0]], [[TMP19]]
|
|
; RV64-UF2-NEXT: [[TMP20:%.*]] = sub i64 [[TMP0]], [[N_VEC]]
|
|
; RV64-UF2-NEXT: [[TMP48:%.*]] = sub i64 [[TMP0]], [[TMP20]]
|
|
; RV64-UF2-NEXT: [[DOTCAST:%.*]] = trunc i64 [[TMP20]] to i32
|
|
; RV64-UF2-NEXT: [[TMP21:%.*]] = sub i32 [[N]], [[DOTCAST]]
|
|
; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
|
|
; RV64-UF2: [[VECTOR_BODY]]:
|
|
; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV64-UF2-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i32
|
|
; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[N]], [[DOTCAST3]]
|
|
; RV64-UF2-NEXT: [[TMP22:%.*]] = add nsw i32 [[OFFSET_IDX]], -1
|
|
; RV64-UF2-NEXT: [[TMP23:%.*]] = zext i32 [[TMP22]] to i64
|
|
; RV64-UF2-NEXT: [[TMP24:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP23]]
|
|
; RV64-UF2-NEXT: [[TMP25:%.*]] = mul i64 0, [[TMP18]]
|
|
; RV64-UF2-NEXT: [[TMP26:%.*]] = sub i64 [[TMP18]], 1
|
|
; RV64-UF2-NEXT: [[TMP27:%.*]] = mul i64 -1, [[TMP26]]
|
|
; RV64-UF2-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 [[TMP25]]
|
|
; RV64-UF2-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP28]], i64 [[TMP27]]
|
|
; RV64-UF2-NEXT: [[TMP30:%.*]] = mul i64 -1, [[TMP18]]
|
|
; RV64-UF2-NEXT: [[TMP31:%.*]] = sub i64 [[TMP18]], 1
|
|
; RV64-UF2-NEXT: [[TMP32:%.*]] = mul i64 -1, [[TMP31]]
|
|
; RV64-UF2-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 [[TMP30]]
|
|
; RV64-UF2-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP33]], i64 [[TMP32]]
|
|
; RV64-UF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP29]], align 4
|
|
; RV64-UF2-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
|
|
; RV64-UF2-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x float>, ptr [[TMP34]], align 4
|
|
; RV64-UF2-NEXT: [[REVERSE5:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD4]])
|
|
; RV64-UF2-NEXT: [[TMP35:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
|
|
; RV64-UF2-NEXT: [[TMP36:%.*]] = fadd <vscale x 4 x float> [[REVERSE5]], splat (float 1.000000e+00)
|
|
; RV64-UF2-NEXT: [[TMP37:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP23]]
|
|
; RV64-UF2-NEXT: [[TMP38:%.*]] = mul i64 0, [[TMP18]]
|
|
; RV64-UF2-NEXT: [[TMP39:%.*]] = sub i64 [[TMP18]], 1
|
|
; RV64-UF2-NEXT: [[TMP40:%.*]] = mul i64 -1, [[TMP39]]
|
|
; RV64-UF2-NEXT: [[TMP41:%.*]] = getelementptr inbounds float, ptr [[TMP37]], i64 [[TMP38]]
|
|
; RV64-UF2-NEXT: [[TMP42:%.*]] = getelementptr inbounds float, ptr [[TMP41]], i64 [[TMP40]]
|
|
; RV64-UF2-NEXT: [[TMP43:%.*]] = mul i64 -1, [[TMP18]]
|
|
; RV64-UF2-NEXT: [[TMP44:%.*]] = sub i64 [[TMP18]], 1
|
|
; RV64-UF2-NEXT: [[TMP45:%.*]] = mul i64 -1, [[TMP44]]
|
|
; RV64-UF2-NEXT: [[TMP46:%.*]] = getelementptr inbounds float, ptr [[TMP37]], i64 [[TMP43]]
|
|
; RV64-UF2-NEXT: [[TMP47:%.*]] = getelementptr inbounds float, ptr [[TMP46]], i64 [[TMP45]]
|
|
; RV64-UF2-NEXT: [[REVERSE6:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP35]])
|
|
; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE6]], ptr [[TMP42]], align 4
|
|
; RV64-UF2-NEXT: [[REVERSE7:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP36]])
|
|
; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE7]], ptr [[TMP47]], align 4
|
|
; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]]
|
|
; RV64-UF2-NEXT: [[TMP49:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP20]]
|
|
; RV64-UF2-NEXT: br i1 [[TMP49]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
|
|
; RV64-UF2: [[MIDDLE_BLOCK]]:
|
|
; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[TMP20]]
|
|
; RV64-UF2-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
|
|
; RV64-UF2: [[SCALAR_PH]]:
|
|
; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP48]], %[[MIDDLE_BLOCK]] ], [ [[TMP0]], %[[FOR_BODY_PREHEADER]] ], [ [[TMP0]], %[[VECTOR_SCEVCHECK]] ], [ [[TMP0]], %[[VECTOR_MEMCHECK]] ]
|
|
; RV64-UF2-NEXT: [[BC_RESUME_VAL8:%.*]] = phi i32 [ [[TMP21]], %[[MIDDLE_BLOCK]] ], [ [[N]], %[[FOR_BODY_PREHEADER]] ], [ [[N]], %[[VECTOR_SCEVCHECK]] ], [ [[N]], %[[VECTOR_MEMCHECK]] ]
|
|
; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
|
|
; RV64-UF2: [[FOR_COND_CLEANUP_LOOPEXIT]]:
|
|
; RV64-UF2-NEXT: br label %[[FOR_COND_CLEANUP]]
|
|
; RV64-UF2: [[FOR_COND_CLEANUP]]:
|
|
; RV64-UF2-NEXT: ret void
|
|
; RV64-UF2: [[FOR_BODY]]:
|
|
;
|
|
entry:
  %cmp7 = icmp sgt i32 %n, 0
  br i1 %cmp7, label %for.body.preheader, label %for.cond.cleanup

for.body.preheader:                               ; preds = %entry
  %0 = zext i32 %n to i64
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
  %i.0.in8 = phi i32 [ %n, %for.body.preheader ], [ %i.0, %for.body ]
  %i.0 = add nsw i32 %i.0.in8, -1
  %idxprom = zext i32 %i.0 to i64
  %arrayidx = getelementptr inbounds float, ptr %B, i64 %idxprom
  %1 = load float, ptr %arrayidx, align 4
  %conv1 = fadd float %1, 1.000000e+00
  %arrayidx3 = getelementptr inbounds float, ptr %A, i64 %idxprom
  store float %conv1, ptr %arrayidx3, align 4
  %cmp = icmp ugt i64 %indvars.iv, 1
  %indvars.iv.next = add nsw i64 %indvars.iv, -1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}

define void @vector_reverse_f32_simplify(ptr noalias %A, ptr noalias %B) {
|
|
; RV64-LABEL: define void @vector_reverse_f32_simplify(
|
|
; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
|
|
; RV64-NEXT: [[ENTRY:.*:]]
|
|
; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
|
|
; RV64: [[VECTOR_PH]]:
|
|
; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
|
|
; RV64: [[VECTOR_BODY]]:
|
|
; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV64-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV64-NEXT: [[TMP19:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
|
|
; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
|
|
; RV64-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
|
|
; RV64-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP7]]
|
|
; RV64-NEXT: [[TMP24:%.*]] = zext i32 [[TMP19]] to i64
|
|
; RV64-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP24]]
|
|
; RV64-NEXT: [[TMP10:%.*]] = sub i64 [[TMP24]], 1
|
|
; RV64-NEXT: [[TMP11:%.*]] = mul i64 -1, [[TMP10]]
|
|
; RV64-NEXT: [[TMP12:%.*]] = getelementptr float, ptr [[TMP8]], i64 [[TMP9]]
|
|
; RV64-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[TMP12]], i64 [[TMP11]]
|
|
; RV64-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
|
|
; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
|
|
; RV64-NEXT: [[TMP14:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
|
|
; RV64-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP7]]
|
|
; RV64-NEXT: [[TMP16:%.*]] = zext i32 [[TMP19]] to i64
|
|
; RV64-NEXT: [[TMP25:%.*]] = mul i64 0, [[TMP16]]
|
|
; RV64-NEXT: [[TMP17:%.*]] = sub i64 [[TMP16]], 1
|
|
; RV64-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP17]]
|
|
; RV64-NEXT: [[TMP20:%.*]] = getelementptr float, ptr [[TMP15]], i64 [[TMP25]]
|
|
; RV64-NEXT: [[TMP21:%.*]] = getelementptr float, ptr [[TMP20]], i64 [[TMP18]]
|
|
; RV64-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
|
|
; RV64-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE1]], ptr align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP19]])
|
|
; RV64-NEXT: [[TMP22:%.*]] = zext i32 [[TMP19]] to i64
|
|
; RV64-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP22]], [[INDEX]]
|
|
; RV64-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP22]]
|
|
; RV64-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023
|
|
; RV64-NEXT: br i1 [[TMP23]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
|
|
; RV64: [[MIDDLE_BLOCK]]:
|
|
; RV64-NEXT: br [[EXIT:label %.*]]
|
|
; RV64: [[SCALAR_PH]]:
|
|
; RV64-NEXT: br label %[[FOR_BODY:.*]]
|
|
; RV64: [[FOR_BODY]]:
|
|
;
|
|
; RV32-LABEL: define void @vector_reverse_f32_simplify(
|
|
; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
|
|
; RV32-NEXT: [[ENTRY:.*:]]
|
|
; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
|
|
; RV32: [[VECTOR_PH]]:
|
|
; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
|
|
; RV32: [[VECTOR_BODY]]:
|
|
; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV32-NEXT: [[AVL:%.*]] = phi i64 [ 1023, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
|
|
; RV32-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
|
|
; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
|
|
; RV32-NEXT: [[TMP7:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
|
|
; RV32-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP7]]
|
|
; RV32-NEXT: [[TMP10:%.*]] = mul i32 0, [[TMP9]]
|
|
; RV32-NEXT: [[TMP11:%.*]] = sub i32 [[TMP9]], 1
|
|
; RV32-NEXT: [[TMP12:%.*]] = mul i32 -1, [[TMP11]]
|
|
; RV32-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[TMP8]], i32 [[TMP10]]
|
|
; RV32-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[TMP13]], i32 [[TMP12]]
|
|
; RV32-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
|
|
; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
|
|
; RV32-NEXT: [[TMP15:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
|
|
; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP7]]
|
|
; RV32-NEXT: [[TMP17:%.*]] = mul i32 0, [[TMP9]]
|
|
; RV32-NEXT: [[TMP19:%.*]] = sub i32 [[TMP9]], 1
|
|
; RV32-NEXT: [[TMP20:%.*]] = mul i32 -1, [[TMP19]]
|
|
; RV32-NEXT: [[TMP18:%.*]] = getelementptr float, ptr [[TMP16]], i32 [[TMP17]]
; RV32-NEXT: [[TMP22:%.*]] = getelementptr float, ptr [[TMP18]], i32 [[TMP20]]
; RV32-NEXT: [[VP_REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; RV32-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_REVERSE1]], ptr align 4 [[TMP22]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; RV32-NEXT: [[TMP23:%.*]] = zext i32 [[TMP9]] to i64
; RV32-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP23]], [[INDEX]]
; RV32-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP23]]
; RV32-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1023
; RV32-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; RV32: [[MIDDLE_BLOCK]]:
; RV32-NEXT: br [[EXIT:label %.*]]
; RV32: [[SCALAR_PH]]:
; RV32-NEXT: br label %[[FOR_BODY:.*]]
; RV32: [[FOR_BODY]]:
;
; RV64-UF2-LABEL: define void @vector_reverse_f32_simplify(
; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
; RV64-UF2-NEXT: [[ENTRY:.*]]:
; RV64-UF2-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; RV64-UF2-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 3
; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; RV64-UF2: [[VECTOR_PH]]:
; RV64-UF2-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; RV64-UF2-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; RV64-UF2-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2
; RV64-UF2-NEXT: [[N_VEC:%.*]] = urem i64 1023, [[TMP6]]
; RV64-UF2-NEXT: [[TMP7:%.*]] = sub i64 1023, [[N_VEC]]
; RV64-UF2-NEXT: [[TMP33:%.*]] = sub i64 1023, [[TMP7]]
; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
; RV64-UF2: [[VECTOR_BODY]]:
; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
; RV64-UF2-NEXT: [[TMP8:%.*]] = add nsw i64 [[OFFSET_IDX]], -1
; RV64-UF2-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP8]]
; RV64-UF2-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP5]]
; RV64-UF2-NEXT: [[TMP11:%.*]] = sub i64 [[TMP5]], 1
; RV64-UF2-NEXT: [[TMP12:%.*]] = mul i64 -1, [[TMP11]]
; RV64-UF2-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[TMP10]]
; RV64-UF2-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP12]]
; RV64-UF2-NEXT: [[TMP15:%.*]] = mul i64 -1, [[TMP5]]
; RV64-UF2-NEXT: [[TMP16:%.*]] = sub i64 [[TMP5]], 1
; RV64-UF2-NEXT: [[TMP17:%.*]] = mul i64 -1, [[TMP16]]
; RV64-UF2-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[TMP15]]
; RV64-UF2-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[TMP17]]
; RV64-UF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP14]], align 4
; RV64-UF2-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
; RV64-UF2-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP19]], align 4
; RV64-UF2-NEXT: [[REVERSE2:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD1]])
; RV64-UF2-NEXT: [[TMP20:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
; RV64-UF2-NEXT: [[TMP21:%.*]] = fadd <vscale x 4 x float> [[REVERSE2]], splat (float 1.000000e+00)
; RV64-UF2-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP8]]
; RV64-UF2-NEXT: [[TMP23:%.*]] = mul i64 0, [[TMP5]]
; RV64-UF2-NEXT: [[TMP24:%.*]] = sub i64 [[TMP5]], 1
; RV64-UF2-NEXT: [[TMP25:%.*]] = mul i64 -1, [[TMP24]]
; RV64-UF2-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 [[TMP23]]
; RV64-UF2-NEXT: [[TMP27:%.*]] = getelementptr inbounds float, ptr [[TMP26]], i64 [[TMP25]]
; RV64-UF2-NEXT: [[TMP28:%.*]] = mul i64 -1, [[TMP5]]
; RV64-UF2-NEXT: [[TMP29:%.*]] = sub i64 [[TMP5]], 1
; RV64-UF2-NEXT: [[TMP30:%.*]] = mul i64 -1, [[TMP29]]
; RV64-UF2-NEXT: [[TMP31:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 [[TMP28]]
; RV64-UF2-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[TMP31]], i64 [[TMP30]]
; RV64-UF2-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP20]])
; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE3]], ptr [[TMP27]], align 4
; RV64-UF2-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP21]])
; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE4]], ptr [[TMP32]], align 4
; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; RV64-UF2-NEXT: [[TMP34:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[TMP7]]
; RV64-UF2-NEXT: br i1 [[TMP34]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; RV64-UF2: [[MIDDLE_BLOCK]]:
; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[TMP7]]
; RV64-UF2-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
; RV64-UF2: [[SCALAR_PH]]:
; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP33]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
; RV64-UF2: [[FOR_BODY]]:
;
entry:
  br label %for.body

for.body:
  %dec.iv = phi i64 [ 1023, %entry ], [ %iv.next, %for.body ]
  %iv.next = add nsw i64 %dec.iv, -1
  %arrayidx.b = getelementptr inbounds float, ptr %B, i64 %iv.next
  %0 = load float, ptr %arrayidx.b, align 4
  %fadd = fadd float %0, 1.000000e+00
  %arrayidx.a = getelementptr inbounds float, ptr %A, i64 %iv.next
  store float %fadd, ptr %arrayidx.a, align 4
  %cmp = icmp ugt i64 %dec.iv, 1
  br i1 %cmp, label %for.body, label %exit, !llvm.loop !0

exit:
  ret void
}

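;; Same reverse loop as above, but with an irregular element type (i7), so the
;; reverse accesses are scalarized: each lane is loaded and stored individually
;; and the vector values are assembled with insertelement/extractelement
;; instead of wide reverse vector loads and stores.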
define void @vector_reverse_irregular_type(ptr noalias %A, ptr noalias %B) {
; RV64-LABEL: define void @vector_reverse_irregular_type(
; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
; RV64-NEXT: [[ENTRY:.*]]:
; RV64-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; RV64: [[VECTOR_PH]]:
; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
; RV64: [[VECTOR_BODY]]:
; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
; RV64-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; RV64-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], -1
; RV64-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], -2
; RV64-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], -3
; RV64-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP0]], -1
; RV64-NEXT: [[TMP5:%.*]] = add nsw i64 [[TMP1]], -1
; RV64-NEXT: [[TMP6:%.*]] = add nsw i64 [[TMP2]], -1
; RV64-NEXT: [[TMP7:%.*]] = add nsw i64 [[TMP3]], -1
; RV64-NEXT: [[TMP8:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP4]]
; RV64-NEXT: [[TMP9:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP5]]
; RV64-NEXT: [[TMP10:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP6]]
; RV64-NEXT: [[TMP11:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP7]]
; RV64-NEXT: [[TMP12:%.*]] = load i7, ptr [[TMP8]], align 1
; RV64-NEXT: [[TMP13:%.*]] = load i7, ptr [[TMP9]], align 1
; RV64-NEXT: [[TMP14:%.*]] = load i7, ptr [[TMP10]], align 1
; RV64-NEXT: [[TMP15:%.*]] = load i7, ptr [[TMP11]], align 1
; RV64-NEXT: [[TMP16:%.*]] = insertelement <4 x i7> poison, i7 [[TMP12]], i32 0
; RV64-NEXT: [[TMP17:%.*]] = insertelement <4 x i7> [[TMP16]], i7 [[TMP13]], i32 1
; RV64-NEXT: [[TMP18:%.*]] = insertelement <4 x i7> [[TMP17]], i7 [[TMP14]], i32 2
; RV64-NEXT: [[TMP19:%.*]] = insertelement <4 x i7> [[TMP18]], i7 [[TMP15]], i32 3
; RV64-NEXT: [[TMP20:%.*]] = add <4 x i7> [[TMP19]], splat (i7 1)
; RV64-NEXT: [[TMP21:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP4]]
; RV64-NEXT: [[TMP22:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP5]]
; RV64-NEXT: [[TMP23:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP6]]
; RV64-NEXT: [[TMP24:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP7]]
; RV64-NEXT: [[TMP25:%.*]] = extractelement <4 x i7> [[TMP20]], i32 0
; RV64-NEXT: store i7 [[TMP25]], ptr [[TMP21]], align 1
; RV64-NEXT: [[TMP26:%.*]] = extractelement <4 x i7> [[TMP20]], i32 1
; RV64-NEXT: store i7 [[TMP26]], ptr [[TMP22]], align 1
; RV64-NEXT: [[TMP27:%.*]] = extractelement <4 x i7> [[TMP20]], i32 2
; RV64-NEXT: store i7 [[TMP27]], ptr [[TMP23]], align 1
; RV64-NEXT: [[TMP28:%.*]] = extractelement <4 x i7> [[TMP20]], i32 3
; RV64-NEXT: store i7 [[TMP28]], ptr [[TMP24]], align 1
; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; RV64-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1020
; RV64-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; RV64: [[MIDDLE_BLOCK]]:
; RV64-NEXT: br label %[[SCALAR_PH]]
; RV64: [[SCALAR_PH]]:
; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 3, %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
; RV64-NEXT: br label %[[FOR_BODY:.*]]
; RV64: [[FOR_BODY]]:
;
; RV32-LABEL: define void @vector_reverse_irregular_type(
; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
; RV32-NEXT: [[ENTRY:.*]]:
; RV32-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; RV32: [[VECTOR_PH]]:
; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
; RV32: [[VECTOR_BODY]]:
; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
; RV32-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; RV32-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], -1
; RV32-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], -2
; RV32-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], -3
; RV32-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP0]], -1
; RV32-NEXT: [[TMP5:%.*]] = add nsw i64 [[TMP1]], -1
; RV32-NEXT: [[TMP6:%.*]] = add nsw i64 [[TMP2]], -1
; RV32-NEXT: [[TMP7:%.*]] = add nsw i64 [[TMP3]], -1
; RV32-NEXT: [[TMP8:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP4]]
; RV32-NEXT: [[TMP9:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP5]]
; RV32-NEXT: [[TMP10:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP6]]
; RV32-NEXT: [[TMP11:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP7]]
; RV32-NEXT: [[TMP12:%.*]] = load i7, ptr [[TMP8]], align 1
; RV32-NEXT: [[TMP13:%.*]] = load i7, ptr [[TMP9]], align 1
; RV32-NEXT: [[TMP14:%.*]] = load i7, ptr [[TMP10]], align 1
; RV32-NEXT: [[TMP15:%.*]] = load i7, ptr [[TMP11]], align 1
; RV32-NEXT: [[TMP16:%.*]] = insertelement <4 x i7> poison, i7 [[TMP12]], i32 0
; RV32-NEXT: [[TMP17:%.*]] = insertelement <4 x i7> [[TMP16]], i7 [[TMP13]], i32 1
; RV32-NEXT: [[TMP18:%.*]] = insertelement <4 x i7> [[TMP17]], i7 [[TMP14]], i32 2
; RV32-NEXT: [[TMP19:%.*]] = insertelement <4 x i7> [[TMP18]], i7 [[TMP15]], i32 3
; RV32-NEXT: [[TMP20:%.*]] = add <4 x i7> [[TMP19]], splat (i7 1)
; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP4]]
; RV32-NEXT: [[TMP22:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP5]]
; RV32-NEXT: [[TMP23:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP6]]
; RV32-NEXT: [[TMP24:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP7]]
; RV32-NEXT: [[TMP25:%.*]] = extractelement <4 x i7> [[TMP20]], i32 0
; RV32-NEXT: store i7 [[TMP25]], ptr [[TMP21]], align 1
; RV32-NEXT: [[TMP26:%.*]] = extractelement <4 x i7> [[TMP20]], i32 1
; RV32-NEXT: store i7 [[TMP26]], ptr [[TMP22]], align 1
; RV32-NEXT: [[TMP27:%.*]] = extractelement <4 x i7> [[TMP20]], i32 2
; RV32-NEXT: store i7 [[TMP27]], ptr [[TMP23]], align 1
; RV32-NEXT: [[TMP28:%.*]] = extractelement <4 x i7> [[TMP20]], i32 3
; RV32-NEXT: store i7 [[TMP28]], ptr [[TMP24]], align 1
; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; RV32-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1020
; RV32-NEXT: br i1 [[TMP29]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; RV32: [[MIDDLE_BLOCK]]:
; RV32-NEXT: br label %[[SCALAR_PH]]
; RV32: [[SCALAR_PH]]:
; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 3, %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
; RV32-NEXT: br label %[[FOR_BODY:.*]]
; RV32: [[FOR_BODY]]:
;
; RV64-UF2-LABEL: define void @vector_reverse_irregular_type(
; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
; RV64-UF2-NEXT: [[ENTRY:.*]]:
; RV64-UF2-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; RV64-UF2: [[VECTOR_PH]]:
; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
; RV64-UF2: [[VECTOR_BODY]]:
; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
; RV64-UF2-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
; RV64-UF2-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], -1
; RV64-UF2-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], -2
; RV64-UF2-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], -3
; RV64-UF2-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], -4
; RV64-UF2-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], -5
; RV64-UF2-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], -6
; RV64-UF2-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], -7
; RV64-UF2-NEXT: [[TMP8:%.*]] = add nsw i64 [[TMP0]], -1
; RV64-UF2-NEXT: [[TMP9:%.*]] = add nsw i64 [[TMP1]], -1
; RV64-UF2-NEXT: [[TMP10:%.*]] = add nsw i64 [[TMP2]], -1
; RV64-UF2-NEXT: [[TMP11:%.*]] = add nsw i64 [[TMP3]], -1
; RV64-UF2-NEXT: [[TMP12:%.*]] = add nsw i64 [[TMP4]], -1
; RV64-UF2-NEXT: [[TMP13:%.*]] = add nsw i64 [[TMP5]], -1
; RV64-UF2-NEXT: [[TMP14:%.*]] = add nsw i64 [[TMP6]], -1
; RV64-UF2-NEXT: [[TMP15:%.*]] = add nsw i64 [[TMP7]], -1
; RV64-UF2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP8]]
; RV64-UF2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP9]]
; RV64-UF2-NEXT: [[TMP18:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP10]]
; RV64-UF2-NEXT: [[TMP19:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP11]]
; RV64-UF2-NEXT: [[TMP20:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP12]]
; RV64-UF2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP13]]
; RV64-UF2-NEXT: [[TMP22:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP14]]
; RV64-UF2-NEXT: [[TMP23:%.*]] = getelementptr inbounds i7, ptr [[B]], i64 [[TMP15]]
; RV64-UF2-NEXT: [[TMP24:%.*]] = load i7, ptr [[TMP16]], align 1
; RV64-UF2-NEXT: [[TMP25:%.*]] = load i7, ptr [[TMP17]], align 1
; RV64-UF2-NEXT: [[TMP26:%.*]] = load i7, ptr [[TMP18]], align 1
; RV64-UF2-NEXT: [[TMP27:%.*]] = load i7, ptr [[TMP19]], align 1
; RV64-UF2-NEXT: [[TMP28:%.*]] = insertelement <4 x i7> poison, i7 [[TMP24]], i32 0
; RV64-UF2-NEXT: [[TMP29:%.*]] = insertelement <4 x i7> [[TMP28]], i7 [[TMP25]], i32 1
; RV64-UF2-NEXT: [[TMP30:%.*]] = insertelement <4 x i7> [[TMP29]], i7 [[TMP26]], i32 2
; RV64-UF2-NEXT: [[TMP31:%.*]] = insertelement <4 x i7> [[TMP30]], i7 [[TMP27]], i32 3
; RV64-UF2-NEXT: [[TMP32:%.*]] = load i7, ptr [[TMP20]], align 1
; RV64-UF2-NEXT: [[TMP33:%.*]] = load i7, ptr [[TMP21]], align 1
; RV64-UF2-NEXT: [[TMP34:%.*]] = load i7, ptr [[TMP22]], align 1
; RV64-UF2-NEXT: [[TMP35:%.*]] = load i7, ptr [[TMP23]], align 1
; RV64-UF2-NEXT: [[TMP36:%.*]] = insertelement <4 x i7> poison, i7 [[TMP32]], i32 0
; RV64-UF2-NEXT: [[TMP37:%.*]] = insertelement <4 x i7> [[TMP36]], i7 [[TMP33]], i32 1
; RV64-UF2-NEXT: [[TMP38:%.*]] = insertelement <4 x i7> [[TMP37]], i7 [[TMP34]], i32 2
; RV64-UF2-NEXT: [[TMP39:%.*]] = insertelement <4 x i7> [[TMP38]], i7 [[TMP35]], i32 3
; RV64-UF2-NEXT: [[TMP40:%.*]] = add <4 x i7> [[TMP31]], splat (i7 1)
; RV64-UF2-NEXT: [[TMP41:%.*]] = add <4 x i7> [[TMP39]], splat (i7 1)
; RV64-UF2-NEXT: [[TMP42:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP8]]
; RV64-UF2-NEXT: [[TMP43:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP9]]
; RV64-UF2-NEXT: [[TMP44:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP10]]
; RV64-UF2-NEXT: [[TMP45:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP11]]
; RV64-UF2-NEXT: [[TMP46:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP12]]
; RV64-UF2-NEXT: [[TMP47:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP13]]
; RV64-UF2-NEXT: [[TMP48:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP14]]
; RV64-UF2-NEXT: [[TMP49:%.*]] = getelementptr inbounds i7, ptr [[A]], i64 [[TMP15]]
; RV64-UF2-NEXT: [[TMP50:%.*]] = extractelement <4 x i7> [[TMP40]], i32 0
; RV64-UF2-NEXT: store i7 [[TMP50]], ptr [[TMP42]], align 1
; RV64-UF2-NEXT: [[TMP51:%.*]] = extractelement <4 x i7> [[TMP40]], i32 1
; RV64-UF2-NEXT: store i7 [[TMP51]], ptr [[TMP43]], align 1
; RV64-UF2-NEXT: [[TMP52:%.*]] = extractelement <4 x i7> [[TMP40]], i32 2
; RV64-UF2-NEXT: store i7 [[TMP52]], ptr [[TMP44]], align 1
; RV64-UF2-NEXT: [[TMP53:%.*]] = extractelement <4 x i7> [[TMP40]], i32 3
; RV64-UF2-NEXT: store i7 [[TMP53]], ptr [[TMP45]], align 1
; RV64-UF2-NEXT: [[TMP54:%.*]] = extractelement <4 x i7> [[TMP41]], i32 0
; RV64-UF2-NEXT: store i7 [[TMP54]], ptr [[TMP46]], align 1
; RV64-UF2-NEXT: [[TMP55:%.*]] = extractelement <4 x i7> [[TMP41]], i32 1
; RV64-UF2-NEXT: store i7 [[TMP55]], ptr [[TMP47]], align 1
; RV64-UF2-NEXT: [[TMP56:%.*]] = extractelement <4 x i7> [[TMP41]], i32 2
; RV64-UF2-NEXT: store i7 [[TMP56]], ptr [[TMP48]], align 1
; RV64-UF2-NEXT: [[TMP57:%.*]] = extractelement <4 x i7> [[TMP41]], i32 3
; RV64-UF2-NEXT: store i7 [[TMP57]], ptr [[TMP49]], align 1
; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; RV64-UF2-NEXT: [[TMP58:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1016
; RV64-UF2-NEXT: br i1 [[TMP58]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; RV64-UF2: [[MIDDLE_BLOCK]]:
; RV64-UF2-NEXT: br label %[[SCALAR_PH]]
; RV64-UF2: [[SCALAR_PH]]:
; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 7, %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
; RV64-UF2: [[FOR_BODY]]:
;
entry:
  br label %for.body

for.body:
  %dec.iv = phi i64 [ 1023, %entry ], [ %iv.next, %for.body ]
  %iv.next = add nsw i64 %dec.iv, -1
  %arrayidx.b = getelementptr inbounds i7, ptr %B, i64 %iv.next
  %0 = load i7, ptr %arrayidx.b, align 1
  %add = add i7 %0, 1
  %arrayidx.a = getelementptr inbounds i7, ptr %A, i64 %iv.next
  store i7 %add, ptr %arrayidx.a, align 1
  %cmp = icmp ugt i64 %dec.iv, 1
  br i1 %cmp, label %for.body, label %exit, !llvm.loop !4

exit:
  ret void
}

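;; Loop hints: !0 requests scalable vectorization with VF 4; !4 requests
;; vectorization with VF 4 without the scalable hint (used by the i7 loop).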
!0 = distinct !{!0, !1, !2, !3}
!1 = !{!"llvm.loop.vectorize.width", i32 4}
!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!3 = !{!"llvm.loop.vectorize.enable", i1 true}
!4 = distinct !{!4, !1, !3}