
This patch adds more instructions to the Uniforms list, for example certain intrinsics that are uniform by definition or whose operands are loop invariant. This list includes: 1. The intrinsics 'experimental.noalias.scope.decl' and 'sideeffect', which are always uniform by definition. 2. If intrinsics 'lifetime.start', 'lifetime.end' and 'assume' have loop invariant input operands then these are also uniform too. Also, in VPRecipeBuilder::handleReplication we check if an instruction is uniform based purely on whether or not the instruction lives in the Uniforms list. However, there are certain cases where calls to some intrinsics can be effectively treated as uniform too. Therefore, we now also treat the following cases as uniform for scalable vectors: 1. If the 'assume' intrinsic's operand is not loop invariant, then we are free to treat this as uniform anyway since it's only a performance hint. We will get the benefit for the first lane. 2. When the input pointers for 'lifetime.start' and 'lifetime.end' are loop variant then for scalable vectors we assume these still ultimately come from the broadcast of an alloca. We do not support scalable vectorisation of loops containing alloca instructions, hence the alloca itself would be invariant. If the pointer does not come from an alloca then the intrinsic itself has no effect. I have updated the assume test for fixed width, since we now treat it as uniform: Transforms/LoopVectorize/assume.ll I've also added new scalable vectorisation tests for other intrinsics: Transforms/LoopVectorize/scalable-assume.ll Transforms/LoopVectorize/scalable-lifetime.ll Transforms/LoopVectorize/scalable-noalias-scope-decl.ll Differential Revision: https://reviews.llvm.org/D107284
82 lines
3.4 KiB
LLVM
; Test driver: run the loop vectorizer with scalable vectorization forced on
; (VF = vscale x 2, interleave = 1) and verify the output with FileCheck.
; RUN: opt -S -scalable-vectorization=on -force-target-supports-scalable-vectors=true -loop-vectorize -force-vector-width=2 -force-vector-interleave=1 < %s | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; Make sure we can vectorize loops which contain lifetime markers.
|
; Loop with lifetime.start/lifetime.end markers whose pointer operand (a
; bitcast of a function-entry alloca) is loop invariant. The CHECK lines
; verify the markers stay as single scalar calls on the invariant pointer
; inside the vector body, while the store is widened to <vscale x 2 x i32>.
define void @test(i32 *%d) {
; CHECK-LABEL: @test(
; CHECK: entry:
; CHECK: [[ALLOCA:%.*]] = alloca [1024 x i32], align 16
; CHECK-NEXT: [[BC:%.*]] = bitcast [1024 x i32]* [[ALLOCA]] to i8*
; CHECK: vector.body:
; CHECK: call void @llvm.lifetime.end.p0i8(i64 4096, i8* [[BC]])
; CHECK: store <vscale x 2 x i32>
; CHECK: call void @llvm.lifetime.start.p0i8(i64 4096, i8* [[BC]])
entry:
  %arr = alloca [1024 x i32], align 16
  %0 = bitcast [1024 x i32]* %arr to i8*
  call void @llvm.lifetime.start.p0i8(i64 4096, i8* %0) #1
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  call void @llvm.lifetime.end.p0i8(i64 4096, i8* %0) #1
  %arrayidx = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx, align 8
  store i32 100, i32* %arrayidx, align 8
  call void @llvm.lifetime.start.p0i8(i64 4096, i8* %0) #1
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp ne i32 %lftr.wideiv, 128
  br i1 %exitcond, label %for.body, label %for.end, !llvm.loop !0

for.end:
  call void @llvm.lifetime.end.p0i8(i64 4096, i8* %0) #1
  ret void
}
|
|
|
|
; Here the lifetime pointer operand is a bitcast computed inside the loop,
; so it is loop variant. The CHECK lines verify the expected scalable-vector
; handling: the alloca pointer is splatted in the preheader, the vector body
; bitcasts the splat, extracts lane 0, and the lifetime calls remain scalar
; calls on that single extracted pointer.
; CHECK-LABEL: @testloopvariant(
; CHECK: entry:
; CHECK: [[ALLOCA:%.*]] = alloca [1024 x i32], align 16
; CHECK: vector.ph:
; CHECK: [[TMP1:%.*]] = insertelement <vscale x 2 x [1024 x i32]*> poison, [1024 x i32]* %arr, i32 0
; CHECK-NEXT: [[SPLAT_ALLOCA:%.*]] = shufflevector <vscale x 2 x [1024 x i32]*> [[TMP1]], <vscale x 2 x [1024 x i32]*> poison, <vscale x 2 x i32> zeroinitializer
; CHECK: vector.body:
; CHECK: [[BC_ALLOCA:%.*]] = bitcast <vscale x 2 x [1024 x i32]*> [[SPLAT_ALLOCA]] to <vscale x 2 x i8*>
; CHECK-NEXT: [[ONE_LIFETIME:%.*]] = extractelement <vscale x 2 x i8*> [[BC_ALLOCA]], i32 0
; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 4096, i8* [[ONE_LIFETIME]])
; CHECK: store <vscale x 2 x i32>
; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 4096, i8* [[ONE_LIFETIME]])

define void @testloopvariant(i32 *%d) {
entry:
  %arr = alloca [1024 x i32], align 16
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %0 = getelementptr [1024 x i32], [1024 x i32]* %arr, i32 0, i64 %indvars.iv
  %1 = bitcast [1024 x i32]* %arr to i8*
  call void @llvm.lifetime.end.p0i8(i64 4096, i8* %1) #1
  %arrayidx = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
  %2 = load i32, i32* %arrayidx, align 8
  store i32 100, i32* %arrayidx, align 8
  call void @llvm.lifetime.start.p0i8(i64 4096, i8* %1) #1
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp ne i32 %lftr.wideiv, 128
  br i1 %exitcond, label %for.body, label %for.end, !llvm.loop !0

for.end:
  ret void
}
|
|
|
|
; Declarations of the lifetime-marker intrinsics used by both tests above.
declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1

declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
|
; Loop metadata attached to the back-edge branches: request scalable
; vectorization of these loops.
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}