llvm-project/llvm/test/Transforms/SLPVectorizer/extract-many-users-buildvector.ll
Alexey Bataev 07d284d4eb
[SLP]Add cost estimation for gather node reshuffling
Adds cost estimation for the variants of the permutations of the scalar
values used in gather nodes. Currently, SLP unconditionally emits
shuffles for reused buildvectors, but in some cases it is better to
keep them as buildvectors rather than shuffles, when the cost of the
buildvector is lower.
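
A minimal sketch of the tradeoff (hypothetical IR, not taken from this
patch): suppose a gather node needs <4 x double> {%a, %b, %a, %b} and
<2 x double> %v = {%a, %b} is already available. Before this change, SLP
always emitted the shuffle form:

  ; reuse the existing vector via a permutation
  %wide = shufflevector <2 x double> %v, <2 x double> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 1>

With this patch it also costs the buildvector form and keeps whichever
the target reports as cheaper:

  ; or rebuild from scalars, which some targets cost lower than the permutation
  %w0 = insertelement <4 x double> poison, double %a, i32 0
  %w1 = insertelement <4 x double> %w0, double %b, i32 1
  %w2 = insertelement <4 x double> %w1, double %a, i32 2
  %w3 = insertelement <4 x double> %w2, double %b, i32 3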

X86, AVX512, -O3+LTO
Metric: size..text

Program                                                                        size..text
                                                                               results     results0    diff
                 test-suite :: External/SPEC/CINT2006/445.gobmk/445.gobmk.test   912998.00   913238.00  0.0%
 test-suite :: MultiSource/Benchmarks/MiBench/consumer-lame/consumer-lame.test   203070.00   203102.00  0.0%
     test-suite :: External/SPEC/CFP2017speed/638.imagick_s/638.imagick_s.test  1396320.00  1396448.00  0.0%
      test-suite :: External/SPEC/CFP2017rate/538.imagick_r/538.imagick_r.test  1396320.00  1396448.00  0.0%
                       test-suite :: MultiSource/Benchmarks/Bullet/bullet.test   309790.00   309678.00 -0.0%
      test-suite :: External/SPEC/CFP2017rate/526.blender_r/526.blender_r.test 12477607.00 12470807.00 -0.1%

CINT2006/445.gobmk - extra code vectorized
MiBench/consumer-lame - small variations
CFP2017speed/638.imagick_s - extra vectorized code
CFP2017rate/538.imagick_r - extra vectorized code
Benchmarks/Bullet - extra code vectorized
CFP2017rate/526.blender_r - extra vector code

RISC-V, sifive-p670, -O3+LTO
CFP2006/433.milc - regressions; should be fixed by https://github.com/llvm/llvm-project/pull/115173
CFP2006/453.povray - extra vectorized code
CFP2017rate/508.namd_r - better vector code
CFP2017rate/510.parest_r - extra vectorized code
SPEC/CFP2017rate - extra/better vector code
CFP2017rate/526.blender_r - extra vectorized code
CFP2017rate/538.imagick_r - extra vectorized code
CINT2006/403.gcc - extra vectorized code
CINT2006/445.gobmk - extra vectorized code
CINT2006/464.h264ref - extra vectorized code
CINT2006/483.xalancbmk - small variations
CINT2017rate/525.x264_r - better vectorization

Reviewers: RKSimon

Reviewed By: RKSimon

Pull Request: https://github.com/llvm/llvm-project/pull/115201
2024-12-24 15:35:29 -05:00

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefix X86 %}
; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefix AARCH64 %}
define i1 @test(float %0, double %1) {
; X86-LABEL: define i1 @test
; X86-SAME: (float [[TMP0:%.*]], double [[TMP1:%.*]]) {
; X86-NEXT: [[TMP3:%.*]] = insertelement <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float poison>, float [[TMP0]], i32 3
; X86-NEXT: [[TMP4:%.*]] = fpext <4 x float> [[TMP3]] to <4 x double>
; X86-NEXT: [[TMP5:%.*]] = insertelement <2 x double> <double poison, double 0.000000e+00>, double [[TMP1]], i32 0
; X86-NEXT: [[TMP6:%.*]] = fmul <2 x double> zeroinitializer, [[TMP5]]
; X86-NEXT: [[TMP7:%.*]] = shufflevector <2 x double> [[TMP5]], <2 x double> [[TMP6]], <4 x i32> <i32 poison, i32 0, i32 3, i32 3>
; X86-NEXT: [[TMP8:%.*]] = shufflevector <4 x double> [[TMP7]], <4 x double> <double 0.000000e+00, double poison, double poison, double poison>, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
; X86-NEXT: [[TMP9:%.*]] = shufflevector <4 x double> [[TMP4]], <4 x double> <double poison, double poison, double poison, double 0.000000e+00>, <4 x i32> <i32 2, i32 0, i32 1, i32 7>
; X86-NEXT: [[TMP10:%.*]] = fmul <4 x double> [[TMP8]], [[TMP9]]
; X86-NEXT: [[TMP11:%.*]] = fmul <4 x double> zeroinitializer, [[TMP4]]
; X86-NEXT: [[TMP12:%.*]] = call <8 x double> @llvm.vector.insert.v8f64.v4f64(<8 x double> <double poison, double poison, double poison, double poison, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00>, <4 x double> [[TMP10]], i64 0)
; X86-NEXT: [[TMP13:%.*]] = call <8 x double> @llvm.vector.insert.v8f64.v4f64(<8 x double> <double poison, double poison, double poison, double poison, double poison, double poison, double 0.000000e+00, double 0.000000e+00>, <4 x double> [[TMP11]], i64 0)
; X86-NEXT: [[TMP14:%.*]] = call <8 x double> @llvm.vector.insert.v8f64.v2f64(<8 x double> [[TMP13]], <2 x double> [[TMP6]], i64 4)
; X86-NEXT: [[TMP15:%.*]] = fsub <8 x double> [[TMP12]], [[TMP14]]
; X86-NEXT: [[TMP16:%.*]] = fmul <8 x double> [[TMP12]], [[TMP14]]
; X86-NEXT: [[TMP17:%.*]] = shufflevector <8 x double> [[TMP15]], <8 x double> [[TMP16]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 13, i32 14, i32 15>
; X86-NEXT: [[TMP18:%.*]] = fptrunc <8 x double> [[TMP17]] to <8 x float>
; X86-NEXT: [[TMP19:%.*]] = fmul <8 x float> [[TMP18]], zeroinitializer
; X86-NEXT: [[TMP20:%.*]] = fcmp oeq <8 x float> [[TMP19]], zeroinitializer
; X86-NEXT: [[TMP21:%.*]] = freeze <8 x i1> [[TMP20]]
; X86-NEXT: [[TMP22:%.*]] = call i1 @llvm.vector.reduce.and.v8i1(<8 x i1> [[TMP21]])
; X86-NEXT: ret i1 [[TMP22]]
;
; AARCH64-LABEL: define i1 @test
; AARCH64-SAME: (float [[TMP0:%.*]], double [[TMP1:%.*]]) {
; AARCH64-NEXT: [[TMP3:%.*]] = insertelement <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float poison>, float [[TMP0]], i32 3
; AARCH64-NEXT: [[TMP4:%.*]] = fpext <4 x float> [[TMP3]] to <4 x double>
; AARCH64-NEXT: [[TMP5:%.*]] = insertelement <2 x double> <double poison, double 0.000000e+00>, double [[TMP1]], i32 0
; AARCH64-NEXT: [[TMP6:%.*]] = fmul <2 x double> zeroinitializer, [[TMP5]]
; AARCH64-NEXT: [[TMP7:%.*]] = shufflevector <2 x double> [[TMP5]], <2 x double> [[TMP6]], <4 x i32> <i32 poison, i32 0, i32 3, i32 3>
; AARCH64-NEXT: [[TMP8:%.*]] = shufflevector <4 x double> [[TMP7]], <4 x double> <double 0.000000e+00, double poison, double poison, double poison>, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
; AARCH64-NEXT: [[TMP9:%.*]] = shufflevector <4 x double> [[TMP4]], <4 x double> <double poison, double poison, double poison, double 0.000000e+00>, <4 x i32> <i32 2, i32 0, i32 poison, i32 7>
; AARCH64-NEXT: [[TMP10:%.*]] = shufflevector <4 x double> [[TMP9]], <4 x double> [[TMP4]], <4 x i32> <i32 0, i32 1, i32 5, i32 3>
; AARCH64-NEXT: [[TMP11:%.*]] = fmul <4 x double> [[TMP8]], [[TMP10]]
; AARCH64-NEXT: [[TMP12:%.*]] = fmul <4 x double> zeroinitializer, [[TMP4]]
; AARCH64-NEXT: [[TMP13:%.*]] = call <8 x double> @llvm.vector.insert.v8f64.v4f64(<8 x double> <double poison, double poison, double poison, double poison, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00>, <4 x double> [[TMP11]], i64 0)
; AARCH64-NEXT: [[TMP14:%.*]] = call <8 x double> @llvm.vector.insert.v8f64.v4f64(<8 x double> <double poison, double poison, double poison, double poison, double poison, double poison, double 0.000000e+00, double 0.000000e+00>, <4 x double> [[TMP12]], i64 0)
; AARCH64-NEXT: [[TMP15:%.*]] = call <8 x double> @llvm.vector.insert.v8f64.v2f64(<8 x double> [[TMP14]], <2 x double> [[TMP6]], i64 4)
; AARCH64-NEXT: [[TMP16:%.*]] = fsub <8 x double> [[TMP13]], [[TMP15]]
; AARCH64-NEXT: [[TMP17:%.*]] = fmul <8 x double> [[TMP13]], [[TMP15]]
; AARCH64-NEXT: [[TMP18:%.*]] = shufflevector <8 x double> [[TMP16]], <8 x double> [[TMP17]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 13, i32 14, i32 15>
; AARCH64-NEXT: [[TMP19:%.*]] = fptrunc <8 x double> [[TMP18]] to <8 x float>
; AARCH64-NEXT: [[TMP20:%.*]] = fmul <8 x float> [[TMP19]], zeroinitializer
; AARCH64-NEXT: [[TMP21:%.*]] = fcmp oeq <8 x float> [[TMP20]], zeroinitializer
; AARCH64-NEXT: [[TMP22:%.*]] = freeze <8 x i1> [[TMP21]]
; AARCH64-NEXT: [[TMP23:%.*]] = call i1 @llvm.vector.reduce.and.v8i1(<8 x i1> [[TMP22]])
; AARCH64-NEXT: ret i1 [[TMP23]]
;
%3 = fpext float %0 to double
%4 = fpext float 0.000000e+00 to double
%5 = fpext float 0.000000e+00 to double
%6 = fpext float 0.000000e+00 to double
%7 = fmul double 0.000000e+00, 0.000000e+00
%8 = fmul double 0.000000e+00, %1
%9 = fmul double 0.000000e+00, 0.000000e+00
%10 = fmul double 0.000000e+00, %5
%11 = fmul double 0.000000e+00, %6
%12 = fsub double %10, %11
%13 = fptrunc double %12 to float
%14 = fmul double %9, 0.000000e+00
%15 = fmul double 0.000000e+00, %3
%16 = fsub double %14, %15
%17 = fptrunc double %16 to float
%18 = fptrunc double %7 to float
%19 = fmul double %1, %6
%20 = fmul double 0.000000e+00, %4
%21 = fsub double %19, %20
%22 = fptrunc double %21 to float
%23 = fsub double 0.000000e+00, %8
%24 = fptrunc double %23 to float
%25 = fmul double 0.000000e+00, 0.000000e+00
%26 = fptrunc double %25 to float
%27 = fmul double %9, %4
%28 = fmul double 0.000000e+00, %5
%29 = fsub double %27, %28
%30 = fptrunc double %29 to float
%31 = fmul double %9, 0.000000e+00
%32 = fptrunc double %31 to float
%33 = fmul float %13, 0.000000e+00
%34 = fcmp oeq float %33, 0.000000e+00
%35 = fmul float %22, 0.000000e+00
%36 = fcmp oeq float %35, 0.000000e+00
%37 = select i1 %34, i1 %36, i1 false
%38 = fmul float %30, 0.000000e+00
%39 = fcmp oeq float %38, 0.000000e+00
%40 = select i1 %37, i1 %39, i1 false
%41 = fmul float %17, 0.000000e+00
%42 = fcmp oeq float %41, 0.000000e+00
%43 = select i1 %40, i1 %42, i1 false
%44 = fmul float %24, 0.000000e+00
%45 = fcmp oeq float %44, 0.000000e+00
%46 = select i1 %43, i1 %45, i1 false
%47 = fmul float %32, 0.000000e+00
%48 = fcmp oeq float %47, 0.000000e+00
%49 = select i1 %46, i1 %48, i1 false
%50 = fmul float %18, 0.000000e+00
%51 = fcmp oeq float %50, 0.000000e+00
%52 = select i1 %49, i1 %51, i1 false
%53 = fmul float %26, 0.000000e+00
%54 = fcmp oeq float %53, 0.000000e+00
%55 = select i1 %52, i1 %54, i1 false
ret i1 %55
}