llvm-project/llvm/test/CodeGen/X86/znver3-gather.ll
Matthias Braun 189900eb14 X86: Stop assigning register costs for longer encodings.
This stops reporting CostPerUse 1 for `R8`-`R15` and `XMM8`-`XMM31`.
This was previously done because using these registers requires a REX
prefix, resulting in longer instruction encodings. I found that this
regresses the quality of the register allocation, as the costs impose
an ordering on eviction candidates. I also feel there is a bit of an
impedance mismatch: the actual cost is incurred when encoding
instructions that use those registers, but the order of VReg
assignments is not primarily driven by the number of Defs+Uses.
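
For illustration (an added example using the standard x86-64
encodings, not taken from the original measurements): an otherwise
identical instruction grows by one byte when an extended register
forces a REX prefix:

    addl %edx, %ecx     # encoding: 01 d1     (2 bytes)
    addl %r8d, %ecx     # encoding: 44 01 c1  (3 bytes, REX.R prefix for %r8d)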

I did extensive measurements with the llvm-test-suite with SPEC2006 +
SPEC2017 included; internal services showed similar patterns.
Generally there are a lot of improvements but also a lot of
regressions, but on average the allocation quality seems to improve at
the cost of a small code-size regression.

Results for measuring static and dynamic instruction counts:

Dynamic Counts (scaled by execution frequency) / Optimization Remarks:
    Spills+FoldedSpills   -5.6%
    Reloads+FoldedReloads -4.2%
    Copies                -0.1%

Static / LLVM Statistics:
    regalloc.NumSpills    mean -1.6%, geomean -2.8%
    regalloc.NumReloads   mean -1.7%, geomean -3.1%
    size..text            mean +0.4%, geomean +0.4%

Static / LLVM Statistics:
    regalloc.NumSpills    mean -2.2%, geomean -3.1%
    regalloc.NumReloads   mean -2.6%, geomean -3.9%
    size..text            mean +0.6%, geomean +0.6%

Static / LLVM Statistics:
    regalloc.NumSpills   mean -3.0%
    regalloc.NumReloads  mean -3.3%
    size..text           mean +0.3%, geomean +0.3%
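
A minimal sketch of how such numbers can be gathered for a single
module, assuming an LLVM build with statistics enabled and a
hypothetical input file foo.ll (exact statistic and remark names may
vary between LLVM versions):

    # Print register allocator statistics (spill/reload counts etc.).
    llc -mtriple=x86_64-unknown-unknown -stats foo.ll -o /dev/null
    # Emit optimization remarks (including regalloc spill/reload remarks)
    # to YAML for later aggregation, e.g. weighting by execution frequency.
    llc -mtriple=x86_64-unknown-unknown -pass-remarks-output=foo.opt.yaml foo.ll -o foo.s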

Differential Revision: https://reviews.llvm.org/D133902
2022-09-30 16:01:33 -07:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver3 | FileCheck %s --check-prefix=X64
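
; All three functions below expect the gather to be expanded into scalar
; element loads (vmovd/vpinsrd) plus a final vinserti128 rather than a
; vpgatherdd; the optsize/minsize variants check that size attributes do
; not change this expansion.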
define <8 x i32> @simple(ptr %base, <8 x i32> %offsets) {
; X64-LABEL: simple:
; X64: # %bb.0:
; X64-NEXT: vextracti128 $1, %ymm0, %xmm2
; X64-NEXT: vpmovsxdq %xmm0, %ymm0
; X64-NEXT: vmovq %rdi, %xmm1
; X64-NEXT: vpbroadcastq %xmm1, %ymm1
; X64-NEXT: vpmovsxdq %xmm2, %ymm2
; X64-NEXT: vpsllq $2, %ymm0, %ymm0
; X64-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; X64-NEXT: vmovq %xmm0, %rax
; X64-NEXT: vpextrq $1, %xmm0, %rcx
; X64-NEXT: vextracti128 $1, %ymm0, %xmm0
; X64-NEXT: vpsllq $2, %ymm2, %ymm2
; X64-NEXT: vpaddq %ymm2, %ymm1, %ymm2
; X64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: vpextrq $1, %xmm0, %rdx
; X64-NEXT: vmovq %xmm0, %rsi
; X64-NEXT: vextracti128 $1, %ymm2, %xmm0
; X64-NEXT: vmovq %xmm2, %rdi
; X64-NEXT: vpextrq $1, %xmm2, %r8
; X64-NEXT: vpinsrd $1, (%rcx), %xmm1, %xmm1
; X64-NEXT: vmovq %xmm0, %r9
; X64-NEXT: vpextrq $1, %xmm0, %r10
; X64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: vpinsrd $2, (%rsi), %xmm1, %xmm1
; X64-NEXT: vpinsrd $1, (%r8), %xmm0, %xmm0
; X64-NEXT: vpinsrd $3, (%rdx), %xmm1, %xmm1
; X64-NEXT: vpinsrd $2, (%r9), %xmm0, %xmm0
; X64-NEXT: vpinsrd $3, (%r10), %xmm0, %xmm0
; X64-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
%ptrs = getelementptr inbounds i32, ptr %base, <8 x i32> %offsets
%wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
ret <8 x i32> %wide.masked.gather
}
define <8 x i32> @optsize(ptr %base, <8 x i32> %offsets) optsize {
; X64-LABEL: optsize:
; X64: # %bb.0:
; X64-NEXT: vextracti128 $1, %ymm0, %xmm2
; X64-NEXT: vpmovsxdq %xmm0, %ymm0
; X64-NEXT: vmovq %rdi, %xmm1
; X64-NEXT: vpbroadcastq %xmm1, %ymm1
; X64-NEXT: vpmovsxdq %xmm2, %ymm2
; X64-NEXT: vpsllq $2, %ymm0, %ymm0
; X64-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; X64-NEXT: vmovq %xmm0, %rax
; X64-NEXT: vpextrq $1, %xmm0, %rcx
; X64-NEXT: vextracti128 $1, %ymm0, %xmm0
; X64-NEXT: vpsllq $2, %ymm2, %ymm2
; X64-NEXT: vpaddq %ymm2, %ymm1, %ymm2
; X64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: vpextrq $1, %xmm0, %rdx
; X64-NEXT: vmovq %xmm0, %rsi
; X64-NEXT: vextracti128 $1, %ymm2, %xmm0
; X64-NEXT: vmovq %xmm2, %rdi
; X64-NEXT: vpextrq $1, %xmm2, %r8
; X64-NEXT: vpinsrd $1, (%rcx), %xmm1, %xmm1
; X64-NEXT: vmovq %xmm0, %r9
; X64-NEXT: vpextrq $1, %xmm0, %r10
; X64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: vpinsrd $2, (%rsi), %xmm1, %xmm1
; X64-NEXT: vpinsrd $1, (%r8), %xmm0, %xmm0
; X64-NEXT: vpinsrd $3, (%rdx), %xmm1, %xmm1
; X64-NEXT: vpinsrd $2, (%r9), %xmm0, %xmm0
; X64-NEXT: vpinsrd $3, (%r10), %xmm0, %xmm0
; X64-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
%ptrs = getelementptr inbounds i32, ptr %base, <8 x i32> %offsets
%wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
ret <8 x i32> %wide.masked.gather
}
define <8 x i32> @minsize(ptr %base, <8 x i32> %offsets) minsize {
; X64-LABEL: minsize:
; X64: # %bb.0:
; X64-NEXT: vextracti128 $1, %ymm0, %xmm2
; X64-NEXT: vpmovsxdq %xmm0, %ymm0
; X64-NEXT: vmovq %rdi, %xmm1
; X64-NEXT: vpbroadcastq %xmm1, %ymm1
; X64-NEXT: vpmovsxdq %xmm2, %ymm2
; X64-NEXT: vpsllq $2, %ymm0, %ymm0
; X64-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; X64-NEXT: vmovq %xmm0, %rax
; X64-NEXT: vpextrq $1, %xmm0, %rcx
; X64-NEXT: vextracti128 $1, %ymm0, %xmm0
; X64-NEXT: vpsllq $2, %ymm2, %ymm2
; X64-NEXT: vpaddq %ymm2, %ymm1, %ymm2
; X64-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: vpextrq $1, %xmm0, %rdx
; X64-NEXT: vmovq %xmm0, %rsi
; X64-NEXT: vextracti128 $1, %ymm2, %xmm0
; X64-NEXT: vmovq %xmm2, %rdi
; X64-NEXT: vpextrq $1, %xmm2, %r8
; X64-NEXT: vpinsrd $1, (%rcx), %xmm1, %xmm1
; X64-NEXT: vmovq %xmm0, %r9
; X64-NEXT: vpextrq $1, %xmm0, %r10
; X64-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: vpinsrd $2, (%rsi), %xmm1, %xmm1
; X64-NEXT: vpinsrd $1, (%r8), %xmm0, %xmm0
; X64-NEXT: vpinsrd $3, (%rdx), %xmm1, %xmm1
; X64-NEXT: vpinsrd $2, (%r9), %xmm0, %xmm0
; X64-NEXT: vpinsrd $3, (%r10), %xmm0, %xmm0
; X64-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
%ptrs = getelementptr inbounds i32, ptr %base, <8 x i32> %offsets
%wide.masked.gather = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
ret <8 x i32> %wide.masked.gather
}
declare <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr>, i32 immarg, <8 x i1>, <8 x i32>)