
BreakFalseDeps picks the best register for undef operands when instructions have a false dependency. The problem is that if the instruction is close to the beginning of the function, ReachingDefAnalysis is over-optimistic about the unused registers, which results in collisions with registers just defined in the caller. This patch changes the selection of the undef register to use reverse order, which reduces the probability of register collisions between caller and callee. It brings improvement in some of our internal benchmarks with negligible effect on the other benchmarks.
157 lines
4.8 KiB
LLVM
157 lines
4.8 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown-unknown -mcpu=generic -mattr=+avx512f -fast-isel --fast-isel-abort=1 < %s | FileCheck %s --check-prefix=AVX
; RUN: llc -verify-machineinstrs -mtriple=i686-unknown-unknown -mcpu=generic -mattr=+avx512f -fast-isel --fast-isel-abort=1 < %s | FileCheck %s --check-prefix=AVX_X86
|
|
|
|
|
|
define double @int_to_double_rr(i32 %a) {
|
|
; AVX-LABEL: int_to_double_rr:
|
|
; AVX: # %bb.0: # %entry
|
|
; AVX-NEXT: vcvtusi2sd %edi, %xmm15, %xmm0
|
|
; AVX-NEXT: retq
|
|
;
|
|
; AVX_X86-LABEL: int_to_double_rr:
|
|
; AVX_X86: # %bb.0: # %entry
|
|
; AVX_X86-NEXT: pushl %ebp
|
|
; AVX_X86-NEXT: .cfi_def_cfa_offset 8
|
|
; AVX_X86-NEXT: .cfi_offset %ebp, -8
|
|
; AVX_X86-NEXT: movl %esp, %ebp
|
|
; AVX_X86-NEXT: .cfi_def_cfa_register %ebp
|
|
; AVX_X86-NEXT: andl $-8, %esp
|
|
; AVX_X86-NEXT: subl $8, %esp
|
|
; AVX_X86-NEXT: vcvtusi2sdl 8(%ebp), %xmm7, %xmm0
|
|
; AVX_X86-NEXT: vmovsd %xmm0, (%esp)
|
|
; AVX_X86-NEXT: fldl (%esp)
|
|
; AVX_X86-NEXT: movl %ebp, %esp
|
|
; AVX_X86-NEXT: popl %ebp
|
|
; AVX_X86-NEXT: .cfi_def_cfa %esp, 4
|
|
; AVX_X86-NEXT: retl
|
|
entry:
|
|
%0 = uitofp i32 %a to double
|
|
ret double %0
|
|
}
|
|
|
|
define double @int_to_double_rm(ptr %a) {
|
|
; AVX-LABEL: int_to_double_rm:
|
|
; AVX: # %bb.0: # %entry
|
|
; AVX-NEXT: vcvtusi2sdl (%rdi), %xmm15, %xmm0
|
|
; AVX-NEXT: retq
|
|
;
|
|
; AVX_X86-LABEL: int_to_double_rm:
|
|
; AVX_X86: # %bb.0: # %entry
|
|
; AVX_X86-NEXT: pushl %ebp
|
|
; AVX_X86-NEXT: .cfi_def_cfa_offset 8
|
|
; AVX_X86-NEXT: .cfi_offset %ebp, -8
|
|
; AVX_X86-NEXT: movl %esp, %ebp
|
|
; AVX_X86-NEXT: .cfi_def_cfa_register %ebp
|
|
; AVX_X86-NEXT: andl $-8, %esp
|
|
; AVX_X86-NEXT: subl $8, %esp
|
|
; AVX_X86-NEXT: movl 8(%ebp), %eax
|
|
; AVX_X86-NEXT: vcvtusi2sdl (%eax), %xmm7, %xmm0
|
|
; AVX_X86-NEXT: vmovsd %xmm0, (%esp)
|
|
; AVX_X86-NEXT: fldl (%esp)
|
|
; AVX_X86-NEXT: movl %ebp, %esp
|
|
; AVX_X86-NEXT: popl %ebp
|
|
; AVX_X86-NEXT: .cfi_def_cfa %esp, 4
|
|
; AVX_X86-NEXT: retl
|
|
entry:
|
|
%0 = load i32, ptr %a
|
|
%1 = uitofp i32 %0 to double
|
|
ret double %1
|
|
}
|
|
|
|
define double @int_to_double_rm_optsize(ptr %a) optsize {
|
|
; AVX-LABEL: int_to_double_rm_optsize:
|
|
; AVX: # %bb.0: # %entry
|
|
; AVX-NEXT: vcvtusi2sdl (%rdi), %xmm15, %xmm0
|
|
; AVX-NEXT: retq
|
|
;
|
|
; AVX_X86-LABEL: int_to_double_rm_optsize:
|
|
; AVX_X86: # %bb.0: # %entry
|
|
; AVX_X86-NEXT: pushl %ebp
|
|
; AVX_X86-NEXT: .cfi_def_cfa_offset 8
|
|
; AVX_X86-NEXT: .cfi_offset %ebp, -8
|
|
; AVX_X86-NEXT: movl %esp, %ebp
|
|
; AVX_X86-NEXT: .cfi_def_cfa_register %ebp
|
|
; AVX_X86-NEXT: andl $-8, %esp
|
|
; AVX_X86-NEXT: subl $8, %esp
|
|
; AVX_X86-NEXT: movl 8(%ebp), %eax
|
|
; AVX_X86-NEXT: vcvtusi2sdl (%eax), %xmm7, %xmm0
|
|
; AVX_X86-NEXT: vmovsd %xmm0, (%esp)
|
|
; AVX_X86-NEXT: fldl (%esp)
|
|
; AVX_X86-NEXT: movl %ebp, %esp
|
|
; AVX_X86-NEXT: popl %ebp
|
|
; AVX_X86-NEXT: .cfi_def_cfa %esp, 4
|
|
; AVX_X86-NEXT: retl
|
|
entry:
|
|
%0 = load i32, ptr %a
|
|
%1 = uitofp i32 %0 to double
|
|
ret double %1
|
|
}
|
|
|
|
define float @int_to_float_rr(i32 %a) {
|
|
; AVX-LABEL: int_to_float_rr:
|
|
; AVX: # %bb.0: # %entry
|
|
; AVX-NEXT: vcvtusi2ss %edi, %xmm15, %xmm0
|
|
; AVX-NEXT: retq
|
|
;
|
|
; AVX_X86-LABEL: int_to_float_rr:
|
|
; AVX_X86: # %bb.0: # %entry
|
|
; AVX_X86-NEXT: pushl %eax
|
|
; AVX_X86-NEXT: .cfi_def_cfa_offset 8
|
|
; AVX_X86-NEXT: vcvtusi2ssl {{[0-9]+}}(%esp), %xmm7, %xmm0
|
|
; AVX_X86-NEXT: vmovss %xmm0, (%esp)
|
|
; AVX_X86-NEXT: flds (%esp)
|
|
; AVX_X86-NEXT: popl %eax
|
|
; AVX_X86-NEXT: .cfi_def_cfa_offset 4
|
|
; AVX_X86-NEXT: retl
|
|
entry:
|
|
%0 = uitofp i32 %a to float
|
|
ret float %0
|
|
}
|
|
|
|
define float @int_to_float_rm(ptr %a) {
|
|
; AVX-LABEL: int_to_float_rm:
|
|
; AVX: # %bb.0: # %entry
|
|
; AVX-NEXT: vcvtusi2ssl (%rdi), %xmm15, %xmm0
|
|
; AVX-NEXT: retq
|
|
;
|
|
; AVX_X86-LABEL: int_to_float_rm:
|
|
; AVX_X86: # %bb.0: # %entry
|
|
; AVX_X86-NEXT: pushl %eax
|
|
; AVX_X86-NEXT: .cfi_def_cfa_offset 8
|
|
; AVX_X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; AVX_X86-NEXT: vcvtusi2ssl (%eax), %xmm7, %xmm0
|
|
; AVX_X86-NEXT: vmovss %xmm0, (%esp)
|
|
; AVX_X86-NEXT: flds (%esp)
|
|
; AVX_X86-NEXT: popl %eax
|
|
; AVX_X86-NEXT: .cfi_def_cfa_offset 4
|
|
; AVX_X86-NEXT: retl
|
|
entry:
|
|
%0 = load i32, ptr %a
|
|
%1 = uitofp i32 %0 to float
|
|
ret float %1
|
|
}
|
|
|
|
define float @int_to_float_rm_optsize(ptr %a) optsize {
|
|
; AVX-LABEL: int_to_float_rm_optsize:
|
|
; AVX: # %bb.0: # %entry
|
|
; AVX-NEXT: vcvtusi2ssl (%rdi), %xmm15, %xmm0
|
|
; AVX-NEXT: retq
|
|
;
|
|
; AVX_X86-LABEL: int_to_float_rm_optsize:
|
|
; AVX_X86: # %bb.0: # %entry
|
|
; AVX_X86-NEXT: pushl %eax
|
|
; AVX_X86-NEXT: .cfi_def_cfa_offset 8
|
|
; AVX_X86-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; AVX_X86-NEXT: vcvtusi2ssl (%eax), %xmm7, %xmm0
|
|
; AVX_X86-NEXT: vmovss %xmm0, (%esp)
|
|
; AVX_X86-NEXT: flds (%esp)
|
|
; AVX_X86-NEXT: popl %eax
|
|
; AVX_X86-NEXT: .cfi_def_cfa_offset 4
|
|
; AVX_X86-NEXT: retl
|
|
entry:
|
|
%0 = load i32, ptr %a
|
|
%1 = uitofp i32 %0 to float
|
|
ret float %1
|
|
}
|