llvm-project/llvm/test/CodeGen/X86/fast-isel-int-float-conversion-x86-64.ll
Phoebe Wang 76e14deb4a
[X86][BreakFalseDeps] Using reverse order for undef register selection (#137569)
BreakFalseDeps picks the best register for undef operands if
instructions have false dependency. The problem is that if the instruction is
close to the beginning of the function, ReachingDefAnalysis is
over-optimistic about the unused registers, which results in collisions with
registers just defined in the caller.

This patch changes the selection of undef registers to a reverse order,
which reduces the probability of register collisions between caller and
callee. It brings improvement in some of our internal benchmarks with
negligible effect on other benchmarks.
2025-06-11 22:08:20 +08:00

100 lines
2.7 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=generic -mattr=+sse2 -fast-isel --fast-isel-abort=1 < %s | FileCheck %s --check-prefix=SSE2
; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=generic -mattr=+avx -fast-isel --fast-isel-abort=1 < %s | FileCheck %s --check-prefix=AVX
; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=generic -mattr=+avx512f -fast-isel --fast-isel-abort=1 < %s | FileCheck %s --check-prefix=AVX
; sitofp i64 -> double with the source in a register.
; The AVX form of vcvtsi2sd has a false dependency on its destination; the
; second (merge) source operand is an undef register chosen by BreakFalseDeps.
; With reverse-order selection it picks xmm15 instead of a low register that
; the caller may have just defined.
define double @long_to_double_rr(i64 %a) {
; SSE2-LABEL: long_to_double_rr:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: cvtsi2sd %rdi, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: long_to_double_rr:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2sd %rdi, %xmm15, %xmm0
; AVX-NEXT: retq
entry:
  %0 = sitofp i64 %a to double
  ret double %0
}
; sitofp i64 -> double where fast-isel folds the load into the conversion
; (memory-operand form, note the "q" size suffix). The AVX variant still
; takes an undef merge source; reverse-order selection yields xmm15.
define double @long_to_double_rm(ptr %a) {
; SSE2-LABEL: long_to_double_rm:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: cvtsi2sdq (%rdi), %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: long_to_double_rm:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2sdq (%rdi), %xmm15, %xmm0
; AVX-NEXT: retq
entry:
  %0 = load i64, ptr %a
  %1 = sitofp i64 %0 to double
  ret double %1
}
; Same as long_to_double_rm but with the optsize attribute: checks that
; optimizing for size keeps the folded-load form and the same undef-register
; choice (xmm15) for the AVX merge operand.
define double @long_to_double_rm_optsize(ptr %a) optsize {
; SSE2-LABEL: long_to_double_rm_optsize:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: cvtsi2sdq (%rdi), %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: long_to_double_rm_optsize:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2sdq (%rdi), %xmm15, %xmm0
; AVX-NEXT: retq
entry:
  %0 = load i64, ptr %a
  %1 = sitofp i64 %0 to double
  ret double %1
}
; sitofp i64 -> float with the source in a register.
; Mirrors long_to_double_rr for the single-precision conversion
; (cvtsi2ss / vcvtsi2ss); the AVX merge operand is again xmm15 under
; reverse-order undef-register selection.
define float @long_to_float_rr(i64 %a) {
; SSE2-LABEL: long_to_float_rr:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: cvtsi2ss %rdi, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: long_to_float_rr:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2ss %rdi, %xmm15, %xmm0
; AVX-NEXT: retq
entry:
  %0 = sitofp i64 %a to float
  ret float %0
}
; sitofp i64 -> float with the load folded into the conversion
; (memory-operand form with the "q" size suffix); AVX merge operand is xmm15.
define float @long_to_float_rm(ptr %a) {
; SSE2-LABEL: long_to_float_rm:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: cvtsi2ssq (%rdi), %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: long_to_float_rm:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2ssq (%rdi), %xmm15, %xmm0
; AVX-NEXT: retq
entry:
  %0 = load i64, ptr %a
  %1 = sitofp i64 %0 to float
  ret float %1
}
; Same as long_to_float_rm but with the optsize attribute: size optimization
; should not change the folded-load form or the xmm15 undef-register choice.
define float @long_to_float_rm_optsize(ptr %a) optsize {
; SSE2-LABEL: long_to_float_rm_optsize:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: cvtsi2ssq (%rdi), %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: long_to_float_rm_optsize:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vcvtsi2ssq (%rdi), %xmm15, %xmm0
; AVX-NEXT: retq
entry:
  %0 = load i64, ptr %a
  %1 = sitofp i64 %0 to float
  ret float %1
}