
Enable enableMultipleCopyHints() on X86. Original patch by @jonpa: While enabling the machine scheduler for SystemZ, it was discovered that for some reason a test needed one extra, seemingly needless COPY (test/CodeGen/SystemZ/call-03.ll). Investigating that resulted in this patch, which improves register coalescing by providing not just one copy hint, but a sorted list of copy hints. On SystemZ, this gives ~12500 fewer register moves on SPEC, as well as marginally less spilling. Instead of improving just the SystemZ backend, the improvement has been implemented in common code (calculateSpillWeightAndHint()). This gives a lot of test failures, but since this should be a general improvement, I hope that the involved targets will help and review the test updates. Differential Revision: https://reviews.llvm.org/D38128 llvm-svn: 342578
28 lines
833 B
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64

; i64 multiply. On 32-bit x86 this must lower to the three-multiply
; widening sequence (mull for the low halves plus two imull/addl for the
; cross terms into %edx). On x86-64 a single imulq suffices; with multiple
; copy hints the operand is copied into %rax first so the product lands
; directly in the return register.
define i64 @foo(i64 %t, i64 %u) nounwind {
; X32-LABEL: foo:
; X32:       # %bb.0:
; X32-NEXT:    pushl %esi
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X32-NEXT:    movl %ecx, %eax
; X32-NEXT:    mull %esi
; X32-NEXT:    imull {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    addl %ecx, %edx
; X32-NEXT:    imull {{[0-9]+}}(%esp), %esi
; X32-NEXT:    addl %esi, %edx
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
;
; X64-LABEL: foo:
; X64:       # %bb.0:
; X64-NEXT:    movq %rdi, %rax
; X64-NEXT:    imulq %rsi, %rax
; X64-NEXT:    retq
  %k = mul i64 %t, %u
  ret i64 %k
}