
# Context (from the associated commit message):
# The slow compile-time issue was caused by an assert in AArch64RegisterInfo.cpp.
# The assert invokes 'checkAllSuperRegsMarked' after adding all the reserved
# registers. This call gets very expensive after adding the _HI registers, due to
# the way the function searches the 'Exceptions' list, which is expected to be a
# small list but was not (the patch added 190 _HI regs). The code was rewritten so
# that the _HI registers are marked as reserved after the check, which makes the
# problem go away entirely and restores compile-time to what it was before
# (tested with `check-runtimes`, which previously showed a ~5x slowdown).
# This reverts commits:
#   1434d2ab215e3ea9c5f34689d056edd3d4423a78
#   2704647fb7986673b89cef1def729e3b022e2607
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64-unknown-unknown -verify-machineinstrs -O0 -run-pass=regbankselect %s -o - | FileCheck %s
---
# Checks that regbankselect leaves an INLINEASM with only side-effect /
# memory clobber flags (25 = sideeffect mayload maystore) untouched:
# there are no register operands to assign banks to.
name:            inlineasm_memory_clobber
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1:
    ; CHECK-LABEL: name: inlineasm_memory_clobber
    ; CHECK: INLINEASM &"", 25 /* sideeffect mayload maystore attdialect */
    ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
    ; CHECK-NEXT: RET_ReallyLR
    INLINEASM &"", 25
    INLINEASM &"", 1
    RET_ReallyLR
...
---
# Checks that a physical-register clobber operand ($d0, operand flag
# 12 = clobber) on an INLINEASM survives regbankselect unchanged.
name:            inlineasm_register_clobber
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1:
    ; CHECK-LABEL: name: inlineasm_register_clobber
    ; CHECK: INLINEASM &"", 25 /* sideeffect mayload maystore attdialect */, 12 /* clobber */, implicit-def early-clobber $d0
    ; CHECK-NEXT: RET_ReallyLR
    INLINEASM &"", 25, 12, implicit-def early-clobber $d0
    RET_ReallyLR
...
---
# Checks that an inline-asm output constrained to a physical register
# ($w0) is handled: the generic COPY out of $w0 gets the gpr bank.
name:            inlineasm_phys_reg_output
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1:
    ; CHECK-LABEL: name: inlineasm_phys_reg_output
    ; CHECK: INLINEASM &"mov ${0:w}, 7", 0 /* attdialect */, 10 /* regdef */, implicit-def $w0
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
    ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    INLINEASM &"mov ${0:w}, 7", 0 /* attdialect */, 10 /* regdef */, implicit-def $w0
    %0:_(s32) = COPY $w0
    $w0 = COPY %0(s32)
    RET_ReallyLR implicit $w0
...
---
# Checks that an inline-asm output in a virtual register already
# constrained to gpr32common keeps its def, and the generic COPY of it
# is assigned the gpr bank.
name:            inlineasm_virt_reg_output
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1:
    ; CHECK-LABEL: name: inlineasm_virt_reg_output
    ; CHECK: INLINEASM &"mov ${0:w}, 7", 0 /* attdialect */, 2883594 /* regdef:GPR32common */, def %0
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY %0
    ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $w0
    INLINEASM &"mov ${0:w}, 7", 0 /* attdialect */, 2883594 /* regdef:GPR32common */, def %0:gpr32common
    %1:_(s32) = COPY %0
    $w0 = COPY %1(s32)
    RET_ReallyLR implicit $w0
...
---
# Checks bank assignment for an inline asm with two outputs of
# different classes: a GPR32common def (-> gpr bank) and an FPR64 def
# (-> fpr bank) in the same INLINEASM.
name:            inlineasm_virt_mixed_types
alignment:       4
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1:
    ; CHECK-LABEL: name: inlineasm_virt_mixed_types
    ; CHECK: INLINEASM &"mov $0, #0; mov $1, #0", 0 /* attdialect */, 2883594 /* regdef:GPR32common */, def %0, 3735562 /* regdef:FPR64 */, def %1
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY %0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr(s64) = COPY %1
    ; CHECK-NEXT: $d0 = COPY [[COPY1]](s64)
    ; CHECK-NEXT: RET_ReallyLR implicit $d0
    INLINEASM &"mov $0, #0; mov $1, #0", 0 /* attdialect */, 2883594 /* regdef:GPR32common */, def %0:gpr32common, 3735562 /* regdef:FPR64 */, def %1:fpr64
    %3:_(s32) = COPY %0
    %4:_(s64) = COPY %1
    $d0 = COPY %4(s64)
    RET_ReallyLR implicit $d0
...