
The logic in RISCVMatInt would previously produce lui+addiw on RV64 whenever a 32-bit integer must be materialised and the Hi20 and Lo12 parts are non-zero. However, sometimes addi can be used equivalently (whenever the sign extension behaviour of addiw would be a no-op). This patch moves to using addiw only when necessary. Although there is absolutely no advantage in terms of compressibility or performance, this has the following advantages: * It's more consistent with logic used elsewhere in the backend. For instance, RISCVOptWInstrs will try to convert addiw to addi on the basis it reduces test diffs vs RV32. * This matches the lowering GCC does in its codegen path. Unlike LLVM, GCC seems to have different expansion logic for the assembler vs codegen. For codegen it will use lui+addi if possible, but expanding `li` in the assembler will always produce lui+addiw as LLVM did prior to this commit. As someone who has been looking at a lot of gcc vs clang diffs lately, reducing unnecessary divergence is of at least some value. * As the diff for fold-mem-offset.ll shows, we can fold memory offsets in more cases when addi is used. Memory offset folding could be taught to recognise when the addiw could be replaced with an addi, but that seems unnecessary when we can simply change the logic in RISCVMatInt. As pointed out by @topperc during review, making this change without modifying RISCVOptWInstrs risks introducing some cases where we fail to remove a sext.w that we removed before. I've incorporated a patch based on a suggestion from Craig that avoids it, and also adds appropriate RISCVOptWInstrs test cases. The initial patch description noted that the main motivation was to avoid unnecessary differences both for RV32/RV64 and when comparing against GCC, but noted that very occasionally we see a benefit from memory offset folding kicking in when it didn't before.
Looking at the dynamic instruction count difference for SPEC benchmarks targeting rva22u64 and it shows we actually get a meaningful ~4.3% reduction in dynamic icount for 519.lbm_r. Looking at the data more closely, the codegen difference is in `LBM_performStreamCollideTRT` which as a function accounts for ~98% for dynamically executed instructions and the codegen diffs appear to be a knock-on effect of the address merging reducing register pressure right from function entry (for instance, we get a big reduction in dynamically executed loads in that function). Below is the icount data (rva22u64 -O3, no LTO): ``` Benchmark Baseline This PR Diff (%) ============================================================ 500.perlbench_r 174116601991 174115795810 -0.00% 502.gcc_r 218903280858 218903215788 -0.00% 505.mcf_r 131208029185 131207692803 -0.00% 508.namd_r 217497594322 217497594297 -0.00% 510.parest_r 289314486153 289313577652 -0.00% 511.povray_r 30640531048 30640765701 0.00% 519.lbm_r 95897914862 91712688050 -4.36% 520.omnetpp_r 134641549722 134867015683 0.17% 523.xalancbmk_r 281462762992 281432092673 -0.01% 525.x264_r 379776121941 379535558210 -0.06% 526.blender_r 659736022025 659738387343 0.00% 531.deepsjeng_r 349122867552 349122867481 -0.00% 538.imagick_r 238558760552 238558753269 -0.00% 541.leela_r 406578560612 406385135260 -0.05% 544.nab_r 400997131674 400996765827 -0.00% 557.xz_r 130079522194 129945515709 -0.10% ``` The instcounting setup I use doesn't have good support for drilling down into functions from outside the linked executable (e.g. libc). The difference in omnetpp all seems to come from there, and does not reflect any degradation in codegen quality. I can confirm with the current version of the PR there is no change in the number of static sext.w across all the SPEC 2017 benchmarks (rva22u64 O3) Co-authored-by: Craig Topper <craig.topper@sifive.com>
1059 lines
35 KiB
LLVM
1059 lines
35 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
|
; RUN: llc -mtriple=riscv64 -verify-machineinstrs -filetype=obj < %s \
|
|
; RUN: -o /dev/null 2>&1
|
|
; RUN: llc -mtriple=riscv64 -relocation-model=pic -verify-machineinstrs \
|
|
; RUN: -filetype=obj < %s -o /dev/null 2>&1
|
|
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
|
|
; RUN: | FileCheck %s
|
|
; RUN: llc -mtriple=riscv64 -relocation-model=pic -verify-machineinstrs < %s \
|
|
; RUN: | FileCheck %s
|
|
|
|
define void @relax_bcc(i1 %a) nounwind {
|
|
; CHECK-LABEL: relax_bcc:
|
|
; CHECK: # %bb.0:
|
|
; CHECK-NEXT: andi a0, a0, 1
|
|
; CHECK-NEXT: bnez a0, .LBB0_1
|
|
; CHECK-NEXT: j .LBB0_2
|
|
; CHECK-NEXT: .LBB0_1: # %iftrue
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: .zero 4096
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: .LBB0_2: # %tail
|
|
; CHECK-NEXT: ret
|
|
br i1 %a, label %iftrue, label %tail
|
|
|
|
iftrue:
|
|
call void asm sideeffect ".space 4096", ""()
|
|
br label %tail
|
|
|
|
tail:
|
|
ret void
|
|
}
|
|
|
|
define i32 @relax_jal(i1 %a) nounwind {
|
|
; CHECK-LABEL: relax_jal:
|
|
; CHECK: # %bb.0:
|
|
; CHECK-NEXT: addi sp, sp, -16
|
|
; CHECK-NEXT: andi a0, a0, 1
|
|
; CHECK-NEXT: bnez a0, .LBB1_1
|
|
; CHECK-NEXT: # %bb.4:
|
|
; CHECK-NEXT: jump .LBB1_2, a0
|
|
; CHECK-NEXT: .LBB1_1: # %iftrue
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: .zero 1048576
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: j .LBB1_3
|
|
; CHECK-NEXT: .LBB1_2: # %jmp
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: .LBB1_3: # %tail
|
|
; CHECK-NEXT: li a0, 1
|
|
; CHECK-NEXT: addi sp, sp, 16
|
|
; CHECK-NEXT: ret
|
|
br i1 %a, label %iftrue, label %jmp
|
|
|
|
jmp:
|
|
call void asm sideeffect "", ""()
|
|
br label %tail
|
|
|
|
iftrue:
|
|
call void asm sideeffect "", ""()
|
|
br label %space
|
|
|
|
space:
|
|
call void asm sideeffect ".space 1048576", ""()
|
|
br label %tail
|
|
|
|
tail:
|
|
ret i32 1
|
|
}
|
|
|
|
|
|
define void @relax_jal_spill_64() {
|
|
;
|
|
; CHECK-LABEL: relax_jal_spill_64:
|
|
; CHECK: # %bb.0:
|
|
; CHECK-NEXT: addi sp, sp, -112
|
|
; CHECK-NEXT: .cfi_def_cfa_offset 112
|
|
; CHECK-NEXT: sd ra, 104(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s0, 96(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s1, 88(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s2, 80(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s3, 72(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s4, 64(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s5, 56(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s6, 48(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s7, 40(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s8, 32(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s9, 24(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s10, 16(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s11, 8(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: .cfi_offset ra, -8
|
|
; CHECK-NEXT: .cfi_offset s0, -16
|
|
; CHECK-NEXT: .cfi_offset s1, -24
|
|
; CHECK-NEXT: .cfi_offset s2, -32
|
|
; CHECK-NEXT: .cfi_offset s3, -40
|
|
; CHECK-NEXT: .cfi_offset s4, -48
|
|
; CHECK-NEXT: .cfi_offset s5, -56
|
|
; CHECK-NEXT: .cfi_offset s6, -64
|
|
; CHECK-NEXT: .cfi_offset s7, -72
|
|
; CHECK-NEXT: .cfi_offset s8, -80
|
|
; CHECK-NEXT: .cfi_offset s9, -88
|
|
; CHECK-NEXT: .cfi_offset s10, -96
|
|
; CHECK-NEXT: .cfi_offset s11, -104
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li ra, 1
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t0, 5
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t1, 6
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t2, 7
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s0, 8
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s1, 9
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a0, 10
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a1, 11
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a2, 12
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a3, 13
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a4, 14
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a5, 15
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a6, 16
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a7, 17
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s2, 18
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s3, 19
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s4, 20
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s5, 21
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s6, 22
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s7, 23
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s8, 24
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s9, 25
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s10, 26
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s11, 27
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t3, 28
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t4, 29
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t5, 30
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t6, 31
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: beq t5, t6, .LBB2_1
|
|
; CHECK-NEXT: # %bb.3:
|
|
; CHECK-NEXT: sd s11, 0(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: jump .LBB2_4, s11
|
|
; CHECK-NEXT: .LBB2_1: # %branch_1
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: .zero 1048576
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: j .LBB2_2
|
|
; CHECK-NEXT: .LBB2_4: # %branch_2
|
|
; CHECK-NEXT: ld s11, 0(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: .LBB2_2: # %branch_2
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use ra
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t0
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t1
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t2
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s0
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s1
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a0
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a1
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a2
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a3
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a4
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a5
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a6
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a7
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s2
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s3
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s4
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s5
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s6
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s7
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s8
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s9
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s10
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s11
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t3
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t4
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t5
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t6
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: ld ra, 104(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s0, 96(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s1, 88(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s2, 80(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s3, 72(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s4, 64(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s5, 56(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s6, 48(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s7, 40(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s8, 32(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s9, 24(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s10, 16(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s11, 8(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: .cfi_restore ra
|
|
; CHECK-NEXT: .cfi_restore s0
|
|
; CHECK-NEXT: .cfi_restore s1
|
|
; CHECK-NEXT: .cfi_restore s2
|
|
; CHECK-NEXT: .cfi_restore s3
|
|
; CHECK-NEXT: .cfi_restore s4
|
|
; CHECK-NEXT: .cfi_restore s5
|
|
; CHECK-NEXT: .cfi_restore s6
|
|
; CHECK-NEXT: .cfi_restore s7
|
|
; CHECK-NEXT: .cfi_restore s8
|
|
; CHECK-NEXT: .cfi_restore s9
|
|
; CHECK-NEXT: .cfi_restore s10
|
|
; CHECK-NEXT: .cfi_restore s11
|
|
; CHECK-NEXT: addi sp, sp, 112
|
|
; CHECK-NEXT: .cfi_def_cfa_offset 0
|
|
; CHECK-NEXT: ret
|
|
%ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
|
|
%t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
|
|
%t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
|
|
%t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
|
|
%s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
|
|
%s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
|
|
%a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
|
|
%a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
|
|
%a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
|
|
%a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
|
|
%a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
|
|
%a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
|
|
%a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
|
|
%a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
|
|
%s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
|
|
%s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
|
|
%s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
|
|
%s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
|
|
%s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
|
|
%s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
|
|
%s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
|
|
%s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
|
|
%s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
|
|
%s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
|
|
%t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
|
|
%t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
|
|
%t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
|
|
%t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
|
|
|
|
%cmp = icmp eq i64 %t5, %t6
|
|
br i1 %cmp, label %branch_1, label %branch_2
|
|
|
|
branch_1:
|
|
call void asm sideeffect ".space 1048576", ""()
|
|
br label %branch_2
|
|
|
|
branch_2:
|
|
call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
|
|
call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
|
|
call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
|
|
call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
|
|
call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
|
|
call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
|
|
call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
|
|
call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
|
|
call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
|
|
call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
|
|
call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
|
|
call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
|
|
call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
|
|
call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
|
|
call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
|
|
call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
|
|
call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
|
|
call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
|
|
call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
|
|
call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
|
|
call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
|
|
call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
|
|
call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
|
|
call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
|
|
call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
|
|
call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
|
|
call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
|
|
call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
|
|
|
|
ret void
|
|
}
|
|
|
|
define void @relax_jal_spill_64_adjust_spill_slot() {
|
|
;
|
|
; If the stack is large and the offset of BranchRelaxationScratchFrameIndex
|
|
; is out the range of 12-bit signed integer, check whether the spill slot is
|
|
; adjusted to close to the stack base register.
|
|
; CHECK-LABEL: relax_jal_spill_64_adjust_spill_slot:
|
|
; CHECK: # %bb.0:
|
|
; CHECK-NEXT: addi sp, sp, -2032
|
|
; CHECK-NEXT: .cfi_def_cfa_offset 2032
|
|
; CHECK-NEXT: sd ra, 2024(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s0, 2016(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s1, 2008(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s2, 2000(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s3, 1992(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s4, 1984(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s5, 1976(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s6, 1968(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s7, 1960(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s8, 1952(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s9, 1944(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s10, 1936(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s11, 1928(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: .cfi_offset ra, -8
|
|
; CHECK-NEXT: .cfi_offset s0, -16
|
|
; CHECK-NEXT: .cfi_offset s1, -24
|
|
; CHECK-NEXT: .cfi_offset s2, -32
|
|
; CHECK-NEXT: .cfi_offset s3, -40
|
|
; CHECK-NEXT: .cfi_offset s4, -48
|
|
; CHECK-NEXT: .cfi_offset s5, -56
|
|
; CHECK-NEXT: .cfi_offset s6, -64
|
|
; CHECK-NEXT: .cfi_offset s7, -72
|
|
; CHECK-NEXT: .cfi_offset s8, -80
|
|
; CHECK-NEXT: .cfi_offset s9, -88
|
|
; CHECK-NEXT: .cfi_offset s10, -96
|
|
; CHECK-NEXT: .cfi_offset s11, -104
|
|
; CHECK-NEXT: addi s0, sp, 2032
|
|
; CHECK-NEXT: .cfi_def_cfa s0, 0
|
|
; CHECK-NEXT: lui a0, 2
|
|
; CHECK-NEXT: addi a0, a0, -2032
|
|
; CHECK-NEXT: sub sp, sp, a0
|
|
; CHECK-NEXT: srli a0, sp, 12
|
|
; CHECK-NEXT: slli sp, a0, 12
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li ra, 1
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t0, 5
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t1, 6
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t2, 7
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s0, 8
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s1, 9
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a0, 10
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a1, 11
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a2, 12
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a3, 13
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a4, 14
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a5, 15
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a6, 16
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a7, 17
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s2, 18
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s3, 19
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s4, 20
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s5, 21
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s6, 22
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s7, 23
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s8, 24
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s9, 25
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s10, 26
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s11, 27
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t3, 28
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t4, 29
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t5, 30
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t6, 31
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: beq t5, t6, .LBB3_1
|
|
; CHECK-NEXT: # %bb.3:
|
|
; CHECK-NEXT: sd s11, 0(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: jump .LBB3_4, s11
|
|
; CHECK-NEXT: .LBB3_1: # %branch_1
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: .zero 1048576
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: j .LBB3_2
|
|
; CHECK-NEXT: .LBB3_4: # %branch_2
|
|
; CHECK-NEXT: ld s11, 0(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: .LBB3_2: # %branch_2
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use ra
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t0
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t1
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t2
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s0
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s1
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a0
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a1
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a2
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a3
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a4
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a5
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a6
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a7
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s2
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s3
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s4
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s5
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s6
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s7
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s8
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s9
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s10
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s11
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t3
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t4
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t5
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t6
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: addi sp, s0, -2032
|
|
; CHECK-NEXT: .cfi_def_cfa sp, 2032
|
|
; CHECK-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s1, 2008(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s2, 2000(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s3, 1992(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s4, 1984(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s5, 1976(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s6, 1968(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s7, 1960(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s8, 1952(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s9, 1944(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s10, 1936(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s11, 1928(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: .cfi_restore ra
|
|
; CHECK-NEXT: .cfi_restore s0
|
|
; CHECK-NEXT: .cfi_restore s1
|
|
; CHECK-NEXT: .cfi_restore s2
|
|
; CHECK-NEXT: .cfi_restore s3
|
|
; CHECK-NEXT: .cfi_restore s4
|
|
; CHECK-NEXT: .cfi_restore s5
|
|
; CHECK-NEXT: .cfi_restore s6
|
|
; CHECK-NEXT: .cfi_restore s7
|
|
; CHECK-NEXT: .cfi_restore s8
|
|
; CHECK-NEXT: .cfi_restore s9
|
|
; CHECK-NEXT: .cfi_restore s10
|
|
; CHECK-NEXT: .cfi_restore s11
|
|
; CHECK-NEXT: addi sp, sp, 2032
|
|
; CHECK-NEXT: .cfi_def_cfa_offset 0
|
|
; CHECK-NEXT: ret
|
|
%stack_obj = alloca i64, align 4096
|
|
|
|
%ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
|
|
%t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
|
|
%t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
|
|
%t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
|
|
%s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
|
|
%s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
|
|
%a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
|
|
%a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
|
|
%a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
|
|
%a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
|
|
%a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
|
|
%a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
|
|
%a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
|
|
%a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
|
|
%s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
|
|
%s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
|
|
%s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
|
|
%s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
|
|
%s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
|
|
%s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
|
|
%s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
|
|
%s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
|
|
%s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
|
|
%s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
|
|
%t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
|
|
%t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
|
|
%t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
|
|
%t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
|
|
|
|
%cmp = icmp eq i64 %t5, %t6
|
|
br i1 %cmp, label %branch_1, label %branch_2
|
|
|
|
branch_1:
|
|
call void asm sideeffect ".space 1048576", ""()
|
|
br label %branch_2
|
|
|
|
branch_2:
|
|
call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
|
|
call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
|
|
call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
|
|
call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
|
|
call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
|
|
call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
|
|
call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
|
|
call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
|
|
call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
|
|
call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
|
|
call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
|
|
call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
|
|
call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
|
|
call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
|
|
call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
|
|
call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
|
|
call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
|
|
call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
|
|
call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
|
|
call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
|
|
call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
|
|
call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
|
|
call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
|
|
call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
|
|
call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
|
|
call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
|
|
call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
|
|
call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
|
|
|
|
ret void
|
|
}
|
|
|
|
define void @relax_jal_spill_64_restore_block_correspondence() {
|
|
;
|
|
; CHECK-LABEL: relax_jal_spill_64_restore_block_correspondence:
|
|
; CHECK: # %bb.0: # %entry
|
|
; CHECK-NEXT: addi sp, sp, -112
|
|
; CHECK-NEXT: .cfi_def_cfa_offset 112
|
|
; CHECK-NEXT: sd ra, 104(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s0, 96(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s1, 88(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s2, 80(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s3, 72(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s4, 64(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s5, 56(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s6, 48(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s7, 40(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s8, 32(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s9, 24(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s10, 16(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: sd s11, 8(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: .cfi_offset ra, -8
|
|
; CHECK-NEXT: .cfi_offset s0, -16
|
|
; CHECK-NEXT: .cfi_offset s1, -24
|
|
; CHECK-NEXT: .cfi_offset s2, -32
|
|
; CHECK-NEXT: .cfi_offset s3, -40
|
|
; CHECK-NEXT: .cfi_offset s4, -48
|
|
; CHECK-NEXT: .cfi_offset s5, -56
|
|
; CHECK-NEXT: .cfi_offset s6, -64
|
|
; CHECK-NEXT: .cfi_offset s7, -72
|
|
; CHECK-NEXT: .cfi_offset s8, -80
|
|
; CHECK-NEXT: .cfi_offset s9, -88
|
|
; CHECK-NEXT: .cfi_offset s10, -96
|
|
; CHECK-NEXT: .cfi_offset s11, -104
|
|
; CHECK-NEXT: .cfi_remember_state
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li ra, 1
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t0, 5
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t1, 6
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t2, 7
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s0, 8
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s1, 9
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a0, 10
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a1, 11
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a2, 12
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a3, 13
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a4, 14
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a5, 15
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a6, 16
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li a7, 17
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s2, 18
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s3, 19
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s4, 20
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s5, 21
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s6, 22
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s7, 23
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s8, 24
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s9, 25
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s10, 26
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li s11, 27
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t3, 28
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t4, 29
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t5, 30
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: li t6, 31
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: bne t5, t6, .LBB4_2
|
|
; CHECK-NEXT: j .LBB4_1
|
|
; CHECK-NEXT: .LBB4_8: # %dest_1
|
|
; CHECK-NEXT: ld s11, 0(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: .LBB4_1: # %dest_1
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # dest 1
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: j .LBB4_3
|
|
; CHECK-NEXT: .LBB4_2: # %cond_2
|
|
; CHECK-NEXT: bne t3, t4, .LBB4_5
|
|
; CHECK-NEXT: .LBB4_3: # %dest_2
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # dest 2
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: .LBB4_4: # %dest_3
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # dest 3
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use ra
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t0
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t1
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t2
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s0
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s1
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a0
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a1
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a2
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a3
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a4
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a5
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a6
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use a7
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s2
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s3
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s4
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s5
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s6
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s7
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s8
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s9
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s10
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use s11
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t3
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t4
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t5
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: # reg use t6
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: ld ra, 104(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s0, 96(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s1, 88(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s2, 80(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s3, 72(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s4, 64(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s5, 56(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s6, 48(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s7, 40(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s8, 32(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s9, 24(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s10, 16(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: ld s11, 8(sp) # 8-byte Folded Reload
|
|
; CHECK-NEXT: .cfi_restore ra
|
|
; CHECK-NEXT: .cfi_restore s0
|
|
; CHECK-NEXT: .cfi_restore s1
|
|
; CHECK-NEXT: .cfi_restore s2
|
|
; CHECK-NEXT: .cfi_restore s3
|
|
; CHECK-NEXT: .cfi_restore s4
|
|
; CHECK-NEXT: .cfi_restore s5
|
|
; CHECK-NEXT: .cfi_restore s6
|
|
; CHECK-NEXT: .cfi_restore s7
|
|
; CHECK-NEXT: .cfi_restore s8
|
|
; CHECK-NEXT: .cfi_restore s9
|
|
; CHECK-NEXT: .cfi_restore s10
|
|
; CHECK-NEXT: .cfi_restore s11
|
|
; CHECK-NEXT: addi sp, sp, 112
|
|
; CHECK-NEXT: .cfi_def_cfa_offset 0
|
|
; CHECK-NEXT: ret
|
|
; CHECK-NEXT: .LBB4_5: # %cond_3
|
|
; CHECK-NEXT: .cfi_restore_state
|
|
; CHECK-NEXT: beq t1, t2, .LBB4_4
|
|
; CHECK-NEXT: # %bb.6: # %space
|
|
; CHECK-NEXT: #APP
|
|
; CHECK-NEXT: .zero 1048576
|
|
; CHECK-NEXT: #NO_APP
|
|
; CHECK-NEXT: # %bb.7: # %space
|
|
; CHECK-NEXT: sd s11, 0(sp) # 8-byte Folded Spill
|
|
; CHECK-NEXT: jump .LBB4_8, s11
|
|
entry:
|
|
%ra = call i64 asm sideeffect "addi ra, x0, 1", "={ra}"()
|
|
%t0 = call i64 asm sideeffect "addi t0, x0, 5", "={t0}"()
|
|
%t1 = call i64 asm sideeffect "addi t1, x0, 6", "={t1}"()
|
|
%t2 = call i64 asm sideeffect "addi t2, x0, 7", "={t2}"()
|
|
%s0 = call i64 asm sideeffect "addi s0, x0, 8", "={s0}"()
|
|
%s1 = call i64 asm sideeffect "addi s1, x0, 9", "={s1}"()
|
|
%a0 = call i64 asm sideeffect "addi a0, x0, 10", "={a0}"()
|
|
%a1 = call i64 asm sideeffect "addi a1, x0, 11", "={a1}"()
|
|
%a2 = call i64 asm sideeffect "addi a2, x0, 12", "={a2}"()
|
|
%a3 = call i64 asm sideeffect "addi a3, x0, 13", "={a3}"()
|
|
%a4 = call i64 asm sideeffect "addi a4, x0, 14", "={a4}"()
|
|
%a5 = call i64 asm sideeffect "addi a5, x0, 15", "={a5}"()
|
|
%a6 = call i64 asm sideeffect "addi a6, x0, 16", "={a6}"()
|
|
%a7 = call i64 asm sideeffect "addi a7, x0, 17", "={a7}"()
|
|
%s2 = call i64 asm sideeffect "addi s2, x0, 18", "={s2}"()
|
|
%s3 = call i64 asm sideeffect "addi s3, x0, 19", "={s3}"()
|
|
%s4 = call i64 asm sideeffect "addi s4, x0, 20", "={s4}"()
|
|
%s5 = call i64 asm sideeffect "addi s5, x0, 21", "={s5}"()
|
|
%s6 = call i64 asm sideeffect "addi s6, x0, 22", "={s6}"()
|
|
%s7 = call i64 asm sideeffect "addi s7, x0, 23", "={s7}"()
|
|
%s8 = call i64 asm sideeffect "addi s8, x0, 24", "={s8}"()
|
|
%s9 = call i64 asm sideeffect "addi s9, x0, 25", "={s9}"()
|
|
%s10 = call i64 asm sideeffect "addi s10, x0, 26", "={s10}"()
|
|
%s11 = call i64 asm sideeffect "addi s11, x0, 27", "={s11}"()
|
|
%t3 = call i64 asm sideeffect "addi t3, x0, 28", "={t3}"()
|
|
%t4 = call i64 asm sideeffect "addi t4, x0, 29", "={t4}"()
|
|
%t5 = call i64 asm sideeffect "addi t5, x0, 30", "={t5}"()
|
|
%t6 = call i64 asm sideeffect "addi t6, x0, 31", "={t6}"()
|
|
|
|
br label %cond_1
|
|
|
|
cond_1:
|
|
%cmp1 = icmp eq i64 %t5, %t6
|
|
br i1 %cmp1, label %dest_1, label %cond_2
|
|
|
|
cond_2:
|
|
%cmp2 = icmp eq i64 %t3, %t4
|
|
br i1 %cmp2, label %dest_2, label %cond_3
|
|
|
|
cond_3:
|
|
%cmp3 = icmp eq i64 %t1, %t2
|
|
br i1 %cmp3, label %dest_3, label %space
|
|
|
|
space:
|
|
call void asm sideeffect ".space 1048576", ""()
|
|
br label %dest_1
|
|
|
|
dest_1:
|
|
call void asm sideeffect "# dest 1", ""()
|
|
br label %dest_2
|
|
|
|
dest_2:
|
|
call void asm sideeffect "# dest 2", ""()
|
|
br label %dest_3
|
|
|
|
dest_3:
|
|
call void asm sideeffect "# dest 3", ""()
|
|
br label %tail
|
|
|
|
tail:
|
|
call void asm sideeffect "# reg use $0", "{ra}"(i64 %ra)
|
|
call void asm sideeffect "# reg use $0", "{t0}"(i64 %t0)
|
|
call void asm sideeffect "# reg use $0", "{t1}"(i64 %t1)
|
|
call void asm sideeffect "# reg use $0", "{t2}"(i64 %t2)
|
|
call void asm sideeffect "# reg use $0", "{s0}"(i64 %s0)
|
|
call void asm sideeffect "# reg use $0", "{s1}"(i64 %s1)
|
|
call void asm sideeffect "# reg use $0", "{a0}"(i64 %a0)
|
|
call void asm sideeffect "# reg use $0", "{a1}"(i64 %a1)
|
|
call void asm sideeffect "# reg use $0", "{a2}"(i64 %a2)
|
|
call void asm sideeffect "# reg use $0", "{a3}"(i64 %a3)
|
|
call void asm sideeffect "# reg use $0", "{a4}"(i64 %a4)
|
|
call void asm sideeffect "# reg use $0", "{a5}"(i64 %a5)
|
|
call void asm sideeffect "# reg use $0", "{a6}"(i64 %a6)
|
|
call void asm sideeffect "# reg use $0", "{a7}"(i64 %a7)
|
|
call void asm sideeffect "# reg use $0", "{s2}"(i64 %s2)
|
|
call void asm sideeffect "# reg use $0", "{s3}"(i64 %s3)
|
|
call void asm sideeffect "# reg use $0", "{s4}"(i64 %s4)
|
|
call void asm sideeffect "# reg use $0", "{s5}"(i64 %s5)
|
|
call void asm sideeffect "# reg use $0", "{s6}"(i64 %s6)
|
|
call void asm sideeffect "# reg use $0", "{s7}"(i64 %s7)
|
|
call void asm sideeffect "# reg use $0", "{s8}"(i64 %s8)
|
|
call void asm sideeffect "# reg use $0", "{s9}"(i64 %s9)
|
|
call void asm sideeffect "# reg use $0", "{s10}"(i64 %s10)
|
|
call void asm sideeffect "# reg use $0", "{s11}"(i64 %s11)
|
|
call void asm sideeffect "# reg use $0", "{t3}"(i64 %t3)
|
|
call void asm sideeffect "# reg use $0", "{t4}"(i64 %t4)
|
|
call void asm sideeffect "# reg use $0", "{t5}"(i64 %t5)
|
|
call void asm sideeffect "# reg use $0", "{t6}"(i64 %t6)
|
|
|
|
ret void
|
|
}
|