[LoongArch] Strengthen stack size estimation for LSX/LASX extension (#146455)

This patch adds an emergency spill slot for when register scavenging runs out of registers.
PR #139201 introduced `vstelm` instructions, which have only an 8-bit immediate offset;
previously no emergency spill slot was reserved for storing spilled registers in that case.
This commit is contained in:
tangaac 2025-07-18 16:12:11 +08:00 committed by GitHub
parent efedd49a22
commit 64a0478e08
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
16 changed files with 799 additions and 432 deletions

View File

@ -158,7 +158,12 @@ void LoongArchFrameLowering::processFunctionBeforeFrameFinalized(
// estimateStackSize has been observed to under-estimate the final stack
// size, so give ourselves wiggle-room by checking for stack size
// representable in an 11-bit signed field rather than 12 bits.
if (!isInt<11>(MFI.estimateStackSize(MF)))
// For [x]vstelm.{b/h/w/d} memory instructions with an 8-bit imm offset, a
// 7-bit signed field is fine.
unsigned EstimateStackSize = MFI.estimateStackSize(MF);
if (!isInt<11>(EstimateStackSize) ||
(MF.getSubtarget<LoongArchSubtarget>().hasExtLSX() &&
!isInt<7>(EstimateStackSize)))
ScavSlotsNum = std::max(ScavSlotsNum, 1u);
// For CFR spill.

View File

@ -122,23 +122,23 @@ define i64 @callee_large_scalars(i256 %a, i256 %b) nounwind {
define i64 @caller_large_scalars() nounwind {
; CHECK-LABEL: caller_large_scalars:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -80
; CHECK-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
; CHECK-NEXT: st.d $zero, $sp, 24
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: st.d $zero, $sp, 40
; CHECK-NEXT: vrepli.b $vr0, 0
; CHECK-NEXT: vst $vr0, $sp, 8
; CHECK-NEXT: vst $vr0, $sp, 24
; CHECK-NEXT: ori $a0, $zero, 2
; CHECK-NEXT: st.d $a0, $sp, 0
; CHECK-NEXT: st.d $zero, $sp, 56
; CHECK-NEXT: vst $vr0, $sp, 40
; CHECK-NEXT: st.d $a0, $sp, 16
; CHECK-NEXT: st.d $zero, $sp, 72
; CHECK-NEXT: vst $vr0, $sp, 56
; CHECK-NEXT: ori $a2, $zero, 1
; CHECK-NEXT: addi.d $a0, $sp, 32
; CHECK-NEXT: addi.d $a1, $sp, 0
; CHECK-NEXT: st.d $a2, $sp, 32
; CHECK-NEXT: addi.d $a0, $sp, 48
; CHECK-NEXT: addi.d $a1, $sp, 16
; CHECK-NEXT: st.d $a2, $sp, 48
; CHECK-NEXT: pcaddu18i $ra, %call36(callee_large_scalars)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 80
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%1 = call i64 @callee_large_scalars(i256 1, i256 2)
ret i64 %1
@ -177,20 +177,20 @@ define i64 @callee_large_scalars_exhausted_regs(i64 %a, i64 %b, i64 %c, i64 %d,
define i64 @caller_large_scalars_exhausted_regs() nounwind {
; CHECK-LABEL: caller_large_scalars_exhausted_regs:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $a0, $sp, 16
; CHECK-NEXT: addi.d $sp, $sp, -112
; CHECK-NEXT: st.d $ra, $sp, 104 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $a0, $sp, 32
; CHECK-NEXT: st.d $a0, $sp, 8
; CHECK-NEXT: ori $a0, $zero, 9
; CHECK-NEXT: st.d $a0, $sp, 0
; CHECK-NEXT: st.d $zero, $sp, 40
; CHECK-NEXT: st.d $zero, $sp, 56
; CHECK-NEXT: vrepli.b $vr0, 0
; CHECK-NEXT: vst $vr0, $sp, 24
; CHECK-NEXT: vst $vr0, $sp, 40
; CHECK-NEXT: ori $a0, $zero, 10
; CHECK-NEXT: st.d $a0, $sp, 16
; CHECK-NEXT: st.d $zero, $sp, 72
; CHECK-NEXT: st.d $a0, $sp, 32
; CHECK-NEXT: st.d $zero, $sp, 88
; CHECK-NEXT: ori $a0, $zero, 8
; CHECK-NEXT: st.d $a0, $sp, 48
; CHECK-NEXT: st.d $a0, $sp, 64
; CHECK-NEXT: ori $a0, $zero, 1
; CHECK-NEXT: ori $a1, $zero, 2
; CHECK-NEXT: ori $a2, $zero, 3
@ -198,12 +198,12 @@ define i64 @caller_large_scalars_exhausted_regs() nounwind {
; CHECK-NEXT: ori $a4, $zero, 5
; CHECK-NEXT: ori $a5, $zero, 6
; CHECK-NEXT: ori $a6, $zero, 7
; CHECK-NEXT: addi.d $a7, $sp, 48
; CHECK-NEXT: vst $vr0, $sp, 56
; CHECK-NEXT: addi.d $a7, $sp, 64
; CHECK-NEXT: vst $vr0, $sp, 72
; CHECK-NEXT: pcaddu18i $ra, %call36(callee_large_scalars_exhausted_regs)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ld.d $ra, $sp, 104 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 112
; CHECK-NEXT: ret
%1 = call i64 @callee_large_scalars_exhausted_regs(
i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i256 8, i64 9,

View File

@ -1252,8 +1252,8 @@ define i32 @caller_half_on_stack() nounwind {
;
; LA64F-LP64S-LABEL: caller_half_on_stack:
; LA64F-LP64S: # %bb.0:
; LA64F-LP64S-NEXT: addi.d $sp, $sp, -80
; LA64F-LP64S-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
; LA64F-LP64S-NEXT: addi.d $sp, $sp, -96
; LA64F-LP64S-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; LA64F-LP64S-NEXT: lu12i.w $a0, -12
; LA64F-LP64S-NEXT: ori $a1, $a0, 3200
; LA64F-LP64S-NEXT: lu32i.d $a1, 0
@ -1292,8 +1292,8 @@ define i32 @caller_half_on_stack() nounwind {
; LA64F-LP64S-NEXT: st.w $t0, $sp, 0
; LA64F-LP64S-NEXT: pcaddu18i $ra, %call36(callee_half_on_stack)
; LA64F-LP64S-NEXT: jirl $ra, $ra, 0
; LA64F-LP64S-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
; LA64F-LP64S-NEXT: addi.d $sp, $sp, 80
; LA64F-LP64S-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; LA64F-LP64S-NEXT: addi.d $sp, $sp, 96
; LA64F-LP64S-NEXT: ret
;
; LA64F-LP64D-LABEL: caller_half_on_stack:
@ -1336,8 +1336,8 @@ define i32 @caller_half_on_stack() nounwind {
;
; LA64D-LP64S-LABEL: caller_half_on_stack:
; LA64D-LP64S: # %bb.0:
; LA64D-LP64S-NEXT: addi.d $sp, $sp, -80
; LA64D-LP64S-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
; LA64D-LP64S-NEXT: addi.d $sp, $sp, -96
; LA64D-LP64S-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; LA64D-LP64S-NEXT: lu12i.w $a0, -12
; LA64D-LP64S-NEXT: ori $a1, $a0, 3200
; LA64D-LP64S-NEXT: lu32i.d $a1, 0
@ -1376,8 +1376,8 @@ define i32 @caller_half_on_stack() nounwind {
; LA64D-LP64S-NEXT: st.w $t0, $sp, 0
; LA64D-LP64S-NEXT: pcaddu18i $ra, %call36(callee_half_on_stack)
; LA64D-LP64S-NEXT: jirl $ra, $ra, 0
; LA64D-LP64S-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
; LA64D-LP64S-NEXT: addi.d $sp, $sp, 80
; LA64D-LP64S-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; LA64D-LP64S-NEXT: addi.d $sp, $sp, 96
; LA64D-LP64S-NEXT: ret
;
; LA64D-LP64D-LABEL: caller_half_on_stack:

View File

@ -14,41 +14,41 @@
define dso_local noundef signext i32 @main() nounwind {
; CHECK-LABEL: main:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi.d $sp, $sp, -272
; CHECK-NEXT: st.d $ra, $sp, 264 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $sp, $sp, -288
; CHECK-NEXT: st.d $ra, $sp, 280 # 8-byte Folded Spill
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI0_0)
; CHECK-NEXT: xvld $xr0, $a0, %pc_lo12(.LCPI0_0)
; CHECK-NEXT: xvst $xr0, $sp, 96 # 32-byte Folded Spill
; CHECK-NEXT: xvst $xr0, $sp, 112 # 32-byte Folded Spill
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI0_1)
; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI0_1)
; CHECK-NEXT: xvst $xr1, $sp, 64 # 32-byte Folded Spill
; CHECK-NEXT: xvst $xr1, $sp, 80 # 32-byte Folded Spill
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI0_2)
; CHECK-NEXT: xvld $xr2, $a0, %pc_lo12(.LCPI0_2)
; CHECK-NEXT: xvst $xr2, $sp, 32 # 32-byte Folded Spill
; CHECK-NEXT: xvst $xr2, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI0_3)
; CHECK-NEXT: xvld $xr3, $a0, %pc_lo12(.LCPI0_3)
; CHECK-NEXT: xvst $xr3, $sp, 0 # 32-byte Folded Spill
; CHECK-NEXT: xvst $xr0, $sp, 136
; CHECK-NEXT: xvst $xr1, $sp, 168
; CHECK-NEXT: xvst $xr2, $sp, 200
; CHECK-NEXT: xvst $xr3, $sp, 232
; CHECK-NEXT: addi.d $a0, $sp, 136
; CHECK-NEXT: xvst $xr3, $sp, 16 # 32-byte Folded Spill
; CHECK-NEXT: xvst $xr0, $sp, 152
; CHECK-NEXT: xvst $xr1, $sp, 184
; CHECK-NEXT: xvst $xr2, $sp, 216
; CHECK-NEXT: xvst $xr3, $sp, 248
; CHECK-NEXT: addi.d $a0, $sp, 152
; CHECK-NEXT: pcaddu18i $ra, %call36(foo)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: xvld $xr0, $sp, 96 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 136
; CHECK-NEXT: xvld $xr0, $sp, 64 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 168
; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 200
; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 232
; CHECK-NEXT: addi.d $a0, $sp, 136
; CHECK-NEXT: xvld $xr0, $sp, 112 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 152
; CHECK-NEXT: xvld $xr0, $sp, 80 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 184
; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 216
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 248
; CHECK-NEXT: addi.d $a0, $sp, 152
; CHECK-NEXT: pcaddu18i $ra, %call36(bar)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: move $a0, $zero
; CHECK-NEXT: ld.d $ra, $sp, 264 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 272
; CHECK-NEXT: ld.d $ra, $sp, 280 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 288
; CHECK-NEXT: ret
entry:
%s = alloca %struct.S, align 2

View File

@ -28,12 +28,12 @@ define void @func() {
; CHECK-NEXT: ld.w $a3, $a1, 0
; CHECK-NEXT: ld.w $a2, $a1, 0
; CHECK-NEXT: ld.w $a0, $a1, 0
; CHECK-NEXT: st.d $fp, $sp, 0
; CHECK-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
; CHECK-NEXT: lu12i.w $fp, 1
; CHECK-NEXT: ori $fp, $fp, 12
; CHECK-NEXT: add.d $fp, $sp, $fp
; CHECK-NEXT: st.w $t8, $fp, 0
; CHECK-NEXT: ld.d $fp, $sp, 0
; CHECK-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
; CHECK-NEXT: st.w $t8, $a1, 0
; CHECK-NEXT: st.w $t7, $a1, 0
; CHECK-NEXT: st.w $t6, $a1, 0

View File

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 -mattr=+d < %s | FileCheck %s
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc --mtriple=loongarch64 -mattr=+d,-lsx < %s | FileCheck %s --check-prefixes=CHECK,NOLSX
; RUN: llc --mtriple=loongarch64 -mattr=+d,+lsx < %s | FileCheck %s --check-prefixes=CHECK,LSX
%struct.key_t = type { i32, [16 x i8] }
@ -7,20 +8,35 @@ declare void @llvm.memset.p0.i64(ptr, i8, i64, i1)
declare void @test1(ptr)
define i32 @test() nounwind {
; CHECK-LABEL: test:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -32
; CHECK-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; CHECK-NEXT: st.w $zero, $sp, 16
; CHECK-NEXT: vrepli.b $vr0, 0
; CHECK-NEXT: vst $vr0, $sp, 0
; CHECK-NEXT: addi.d $a0, $sp, 4
; CHECK-NEXT: pcaddu18i $ra, %call36(test1)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: move $a0, $zero
; CHECK-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 32
; CHECK-NEXT: ret
; NOLSX-LABEL: test:
; NOLSX: # %bb.0:
; NOLSX-NEXT: addi.d $sp, $sp, -32
; NOLSX-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; NOLSX-NEXT: st.w $zero, $sp, 16
; NOLSX-NEXT: st.d $zero, $sp, 8
; NOLSX-NEXT: st.d $zero, $sp, 0
; NOLSX-NEXT: addi.d $a0, $sp, 4
; NOLSX-NEXT: pcaddu18i $ra, %call36(test1)
; NOLSX-NEXT: jirl $ra, $ra, 0
; NOLSX-NEXT: move $a0, $zero
; NOLSX-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; NOLSX-NEXT: addi.d $sp, $sp, 32
; NOLSX-NEXT: ret
;
; LSX-LABEL: test:
; LSX: # %bb.0:
; LSX-NEXT: addi.d $sp, $sp, -32
; LSX-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LSX-NEXT: st.w $zero, $sp, 16
; LSX-NEXT: vrepli.b $vr0, 0
; LSX-NEXT: vst $vr0, $sp, 0
; LSX-NEXT: addi.d $a0, $sp, 4
; LSX-NEXT: pcaddu18i $ra, %call36(test1)
; LSX-NEXT: jirl $ra, $ra, 0
; LSX-NEXT: move $a0, $zero
; LSX-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LSX-NEXT: addi.d $sp, $sp, 32
; LSX-NEXT: ret
%key = alloca %struct.key_t, align 4
call void @llvm.memset.p0.i64(ptr %key, i8 0, i64 20, i1 false)
%1 = getelementptr inbounds %struct.key_t, ptr %key, i64 0, i32 1, i64 0
@ -98,3 +114,62 @@ define void @test_large_frame_size_1234576() "frame-pointer"="all" {
%1 = alloca i8, i32 1234567
ret void
}
;; Note: an emergency spill slot is created if (!isInt<7>(StackSize)).
;; Should involve only one SP-adjusting addi per adjustment.
;; LSX 112 + 16 (emergency slot) = 128
define void @test_frame_size_112() {
; NOLSX-LABEL: test_frame_size_112:
; NOLSX: # %bb.0:
; NOLSX-NEXT: addi.d $sp, $sp, -112
; NOLSX-NEXT: .cfi_def_cfa_offset 112
; NOLSX-NEXT: addi.d $sp, $sp, 112
; NOLSX-NEXT: ret
;
; LSX-LABEL: test_frame_size_112:
; LSX: # %bb.0:
; LSX-NEXT: addi.d $sp, $sp, -128
; LSX-NEXT: .cfi_def_cfa_offset 128
; LSX-NEXT: addi.d $sp, $sp, 128
; LSX-NEXT: ret
  ;; 112-byte alloca: representable in 7 bits without LSX, so no extra slot;
  ;; with LSX the frame grows by 16 bytes for the emergency spill slot.
%1 = alloca i8, i32 112
ret void
}
;; LSX 128 + 16 (emergency slot) = 144
define void @test_frame_size_128() {
; NOLSX-LABEL: test_frame_size_128:
; NOLSX: # %bb.0:
; NOLSX-NEXT: addi.d $sp, $sp, -128
; NOLSX-NEXT: .cfi_def_cfa_offset 128
; NOLSX-NEXT: addi.d $sp, $sp, 128
; NOLSX-NEXT: ret
;
; LSX-LABEL: test_frame_size_128:
; LSX: # %bb.0:
; LSX-NEXT: addi.d $sp, $sp, -144
; LSX-NEXT: .cfi_def_cfa_offset 144
; LSX-NEXT: addi.d $sp, $sp, 144
; LSX-NEXT: ret
  ;; 128-byte alloca: with LSX the frame grows by 16 bytes for the
  ;; emergency spill slot (total 144).
%1 = alloca i8, i32 128
ret void
}
;; LSX 144 + 16 (emergency slot) = 160
define void @test_frame_size_144() {
; NOLSX-LABEL: test_frame_size_144:
; NOLSX: # %bb.0:
; NOLSX-NEXT: addi.d $sp, $sp, -144
; NOLSX-NEXT: .cfi_def_cfa_offset 144
; NOLSX-NEXT: addi.d $sp, $sp, 144
; NOLSX-NEXT: ret
;
; LSX-LABEL: test_frame_size_144:
; LSX: # %bb.0:
; LSX-NEXT: addi.d $sp, $sp, -160
; LSX-NEXT: .cfi_def_cfa_offset 160
; LSX-NEXT: addi.d $sp, $sp, 160
; LSX-NEXT: ret
  ;; 144-byte alloca: with LSX the frame grows by 16 bytes for the
  ;; emergency spill slot (total 160).
%1 = alloca i8, i32 144
ret void
}

View File

@ -6,11 +6,11 @@
define void @box(ptr noalias nocapture noundef writeonly sret(%Box) align 16 dereferenceable(48) %b, i64 %i) {
; CHECK-LABEL: box:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: .cfi_def_cfa_offset 96
; CHECK-NEXT: addi.d $sp, $sp, -112
; CHECK-NEXT: .cfi_def_cfa_offset 112
; CHECK-NEXT: slli.d $a2, $a1, 5
; CHECK-NEXT: alsl.d $a1, $a1, $a2, 4
; CHECK-NEXT: addi.d $a2, $sp, 0
; CHECK-NEXT: addi.d $a2, $sp, 16
; CHECK-NEXT: add.d $a3, $a2, $a1
; CHECK-NEXT: vldx $vr0, $a1, $a2
; CHECK-NEXT: vld $vr1, $a3, 32
@ -18,7 +18,7 @@ define void @box(ptr noalias nocapture noundef writeonly sret(%Box) align 16 der
; CHECK-NEXT: vst $vr0, $a0, 0
; CHECK-NEXT: vst $vr1, $a0, 32
; CHECK-NEXT: vst $vr2, $a0, 16
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: addi.d $sp, $sp, 112
; CHECK-NEXT: ret
%1 = alloca [2 x %Box], align 16
%2 = getelementptr inbounds [2 x %Box], ptr %1, i64 0, i64 %i

View File

@ -6,10 +6,10 @@ declare <8 x float> @llvm.powi.v8f32.i32(<8 x float>, i32)
define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind {
; CHECK-LABEL: powi_v8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi.d $sp, $sp, -80
; CHECK-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 64 # 8-byte Folded Spill
; CHECK-NEXT: xvst $xr0, $sp, 0 # 32-byte Folded Spill
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill
; CHECK-NEXT: addi.w $fp, $a0, 0
; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 0
; CHECK-NEXT: movgr2fr.w $fa0, $a0
@ -18,79 +18,79 @@ define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind {
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: movfr2gr.s $a0, $fa0
; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 1
; CHECK-NEXT: movgr2fr.w $fa0, $a0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: movfr2gr.s $a0, $fa0
; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 1
; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 2
; CHECK-NEXT: movgr2fr.w $fa0, $a0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: movfr2gr.s $a0, $fa0
; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 2
; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 3
; CHECK-NEXT: movgr2fr.w $fa0, $a0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: movfr2gr.s $a0, $fa0
; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 3
; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 4
; CHECK-NEXT: movgr2fr.w $fa0, $a0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: movfr2gr.s $a0, $fa0
; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 4
; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 5
; CHECK-NEXT: movgr2fr.w $fa0, $a0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: movfr2gr.s $a0, $fa0
; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 5
; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 6
; CHECK-NEXT: movgr2fr.w $fa0, $a0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: movfr2gr.s $a0, $fa0
; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 6
; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 7
; CHECK-NEXT: movgr2fr.w $fa0, $a0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powisf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: movfr2gr.s $a0, $fa0
; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 7
; CHECK-NEXT: ld.d $fp, $sp, 64 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 80
; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
entry:
%res = call <8 x float> @llvm.powi.v8f32.i32(<8 x float> %va, i32 %b)
@ -102,10 +102,10 @@ declare <4 x double> @llvm.powi.v4f64.i32(<4 x double>, i32)
define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind {
; CHECK-LABEL: powi_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi.d $sp, $sp, -80
; CHECK-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 64 # 8-byte Folded Spill
; CHECK-NEXT: xvst $xr0, $sp, 0 # 32-byte Folded Spill
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: xvst $xr0, $sp, 16 # 32-byte Folded Spill
; CHECK-NEXT: addi.w $fp, $a0, 0
; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0
; CHECK-NEXT: movgr2fr.d $fa0, $a0
@ -114,39 +114,39 @@ define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind {
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: movfr2gr.d $a0, $fa0
; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1
; CHECK-NEXT: movgr2fr.d $fa0, $a0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: movfr2gr.d $a0, $fa0
; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 1
; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 2
; CHECK-NEXT: movgr2fr.d $fa0, $a0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: movfr2gr.d $a0, $fa0
; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 2
; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
; CHECK-NEXT: xvst $xr0, $sp, 48 # 32-byte Folded Spill
; CHECK-NEXT: xvld $xr0, $sp, 16 # 32-byte Folded Reload
; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3
; CHECK-NEXT: movgr2fr.d $fa0, $a0
; CHECK-NEXT: move $a0, $fp
; CHECK-NEXT: pcaddu18i $ra, %call36(__powidf2)
; CHECK-NEXT: jirl $ra, $ra, 0
; CHECK-NEXT: movfr2gr.d $a0, $fa0
; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
; CHECK-NEXT: xvld $xr0, $sp, 48 # 32-byte Folded Reload
; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 3
; CHECK-NEXT: ld.d $fp, $sp, 64 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 80
; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
entry:
%res = call <4 x double> @llvm.powi.v4f64.i32(<4 x double> %va, i32 %b)

View File

@ -76,21 +76,21 @@ define void @extract_4xdouble(ptr %src, ptr %dst) nounwind {
define void @extract_32xi8_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
; CHECK-LABEL: extract_32xi8_idx:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -64
; CHECK-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 48 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 64
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 96
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
; CHECK-NEXT: addi.d $a0, $sp, 0
; CHECK-NEXT: xvst $xr0, $sp, 32
; CHECK-NEXT: addi.d $a0, $sp, 32
; CHECK-NEXT: bstrins.d $a0, $a2, 4, 0
; CHECK-NEXT: ld.b $a0, $a0, 0
; CHECK-NEXT: st.b $a0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
; CHECK-NEXT: ld.d $fp, $sp, 48 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 64
; CHECK-NEXT: addi.d $sp, $fp, -96
; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <32 x i8>, ptr %src
%e = extractelement <32 x i8> %v, i32 %idx
@ -101,21 +101,21 @@ define void @extract_32xi8_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
define void @extract_16xi16_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
; CHECK-LABEL: extract_16xi16_idx:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -64
; CHECK-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 48 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 64
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 96
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
; CHECK-NEXT: addi.d $a0, $sp, 0
; CHECK-NEXT: xvst $xr0, $sp, 32
; CHECK-NEXT: addi.d $a0, $sp, 32
; CHECK-NEXT: bstrins.d $a0, $a2, 4, 1
; CHECK-NEXT: ld.h $a0, $a0, 0
; CHECK-NEXT: st.h $a0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
; CHECK-NEXT: ld.d $fp, $sp, 48 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 64
; CHECK-NEXT: addi.d $sp, $fp, -96
; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <16 x i16>, ptr %src
%e = extractelement <16 x i16> %v, i32 %idx
@ -126,21 +126,21 @@ define void @extract_16xi16_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
define void @extract_8xi32_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
; CHECK-LABEL: extract_8xi32_idx:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -64
; CHECK-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 48 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 64
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 96
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
; CHECK-NEXT: addi.d $a0, $sp, 0
; CHECK-NEXT: xvst $xr0, $sp, 32
; CHECK-NEXT: addi.d $a0, $sp, 32
; CHECK-NEXT: bstrins.d $a0, $a2, 4, 2
; CHECK-NEXT: ld.w $a0, $a0, 0
; CHECK-NEXT: st.w $a0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
; CHECK-NEXT: ld.d $fp, $sp, 48 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 64
; CHECK-NEXT: addi.d $sp, $fp, -96
; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <8 x i32>, ptr %src
%e = extractelement <8 x i32> %v, i32 %idx
@ -151,21 +151,21 @@ define void @extract_8xi32_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
define void @extract_4xi64_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
; CHECK-LABEL: extract_4xi64_idx:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -64
; CHECK-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 48 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 64
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 96
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
; CHECK-NEXT: addi.d $a0, $sp, 0
; CHECK-NEXT: xvst $xr0, $sp, 32
; CHECK-NEXT: addi.d $a0, $sp, 32
; CHECK-NEXT: bstrins.d $a0, $a2, 4, 3
; CHECK-NEXT: ld.d $a0, $a0, 0
; CHECK-NEXT: st.d $a0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
; CHECK-NEXT: ld.d $fp, $sp, 48 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 64
; CHECK-NEXT: addi.d $sp, $fp, -96
; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <4 x i64>, ptr %src
%e = extractelement <4 x i64> %v, i32 %idx
@ -176,21 +176,21 @@ define void @extract_4xi64_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
define void @extract_8xfloat_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
; CHECK-LABEL: extract_8xfloat_idx:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -64
; CHECK-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 48 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 64
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 96
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
; CHECK-NEXT: addi.d $a0, $sp, 0
; CHECK-NEXT: xvst $xr0, $sp, 32
; CHECK-NEXT: addi.d $a0, $sp, 32
; CHECK-NEXT: bstrins.d $a0, $a2, 4, 2
; CHECK-NEXT: fld.s $fa0, $a0, 0
; CHECK-NEXT: fst.s $fa0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
; CHECK-NEXT: ld.d $fp, $sp, 48 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 64
; CHECK-NEXT: addi.d $sp, $fp, -96
; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <8 x float>, ptr %src
%e = extractelement <8 x float> %v, i32 %idx
@ -201,21 +201,21 @@ define void @extract_8xfloat_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
define void @extract_4xdouble_idx(ptr %src, ptr %dst, i32 %idx) nounwind {
; CHECK-LABEL: extract_4xdouble_idx:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -64
; CHECK-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 48 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 64
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 96
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
; CHECK-NEXT: addi.d $a0, $sp, 0
; CHECK-NEXT: xvst $xr0, $sp, 32
; CHECK-NEXT: addi.d $a0, $sp, 32
; CHECK-NEXT: bstrins.d $a0, $a2, 4, 3
; CHECK-NEXT: fld.d $fa0, $a0, 0
; CHECK-NEXT: fst.d $fa0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
; CHECK-NEXT: ld.d $fp, $sp, 48 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 64
; CHECK-NEXT: addi.d $sp, $fp, -96
; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <4 x double>, ptr %src
%e = extractelement <4 x double> %v, i32 %idx

View File

@ -114,22 +114,22 @@ define void @insert_4xdouble(ptr %src, ptr %dst, double %in) nounwind {
define void @insert_32xi8_idx(ptr %src, ptr %dst, i8 %in, i32 %idx) nounwind {
; CHECK-LABEL: insert_32xi8_idx:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -64
; CHECK-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 48 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 64
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 96
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
; CHECK-NEXT: addi.d $a0, $sp, 0
; CHECK-NEXT: xvst $xr0, $sp, 32
; CHECK-NEXT: addi.d $a0, $sp, 32
; CHECK-NEXT: bstrins.d $a0, $a3, 4, 0
; CHECK-NEXT: st.b $a2, $a0, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvld $xr0, $sp, 32
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
; CHECK-NEXT: ld.d $fp, $sp, 48 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 64
; CHECK-NEXT: addi.d $sp, $fp, -96
; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <32 x i8>, ptr %src
%v_new = insertelement <32 x i8> %v, i8 %in, i32 %idx
@ -140,22 +140,22 @@ define void @insert_32xi8_idx(ptr %src, ptr %dst, i8 %in, i32 %idx) nounwind {
define void @insert_16xi16_idx(ptr %src, ptr %dst, i16 %in, i32 %idx) nounwind {
; CHECK-LABEL: insert_16xi16_idx:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -64
; CHECK-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 48 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 64
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 96
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
; CHECK-NEXT: addi.d $a0, $sp, 0
; CHECK-NEXT: xvst $xr0, $sp, 32
; CHECK-NEXT: addi.d $a0, $sp, 32
; CHECK-NEXT: bstrins.d $a0, $a3, 4, 1
; CHECK-NEXT: st.h $a2, $a0, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvld $xr0, $sp, 32
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
; CHECK-NEXT: ld.d $fp, $sp, 48 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 64
; CHECK-NEXT: addi.d $sp, $fp, -96
; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <16 x i16>, ptr %src
%v_new = insertelement <16 x i16> %v, i16 %in, i32 %idx
@ -166,22 +166,22 @@ define void @insert_16xi16_idx(ptr %src, ptr %dst, i16 %in, i32 %idx) nounwind {
define void @insert_8xi32_idx(ptr %src, ptr %dst, i32 %in, i32 %idx) nounwind {
; CHECK-LABEL: insert_8xi32_idx:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -64
; CHECK-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 48 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 64
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 96
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
; CHECK-NEXT: addi.d $a0, $sp, 0
; CHECK-NEXT: xvst $xr0, $sp, 32
; CHECK-NEXT: addi.d $a0, $sp, 32
; CHECK-NEXT: bstrins.d $a0, $a3, 4, 2
; CHECK-NEXT: st.w $a2, $a0, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvld $xr0, $sp, 32
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
; CHECK-NEXT: ld.d $fp, $sp, 48 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 64
; CHECK-NEXT: addi.d $sp, $fp, -96
; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <8 x i32>, ptr %src
%v_new = insertelement <8 x i32> %v, i32 %in, i32 %idx
@ -192,22 +192,22 @@ define void @insert_8xi32_idx(ptr %src, ptr %dst, i32 %in, i32 %idx) nounwind {
define void @insert_4xi64_idx(ptr %src, ptr %dst, i64 %in, i32 %idx) nounwind {
; CHECK-LABEL: insert_4xi64_idx:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -64
; CHECK-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 48 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 64
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 96
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr0, $a0, 0
; CHECK-NEXT: xvst $xr0, $sp, 0
; CHECK-NEXT: addi.d $a0, $sp, 0
; CHECK-NEXT: xvst $xr0, $sp, 32
; CHECK-NEXT: addi.d $a0, $sp, 32
; CHECK-NEXT: bstrins.d $a0, $a3, 4, 3
; CHECK-NEXT: st.d $a2, $a0, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvld $xr0, $sp, 32
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
; CHECK-NEXT: ld.d $fp, $sp, 48 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 64
; CHECK-NEXT: addi.d $sp, $fp, -96
; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <4 x i64>, ptr %src
%v_new = insertelement <4 x i64> %v, i64 %in, i32 %idx
@ -218,22 +218,22 @@ define void @insert_4xi64_idx(ptr %src, ptr %dst, i64 %in, i32 %idx) nounwind {
define void @insert_8xfloat_idx(ptr %src, ptr %dst, float %in, i32 %idx) nounwind {
; CHECK-LABEL: insert_8xfloat_idx:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -64
; CHECK-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 48 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 64
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 96
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr1, $a0, 0
; CHECK-NEXT: xvst $xr1, $sp, 0
; CHECK-NEXT: addi.d $a0, $sp, 0
; CHECK-NEXT: xvst $xr1, $sp, 32
; CHECK-NEXT: addi.d $a0, $sp, 32
; CHECK-NEXT: bstrins.d $a0, $a2, 4, 2
; CHECK-NEXT: fst.s $fa0, $a0, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvld $xr0, $sp, 32
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
; CHECK-NEXT: ld.d $fp, $sp, 48 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 64
; CHECK-NEXT: addi.d $sp, $fp, -96
; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <8 x float>, ptr %src
%v_new = insertelement <8 x float> %v, float %in, i32 %idx
@ -244,22 +244,22 @@ define void @insert_8xfloat_idx(ptr %src, ptr %dst, float %in, i32 %idx) nounwin
define void @insert_4xdouble_idx(ptr %src, ptr %dst, double %in, i32 %idx) nounwind {
; CHECK-LABEL: insert_4xdouble_idx:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -64
; CHECK-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 48 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 64
; CHECK-NEXT: addi.d $sp, $sp, -96
; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $fp, $sp, 96
; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
; CHECK-NEXT: xvld $xr1, $a0, 0
; CHECK-NEXT: xvst $xr1, $sp, 0
; CHECK-NEXT: addi.d $a0, $sp, 0
; CHECK-NEXT: xvst $xr1, $sp, 32
; CHECK-NEXT: addi.d $a0, $sp, 32
; CHECK-NEXT: bstrins.d $a0, $a2, 4, 3
; CHECK-NEXT: fst.d $fa0, $a0, 0
; CHECK-NEXT: xvld $xr0, $sp, 0
; CHECK-NEXT: xvld $xr0, $sp, 32
; CHECK-NEXT: xvst $xr0, $a1, 0
; CHECK-NEXT: addi.d $sp, $fp, -64
; CHECK-NEXT: ld.d $fp, $sp, 48 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 64
; CHECK-NEXT: addi.d $sp, $fp, -96
; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
%v = load volatile <4 x double>, ptr %src
%v_new = insertelement <4 x double> %v, double %in, i32 %idx

View File

@ -347,42 +347,42 @@ define { <2 x float>, <2 x float> } @test_sincos_v2f32(<2 x float> %a) #0 {
;
; LA64-LABEL: test_sincos_v2f32:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -64
; LA64-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; LA64-NEXT: vst $vr0, $sp, 0 # 16-byte Folded Spill
; LA64-NEXT: addi.d $sp, $sp, -80
; LA64-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
; LA64-NEXT: vreplvei.w $vr0, $vr0, 0
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload
; LA64-NEXT: vreplvei.w $vr0, $vr0, 1
; LA64-NEXT: vst $vr0, $sp, 0 # 16-byte Folded Spill
; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT: vld $vr1, $sp, 16 # 16-byte Folded Reload
; LA64-NEXT: vpackev.w $vr0, $vr0, $vr1
; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT: vld $vr1, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: vpackev.w $vr1, $vr0, $vr1
; LA64-NEXT: vpackev.w $vr0, $vr0, $vr1
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 48 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 64
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT: vld $vr1, $sp, 48 # 16-byte Folded Reload
; LA64-NEXT: vpackev.w $vr1, $vr0, $vr1
; LA64-NEXT: vld $vr0, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 80
; LA64-NEXT: ret
%result = call { <2 x float>, <2 x float> } @llvm.sincos.v2f32(<2 x float> %a)
ret { <2 x float>, <2 x float> } %result
@ -439,48 +439,48 @@ define { <3 x float>, <3 x float> } @test_sincos_v3f32(<3 x float> %a) #0 {
;
; LA64-LABEL: test_sincos_v3f32:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -96
; LA64-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: addi.d $sp, $sp, -112
; LA64-NEXT: st.d $ra, $sp, 104 # 8-byte Folded Spill
; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill
; LA64-NEXT: vreplvei.w $vr0, $vr0, 2
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fst.s $fa0, $sp, 88
; LA64-NEXT: vld $vr0, $sp, 48 # 16-byte Folded Reload
; LA64-NEXT: vreplvei.w $vr0, $vr0, 1
; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fst.s $fa0, $sp, 72
; LA64-NEXT: vld $vr0, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: vreplvei.w $vr0, $vr0, 1
; LA64-NEXT: vst $vr0, $sp, 0 # 16-byte Folded Spill
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fst.s $fa0, $sp, 68
; LA64-NEXT: vld $vr0, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: fst.s $fa0, $sp, 84
; LA64-NEXT: vld $vr0, $sp, 48 # 16-byte Folded Reload
; LA64-NEXT: vreplvei.w $vr0, $vr0, 0
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(sinf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fst.s $fa0, $sp, 64
; LA64-NEXT: fst.s $fa0, $sp, 80
; LA64-NEXT: vld $vr0, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fst.s $fa0, $sp, 72
; LA64-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fst.s $fa0, $sp, 56
; LA64-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload
; LA64-NEXT: fst.s $fa0, $sp, 68
; LA64-NEXT: vld $vr0, $sp, 48 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fst.s $fa0, $sp, 52
; LA64-NEXT: vld $vr0, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cosf)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: fst.s $fa0, $sp, 48
; LA64-NEXT: vld $vr0, $sp, 64
; LA64-NEXT: vld $vr1, $sp, 48
; LA64-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 96
; LA64-NEXT: fst.s $fa0, $sp, 64
; LA64-NEXT: vld $vr0, $sp, 80
; LA64-NEXT: vld $vr1, $sp, 64
; LA64-NEXT: ld.d $ra, $sp, 104 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 112
; LA64-NEXT: ret
%result = call { <3 x float>, <3 x float> } @llvm.sincos.v3f32(<3 x float> %a)
ret { <3 x float>, <3 x float> } %result
@ -568,44 +568,44 @@ define { <2 x double>, <2 x double> } @test_sincos_v2f64(<2 x double> %a) #0 {
;
; LA64-LABEL: test_sincos_v2f64:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -64
; LA64-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; LA64-NEXT: vst $vr0, $sp, 0 # 16-byte Folded Spill
; LA64-NEXT: vreplvei.d $vr0, $vr0, 0
; LA64-NEXT: addi.d $sp, $sp, -80
; LA64-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
; LA64-NEXT: vreplvei.d $vr0, $vr0, 0
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(sin)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: movfr2gr.d $a0, $fa0
; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload
; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload
; LA64-NEXT: vreplvei.d $vr0, $vr0, 1
; LA64-NEXT: vst $vr0, $sp, 0 # 16-byte Folded Spill
; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(sin)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: movfr2gr.d $a0, $fa0
; LA64-NEXT: vld $vr0, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: vld $vr0, $sp, 48 # 16-byte Folded Reload
; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 1
; LA64-NEXT: vst $vr0, $sp, 48 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cos)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: movfr2gr.d $a0, $fa0
; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0
; LA64-NEXT: vst $vr0, $sp, 32 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cos)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: movfr2gr.d $a0, $fa0
; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0
; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
; LA64-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload
; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
; LA64-NEXT: pcaddu18i $ra, %call36(cos)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: movfr2gr.d $a0, $fa0
; LA64-NEXT: vld $vr1, $sp, 16 # 16-byte Folded Reload
; LA64-NEXT: vld $vr1, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: vinsgr2vr.d $vr1, $a0, 1
; LA64-NEXT: vld $vr0, $sp, 32 # 16-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 64
; LA64-NEXT: vld $vr0, $sp, 48 # 16-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 80
; LA64-NEXT: ret
%result = call { <2 x double>, <2 x double> } @llvm.sincos.v2f64(<2 x double> %a)
ret { <2 x double>, <2 x double> } %result
@ -801,17 +801,17 @@ define { <2 x fp128>, <2 x fp128> } @test_sincos_v2f128(<2 x fp128> %a) #0 {
;
; LA64-LABEL: test_sincos_v2f128:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -80
; LA64-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 64 # 8-byte Folded Spill
; LA64-NEXT: st.d $s0, $sp, 56 # 8-byte Folded Spill
; LA64-NEXT: st.d $s1, $sp, 48 # 8-byte Folded Spill
; LA64-NEXT: st.d $s2, $sp, 40 # 8-byte Folded Spill
; LA64-NEXT: st.d $s3, $sp, 32 # 8-byte Folded Spill
; LA64-NEXT: st.d $s4, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT: st.d $s5, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT: st.d $s6, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT: st.d $s7, $sp, 0 # 8-byte Folded Spill
; LA64-NEXT: addi.d $sp, $sp, -96
; LA64-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
; LA64-NEXT: st.d $s0, $sp, 72 # 8-byte Folded Spill
; LA64-NEXT: st.d $s1, $sp, 64 # 8-byte Folded Spill
; LA64-NEXT: st.d $s2, $sp, 56 # 8-byte Folded Spill
; LA64-NEXT: st.d $s3, $sp, 48 # 8-byte Folded Spill
; LA64-NEXT: st.d $s4, $sp, 40 # 8-byte Folded Spill
; LA64-NEXT: st.d $s5, $sp, 32 # 8-byte Folded Spill
; LA64-NEXT: st.d $s6, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT: st.d $s7, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT: ld.d $fp, $a1, 16
; LA64-NEXT: ld.d $s0, $a1, 24
; LA64-NEXT: ld.d $s1, $a1, 0
@ -847,17 +847,17 @@ define { <2 x fp128>, <2 x fp128> } @test_sincos_v2f128(<2 x fp128> %a) #0 {
; LA64-NEXT: st.d $s6, $s3, 16
; LA64-NEXT: st.d $s5, $s3, 8
; LA64-NEXT: st.d $s4, $s3, 0
; LA64-NEXT: ld.d $s7, $sp, 0 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s6, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s5, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s4, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s3, $sp, 32 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s2, $sp, 40 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s1, $sp, 48 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s0, $sp, 56 # 8-byte Folded Reload
; LA64-NEXT: ld.d $fp, $sp, 64 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 80
; LA64-NEXT: ld.d $s7, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s6, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s5, $sp, 32 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s4, $sp, 40 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s3, $sp, 48 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s2, $sp, 56 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s1, $sp, 64 # 8-byte Folded Reload
; LA64-NEXT: ld.d $s0, $sp, 72 # 8-byte Folded Reload
; LA64-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 96
; LA64-NEXT: ret
%result = call { <2 x fp128>, <2 x fp128> } @llvm.sincos.v2f128(<2 x fp128> %a)
ret { <2 x fp128>, <2 x fp128> } %result

View File

@ -0,0 +1,287 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc --mtriple=loongarch64 -mattr=+d,+lsx --verify-machineinstrs < %s | FileCheck %s
;; Purpose: exercise frame-index elimination for LSX `vstelm.b` when every
;; allocatable GPR is live.  `vstelm` carries only a small signed immediate
;; offset (8-bit per the commit description), and the 128-byte %ss alloca
;; pushes %s's frame offset (136) out of that range, so a scratch GPR is
;; needed to materialize the address.  With all GPRs occupied by the inline
;; asm below, the register scavenger must fall back to the emergency spill
;; slot that the strengthened stack-size estimate now reserves.
define void @eliminate_frame_index(<16 x i8> %a) nounwind {
; CHECK-LABEL: eliminate_frame_index:
; CHECK: # %bb.0:
; CHECK-NEXT: addi.d $sp, $sp, -240
; CHECK-NEXT: st.d $ra, $sp, 232 # 8-byte Folded Spill
; CHECK-NEXT: st.d $fp, $sp, 224 # 8-byte Folded Spill
; CHECK-NEXT: st.d $s0, $sp, 216 # 8-byte Folded Spill
; CHECK-NEXT: st.d $s1, $sp, 208 # 8-byte Folded Spill
; CHECK-NEXT: st.d $s2, $sp, 200 # 8-byte Folded Spill
; CHECK-NEXT: st.d $s3, $sp, 192 # 8-byte Folded Spill
; CHECK-NEXT: st.d $s4, $sp, 184 # 8-byte Folded Spill
; CHECK-NEXT: st.d $s5, $sp, 176 # 8-byte Folded Spill
; CHECK-NEXT: st.d $s6, $sp, 168 # 8-byte Folded Spill
; CHECK-NEXT: st.d $s7, $sp, 160 # 8-byte Folded Spill
; CHECK-NEXT: st.d $s8, $sp, 152 # 8-byte Folded Spill
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $zero, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $ra, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $tp, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $a0, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $a1, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $a2, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $a3, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $a4, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $a5, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $a6, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $a7, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $t0, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $t1, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $t2, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $t3, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $t4, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $t5, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $t6, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $t7, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $t8, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $fp, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $s0, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $s1, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $s2, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $s3, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $s4, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $s5, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $s6, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $s7, $zero, 1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: addi.d $s8, $zero, 1
; CHECK-NEXT: #NO_APP
;; The interesting sequence: with no free GPR, $a0 is spilled into the
;; reserved emergency slot at $sp+0, used to materialize the element
;; address ($sp+136, unreachable via vstelm's immediate), then reloaded.
; CHECK-NEXT: st.d $a0, $sp, 0 # 8-byte Folded Spill
; CHECK-NEXT: addi.d $a0, $sp, 136
; CHECK-NEXT: vstelm.b $vr0, $a0, 0, 0
; CHECK-NEXT: ld.d $a0, $sp, 0 # 8-byte Folded Reload
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $zero
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $ra
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $tp
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $a0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $a1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $a2
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $a3
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $a4
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $a5
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $a6
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $a7
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $t0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $t1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $t2
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $t3
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $t4
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $t5
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $t6
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $t7
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $t8
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $fp
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $s0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $s1
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $s2
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $s3
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $s4
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $s5
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $s6
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $s7
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: #APP
; CHECK-NEXT: # reg use $s8
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: ld.d $s8, $sp, 152 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $s7, $sp, 160 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $s6, $sp, 168 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $s5, $sp, 176 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $s4, $sp, 184 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $s3, $sp, 192 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $s2, $sp, 200 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $s1, $sp, 208 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $s0, $sp, 216 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $fp, $sp, 224 # 8-byte Folded Reload
; CHECK-NEXT: ld.d $ra, $sp, 232 # 8-byte Folded Reload
; CHECK-NEXT: addi.d $sp, $sp, 240
; CHECK-NEXT: ret
  ;; %s is the vstelm destination; the 128-byte %ss alloca pushes %s's
  ;; frame offset beyond the store's small immediate reach.
  %s = alloca [16 x i8]
  %ss = alloca [128 x i8]
  ;; Clobber (define) every allocatable GPR so none is free for frame-index
  ;; elimination at the store below.
  %zero = call i64 asm sideeffect "addi.d $$zero, $$zero, 1", "={r0}"()
  %ra = call i64 asm sideeffect "addi.d $$ra, $$zero, 1", "={r1}"()
  %tp = call i64 asm sideeffect "addi.d $$tp, $$zero, 1", "={r2}"()
  %a0 = call i64 asm sideeffect "addi.d $$a0, $$zero, 1", "={r4}"()
  %a1 = call i64 asm sideeffect "addi.d $$a1, $$zero, 1", "={r5}"()
  %a2 = call i64 asm sideeffect "addi.d $$a2, $$zero, 1", "={r6}"()
  %a3 = call i64 asm sideeffect "addi.d $$a3, $$zero, 1", "={r7}"()
  %a4 = call i64 asm sideeffect "addi.d $$a4, $$zero, 1", "={r8}"()
  %a5 = call i64 asm sideeffect "addi.d $$a5, $$zero, 1", "={r9}"()
  %a6 = call i64 asm sideeffect "addi.d $$a6, $$zero, 1", "={r10}"()
  %a7 = call i64 asm sideeffect "addi.d $$a7, $$zero, 1", "={r11}"()
  %t0 = call i64 asm sideeffect "addi.d $$t0, $$zero, 1", "={r12}"()
  %t1 = call i64 asm sideeffect "addi.d $$t1, $$zero, 1", "={r13}"()
  %t2 = call i64 asm sideeffect "addi.d $$t2, $$zero, 1", "={r14}"()
  %t3 = call i64 asm sideeffect "addi.d $$t3, $$zero, 1", "={r15}"()
  %t4 = call i64 asm sideeffect "addi.d $$t4, $$zero, 1", "={r16}"()
  %t5 = call i64 asm sideeffect "addi.d $$t5, $$zero, 1", "={r17}"()
  %t6 = call i64 asm sideeffect "addi.d $$t6, $$zero, 1", "={r18}"()
  %t7 = call i64 asm sideeffect "addi.d $$t7, $$zero, 1", "={r19}"()
  %t8 = call i64 asm sideeffect "addi.d $$t8, $$zero, 1", "={r20}"()
  ;; r21 Reserved (Non-allocatable)
  %s9 = call i64 asm sideeffect "addi.d $$s9, $$zero, 1", "={r22}"()
  %s0 = call i64 asm sideeffect "addi.d $$s0, $$zero, 1", "={r23}"()
  %s1 = call i64 asm sideeffect "addi.d $$s1, $$zero, 1", "={r24}"()
  %s2 = call i64 asm sideeffect "addi.d $$s2, $$zero, 1", "={r25}"()
  %s3 = call i64 asm sideeffect "addi.d $$s3, $$zero, 1", "={r26}"()
  %s4 = call i64 asm sideeffect "addi.d $$s4, $$zero, 1", "={r27}"()
  %s5 = call i64 asm sideeffect "addi.d $$s5, $$zero, 1", "={r28}"()
  %s6 = call i64 asm sideeffect "addi.d $$s6, $$zero, 1", "={r29}"()
  %s7 = call i64 asm sideeffect "addi.d $$s7, $$zero, 1", "={r30}"()
  %s8 = call i64 asm sideeffect "addi.d $$s8, $$zero, 1", "={r31}"()
  ;; The volatile element store is selected as vstelm.b and needs the
  ;; out-of-range frame address of %s while no GPR is available.
  %e = extractelement <16 x i8> %a, i64 0
  store volatile i8 %e, ptr %s
  ;; Consume every clobbered value so all GPRs stay live across the store.
  call void asm sideeffect "# reg use $0", "{r0}"(i64 %zero)
  call void asm sideeffect "# reg use $0", "{r1}"(i64 %ra)
  call void asm sideeffect "# reg use $0", "{r2}"(i64 %tp)
  call void asm sideeffect "# reg use $0", "{r4}"(i64 %a0)
  call void asm sideeffect "# reg use $0", "{r5}"(i64 %a1)
  call void asm sideeffect "# reg use $0", "{r6}"(i64 %a2)
  call void asm sideeffect "# reg use $0", "{r7}"(i64 %a3)
  call void asm sideeffect "# reg use $0", "{r8}"(i64 %a4)
  call void asm sideeffect "# reg use $0", "{r9}"(i64 %a5)
  call void asm sideeffect "# reg use $0", "{r10}"(i64 %a6)
  call void asm sideeffect "# reg use $0", "{r11}"(i64 %a7)
  call void asm sideeffect "# reg use $0", "{r12}"(i64 %t0)
  call void asm sideeffect "# reg use $0", "{r13}"(i64 %t1)
  call void asm sideeffect "# reg use $0", "{r14}"(i64 %t2)
  call void asm sideeffect "# reg use $0", "{r15}"(i64 %t3)
  call void asm sideeffect "# reg use $0", "{r16}"(i64 %t4)
  call void asm sideeffect "# reg use $0", "{r17}"(i64 %t5)
  call void asm sideeffect "# reg use $0", "{r18}"(i64 %t6)
  call void asm sideeffect "# reg use $0", "{r19}"(i64 %t7)
  call void asm sideeffect "# reg use $0", "{r20}"(i64 %t8)
  ;; r21 Reserved (Non-allocatable)
  call void asm sideeffect "# reg use $0", "{r22}"(i64 %s9)
  call void asm sideeffect "# reg use $0", "{r23}"(i64 %s0)
  call void asm sideeffect "# reg use $0", "{r24}"(i64 %s1)
  call void asm sideeffect "# reg use $0", "{r25}"(i64 %s2)
  call void asm sideeffect "# reg use $0", "{r26}"(i64 %s3)
  call void asm sideeffect "# reg use $0", "{r27}"(i64 %s4)
  call void asm sideeffect "# reg use $0", "{r28}"(i64 %s5)
  call void asm sideeffect "# reg use $0", "{r29}"(i64 %s6)
  call void asm sideeffect "# reg use $0", "{r30}"(i64 %s7)
  call void asm sideeffect "# reg use $0", "{r31}"(i64 %s8)
  ret void
}

View File

@ -36,15 +36,15 @@ define void @caller(i32 %n) {
;
; LA64-LABEL: caller:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -64
; LA64-NEXT: .cfi_def_cfa_offset 64
; LA64-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 48 # 8-byte Folded Spill
; LA64-NEXT: st.d $s8, $sp, 40 # 8-byte Folded Spill
; LA64-NEXT: addi.d $sp, $sp, -128
; LA64-NEXT: .cfi_def_cfa_offset 128
; LA64-NEXT: st.d $ra, $sp, 120 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 112 # 8-byte Folded Spill
; LA64-NEXT: st.d $s8, $sp, 104 # 8-byte Folded Spill
; LA64-NEXT: .cfi_offset 1, -8
; LA64-NEXT: .cfi_offset 22, -16
; LA64-NEXT: .cfi_offset 31, -24
; LA64-NEXT: addi.d $fp, $sp, 64
; LA64-NEXT: addi.d $fp, $sp, 128
; LA64-NEXT: .cfi_def_cfa 22, 0
; LA64-NEXT: bstrins.d $sp, $zero, 5, 0
; LA64-NEXT: move $s8, $sp
@ -54,14 +54,14 @@ define void @caller(i32 %n) {
; LA64-NEXT: slli.d $a0, $a0, 4
; LA64-NEXT: sub.d $a0, $sp, $a0
; LA64-NEXT: move $sp, $a0
; LA64-NEXT: addi.d $a1, $s8, 0
; LA64-NEXT: addi.d $a1, $s8, 64
; LA64-NEXT: pcaddu18i $ra, %call36(callee)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: addi.d $sp, $fp, -64
; LA64-NEXT: ld.d $s8, $sp, 40 # 8-byte Folded Reload
; LA64-NEXT: ld.d $fp, $sp, 48 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 64
; LA64-NEXT: addi.d $sp, $fp, -128
; LA64-NEXT: ld.d $s8, $sp, 104 # 8-byte Folded Reload
; LA64-NEXT: ld.d $fp, $sp, 112 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 120 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 128
; LA64-NEXT: ret
%1 = alloca i8, i32 %n
%2 = alloca i32, align 64

View File

@ -28,22 +28,22 @@ define void @caller32() {
;
; LA64-LABEL: caller32:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -32
; LA64-NEXT: .cfi_def_cfa_offset 32
; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT: addi.d $sp, $sp, -64
; LA64-NEXT: .cfi_def_cfa_offset 64
; LA64-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 48 # 8-byte Folded Spill
; LA64-NEXT: .cfi_offset 1, -8
; LA64-NEXT: .cfi_offset 22, -16
; LA64-NEXT: addi.d $fp, $sp, 32
; LA64-NEXT: addi.d $fp, $sp, 64
; LA64-NEXT: .cfi_def_cfa 22, 0
; LA64-NEXT: bstrins.d $sp, $zero, 4, 0
; LA64-NEXT: addi.d $a0, $sp, 0
; LA64-NEXT: addi.d $a0, $sp, 32
; LA64-NEXT: pcaddu18i $ra, %call36(callee)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: addi.d $sp, $fp, -32
; LA64-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 32
; LA64-NEXT: addi.d $sp, $fp, -64
; LA64-NEXT: ld.d $fp, $sp, 48 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 64
; LA64-NEXT: ret
%1 = alloca i8, align 32
call void @callee(ptr %1)
@ -102,22 +102,22 @@ define void @caller64() {
;
; LA64-LABEL: caller64:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -64
; LA64-NEXT: .cfi_def_cfa_offset 64
; LA64-NEXT: st.d $ra, $sp, 56 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 48 # 8-byte Folded Spill
; LA64-NEXT: addi.d $sp, $sp, -128
; LA64-NEXT: .cfi_def_cfa_offset 128
; LA64-NEXT: st.d $ra, $sp, 120 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 112 # 8-byte Folded Spill
; LA64-NEXT: .cfi_offset 1, -8
; LA64-NEXT: .cfi_offset 22, -16
; LA64-NEXT: addi.d $fp, $sp, 64
; LA64-NEXT: addi.d $fp, $sp, 128
; LA64-NEXT: .cfi_def_cfa 22, 0
; LA64-NEXT: bstrins.d $sp, $zero, 5, 0
; LA64-NEXT: addi.d $a0, $sp, 0
; LA64-NEXT: addi.d $a0, $sp, 64
; LA64-NEXT: pcaddu18i $ra, %call36(callee)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: addi.d $sp, $fp, -64
; LA64-NEXT: ld.d $fp, $sp, 48 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 56 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 64
; LA64-NEXT: addi.d $sp, $fp, -128
; LA64-NEXT: ld.d $fp, $sp, 112 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 120 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 128
; LA64-NEXT: ret
%1 = alloca i8, align 64
call void @callee(ptr %1)
@ -176,22 +176,22 @@ define void @caller128() {
;
; LA64-LABEL: caller128:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -128
; LA64-NEXT: .cfi_def_cfa_offset 128
; LA64-NEXT: st.d $ra, $sp, 120 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 112 # 8-byte Folded Spill
; LA64-NEXT: addi.d $sp, $sp, -256
; LA64-NEXT: .cfi_def_cfa_offset 256
; LA64-NEXT: st.d $ra, $sp, 248 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 240 # 8-byte Folded Spill
; LA64-NEXT: .cfi_offset 1, -8
; LA64-NEXT: .cfi_offset 22, -16
; LA64-NEXT: addi.d $fp, $sp, 128
; LA64-NEXT: addi.d $fp, $sp, 256
; LA64-NEXT: .cfi_def_cfa 22, 0
; LA64-NEXT: bstrins.d $sp, $zero, 6, 0
; LA64-NEXT: addi.d $a0, $sp, 0
; LA64-NEXT: addi.d $a0, $sp, 128
; LA64-NEXT: pcaddu18i $ra, %call36(callee)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: addi.d $sp, $fp, -128
; LA64-NEXT: ld.d $fp, $sp, 112 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 120 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 128
; LA64-NEXT: addi.d $sp, $fp, -256
; LA64-NEXT: ld.d $fp, $sp, 240 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 248 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 256
; LA64-NEXT: ret
%1 = alloca i8, align 128
call void @callee(ptr %1)
@ -250,22 +250,22 @@ define void @caller256() {
;
; LA64-LABEL: caller256:
; LA64: # %bb.0:
; LA64-NEXT: addi.d $sp, $sp, -256
; LA64-NEXT: .cfi_def_cfa_offset 256
; LA64-NEXT: st.d $ra, $sp, 248 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 240 # 8-byte Folded Spill
; LA64-NEXT: addi.d $sp, $sp, -512
; LA64-NEXT: .cfi_def_cfa_offset 512
; LA64-NEXT: st.d $ra, $sp, 504 # 8-byte Folded Spill
; LA64-NEXT: st.d $fp, $sp, 496 # 8-byte Folded Spill
; LA64-NEXT: .cfi_offset 1, -8
; LA64-NEXT: .cfi_offset 22, -16
; LA64-NEXT: addi.d $fp, $sp, 256
; LA64-NEXT: addi.d $fp, $sp, 512
; LA64-NEXT: .cfi_def_cfa 22, 0
; LA64-NEXT: bstrins.d $sp, $zero, 7, 0
; LA64-NEXT: addi.d $a0, $sp, 0
; LA64-NEXT: addi.d $a0, $sp, 256
; LA64-NEXT: pcaddu18i $ra, %call36(callee)
; LA64-NEXT: jirl $ra, $ra, 0
; LA64-NEXT: addi.d $sp, $fp, -256
; LA64-NEXT: ld.d $fp, $sp, 240 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 248 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 256
; LA64-NEXT: addi.d $sp, $fp, -512
; LA64-NEXT: ld.d $fp, $sp, 496 # 8-byte Folded Reload
; LA64-NEXT: ld.d $ra, $sp, 504 # 8-byte Folded Reload
; LA64-NEXT: addi.d $sp, $sp, 512
; LA64-NEXT: ret
%1 = alloca i8, align 256
call void @callee(ptr %1)

View File

@ -121,19 +121,19 @@ define void @t3() {
;
; LA64-LABEL: t3:
; LA64: # %bb.0: # %entry
; LA64-NEXT: addi.d $sp, $sp, -64
; LA64-NEXT: .cfi_def_cfa_offset 64
; LA64-NEXT: addi.d $sp, $sp, -80
; LA64-NEXT: .cfi_def_cfa_offset 80
; LA64-NEXT: pcalau12i $a0, %pc_hi20(.L.str)
; LA64-NEXT: addi.d $a0, $a0, %pc_lo12(.L.str)
; LA64-NEXT: ld.h $a1, $a0, 20
; LA64-NEXT: ld.w $a2, $a0, 16
; LA64-NEXT: ld.d $a3, $a0, 8
; LA64-NEXT: ld.d $a0, $a0, 0
; LA64-NEXT: st.h $a1, $sp, 20
; LA64-NEXT: st.w $a2, $sp, 16
; LA64-NEXT: st.d $a3, $sp, 8
; LA64-NEXT: st.d $a0, $sp, 0
; LA64-NEXT: addi.d $sp, $sp, 64
; LA64-NEXT: st.h $a1, $sp, 36
; LA64-NEXT: st.w $a2, $sp, 32
; LA64-NEXT: st.d $a3, $sp, 24
; LA64-NEXT: st.d $a0, $sp, 16
; LA64-NEXT: addi.d $sp, $sp, 80
; LA64-NEXT: ret
entry:
%msgbuf = alloca [64 x i8], align 1

View File

@ -47,7 +47,7 @@ define i64 @va1(ptr %fmt, ...) {
; LA64-WITHFP-NEXT: st.d $a2, $fp, 16
; LA64-WITHFP-NEXT: st.d $a1, $fp, 8
; LA64-WITHFP-NEXT: addi.d $a1, $fp, 16
; LA64-WITHFP-NEXT: st.d $a1, $fp, -24
; LA64-WITHFP-NEXT: st.d $a1, $fp, -32
; LA64-WITHFP-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: addi.d $sp, $sp, 96
@ -94,7 +94,7 @@ define i64 @va1_va_arg(ptr %fmt, ...) nounwind {
; LA64-WITHFP-NEXT: st.d $a2, $fp, 16
; LA64-WITHFP-NEXT: st.d $a1, $fp, 8
; LA64-WITHFP-NEXT: addi.d $a1, $fp, 16
; LA64-WITHFP-NEXT: st.d $a1, $fp, -24
; LA64-WITHFP-NEXT: st.d $a1, $fp, -32
; LA64-WITHFP-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: addi.d $sp, $sp, 96
@ -112,11 +112,11 @@ define i64 @va1_va_arg(ptr %fmt, ...) nounwind {
define i64 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; LA64-FPELIM-LABEL: va1_va_arg_alloca:
; LA64-FPELIM: # %bb.0:
; LA64-FPELIM-NEXT: addi.d $sp, $sp, -96
; LA64-FPELIM-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-FPELIM-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-FPELIM-NEXT: st.d $s0, $sp, 8 # 8-byte Folded Spill
; LA64-FPELIM-NEXT: addi.d $fp, $sp, 32
; LA64-FPELIM-NEXT: addi.d $sp, $sp, -112
; LA64-FPELIM-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64-FPELIM-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64-FPELIM-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64-FPELIM-NEXT: addi.d $fp, $sp, 48
; LA64-FPELIM-NEXT: move $s0, $a1
; LA64-FPELIM-NEXT: st.d $a7, $fp, 56
; LA64-FPELIM-NEXT: st.d $a6, $fp, 48
@ -126,7 +126,7 @@ define i64 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; LA64-FPELIM-NEXT: st.d $a2, $fp, 16
; LA64-FPELIM-NEXT: st.d $a1, $fp, 8
; LA64-FPELIM-NEXT: addi.d $a0, $fp, 16
; LA64-FPELIM-NEXT: st.d $a0, $fp, -32
; LA64-FPELIM-NEXT: st.d $a0, $fp, -40
; LA64-FPELIM-NEXT: addi.d $a0, $a1, 15
; LA64-FPELIM-NEXT: bstrins.d $a0, $zero, 3, 0
; LA64-FPELIM-NEXT: sub.d $a0, $sp, $a0
@ -134,20 +134,20 @@ define i64 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; LA64-FPELIM-NEXT: pcaddu18i $ra, %call36(notdead)
; LA64-FPELIM-NEXT: jirl $ra, $ra, 0
; LA64-FPELIM-NEXT: move $a0, $s0
; LA64-FPELIM-NEXT: addi.d $sp, $fp, -32
; LA64-FPELIM-NEXT: ld.d $s0, $sp, 8 # 8-byte Folded Reload
; LA64-FPELIM-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-FPELIM-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-FPELIM-NEXT: addi.d $sp, $sp, 96
; LA64-FPELIM-NEXT: addi.d $sp, $fp, -48
; LA64-FPELIM-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64-FPELIM-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64-FPELIM-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64-FPELIM-NEXT: addi.d $sp, $sp, 112
; LA64-FPELIM-NEXT: ret
;
; LA64-WITHFP-LABEL: va1_va_arg_alloca:
; LA64-WITHFP: # %bb.0:
; LA64-WITHFP-NEXT: addi.d $sp, $sp, -96
; LA64-WITHFP-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: st.d $s0, $sp, 8 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: addi.d $fp, $sp, 32
; LA64-WITHFP-NEXT: addi.d $sp, $sp, -112
; LA64-WITHFP-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: addi.d $fp, $sp, 48
; LA64-WITHFP-NEXT: move $s0, $a1
; LA64-WITHFP-NEXT: st.d $a7, $fp, 56
; LA64-WITHFP-NEXT: st.d $a6, $fp, 48
@ -157,7 +157,7 @@ define i64 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; LA64-WITHFP-NEXT: st.d $a2, $fp, 16
; LA64-WITHFP-NEXT: st.d $a1, $fp, 8
; LA64-WITHFP-NEXT: addi.d $a0, $fp, 16
; LA64-WITHFP-NEXT: st.d $a0, $fp, -32
; LA64-WITHFP-NEXT: st.d $a0, $fp, -40
; LA64-WITHFP-NEXT: addi.d $a0, $a1, 15
; LA64-WITHFP-NEXT: bstrins.d $a0, $zero, 3, 0
; LA64-WITHFP-NEXT: sub.d $a0, $sp, $a0
@ -165,11 +165,11 @@ define i64 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; LA64-WITHFP-NEXT: pcaddu18i $ra, %call36(notdead)
; LA64-WITHFP-NEXT: jirl $ra, $ra, 0
; LA64-WITHFP-NEXT: move $a0, $s0
; LA64-WITHFP-NEXT: addi.d $sp, $fp, -32
; LA64-WITHFP-NEXT: ld.d $s0, $sp, 8 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: addi.d $sp, $sp, 96
; LA64-WITHFP-NEXT: addi.d $sp, $fp, -48
; LA64-WITHFP-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: addi.d $sp, $sp, 112
; LA64-WITHFP-NEXT: ret
%va = alloca ptr, align 8
call void @llvm.va_start(ptr %va)
@ -314,10 +314,10 @@ define void @va_aligned_stack_caller() nounwind {
;
; LA64-WITHFP-LABEL: va_aligned_stack_caller:
; LA64-WITHFP: # %bb.0:
; LA64-WITHFP-NEXT: addi.d $sp, $sp, -112
; LA64-WITHFP-NEXT: st.d $ra, $sp, 104 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: st.d $fp, $sp, 96 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: addi.d $fp, $sp, 112
; LA64-WITHFP-NEXT: addi.d $sp, $sp, -128
; LA64-WITHFP-NEXT: st.d $ra, $sp, 120 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: st.d $fp, $sp, 112 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: addi.d $fp, $sp, 128
; LA64-WITHFP-NEXT: ori $a0, $zero, 17
; LA64-WITHFP-NEXT: st.d $a0, $sp, 48
; LA64-WITHFP-NEXT: ori $a0, $zero, 16
@ -336,23 +336,23 @@ define void @va_aligned_stack_caller() nounwind {
; LA64-WITHFP-NEXT: lu32i.d $a0, 335544
; LA64-WITHFP-NEXT: lu52i.d $a0, $a0, -328
; LA64-WITHFP-NEXT: st.d $a0, $sp, 16
; LA64-WITHFP-NEXT: st.d $zero, $fp, -24
; LA64-WITHFP-NEXT: st.d $zero, $fp, -40
; LA64-WITHFP-NEXT: vrepli.b $vr0, 0
; LA64-WITHFP-NEXT: vst $vr0, $fp, -40
; LA64-WITHFP-NEXT: vst $vr0, $fp, -56
; LA64-WITHFP-NEXT: ori $a5, $zero, 1000
; LA64-WITHFP-NEXT: ori $a0, $zero, 1
; LA64-WITHFP-NEXT: ori $a1, $zero, 11
; LA64-WITHFP-NEXT: addi.d $a2, $fp, -48
; LA64-WITHFP-NEXT: addi.d $a2, $fp, -64
; LA64-WITHFP-NEXT: ori $a3, $zero, 12
; LA64-WITHFP-NEXT: ori $a4, $zero, 13
; LA64-WITHFP-NEXT: ori $a7, $zero, 1
; LA64-WITHFP-NEXT: st.d $a5, $fp, -48
; LA64-WITHFP-NEXT: st.d $a5, $fp, -64
; LA64-WITHFP-NEXT: move $a6, $zero
; LA64-WITHFP-NEXT: pcaddu18i $ra, %call36(va_aligned_stack_callee)
; LA64-WITHFP-NEXT: jirl $ra, $ra, 0
; LA64-WITHFP-NEXT: ld.d $fp, $sp, 96 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: ld.d $ra, $sp, 104 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: addi.d $sp, $sp, 112
; LA64-WITHFP-NEXT: ld.d $fp, $sp, 112 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: ld.d $ra, $sp, 120 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: addi.d $sp, $sp, 128
; LA64-WITHFP-NEXT: ret
%1 = call i32 (i32, ...) @va_aligned_stack_callee(i32 1, i32 11,
i256 1000, i32 12, i32 13, i128 18446744073709551616, i32 14,